2024-02-22 13:03:18

by Chengming Zhou

Subject: [PATCH] mm, slab: remove the corner case of inc_slabs_node()

From: Chengming Zhou <[email protected]>

early_kmem_cache_node_alloc() already calls inc_slabs_node() after
kmem_cache_node->node[node] has been initialized, so the earlier call
and the bootstrap special case in inc_slabs_node() can be removed.
inc_slabs_node() then no longer needs to consider whether the
kmem_cache_node structure exists.
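
For reference, the bootstrap ordering in early_kmem_cache_node_alloc()
after this change looks roughly like the sketch below (abridged from
mm/slub.c, with debug hooks and error handling elided). The remaining
inc_slabs_node() call only runs once kmem_cache_node->node[node] is
set up, so get_node() cannot return NULL there:

	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
	BUG_ON(!slab);
	...
	n = slab->freelist;
	...
	kmem_cache_node->node[node] = n;
	init_kmem_cache_node(n);
	/* n is now reachable via get_node(), so the plain increment is safe */
	inc_slabs_node(kmem_cache_node, node, slab->objects);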

Signed-off-by: Chengming Zhou <[email protected]>
---
mm/slub.c | 13 ++-----------
1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 284b751b3b64..3f413e5e1415 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1500,16 +1500,8 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
 
-	/*
-	 * May be called early in order to allocate a slab for the
-	 * kmem_cache_node structure. Solve the chicken-egg
-	 * dilemma by deferring the increment of the count during
-	 * bootstrap (see early_kmem_cache_node_alloc).
-	 */
-	if (likely(n)) {
-		atomic_long_inc(&n->nr_slabs);
-		atomic_long_add(objects, &n->total_objects);
-	}
+	atomic_long_inc(&n->nr_slabs);
+	atomic_long_add(objects, &n->total_objects);
 }
 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
 {
@@ -4877,7 +4869,6 @@ static void early_kmem_cache_node_alloc(int node)
 	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
 
 	BUG_ON(!slab);
-	inc_slabs_node(kmem_cache_node, slab_nid(slab), slab->objects);
 	if (slab_nid(slab) != node) {
 		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
 		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
--
2.40.1



2024-03-01 16:04:28

by Vlastimil Babka

Subject: Re: [PATCH] mm, slab: remove the corner case of inc_slabs_node()

On 2/22/24 14:02, [email protected] wrote:
> From: Chengming Zhou <[email protected]>
>
> early_kmem_cache_node_alloc() already calls inc_slabs_node() after
> kmem_cache_node->node[node] has been initialized, so the earlier call
> and the bootstrap special case in inc_slabs_node() can be removed.
> inc_slabs_node() then no longer needs to consider whether the
> kmem_cache_node structure exists.
>
> Signed-off-by: Chengming Zhou <[email protected]>

Well spotted, thanks. Added to slab/for-next.

> ---
> mm/slub.c | 13 ++-----------
> 1 file changed, 2 insertions(+), 11 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index 284b751b3b64..3f413e5e1415 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -1500,16 +1500,8 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
>  {
>  	struct kmem_cache_node *n = get_node(s, node);
>  
> -	/*
> -	 * May be called early in order to allocate a slab for the
> -	 * kmem_cache_node structure. Solve the chicken-egg
> -	 * dilemma by deferring the increment of the count during
> -	 * bootstrap (see early_kmem_cache_node_alloc).
> -	 */
> -	if (likely(n)) {
> -		atomic_long_inc(&n->nr_slabs);
> -		atomic_long_add(objects, &n->total_objects);
> -	}
> +	atomic_long_inc(&n->nr_slabs);
> +	atomic_long_add(objects, &n->total_objects);
>  }
>  static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
>  {
> @@ -4877,7 +4869,6 @@ static void early_kmem_cache_node_alloc(int node)
>  	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
>  
>  	BUG_ON(!slab);
> -	inc_slabs_node(kmem_cache_node, slab_nid(slab), slab->objects);
>  	if (slab_nid(slab) != node) {
>  		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
>  		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");