2022-04-14 17:25:12

by Hyeonggon Yoo

Subject: [PATCH v2 02/23] mm/slab: cleanup slab_alloc() and slab_alloc_node()

Make slab_alloc_node() available even when CONFIG_NUMA=n and
make slab_alloc() a wrapper of slab_alloc_node().

This is necessary for further cleanup.

Signed-off-by: Hyeonggon Yoo <[email protected]>
---
mm/slab.c | 50 +++++++++++++-------------------------------------
1 file changed, 13 insertions(+), 37 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index d854c24d5f5a..f033d5b4fefb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3187,38 +3187,6 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
return obj ? obj : fallback_alloc(cachep, flags);
}

-static void *__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid);
-
-static __always_inline void *
-slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
- unsigned long caller)
-{
- unsigned long save_flags;
- void *ptr;
- struct obj_cgroup *objcg = NULL;
- bool init = false;
-
- flags &= gfp_allowed_mask;
- cachep = slab_pre_alloc_hook(cachep, NULL, &objcg, 1, flags);
- if (unlikely(!cachep))
- return NULL;
-
- ptr = kfence_alloc(cachep, orig_size, flags);
- if (unlikely(ptr))
- goto out_hooks;
-
- cache_alloc_debugcheck_before(cachep, flags);
- local_irq_save(save_flags);
- ptr = __do_cache_alloc(cachep, flags, nodeid);
- local_irq_restore(save_flags);
- ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
- init = slab_want_init_on_alloc(flags, cachep);
-
-out_hooks:
- slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr, init);
- return ptr;
-}
-
static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
@@ -3267,8 +3235,8 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unus
#endif /* CONFIG_NUMA */

static __always_inline void *
-slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
- size_t orig_size, unsigned long caller)
+slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
+ int nodeid, size_t orig_size, unsigned long caller)
{
unsigned long save_flags;
void *objp;
@@ -3286,7 +3254,7 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,

cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
- objp = __do_cache_alloc(cachep, flags, NUMA_NO_NODE);
+ objp = __do_cache_alloc(cachep, flags, nodeid);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
prefetchw(objp);
@@ -3297,6 +3265,14 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
return objp;
}

+static __always_inline void *
+slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
+ size_t orig_size, unsigned long caller)
+{
+ return slab_alloc_node(cachep, lru, flags, NUMA_NO_NODE, orig_size,
+ caller);
+}
+
/*
* Caller needs to acquire correct kmem_cache_node's list_lock
* @list: List of detached free slabs should be freed by caller
@@ -3585,7 +3561,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
*/
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
- void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);
+ void *ret = slab_alloc_node(cachep, NULL, flags, nodeid, cachep->object_size, _RET_IP_);

trace_kmem_cache_alloc_node(_RET_IP_, ret,
cachep->object_size, cachep->size,
@@ -3603,7 +3579,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
{
void *ret;

- ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);
+ ret = slab_alloc_node(cachep, NULL, flags, nodeid, size, _RET_IP_);

ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc_node(_RET_IP_, ret,
--
2.32.0
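
For readers skimming the diff, the net shape of the code after this
patch is the following condensed sketch (the pre/post alloc hooks,
kfence_alloc() and the debug checks of the real functions are elided
here; see the diff above for the full bodies):

static __always_inline void *
slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
		int nodeid, size_t orig_size, unsigned long caller)
{
	unsigned long save_flags;
	void *objp;

	local_irq_save(save_flags);
	/* one implementation for both CONFIG_NUMA=y and =n builds */
	objp = __do_cache_alloc(cachep, flags, nodeid);
	local_irq_restore(save_flags);

	return objp;
}

/* slab_alloc() is now just slab_alloc_node() with no node preference */
static __always_inline void *
slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
	   size_t orig_size, unsigned long caller)
{
	return slab_alloc_node(cachep, lru, flags, NUMA_NO_NODE, orig_size,
			       caller);
}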


2022-04-26 00:14:21

by Vlastimil Babka

Subject: Re: [PATCH v2 02/23] mm/slab: cleanup slab_alloc() and slab_alloc_node()

On 4/14/22 10:57, Hyeonggon Yoo wrote:
> Make slab_alloc_node() available even when CONFIG_NUMA=n and
> make slab_alloc() a wrapper of slab_alloc_node().
>
> This is necessary for further cleanup.

It also removes a lot of nearly-duplicate code.

> Signed-off-by: Hyeonggon Yoo <[email protected]>

Reviewed-by: Vlastimil Babka <[email protected]>