2022-04-15 12:25:04

by Hyeonggon Yoo

Subject: [PATCH v2 03/23] mm/slab_common: remove CONFIG_NUMA ifdefs for common kmalloc functions

Now that slab_alloc_node() is available for SLAB when CONFIG_NUMA=n,
remove CONFIG_NUMA ifdefs for common kmalloc functions.
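
For reference, the !CONFIG_NUMA stubs removed here did nothing but
forward to the node-agnostic variants, discarding the node argument.
A representative example of the pattern being deleted (taken verbatim
from the slab.h hunk below):

static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

Since slab_alloc_node() now exists in every configuration, the
node-aware prototypes in slab.h and their definitions in each
allocator can be built unconditionally.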

Signed-off-by: Hyeonggon Yoo <[email protected]>
---
 include/linux/slab.h | 28 ----------------------------
 mm/slab.c            |  2 --
 mm/slob.c            |  5 +----
 mm/slub.c            |  6 ------
 4 files changed, 1 insertion(+), 40 deletions(-)
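
(Illustration only, not part of the patch: after this change a caller
can use the node-aware API identically in all configurations, e.g.

	/* NUMA_NO_NODE lets the allocator pick the node */
	void *buf = kmalloc_node(64, GFP_KERNEL, NUMA_NO_NODE);
	kfree(buf);

On CONFIG_NUMA=n the node hint simply has no effect.)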

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 11ceddcae9f4..a3b9d4c20d7e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -444,38 +444,18 @@ static __always_inline void kfree_bulk(size_t size, void **p)
 	kmem_cache_free_bulk(NULL, size, p);
 }
 
-#ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
 							 __alloc_size(1);
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
 									  __malloc;
-#else
-static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __kmalloc(size, flags);
-}
-
-static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
-{
-	return kmem_cache_alloc(s, flags);
-}
-#endif
 
 #ifdef CONFIG_TRACING
 extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
 				    __assume_slab_alignment __alloc_size(3);
 
-#ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 					 int node, size_t size) __assume_slab_alignment
 								__alloc_size(4);
-#else
-static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-						 gfp_t gfpflags, int node, size_t size)
-{
-	return kmem_cache_alloc_trace(s, gfpflags, size);
-}
-#endif /* CONFIG_NUMA */
 
 #else /* CONFIG_TRACING */
 static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
@@ -689,20 +669,12 @@ static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t
 }
 
 
-#ifdef CONFIG_NUMA
 extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
 					 unsigned long caller) __alloc_size(1);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
 				    _RET_IP_)
 
-#else /* CONFIG_NUMA */
-
-#define kmalloc_node_track_caller(size, flags, node) \
-	kmalloc_track_caller(size, flags)
-
-#endif /* CONFIG_NUMA */
-
 /*
  * Shortcuts
  */
diff --git a/mm/slab.c b/mm/slab.c
index f033d5b4fefb..5ad55ca96ab6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3545,7 +3545,6 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
-#ifdef CONFIG_NUMA
 /**
  * kmem_cache_alloc_node - Allocate an object on the specified node
  * @cachep: The cache to allocate from.
@@ -3619,7 +3618,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
-#endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_PRINTK
 void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
diff --git a/mm/slob.c b/mm/slob.c
index dfa6808dff36..c8c3b5662edf 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -534,14 +534,12 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
-#ifdef CONFIG_NUMA
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
 					int node, unsigned long caller)
 {
 	return __do_kmalloc_node(size, gfp, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
-#endif
 
 void kfree(const void *block)
 {
@@ -641,7 +639,7 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, gfp_
 	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_lru);
-#ifdef CONFIG_NUMA
+
 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
@@ -653,7 +651,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
 	return slob_alloc_node(cachep, gfp, node);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
-#endif
 
 static void __kmem_cache_free(void *b, int size)
 {
diff --git a/mm/slub.c b/mm/slub.c
index d7e8355b2f08..e36c148e5069 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3260,7 +3260,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
-#ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
@@ -3287,7 +3286,6 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
-#endif /* CONFIG_NUMA */
 
 /*
  * Slow path handling. This may still be called frequently since objects
@@ -4424,7 +4422,6 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
-#ifdef CONFIG_NUMA
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
@@ -4471,7 +4468,6 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
@@ -4929,7 +4925,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
-#ifdef CONFIG_NUMA
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 				  int node, unsigned long caller)
 {
@@ -4959,7 +4954,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
-#endif
 
 #ifdef CONFIG_SYSFS
 static int count_inuse(struct slab *slab)
--
2.32.0


2022-04-25 18:46:35

by Vlastimil Babka

Subject: Re: [PATCH v2 03/23] mm/slab_common: remove CONFIG_NUMA ifdefs for common kmalloc functions

On 4/14/22 10:57, Hyeonggon Yoo wrote:
> Now that slab_alloc_node() is available for SLAB when CONFIG_NUMA=n,
> remove CONFIG_NUMA ifdefs for common kmalloc functions.
>
> Signed-off-by: Hyeonggon Yoo <[email protected]>

Reviewed-by: Vlastimil Babka <[email protected]>