2022-04-16 00:53:52

by Hyeonggon Yoo

Subject: [PATCH v2 12/23] mm/slab_common: cleanup kmalloc()

Now that kmalloc() and kmalloc_node() do the same job, make kmalloc() a
wrapper of kmalloc_node().

Remove kmem_cache_alloc_trace() that is now unused.

Signed-off-by: Hyeonggon Yoo <[email protected]>
---
include/linux/slab.h | 93 +++++++++++++++-----------------------------
mm/slab.c | 16 --------
mm/slub.c | 12 ------
3 files changed, 32 insertions(+), 89 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index eb457f20f415..ea168f8a248d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -497,23 +497,10 @@ static __always_inline void kfree_bulk(size_t size, void **p)
}

#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
- __assume_slab_alignment __alloc_size(3);
-
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t size) __assume_slab_alignment
__alloc_size(4);
-
#else /* CONFIG_TRACING */
-static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
- gfp_t flags, size_t size)
-{
- void *ret = kmem_cache_alloc(s, flags);
-
- ret = kasan_kmalloc(s, ret, size, flags);
- return ret;
-}
-
static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t size)
{
@@ -532,6 +519,37 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
return kmalloc_large_node(size, flags, NUMA_NO_NODE);
}

+#ifndef CONFIG_SLOB
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (__builtin_constant_p(size)) {
+ unsigned int index;
+
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large_node(size, flags, node);
+
+ index = kmalloc_index(size);
+
+ if (!index)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_node_trace(
+ kmalloc_caches[kmalloc_type(flags)][index],
+ flags, node, size);
+ }
+ return __kmalloc_node(size, flags, node);
+}
+#else
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large_node(size, flags, node);
+
+ return __kmalloc_node(size, flags, node);
+}
+#endif
+
+
/**
* kmalloc - allocate memory
* @size: how many bytes of memory are required.
@@ -588,55 +606,8 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
*/
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
- if (__builtin_constant_p(size)) {
-#ifndef CONFIG_SLOB
- unsigned int index;
-#endif
- if (size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large(size, flags);
-#ifndef CONFIG_SLOB
- index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_trace(
- kmalloc_caches[kmalloc_type(flags)][index],
- flags, size);
-#endif
- }
- return __kmalloc(size, flags);
-}
-
-#ifndef CONFIG_SLOB
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- if (__builtin_constant_p(size)) {
- unsigned int index;
-
- if (size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large_node(size, flags, node);
-
- index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_node_trace(
- kmalloc_caches[kmalloc_type(flags)][index],
- flags, node, size);
- }
- return __kmalloc_node(size, flags, node);
-}
-#else
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large_node(size, flags, node);
-
- return __kmalloc_node(size, flags, node);
+ return kmalloc_node(size, flags, NUMA_NO_NODE);
}
-#endif

/**
* kmalloc_array - allocate memory for an array.
diff --git a/mm/slab.c b/mm/slab.c
index c5ffe54c207a..b0aaca017f42 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3507,22 +3507,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);

-#ifdef CONFIG_TRACING
-void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
- void *ret;
-
- ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_);
-
- ret = kasan_kmalloc(cachep, ret, size, flags);
- trace_kmalloc(_RET_IP_, ret,
- size, cachep->size, flags);
- return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_trace);
-#endif
-
#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
gfp_t flags,
diff --git a/mm/slub.c b/mm/slub.c
index 2a2be2a8a5d0..892988990da7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3216,18 +3216,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *l
return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size);
}

-
-#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
- void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
- trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_trace);
-#endif
-
void *__kmem_cache_alloc_node(struct kmem_cache *s, struct list_lru *lru, gfp_t gfpflags,
int node, unsigned long caller __maybe_unused)
{
--
2.32.0


2022-04-27 10:34:49

by Vlastimil Babka

Subject: Re: [PATCH v2 12/23] mm/slab_common: cleanup kmalloc()

On 4/14/22 10:57, Hyeonggon Yoo wrote:
> Now that kmalloc() and kmalloc_node() do the same job, make kmalloc() a
> wrapper of kmalloc_node().
>
> Remove kmem_cache_alloc_trace() that is now unused.
>
> Signed-off-by: Hyeonggon Yoo <[email protected]>

From correctness point of view:

Reviewed-by: Vlastimil Babka <[email protected]>

But yeah, the impact of requiring the NUMA_NO_NODE parameter should be
evaluated. If it's significant, I believe we should still be able to
implement the common kmalloc(), but keep separate kmalloc() and
kmalloc_node() entry points.
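
Something like this untested sketch, perhaps (the point being that
kmalloc() keeps its own out-of-line __kmalloc() entry, so call sites
don't have to pass a node; how the two entries share code internally is
just illustrative here):

static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	/* Constant sizes take the same inline fast path as kmalloc_node(). */
	if (__builtin_constant_p(size))
		return kmalloc_node(size, flags, NUMA_NO_NODE);

	/* Separate out-of-line entry; no node argument at the call site. */
	return __kmalloc(size, flags);
}

/* ... with __kmalloc() folding in NUMA_NO_NODE once, out of line: */
void *__kmalloc(size_t size, gfp_t flags)
{
	return __kmalloc_node(size, flags, NUMA_NO_NODE);
}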


2022-04-27 10:43:41

by Joe Perches

Subject: Re: [PATCH v2 12/23] mm/slab_common: cleanup kmalloc()

On Thu, 2022-04-14 at 17:57 +0900, Hyeonggon Yoo wrote:
> Now that kmalloc() and kmalloc_node() do the same job, make kmalloc() a
> wrapper of kmalloc_node().

Why make every kmalloc() call site larger in object code size?

What is the overall object size change in a typical defconfig?
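
A before/after comparison with the in-tree script would show it, e.g.
something like:

	./scripts/bloat-o-meter vmlinux.old vmlinux.new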


2022-04-29 01:05:04

by Hyeonggon Yoo

Subject: Re: [PATCH v2 12/23] mm/slab_common: cleanup kmalloc()

On Tue, Apr 26, 2022 at 11:00:32AM -0700, Joe Perches wrote:
> On Thu, 2022-04-14 at 17:57 +0900, Hyeonggon Yoo wrote:
> > Now that kmalloc() and kmalloc_node() do the same job, make kmalloc() a
> > wrapper of kmalloc_node().
>
> Why make every kmalloc() call site larger in object code size?
>
> What is the overall object size change in a typical defconfig?

Thank you for pointing this out.

Every call to kmalloc() grows by 5 bytes due to passing NUMA_NO_NODE. On
x86_64's defconfig, the difference in vmlinux size is not very visible
because this patch also removes the tracepoint.
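
The 5 bytes are presumably just loading the node argument at each call
site; on x86_64 the non-constant-size path now looks something like:

	mov    $0xffffffff,%edx	# node = NUMA_NO_NODE (-1): opcode + imm32 = 5 bytes
	call   __kmalloc_node

instead of a bare call to __kmalloc.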

But yeah, adding 5 bytes to every caller seems questionable.
I'll think about this for a bit :)

Thanks!

--
Thanks,
Hyeonggon