Make __kmalloc() a wrapper of __kmalloc_node().
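
There is no need for a separate __kmalloc() implementation per
allocator: __kmalloc(size, flags) behaves exactly like
__kmalloc_node(size, flags, NUMA_NO_NODE). Remove the __kmalloc()
definitions from SLAB, SLOB and SLUB and replace them with a single
static inline wrapper around __kmalloc_node() in include/linux/slab.h.
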
Signed-off-by: Hyeonggon Yoo <[email protected]>
---
include/linux/slab.h | 13 ++++++++++---
mm/slab.c | 34 ----------------------------------
mm/slob.c | 6 ------
mm/slub.c | 23 -----------------------
4 files changed, 10 insertions(+), 66 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index acdb4b7428f9..4c06d15f731c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -419,7 +419,16 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
+extern void *__kmalloc_node(size_t size, gfp_t flags, int node)
+ __assume_kmalloc_alignment
+ __alloc_size(1);
+
+static __always_inline __alloc_size(1) __assume_kmalloc_alignment
+void *__kmalloc(size_t size, gfp_t flags)
+{
+ return __kmalloc_node(size, flags, NUMA_NO_NODE);
+}
+
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
gfp_t gfpflags) __assume_slab_alignment __malloc;
@@ -444,8 +453,6 @@ static __always_inline void kfree_bulk(size_t size, void **p)
kmem_cache_free_bulk(NULL, size, p);
}
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
- __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
__malloc;
diff --git a/mm/slab.c b/mm/slab.c
index 5f20efc7a330..db7eab9e2e9f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3641,40 +3641,6 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
}
#endif
-/**
- * __do_kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @caller: function caller for debug tracking of the caller
- *
- * Return: pointer to the allocated memory or %NULL in case of error
- */
-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
- unsigned long caller)
-{
- struct kmem_cache *cachep;
- void *ret;
-
- if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
- return NULL;
- cachep = kmalloc_slab(size, flags);
- if (unlikely(ZERO_OR_NULL_PTR(cachep)))
- return cachep;
- ret = slab_alloc(cachep, NULL, flags, size, caller);
-
- ret = kasan_kmalloc(cachep, ret, size, flags);
- trace_kmalloc(caller, ret,
- size, cachep->size, flags);
-
- return ret;
-}
-
-void *__kmalloc(size_t size, gfp_t flags)
-{
- return __do_kmalloc(size, flags, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc);
-
/**
* kmem_cache_free - Deallocate an object
* @cachep: The cache the allocation was from.
diff --git a/mm/slob.c b/mm/slob.c
index 6d0fc6ad1413..ab67c8219e8d 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -522,12 +522,6 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
return ret;
}
-void *__kmalloc(size_t size, gfp_t gfp)
-{
- return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc);
-
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
int node, unsigned long caller)
{
diff --git a/mm/slub.c b/mm/slub.c
index e425c5c372de..44170b4f084b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4399,29 +4399,6 @@ static int __init setup_slub_min_objects(char *str)
__setup("slub_min_objects=", setup_slub_min_objects);
-void *__kmalloc(size_t size, gfp_t flags)
-{
- struct kmem_cache *s;
- void *ret;
-
- if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
- return kmalloc_large(size, flags);
-
- s = kmalloc_slab(size, flags);
-
- if (unlikely(ZERO_OR_NULL_PTR(s)))
- return s;
-
- ret = slab_alloc(s, NULL, flags, _RET_IP_, size);
-
- trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
-
- ret = kasan_kmalloc(s, ret, size, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(__kmalloc);
-
static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
struct page *page;
--
2.32.0
On 4/14/22 10:57, Hyeonggon Yoo wrote:
> Make __kmalloc() a wrapper of __kmalloc_node().
Again/similarly, it looks like this means SLUB will no longer miss the
trace for kmalloc_large().
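For reference, the large-size branch of the existing __kmalloc_node()
in SLUB looks roughly like this (paraphrased sketch from mm/slub.c,
not part of this patch):

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
		ret = kmalloc_large_node(size, flags, node);

		/* the trace event the removed __kmalloc() never emitted */
		trace_kmalloc_node(_RET_IP_, ret,
				   size, PAGE_SIZE << get_order(size),
				   flags, node);

		return ret;
	}

so routing __kmalloc() through __kmalloc_node() means the
>KMALLOC_MAX_CACHE_SIZE case is traced as well.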
> Signed-off-by: Hyeonggon Yoo <[email protected]>
Reviewed-by: Vlastimil Babka <[email protected]>
Nit below:
> ---
> include/linux/slab.h | 13 ++++++++++---
> mm/slab.c | 34 ----------------------------------
> mm/slob.c | 6 ------
> mm/slub.c | 23 -----------------------
> 4 files changed, 10 insertions(+), 66 deletions(-)
>
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index acdb4b7428f9..4c06d15f731c 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -419,7 +419,16 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
> #define kmalloc_index(s) __kmalloc_index(s, true)
> #endif /* !CONFIG_SLOB */
>
> -void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
> +extern void *__kmalloc_node(size_t size, gfp_t flags, int node)
> + __assume_kmalloc_alignment
> + __alloc_size(1);
> +
Again, no 'extern' please.
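I.e. just:

	void *__kmalloc_node(size_t size, gfp_t flags, int node)
		__assume_kmalloc_alignment __alloc_size(1);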
> +static __always_inline __alloc_size(1) __assume_kmalloc_alignment
> +void *__kmalloc(size_t size, gfp_t flags)
> +{
> + return __kmalloc_node(size, flags, NUMA_NO_NODE);
> +}
> +
> void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
> void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
> gfp_t gfpflags) __assume_slab_alignment __malloc;