This should result in better code. Currently kfree() makes a function
call between compilation units to __kmem_cache_free(), which does its
own virt_to_slab() and throws away the struct slab pointer we already
had in kfree(). With kfree() moved to mm/slub.c, that pointer can be
reused. Additionally, kfree() can now inline the whole SLUB freeing
fastpath.
Also move over free_large_kmalloc(), as its only callsites are now in
slub.c, and make it static.
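To illustrate the point, a simplified before/after sketch of the call
path (condensed from the diff below, not the literal kernel source):

	/* Before: kfree() lives in mm/slab_common.c and must call out. */
	void kfree(const void *object)
	{
		struct folio *folio = virt_to_folio(object);
		/* ... NULL and large-kmalloc cases handled here ... */
		__kmem_cache_free(folio_slab(folio)->slab_cache,
				  (void *)object, _RET_IP_); /* cross-unit call */
	}

	void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller)
	{
		/* redundantly recomputes the slab kfree() already looked up */
		slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller);
	}

	/* After: kfree() sits in mm/slub.c and passes the slab it already
	 * has straight to slab_free(), which can now be inlined. */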
Reviewed-by: Kees Cook <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
---
mm/slab.h | 4 ----
mm/slab_common.c | 45 ---------------------------------------------
mm/slub.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++-----
3 files changed, 46 insertions(+), 54 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 5ae6a978e9c2..35a55c4a407d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -395,8 +395,6 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller);
void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t orig_size,
unsigned long caller);
-void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
-
gfp_t kmalloc_fix_flags(gfp_t flags);
/* Functions provided by the slab allocators */
@@ -559,8 +557,6 @@ static inline int memcg_alloc_slab_cgroups(struct slab *slab,
}
#endif /* CONFIG_MEMCG_KMEM */
-void free_large_kmalloc(struct folio *folio, void *object);
-
size_t __ksize(const void *objp);
static inline size_t slab_ksize(const struct kmem_cache *s)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index bbc2e3f061f1..f4f275613d2a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -963,22 +963,6 @@ void __init create_kmalloc_caches(slab_flags_t flags)
slab_state = UP;
}
-void free_large_kmalloc(struct folio *folio, void *object)
-{
- unsigned int order = folio_order(folio);
-
- if (WARN_ON_ONCE(order == 0))
- pr_warn_once("object pointer: 0x%p\n", object);
-
- kmemleak_free(object);
- kasan_kfree_large(object);
- kmsan_kfree_large(object);
-
- mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
- -(PAGE_SIZE << order));
- __free_pages(folio_page(folio, 0), order);
-}
-
static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
static __always_inline
void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
@@ -1023,35 +1007,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
-/**
- * kfree - free previously allocated memory
- * @object: pointer returned by kmalloc() or kmem_cache_alloc()
- *
- * If @object is NULL, no operation is performed.
- */
-void kfree(const void *object)
-{
- struct folio *folio;
- struct slab *slab;
- struct kmem_cache *s;
-
- trace_kfree(_RET_IP_, object);
-
- if (unlikely(ZERO_OR_NULL_PTR(object)))
- return;
-
- folio = virt_to_folio(object);
- if (unlikely(!folio_test_slab(folio))) {
- free_large_kmalloc(folio, (void *)object);
- return;
- }
-
- slab = folio_slab(folio);
- s = slab->slab_cache;
- __kmem_cache_free(s, (void *)object, _RET_IP_);
-}
-EXPORT_SYMBOL(kfree);
-
/**
* __ksize -- Report full size of underlying allocation
* @object: pointer to the object
diff --git a/mm/slub.c b/mm/slub.c
index cc801f8258fe..2baa9e94d9df 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4197,11 +4197,6 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
return cachep;
}
-void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller)
-{
- slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller);
-}
-
/**
* kmem_cache_free - Deallocate an object
* @s: The cache the allocation was from.
@@ -4220,6 +4215,52 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
}
EXPORT_SYMBOL(kmem_cache_free);
+static void free_large_kmalloc(struct folio *folio, void *object)
+{
+ unsigned int order = folio_order(folio);
+
+ if (WARN_ON_ONCE(order == 0))
+ pr_warn_once("object pointer: 0x%p\n", object);
+
+ kmemleak_free(object);
+ kasan_kfree_large(object);
+ kmsan_kfree_large(object);
+
+ mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
+ -(PAGE_SIZE << order));
+ __free_pages(folio_page(folio, 0), order);
+}
+
+/**
+ * kfree - free previously allocated memory
+ * @object: pointer returned by kmalloc() or kmem_cache_alloc()
+ *
+ * If @object is NULL, no operation is performed.
+ */
+void kfree(const void *object)
+{
+ struct folio *folio;
+ struct slab *slab;
+ struct kmem_cache *s;
+ void *x = (void *)object;
+
+ trace_kfree(_RET_IP_, object);
+
+ if (unlikely(ZERO_OR_NULL_PTR(object)))
+ return;
+
+ folio = virt_to_folio(object);
+ if (unlikely(!folio_test_slab(folio))) {
+ free_large_kmalloc(folio, (void *)object);
+ return;
+ }
+
+ slab = folio_slab(folio);
+ s = slab->slab_cache;
+ slab_free(s, slab, x, NULL, &x, 1, _RET_IP_);
+}
+EXPORT_SYMBOL(kfree);
+
struct detached_freelist {
struct slab *slab;
void *tail;
--
2.42.1
On Mon, Nov 20, 2023 at 07:34:27PM +0100, Vlastimil Babka wrote:
> [...]
Looks good to me,
Reviewed-by: Hyeonggon Yoo <[email protected]>
nit: mm/kfence/report.c checks whether a function name starts with
"__kmem_cache_free", a function this patch removes.