2022-05-02 20:20:21

by Muchun Song

Subject: [PATCH v2] mm: slab: optimize memcg_slab_free_hook()

Most callers of memcg_slab_free_hook() already know the slab, which can
be passed to memcg_slab_free_hook() directly to avoid the overhead of
another call to virt_to_slab(). For bulk freeing of objects, the call to
slab_objcgs() in the loop in memcg_slab_free_hook() is redundant as well.
Rework memcg_slab_free_hook() and build_detached_freelist() to remove
this unnecessary overhead and to let memcg_slab_free_hook() handle bulk
freeing in slab_free().

Move the call site of memcg_slab_free_hook() from do_slab_free() to
slab_free() for slub to make the code clearer, since the current logic is
awkward (e.g. the caller needs to judge whether it has to call
memcg_slab_free_hook()). It is easy to make mistakes, such as the missing
calls to memcg_slab_free_hook() addressed by:

commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
commit ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")

This optimization is mainly for bulk object freeing. The following numbers
are for freeing 16 objects:

                      before   after
kmem_cache_free_bulk  ~430 ns  ~400 ns

The overhead is reduced by about 7% for 16-object freeing.
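
A minimal sketch of how such a 16-object measurement could be taken from a
test module follows. This is an illustrative assumption about the
methodology, not the exact benchmark used for the numbers above; the
"bulk_bench" cache name, object size, and iteration count are all
hypothetical.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ktime.h>

#define NR_OBJS 16
#define ITERS   100000

static int __init bulk_bench_init(void)
{
	/* Hypothetical cache; any small object size works for the test. */
	struct kmem_cache *s = kmem_cache_create("bulk_bench", 64, 0, 0, NULL);
	void *objs[NR_OBJS];
	u64 start, total = 0;
	int i;

	if (!s)
		return -ENOMEM;

	for (i = 0; i < ITERS; i++) {
		/*
		 * kmem_cache_alloc_bulk() returns NR_OBJS on success and 0
		 * on failure (freeing any partial allocation itself).
		 */
		if (kmem_cache_alloc_bulk(s, GFP_KERNEL, NR_OBJS, objs) != NR_OBJS)
			break;
		start = ktime_get_ns();
		kmem_cache_free_bulk(s, NR_OBJS, objs); /* the timed path */
		total += ktime_get_ns() - start;
	}

	/* Plain u64 division: a 64-bit build is assumed here. */
	pr_info("kmem_cache_free_bulk: %llu ns per %d-object free\n",
		i ? (unsigned long long)(total / i) : 0, NR_OBJS);

	kmem_cache_destroy(s);
	return 0;
}
module_init(bulk_bench_init);
MODULE_LICENSE("GPL");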

Signed-off-by: Muchun Song <[email protected]>
---
v2:
- Add numbers to commit log.

mm/slab.c | 4 ++--
mm/slab.h | 30 ++++++++---------------------
mm/slub.c | 66 +++++++++++++++++++++------------------------------------------
3 files changed, 32 insertions(+), 68 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index f8cd00f4ba13..2174962055ae 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3406,9 +3406,10 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
{
bool init;

+ memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1);
+
if (is_kfence_address(objp)) {
kmemleak_free_recursive(objp, cachep->flags);
- memcg_slab_free_hook(cachep, &objp, 1);
__kfence_free(objp);
return;
}
@@ -3441,7 +3442,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
check_irq_off();
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
- memcg_slab_free_hook(cachep, &objp, 1);

/*
* Skip calling cache_free_alien() when the platform is not numa.
diff --git a/mm/slab.h b/mm/slab.h
index db9fb5c8dae7..a8d5eb1c323f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -547,36 +547,22 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
obj_cgroup_put(objcg);
}

-static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
void **p, int objects)
{
- struct kmem_cache *s;
struct obj_cgroup **objcgs;
- struct obj_cgroup *objcg;
- struct slab *slab;
- unsigned int off;
int i;

if (!memcg_kmem_enabled())
return;

- for (i = 0; i < objects; i++) {
- if (unlikely(!p[i]))
- continue;
-
- slab = virt_to_slab(p[i]);
- /* we could be given a kmalloc_large() object, skip those */
- if (!slab)
- continue;
-
- objcgs = slab_objcgs(slab);
- if (!objcgs)
- continue;
+ objcgs = slab_objcgs(slab);
+ if (!objcgs)
+ return;

- if (!s_orig)
- s = slab->slab_cache;
- else
- s = s_orig;
+ for (i = 0; i < objects; i++) {
+ struct obj_cgroup *objcg;
+ unsigned int off;

off = obj_to_index(s, slab, p[i]);
objcg = objcgs[off];
@@ -628,7 +614,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
{
}

-static inline void memcg_slab_free_hook(struct kmem_cache *s,
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
void **p, int objects)
{
}
diff --git a/mm/slub.c b/mm/slub.c
index 1f699ddfff7f..3794afe32b5f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3435,9 +3435,6 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
struct kmem_cache_cpu *c;
unsigned long tid;

- /* memcg_slab_free_hook() is already called for bulk free. */
- if (!tail)
- memcg_slab_free_hook(s, &head, 1);
redo:
/*
* Determine the currently cpus per cpu slab.
@@ -3497,9 +3494,10 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
}

static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
- void *head, void *tail, int cnt,
+ void *head, void *tail, void **p, int cnt,
unsigned long addr)
{
+ memcg_slab_free_hook(s, slab, p, cnt);
/*
* With KASAN enabled slab_free_freelist_hook modifies the freelist
* to remove objects, whose reuse must be delayed.
@@ -3521,7 +3519,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
if (!s)
return;
trace_kmem_cache_free(_RET_IP_, x, s->name);
- slab_free(s, virt_to_slab(x), x, NULL, 1, _RET_IP_);
+ slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_);
}
EXPORT_SYMBOL(kmem_cache_free);

@@ -3562,79 +3560,59 @@ static inline
int build_detached_freelist(struct kmem_cache *s, size_t size,
void **p, struct detached_freelist *df)
{
- size_t first_skipped_index = 0;
int lookahead = 3;
void *object;
struct folio *folio;
- struct slab *slab;
-
- /* Always re-init detached_freelist */
- df->slab = NULL;
-
- do {
- object = p[--size];
- /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
- } while (!object && size);
-
- if (!object)
- return 0;
+ size_t same;

+ object = p[--size];
folio = virt_to_folio(object);
if (!s) {
/* Handle kalloc'ed objects */
if (unlikely(!folio_test_slab(folio))) {
free_large_kmalloc(folio, object);
- p[size] = NULL; /* mark object processed */
+ df->slab = NULL;
return size;
}
/* Derive kmem_cache from object */
- slab = folio_slab(folio);
- df->s = slab->slab_cache;
+ df->slab = folio_slab(folio);
+ df->s = df->slab->slab_cache;
} else {
- slab = folio_slab(folio);
+ df->slab = folio_slab(folio);
df->s = cache_from_obj(s, object); /* Support for memcg */
}

- if (is_kfence_address(object)) {
- slab_free_hook(df->s, object, false);
- __kfence_free(object);
- p[size] = NULL; /* mark object processed */
- return size;
- }
-
/* Start new detached freelist */
- df->slab = slab;
- set_freepointer(df->s, object, NULL);
df->tail = object;
df->freelist = object;
- p[size] = NULL; /* mark object processed */
df->cnt = 1;

+ if (is_kfence_address(object))
+ return size;
+
+ set_freepointer(df->s, object, NULL);
+
+ same = size;
while (size) {
object = p[--size];
- if (!object)
- continue; /* Skip processed objects */
-
/* df->slab is always set at this point */
if (df->slab == virt_to_slab(object)) {
/* Opportunity build freelist */
set_freepointer(df->s, object, df->freelist);
df->freelist = object;
df->cnt++;
- p[size] = NULL; /* mark object processed */
-
+ same--;
+ if (size != same)
+ swap(p[size], p[same]);
continue;
}

/* Limit look ahead search */
if (!--lookahead)
break;
-
- if (!first_skipped_index)
- first_skipped_index = size + 1;
}

- return first_skipped_index;
+ return same;
}

/* Note that interrupts must be enabled when calling this function. */
@@ -3643,7 +3621,6 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
if (WARN_ON(!size))
return;

- memcg_slab_free_hook(s, p, size);
do {
struct detached_freelist df;

@@ -3651,7 +3628,8 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
if (!df.slab)
continue;

- slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, _RET_IP_);
+ slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt,
+ _RET_IP_);
} while (likely(size));
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
@@ -4554,7 +4532,7 @@ void kfree(const void *x)
return;
}
slab = folio_slab(folio);
- slab_free(slab->slab_cache, slab, object, NULL, 1, _RET_IP_);
+ slab_free(slab->slab_cache, slab, object, NULL, &object, 1, _RET_IP_);
}
EXPORT_SYMBOL(kfree);

--
2.11.0


2022-05-09 08:39:54

by Hyeonggon Yoo

Subject: Re: [PATCH v2] mm: slab: optimize memcg_slab_free_hook()

On Fri, Apr 29, 2022 at 08:30:44PM +0800, Muchun Song wrote:
> Most callers of memcg_slab_free_hook() already know the slab, which can
> be passed to memcg_slab_free_hook() directly to avoid the overhead of
> another call to virt_to_slab(). For bulk freeing of objects, the call to
> slab_objcgs() in the loop in memcg_slab_free_hook() is redundant as well.
> Rework memcg_slab_free_hook() and build_detached_freelist() to remove
> this unnecessary overhead and to let memcg_slab_free_hook() handle bulk
> freeing in slab_free().
>
> Move the call site of memcg_slab_free_hook() from do_slab_free() to
> slab_free() for slub to make the code clearer, since the current logic is
> awkward (e.g. the caller needs to judge whether it has to call
> memcg_slab_free_hook()). It is easy to make mistakes, such as the missing
> calls to memcg_slab_free_hook() addressed by:
>
> commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
> commit ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")
>
> This optimization is mainly for bulk object freeing. The following numbers
> are for freeing 16 objects:
>
>                       before   after
> kmem_cache_free_bulk  ~430 ns  ~400 ns
>
> The overhead is reduced by about 7% for 16-object freeing.
>
> Signed-off-by: Muchun Song <[email protected]>
> ---
> v2:
> - Add numbers to commit log.
>
> mm/slab.c | 4 ++--
> mm/slab.h | 30 ++++++++---------------------
> mm/slub.c | 66 +++++++++++++++++++++------------------------------------------
> 3 files changed, 32 insertions(+), 68 deletions(-)
>
> diff --git a/mm/slab.c b/mm/slab.c
> index f8cd00f4ba13..2174962055ae 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -3406,9 +3406,10 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
> {
> bool init;
>
> + memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1);
> +
> if (is_kfence_address(objp)) {
> kmemleak_free_recursive(objp, cachep->flags);
> - memcg_slab_free_hook(cachep, &objp, 1);
> __kfence_free(objp);
> return;
> }
> @@ -3441,7 +3442,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
> check_irq_off();
> kmemleak_free_recursive(objp, cachep->flags);
> objp = cache_free_debugcheck(cachep, objp, caller);
> - memcg_slab_free_hook(cachep, &objp, 1);
>

The SLAB part looks fine.

> /*
> * Skip calling cache_free_alien() when the platform is not numa.
> diff --git a/mm/slab.h b/mm/slab.h
> index db9fb5c8dae7..a8d5eb1c323f 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -547,36 +547,22 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
> obj_cgroup_put(objcg);
> }
>
> -static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
> +static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
> void **p, int objects)
> {
> - struct kmem_cache *s;
> struct obj_cgroup **objcgs;
> - struct obj_cgroup *objcg;
> - struct slab *slab;
> - unsigned int off;
> int i;
>
> if (!memcg_kmem_enabled())
> return;
>
> - for (i = 0; i < objects; i++) {
> - if (unlikely(!p[i]))
> - continue;
> -
> - slab = virt_to_slab(p[i]);
> - /* we could be given a kmalloc_large() object, skip those */
> - if (!slab)
> - continue;
> -

Oh, memcg_slab_free_hook() no longer takes kmalloc_large() objects because
such objects do not have a corresponding slab.

> - objcgs = slab_objcgs(slab);
> - if (!objcgs)
> - continue;
> + objcgs = slab_objcgs(slab);
> + if (!objcgs)
> + return;

Now all objects in void **p should be in the same slab, so we can remove
the redundant per-object lookups (virt_to_slab() and slab_objcgs()) from
the loop.
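
For reference, the only per-object work left in the loop is the index
computation. Below is a standalone userspace sketch of the idea behind
obj_to_index(), simplified from the real helper (which takes the
kmem_cache and uses a precomputed reciprocal divide); struct fake_slab and
slot_of() are hypothetical stand-ins:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the slab metadata obj_to_index() consults. */
struct fake_slab {
	uintptr_t base;         /* address of the slab's first object */
	unsigned int obj_size;  /* size of one object slot */
};

/* Same idea as obj_to_index(): which slot of the slab does obj occupy? */
static unsigned int slot_of(const struct fake_slab *slab, uintptr_t obj)
{
	return (obj - slab->base) / slab->obj_size;
}

int main(void)
{
	struct fake_slab slab = { .base = 0x1000, .obj_size = 64 };

	/* An object at 0x1080 is 128 bytes in: slot 2. */
	printf("index = %u\n", slot_of(&slab, 0x1080));
	return 0;
}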

> - if (!s_orig)
> - s = slab->slab_cache;
> - else
> - s = s_orig;
> + for (i = 0; i < objects; i++) {
> + struct obj_cgroup *objcg;
> + unsigned int off;
>
> off = obj_to_index(s, slab, p[i]);
> objcg = objcgs[off];
> @@ -628,7 +614,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
> {
> }
>
> -static inline void memcg_slab_free_hook(struct kmem_cache *s,
> +static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
> void **p, int objects)
> {
> }
> diff --git a/mm/slub.c b/mm/slub.c
> index 1f699ddfff7f..3794afe32b5f 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3435,9 +3435,6 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
> struct kmem_cache_cpu *c;
> unsigned long tid;
>
> - /* memcg_slab_free_hook() is already called for bulk free. */
> - if (!tail)
> - memcg_slab_free_hook(s, &head, 1);
> redo:
> /*
> * Determine the currently cpus per cpu slab.
> @@ -3497,9 +3494,10 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
> }
>
> static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
> - void *head, void *tail, int cnt,
> + void *head, void *tail, void **p, int cnt,
> unsigned long addr)
> {
> + memcg_slab_free_hook(s, slab, p, cnt);
> /*
> * With KASAN enabled slab_free_freelist_hook modifies the freelist
> * to remove objects, whose reuse must be delayed.
> @@ -3521,7 +3519,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
> if (!s)
> return;
> trace_kmem_cache_free(_RET_IP_, x, s->name);
> - slab_free(s, virt_to_slab(x), x, NULL, 1, _RET_IP_);
> + slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_);
> }

The memcg slab free hook is moved from do_slab_free() to slab_free().

> EXPORT_SYMBOL(kmem_cache_free);
>
> @@ -3562,79 +3560,59 @@ static inline
> int build_detached_freelist(struct kmem_cache *s, size_t size,
> void **p, struct detached_freelist *df)
> {
> - size_t first_skipped_index = 0;
> int lookahead = 3;
> void *object;
> struct folio *folio;
> - struct slab *slab;
> -
> - /* Always re-init detached_freelist */
> - df->slab = NULL;
> -
> - do {
> - object = p[--size];
> - /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
> - } while (!object && size);
> -
> - if (!object)
> - return 0;


Now it does not mark processed objects with p[size] = NULL.

> + size_t same;
>
> + object = p[--size];
> folio = virt_to_folio(object);
> if (!s) {
> /* Handle kalloc'ed objects */
> if (unlikely(!folio_test_slab(folio))) {
> free_large_kmalloc(folio, object);
> - p[size] = NULL; /* mark object processed */
> + df->slab = NULL;
> return size;

This part looks good too, as kmem_cache_free_bulk() just skips the entry
if df->slab == NULL.

> }
> /* Derive kmem_cache from object */
> - slab = folio_slab(folio);
> - df->s = slab->slab_cache;
> + df->slab = folio_slab(folio);
> + df->s = df->slab->slab_cache;
> } else {
> - slab = folio_slab(folio);
> + df->slab = folio_slab(folio);
> df->s = cache_from_obj(s, object); /* Support for memcg */
> }
>
> - if (is_kfence_address(object)) {
> - slab_free_hook(df->s, object, false);
> - __kfence_free(object);
> - p[size] = NULL; /* mark object processed */
> - return size;
> - }
> -
> /* Start new detached freelist */
> - df->slab = slab;
> - set_freepointer(df->s, object, NULL);
> df->tail = object;
> df->freelist = object;
> - p[size] = NULL; /* mark object processed */
> df->cnt = 1;
>
> + if (is_kfence_address(object))
> + return size;
> +

and kfence objects, too, are freed in slab_free() using the detached
freelist after this patch.

> + set_freepointer(df->s, object, NULL);
> +
> + same = size;
> while (size) {
> object = p[--size];
> - if (!object)
> - continue; /* Skip processed objects */
> -
> /* df->slab is always set at this point */
> if (df->slab == virt_to_slab(object)) {
> /* Opportunity build freelist */
> set_freepointer(df->s, object, df->freelist);
> df->freelist = object;
> df->cnt++;
> - p[size] = NULL; /* mark object processed */
> -
> + same--;
> + if (size != same)
> + swap(p[size], p[same]);
> continue;

Now that SLUB does not mark processed objects with p[size] = NULL, it has
to group objects from the same slab together at the tail of the array,
hence the swap().
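
To make that grouping invariant concrete, here is a standalone userspace
sketch. It is hypothetical: small integers stand in for slab pointers, and
group_tail() mimics only the swap()/size bookkeeping of the reworked
build_detached_freelist(), not the freelist construction.

#include <stdio.h>
#include <stddef.h>

#define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)

/*
 * Scan from the end of p[], keep entries matching the anchor "slab"
 * packed at the tail via swap(), and return the remaining size for the
 * caller's next pass -- the same contract as build_detached_freelist().
 */
static size_t group_tail(int *p, size_t size)
{
	int lookahead = 3;
	int slab = p[--size];   /* anchor: "slab" of the last object */
	size_t same = size;

	while (size) {
		if (p[--size] == slab) {
			same--;
			if (size != same)
				SWAP(p[size], p[same]);
			continue;
		}
		if (!--lookahead)       /* limit look-ahead search */
			break;
	}
	return same;
}

int main(void)
{
	int p[] = { 2, 1, 2, 1, 1 };    /* "slab ids" of five objects */
	size_t size = group_tail(p, 5);

	/*
	 * Slab-1 objects are grouped at the tail: p = { 2, 2, 1, 1, 1 },
	 * and size = 2 is what the next pass treats as the array size.
	 */
	for (size_t i = 0; i < 5; i++)
		printf("%d ", p[i]);
	printf("-> next size = %zu\n", size);
	return 0;
}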

> }
>
> /* Limit look ahead search */
> if (!--lookahead)
> break;
> -
> - if (!first_skipped_index)
> - first_skipped_index = size + 1;
> }
>
> - return first_skipped_index;
> + return same;
> }

'same' is the last index of an object freed in the previous step, so there
should be no problem if the next step treats it as the 'size' of the
array.

>
> /* Note that interrupts must be enabled when calling this function. */
> @@ -3643,7 +3621,6 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
> if (WARN_ON(!size))
> return;
>
> - memcg_slab_free_hook(s, p, size);
> do {
> struct detached_freelist df;
>
> @@ -3651,7 +3628,8 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
> if (!df.slab)
> continue;
>
> - slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, _RET_IP_);
> + slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt,
> + _RET_IP_);
> } while (likely(size));
> }
> EXPORT_SYMBOL(kmem_cache_free_bulk);
> @@ -4554,7 +4532,7 @@ void kfree(const void *x)
> return;
> }
> slab = folio_slab(folio);
> - slab_free(slab->slab_cache, slab, object, NULL, 1, _RET_IP_);
> + slab_free(slab->slab_cache, slab, object, NULL, &object, 1, _RET_IP_);
> }
> EXPORT_SYMBOL(kfree);
>
> --
> 2.11.0
>
>

Nice improvements.
Looks good to me.

Reviewed-by: Hyeonggon Yoo <[email protected]>

--
Thanks,
Hyeonggon

2022-05-25 12:13:53

by Vlastimil Babka

Subject: Re: [PATCH v2] mm: slab: optimize memcg_slab_free_hook()

On 4/29/22 14:30, Muchun Song wrote:
> Most callers of memcg_slab_free_hook() already know the slab, which can
> be passed to memcg_slab_free_hook() directly to avoid the overhead of
> another call to virt_to_slab(). For bulk freeing of objects, the call to
> slab_objcgs() in the loop in memcg_slab_free_hook() is redundant as well.
> Rework memcg_slab_free_hook() and build_detached_freelist() to remove
> this unnecessary overhead and to let memcg_slab_free_hook() handle bulk
> freeing in slab_free().
>
> Move the call site of memcg_slab_free_hook() from do_slab_free() to
> slab_free() for slub to make the code clearer, since the current logic is
> awkward (e.g. the caller needs to judge whether it has to call
> memcg_slab_free_hook()). It is easy to make mistakes, such as the missing
> calls to memcg_slab_free_hook() addressed by:
>
> commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
> commit ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")

Hm, is this commit also fixing such a bug? In mm/slab.c __cache_free():

/* KASAN might put objp into memory quarantine, delaying its reuse. */
if (kasan_slab_free(cachep, objp, init))
return;

before this patch we do not reach memcg_slab_free_hook() if
kasan_slab_free() returns true; after this patch we do. AFAICS SLUB always
did memcg_slab_free_hook() in the case of kasan_slab_free(), so it's the
correct thing to do?

> This optimization is mainly for bulk object freeing. The following numbers
> are for freeing 16 objects:
>
>                       before   after
> kmem_cache_free_bulk  ~430 ns  ~400 ns
>
> The overhead is reduced by about 7% for 16-object freeing.
>
> Signed-off-by: Muchun Song <[email protected]>

Otherwise looks good, will add to slab tree for 5.20, thanks.


2022-05-25 15:13:33

by Vlastimil Babka

Subject: Re: [PATCH v2] mm: slab: optimize memcg_slab_free_hook()

On 5/25/22 04:18, Muchun Song wrote:
> On Tue, May 24, 2022 at 07:05:31PM +0200, Vlastimil Babka wrote:
>> On 4/29/22 14:30, Muchun Song wrote:
>> > Most callers of memcg_slab_free_hook() already know the slab, which can
>> > be passed to memcg_slab_free_hook() directly to avoid the overhead of
>> > another call to virt_to_slab(). For bulk freeing of objects, the call to
>> > slab_objcgs() in the loop in memcg_slab_free_hook() is redundant as well.
>> > Rework memcg_slab_free_hook() and build_detached_freelist() to remove
>> > this unnecessary overhead and to let memcg_slab_free_hook() handle bulk
>> > freeing in slab_free().
>> >
>> > Move the call site of memcg_slab_free_hook() from do_slab_free() to
>> > slab_free() for slub to make the code clearer, since the current logic is
>> > awkward (e.g. the caller needs to judge whether it has to call
>> > memcg_slab_free_hook()). It is easy to make mistakes, such as the missing
>> > calls to memcg_slab_free_hook() addressed by:
>> >
>> > commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
>> > commit ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")
>>
>> Hm, is this commit also fixing such a bug? In mm/slab.c __cache_free():
>>
>
> Right.
>
>> /* KASAN might put objp into memory quarantine, delaying its reuse. */
>> if (kasan_slab_free(cachep, objp, init))
>> return;
>>
>> before this patch we do not reach memcg_slab_free_hook() if
>> kasan_slab_free() returns true; after this patch we do. AFAICS SLUB always
>> did memcg_slab_free_hook() in the case of kasan_slab_free(), so it's the
>> correct thing to do?
>>
>
> I don't think it is an issue since memcg_slab_free_hook()
> mainly does memory accounting housekeeping. Doing it in
> advance is not an issue. Actually, we have already done
> this since

Yes, it's not an issue. What is likely an issue is skipping the memcg
hook, as the accounting will become wrong.

> commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
>
> From this commit, the objects freed via kmem_cache_free_bulk()
> in mm/slub.c have called memcg_slab_free_hook() even if
> kasan_slab_free() returns true. Right?

Yeah, so that means SLAB will indeed be fixed by this patch.
We should mention it in the changelog then. I wouldn't cc stable though, as
it's quite a corner case and your patch is not trivial; backports might
conflict and/or miss some prerequisite etc. If we cared a lot, a preceding
small patch fixing just the SLAB case would be safer.


2022-05-26 00:19:21

by Muchun Song

Subject: Re: [PATCH v2] mm: slab: optimize memcg_slab_free_hook()

On Wed, May 25, 2022 at 12:09:19PM +0200, Vlastimil Babka wrote:
> On 5/25/22 11:15, Muchun Song wrote:
> > On Wed, May 25, 2022 at 09:34:58AM +0200, Vlastimil Babka wrote:
> >> On 5/25/22 04:18, Muchun Song wrote:
> >> > On Tue, May 24, 2022 at 07:05:31PM +0200, Vlastimil Babka wrote:
> >> >> On 4/29/22 14:30, Muchun Song wrote:
> >> >> > Most callers of memcg_slab_free_hook() already know the slab, which can
> >> >> > be passed to memcg_slab_free_hook() directly to avoid the overhead of
> >> >> > another call to virt_to_slab(). For bulk freeing of objects, the call to
> >> >> > slab_objcgs() in the loop in memcg_slab_free_hook() is redundant as well.
> >> >> > Rework memcg_slab_free_hook() and build_detached_freelist() to remove
> >> >> > this unnecessary overhead and to let memcg_slab_free_hook() handle bulk
> >> >> > freeing in slab_free().
> >> >> >
> >> >> > Move the call site of memcg_slab_free_hook() from do_slab_free() to
> >> >> > slab_free() for slub to make the code clearer, since the current logic is
> >> >> > awkward (e.g. the caller needs to judge whether it has to call
> >> >> > memcg_slab_free_hook()). It is easy to make mistakes, such as the missing
> >> >> > calls to memcg_slab_free_hook() addressed by:
> >> >> >
> >> >> > commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
> >> >> > commit ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")
> >> >>
> >> >> Hm, is this commit also fixing such a bug? In mm/slab.c __cache_free():
> >> >>
> >
> > Sorry, I think I have misread it and misled you here. I mean commit
>
> My bad, I should have said "this patch" referring to yours, not "this
> commit" which could refer to ae085d7f9365.
>
> > ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")
> > is a bug fix; this commit does not fix any issue, since __cache_free()
> > will be called from qlink_free() and __cache_free() will call
> > memcg_slab_free_hook(), so there are no issues. This commit is purely
> > an improvement to memcg_slab_free_hook().
>
> Ah, I see, indeed. The un-quarantining in kasan went through
> memcg_slab_free_hook() before your patch. But yeah, it's better to do it
> earlier: the freed object's user is the one who charged it to the memcg,
> and is no longer using it - no reason to keep it accounted while in
> kasan's quarantine.
>

I couldn't agree more.

Thanks.

2022-05-26 10:52:25

by Muchun Song

Subject: Re: [PATCH v2] mm: slab: optimize memcg_slab_free_hook()

On Wed, May 25, 2022 at 09:34:58AM +0200, Vlastimil Babka wrote:
> On 5/25/22 04:18, Muchun Song wrote:
> > On Tue, May 24, 2022 at 07:05:31PM +0200, Vlastimil Babka wrote:
> >> On 4/29/22 14:30, Muchun Song wrote:
> >> > Most callers of memcg_slab_free_hook() already know the slab, which can
> >> > be passed to memcg_slab_free_hook() directly to avoid the overhead of
> >> > another call to virt_to_slab(). For bulk freeing of objects, the call to
> >> > slab_objcgs() in the loop in memcg_slab_free_hook() is redundant as well.
> >> > Rework memcg_slab_free_hook() and build_detached_freelist() to remove
> >> > this unnecessary overhead and to let memcg_slab_free_hook() handle bulk
> >> > freeing in slab_free().
> >> >
> >> > Move the call site of memcg_slab_free_hook() from do_slab_free() to
> >> > slab_free() for slub to make the code clearer, since the current logic is
> >> > awkward (e.g. the caller needs to judge whether it has to call
> >> > memcg_slab_free_hook()). It is easy to make mistakes, such as the missing
> >> > calls to memcg_slab_free_hook() addressed by:
> >> >
> >> > commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
> >> > commit ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")
> >>
> >> Hm, is this commit also fixing such a bug? In mm/slab.c __cache_free():
> >>

Sorry, I think I have misread it and misled you here. I mean commit
ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")
is a bug fix; this commit does not fix any issue, since __cache_free()
will be called from qlink_free() and __cache_free() will call
memcg_slab_free_hook(), so there are no issues. This commit is purely
an improvement to memcg_slab_free_hook().

> >
> > Right.
> >
> >> /* KASAN might put objp into memory quarantine, delaying its reuse. */
> >> if (kasan_slab_free(cachep, objp, init))
> >> return;
> >>
> >> before this patch we do not reach memcg_slab_free_hook() if
> >> kasan_slab_free() returns true; after this patch we do. AFAICS SLUB always
> >> did memcg_slab_free_hook() in the case of kasan_slab_free(), so it's the
> >> correct thing to do?
> >>
> >
> > I don't think it is an issue since memcg_slab_free_hook()
> > mainly does memory accounting housekeeping. Doing it in
> > advance is not an issue. Actually, we have already done
> > this since
>
> Yes, it's not an issue. What is likely an issue is skipping the memcg
> hook, as the accounting will become wrong.
>
> > commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
> >
> > From this commit, the objects freed via kmem_cache_free_bulk()
> > in mm/slub.c have called memcg_slab_free_hook() even if
> > kasan_slab_free() returns true. Right?
>
> Yeah, so that means SLAB will indeed be fixed by this patch.

When I reached here, I realized I may have misled you in the previous thread.
This commit does not fix any bugs. Sorry for the confusion.

> We should mention it in the changelog then. I wouldn't cc stable though, as

So I think we do not need to mention it in the changelog.

Thanks.

> it's quite a corner case and your patch is not trivial; backports might
> conflict and/or miss some prerequisite etc. If we cared a lot, a preceding
> small patch fixing just the SLAB case would be safer.

2022-05-26 18:32:54

by Vlastimil Babka

Subject: Re: [PATCH v2] mm: slab: optimize memcg_slab_free_hook()

On 5/25/22 11:15, Muchun Song wrote:
> On Wed, May 25, 2022 at 09:34:58AM +0200, Vlastimil Babka wrote:
>> On 5/25/22 04:18, Muchun Song wrote:
>> > On Tue, May 24, 2022 at 07:05:31PM +0200, Vlastimil Babka wrote:
>> >> On 4/29/22 14:30, Muchun Song wrote:
>> >> > Most callers of memcg_slab_free_hook() already know the slab, which can
>> >> > be passed to memcg_slab_free_hook() directly to avoid the overhead of
>> >> > another call to virt_to_slab(). For bulk freeing of objects, the call to
>> >> > slab_objcgs() in the loop in memcg_slab_free_hook() is redundant as well.
>> >> > Rework memcg_slab_free_hook() and build_detached_freelist() to remove
>> >> > this unnecessary overhead and to let memcg_slab_free_hook() handle bulk
>> >> > freeing in slab_free().
>> >> >
>> >> > Move the call site of memcg_slab_free_hook() from do_slab_free() to
>> >> > slab_free() for slub to make the code clearer, since the current logic is
>> >> > awkward (e.g. the caller needs to judge whether it has to call
>> >> > memcg_slab_free_hook()). It is easy to make mistakes, such as the missing
>> >> > calls to memcg_slab_free_hook() addressed by:
>> >> >
>> >> > commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
>> >> > commit ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")
>> >>
>> >> Hm, is this commit also fixing such a bug? In mm/slab.c __cache_free():
>> >>
>
> Sorry, I think I have misread it and misled you here. I mean commit

My bad, I should have said "this patch" referring to yours, not "this
commit" which could refer to ae085d7f9365.

> ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")
> is a bug fix; this commit does not fix any issue, since __cache_free()
> will be called from qlink_free() and __cache_free() will call
> memcg_slab_free_hook(), so there are no issues. This commit is purely
> an improvement to memcg_slab_free_hook().

Ah, I see, indeed. The un-quarantining in kasan went through
memcg_slab_free_hook() before your patch. But yeah, it's better to do it
earlier: the freed object's user is the one who charged it to the memcg,
and is no longer using it - no reason to keep it accounted while in
kasan's quarantine.

2022-05-26 22:12:00

by Muchun Song

Subject: Re: [PATCH v2] mm: slab: optimize memcg_slab_free_hook()

On Tue, May 24, 2022 at 07:05:31PM +0200, Vlastimil Babka wrote:
> On 4/29/22 14:30, Muchun Song wrote:
> > Most callers of memcg_slab_free_hook() already know the slab, which can
> > be passed to memcg_slab_free_hook() directly to avoid the overhead of
> > another call to virt_to_slab(). For bulk freeing of objects, the call to
> > slab_objcgs() in the loop in memcg_slab_free_hook() is redundant as well.
> > Rework memcg_slab_free_hook() and build_detached_freelist() to remove
> > this unnecessary overhead and to let memcg_slab_free_hook() handle bulk
> > freeing in slab_free().
> >
> > Move the call site of memcg_slab_free_hook() from do_slab_free() to
> > slab_free() for slub to make the code clearer, since the current logic is
> > awkward (e.g. the caller needs to judge whether it has to call
> > memcg_slab_free_hook()). It is easy to make mistakes, such as the missing
> > calls to memcg_slab_free_hook() addressed by:
> >
> > commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
> > commit ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")
>
> Hm, is this commit also fixing such a bug? In mm/slab.c __cache_free():
>

Right.

> /* KASAN might put objp into memory quarantine, delaying its reuse. */
> if (kasan_slab_free(cachep, objp, init))
> return;
>
> before this patch we do not reach memcg_slab_free_hook() if
> kasan_slab_free() returns true; after this patch we do. AFAICS SLUB always
> did memcg_slab_free_hook() in the case of kasan_slab_free(), so it's the
> correct thing to do?
>

I don't think it is an issue since memcg_slab_free_hook()
mainly does memory accounting housekeeping. Doing it in
advance is not an issue. Actually, we have already done
this since

commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")

From this commit, the objects freed via kmem_cache_free_bulk()
in mm/slub.c have called memcg_slab_free_hook() even if
kasan_slab_free() returns true. Right?

> > This optimization is mainly for bulk objects freeing. The following numbers
> > is shown for 16-object freeing.
> >
> > before after
> > kmem_cache_free_bulk: ~430 ns ~400 ns
> >
> > The overhead is reduced by about 7% for 16-object freeing.
> >
> > Signed-off-by: Muchun Song <[email protected]>
>
> Otherwise looks good, will add to slab tree for 5.20, thanks.
>
> > ---
> > v2:
> > - Add numbers to commit log.
> >
> > mm/slab.c | 4 ++--
> > mm/slab.h | 30 ++++++++---------------------
> > mm/slub.c | 66 +++++++++++++++++++++------------------------------------------
> > 3 files changed, 32 insertions(+), 68 deletions(-)
> >
> > diff --git a/mm/slab.c b/mm/slab.c
> > index f8cd00f4ba13..2174962055ae 100644
> > --- a/mm/slab.c
> > +++ b/mm/slab.c
> > @@ -3406,9 +3406,10 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
> > {
> > bool init;
> >
> > + memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1);
> > +
> > if (is_kfence_address(objp)) {
> > kmemleak_free_recursive(objp, cachep->flags);
> > - memcg_slab_free_hook(cachep, &objp, 1);
> > __kfence_free(objp);
> > return;
> > }
> > @@ -3441,7 +3442,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
> > check_irq_off();
> > kmemleak_free_recursive(objp, cachep->flags);
> > objp = cache_free_debugcheck(cachep, objp, caller);
> > - memcg_slab_free_hook(cachep, &objp, 1);
> >
> > /*
> > * Skip calling cache_free_alien() when the platform is not numa.
> > diff --git a/mm/slab.h b/mm/slab.h
> > index db9fb5c8dae7..a8d5eb1c323f 100644
> > --- a/mm/slab.h
> > +++ b/mm/slab.h
> > @@ -547,36 +547,22 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
> > obj_cgroup_put(objcg);
> > }
> >
> > -static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
> > +static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
> > void **p, int objects)
> > {
> > - struct kmem_cache *s;
> > struct obj_cgroup **objcgs;
> > - struct obj_cgroup *objcg;
> > - struct slab *slab;
> > - unsigned int off;
> > int i;
> >
> > if (!memcg_kmem_enabled())
> > return;
> >
> > - for (i = 0; i < objects; i++) {
> > - if (unlikely(!p[i]))
> > - continue;
> > -
> > - slab = virt_to_slab(p[i]);
> > - /* we could be given a kmalloc_large() object, skip those */
> > - if (!slab)
> > - continue;
> > -
> > - objcgs = slab_objcgs(slab);
> > - if (!objcgs)
> > - continue;
> > + objcgs = slab_objcgs(slab);
> > + if (!objcgs)
> > + return;
> >
> > - if (!s_orig)
> > - s = slab->slab_cache;
> > - else
> > - s = s_orig;
> > + for (i = 0; i < objects; i++) {
> > + struct obj_cgroup *objcg;
> > + unsigned int off;
> >
> > off = obj_to_index(s, slab, p[i]);
> > objcg = objcgs[off];
> > @@ -628,7 +614,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
> > {
> > }
> >
> > -static inline void memcg_slab_free_hook(struct kmem_cache *s,
> > +static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
> > void **p, int objects)
> > {
> > }
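
Worth spelling out what the slab.h hunk above buys: every object handed to
the hook now lives on one known slab, so the slab_objcgs() lookup is done
once per batch instead of once per object. A hedged sketch of the resulting
shape (uncharge_one() is an illustrative stand-in for the per-object
uncharge/put body, which the quoted hunk elides):

    static inline void memcg_slab_free_hook(struct kmem_cache *s,
                                            struct slab *slab,
                                            void **p, int objects)
    {
            struct obj_cgroup **objcgs;
            int i;

            if (!memcg_kmem_enabled())
                    return;

            objcgs = slab_objcgs(slab);     /* one lookup per batch */
            if (!objcgs)
                    return;

            for (i = 0; i < objects; i++) {
                    unsigned int off = obj_to_index(s, slab, p[i]);

                    /* clear objcgs[off], uncharge, drop the reference */
                    uncharge_one(s, slab, objcgs, off);
            }
    }
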
> > diff --git a/mm/slub.c b/mm/slub.c
> > index 1f699ddfff7f..3794afe32b5f 100644
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -3435,9 +3435,6 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
> > struct kmem_cache_cpu *c;
> > unsigned long tid;
> >
> > - /* memcg_slab_free_hook() is already called for bulk free. */
> > - if (!tail)
> > - memcg_slab_free_hook(s, &head, 1);
> > redo:
> > /*
> > * Determine the currently cpus per cpu slab.
> > @@ -3497,9 +3494,10 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
> > }
> >
> > static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
> > - void *head, void *tail, int cnt,
> > + void *head, void *tail, void **p, int cnt,
> > unsigned long addr)
> > {
> > + memcg_slab_free_hook(s, slab, p, cnt);
> > /*
> > * With KASAN enabled slab_free_freelist_hook modifies the freelist
> > * to remove objects, whose reuse must be delayed.
> > @@ -3521,7 +3519,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
> > if (!s)
> > return;
> > trace_kmem_cache_free(_RET_IP_, x, s->name);
> > - slab_free(s, virt_to_slab(x), x, NULL, 1, _RET_IP_);
> > + slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_);
> > }
> > EXPORT_SYMBOL(kmem_cache_free);
> >
> > @@ -3562,79 +3560,59 @@ static inline
> > int build_detached_freelist(struct kmem_cache *s, size_t size,
> > void **p, struct detached_freelist *df)
> > {
> > - size_t first_skipped_index = 0;
> > int lookahead = 3;
> > void *object;
> > struct folio *folio;
> > - struct slab *slab;
> > -
> > - /* Always re-init detached_freelist */
> > - df->slab = NULL;
> > -
> > - do {
> > - object = p[--size];
> > - /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
> > - } while (!object && size);
> > -
> > - if (!object)
> > - return 0;
> > + size_t same;
> >
> > + object = p[--size];
> > folio = virt_to_folio(object);
> > if (!s) {
> > /* Handle kalloc'ed objects */
> > if (unlikely(!folio_test_slab(folio))) {
> > free_large_kmalloc(folio, object);
> > - p[size] = NULL; /* mark object processed */
> > + df->slab = NULL;
> > return size;
> > }
> > /* Derive kmem_cache from object */
> > - slab = folio_slab(folio);
> > - df->s = slab->slab_cache;
> > + df->slab = folio_slab(folio);
> > + df->s = df->slab->slab_cache;
> > } else {
> > - slab = folio_slab(folio);
> > + df->slab = folio_slab(folio);
> > df->s = cache_from_obj(s, object); /* Support for memcg */
> > }
> >
> > - if (is_kfence_address(object)) {
> > - slab_free_hook(df->s, object, false);
> > - __kfence_free(object);
> > - p[size] = NULL; /* mark object processed */
> > - return size;
> > - }
> > -
> > /* Start new detached freelist */
> > - df->slab = slab;
> > - set_freepointer(df->s, object, NULL);
> > df->tail = object;
> > df->freelist = object;
> > - p[size] = NULL; /* mark object processed */
> > df->cnt = 1;
> >
> > + if (is_kfence_address(object))
> > + return size;
> > +
> > + set_freepointer(df->s, object, NULL);
> > +
> > + same = size;
> > while (size) {
> > object = p[--size];
> > - if (!object)
> > - continue; /* Skip processed objects */
> > -
> > /* df->slab is always set at this point */
> > if (df->slab == virt_to_slab(object)) {
> > /* Opportunity build freelist */
> > set_freepointer(df->s, object, df->freelist);
> > df->freelist = object;
> > df->cnt++;
> > - p[size] = NULL; /* mark object processed */
> > -
> > + same--;
> > + if (size != same)
> > + swap(p[size], p[same]);
> > continue;
> > }
> >
> > /* Limit look ahead search */
> > if (!--lookahead)
> > break;
> > -
> > - if (!first_skipped_index)
> > - first_skipped_index = size + 1;
> > }
> >
> > - return first_skipped_index;
> > + return same;
> > }
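
The new bookkeeping here deserves a note: instead of NULLing out processed
slots and tracking first_skipped_index, matched objects are swapped to the
tail of p[], so on return p[0..same) still awaits processing and
p[same..size) is exactly the df->cnt objects just threaded onto the
detached freelist. A toy user-space model of that partitioning (plain C,
ints standing in for objects with their values as slab identity;
detach_batch() is an illustrative name, not kernel code):

    #include <stdio.h>

    static void swap_int(int *a, int *b)
    {
            int t = *a; *a = *b; *b = t;
    }

    /* Detach a batch of "objects" sharing the last element's slab id,
     * compact them to the tail of p[], return the batch start index. */
    static int detach_batch(int *p, int size)
    {
            int slab = p[--size];   /* last object anchors the batch */
            int same = size;        /* p[same..] belongs to the batch */
            int lookahead = 3;

            while (size) {
                    if (p[--size] == slab) {
                            same--;
                            if (size != same)
                                    swap_int(&p[size], &p[same]);
                            continue;
                    }
                    if (!--lookahead)       /* limit look-ahead search */
                            break;
            }
            return same;
    }

    int main(void)
    {
            int p[] = { 1, 2, 1, 2, 2 };
            int n = sizeof(p) / sizeof(p[0]);
            int same = detach_batch(p, n);

            /* Prints "batch at 2: 2 2 2"; p[0..2) = { 1, 1 } remains. */
            printf("batch at %d:", same);
            for (int i = same; i < n; i++)
                    printf(" %d", p[i]);
            printf("\n");
            return 0;
    }

kmem_cache_free_bulk() below relies on exactly this invariant: after
size = build_detached_freelist(s, size, p, &df), the batch occupies
p[size..size + df.cnt), so &p[size] is what slab_free() forwards to
memcg_slab_free_hook().
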
> >
> > /* Note that interrupts must be enabled when calling this function. */
> > @@ -3643,7 +3621,6 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
> > if (WARN_ON(!size))
> > return;
> >
> > - memcg_slab_free_hook(s, p, size);
> > do {
> > struct detached_freelist df;
> >
> > @@ -3651,7 +3628,8 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
> > if (!df.slab)
> > continue;
> >
> > - slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, _RET_IP_);
> > + slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt,
> > + _RET_IP_);
> > } while (likely(size));
> > }
> > EXPORT_SYMBOL(kmem_cache_free_bulk);
> > @@ -4554,7 +4532,7 @@ void kfree(const void *x)
> > return;
> > }
> > slab = folio_slab(folio);
> > - slab_free(slab->slab_cache, slab, object, NULL, 1, _RET_IP_);
> > + slab_free(slab->slab_cache, slab, object, NULL, &object, 1, _RET_IP_);
> > }
> > EXPORT_SYMBOL(kfree);
> >
>
>

2022-06-09 07:21:18

by Muchun Song

[permalink] [raw]
Subject: Re: [PATCH v2] mm: slab: optimize memcg_slab_free_hook()

On Fri, Apr 29, 2022 at 08:30:44PM +0800, Muchun Song wrote:
> Most callers of memcg_slab_free_hook() already know the slab, which can
> be passed to memcg_slab_free_hook() directly to avoid the overhead of
> another call of virt_to_slab(). For bulk freeing of objects, the call of
> slab_objcgs() in the loop in memcg_slab_free_hook() is redundant as well.
> Rework memcg_slab_free_hook() and build_detached_freelist() to reduce
> this unnecessary overhead and to let memcg_slab_free_hook() handle bulk
> freeing in slab_free().
>
> Move the calling site of memcg_slab_free_hook() from do_slab_free() to
> slab_free() for SLUB to make the code clearer, since the current logic is
> awkward (e.g. the caller needs to judge whether it has to call
> memcg_slab_free_hook()). It is easy to make mistakes, such as the missing
> calls of memcg_slab_free_hook() addressed by these fixes:
>
> commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
> commit ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")
>
> This optimization mainly targets bulk object freeing. The following numbers
> are for 16-object freeing.
>
> before after
> kmem_cache_free_bulk: ~430 ns ~400 ns
>
> The overhead is reduced by about 7% for 16-object freeing.
>
> Signed-off-by: Muchun Song <[email protected]>

Hi Vlastimil,

Would you mind picking it up? I did not see this patch in the
slab tree.

Thanks.

2022-06-14 13:55:17

by Vlastimil Babka

[permalink] [raw]
Subject: Re: [PATCH v2] mm: slab: optimize memcg_slab_free_hook()

On 6/9/22 08:34, Muchun Song wrote:
> On Fri, Apr 29, 2022 at 08:30:44PM +0800, Muchun Song wrote:
>> Most callers of memcg_slab_free_hook() already know the slab, which can
>> be passed to memcg_slab_free_hook() directly to avoid the overhead of
>> another call of virt_to_slab(). For bulk freeing of objects, the call of
>> slab_objcgs() in the loop in memcg_slab_free_hook() is redundant as well.
>> Rework memcg_slab_free_hook() and build_detached_freelist() to reduce
>> this unnecessary overhead and to let memcg_slab_free_hook() handle bulk
>> freeing in slab_free().
>>
>> Move the calling site of memcg_slab_free_hook() from do_slab_free() to
>> slab_free() for SLUB to make the code clearer, since the current logic is
>> awkward (e.g. the caller needs to judge whether it has to call
>> memcg_slab_free_hook()). It is easy to make mistakes, such as the missing
>> calls of memcg_slab_free_hook() addressed by these fixes:
>>
>> commit d1b2cf6cb84a ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
>> commit ae085d7f9365 ("mm: kfence: fix missing objcg housekeeping for SLAB")
>>
>> This optimization mainly targets bulk object freeing. The following numbers
>> are for 16-object freeing.
>>
>> before after
>> kmem_cache_free_bulk: ~430 ns ~400 ns
>>
>> The overhead is reduced by about 7% for 16-object freeing.
>>
>> Signed-off-by: Muchun Song <[email protected]>
>
> Hi Vlastimil,
>
> Would you mind picking it up? I did not see this patch in the
> slab tree.

Sorry, I was waiting for rc1 to start the for-5.20 branches and was then
away for another week. Now pushed to slab/for-5.20/optimizations.

> Thanks.
>