2024-02-21 19:50:38

by Suren Baghdasaryan

Subject: [PATCH v4 22/36] mm/slab: add allocation accounting into slab allocation and free paths

Account slab allocations using a codetag reference embedded into slabobj_ext.
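
Conceptually, the alloc and free hot paths reduce to the following
(simplified sketch; the config guards and error handling in the real
hooks below are omitted):

	/* alloc: charge the current task's tag against this object */
	obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
	if (obj_exts)
		alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);

	/* free: look up the object's extension slot, uncharge the tag */
	obj_exts = slab_obj_exts(slab);
	if (obj_exts)
		alloc_tag_sub(&obj_exts[obj_to_index(s, slab, p[i])].ref,
			      s->size);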

Signed-off-by: Suren Baghdasaryan <[email protected]>
Co-developed-by: Kent Overstreet <[email protected]>
Signed-off-by: Kent Overstreet <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
---
mm/slab.h | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
mm/slub.c | 9 ++++++++
2 files changed, 75 insertions(+)

diff --git a/mm/slab.h b/mm/slab.h
index 13b6ba2abd74..c4bd0d5348cb 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -567,6 +567,46 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
gfp_t gfp, bool new_slab);

+static inline bool need_slab_obj_ext(void)
+{
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+ if (mem_alloc_profiling_enabled())
+ return true;
+#endif
+ /*
+ * CONFIG_MEMCG_KMEM creates a vector of obj_cgroup objects conditionally
+ * inside memcg_slab_post_alloc_hook. No other users for now.
+ */
+ return false;
+}
+
+static inline struct slabobj_ext *
+prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
+{
+ struct slab *slab;
+
+ if (!p)
+ return NULL;
+
+ if (!need_slab_obj_ext())
+ return NULL;
+
+ if (s->flags & SLAB_NO_OBJ_EXT)
+ return NULL;
+
+ if (flags & __GFP_NO_OBJ_EXT)
+ return NULL;
+
+ slab = virt_to_slab(p);
+ if (!slab_obj_exts(slab) &&
+ WARN(alloc_slab_obj_exts(slab, s, flags, false),
+ "%s, %s: Failed to create slab extension vector!\n",
+ __func__, s->name))
+ return NULL;
+
+ return slab_obj_exts(slab) + obj_to_index(s, slab, p);
+}
+
#else /* CONFIG_SLAB_OBJ_EXT */

static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
@@ -589,6 +629,32 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)

#endif /* CONFIG_SLAB_OBJ_EXT */

+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
+static inline void alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+ void **p, int objects)
+{
+ struct slabobj_ext *obj_exts;
+ int i;
+
+ obj_exts = slab_obj_exts(slab);
+ if (!obj_exts)
+ return;
+
+ for (i = 0; i < objects; i++) {
+ unsigned int off = obj_to_index(s, slab, p[i]);
+
+ alloc_tag_sub(&obj_exts[off].ref, s->size);
+ }
+}
+
+#else
+
+static inline void alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+ void **p, int objects) {}
+
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
#ifdef CONFIG_MEMCG_KMEM
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
enum node_stat_item idx, int nr);
diff --git a/mm/slub.c b/mm/slub.c
index 5dc7beda6c0d..a69b6b4c8df6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3826,6 +3826,7 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
unsigned int orig_size)
{
unsigned int zero_size = s->object_size;
+ struct slabobj_ext *obj_exts;
bool kasan_init = init;
size_t i;
gfp_t init_flags = flags & gfp_allowed_mask;
@@ -3868,6 +3869,12 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
kmemleak_alloc_recursive(p[i], s->object_size, 1,
s->flags, init_flags);
kmsan_slab_alloc(s, p[i], init_flags);
+ obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+ /* obj_exts can be allocated for other reasons */
+ if (likely(obj_exts) && mem_alloc_profiling_enabled())
+ alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
+#endif
}

memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
@@ -4346,6 +4353,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
unsigned long addr)
{
memcg_slab_free_hook(s, slab, &object, 1);
+ alloc_tagging_slab_free_hook(s, slab, &object, 1);

if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
do_slab_free(s, slab, object, object, 1, addr);
@@ -4356,6 +4364,7 @@ void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
void *tail, void **p, int cnt, unsigned long addr)
{
memcg_slab_free_hook(s, slab, p, cnt);
+ alloc_tagging_slab_free_hook(s, slab, p, cnt);
/*
* With KASAN enabled slab_free_freelist_hook modifies the freelist
* to remove objects, whose reuse must be delayed.
--
2.44.0.rc0.258.g7320e95886-goog



2024-02-27 13:09:19

by Vlastimil Babka

Subject: Re: [PATCH v4 22/36] mm/slab: add allocation accounting into slab allocation and free paths



On 2/21/24 20:40, Suren Baghdasaryan wrote:
> Account slab allocations using a codetag reference embedded into slabobj_ext.
>
> Signed-off-by: Suren Baghdasaryan <[email protected]>
> Co-developed-by: Kent Overstreet <[email protected]>
> Signed-off-by: Kent Overstreet <[email protected]>
> Reviewed-by: Kees Cook <[email protected]>
> ---
> mm/slab.h | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
> mm/slub.c | 9 ++++++++
> 2 files changed, 75 insertions(+)
>
> diff --git a/mm/slab.h b/mm/slab.h
> index 13b6ba2abd74..c4bd0d5348cb 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -567,6 +567,46 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
> int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> gfp_t gfp, bool new_slab);
>
> +static inline bool need_slab_obj_ext(void)
> +{
> +#ifdef CONFIG_MEM_ALLOC_PROFILING
> + if (mem_alloc_profiling_enabled())
> + return true;
> +#endif
> + /*
> + * CONFIG_MEMCG_KMEM creates a vector of obj_cgroup objects conditionally
> + * inside memcg_slab_post_alloc_hook. No other users for now.
> + */
> + return false;
> +}
> +
> +static inline struct slabobj_ext *
> +prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> +{
> + struct slab *slab;
> +
> + if (!p)
> + return NULL;
> +
> + if (!need_slab_obj_ext())
> + return NULL;
> +
> + if (s->flags & SLAB_NO_OBJ_EXT)
> + return NULL;
> +
> + if (flags & __GFP_NO_OBJ_EXT)
> + return NULL;
> +
> + slab = virt_to_slab(p);
> + if (!slab_obj_exts(slab) &&
> + WARN(alloc_slab_obj_exts(slab, s, flags, false),
> + "%s, %s: Failed to create slab extension vector!\n",
> + __func__, s->name))
> + return NULL;
> +
> + return slab_obj_exts(slab) + obj_to_index(s, slab, p);
> +}
> +
> #else /* CONFIG_SLAB_OBJ_EXT */
>
> static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
> @@ -589,6 +629,32 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
>
> #endif /* CONFIG_SLAB_OBJ_EXT */
>
> +#ifdef CONFIG_MEM_ALLOC_PROFILING
> +
> +static inline void alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab,
> + void **p, int objects)

Only used from mm/slub.c, so it could move there?

> +{
> + struct slabobj_ext *obj_exts;
> + int i;
> +
> + obj_exts = slab_obj_exts(slab);
> + if (!obj_exts)
> + return;
> +
> + for (i = 0; i < objects; i++) {
> + unsigned int off = obj_to_index(s, slab, p[i]);
> +
> + alloc_tag_sub(&obj_exts[off].ref, s->size);
> + }
> +}
> +
> +#else
> +
> +static inline void alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab,
> + void **p, int objects) {}
> +
> +#endif /* CONFIG_MEM_ALLOC_PROFILING */
> +
> #ifdef CONFIG_MEMCG_KMEM
> void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
> enum node_stat_item idx, int nr);
> diff --git a/mm/slub.c b/mm/slub.c
> index 5dc7beda6c0d..a69b6b4c8df6 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3826,6 +3826,7 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
> unsigned int orig_size)
> {
> unsigned int zero_size = s->object_size;
> + struct slabobj_ext *obj_exts;
> bool kasan_init = init;
> size_t i;
> gfp_t init_flags = flags & gfp_allowed_mask;
> @@ -3868,6 +3869,12 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
> kmemleak_alloc_recursive(p[i], s->object_size, 1,
> s->flags, init_flags);
> kmsan_slab_alloc(s, p[i], init_flags);
> + obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
> +#ifdef CONFIG_MEM_ALLOC_PROFILING
> + /* obj_exts can be allocated for other reasons */
> + if (likely(obj_exts) && mem_alloc_profiling_enabled())
> + alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
> +#endif

I think that, like in the page allocator, this could be better guarded by
mem_alloc_profiling_enabled() as the outermost check.
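
Something like this (rough, untested sketch, keeping the #ifdef):

 #ifdef CONFIG_MEM_ALLOC_PROFILING
	if (mem_alloc_profiling_enabled()) {
		obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
		/* obj_exts can be allocated for other reasons */
		if (likely(obj_exts))
			alloc_tag_add(&obj_exts->ref, current->alloc_tag,
				      s->size);
	}
 #endif

so that when profiling is compiled in but the static key is off we don't
even call into prepare_slab_obj_exts_hook().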

> }
>
> memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
> @@ -4346,6 +4353,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
> unsigned long addr)
> {
> memcg_slab_free_hook(s, slab, &object, 1);
> + alloc_tagging_slab_free_hook(s, slab, &object, 1);

Same here; the static key check is not even inside of this one?
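
I.e. bail out on the static key before touching the obj_exts vector at
all; a rough, untested sketch:

	static inline void alloc_tagging_slab_free_hook(struct kmem_cache *s,
			struct slab *slab, void **p, int objects)
	{
		struct slabobj_ext *obj_exts;
		int i;

		/* static key test first, everything else behind it */
		if (!mem_alloc_profiling_enabled())
			return;

		obj_exts = slab_obj_exts(slab);
		if (!obj_exts)
			return;

		for (i = 0; i < objects; i++)
			alloc_tag_sub(&obj_exts[obj_to_index(s, slab, p[i])].ref,
				      s->size);
	}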

>
> if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
> do_slab_free(s, slab, object, object, 1, addr);
> @@ -4356,6 +4364,7 @@ void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
> void *tail, void **p, int cnt, unsigned long addr)
> {
> memcg_slab_free_hook(s, slab, p, cnt);
> + alloc_tagging_slab_free_hook(s, slab, p, cnt);

Ditto.

> /*
> * With KASAN enabled slab_free_freelist_hook modifies the freelist
> * to remove objects, whose reuse must be delayed.

2024-02-27 16:16:31

by Suren Baghdasaryan

Subject: Re: [PATCH v4 22/36] mm/slab: add allocation accounting into slab allocation and free paths

On Tue, Feb 27, 2024 at 5:07 AM Vlastimil Babka <[email protected]> wrote:
>
>
>
> On 2/21/24 20:40, Suren Baghdasaryan wrote:
> > Account slab allocations using a codetag reference embedded into slabobj_ext.
> >
> > Signed-off-by: Suren Baghdasaryan <[email protected]>
> > Co-developed-by: Kent Overstreet <[email protected]>
> > Signed-off-by: Kent Overstreet <[email protected]>
> > Reviewed-by: Kees Cook <[email protected]>
> > ---
> > mm/slab.h | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
> > mm/slub.c | 9 ++++++++
> > 2 files changed, 75 insertions(+)
> >
> > diff --git a/mm/slab.h b/mm/slab.h
> > index 13b6ba2abd74..c4bd0d5348cb 100644
> > --- a/mm/slab.h
> > +++ b/mm/slab.h
> > @@ -567,6 +567,46 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
> > int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> > gfp_t gfp, bool new_slab);
> >
> > +static inline bool need_slab_obj_ext(void)
> > +{
> > +#ifdef CONFIG_MEM_ALLOC_PROFILING
> > + if (mem_alloc_profiling_enabled())
> > + return true;
> > +#endif
> > + /*
> > + * CONFIG_MEMCG_KMEM creates a vector of obj_cgroup objects conditionally
> > + * inside memcg_slab_post_alloc_hook. No other users for now.
> > + */
> > + return false;
> > +}
> > +
> > +static inline struct slabobj_ext *
> > +prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> > +{
> > + struct slab *slab;
> > +
> > + if (!p)
> > + return NULL;
> > +
> > + if (!need_slab_obj_ext())
> > + return NULL;
> > +
> > + if (s->flags & SLAB_NO_OBJ_EXT)
> > + return NULL;
> > +
> > + if (flags & __GFP_NO_OBJ_EXT)
> > + return NULL;
> > +
> > + slab = virt_to_slab(p);
> > + if (!slab_obj_exts(slab) &&
> > + WARN(alloc_slab_obj_exts(slab, s, flags, false),
> > + "%s, %s: Failed to create slab extension vector!\n",
> > + __func__, s->name))
> > + return NULL;
> > +
> > + return slab_obj_exts(slab) + obj_to_index(s, slab, p);
> > +}
> > +
> > #else /* CONFIG_SLAB_OBJ_EXT */
> >
> > static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
> > @@ -589,6 +629,32 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> >
> > #endif /* CONFIG_SLAB_OBJ_EXT */
> >
> > +#ifdef CONFIG_MEM_ALLOC_PROFILING
> > +
> > +static inline void alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab,
> > + void **p, int objects)
>
> Only used from mm/slub.c, so it could move there?

Ack.

>
> > +{
> > + struct slabobj_ext *obj_exts;
> > + int i;
> > +
> > + obj_exts = slab_obj_exts(slab);
> > + if (!obj_exts)
> > + return;
> > +
> > + for (i = 0; i < objects; i++) {
> > + unsigned int off = obj_to_index(s, slab, p[i]);
> > +
> > + alloc_tag_sub(&obj_exts[off].ref, s->size);
> > + }
> > +}
> > +
> > +#else
> > +
> > +static inline void alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab,
> > + void **p, int objects) {}
> > +
> > +#endif /* CONFIG_MEM_ALLOC_PROFILING */
> > +
> > #ifdef CONFIG_MEMCG_KMEM
> > void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
> > enum node_stat_item idx, int nr);
> > diff --git a/mm/slub.c b/mm/slub.c
> > index 5dc7beda6c0d..a69b6b4c8df6 100644
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -3826,6 +3826,7 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
> > unsigned int orig_size)
> > {
> > unsigned int zero_size = s->object_size;
> > + struct slabobj_ext *obj_exts;
> > bool kasan_init = init;
> > size_t i;
> > gfp_t init_flags = flags & gfp_allowed_mask;
> > @@ -3868,6 +3869,12 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
> > kmemleak_alloc_recursive(p[i], s->object_size, 1,
> > s->flags, init_flags);
> > kmsan_slab_alloc(s, p[i], init_flags);
> > + obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
> > +#ifdef CONFIG_MEM_ALLOC_PROFILING
> > + /* obj_exts can be allocated for other reasons */
> > + if (likely(obj_exts) && mem_alloc_profiling_enabled())
> > + alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
> > +#endif
>
> I think that, like in the page allocator, this could be better guarded by
> mem_alloc_profiling_enabled() as the outermost check.

Oops, missed it. Will fix.

>
> > }
> >
> > memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
> > @@ -4346,6 +4353,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
> > unsigned long addr)
> > {
> > memcg_slab_free_hook(s, slab, &object, 1);
> > + alloc_tagging_slab_free_hook(s, slab, &object, 1);
>
> Same here; the static key check is not even inside of this one?

Ack.

>
> >
> > if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
> > do_slab_free(s, slab, object, object, 1, addr);
> > @@ -4356,6 +4364,7 @@ void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
> > void *tail, void **p, int cnt, unsigned long addr)
> > {
> > memcg_slab_free_hook(s, slab, p, cnt);
> > + alloc_tagging_slab_free_hook(s, slab, p, cnt);
>
> Ditto.

Ack.

>
> > /*
> > * With KASAN enabled slab_free_freelist_hook modifies the freelist
> > * to remove objects, whose reuse must be delayed.
>