2022-11-21 14:00:03

by Feng Tang

Subject: [PATCH -next 2/2] mm/kasan: simplify is_kmalloc check

Use the new is_kmalloc_cache() helper to simplify the code that checks
whether a kmem_cache is a kmalloc cache.
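
For reference, is_kmalloc_cache() is introduced by patch 1/2 of this
series (not shown here). Given that create_kmalloc_cache() below tags
kmalloc caches with SLAB_KMALLOC, the helper is presumably a one-line
flag test along these lines (a sketch, not the exact patch 1/2 code):

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	/* SLAB_KMALLOC is set via create_boot_cache() below */
	return (s->flags & SLAB_KMALLOC);
}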

Signed-off-by: Feng Tang <[email protected]>
---
include/linux/kasan.h | 9 ---------
mm/kasan/common.c | 9 ++-------
mm/slab_common.c | 1 -
3 files changed, 2 insertions(+), 17 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index dff604912687..fc46f5d6f404 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -102,7 +102,6 @@ struct kasan_cache {
int alloc_meta_offset;
int free_meta_offset;
#endif
- bool is_kmalloc;
};

void __kasan_unpoison_range(const void *addr, size_t size);
@@ -129,13 +128,6 @@ static __always_inline bool kasan_unpoison_pages(struct page *page,
return false;
}

-void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
-static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
-{
- if (kasan_enabled())
- __kasan_cache_create_kmalloc(cache);
-}
-
void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
@@ -252,7 +244,6 @@ static inline void kasan_poison_pages(struct page *page, unsigned int order,
bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
bool init) { return false; }
-static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 1f30080a7a4c..f7e0e5067e7a 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -122,11 +122,6 @@ void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
KASAN_PAGE_FREE, init);
}

-void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
-{
- cache->kasan_info.is_kmalloc = true;
-}
-
void __kasan_poison_slab(struct slab *slab)
{
struct page *page = slab_page(slab);
@@ -326,7 +321,7 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
kasan_unpoison(tagged_object, cache->object_size, init);

/* Save alloc info (if possible) for non-kmalloc() allocations. */
- if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
+ if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
kasan_save_alloc_info(cache, tagged_object, flags);

return tagged_object;
@@ -372,7 +367,7 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
* Save alloc info (if possible) for kmalloc() allocations.
* This also rewrites the alloc info when called from kasan_krealloc().
*/
- if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
+ if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
kasan_save_alloc_info(cache, (void *)object, flags);

/* Keep the tag that was set by kasan_slab_alloc(). */
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8276022f0da4..a5480d67f391 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -663,7 +663,6 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,

create_boot_cache(s, name, size, flags | SLAB_KMALLOC, useroffset,
usersize);
- kasan_cache_create_kmalloc(s);
list_add(&s->list, &slab_caches);
s->refcount = 1;
return s;
--
2.34.1



2022-11-21 14:27:59

by Feng Tang

Subject: Re: [PATCH -next 2/2] mm/kasan: simplify is_kmalloc check

On Mon, Nov 21, 2022 at 09:50:24PM +0800, Tang, Feng wrote:
> Use the new is_kmalloc_cache() helper to simplify the code that checks
> whether a kmem_cache is a kmalloc cache.
>
> Signed-off-by: Feng Tang <[email protected]>
> ---
> include/linux/kasan.h | 9 ---------
> mm/kasan/common.c | 9 ++-------
> mm/slab_common.c | 1 -
> 3 files changed, 2 insertions(+), 17 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index dff604912687..fc46f5d6f404 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -102,7 +102,6 @@ struct kasan_cache {
> int alloc_meta_offset;
> int free_meta_offset;
> #endif
> - bool is_kmalloc;
> };
>
> void __kasan_unpoison_range(const void *addr, size_t size);
> @@ -129,13 +128,6 @@ static __always_inline bool kasan_unpoison_pages(struct page *page,
> return false;
> }
>
> -void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
> -static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
> -{
> - if (kasan_enabled())
> - __kasan_cache_create_kmalloc(cache);
> -}
> -
> void __kasan_poison_slab(struct slab *slab);
> static __always_inline void kasan_poison_slab(struct slab *slab)
> {
> @@ -252,7 +244,6 @@ static inline void kasan_poison_pages(struct page *page, unsigned int order,
> bool init) {}
> static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
> bool init) { return false; }
> -static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
> static inline void kasan_poison_slab(struct slab *slab) {}
> static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
> void *object) {}
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 1f30080a7a4c..f7e0e5067e7a 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -122,11 +122,6 @@ void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
> KASAN_PAGE_FREE, init);
> }
>
> -void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
> -{
> - cache->kasan_info.is_kmalloc = true;
> -}
> -
> void __kasan_poison_slab(struct slab *slab)
> {
> struct page *page = slab_page(slab);
> @@ -326,7 +321,7 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
> kasan_unpoison(tagged_object, cache->object_size, init);
>
> /* Save alloc info (if possible) for non-kmalloc() allocations. */
> - if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
> + if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))

Sorry, it should be:

- if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
+ if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))

This hunk is the non-kmalloc path in __kasan_slab_alloc(), so the
negation must be kept; the check in ____kasan_kmalloc() below is the
kmalloc path and is correctly non-negated.

Thanks,
Feng

> kasan_save_alloc_info(cache, tagged_object, flags);
>
> return tagged_object;
> @@ -372,7 +367,7 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
> * Save alloc info (if possible) for kmalloc() allocations.
> * This also rewrites the alloc info when called from kasan_krealloc().
> */
> - if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
> + if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
> kasan_save_alloc_info(cache, (void *)object, flags);
>
> /* Keep the tag that was set by kasan_slab_alloc(). */
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 8276022f0da4..a5480d67f391 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -663,7 +663,6 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
>
> create_boot_cache(s, name, size, flags | SLAB_KMALLOC, useroffset,
> usersize);
> - kasan_cache_create_kmalloc(s);
> list_add(&s->list, &slab_caches);
> s->refcount = 1;
> return s;
> --
> 2.34.1
>

2022-11-21 15:41:11

by Andrey Konovalov

Subject: Re: [PATCH -next 2/2] mm/kasan: simplify is_kmalloc check

On Mon, Nov 21, 2022 at 2:53 PM Feng Tang <[email protected]> wrote:
>
> Use the new is_kmalloc_cache() helper to simplify the code that checks
> whether a kmem_cache is a kmalloc cache.
>
> Signed-off-by: Feng Tang <[email protected]>

Hi Feng,

Nice simplification!

> ---
> include/linux/kasan.h | 9 ---------
> mm/kasan/common.c | 9 ++-------
> mm/slab_common.c | 1 -
> 3 files changed, 2 insertions(+), 17 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index dff604912687..fc46f5d6f404 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -102,7 +102,6 @@ struct kasan_cache {
> int alloc_meta_offset;
> int free_meta_offset;
> #endif
> - bool is_kmalloc;
> };

We can go even further here, and only define the kasan_cache struct
and add the kasan_info field to kmem_cache when CONFIG_KASAN_GENERIC
is enabled.
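
Concretely, the idea is something like the following (a sketch only;
whether the tag-based modes really have no users of kasan_info would
need checking):

/* include/linux/kasan.h: move into the CONFIG_KASAN_GENERIC section */
#ifdef CONFIG_KASAN_GENERIC
struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};
#endif

/* slab_def.h / slub_def.h: tighten the guard around the field */
#ifdef CONFIG_KASAN_GENERIC	/* was: #ifdef CONFIG_KASAN */
	struct kasan_cache kasan_info;
#endif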

>
> void __kasan_unpoison_range(const void *addr, size_t size);
> @@ -129,13 +128,6 @@ static __always_inline bool kasan_unpoison_pages(struct page *page,
> return false;
> }
>
> -void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
> -static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
> -{
> - if (kasan_enabled())
> - __kasan_cache_create_kmalloc(cache);
> -}
> -
> void __kasan_poison_slab(struct slab *slab);
> static __always_inline void kasan_poison_slab(struct slab *slab)
> {
> @@ -252,7 +244,6 @@ static inline void kasan_poison_pages(struct page *page, unsigned int order,
> bool init) {}
> static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
> bool init) { return false; }
> -static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
> static inline void kasan_poison_slab(struct slab *slab) {}
> static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
> void *object) {}
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 1f30080a7a4c..f7e0e5067e7a 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -122,11 +122,6 @@ void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
> KASAN_PAGE_FREE, init);
> }
>
> -void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
> -{
> - cache->kasan_info.is_kmalloc = true;
> -}
> -
> void __kasan_poison_slab(struct slab *slab)
> {
> struct page *page = slab_page(slab);
> @@ -326,7 +321,7 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
> kasan_unpoison(tagged_object, cache->object_size, init);
>
> /* Save alloc info (if possible) for non-kmalloc() allocations. */
> - if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
> + if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
> kasan_save_alloc_info(cache, tagged_object, flags);
>
> return tagged_object;
> @@ -372,7 +367,7 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
> * Save alloc info (if possible) for kmalloc() allocations.
> * This also rewrites the alloc info when called from kasan_krealloc().
> */
> - if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
> + if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
> kasan_save_alloc_info(cache, (void *)object, flags);
>
> /* Keep the tag that was set by kasan_slab_alloc(). */
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 8276022f0da4..a5480d67f391 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -663,7 +663,6 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
>
> create_boot_cache(s, name, size, flags | SLAB_KMALLOC, useroffset,
> usersize);
> - kasan_cache_create_kmalloc(s);
> list_add(&s->list, &slab_caches);
> s->refcount = 1;
> return s;
> --
> 2.34.1
>

2022-11-22 07:38:16

by Feng Tang

Subject: Re: [PATCH -next 2/2] mm/kasan: simplify is_kmalloc check

On Mon, Nov 21, 2022 at 04:15:32PM +0100, Andrey Konovalov wrote:
> On Mon, Nov 21, 2022 at 2:53 PM Feng Tang <[email protected]> wrote:
> >
> > Use the new is_kmalloc_cache() helper to simplify the code that checks
> > whether a kmem_cache is a kmalloc cache.
> >
> > Signed-off-by: Feng Tang <[email protected]>
>
> Hi Feng,
>
> Nice simplification!
>
> > ---
> > include/linux/kasan.h | 9 ---------
> > mm/kasan/common.c | 9 ++-------
> > mm/slab_common.c | 1 -
> > 3 files changed, 2 insertions(+), 17 deletions(-)
> >
> > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > index dff604912687..fc46f5d6f404 100644
> > --- a/include/linux/kasan.h
> > +++ b/include/linux/kasan.h
> > @@ -102,7 +102,6 @@ struct kasan_cache {
> > int alloc_meta_offset;
> > int free_meta_offset;
> > #endif
> > - bool is_kmalloc;
> > };
>
> We can go even further here, and only define the kasan_cache struct
> and add the kasan_info field to kmem_cache when CONFIG_KASAN_GENERIC
> is enabled.

Good idea, thanks!

I mainly checked the kasan_cache-related code and made an add-on
patch below; please let me know if my understanding is wrong or I
missed anything.

Thanks,
Feng

---
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 0ac6505367ee..f2e41290094e 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -96,14 +96,6 @@ static inline bool kasan_has_integrated_init(void)
}

#ifdef CONFIG_KASAN
-
-struct kasan_cache {
-#ifdef CONFIG_KASAN_GENERIC
- int alloc_meta_offset;
- int free_meta_offset;
-#endif
-};
-
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
@@ -293,6 +285,11 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}

#ifdef CONFIG_KASAN_GENERIC

+struct kasan_cache {
+ int alloc_meta_offset;
+ int free_meta_offset;
+};
+
size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index f0ffad6a3365..39f7f1f95de2 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -72,7 +72,7 @@ struct kmem_cache {
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

-#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_GENERIC
struct kasan_cache kasan_info;
#endif

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f9c68a9dac04..4e7cdada4bbb 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -132,7 +132,7 @@ struct kmem_cache {
unsigned int *random_seq;
#endif

-#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_GENERIC
struct kasan_cache kasan_info;
#endif



2022-11-22 11:14:13

by Andrey Konovalov

Subject: Re: [PATCH -next 2/2] mm/kasan: simplify is_kmalloc check

On Tue, Nov 22, 2022 at 7:56 AM Feng Tang <[email protected]> wrote:
>
> On Mon, Nov 21, 2022 at 04:15:32PM +0100, Andrey Konovalov wrote:
> > On Mon, Nov 21, 2022 at 2:53 PM Feng Tang <[email protected]> wrote:
> > >
> > > Use the new is_kmalloc_cache() helper to simplify the code that checks
> > > whether a kmem_cache is a kmalloc cache.
> > >
> > > Signed-off-by: Feng Tang <[email protected]>
> >
> > Hi Feng,
> >
> > Nice simplification!
> >
> > > ---
> > > include/linux/kasan.h | 9 ---------
> > > mm/kasan/common.c | 9 ++-------
> > > mm/slab_common.c | 1 -
> > > 3 files changed, 2 insertions(+), 17 deletions(-)
> > >
> > > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > > index dff604912687..fc46f5d6f404 100644
> > > --- a/include/linux/kasan.h
> > > +++ b/include/linux/kasan.h
> > > @@ -102,7 +102,6 @@ struct kasan_cache {
> > > int alloc_meta_offset;
> > > int free_meta_offset;
> > > #endif
> > > - bool is_kmalloc;
> > > };
> >
> > We can go even further here, and only define the kasan_cache struct
> > and add the kasan_info field to kmem_cache when CONFIG_KASAN_GENERIC
> > is enabled.
>
> Good idea, thanks!
>
> I mainly checked the kasan_cache-related code and made an add-on
> patch below; please let me know if my understanding is wrong or I
> missed anything.
>
> Thanks,
> Feng
>
> ---
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 0ac6505367ee..f2e41290094e 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -96,14 +96,6 @@ static inline bool kasan_has_integrated_init(void)
> }
>
> #ifdef CONFIG_KASAN
> -
> -struct kasan_cache {
> -#ifdef CONFIG_KASAN_GENERIC
> - int alloc_meta_offset;
> - int free_meta_offset;
> -#endif
> -};
> -
> void __kasan_unpoison_range(const void *addr, size_t size);
> static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
> {
> @@ -293,6 +285,11 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
>
> #ifdef CONFIG_KASAN_GENERIC
>
> +struct kasan_cache {
> + int alloc_meta_offset;
> + int free_meta_offset;
> +};
> +
> size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
> slab_flags_t kasan_never_merge(void);
> void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
> diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
> index f0ffad6a3365..39f7f1f95de2 100644
> --- a/include/linux/slab_def.h
> +++ b/include/linux/slab_def.h
> @@ -72,7 +72,7 @@ struct kmem_cache {
> int obj_offset;
> #endif /* CONFIG_DEBUG_SLAB */
>
> -#ifdef CONFIG_KASAN
> +#ifdef CONFIG_KASAN_GENERIC
> struct kasan_cache kasan_info;
> #endif
>
> diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
> index f9c68a9dac04..4e7cdada4bbb 100644
> --- a/include/linux/slub_def.h
> +++ b/include/linux/slub_def.h
> @@ -132,7 +132,7 @@ struct kmem_cache {
> unsigned int *random_seq;
> #endif
>
> -#ifdef CONFIG_KASAN
> +#ifdef CONFIG_KASAN_GENERIC
> struct kasan_cache kasan_info;
> #endif

Yes, this looks good.

Please resend as a v2 and I'll give a Reviewed-by.

Thanks!