2021-02-01 19:46:41

by Andrey Konovalov

Subject: [PATCH 01/12] kasan, mm: don't save alloc stacks twice

Currently KASAN saves allocation stacks in both kasan_slab_alloc() and
kasan_kmalloc() annotations. This patch changes KASAN to save allocation
stacks for slab objects from kmalloc caches in kasan_kmalloc() only,
and stacks for other slab objects in kasan_slab_alloc() only.
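
For kmalloc caches, both annotations currently fire on the same
allocation. Roughly, as a simplified sketch of the v5.11-era SLUB path
(illustration only, not part of this patch):

void *__kmalloc(size_t size, gfp_t flags)
{
        struct kmem_cache *s = kmalloc_slab(size, flags);
        void *ret;

        /*
         * slab_alloc() ends in slab_post_alloc_hook(), which calls
         * kasan_slab_alloc() and saves an allocation stack trace...
         */
        ret = slab_alloc(s, flags, _RET_IP_);

        /* ...and kasan_kmalloc() then saves the same stack again. */
        return kasan_kmalloc(s, ret, size, flags);
}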

This change requires ____kasan_kmalloc() to know whether the object
belongs to a kmalloc cache. This is implemented by adding a flag field
to the kasan_info structure. That flag is only set for kmalloc caches
via a new kasan_cache_create_kmalloc() annotation.

Signed-off-by: Andrey Konovalov <[email protected]>
---
 include/linux/kasan.h |  9 +++++++++
 mm/kasan/common.c     | 18 ++++++++++++++----
 mm/slab_common.c      |  1 +
 3 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 6d8f3227c264..2d5de4092185 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -83,6 +83,7 @@ static inline void kasan_disable_current(void) {}
 struct kasan_cache {
         int alloc_meta_offset;
         int free_meta_offset;
+        bool is_kmalloc;
 };
 
 #ifdef CONFIG_KASAN_HW_TAGS
@@ -143,6 +144,13 @@ static __always_inline void kasan_cache_create(struct kmem_cache *cache,
                 __kasan_cache_create(cache, size, flags);
 }
 
+void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
+static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
+{
+        if (kasan_enabled())
+                __kasan_cache_create_kmalloc(cache);
+}
+
 size_t __kasan_metadata_size(struct kmem_cache *cache);
 static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
 {
@@ -278,6 +286,7 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 static inline void kasan_cache_create(struct kmem_cache *cache,
                                       unsigned int *size,
                                       slab_flags_t *flags) {}
+static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
 static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 static inline void kasan_poison_slab(struct page *page) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index fe852f3cfa42..374049564ea3 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -210,6 +210,11 @@ void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
         *size = optimal_size;
 }
 
+void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
+{
+        cache->kasan_info.is_kmalloc = true;
+}
+
 size_t __kasan_metadata_size(struct kmem_cache *cache)
 {
         if (!kasan_stack_collection_enabled())
@@ -394,17 +399,22 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
         }
 }
 
-static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
+static void set_alloc_info(struct kmem_cache *cache, void *object,
+                                gfp_t flags, bool kmalloc)
 {
         struct kasan_alloc_meta *alloc_meta;
 
+        /* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
+        if (cache->kasan_info.is_kmalloc && !kmalloc)
+                return;
+
         alloc_meta = kasan_get_alloc_meta(cache, object);
         if (alloc_meta)
                 kasan_set_track(&alloc_meta->alloc_track, flags);
 }
 
 static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
-                                size_t size, gfp_t flags, bool keep_tag)
+                                size_t size, gfp_t flags, bool kmalloc)
 {
         unsigned long redzone_start;
         unsigned long redzone_end;
@@ -423,7 +433,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                 KASAN_GRANULE_SIZE);
         redzone_end = round_up((unsigned long)object + cache->object_size,
                                 KASAN_GRANULE_SIZE);
-        tag = assign_tag(cache, object, false, keep_tag);
+        tag = assign_tag(cache, object, false, kmalloc);
 
         /* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */
         kasan_unpoison(set_tag(object, tag), size);
@@ -431,7 +441,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
                         KASAN_KMALLOC_REDZONE);
 
         if (kasan_stack_collection_enabled())
-                set_alloc_info(cache, (void *)object, flags);
+                set_alloc_info(cache, (void *)object, flags, kmalloc);
 
         return set_tag(object, tag);
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 9aa3d2fe4c55..39d1a8ff9bb8 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -647,6 +647,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
                 panic("Out of memory when creating slab %s\n", name);
 
         create_boot_cache(s, name, size, flags, useroffset, usersize);
+        kasan_cache_create_kmalloc(s);
         list_add(&s->list, &slab_caches);
         s->refcount = 1;
         return s;
--
2.30.0.365.g02bc693789-goog


2021-02-02 22:51:50

by Marco Elver

Subject: Re: [PATCH 01/12] kasan, mm: don't save alloc stacks twice

On Mon, Feb 01, 2021 at 08:43PM +0100, Andrey Konovalov wrote:
> Currently KASAN saves allocation stacks in both kasan_slab_alloc() and
> kasan_kmalloc() annotations. This patch changes KASAN to save allocation
> stacks for slab objects from kmalloc caches in kasan_kmalloc() only,
> and stacks for other slab objects in kasan_slab_alloc() only.
>
> This change requires ____kasan_kmalloc() to know whether the object
> belongs to a kmalloc cache. This is implemented by adding a flag field
> to the kasan_info structure. That flag is only set for kmalloc caches
> via a new kasan_cache_create_kmalloc() annotation.
>
> Signed-off-by: Andrey Konovalov <[email protected]>

Reviewed-by: Marco Elver <[email protected]>

[...]
> -static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
> +static void set_alloc_info(struct kmem_cache *cache, void *object,
> +                                gfp_t flags, bool kmalloc)
>  {
>          struct kasan_alloc_meta *alloc_meta;
>  
> +        /* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
> +        if (cache->kasan_info.is_kmalloc && !kmalloc)
> +                return;
> +
>          alloc_meta = kasan_get_alloc_meta(cache, object);
>          if (alloc_meta)
>                  kasan_set_track(&alloc_meta->alloc_track, flags);
>  }
>  
>  static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
> -                                size_t size, gfp_t flags, bool keep_tag)
> +                                size_t size, gfp_t flags, bool kmalloc)
[...]
>          if (kasan_stack_collection_enabled())
> -                set_alloc_info(cache, (void *)object, flags);
> +                set_alloc_info(cache, (void *)object, flags, kmalloc);

It doesn't bother me too much, but: 'bool kmalloc' shadows the function
'kmalloc'. This is technically fine, but using 'kmalloc' as the
variable name here might be confusing, and there is a small chance it
could cause problems in a future refactor.
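
A minimal illustration of the shadowing (hypothetical code, not from
the patch): within such a function the parameter hides the allocator,
so the allocator can no longer be called directly in that scope:

#include <linux/slab.h>

/* Hypothetical example; 'example' is not a function from the patch. */
static void *example(size_t size, gfp_t flags, bool kmalloc)
{
        /*
         * 'kmalloc' here names the bool parameter, not the allocator:
         * "return kmalloc(size, flags);" would fail to compile in this
         * scope. The parameter itself still works as expected:
         */
        return kmalloc ? kzalloc(size, flags) : NULL;
}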


2021-02-03 00:32:15

by Andrey Konovalov

Subject: Re: [PATCH 01/12] kasan, mm: don't save alloc stacks twice

On Tue, Feb 2, 2021 at 5:06 PM Marco Elver <[email protected]> wrote:
>
> On Mon, Feb 01, 2021 at 08:43PM +0100, Andrey Konovalov wrote:
[...]
> >          if (kasan_stack_collection_enabled())
> > -                set_alloc_info(cache, (void *)object, flags);
> > +                set_alloc_info(cache, (void *)object, flags, kmalloc);
>
> It doesn't bother me too much, but: 'bool kmalloc' shadows the
> function 'kmalloc'. This is technically fine, but using 'kmalloc' as
> the variable name here might be confusing, and there is a small chance
> it could cause problems in a future refactor.

Good point. Does "is_kmalloc" sound good?

Thanks!

2021-02-03 00:42:16

by Marco Elver

Subject: Re: [PATCH 01/12] kasan, mm: don't save alloc stacks twice

On Tue, 2 Feb 2021 at 19:01, 'Andrey Konovalov' via kasan-dev
<[email protected]> wrote:
[...]
> > > @@ -83,6 +83,7 @@ static inline void kasan_disable_current(void) {}
> > >  struct kasan_cache {
> > >          int alloc_meta_offset;
> > >          int free_meta_offset;
> > > +        bool is_kmalloc;
[...]
> > >          if (kasan_stack_collection_enabled())
> > > -                set_alloc_info(cache, (void *)object, flags);
> > > +                set_alloc_info(cache, (void *)object, flags, kmalloc);
> >
> > It doesn't bother me too much, but: 'bool kmalloc' shadows the
> > function 'kmalloc'. This is technically fine, but using 'kmalloc' as
> > the variable name here might be confusing, and there is a small
> > chance it could cause problems in a future refactor.
>
> Good point. Does "is_kmalloc" sound good?

Sure, that's also consistent with the new struct field.
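
Presumably the follow-up amounts to renaming the parameter throughout,
along these lines (a sketch of the expected change, not the actual v2):

-static void set_alloc_info(struct kmem_cache *cache, void *object,
-                                gfp_t flags, bool kmalloc)
+static void set_alloc_info(struct kmem_cache *cache, void *object,
+                                gfp_t flags, bool is_kmalloc)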

Thanks,
-- Marco