2023-11-20 18:34:58

by Vlastimil Babka

Subject: [PATCH v2 03/21] KASAN: remove code paths guarded by CONFIG_SLAB

With SLAB removed and SLUB the only remaining allocator, we can clean up
some code that depended on the choice of allocator.

Reviewed-by: Kees Cook <[email protected]>
Reviewed-by: Marco Elver <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
---
mm/kasan/common.c | 13 ++-----------
mm/kasan/kasan.h | 3 +--
mm/kasan/quarantine.c | 7 -------
3 files changed, 3 insertions(+), 20 deletions(-)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 256930da578a..5d95219e69d7 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -153,10 +153,6 @@ void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
* 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
* accessed after being freed. We preassign tags for objects in these
* caches as well.
- * 3. For SLAB allocator we can't preassign tags randomly since the freelist
- * is stored as an array of indexes instead of a linked list. Assign tags
- * based on objects indexes, so that objects that are next to each other
- * get different tags.
*/
static inline u8 assign_tag(struct kmem_cache *cache,
const void *object, bool init)
@@ -171,17 +167,12 @@ static inline u8 assign_tag(struct kmem_cache *cache,
if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
return init ? KASAN_TAG_KERNEL : kasan_random_tag();

- /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
-#ifdef CONFIG_SLAB
- /* For SLAB assign tags based on the object index in the freelist. */
- return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
-#else
/*
- * For SLUB assign a random tag during slab creation, otherwise reuse
+ * For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU,
+ * assign a random tag during slab creation, otherwise reuse
* the already assigned tag.
*/
return init ? kasan_random_tag() : get_tag(object);
-#endif
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
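
For reference, assign_tag() after this change reduces to roughly the
sketch below (stitched from the hunk context above; the lines elided
between the two hunks are untouched by this patch):

static inline u8 assign_tag(struct kmem_cache *cache,
		const void *object, bool init)
{
	/* Elided: early returns for configurations that never assign tags. */

	/* For caches without a ctor and not SLAB_TYPESAFE_BY_RCU: */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/*
	 * For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU,
	 * assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
}
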
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 8b06bab5c406..eef50233640a 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -373,8 +373,7 @@ void kasan_set_track(struct kasan_track *track, gfp_t flags);
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);
void kasan_save_free_info(struct kmem_cache *cache, void *object);

-#if defined(CONFIG_KASAN_GENERIC) && \
- (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
+#ifdef CONFIG_KASAN_GENERIC
bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
void kasan_quarantine_reduce(void);
void kasan_quarantine_remove_cache(struct kmem_cache *cache);
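
With the guard reduced to CONFIG_KASAN_GENERIC alone, quarantine callers no
longer depend on which allocator was selected. The !CONFIG_KASAN_GENERIC
side (outside the hunk shown) presumably keeps its usual no-op stubs, along
the lines of:

static inline bool kasan_quarantine_put(struct kmem_cache *cache, void *object) { return false; }
static inline void kasan_quarantine_reduce(void) { }
static inline void kasan_quarantine_remove_cache(struct kmem_cache *cache) { }
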
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index ca4529156735..138c57b836f2 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -144,10 +144,6 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
void *object = qlink_to_object(qlink, cache);
struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
- unsigned long flags;
-
- if (IS_ENABLED(CONFIG_SLAB))
- local_irq_save(flags);

/*
* If init_on_free is enabled and KASAN's free metadata is stored in
@@ -166,9 +162,6 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;

___cache_free(cache, object, _THIS_IP_);
-
- if (IS_ENABLED(CONFIG_SLAB))
- local_irq_restore(flags);
}

static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
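
With the SLAB-only IRQ toggling gone, qlink_free() becomes a single
straight-line path; roughly (again a sketch from the hunk context, with the
unchanged middle elided):

static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

	/* Elided: init_on_free handling that uses 'meta' above. */

	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;

	___cache_free(cache, object, _THIS_IP_);
}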

--
2.42.1


2023-11-21 08:26:25

by Hyeonggon Yoo

Subject: Re: [PATCH v2 03/21] KASAN: remove code paths guarded by CONFIG_SLAB

On Tue, Nov 21, 2023 at 3:34 AM Vlastimil Babka <[email protected]> wrote:
>
> With SLAB removed and SLUB the only remaining allocator, we can clean up
> some code that depended on the choice of allocator.
>
> Reviewed-by: Kees Cook <[email protected]>
> Reviewed-by: Marco Elver <[email protected]>
> Signed-off-by: Vlastimil Babka <[email protected]>
> ---

[...]

> diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
> index ca4529156735..138c57b836f2 100644
> --- a/mm/kasan/quarantine.c
> +++ b/mm/kasan/quarantine.c
> @@ -144,10 +144,6 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
> {
> void *object = qlink_to_object(qlink, cache);
> struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
> - unsigned long flags;
> -
> - if (IS_ENABLED(CONFIG_SLAB))
> - local_irq_save(flags);
>
> /*
> * If init_on_free is enabled and KASAN's free metadata is stored in
> @@ -166,9 +162,6 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
> *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
>
> ___cache_free(cache, object, _THIS_IP_);
> -
> - if (IS_ENABLED(CONFIG_SLAB))
> - local_irq_restore(flags);
> }

FYI, there's a slight conflict (easy to resolve, though) when merging this
on top of linux-next, due to a recent change in KASAN:

https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/commit/?id=0e8b630f3053f0ff84b7c3ab8ff98a7393863824

Thanks,
Hyeonggon

2023-11-21 16:47:34

by Andrey Konovalov

Subject: Re: [PATCH v2 03/21] KASAN: remove code paths guarded by CONFIG_SLAB

On Mon, Nov 20, 2023 at 7:34 PM Vlastimil Babka <[email protected]> wrote:
>
> With SLAB removed and SLUB the only remaining allocator, we can clean up
> some code that depended on the choice of allocator.
>
> Reviewed-by: Kees Cook <[email protected]>
> Reviewed-by: Marco Elver <[email protected]>
> Signed-off-by: Vlastimil Babka <[email protected]>
> ---

[...]

Reviewed-by: Andrey Konovalov <[email protected]>

Very nice to see the SLAB-induced complexity gone :)

Thank you!

2023-12-05 04:27:25

by Hyeonggon Yoo

Subject: Re: [PATCH v2 03/21] KASAN: remove code paths guarded by CONFIG_SLAB

On Mon, Nov 20, 2023 at 07:34:14PM +0100, Vlastimil Babka wrote:
> With SLAB removed and SLUB the only remaining allocator, we can clean up
> some code that depended on the choice of allocator.
>
> Reviewed-by: Kees Cook <[email protected]>
> Reviewed-by: Marco Elver <[email protected]>
> Signed-off-by: Vlastimil Babka <[email protected]>
> ---

[...]

Looks good to me,
Reviewed-by: Hyeonggon Yoo <[email protected]>


2023-12-05 04:48:55

by Hyeonggon Yoo

Subject: Re: [PATCH v2 03/21] KASAN: remove code paths guarded by CONFIG_SLAB

On Tue, Dec 5, 2023 at 1:27 PM Hyeonggon Yoo <[email protected]> wrote:
>
> On Mon, Nov 20, 2023 at 07:34:14PM +0100, Vlastimil Babka wrote:
> > With SLAB removed and SLUB the only remaining allocator, we can clean up
> > some code that depended on the choice of allocator.
> >
> > Reviewed-by: Kees Cook <[email protected]>
> > Reviewed-by: Marco Elver <[email protected]>
> > Signed-off-by: Vlastimil Babka <[email protected]>
> > ---

[...]

>
> Looks good to me,
> Reviewed-by: Hyeonggon Yoo <[email protected]>

nit: Some KASAN tests depend on SLUB, but now that it's the only allocator,
the KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); checks in
mm/kasan/kasan_test.c can be removed.
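
For illustration, the guard in question sits at the top of individual KUnit
tests; a sketch (the test name here is hypothetical, standing in for the
real functions in mm/kasan/kasan_test.c):

static void some_slub_only_test(struct kunit *test)
{
	/*
	 * Skips the test unless CONFIG_SLUB=y; with SLUB the only
	 * remaining allocator this can never skip, so the line can
	 * simply be dropped.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	/* ... test body ... */
}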


2023-12-05 10:17:34

by Vlastimil Babka

Subject: Re: [PATCH v2 03/21] KASAN: remove code paths guarded by CONFIG_SLAB

On 12/5/23 05:48, Hyeonggon Yoo wrote:
> On Tue, Dec 5, 2023 at 1:27 PM Hyeonggon Yoo <[email protected]> wrote:
>>
>> On Mon, Nov 20, 2023 at 07:34:14PM +0100, Vlastimil Babka wrote:
>> > With SLAB removed and SLUB the only remaining allocator, we can clean up
>> > some code that depended on the choice of allocator.
>> >
>> > Reviewed-by: Kees Cook <[email protected]>
>> > Reviewed-by: Marco Elver <[email protected]>
>> > Signed-off-by: Vlastimil Babka <[email protected]>
>> > ---

[...]

>>
>> Looks good to me,
>> Reviewed-by: Hyeonggon Yoo <[email protected]>

Thanks!

> nit: Some KASAN tests depend on SLUB, but now that it's the only allocator,
> the KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); checks in
> mm/kasan/kasan_test.c can be removed.

Hmm, I see, but I'd rather leave that for a later cleanup as well at this
point, thanks!
