2024-01-22 17:50:46

by Marco Elver

Subject: [RFC PATCH] stackdepot: use variable size records for non-evictable entries

With the introduction of stack depot evictions, each stack record is now
fixed size, so that future reuse after an eviction can safely store
differently sized stack traces. In all cases that do not make use of
evictions, this wastes lots of space.

Fix it by re-introducing variable size stack records (up to the max
allowed size) for entries that will never be evicted. We know an entry
will never be evicted if the flag STACK_DEPOT_FLAG_GET is not provided,
since a later stack_depot_put() attempt on such an entry is undefined
behavior.
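
For illustration, the two call patterns look roughly like this at the
API level (a minimal sketch, not part of this patch; the wrapper
functions are hypothetical, but stack_depot_save_flags() and
stack_depot_put() are the real entry points):

  #include <linux/stackdepot.h>
  #include <linux/stacktrace.h>

  /*
   * No STACK_DEPOT_FLAG_GET: the record can never be evicted, so the
   * depot is free to store it in a variable-sized slot (this patch).
   */
  static depot_stack_handle_t save_owner_stack(gfp_t gfp)
  {
          unsigned long entries[16];
          unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

          return stack_depot_save_flags(entries, nr, gfp,
                                        STACK_DEPOT_FLAG_CAN_ALLOC);
  }

  /*
   * STACK_DEPOT_FLAG_GET: a reference is taken and the caller must
   * eventually call stack_depot_put(), so the record has to be
   * allocated at the maximum size for safe reuse after eviction.
   */
  static depot_stack_handle_t save_evictable_stack(gfp_t gfp)
  {
          unsigned long entries[16];
          unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

          return stack_depot_save_flags(entries, nr, gfp,
                                        STACK_DEPOT_FLAG_CAN_ALLOC |
                                        STACK_DEPOT_FLAG_GET);
  }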

With my current kernel config, which enables KASAN and SLUB owner
tracking, I observe (after a kernel boot) a whopping reduction of 296
stack depot pools, which translates into 4736 KiB saved (296 pools x
16 KiB default pool size). The savings here come from SLUB owner
tracking only, because KASAN generic mode still uses refcounting.

Before:

pools: 893
allocations: 29841
frees: 6524
in_use: 23317
freelist_size: 3454

After:

pools: 597
allocations: 29657
frees: 6425
in_use: 23232
freelist_size: 3493

Fixes: 108be8def46e ("lib/stackdepot: allow users to evict stack traces")
Signed-off-by: Marco Elver <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Konovalov <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
---

Sending this out as an early RFC.

We're still mulling over what to do with generic KASAN, because stack
depot eviction support was only added due to concerns about too much
memory usage.

If this general approach makes sense, then I'd be in favour of just
reverting all the KASAN-generic eviction patches and leaving KASAN-tag
as the only user of evictions.

Thoughts?

---
lib/stackdepot.c | 163 +++++++++++++++++++++++++----------------------
1 file changed, 88 insertions(+), 75 deletions(-)

diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 5caa1f566553..726002d2ac09 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -93,9 +93,6 @@ struct stack_record {
};
};

-#define DEPOT_STACK_RECORD_SIZE \
- ALIGN(sizeof(struct stack_record), 1 << DEPOT_STACK_ALIGN)
-
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;
@@ -121,6 +118,8 @@ static void *stack_pools[DEPOT_MAX_POOLS];
static void *new_pool;
/* Number of pools in stack_pools. */
static int pools_num;
+/* Offset to the unused space in the currently used pool. */
+static size_t pool_offset = DEPOT_POOL_SIZE;
/* Freelist of stack records within stack_pools. */
static LIST_HEAD(free_stacks);
/*
@@ -294,48 +293,44 @@ int stack_depot_init(void)
EXPORT_SYMBOL_GPL(stack_depot_init);

/*
- * Initializes new stack depot @pool, release all its entries to the freelist,
- * and update the list of pools.
+ * Initializes new stack pool, and update the list of pools.
*/
-static void depot_init_pool(void *pool)
+static bool depot_init_pool(void **prealloc)
{
- int offset;
+ void *pool = NULL;

lockdep_assert_held(&pool_lock);

- /* Initialize handles and link stack records into the freelist. */
- for (offset = 0; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
- offset += DEPOT_STACK_RECORD_SIZE) {
- struct stack_record *stack = pool + offset;
-
- stack->handle.pool_index = pools_num;
- stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
- stack->handle.extra = 0;
-
- /*
- * Stack traces of size 0 are never saved, and we can simply use
- * the size field as an indicator if this is a new unused stack
- * record in the freelist.
- */
- stack->size = 0;
+ if (new_pool) {
+ /* We have a new pool saved, use it. */
+ pool = new_pool;
+ new_pool = NULL;

- INIT_LIST_HEAD(&stack->hash_list);
- /*
- * Add to the freelist front to prioritize never-used entries:
- * required in case there are entries in the freelist, but their
- * RCU cookie still belongs to the current RCU grace period
- * (there can still be concurrent readers).
- */
- list_add(&stack->free_list, &free_stacks);
- counters[DEPOT_COUNTER_FREELIST_SIZE]++;
+ /* Take note that we might need a new new_pool. */
+ if (pools_num < DEPOT_MAX_POOLS)
+ WRITE_ONCE(new_pool_required, true);
+ } else if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+ /* Bail out if we reached the pool limit. */
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ } else if (*prealloc) {
+ /* We have preallocated memory, use it. */
+ pool = *prealloc;
+ *prealloc = NULL;
}

+ if (!pool)
+ return false;
+
/* Save reference to the pool to be used by depot_fetch_stack(). */
stack_pools[pools_num] = pool;

/* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */
WRITE_ONCE(pools_num, pools_num + 1);
ASSERT_EXCLUSIVE_WRITER(pools_num);
+
+ pool_offset = 0;
+
+ return true;
}

/* Keeps the preallocated memory to be used for a new stack depot pool. */
@@ -368,39 +363,40 @@ static void depot_keep_new_pool(void **prealloc)
}

/*
- * Try to initialize a new stack depot pool from either a previous or the
- * current pre-allocation, and release all its entries to the freelist.
+ * Try to initialize a new stack record from the current pool, a cached pool, or
+ * the current pre-allocation.
*/
-static bool depot_try_init_pool(void **prealloc)
+static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
{
+ struct stack_record *stack;
+ void *current_pool;
+ u32 pool_index;
+
lockdep_assert_held(&pool_lock);

- /* Check if we have a new pool saved and use it. */
- if (new_pool) {
- depot_init_pool(new_pool);
- new_pool = NULL;
+ if (pool_offset + size > DEPOT_POOL_SIZE) {
+ if (!depot_init_pool(prealloc))
+ return NULL;
+ }

- /* Take note that we might need a new new_pool. */
- if (pools_num < DEPOT_MAX_POOLS)
- WRITE_ONCE(new_pool_required, true);
+ if (WARN_ON_ONCE(pools_num < 1))
+ return NULL;
+ pool_index = pools_num - 1;
+ current_pool = stack_pools[pool_index];
+ if (WARN_ON_ONCE(!current_pool))
+ return NULL;

- return true;
- }
+ stack = current_pool + pool_offset;

- /* Bail out if we reached the pool limit. */
- if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
- WARN_ONCE(1, "Stack depot reached limit capacity");
- return false;
- }
+ /* Pre-initialize handle once. */
+ stack->handle.pool_index = pool_index;
+ stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
+ stack->handle.extra = 0;
+ INIT_LIST_HEAD(&stack->hash_list);

- /* Check if we have preallocated memory and use it. */
- if (*prealloc) {
- depot_init_pool(*prealloc);
- *prealloc = NULL;
- return true;
- }
+ pool_offset += size;

- return false;
+ return stack;
}

/* Try to find next free usable entry. */
@@ -420,7 +416,7 @@ static struct stack_record *depot_pop_free(void)
* check the first entry.
*/
stack = list_first_entry(&free_stacks, struct stack_record, free_list);
- if (stack->size && !poll_state_synchronize_rcu(stack->rcu_state))
+ if (!poll_state_synchronize_rcu(stack->rcu_state))
return NULL;

list_del(&stack->free_list);
@@ -429,45 +425,62 @@ static struct stack_record *depot_pop_free(void)
return stack;
}

+static inline size_t depot_stack_record_size(struct stack_record *s, size_t nr_entries)
+{
+ const size_t used = flex_array_size(s, entries, nr_entries);
+ const size_t unused = sizeof(s->entries) - used;
+
+ WARN_ON_ONCE(sizeof(s->entries) < used);
+
+ return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN);
+}
+
/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
-depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
+depot_alloc_stack(unsigned long *entries, int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
{
- struct stack_record *stack;
+ struct stack_record *stack = NULL;
+ size_t record_size;

lockdep_assert_held(&pool_lock);

/* This should already be checked by public API entry points. */
- if (WARN_ON_ONCE(!size))
+ if (WARN_ON_ONCE(!nr_entries))
return NULL;

- /* Check if we have a stack record to save the stack trace. */
- stack = depot_pop_free();
- if (!stack) {
- /* No usable entries on the freelist - try to refill the freelist. */
- if (!depot_try_init_pool(prealloc))
- return NULL;
+ /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
+ if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES)
+ nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES;
+
+ if (flags & STACK_DEPOT_FLAG_GET) {
+ /*
+ * Evictable entries have to allocate the max. size so they may
+ * safely be re-used by differently sized allocations.
+ */
+ record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES);
stack = depot_pop_free();
- if (WARN_ON(!stack))
- return NULL;
+ } else {
+ record_size = depot_stack_record_size(stack, nr_entries);
}

- /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
- if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
- size = CONFIG_STACKDEPOT_MAX_FRAMES;
+ if (!stack) {
+ stack = depot_pop_free_pool(prealloc, record_size);
+ if (!stack)
+ return NULL;
+ }

/* Save the stack trace. */
stack->hash = hash;
- stack->size = size;
- /* stack->handle is already filled in by depot_init_pool(). */
+ stack->size = nr_entries;
+ /* stack->handle is already filled in by depot_pop_free_pool(). */
refcount_set(&stack->count, 1);
- memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
+ memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries));

/*
* Let KMSAN know the stored stack record is initialized. This shall
* prevent false positive reports if instrumented code accesses it.
*/
- kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
+ kmsan_unpoison_memory(stack, record_size);

counters[DEPOT_COUNTER_ALLOCS]++;
counters[DEPOT_COUNTER_INUSE]++;
@@ -681,7 +694,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
if (!found) {
struct stack_record *new =
- depot_alloc_stack(entries, nr_entries, hash, &prealloc);
+ depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc);

if (new) {
/*
--
2.43.0.429.g432eaa2c6b-goog



2024-01-23 15:54:30

by Marco Elver

Subject: Re: [RFC PATCH] stackdepot: use variable size records for non-evictable entries

And on top of this we can make KASAN generic happier again:

Objections?

------ >8 ------

From: Marco Elver <[email protected]>
Date: Tue, 23 Jan 2024 12:11:36 +0100
Subject: [PATCH RFC] kasan: revert eviction of stack traces in generic mode

This partially reverts commits cc478e0b6bdf, 63b85ac56a64, 08d7c94d9635,
a414d4286f34, and 773688a6cb24 to make use of variable-sized stack depot
records, since eviction of stack entries from stack depot forces fixed-
sized stack records. Care was taken to retain the code cleanups by the
above commits.

Eviction was added to generic KASAN to alleviate the additional memory
usage caused by fixed-sized stack records, but even with eviction the
memory usage remained higher than before those changes.

With the re-introduction of variable-sized records for stack depot, we
can just switch back to non-evictable stack records again, and return
to the previous performance and memory usage baseline.
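
One consequence worth spelling out (a sketch only, not part of this
patch; print_aux_stacks() is a hypothetical helper): since no reference
is taken via STACK_DEPOT_FLAG_GET, the stored handles are never
evicted, so readers of the metadata can use them without synchronizing
against a concurrent stack_depot_put():

  static void print_aux_stacks(struct kasan_alloc_meta *meta)
  {
          int i;

          for (i = 0; i < ARRAY_SIZE(meta->aux_stack); i++) {
                  /* Handles stay valid for the lifetime of the system. */
                  if (meta->aux_stack[i])
                          stack_depot_print(meta->aux_stack[i]);
          }
  }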

Before (observed after a KASAN kernel boot):

pools: 597
allocations: 29657
frees: 6425
in_use: 23232
freelist_size: 3493

After:

pools: 315
allocations: 28964
frees: 0
in_use: 28964
freelist_size: 0

As can be seen from the number of "frees", with a generic KASAN config,
evictions are no longer used, but due to the variable-sized records I
still observe a reduction of 282 stack depot pools (saving 4512 KiB,
i.e. 282 pools x 16 KiB).

Fixes: cc478e0b6bdf ("kasan: avoid resetting aux_lock")
Fixes: 63b85ac56a64 ("kasan: stop leaking stack trace handles")
Fixes: 08d7c94d9635 ("kasan: memset free track in qlink_free")
Fixes: a414d4286f34 ("kasan: handle concurrent kasan_record_aux_stack calls")
Fixes: 773688a6cb24 ("kasan: use stack_depot_put for Generic mode")
Signed-off-by: Marco Elver <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Konovalov <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
---
mm/kasan/common.c | 3 +--
mm/kasan/generic.c | 54 ++++++----------------------------------------
mm/kasan/kasan.h | 8 -------
3 files changed, 8 insertions(+), 57 deletions(-)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 610efae91220..ad32803e34e9 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -65,8 +65,7 @@ void kasan_save_track(struct kasan_track *track, gfp_t flags)
{
depot_stack_handle_t stack;

- stack = kasan_save_stack(flags,
- STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+ stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
kasan_set_track(track, stack);
}

diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index df6627f62402..8bfb52b28c22 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -485,16 +485,6 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
if (alloc_meta) {
/* Zero out alloc meta to mark it as invalid. */
__memset(alloc_meta, 0, sizeof(*alloc_meta));
-
- /*
- * Prepare the lock for saving auxiliary stack traces.
- * Temporarily disable KASAN bug reporting to allow instrumented
- * raw_spin_lock_init to access aux_lock, which resides inside
- * of a redzone.
- */
- kasan_disable_current();
- raw_spin_lock_init(&alloc_meta->aux_lock);
- kasan_enable_current();
}

/*
@@ -506,18 +496,8 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)

static void release_alloc_meta(struct kasan_alloc_meta *meta)
{
- /* Evict the stack traces from stack depot. */
- stack_depot_put(meta->alloc_track.stack);
- stack_depot_put(meta->aux_stack[0]);
- stack_depot_put(meta->aux_stack[1]);
-
- /*
- * Zero out alloc meta to mark it as invalid but keep aux_lock
- * initialized to avoid having to reinitialize it when another object
- * is allocated in the same slot.
- */
- __memset(&meta->alloc_track, 0, sizeof(meta->alloc_track));
- __memset(meta->aux_stack, 0, sizeof(meta->aux_stack));
+ /* Zero out alloc meta to mark it as invalid. */
+ __memset(meta, 0, sizeof(*meta));
}

static void release_free_meta(const void *object, struct kasan_free_meta *meta)
@@ -526,9 +506,6 @@ static void release_free_meta(const void *object, struct kasan_free_meta *meta)
if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
return;

- /* Evict the stack trace from the stack depot. */
- stack_depot_put(meta->free_track.stack);
-
/* Mark free meta as invalid. */
*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
}
@@ -571,8 +548,6 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
struct kmem_cache *cache;
struct kasan_alloc_meta *alloc_meta;
void *object;
- depot_stack_handle_t new_handle, old_handle;
- unsigned long flags;

if (is_kfence_address(addr) || !slab)
return;
@@ -583,33 +558,18 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
if (!alloc_meta)
return;

- new_handle = kasan_save_stack(0, depot_flags);
-
- /*
- * Temporarily disable KASAN bug reporting to allow instrumented
- * spinlock functions to access aux_lock, which resides inside of a
- * redzone.
- */
- kasan_disable_current();
- raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags);
- old_handle = alloc_meta->aux_stack[1];
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
- alloc_meta->aux_stack[0] = new_handle;
- raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
- kasan_enable_current();
-
- stack_depot_put(old_handle);
+ alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
}

void kasan_record_aux_stack(void *addr)
{
- return __kasan_record_aux_stack(addr,
- STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+ return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
}

void kasan_record_aux_stack_noalloc(void *addr)
{
- return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
+ return __kasan_record_aux_stack(addr, 0);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -620,7 +580,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
if (!alloc_meta)
return;

- /* Evict previous stack traces (might exist for krealloc or mempool). */
+ /* Invalidate previous stack traces (might exist for krealloc or mempool). */
release_alloc_meta(alloc_meta);

kasan_save_track(&alloc_meta->alloc_track, flags);
@@ -634,7 +594,7 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object)
if (!free_meta)
return;

- /* Evict previous stack trace (might exist for mempool). */
+ /* Invalidate previous stack trace (might exist for mempool). */
release_free_meta(object, free_meta);

kasan_save_track(&free_meta->free_track, 0);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index d0f172f2b978..216ae0ef1e4b 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -6,7 +6,6 @@
#include <linux/kasan.h>
#include <linux/kasan-tags.h>
#include <linux/kfence.h>
-#include <linux/spinlock.h>
#include <linux/stackdepot.h>

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -265,13 +264,6 @@ struct kasan_global {
struct kasan_alloc_meta {
struct kasan_track alloc_track;
/* Free track is stored in kasan_free_meta. */
- /*
- * aux_lock protects aux_stack from accesses from concurrent
- * kasan_record_aux_stack calls. It is a raw spinlock to avoid sleeping
- * on RT kernels, as kasan_record_aux_stack_noalloc can be called from
- * non-sleepable contexts.
- */
- raw_spinlock_t aux_lock;
depot_stack_handle_t aux_stack[2];
};

--
2.43.0.429.g432eaa2c6b-goog


2024-01-23 15:56:32

by Dmitry Vyukov

Subject: Re: [RFC PATCH] stackdepot: use variable size records for non-evictable entries

On Tue, 23 Jan 2024 at 16:53, Marco Elver <[email protected]> wrote:
>
> And on top of this we can make KASAN generic happier again:
>
> Objections?

Not doing refcounting/aux locking for generic KASAN makes sense to me.
