From: Andrey Konovalov <[email protected]>
A few fixes for the stack depot eviction series.
Andrey Konovalov (4):
lib/stackdepot: add printk_deferred_enter/exit guards
kasan: handle concurrent kasan_record_aux_stack calls
kasan: memset free track in qlink_free
lib/stackdepot: fix comment in include/linux/stackdepot.h
include/linux/stackdepot.h | 2 --
lib/stackdepot.c | 9 +++++++++
mm/kasan/generic.c | 15 +++++++++++++--
mm/kasan/quarantine.c | 2 +-
4 files changed, 23 insertions(+), 5 deletions(-)
--
2.25.1
From: Andrey Konovalov <[email protected]>
Stack depot functions can be called from various contexts that do
allocations, including with console locks taken. At the same time, stack
depot functions might print WARNINGs or refcount-related failure messages.
This can cause a deadlock on console locks.
Add printk_deferred_enter/exit guards to stack depot to avoid this.
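The pattern, roughly (a sketch only; the diff below adds these calls
at the actual lock/unlock sites):

	read_lock_irqsave(&pool_rwlock, flags);
	printk_deferred_enter();

	/*
	 * Any printk issued from here, e.g. by WARN_ON or a refcount
	 * saturation splat, is queued and printed later instead of
	 * acquiring console locks that the caller might already hold.
	 */

	printk_deferred_exit();
	read_unlock_irqrestore(&pool_rwlock, flags);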
Reported-by: Tetsuo Handa <[email protected]>
Closes: https://lore.kernel.org/all/[email protected]/
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/stackdepot.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 870cce2f4cbd..a0be5d05c7f0 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -506,12 +506,14 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
bucket = &stack_table[hash & stack_hash_mask];
read_lock_irqsave(&pool_rwlock, flags);
+ printk_deferred_enter();
/* Fast path: look the stack trace up without full locking. */
found = find_stack(bucket, entries, nr_entries, hash);
if (found) {
if (depot_flags & STACK_DEPOT_FLAG_GET)
refcount_inc(&found->count);
+ printk_deferred_exit();
read_unlock_irqrestore(&pool_rwlock, flags);
goto exit;
}
@@ -520,6 +522,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
if (new_pool_required)
need_alloc = true;
+ printk_deferred_exit();
read_unlock_irqrestore(&pool_rwlock, flags);
/*
@@ -541,6 +544,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
}
write_lock_irqsave(&pool_rwlock, flags);
+ printk_deferred_enter();
found = find_stack(bucket, entries, nr_entries, hash);
if (!found) {
@@ -562,6 +566,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
depot_keep_new_pool(&prealloc);
}
+ printk_deferred_exit();
write_unlock_irqrestore(&pool_rwlock, flags);
exit:
if (prealloc) {
@@ -600,9 +605,11 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
return 0;
read_lock_irqsave(&pool_rwlock, flags);
+ printk_deferred_enter();
stack = depot_fetch_stack(handle);
+ printk_deferred_exit();
read_unlock_irqrestore(&pool_rwlock, flags);
*entries = stack->entries;
@@ -619,6 +626,7 @@ void stack_depot_put(depot_stack_handle_t handle)
return;
write_lock_irqsave(&pool_rwlock, flags);
+ printk_deferred_enter();
stack = depot_fetch_stack(handle);
if (WARN_ON(!stack))
@@ -633,6 +641,7 @@ void stack_depot_put(depot_stack_handle_t handle)
}
out:
+ printk_deferred_exit();
write_unlock_irqrestore(&pool_rwlock, flags);
}
EXPORT_SYMBOL_GPL(stack_depot_put);
--
2.25.1
From: Andrey Konovalov <[email protected]>
kasan_record_aux_stack can be called concurrently on the same object.
This might lead to a race when rotating the saved aux stack trace
handles: a handle can get duplicated or lost, which then causes
refcount underflows in the stack depot.
Fix this by introducing a spinlock to protect the aux stack trace
handles in kasan_record_aux_stack.
Reported-by: Tetsuo Handa <[email protected]>
Reported-by: [email protected]
Closes: https://lore.kernel.org/all/[email protected]/
Signed-off-by: Andrey Konovalov <[email protected]>
---
This can be squashed into "kasan: use stack_depot_put for Generic mode"
or left standalone.
---
mm/kasan/generic.c | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 54e20b2bc3e1..ca5c75a1866c 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -25,6 +25,7 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
@@ -35,6 +36,8 @@
#include "kasan.h"
#include "../slab.h"
+DEFINE_SPINLOCK(aux_lock);
+
/*
* All functions below always inlined so compiler could
* perform better optimizations in each of __asan_loadX/__assn_storeX
@@ -502,6 +505,8 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
struct kmem_cache *cache;
struct kasan_alloc_meta *alloc_meta;
void *object;
+ depot_stack_handle_t new_handle, old_handle;
+ unsigned long flags;
if (is_kfence_address(addr) || !slab)
return;
@@ -512,9 +517,15 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
if (!alloc_meta)
return;
- stack_depot_put(alloc_meta->aux_stack[1]);
+ new_handle = kasan_save_stack(0, depot_flags);
+
+ spin_lock_irqsave(&aux_lock, flags);
+ old_handle = alloc_meta->aux_stack[1];
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
- alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
+ alloc_meta->aux_stack[0] = new_handle;
+ spin_unlock_irqrestore(&aux_lock, flags);
+
+ stack_depot_put(old_handle);
}
void kasan_record_aux_stack(void *addr)
--
2.25.1
From: Andrey Konovalov <[email protected]>
As stack traces can now be evicted from the stack depot, remove the
comment saying that they are never removed.
Signed-off-by: Andrey Konovalov <[email protected]>
---
Can be squashed into "lib/stackdepot: allow users to evict stack traces"
or left standalone.
---
include/linux/stackdepot.h | 2 --
1 file changed, 2 deletions(-)
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index a6796f178913..adcbb8f23600 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -11,8 +11,6 @@
* SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
* stack traces often repeat, using stack depot allows to save about 100x space.
*
- * Stack traces are never removed from the stack depot.
- *
* Author: Alexander Potapenko <[email protected]>
* Copyright (C) 2016 Google, Inc.
*
--
2.25.1
From: Andrey Konovalov <[email protected]>
Instead of only zeroing out the stack depot handle when evicting the
free stack trace in qlink_free, zero out the whole track.
Do this to keep the handling of alloc and free meta consistent. The
other fields of the free track besides the stack trace handle are
considered invalid at this point anyway, so there is no harm in zeroing
them out.
Signed-off-by: Andrey Konovalov <[email protected]>
---
This can be squashed into "kasan: use stack_depot_put for Generic mode"
or left standalone.
---
mm/kasan/quarantine.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 265ca2bbe2dd..782e045da911 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -157,7 +157,7 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
if (free_meta &&
*(u8 *)kasan_mem_to_shadow(object) == KASAN_SLAB_FREETRACK) {
stack_depot_put(free_meta->free_track.stack);
- free_meta->free_track.stack = 0;
+ __memset(&free_meta->free_track, 0, sizeof(free_meta->free_track));
}
/*
--
2.25.1
On Tue, 12 Dec 2023 at 01:14, <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> Stack depot functions can be called from various contexts that do
> allocations, including with console locks taken. At the same time, stack
> depot functions might print WARNINGs or refcount-related failure messages.
>
> This can cause a deadlock on console locks.
>
> Add printk_deferred_enter/exit guards to stack depot to avoid this.
>
> Reported-by: Tetsuo Handa <[email protected]>
> Closes: https://lore.kernel.org/all/[email protected]/
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Marco Elver <[email protected]>
Doesn't need Fixes, because the series is not yet in mainline, right?
> ---
> lib/stackdepot.c | 9 +++++++++
> 1 file changed, 9 insertions(+)
>
> diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> index 870cce2f4cbd..a0be5d05c7f0 100644
> --- a/lib/stackdepot.c
> +++ b/lib/stackdepot.c
> @@ -506,12 +506,14 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
> bucket = &stack_table[hash & stack_hash_mask];
>
> read_lock_irqsave(&pool_rwlock, flags);
> + printk_deferred_enter();
>
> /* Fast path: look the stack trace up without full locking. */
> found = find_stack(bucket, entries, nr_entries, hash);
> if (found) {
> if (depot_flags & STACK_DEPOT_FLAG_GET)
> refcount_inc(&found->count);
> + printk_deferred_exit();
> read_unlock_irqrestore(&pool_rwlock, flags);
> goto exit;
> }
> @@ -520,6 +522,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
> if (new_pool_required)
> need_alloc = true;
>
> + printk_deferred_exit();
> read_unlock_irqrestore(&pool_rwlock, flags);
>
> /*
> @@ -541,6 +544,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
> }
>
> write_lock_irqsave(&pool_rwlock, flags);
> + printk_deferred_enter();
>
> found = find_stack(bucket, entries, nr_entries, hash);
> if (!found) {
> @@ -562,6 +566,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
> depot_keep_new_pool(&prealloc);
> }
>
> + printk_deferred_exit();
> write_unlock_irqrestore(&pool_rwlock, flags);
> exit:
> if (prealloc) {
> @@ -600,9 +605,11 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
> return 0;
>
> read_lock_irqsave(&pool_rwlock, flags);
> + printk_deferred_enter();
>
> stack = depot_fetch_stack(handle);
>
> + printk_deferred_exit();
> read_unlock_irqrestore(&pool_rwlock, flags);
>
> *entries = stack->entries;
> @@ -619,6 +626,7 @@ void stack_depot_put(depot_stack_handle_t handle)
> return;
>
> write_lock_irqsave(&pool_rwlock, flags);
> + printk_deferred_enter();
>
> stack = depot_fetch_stack(handle);
> if (WARN_ON(!stack))
> @@ -633,6 +641,7 @@ void stack_depot_put(depot_stack_handle_t handle)
> }
>
> out:
> + printk_deferred_exit();
> write_unlock_irqrestore(&pool_rwlock, flags);
> }
> EXPORT_SYMBOL_GPL(stack_depot_put);
> --
> 2.25.1
>
On Tue, 12 Dec 2023 at 01:14, <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> kasan_record_aux_stack can be called concurrently on the same object.
> This might lead to a race when rotating the saved aux stack trace
> handles: a handle can get duplicated or lost, which then causes
> refcount underflows in the stack depot.
>
> Fix this by introducing a spinlock to protect the aux stack trace
> handles in kasan_record_aux_stack.
>
> Reported-by: Tetsuo Handa <[email protected]>
> Reported-by: [email protected]
> Closes: https://lore.kernel.org/all/[email protected]/
> Signed-off-by: Andrey Konovalov <[email protected]>
>
> ---
>
> This can be squashed into "kasan: use stack_depot_put for Generic mode"
> or left standalone.
> ---
> mm/kasan/generic.c | 15 +++++++++++++--
> 1 file changed, 13 insertions(+), 2 deletions(-)
>
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index 54e20b2bc3e1..ca5c75a1866c 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -25,6 +25,7 @@
> #include <linux/sched.h>
> #include <linux/sched/task_stack.h>
> #include <linux/slab.h>
> +#include <linux/spinlock.h>
> #include <linux/stackdepot.h>
> #include <linux/stacktrace.h>
> #include <linux/string.h>
> @@ -35,6 +36,8 @@
> #include "kasan.h"
> #include "../slab.h"
>
> +DEFINE_SPINLOCK(aux_lock);
No, please don't.
> /*
> * All functions below always inlined so compiler could
> * perform better optimizations in each of __asan_loadX/__assn_storeX
> @@ -502,6 +505,8 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
> struct kmem_cache *cache;
> struct kasan_alloc_meta *alloc_meta;
> void *object;
> + depot_stack_handle_t new_handle, old_handle;
> + unsigned long flags;
>
> if (is_kfence_address(addr) || !slab)
> return;
> @@ -512,9 +517,15 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
> if (!alloc_meta)
> return;
>
> - stack_depot_put(alloc_meta->aux_stack[1]);
> + new_handle = kasan_save_stack(0, depot_flags);
> +
> + spin_lock_irqsave(&aux_lock, flags);
This is an unnecessary global lock. What's the problem here? As far as
I can understand, a race is possible where we may end up with
duplicated or lost stack handles.
Since storing this information is best effort anyway, and bugs are
rare, a global lock protecting this is overkill.
I'd just accept the raciness and use READ_ONCE() / WRITE_ONCE() to
make sure we don't tear any reads/writes and the depot handles are
valid. There are other more complex schemes [1], but I think they are
overkill as well.
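Roughly (a sketch of that variant, reusing the names from the patch;
it tolerates an occasional lost or duplicated handle):

	new_handle = kasan_save_stack(0, depot_flags);

	/* Accept the race; only avoid torn accesses to the handles. */
	old_handle = READ_ONCE(alloc_meta->aux_stack[1]);
	WRITE_ONCE(alloc_meta->aux_stack[1],
		   READ_ONCE(alloc_meta->aux_stack[0]));
	WRITE_ONCE(alloc_meta->aux_stack[0], new_handle);

	stack_depot_put(old_handle);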
[1]: Since a depot stack handle is just a u32, we can have a
union {
depot_stack_handle_t handles[2];
atomic64_t atomic_handle;
} aux_stack;
(BUILD_BUG_ON somewhere if sizeof handles and atomic_handle mismatch.)
Then in the code here create the same union and load atomic_handle.
Swap handles[1] into handles[0] and write the new one in handles[1].
Then do a cmpxchg loop to store the new atomic_handle.
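Spelled out, it might look something like this (a sketch only; the
helper name and the inner union are made up, with the newest handle
kept in handles[1] as described):

	/* Illustrative only: helper name and inner union are made up. */
	union aux_handles {
		depot_stack_handle_t handles[2];
		atomic64_t atomic_handle;
	};

	static void rotate_aux_handles(union aux_handles *aux,
				       depot_stack_handle_t new_handle)
	{
		union {
			depot_stack_handle_t handles[2];
			s64 raw;
		} cur, next;

		BUILD_BUG_ON(sizeof(cur.handles) != sizeof(cur.raw));

		cur.raw = atomic64_read(&aux->atomic_handle);
		do {
			/* Demote the newer handle, install the new one. */
			next.handles[0] = cur.handles[1];
			next.handles[1] = new_handle;
		} while (!atomic64_try_cmpxchg(&aux->atomic_handle,
					       &cur.raw, next.raw));

		/* cur holds the pre-rotation pair; put the evicted handle. */
		stack_depot_put(cur.handles[0]);
	}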
> + old_handle = alloc_meta->aux_stack[1];
> alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
> - alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
> + alloc_meta->aux_stack[0] = new_handle;
> + spin_unlock_irqrestore(&aux_lock, flags);
> +
> + stack_depot_put(old_handle);
> }
>
> void kasan_record_aux_stack(void *addr)
> --
> 2.25.1
>
On Tue, 12 Dec 2023 at 01:14, <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> Instead of only zeroing out the stack depot handle when evicting the
> free stack trace in qlink_free, zero out the whole track.
>
> Do this to keep the handling of alloc and free meta consistent. The
> other fields of the free track besides the stack trace handle are
> considered invalid at this point anyway, so there is no harm in
> zeroing them out.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Marco Elver <[email protected]>
> ---
>
> This can be squashed into "kasan: use stack_depot_put for Generic mode"
> or left standalone.
> ---
> mm/kasan/quarantine.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
> index 265ca2bbe2dd..782e045da911 100644
> --- a/mm/kasan/quarantine.c
> +++ b/mm/kasan/quarantine.c
> @@ -157,7 +157,7 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
> if (free_meta &&
> *(u8 *)kasan_mem_to_shadow(object) == KASAN_SLAB_FREETRACK) {
> stack_depot_put(free_meta->free_track.stack);
> - free_meta->free_track.stack = 0;
> + __memset(&free_meta->free_track, 0, sizeof(free_meta->free_track));
> }
>
> /*
> --
> 2.25.1
>
On Tue, 12 Dec 2023 at 01:14, <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> As stack traces can now be evicted from the stack depot, remove the
> comment saying that they are never removed.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Marco Elver <[email protected]>
> ---
>
> Can be squashed into "lib/stackdepot: allow users to evict stack traces"
> or left standalone.
> ---
> include/linux/stackdepot.h | 2 --
> 1 file changed, 2 deletions(-)
>
> diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
> index a6796f178913..adcbb8f23600 100644
> --- a/include/linux/stackdepot.h
> +++ b/include/linux/stackdepot.h
> @@ -11,8 +11,6 @@
> * SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
> * stack traces often repeat, using stack depot allows to save about 100x space.
> *
> - * Stack traces are never removed from the stack depot.
> - *
> * Author: Alexander Potapenko <[email protected]>
> * Copyright (C) 2016 Google, Inc.
> *
> --
> 2.25.1
>
On Tue, 12 Dec 2023 19:59:29 +0100 Marco Elver <[email protected]> wrote:
> On Tue, 12 Dec 2023 at 01:14, <[email protected]> wrote:
> >
> > From: Andrey Konovalov <[email protected]>
> >
> > Stack depot functions can be called from various contexts that do
> > allocations, including with console locks taken. At the same time, stack
> > depot functions might print WARNINGs or refcount-related failure messages.
> >
> > This can cause a deadlock on console locks.
> >
> > Add printk_deferred_enter/exit guards to stack depot to avoid this.
> >
> > Reported-by: Tetsuo Handa <[email protected]>
> > Closes: https://lore.kernel.org/all/[email protected]/
> > Signed-off-by: Andrey Konovalov <[email protected]>
>
> Reviewed-by: Marco Elver <[email protected]>
>
> Doesn't need Fixes, because the series is not yet in mainline, right?
I've moved the series "stackdepot: allow evicting stack traces, v4"
(please, not "the stack depot eviction series") into mm-nonmm-stable.
Which is allegedly non-rebasing.
So yes please, provide Fixes: on each patch.
On Tue, Dec 12, 2023 at 8:29 PM Marco Elver <[email protected]> wrote:
>
> > - stack_depot_put(alloc_meta->aux_stack[1]);
> > + new_handle = kasan_save_stack(0, depot_flags);
> > +
> > + spin_lock_irqsave(&aux_lock, flags);
>
> This is an unnecessary global lock. What's the problem here? As far as
> I can understand, a race is possible where we may end up with
> duplicated or lost stack handles.
Yes, this is the problem. And this leads to refcount underflows in the
stack depot code, as we fail to keep precise track of the stack
traces.
> Since storing this information is best effort anyway, and bugs are
> rare, a global lock protecting this is overkill.
>
> I'd just accept the raciness and use READ_ONCE() / WRITE_ONCE() to
> make sure we don't tear any reads/writes and the depot handles are
> valid.
This will help with the potential tearing but will not help with the
refcount issues.
> There are other more complex schemes [1], but I think they are
> overkill as well.
>
> [1]: Since a depot stack handle is just a u32, we can have a
>
> union {
> depot_stack_handle_t handles[2];
> atomic64_t atomic_handle;
> } aux_stack;
> (BUILD_BUG_ON somewhere if sizeof handles and atomic_handle mismatch.)
>
> Then in the code here create the same union and load atomic_handle.
> Swap handles[1] into handles[0] and write the new one in handles[1].
> Then do a cmpxchg loop to store the new atomic_handle.
This approach should work. If you prefer, I can do this instead of a spinlock.
But we do need some kind of atomicity while rotating the aux handles
to make sure nothing gets lost.
Thanks!
On Tue, Dec 12, 2023 at 9:57 PM Andrew Morton <[email protected]> wrote:
>
> On Tue, 12 Dec 2023 19:59:29 +0100 Marco Elver <[email protected]> wrote:
>
> > On Tue, 12 Dec 2023 at 01:14, <[email protected]> wrote:
> > >
> > > From: Andrey Konovalov <[email protected]>
> > >
> > > Stack depot functions can be called from various contexts that do
> > > allocations, including with console locks taken. At the same time, stack
> > > depot functions might print WARNINGs or refcount-related failure messages.
> > >
> > > This can cause a deadlock on console locks.
> > >
> > > Add printk_deferred_enter/exit guards to stack depot to avoid this.
> > >
> > > Reported-by: Tetsuo Handa <[email protected]>
> > > Closes: https://lore.kernel.org/all/[email protected]/
> > > Signed-off-by: Andrey Konovalov <[email protected]>
> >
> > Reviewed-by: Marco Elver <[email protected]>
> >
> > Doesn't need Fixes, because the series is not yet in mainline, right?
>
> I've moved the series "stackdepot: allow evicting stack traces, v4"
> (please, not "the stack depot eviction series") into mm-nonmm-stable.
> Which is allegedly non-rebasing.
>
> So yes please, provide Fixes: on each patch.
Sure, I'll add them when I mail v2 after we figure out what to do with
patch #2. Thanks!
On Wed, 13 Dec 2023 at 15:40, Andrey Konovalov <[email protected]> wrote:
>
> On Tue, Dec 12, 2023 at 8:29 PM Marco Elver <[email protected]> wrote:
> >
> > > - stack_depot_put(alloc_meta->aux_stack[1]);
> > > + new_handle = kasan_save_stack(0, depot_flags);
> > > +
> > > + spin_lock_irqsave(&aux_lock, flags);
> >
> > This is an unnecessary global lock. What's the problem here? As far as
> > I can understand, a race is possible where we may end up with
> > duplicated or lost stack handles.
>
> Yes, this is the problem. And this leads to refcount underflows in the
> stack depot code, as we fail to keep precise track of the stack
> traces.
>
> > Since storing this information is best effort anyway, and bugs are
> > rare, a global lock protecting this is overkill.
> >
> > I'd just accept the raciness and use READ_ONCE() / WRITE_ONCE() to
> > make sure we don't tear any reads/writes and the depot handles are
> > valid.
>
> This will help with the potential tearing but will not help with the
> refcount issues.
>
> > There are other more complex schemes [1], but I think they are
> > overkill as well.
> >
> > [1]: Since a depot stack handle is just a u32, we can have a
> >
> > union {
> > depot_stack_handle_t handles[2];
> > atomic64_t atomic_handle;
> > } aux_stack;
> > (BUILD_BUG_ON somewhere if sizeof handles and atomic_handle mismatch.)
> >
> > Then in the code here create the same union and load atomic_handle.
> > Swap handles[1] into handles[0] and write the new one in handles[1].
> > Then do a cmpxchg loop to store the new atomic_handle.
>
> This approach should work. If you prefer, I can do this instead of a spinlock.
>
> But we do need some kind of atomicity while rotating the aux handles
> to make sure nothing gets lost.
Yes, I think that'd be preferable. Although note that not all 32-bit
architectures have 64-bit atomics, so that may be an issue. Another
alternative is to have a spinlock next to the aux_stack (it needs to
be initialized properly). It'll use up a little more space, but that's
for KASAN configs only, so I think it's ok. Certainly better than a
global lock.
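I.e., something like this (a sketch; the aux_lock field name and the
init site are assumptions):

	/* In struct kasan_alloc_meta (aux_lock is an assumed name): */
	depot_stack_handle_t aux_stack[2];
	spinlock_t aux_lock;	/* protects aux_stack[] rotation */

	/* Wherever the alloc meta is first initialized: */
	spin_lock_init(&alloc_meta->aux_lock);

	/* The rotation in __kasan_record_aux_stack() then becomes: */
	new_handle = kasan_save_stack(0, depot_flags);

	spin_lock_irqsave(&alloc_meta->aux_lock, flags);
	old_handle = alloc_meta->aux_stack[1];
	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = new_handle;
	spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);

	stack_depot_put(old_handle);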
On Wed, Dec 13, 2023 at 5:51 PM Marco Elver <[email protected]> wrote:
>
> > > [1]: Since a depot stack handle is just a u32, we can have a
> > >
> > > union {
> > > depot_stack_handle_t handles[2];
> > > atomic64_t atomic_handle;
> > > } aux_stack;
> > > (BUILD_BUG_ON somewhere if sizeof handles and atomic_handle mismatch.)
> > >
> > > Then in the code here create the same union and load atomic_handle.
> > > Swap handles[1] into handles[0] and write the new one in handles[1].
> > > Then do a cmpxchg loop to store the new atomic_handle.
> >
> > This approach should work. If you prefer, I can do this instead of a spinlock.
> >
> > But we do need some kind of atomicity while rotating the aux handles
> > to make sure nothing gets lost.
>
> Yes, I think that'd be preferable. Although note that not all 32-bit
> architectures have 64-bit atomics, so that may be an issue. Another
> alternative is to have a spinlock next to the aux_stack (it needs to
> be initialized properly). It'll use up a little more space, but that's
> for KASAN configs only, so I think it's ok. Certainly better than a
> global lock.
Ah, hm, actually this is what I intended to do with this change. But
somehow my brain glitched out and decided to use a global lock :)
I'll change this into a local spinlock in v2.
Thank you!