From: Andrey Konovalov <[email protected]>
A set of fixes, comments, and clean-ups I came up with while reading
the stack depot code.
Changes v1->v2:
- Drop "lib/stackdepot: fix setting next_slab_inited in init_stack_slab",
as there is no bug to fix.
- Use "pool" instead of "slab" for memory regions that store stack traces.
- Rename next_slab/pool_inited to next_slab/pool_required and annotate its
uses with comments, see new patch #13.
- Use STACK_HASH_TABLE_SCALE as a new name for STACK_HASH_SCALE.
- Mark stack_depot_set_extra_bits as __must_check.
- Only assign extra bits in stack_depot_set_extra_bits for non-empty
handles.
- Minor comment fixes.
Andrey Konovalov (18):
lib/stackdepot: put functions in logical order
lib/stackdepot: use pr_fmt to define message format
lib/stackdepot, mm: rename stack_depot_want_early_init
lib/stackdepot: rename stack_depot_disable
lib/stackdepot: annotate init and early init functions
lib/stackdepot: lower the indentation in stack_depot_init
lib/stackdepot: reorder and annotate global variables
lib/stackdepot: rename hash table constants and variables
lib/stackdepot: rename slab to pool
lib/stackdepot: rename handle and pool constants
lib/stackdepot: rename init_stack_pool
lib/stacktrace: drop impossible WARN_ON for depot_init_pool
lib/stackdepot: annotate depot_init_pool and depot_alloc_stack
lib/stackdepot: rename next_pool_inited to next_pool_required
lib/stacktrace, kasan, kmsan: rework extra_bits interface
lib/stackdepot: annotate racy pool_index accesses
lib/stackdepot: various comments clean-ups
lib/stackdepot: move documentation comments to stackdepot.h
include/linux/stackdepot.h | 152 +++++++--
lib/stackdepot.c | 654 ++++++++++++++++++-------------------
mm/kasan/common.c | 2 +-
mm/kmsan/core.c | 10 +-
mm/page_owner.c | 2 +-
mm/slub.c | 4 +-
6 files changed, 457 insertions(+), 367 deletions(-)
--
2.25.1
From: Andrey Konovalov <[email protected]>
Use pr_fmt to define the format for printing stack depot messages instead
of duplicating the "Stack Depot" prefix in each message.
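For illustration, a minimal sketch of the effect (hypothetical file, not part of
the patch):

  /* pr_fmt() must be defined before <linux/printk.h> is pulled in. */
  #define pr_fmt(fmt) "stackdepot: " fmt
  #include <linux/printk.h>

  static void example(void)
  {
          /* Expands to printk(KERN_INFO "stackdepot: " "disabled\n"). */
          pr_info("disabled\n");
  }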
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/stackdepot.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 4bfaf3bce619..83787e46a3ab 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -19,6 +19,8 @@
* Based on code by Dmitry Chernenkov.
*/
+#define pr_fmt(fmt) "stackdepot: " fmt
+
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
@@ -98,7 +100,7 @@ static int __init is_stack_depot_disabled(char *str)
ret = kstrtobool(str, &stack_depot_disable);
if (!ret && stack_depot_disable) {
- pr_info("Stack Depot is disabled\n");
+ pr_info("disabled\n");
stack_table = NULL;
}
return 0;
@@ -142,7 +144,7 @@ int __init stack_depot_early_init(void)
1UL << STACK_HASH_ORDER_MAX);
if (!stack_table) {
- pr_err("Stack Depot hash table allocation failed, disabling\n");
+ pr_err("hash table allocation failed, disabling\n");
stack_depot_disable = true;
return -ENOMEM;
}
@@ -177,11 +179,11 @@ int stack_depot_init(void)
if (entries > 1UL << STACK_HASH_ORDER_MAX)
entries = 1UL << STACK_HASH_ORDER_MAX;
- pr_info("Stack Depot allocating hash table of %lu entries with kvcalloc\n",
+ pr_info("allocating hash table of %lu entries with kvcalloc\n",
entries);
stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
if (!stack_table) {
- pr_err("Stack Depot hash table allocation failed, disabling\n");
+ pr_err("hash table allocation failed, disabling\n");
stack_depot_disable = true;
ret = -ENOMEM;
}
--
2.25.1
From: Andrey Konovalov <[email protected]>
Put stack depot functions' declarations and definitions in a more logical
order:
1. Functions that save stack traces into stack depot.
2. Functions that fetch and print stack traces.
3. stack_depot_get_extra_bits, which operates on stack depot handles
and does not interact with the stack depot storage.
No functional changes.
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
include/linux/stackdepot.h | 15 +-
lib/stackdepot.c | 314 ++++++++++++++++++-------------------
2 files changed, 165 insertions(+), 164 deletions(-)
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 9ca7798d7a31..1296a6eeaec0 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -14,17 +14,13 @@
#include <linux/gfp.h>
typedef u32 depot_stack_handle_t;
+
/*
* Number of bits in the handle that stack depot doesn't use. Users may store
* information in them.
*/
#define STACK_DEPOT_EXTRA_BITS 5
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
- unsigned int nr_entries,
- unsigned int extra_bits,
- gfp_t gfp_flags, bool can_alloc);
-
/*
* Every user of stack depot has to call stack_depot_init() during its own init
* when it's decided that it will be calling stack_depot_save() later. This is
@@ -59,17 +55,22 @@ static inline void stack_depot_want_early_init(void) { }
static inline int stack_depot_early_init(void) { return 0; }
#endif
+depot_stack_handle_t __stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries,
+ unsigned int extra_bits,
+ gfp_t gfp_flags, bool can_alloc);
+
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries, gfp_t gfp_flags);
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries);
-unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
+void stack_depot_print(depot_stack_handle_t stack);
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces);
-void stack_depot_print(depot_stack_handle_t stack);
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
#endif
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 79e894cf8406..4bfaf3bce619 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -79,84 +79,6 @@ static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);
-unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
-{
- union handle_parts parts = { .handle = handle };
-
- return parts.extra;
-}
-EXPORT_SYMBOL(stack_depot_get_extra_bits);
-
-static bool init_stack_slab(void **prealloc)
-{
- if (!*prealloc)
- return false;
- /*
- * This smp_load_acquire() pairs with smp_store_release() to
- * |next_slab_inited| below and in depot_alloc_stack().
- */
- if (smp_load_acquire(&next_slab_inited))
- return true;
- if (stack_slabs[depot_index] == NULL) {
- stack_slabs[depot_index] = *prealloc;
- *prealloc = NULL;
- } else {
- /* If this is the last depot slab, do not touch the next one. */
- if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
- stack_slabs[depot_index + 1] = *prealloc;
- *prealloc = NULL;
- }
- /*
- * This smp_store_release pairs with smp_load_acquire() from
- * |next_slab_inited| above and in stack_depot_save().
- */
- smp_store_release(&next_slab_inited, 1);
- }
- return true;
-}
-
-/* Allocation of a new stack in raw storage */
-static struct stack_record *
-depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
-{
- struct stack_record *stack;
- size_t required_size = struct_size(stack, entries, size);
-
- required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
-
- if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
- if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
- WARN_ONCE(1, "Stack depot reached limit capacity");
- return NULL;
- }
- depot_index++;
- depot_offset = 0;
- /*
- * smp_store_release() here pairs with smp_load_acquire() from
- * |next_slab_inited| in stack_depot_save() and
- * init_stack_slab().
- */
- if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
- smp_store_release(&next_slab_inited, 0);
- }
- init_stack_slab(prealloc);
- if (stack_slabs[depot_index] == NULL)
- return NULL;
-
- stack = stack_slabs[depot_index] + depot_offset;
-
- stack->hash = hash;
- stack->size = size;
- stack->handle.slabindex = depot_index;
- stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
- stack->handle.valid = 1;
- stack->handle.extra = 0;
- memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
- depot_offset += required_size;
-
- return stack;
-}
-
/* one hash table bucket entry per 16kB of memory */
#define STACK_HASH_SCALE 14
/* limited between 4k and 1M buckets */
@@ -270,6 +192,76 @@ int stack_depot_init(void)
}
EXPORT_SYMBOL_GPL(stack_depot_init);
+static bool init_stack_slab(void **prealloc)
+{
+ if (!*prealloc)
+ return false;
+ /*
+ * This smp_load_acquire() pairs with smp_store_release() to
+ * |next_slab_inited| below and in depot_alloc_stack().
+ */
+ if (smp_load_acquire(&next_slab_inited))
+ return true;
+ if (stack_slabs[depot_index] == NULL) {
+ stack_slabs[depot_index] = *prealloc;
+ *prealloc = NULL;
+ } else {
+ /* If this is the last depot slab, do not touch the next one. */
+ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
+ stack_slabs[depot_index + 1] = *prealloc;
+ *prealloc = NULL;
+ }
+ /*
+ * This smp_store_release pairs with smp_load_acquire() from
+ * |next_slab_inited| above and in stack_depot_save().
+ */
+ smp_store_release(&next_slab_inited, 1);
+ }
+ return true;
+}
+
+/* Allocation of a new stack in raw storage */
+static struct stack_record *
+depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
+{
+ struct stack_record *stack;
+ size_t required_size = struct_size(stack, entries, size);
+
+ required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
+
+ if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
+ if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return NULL;
+ }
+ depot_index++;
+ depot_offset = 0;
+ /*
+ * smp_store_release() here pairs with smp_load_acquire() from
+ * |next_slab_inited| in stack_depot_save() and
+ * init_stack_slab().
+ */
+ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
+ smp_store_release(&next_slab_inited, 0);
+ }
+ init_stack_slab(prealloc);
+ if (stack_slabs[depot_index] == NULL)
+ return NULL;
+
+ stack = stack_slabs[depot_index] + depot_offset;
+
+ stack->hash = hash;
+ stack->size = size;
+ stack->handle.slabindex = depot_index;
+ stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+ stack->handle.valid = 1;
+ stack->handle.extra = 0;
+ memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
+ depot_offset += required_size;
+
+ return stack;
+}
+
/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
@@ -309,85 +301,6 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
return NULL;
}
-/**
- * stack_depot_snprint - print stack entries from a depot into a buffer
- *
- * @handle: Stack depot handle which was returned from
- * stack_depot_save().
- * @buf: Pointer to the print buffer
- *
- * @size: Size of the print buffer
- *
- * @spaces: Number of leading spaces to print
- *
- * Return: Number of bytes printed.
- */
-int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
- int spaces)
-{
- unsigned long *entries;
- unsigned int nr_entries;
-
- nr_entries = stack_depot_fetch(handle, &entries);
- return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
- spaces) : 0;
-}
-EXPORT_SYMBOL_GPL(stack_depot_snprint);
-
-/**
- * stack_depot_print - print stack entries from a depot
- *
- * @stack: Stack depot handle which was returned from
- * stack_depot_save().
- *
- */
-void stack_depot_print(depot_stack_handle_t stack)
-{
- unsigned long *entries;
- unsigned int nr_entries;
-
- nr_entries = stack_depot_fetch(stack, &entries);
- if (nr_entries > 0)
- stack_trace_print(entries, nr_entries, 0);
-}
-EXPORT_SYMBOL_GPL(stack_depot_print);
-
-/**
- * stack_depot_fetch - Fetch stack entries from a depot
- *
- * @handle: Stack depot handle which was returned from
- * stack_depot_save().
- * @entries: Pointer to store the entries address
- *
- * Return: The number of trace entries for this depot.
- */
-unsigned int stack_depot_fetch(depot_stack_handle_t handle,
- unsigned long **entries)
-{
- union handle_parts parts = { .handle = handle };
- void *slab;
- size_t offset = parts.offset << STACK_ALLOC_ALIGN;
- struct stack_record *stack;
-
- *entries = NULL;
- if (!handle)
- return 0;
-
- if (parts.slabindex > depot_index) {
- WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
- parts.slabindex, depot_index, handle);
- return 0;
- }
- slab = stack_slabs[parts.slabindex];
- if (!slab)
- return 0;
- stack = slab + offset;
-
- *entries = stack->entries;
- return stack->size;
-}
-EXPORT_SYMBOL_GPL(stack_depot_fetch);
-
/**
* __stack_depot_save - Save a stack trace from an array
*
@@ -533,3 +446,90 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
+
+/**
+ * stack_depot_fetch - Fetch stack entries from a depot
+ *
+ * @handle: Stack depot handle which was returned from
+ * stack_depot_save().
+ * @entries: Pointer to store the entries address
+ *
+ * Return: The number of trace entries for this depot.
+ */
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+ unsigned long **entries)
+{
+ union handle_parts parts = { .handle = handle };
+ void *slab;
+ size_t offset = parts.offset << STACK_ALLOC_ALIGN;
+ struct stack_record *stack;
+
+ *entries = NULL;
+ if (!handle)
+ return 0;
+
+ if (parts.slabindex > depot_index) {
+ WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
+ parts.slabindex, depot_index, handle);
+ return 0;
+ }
+ slab = stack_slabs[parts.slabindex];
+ if (!slab)
+ return 0;
+ stack = slab + offset;
+
+ *entries = stack->entries;
+ return stack->size;
+}
+EXPORT_SYMBOL_GPL(stack_depot_fetch);
+
+/**
+ * stack_depot_print - print stack entries from a depot
+ *
+ * @stack: Stack depot handle which was returned from
+ * stack_depot_save().
+ *
+ */
+void stack_depot_print(depot_stack_handle_t stack)
+{
+ unsigned long *entries;
+ unsigned int nr_entries;
+
+ nr_entries = stack_depot_fetch(stack, &entries);
+ if (nr_entries > 0)
+ stack_trace_print(entries, nr_entries, 0);
+}
+EXPORT_SYMBOL_GPL(stack_depot_print);
+
+/**
+ * stack_depot_snprint - print stack entries from a depot into a buffer
+ *
+ * @handle: Stack depot handle which was returned from
+ * stack_depot_save().
+ * @buf: Pointer to the print buffer
+ *
+ * @size: Size of the print buffer
+ *
+ * @spaces: Number of leading spaces to print
+ *
+ * Return: Number of bytes printed.
+ */
+int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
+ int spaces)
+{
+ unsigned long *entries;
+ unsigned int nr_entries;
+
+ nr_entries = stack_depot_fetch(handle, &entries);
+ return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
+ spaces) : 0;
+}
+EXPORT_SYMBOL_GPL(stack_depot_snprint);
+
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
+{
+ union handle_parts parts = { .handle = handle };
+
+ return parts.extra;
+}
+EXPORT_SYMBOL(stack_depot_get_extra_bits);
--
2.25.1
From: Andrey Konovalov <[email protected]>
Rename stack_depot_disable to stack_depot_disabled to make its name look
similar to the names of other stack depot flags.
Also put stack_depot_disabled's definition together with the other flags.
Also rename is_stack_depot_disabled to disable_stack_depot: this name
looks more conventional for a function that processes a boot parameter.
No functional changes.
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/stackdepot.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 136706efe339..202e07c4f02d 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -71,6 +71,7 @@ struct stack_record {
unsigned long entries[]; /* Variable-sized array of entries. */
};
+static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;
@@ -91,21 +92,20 @@ static DEFINE_RAW_SPINLOCK(depot_lock);
static unsigned int stack_hash_order;
static unsigned int stack_hash_mask;
-static bool stack_depot_disable;
static struct stack_record **stack_table;
-static int __init is_stack_depot_disabled(char *str)
+static int __init disable_stack_depot(char *str)
{
int ret;
- ret = kstrtobool(str, &stack_depot_disable);
- if (!ret && stack_depot_disable) {
+ ret = kstrtobool(str, &stack_depot_disabled);
+ if (!ret && stack_depot_disabled) {
pr_info("disabled\n");
stack_table = NULL;
}
return 0;
}
-early_param("stack_depot_disable", is_stack_depot_disabled);
+early_param("stack_depot_disable", disable_stack_depot);
void __init stack_depot_request_early_init(void)
{
@@ -128,7 +128,7 @@ int __init stack_depot_early_init(void)
if (kasan_enabled() && !stack_hash_order)
stack_hash_order = STACK_HASH_ORDER_MAX;
- if (!__stack_depot_early_init_requested || stack_depot_disable)
+ if (!__stack_depot_early_init_requested || stack_depot_disabled)
return 0;
if (stack_hash_order)
@@ -145,7 +145,7 @@ int __init stack_depot_early_init(void)
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
- stack_depot_disable = true;
+ stack_depot_disabled = true;
return -ENOMEM;
}
@@ -158,7 +158,7 @@ int stack_depot_init(void)
int ret = 0;
mutex_lock(&stack_depot_init_mutex);
- if (!stack_depot_disable && !stack_table) {
+ if (!stack_depot_disabled && !stack_table) {
unsigned long entries;
int scale = STACK_HASH_SCALE;
@@ -184,7 +184,7 @@ int stack_depot_init(void)
stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
- stack_depot_disable = true;
+ stack_depot_disabled = true;
ret = -ENOMEM;
}
stack_hash_mask = entries - 1;
@@ -353,7 +353,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
*/
nr_entries = filter_irq_stacks(entries, nr_entries);
- if (unlikely(nr_entries == 0) || stack_depot_disable)
+ if (unlikely(nr_entries == 0) || stack_depot_disabled)
goto fast_exit;
hash = hash_stack(entries, nr_entries);
--
2.25.1
From: Andrey Konovalov <[email protected]>
Rename stack_depot_want_early_init to stack_depot_request_early_init.
The old name is confusing, as it suggests that the function returns some kind
of intention of stack depot. The new name reflects that this function instead
requests an action from stack depot.
No functional changes.
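A typical call site then looks as follows (sketch mirroring the page_owner boot
parameter handler updated in the diff below):

  static int __init early_page_owner_param(char *buf)
  {
          int ret = kstrtobool(buf, &page_owner_enabled);

          if (page_owner_enabled)
                  stack_depot_request_early_init();
          return ret;
  }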
Reviewed-by: Alexander Potapenko <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
include/linux/stackdepot.h | 14 +++++++-------
lib/stackdepot.c | 10 +++++-----
mm/page_owner.c | 2 +-
mm/slub.c | 4 ++--
4 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 1296a6eeaec0..c4e3abc16b16 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -31,26 +31,26 @@ typedef u32 depot_stack_handle_t;
* enabled as part of mm_init(), for subsystems where it's known at compile time
* that stack depot will be used.
*
- * Another alternative is to call stack_depot_want_early_init(), when the
+ * Another alternative is to call stack_depot_request_early_init(), when the
* decision to use stack depot is taken e.g. when evaluating kernel boot
* parameters, which precedes the enablement point in mm_init().
*
- * stack_depot_init() and stack_depot_want_early_init() can be called regardless
- * of CONFIG_STACKDEPOT and are no-op when disabled. The actual save/fetch/print
- * functions should only be called from code that makes sure CONFIG_STACKDEPOT
- * is enabled.
+ * stack_depot_init() and stack_depot_request_early_init() can be called
+ * regardless of CONFIG_STACKDEPOT and are no-op when disabled. The actual
+ * save/fetch/print functions should only be called from code that makes sure
+ * CONFIG_STACKDEPOT is enabled.
*/
#ifdef CONFIG_STACKDEPOT
int stack_depot_init(void);
-void __init stack_depot_want_early_init(void);
+void __init stack_depot_request_early_init(void);
/* This is supposed to be called only from mm_init() */
int __init stack_depot_early_init(void);
#else
static inline int stack_depot_init(void) { return 0; }
-static inline void stack_depot_want_early_init(void) { }
+static inline void stack_depot_request_early_init(void) { }
static inline int stack_depot_early_init(void) { return 0; }
#endif
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 83787e46a3ab..136706efe339 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -71,7 +71,7 @@ struct stack_record {
unsigned long entries[]; /* Variable-sized array of entries. */
};
-static bool __stack_depot_want_early_init __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
+static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;
static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
@@ -107,12 +107,12 @@ static int __init is_stack_depot_disabled(char *str)
}
early_param("stack_depot_disable", is_stack_depot_disabled);
-void __init stack_depot_want_early_init(void)
+void __init stack_depot_request_early_init(void)
{
- /* Too late to request early init now */
+ /* Too late to request early init now. */
WARN_ON(__stack_depot_early_init_passed);
- __stack_depot_want_early_init = true;
+ __stack_depot_early_init_requested = true;
}
int __init stack_depot_early_init(void)
@@ -128,7 +128,7 @@ int __init stack_depot_early_init(void)
if (kasan_enabled() && !stack_hash_order)
stack_hash_order = STACK_HASH_ORDER_MAX;
- if (!__stack_depot_want_early_init || stack_depot_disable)
+ if (!__stack_depot_early_init_requested || stack_depot_disable)
return 0;
if (stack_hash_order)
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 2d27f532df4c..90a4a087e6c7 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -48,7 +48,7 @@ static int __init early_page_owner_param(char *buf)
int ret = kstrtobool(buf, &page_owner_enabled);
if (page_owner_enabled)
- stack_depot_want_early_init();
+ stack_depot_request_early_init();
return ret;
}
diff --git a/mm/slub.c b/mm/slub.c
index 13459c69095a..f2c6c356bc36 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1592,7 +1592,7 @@ static int __init setup_slub_debug(char *str)
} else {
slab_list_specified = true;
if (flags & SLAB_STORE_USER)
- stack_depot_want_early_init();
+ stack_depot_request_early_init();
}
}
@@ -1611,7 +1611,7 @@ static int __init setup_slub_debug(char *str)
out:
slub_debug = global_flags;
if (slub_debug & SLAB_STORE_USER)
- stack_depot_want_early_init();
+ stack_depot_request_early_init();
if (slub_debug != 0 || slub_debug_string)
static_branch_enable(&slub_debug_enabled);
else
--
2.25.1
From: Andrey Konovalov <[email protected]>
Add comments to stack_depot_early_init and stack_depot_init to explain
certain parts of their implementation.
Also add a pr_info message to stack_depot_early_init similar to the one
in stack_depot_init.
Also move the scale variable in stack_depot_init to the scope where it
is being used.
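For reference, a worked example of the sizing done in the kvcalloc path
(illustrative values only: 4 KB pages, so PAGE_SHIFT == 12, STACK_HASH_SCALE == 14,
i.e. one bucket per 16 KB, and ~1 GB reported by nr_free_buffer_pages()):

  entries = nr_free_buffer_pages();      /* ~262144 pages                 */
  entries = roundup_pow_of_two(entries); /* 262144, already a power of 2  */
  entries >>= (scale - PAGE_SHIFT);      /* 14 - 12 = 2: 65536 buckets    */
  /* 65536 already lies within the STACK_HASH_ORDER_MIN/MAX clamp. */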
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/stackdepot.c | 27 +++++++++++++++++++++------
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 202e07c4f02d..9fab711e4826 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -115,24 +115,34 @@ void __init stack_depot_request_early_init(void)
__stack_depot_early_init_requested = true;
}
+/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
unsigned long entries = 0;
- /* This is supposed to be called only once, from mm_init() */
+ /* This function must be called only once, from mm_init(). */
if (WARN_ON(__stack_depot_early_init_passed))
return 0;
-
__stack_depot_early_init_passed = true;
+ /*
+ * If KASAN is enabled, use the maximum order: KASAN is frequently used
+ * in fuzzing scenarios, which leads to a large number of different
+ * stack traces being stored in stack depot.
+ */
if (kasan_enabled() && !stack_hash_order)
stack_hash_order = STACK_HASH_ORDER_MAX;
if (!__stack_depot_early_init_requested || stack_depot_disabled)
return 0;
+ /*
+ * If stack_hash_order is not set, leave entries as 0 to rely on the
+ * automatic calculations performed by alloc_large_system_hash.
+ */
if (stack_hash_order)
- entries = 1UL << stack_hash_order;
+ entries = 1UL << stack_hash_order;
+ pr_info("allocating hash table via alloc_large_system_hash\n");
stack_table = alloc_large_system_hash("stackdepot",
sizeof(struct stack_record *),
entries,
@@ -142,7 +152,6 @@ int __init stack_depot_early_init(void)
&stack_hash_mask,
1UL << STACK_HASH_ORDER_MIN,
1UL << STACK_HASH_ORDER_MAX);
-
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
stack_depot_disabled = true;
@@ -152,6 +161,7 @@ int __init stack_depot_early_init(void)
return 0;
}
+/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{
static DEFINE_MUTEX(stack_depot_init_mutex);
@@ -160,11 +170,16 @@ int stack_depot_init(void)
mutex_lock(&stack_depot_init_mutex);
if (!stack_depot_disabled && !stack_table) {
unsigned long entries;
- int scale = STACK_HASH_SCALE;
+ /*
+ * Similarly to stack_depot_early_init, use stack_hash_order
+ * if assigned, and rely on automatic scaling otherwise.
+ */
if (stack_hash_order) {
entries = 1UL << stack_hash_order;
} else {
+ int scale = STACK_HASH_SCALE;
+
entries = nr_free_buffer_pages();
entries = roundup_pow_of_two(entries);
@@ -179,7 +194,7 @@ int stack_depot_init(void)
if (entries > 1UL << STACK_HASH_ORDER_MAX)
entries = 1UL << STACK_HASH_ORDER_MAX;
- pr_info("allocating hash table of %lu entries with kvcalloc\n",
+ pr_info("allocating hash table of %lu entries via kvcalloc\n",
entries);
stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
if (!stack_table) {
--
2.25.1
From: Andrey Konovalov <[email protected]>
stack_depot_init does most things inside an if check. Move them out and
use a goto statement instead.
No functional changes.
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/stackdepot.c | 70 +++++++++++++++++++++++++-----------------------
1 file changed, 37 insertions(+), 33 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 9fab711e4826..3c713f70b0a3 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -165,46 +165,50 @@ int __init stack_depot_early_init(void)
int stack_depot_init(void)
{
static DEFINE_MUTEX(stack_depot_init_mutex);
+ unsigned long entries;
int ret = 0;
mutex_lock(&stack_depot_init_mutex);
- if (!stack_depot_disabled && !stack_table) {
- unsigned long entries;
- /*
- * Similarly to stack_depot_early_init, use stack_hash_order
- * if assigned, and rely on automatic scaling otherwise.
- */
- if (stack_hash_order) {
- entries = 1UL << stack_hash_order;
- } else {
- int scale = STACK_HASH_SCALE;
-
- entries = nr_free_buffer_pages();
- entries = roundup_pow_of_two(entries);
-
- if (scale > PAGE_SHIFT)
- entries >>= (scale - PAGE_SHIFT);
- else
- entries <<= (PAGE_SHIFT - scale);
- }
+ if (stack_depot_disabled || stack_table)
+ goto out_unlock;
- if (entries < 1UL << STACK_HASH_ORDER_MIN)
- entries = 1UL << STACK_HASH_ORDER_MIN;
- if (entries > 1UL << STACK_HASH_ORDER_MAX)
- entries = 1UL << STACK_HASH_ORDER_MAX;
-
- pr_info("allocating hash table of %lu entries via kvcalloc\n",
- entries);
- stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
- if (!stack_table) {
- pr_err("hash table allocation failed, disabling\n");
- stack_depot_disabled = true;
- ret = -ENOMEM;
- }
- stack_hash_mask = entries - 1;
+ /*
+ * Similarly to stack_depot_early_init, use stack_hash_order
+ * if assigned, and rely on automatic scaling otherwise.
+ */
+ if (stack_hash_order) {
+ entries = 1UL << stack_hash_order;
+ } else {
+ int scale = STACK_HASH_SCALE;
+
+ entries = nr_free_buffer_pages();
+ entries = roundup_pow_of_two(entries);
+
+ if (scale > PAGE_SHIFT)
+ entries >>= (scale - PAGE_SHIFT);
+ else
+ entries <<= (PAGE_SHIFT - scale);
}
+
+ if (entries < 1UL << STACK_HASH_ORDER_MIN)
+ entries = 1UL << STACK_HASH_ORDER_MIN;
+ if (entries > 1UL << STACK_HASH_ORDER_MAX)
+ entries = 1UL << STACK_HASH_ORDER_MAX;
+
+ pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
+ stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
+ if (!stack_table) {
+ pr_err("hash table allocation failed, disabling\n");
+ stack_depot_disabled = true;
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ stack_hash_mask = entries - 1;
+
+out_unlock:
mutex_unlock(&stack_depot_init_mutex);
+
return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
--
2.25.1
From: Andrey Konovalov <[email protected]>
Group stack depot global variables by their purpose:
1. Hash table-related variables,
2. Slab-related variables,
and add comments.
Also clean up comments for hash table-related constants.
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/stackdepot.c | 27 +++++++++++++++++----------
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 3c713f70b0a3..de1afe3fb24d 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -75,24 +75,31 @@ static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;
-static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
-
-static int depot_index;
-static int next_slab_inited;
-static size_t depot_offset;
-static DEFINE_RAW_SPINLOCK(depot_lock);
-
-/* one hash table bucket entry per 16kB of memory */
+/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_SCALE 14
-/* limited between 4k and 1M buckets */
+/* Limit the number of buckets between 4K and 1M. */
#define STACK_HASH_ORDER_MIN 12
#define STACK_HASH_ORDER_MAX 20
+/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c
+/* Hash table of pointers to stored stack traces. */
+static struct stack_record **stack_table;
+/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_hash_order;
+/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;
-static struct stack_record **stack_table;
+/* Array of memory regions that store stack traces. */
+static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
+/* Currently used slab in stack_slabs. */
+static int depot_index;
+/* Offset to the unused space in the currently used slab. */
+static size_t depot_offset;
+/* Lock that protects the variables above. */
+static DEFINE_RAW_SPINLOCK(depot_lock);
+/* Whether the next slab is initialized. */
+static int next_slab_inited;
static int __init disable_stack_depot(char *str)
{
--
2.25.1
From: Andrey Konovalov <[email protected]>
Give more meaningful names to hash table-related constants and variables:
1. Rename STACK_HASH_SCALE to STACK_HASH_TABLE_SCALE to point out that it
is related to scaling the hash table.
2. Rename STACK_HASH_ORDER_MIN/MAX to STACK_BUCKET_NUMBER_ORDER_MIN/MAX
to point out that it is related to the number of hash table buckets.
3. Rename stack_hash_order to stack_bucket_number_order for the same
reason as #2.
No functional changes.
Signed-off-by: Andrey Konovalov <[email protected]>
---
Changes v1->v2:
- Rename STACK_HASH_SCALE to STACK_HASH_TABLE_SCALE.
---
lib/stackdepot.c | 42 +++++++++++++++++++++---------------------
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index de1afe3fb24d..d1ab53197353 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -76,17 +76,17 @@ static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_ST
static bool __stack_depot_early_init_passed __initdata;
/* Use one hash table bucket per 16 KB of memory. */
-#define STACK_HASH_SCALE 14
+#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
-#define STACK_HASH_ORDER_MIN 12
-#define STACK_HASH_ORDER_MAX 20
+#define STACK_BUCKET_NUMBER_ORDER_MIN 12
+#define STACK_BUCKET_NUMBER_ORDER_MAX 20
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c
/* Hash table of pointers to stored stack traces. */
static struct stack_record **stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
-static unsigned int stack_hash_order;
+static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;
@@ -137,28 +137,28 @@ int __init stack_depot_early_init(void)
* in fuzzing scenarios, which leads to a large number of different
* stack traces being stored in stack depot.
*/
- if (kasan_enabled() && !stack_hash_order)
- stack_hash_order = STACK_HASH_ORDER_MAX;
+ if (kasan_enabled() && !stack_bucket_number_order)
+ stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
if (!__stack_depot_early_init_requested || stack_depot_disabled)
return 0;
/*
- * If stack_hash_order is not set, leave entries as 0 to rely on the
- * automatic calculations performed by alloc_large_system_hash.
+ * If stack_bucket_number_order is not set, leave entries as 0 to rely
+ * on the automatic calculations performed by alloc_large_system_hash.
*/
- if (stack_hash_order)
- entries = 1UL << stack_hash_order;
+ if (stack_bucket_number_order)
+ entries = 1UL << stack_bucket_number_order;
pr_info("allocating hash table via alloc_large_system_hash\n");
stack_table = alloc_large_system_hash("stackdepot",
sizeof(struct stack_record *),
entries,
- STACK_HASH_SCALE,
+ STACK_HASH_TABLE_SCALE,
HASH_EARLY | HASH_ZERO,
NULL,
&stack_hash_mask,
- 1UL << STACK_HASH_ORDER_MIN,
- 1UL << STACK_HASH_ORDER_MAX);
+ 1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
+ 1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
stack_depot_disabled = true;
@@ -181,13 +181,13 @@ int stack_depot_init(void)
goto out_unlock;
/*
- * Similarly to stack_depot_early_init, use stack_hash_order
+ * Similarly to stack_depot_early_init, use stack_bucket_number_order
* if assigned, and rely on automatic scaling otherwise.
*/
- if (stack_hash_order) {
- entries = 1UL << stack_hash_order;
+ if (stack_bucket_number_order) {
+ entries = 1UL << stack_bucket_number_order;
} else {
- int scale = STACK_HASH_SCALE;
+ int scale = STACK_HASH_TABLE_SCALE;
entries = nr_free_buffer_pages();
entries = roundup_pow_of_two(entries);
@@ -198,10 +198,10 @@ int stack_depot_init(void)
entries <<= (PAGE_SHIFT - scale);
}
- if (entries < 1UL << STACK_HASH_ORDER_MIN)
- entries = 1UL << STACK_HASH_ORDER_MIN;
- if (entries > 1UL << STACK_HASH_ORDER_MAX)
- entries = 1UL << STACK_HASH_ORDER_MAX;
+ if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
+ entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
+ if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
+ entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
--
2.25.1
From: Andrey Konovalov <[email protected]>
Use "pool" instead of "slab" for naming memory regions stack depot
uses to store stack traces. Using "slab" is confusing, as stack depot
pools have nothing to do with the slab allocator.
Also give better names to pool-related global variables: change
"depot_" prefix to "pool_" to point out that these variables are
related to stack depot pools.
Also rename the slabindex field in handle_parts to pool_index (rather than the
mechanical poolindex) to align its name with the pool_index global variable.
No functional changes.
Signed-off-by: Andrey Konovalov <[email protected]>
---
Changes v1->v2:
- Use "pool" instead of "slab" for memory regions that store stack traces.
---
lib/stackdepot.c | 106 +++++++++++++++++++++++------------------------
1 file changed, 53 insertions(+), 53 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index d1ab53197353..522e36cf449f 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -39,7 +39,7 @@
#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
#define STACK_ALLOC_NULL_PROTECTION_BITS 1
-#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
+#define STACK_ALLOC_ORDER 2 /* Pool size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
@@ -47,16 +47,16 @@
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
STACK_ALLOC_NULL_PROTECTION_BITS - \
STACK_ALLOC_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
-#define STACK_ALLOC_SLABS_CAP 8192
-#define STACK_ALLOC_MAX_SLABS \
- (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
- (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
+#define STACK_ALLOC_POOLS_CAP 8192
+#define STACK_ALLOC_MAX_POOLS \
+ (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_POOLS_CAP) ? \
+ (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_POOLS_CAP)
/* The compact structure to store the reference to stacks. */
union handle_parts {
depot_stack_handle_t handle;
struct {
- u32 slabindex : STACK_ALLOC_INDEX_BITS;
+ u32 pool_index : STACK_ALLOC_INDEX_BITS;
u32 offset : STACK_ALLOC_OFFSET_BITS;
u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
u32 extra : STACK_DEPOT_EXTRA_BITS;
@@ -91,15 +91,15 @@ static unsigned int stack_bucket_number_order;
static unsigned int stack_hash_mask;
/* Array of memory regions that store stack traces. */
-static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
-/* Currently used slab in stack_slabs. */
-static int depot_index;
-/* Offset to the unused space in the currently used slab. */
-static size_t depot_offset;
+static void *stack_pools[STACK_ALLOC_MAX_POOLS];
+/* Currently used pool in stack_pools. */
+static int pool_index;
+/* Offset to the unused space in the currently used pool. */
+static size_t pool_offset;
/* Lock that protects the variables above. */
-static DEFINE_RAW_SPINLOCK(depot_lock);
-/* Whether the next slab is initialized. */
-static int next_slab_inited;
+static DEFINE_RAW_SPINLOCK(pool_lock);
+/* Whether the next pool is initialized. */
+static int next_pool_inited;
static int __init disable_stack_depot(char *str)
{
@@ -220,30 +220,30 @@ int stack_depot_init(void)
}
EXPORT_SYMBOL_GPL(stack_depot_init);
-static bool init_stack_slab(void **prealloc)
+static bool init_stack_pool(void **prealloc)
{
if (!*prealloc)
return false;
/*
* This smp_load_acquire() pairs with smp_store_release() to
- * |next_slab_inited| below and in depot_alloc_stack().
+ * |next_pool_inited| below and in depot_alloc_stack().
*/
- if (smp_load_acquire(&next_slab_inited))
+ if (smp_load_acquire(&next_pool_inited))
return true;
- if (stack_slabs[depot_index] == NULL) {
- stack_slabs[depot_index] = *prealloc;
+ if (stack_pools[pool_index] == NULL) {
+ stack_pools[pool_index] = *prealloc;
*prealloc = NULL;
} else {
- /* If this is the last depot slab, do not touch the next one. */
- if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
- stack_slabs[depot_index + 1] = *prealloc;
+ /* If this is the last depot pool, do not touch the next one. */
+ if (pool_index + 1 < STACK_ALLOC_MAX_POOLS) {
+ stack_pools[pool_index + 1] = *prealloc;
*prealloc = NULL;
}
/*
* This smp_store_release pairs with smp_load_acquire() from
- * |next_slab_inited| above and in stack_depot_save().
+ * |next_pool_inited| above and in stack_depot_save().
*/
- smp_store_release(&next_slab_inited, 1);
+ smp_store_release(&next_pool_inited, 1);
}
return true;
}
@@ -257,35 +257,35 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
- if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
- if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
+ if (unlikely(pool_offset + required_size > STACK_ALLOC_SIZE)) {
+ if (unlikely(pool_index + 1 >= STACK_ALLOC_MAX_POOLS)) {
WARN_ONCE(1, "Stack depot reached limit capacity");
return NULL;
}
- depot_index++;
- depot_offset = 0;
+ pool_index++;
+ pool_offset = 0;
/*
* smp_store_release() here pairs with smp_load_acquire() from
- * |next_slab_inited| in stack_depot_save() and
- * init_stack_slab().
+ * |next_pool_inited| in stack_depot_save() and
+ * init_stack_pool().
*/
- if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
- smp_store_release(&next_slab_inited, 0);
+ if (pool_index + 1 < STACK_ALLOC_MAX_POOLS)
+ smp_store_release(&next_pool_inited, 0);
}
- init_stack_slab(prealloc);
- if (stack_slabs[depot_index] == NULL)
+ init_stack_pool(prealloc);
+ if (stack_pools[pool_index] == NULL)
return NULL;
- stack = stack_slabs[depot_index] + depot_offset;
+ stack = stack_pools[pool_index] + pool_offset;
stack->hash = hash;
stack->size = size;
- stack->handle.slabindex = depot_index;
- stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+ stack->handle.pool_index = pool_index;
+ stack->handle.offset = pool_offset >> STACK_ALLOC_ALIGN;
stack->handle.valid = 1;
stack->handle.extra = 0;
memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
- depot_offset += required_size;
+ pool_offset += required_size;
return stack;
}
@@ -336,10 +336,10 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
* @nr_entries: Size of the storage array
* @extra_bits: Flags to store in unused bits of depot_stack_handle_t
* @alloc_flags: Allocation gfp flags
- * @can_alloc: Allocate stack slabs (increased chance of failure if false)
+ * @can_alloc: Allocate stack pools (increased chance of failure if false)
*
* Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
- * %true, is allowed to replenish the stack slab pool in case no space is left
+ * %true, is allowed to replenish the stack pool in case no space is left
* (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
* any allocations and will fail if no space is left to store the stack trace.
*
@@ -396,14 +396,14 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
goto exit;
/*
- * Check if the current or the next stack slab need to be initialized.
+ * Check if the current or the next stack pool need to be initialized.
* If so, allocate the memory - we won't be able to do that under the
* lock.
*
* The smp_load_acquire() here pairs with smp_store_release() to
- * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
+ * |next_pool_inited| in depot_alloc_stack() and init_stack_pool().
*/
- if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {
+ if (unlikely(can_alloc && !smp_load_acquire(&next_pool_inited))) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
@@ -417,7 +417,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
prealloc = page_address(page);
}
- raw_spin_lock_irqsave(&depot_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) {
@@ -437,10 +437,10 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
* We didn't need to store this stack trace, but let's keep
* the preallocated memory for the future.
*/
- WARN_ON(!init_stack_slab(&prealloc));
+ WARN_ON(!init_stack_pool(&prealloc));
}
- raw_spin_unlock_irqrestore(&depot_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
if (prealloc) {
/* Nobody used this memory, ok to free it. */
@@ -488,7 +488,7 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
union handle_parts parts = { .handle = handle };
- void *slab;
+ void *pool;
size_t offset = parts.offset << STACK_ALLOC_ALIGN;
struct stack_record *stack;
@@ -496,15 +496,15 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
if (!handle)
return 0;
- if (parts.slabindex > depot_index) {
- WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
- parts.slabindex, depot_index, handle);
+ if (parts.pool_index > pool_index) {
+ WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
+ parts.pool_index, pool_index, handle);
return 0;
}
- slab = stack_slabs[parts.slabindex];
- if (!slab)
+ pool = stack_pools[parts.pool_index];
+ if (!pool)
return 0;
- stack = slab + offset;
+ stack = pool + offset;
*entries = stack->entries;
return stack->size;
--
2.25.1
From: Andrey Konovalov <[email protected]>
Change the "STACK_ALLOC_" prefix to "DEPOT_" for the constants that
define the number of bits in stack depot handles and the maximum number
of pools.
The old prefix is unclear and makes one wonder how these constants are related
to stack allocations. The new prefix is also shorter.
Also simplify the comment for DEPOT_POOL_ORDER.
No functional changes.
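For reference, the resulting bit budget of a 32-bit handle with typical values
(assuming 4 KB pages, i.e. PAGE_SHIFT == 12):

  DEPOT_POOL_SIZE       = 1 << (12 + DEPOT_POOL_ORDER)              = 16 KB per pool
  DEPOT_OFFSET_BITS     = DEPOT_POOL_ORDER + 12 - DEPOT_STACK_ALIGN = 10
  DEPOT_POOL_INDEX_BITS = 32 - 1 - 10 - STACK_DEPOT_EXTRA_BITS      = 16
  DEPOT_MAX_POOLS       = min(1 << 16, DEPOT_POOLS_CAP)             = 8192 pools (128 MB total)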
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/stackdepot.c | 56 +++++++++++++++++++++++-------------------------
1 file changed, 27 insertions(+), 29 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 522e36cf449f..97bba462ee13 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -36,30 +36,28 @@
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>
-#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
-
-#define STACK_ALLOC_NULL_PROTECTION_BITS 1
-#define STACK_ALLOC_ORDER 2 /* Pool size order for stack depot, 4 pages */
-#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
-#define STACK_ALLOC_ALIGN 4
-#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
- STACK_ALLOC_ALIGN)
-#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
- STACK_ALLOC_NULL_PROTECTION_BITS - \
- STACK_ALLOC_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
-#define STACK_ALLOC_POOLS_CAP 8192
-#define STACK_ALLOC_MAX_POOLS \
- (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_POOLS_CAP) ? \
- (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_POOLS_CAP)
+#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
+
+#define DEPOT_VALID_BITS 1
+#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
+#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
+#define DEPOT_STACK_ALIGN 4
+#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
+#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
+ DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
+#define DEPOT_POOLS_CAP 8192
+#define DEPOT_MAX_POOLS \
+ (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
+ (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
/* The compact structure to store the reference to stacks. */
union handle_parts {
depot_stack_handle_t handle;
struct {
- u32 pool_index : STACK_ALLOC_INDEX_BITS;
- u32 offset : STACK_ALLOC_OFFSET_BITS;
- u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
- u32 extra : STACK_DEPOT_EXTRA_BITS;
+ u32 pool_index : DEPOT_POOL_INDEX_BITS;
+ u32 offset : DEPOT_OFFSET_BITS;
+ u32 valid : DEPOT_VALID_BITS;
+ u32 extra : STACK_DEPOT_EXTRA_BITS;
};
};
@@ -91,7 +89,7 @@ static unsigned int stack_bucket_number_order;
static unsigned int stack_hash_mask;
/* Array of memory regions that store stack traces. */
-static void *stack_pools[STACK_ALLOC_MAX_POOLS];
+static void *stack_pools[DEPOT_MAX_POOLS];
/* Currently used pool in stack_pools. */
static int pool_index;
/* Offset to the unused space in the currently used pool. */
@@ -235,7 +233,7 @@ static bool init_stack_pool(void **prealloc)
*prealloc = NULL;
} else {
/* If this is the last depot pool, do not touch the next one. */
- if (pool_index + 1 < STACK_ALLOC_MAX_POOLS) {
+ if (pool_index + 1 < DEPOT_MAX_POOLS) {
stack_pools[pool_index + 1] = *prealloc;
*prealloc = NULL;
}
@@ -255,10 +253,10 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
struct stack_record *stack;
size_t required_size = struct_size(stack, entries, size);
- required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
+ required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);
- if (unlikely(pool_offset + required_size > STACK_ALLOC_SIZE)) {
- if (unlikely(pool_index + 1 >= STACK_ALLOC_MAX_POOLS)) {
+ if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
+ if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
WARN_ONCE(1, "Stack depot reached limit capacity");
return NULL;
}
@@ -269,7 +267,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
* |next_pool_inited| in stack_depot_save() and
* init_stack_pool().
*/
- if (pool_index + 1 < STACK_ALLOC_MAX_POOLS)
+ if (pool_index + 1 < DEPOT_MAX_POOLS)
smp_store_release(&next_pool_inited, 0);
}
init_stack_pool(prealloc);
@@ -281,7 +279,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
stack->hash = hash;
stack->size = size;
stack->handle.pool_index = pool_index;
- stack->handle.offset = pool_offset >> STACK_ALLOC_ALIGN;
+ stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
stack->handle.valid = 1;
stack->handle.extra = 0;
memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
@@ -412,7 +410,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
alloc_flags &= ~GFP_ZONEMASK;
alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
alloc_flags |= __GFP_NOWARN;
- page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
+ page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
if (page)
prealloc = page_address(page);
}
@@ -444,7 +442,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
exit:
if (prealloc) {
/* Nobody used this memory, ok to free it. */
- free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
+ free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
}
if (found)
retval.handle = found->handle.handle;
@@ -489,7 +487,7 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
{
union handle_parts parts = { .handle = handle };
void *pool;
- size_t offset = parts.offset << STACK_ALLOC_ALIGN;
+ size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
*entries = NULL;
--
2.25.1
From: Andrey Konovalov <[email protected]>
Rename init_stack_pool to depot_init_pool to align the name with
depot_alloc_stack.
No functional changes.
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/stackdepot.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 97bba462ee13..7f5f08bb6c3a 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -218,7 +218,7 @@ int stack_depot_init(void)
}
EXPORT_SYMBOL_GPL(stack_depot_init);
-static bool init_stack_pool(void **prealloc)
+static bool depot_init_pool(void **prealloc)
{
if (!*prealloc)
return false;
@@ -265,12 +265,12 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
/*
* smp_store_release() here pairs with smp_load_acquire() from
* |next_pool_inited| in stack_depot_save() and
- * init_stack_pool().
+ * depot_init_pool().
*/
if (pool_index + 1 < DEPOT_MAX_POOLS)
smp_store_release(&next_pool_inited, 0);
}
- init_stack_pool(prealloc);
+ depot_init_pool(prealloc);
if (stack_pools[pool_index] == NULL)
return NULL;
@@ -399,7 +399,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
* lock.
*
* The smp_load_acquire() here pairs with smp_store_release() to
- * |next_pool_inited| in depot_alloc_stack() and init_stack_pool().
+ * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
*/
if (unlikely(can_alloc && !smp_load_acquire(&next_pool_inited))) {
/*
@@ -435,7 +435,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
* We didn't need to store this stack trace, but let's keep
* the preallocated memory for the future.
*/
- WARN_ON(!init_stack_pool(&prealloc));
+ WARN_ON(!depot_init_pool(&prealloc));
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
--
2.25.1
From: Andrey Konovalov <[email protected]>
depot_init_pool has two call sites:
1. In depot_alloc_stack with a potentially NULL prealloc.
2. In __stack_depot_save with a non-NULL prealloc.
At the same time depot_init_pool can only return false when prealloc is
NULL.
As the second call site makes sure that prealloc is not NULL, the WARN_ON
there can never trigger. Thus, drop the WARN_ON and also move the prealloc
check from depot_init_pool to its first call site.
Also change the return type of depot_init_pool to void, as the function would
now always return true.
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/stackdepot.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 7f5f08bb6c3a..d4d988276b91 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -218,16 +218,14 @@ int stack_depot_init(void)
}
EXPORT_SYMBOL_GPL(stack_depot_init);
-static bool depot_init_pool(void **prealloc)
+static void depot_init_pool(void **prealloc)
{
- if (!*prealloc)
- return false;
/*
* This smp_load_acquire() pairs with smp_store_release() to
* |next_pool_inited| below and in depot_alloc_stack().
*/
if (smp_load_acquire(&next_pool_inited))
- return true;
+ return;
if (stack_pools[pool_index] == NULL) {
stack_pools[pool_index] = *prealloc;
*prealloc = NULL;
@@ -243,7 +241,6 @@ static bool depot_init_pool(void **prealloc)
*/
smp_store_release(&next_pool_inited, 1);
}
- return true;
}
/* Allocation of a new stack in raw storage */
@@ -270,7 +267,8 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
if (pool_index + 1 < DEPOT_MAX_POOLS)
smp_store_release(&next_pool_inited, 0);
}
- depot_init_pool(prealloc);
+ if (*prealloc)
+ depot_init_pool(prealloc);
if (stack_pools[pool_index] == NULL)
return NULL;
@@ -435,7 +433,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
* We didn't need to store this stack trace, but let's keep
* the preallocated memory for the future.
*/
- WARN_ON(!depot_init_pool(&prealloc));
+ depot_init_pool(&prealloc);
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
--
2.25.1
From: Andrey Konovalov <[email protected]>
Clean up the existing comments and add new ones to depot_init_pool and
depot_alloc_stack.
As part of the clean-up, remove mentions of which variable is accessed by
smp_store_release and smp_load_acquire: this is already clear from the code.
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/stackdepot.c | 34 ++++++++++++++++++++++++----------
1 file changed, 24 insertions(+), 10 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index d4d988276b91..c4bc198c3d93 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -218,32 +218,39 @@ int stack_depot_init(void)
}
EXPORT_SYMBOL_GPL(stack_depot_init);
+/* Uses preallocated memory to initialize a new stack depot pool. */
static void depot_init_pool(void **prealloc)
{
/*
- * This smp_load_acquire() pairs with smp_store_release() to
- * |next_pool_inited| below and in depot_alloc_stack().
+ * smp_load_acquire() here pairs with smp_store_release() below and
+ * in depot_alloc_stack().
*/
if (smp_load_acquire(&next_pool_inited))
return;
+
+ /* Check if the current pool is not yet allocated. */
if (stack_pools[pool_index] == NULL) {
+ /* Use the preallocated memory for the current pool. */
stack_pools[pool_index] = *prealloc;
*prealloc = NULL;
} else {
- /* If this is the last depot pool, do not touch the next one. */
+ /*
+ * Otherwise, use the preallocated memory for the next pool
+ * as long as we do not exceed the maximum number of pools.
+ */
if (pool_index + 1 < DEPOT_MAX_POOLS) {
stack_pools[pool_index + 1] = *prealloc;
*prealloc = NULL;
}
/*
- * This smp_store_release pairs with smp_load_acquire() from
- * |next_pool_inited| above and in stack_depot_save().
+ * This smp_store_release pairs with smp_load_acquire() above
+ * and in stack_depot_save().
*/
smp_store_release(&next_pool_inited, 1);
}
}
-/* Allocation of a new stack in raw storage */
+/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
@@ -252,28 +259,35 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);
+ /* Check if there is not enough space in the current pool. */
if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
+ /* Bail out if we reached the pool limit. */
if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
WARN_ONCE(1, "Stack depot reached limit capacity");
return NULL;
}
+
+ /* Move on to the next pool. */
pool_index++;
pool_offset = 0;
/*
- * smp_store_release() here pairs with smp_load_acquire() from
- * |next_pool_inited| in stack_depot_save() and
- * depot_init_pool().
+ * smp_store_release() here pairs with smp_load_acquire() in
+ * stack_depot_save() and depot_init_pool().
*/
if (pool_index + 1 < DEPOT_MAX_POOLS)
smp_store_release(&next_pool_inited, 0);
}
+
+ /* Assign the preallocated memory to a pool if required. */
if (*prealloc)
depot_init_pool(prealloc);
+
+ /* Check if we have a pool to save the stack trace. */
if (stack_pools[pool_index] == NULL)
return NULL;
+ /* Save the stack trace. */
stack = stack_pools[pool_index] + pool_offset;
-
stack->hash = hash;
stack->size = size;
stack->handle.pool_index = pool_index;
--
2.25.1
From: Andrey Konovalov <[email protected]>
Accesses to pool_index are protected by pool_lock everywhere except
in a sanity check in stack_depot_fetch. The read access there can race
with the write access in depot_alloc_stack.
Use WRITE/READ_ONCE() to annotate the racy accesses.
As the sanity check is only used to print a warning in case of a
violation of the stack depot interface usage, it does not make a lot
of sense to use proper synchronization.
Signed-off-by: Andrey Konovalov <[email protected]>
---
Changes v1->v2:
- Improve comments as suggested by Marco.
---
lib/stackdepot.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 8c6e4e9cb535..684c2168bed9 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -278,8 +278,12 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
return NULL;
}
- /* Move on to the next pool. */
- pool_index++;
+ /*
+ * Move on to the next pool.
+ * WRITE_ONCE pairs with potential concurrent read in
+ * stack_depot_fetch().
+ */
+ WRITE_ONCE(pool_index, pool_index + 1);
pool_offset = 0;
/*
* If the maximum number of pools is not reached, take note
@@ -502,6 +506,11 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
union handle_parts parts = { .handle = handle };
+ /*
+ * READ_ONCE pairs with potential concurrent write in
+ * depot_alloc_stack.
+ */
+ int pool_index_cached = READ_ONCE(pool_index);
void *pool;
size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
@@ -510,7 +519,7 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
if (!handle)
return 0;
- if (parts.pool_index > pool_index) {
+ if (parts.pool_index > pool_index_cached) {
WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
parts.pool_index, pool_index, handle);
return 0;
--
2.25.1
From: Andrey Konovalov <[email protected]>
Stack depot uses next_pool_inited to mark that either the next pool is
initialized or the limit on the number of pools is reached. However,
the flag name only reflects the former part of its purpose, which is
confusing.
Rename next_pool_inited to next_pool_required and invert its value.
Also annotate usages of next_pool_required with comments.
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/stackdepot.c | 30 +++++++++++++++++++++---------
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index c4bc198c3d93..4df162a84bfe 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -96,8 +96,14 @@ static int pool_index;
static size_t pool_offset;
/* Lock that protects the variables above. */
static DEFINE_RAW_SPINLOCK(pool_lock);
-/* Whether the next pool is initialized. */
-static int next_pool_inited;
+/*
+ * Stack depot tries to keep an extra pool allocated even before it runs out
+ * of space in the currently used pool.
+ * This flag marks that this next extra pool needs to be allocated and
+ * initialized. It has the value 0 when either the next pool is not yet
+ * initialized or the limit on the number of pools is reached.
+ */
+static int next_pool_required = 1;
static int __init disable_stack_depot(char *str)
{
@@ -222,10 +228,12 @@ EXPORT_SYMBOL_GPL(stack_depot_init);
static void depot_init_pool(void **prealloc)
{
/*
+ * If the next pool is already initialized or the maximum number of
+ * pools is reached, do not use the preallocated memory.
* smp_load_acquire() here pairs with smp_store_release() below and
* in depot_alloc_stack().
*/
- if (smp_load_acquire(&next_pool_inited))
+ if (!smp_load_acquire(&next_pool_required))
return;
/* Check if the current pool is not yet allocated. */
@@ -243,10 +251,13 @@ static void depot_init_pool(void **prealloc)
*prealloc = NULL;
}
/*
+ * At this point, either the next pool is initialized or the
+ * maximum number of pools is reached. In either case, take
+ * note that initializing another pool is not required.
* This smp_store_release pairs with smp_load_acquire() above
* and in stack_depot_save().
*/
- smp_store_release(&next_pool_inited, 1);
+ smp_store_release(&next_pool_required, 0);
}
}
@@ -271,11 +282,13 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
pool_index++;
pool_offset = 0;
/*
+ * If the maximum number of pools is not reached, take note
+ * that the next pool needs to be initialized.
* smp_store_release() here pairs with smp_load_acquire() in
* stack_depot_save() and depot_init_pool().
*/
if (pool_index + 1 < DEPOT_MAX_POOLS)
- smp_store_release(&next_pool_inited, 0);
+ smp_store_release(&next_pool_required, 1);
}
/* Assign the preallocated memory to a pool if required. */
@@ -406,14 +419,13 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
goto exit;
/*
- * Check if the current or the next stack pool need to be initialized.
- * If so, allocate the memory - we won't be able to do that under the
- * lock.
+ * Check if another stack pool needs to be initialized. If so, allocate
+ * the memory now - we won't be able to do that under the lock.
*
* The smp_load_acquire() here pairs with smp_store_release() to
* |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
*/
- if (unlikely(can_alloc && !smp_load_acquire(&next_pool_inited))) {
+ if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
--
2.25.1
From: Andrey Konovalov <[email protected]>
The current implementation of the extra_bits interface is confusing:
passing extra_bits to __stack_depot_save makes it seem that the extra
bits are somehow stored in stack depot. In reality, they are only
embedded into a stack depot handle and are not used within stack depot.
Drop the extra_bits argument from __stack_depot_save and instead provide
a new stack_depot_set_extra_bits function (similar to the existing
stack_depot_get_extra_bits) that saves extra bits into a stack depot
handle.
Update the callers of __stack_depot_save to use the new interface.
This change also fixes a minor issue in the old code: with extra_bits used,
__stack_depot_save could return a non-zero handle even when saving the stack
trace failed.
Signed-off-by: Andrey Konovalov <[email protected]>
Changes v1->v2:
- Mark stack_depot_set_extra_bits as __must_check.
- Only assign extra bits in stack_depot_set_extra_bits for non-empty
handles.
---
include/linux/stackdepot.h | 4 +++-
lib/stackdepot.c | 42 ++++++++++++++++++++++++++++++--------
mm/kasan/common.c | 2 +-
mm/kmsan/core.c | 10 ++++++---
4 files changed, 44 insertions(+), 14 deletions(-)
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index c4e3abc16b16..267f4b2634ee 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -57,7 +57,6 @@ static inline int stack_depot_early_init(void) { return 0; }
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
- unsigned int extra_bits,
gfp_t gfp_flags, bool can_alloc);
depot_stack_handle_t stack_depot_save(unsigned long *entries,
@@ -71,6 +70,9 @@ void stack_depot_print(depot_stack_handle_t stack);
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces);
+depot_stack_handle_t __must_check stack_depot_set_extra_bits(
+ depot_stack_handle_t handle, unsigned int extra_bits);
+
unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
#endif
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 4df162a84bfe..8c6e4e9cb535 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -357,7 +357,6 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
*
* @entries: Pointer to storage array
* @nr_entries: Size of the storage array
- * @extra_bits: Flags to store in unused bits of depot_stack_handle_t
* @alloc_flags: Allocation gfp flags
* @can_alloc: Allocate stack pools (increased chance of failure if false)
*
@@ -369,10 +368,6 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
* If the stack trace in @entries is from an interrupt, only the portion up to
* interrupt entry is saved.
*
- * Additional opaque flags can be passed in @extra_bits, stored in the unused
- * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
- * without calling stack_depot_fetch().
- *
* Context: Any context, but setting @can_alloc to %false is required if
* alloc_pages() cannot be used from the current context. Currently
* this is the case from contexts where neither %GFP_ATOMIC nor
@@ -382,7 +377,6 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
*/
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
- unsigned int extra_bits,
gfp_t alloc_flags, bool can_alloc)
{
struct stack_record *found = NULL, **bucket;
@@ -471,8 +465,6 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
if (found)
retval.handle = found->handle.handle;
fast_exit:
- retval.extra = extra_bits;
-
return retval.handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
@@ -493,7 +485,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags)
{
- return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
+ return __stack_depot_save(entries, nr_entries, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
@@ -576,6 +568,38 @@ int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);
+/**
+ * stack_depot_set_extra_bits - Set extra bits in a stack depot handle
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @extra_bits: Value to set the extra bits
+ *
+ * Return: Stack depot handle with extra bits set
+ *
+ * Stack depot handles have a few unused bits, which can be used for storing
+ * user-specific information. These bits are transparent to the stack depot.
+ */
+depot_stack_handle_t __must_check stack_depot_set_extra_bits(
+ depot_stack_handle_t handle, unsigned int extra_bits)
+{
+ union handle_parts parts = { .handle = handle };
+
+ /* Don't set extra bits on empty handles. */
+ if (!handle)
+ return 0;
+
+ parts.extra = extra_bits;
+ return parts.handle;
+}
+EXPORT_SYMBOL(stack_depot_set_extra_bits);
+
+/**
+ * stack_depot_get_extra_bits - Retrieve extra bits from a stack depot handle
+ *
+ * @handle: Stack depot handle with extra bits saved
+ *
+ * Return: Extra bits retrieved from the stack depot handle
+ */
unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
union handle_parts parts = { .handle = handle };
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 833bf2cfd2a3..50f4338b477f 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -43,7 +43,7 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
unsigned int nr_entries;
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
- return __stack_depot_save(entries, nr_entries, 0, flags, can_alloc);
+ return __stack_depot_save(entries, nr_entries, flags, can_alloc);
}
void kasan_set_track(struct kasan_track *track, gfp_t flags)
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index 112dce135c7f..f710257d6867 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -69,13 +69,15 @@ depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
{
unsigned long entries[KMSAN_STACK_DEPTH];
unsigned int nr_entries;
+ depot_stack_handle_t handle;
nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0);
/* Don't sleep (see might_sleep_if() in __alloc_pages_nodemask()). */
flags &= ~__GFP_DIRECT_RECLAIM;
- return __stack_depot_save(entries, nr_entries, extra, flags, true);
+ handle = __stack_depot_save(entries, nr_entries, flags, true);
+ return stack_depot_set_extra_bits(handle, extra);
}
/* Copy the metadata following the memmove() behavior. */
@@ -215,6 +217,7 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
u32 extra_bits;
int depth;
bool uaf;
+ depot_stack_handle_t handle;
if (!id)
return id;
@@ -250,8 +253,9 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
* positives when __stack_depot_save() passes it to instrumented code.
*/
kmsan_internal_unpoison_memory(entries, sizeof(entries), false);
- return __stack_depot_save(entries, ARRAY_SIZE(entries), extra_bits,
- GFP_ATOMIC, true);
+ handle = __stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC,
+ true);
+ return stack_depot_set_extra_bits(handle, extra_bits);
}
void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
--
2.25.1
From: Andrey Konovalov <[email protected]>
Clean up comments in include/linux/stackdepot.h and lib/stackdepot.c:
1. Rework the initialization comment in stackdepot.h.
2. Rework the header comment in stackdepot.c.
3. Various clean-ups for other comments.
Also adjust whitespaces for find_stack and depot_alloc_stack call sites.
No functional changes.
Signed-off-by: Andrey Konovalov <[email protected]>
---
include/linux/stackdepot.h | 36 +++++------
lib/stackdepot.c | 120 ++++++++++++++++++-------------------
2 files changed, 78 insertions(+), 78 deletions(-)
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 267f4b2634ee..afdf8ee7b597 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * A generic stack depot implementation
+ * Stack depot - a stack trace storage that avoids duplication.
*
* Author: Alexander Potapenko <[email protected]>
* Copyright (C) 2016 Google, Inc.
*
- * Based on code by Dmitry Chernenkov.
+ * Based on the code by Dmitry Chernenkov.
*/
#ifndef _LINUX_STACKDEPOT_H
@@ -17,35 +17,37 @@ typedef u32 depot_stack_handle_t;
/*
* Number of bits in the handle that stack depot doesn't use. Users may store
- * information in them.
+ * information in them via stack_depot_set/get_extra_bits.
*/
#define STACK_DEPOT_EXTRA_BITS 5
/*
- * Every user of stack depot has to call stack_depot_init() during its own init
- * when it's decided that it will be calling stack_depot_save() later. This is
- * recommended for e.g. modules initialized later in the boot process, when
- * slab_is_available() is true.
+ * Using stack depot requires its initialization, which can be done in 3 ways:
*
- * The alternative is to select STACKDEPOT_ALWAYS_INIT to have stack depot
- * enabled as part of mm_init(), for subsystems where it's known at compile time
- * that stack depot will be used.
+ * 1. Selecting CONFIG_STACKDEPOT_ALWAYS_INIT. This option is suitable in
+ * scenarios where it's known at compile time that stack depot will be used.
+ * Enabling this config makes the kernel initialize stack depot in mm_init().
*
- * Another alternative is to call stack_depot_request_early_init(), when the
- * decision to use stack depot is taken e.g. when evaluating kernel boot
- * parameters, which precedes the enablement point in mm_init().
+ * 2. Calling stack_depot_request_early_init() during early boot, before
+ * stack_depot_early_init() in mm_init() completes. For example, this can
+ * be done when evaluating kernel boot parameters.
+ *
+ * 3. Calling stack_depot_init(). Possible after boot is complete. This option
+ * is recommended for modules initialized later in the boot process, after
+ * mm_init() completes.
*
* stack_depot_init() and stack_depot_request_early_init() can be called
- * regardless of CONFIG_STACKDEPOT and are no-op when disabled. The actual
- * save/fetch/print functions should only be called from code that makes sure
- * CONFIG_STACKDEPOT is enabled.
+ * regardless of whether CONFIG_STACKDEPOT is enabled and are no-op when this
+ * config is disabled. The save/fetch/print stack depot functions can only be
+ * called from the code that makes sure CONFIG_STACKDEPOT is enabled _and_
+ * initializes stack depot via one of the ways listed above.
*/
#ifdef CONFIG_STACKDEPOT
int stack_depot_init(void);
void __init stack_depot_request_early_init(void);
-/* This is supposed to be called only from mm_init() */
+/* Must be only called from mm_init(). */
int __init stack_depot_early_init(void);
#else
static inline int stack_depot_init(void) { return 0; }
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 684c2168bed9..02bb6cdb69dc 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -1,22 +1,26 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic stack depot for storing stack traces.
+ * Stack depot - a stack trace storage that avoids duplication.
*
- * Some debugging tools need to save stack traces of certain events which can
- * be later presented to the user. For example, KASAN needs to safe alloc and
- * free stacks for each object, but storing two stack traces per object
- * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
- * that).
+ * Stack depot is intended to be used by subsystems that need to store and
+ * later retrieve many potentially duplicated stack traces without wasting
+ * memory.
*
- * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
- * and free stacks repeat a lot, we save about 100x space.
- * Stacks are never removed from depot, so we store them contiguously one after
- * another in a contiguous memory allocation.
+ * For example, KASAN needs to save allocation and free stack traces for each
+ * object. Storing two stack traces per object requires a lot of memory (e.g.
+ * SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
+ * stack traces often repeat, using stack depot allows to save about 100x space.
+ *
+ * Internally, stack depot maintains a hash table of unique stacktraces. The
+ * stack traces themselves are stored contiguously one after another in a set
+ * of separate page allocations.
+ *
+ * Stack traces are never removed from stack depot.
*
* Author: Alexander Potapenko <[email protected]>
* Copyright (C) 2016 Google, Inc.
*
- * Based on code by Dmitry Chernenkov.
+ * Based on the code by Dmitry Chernenkov.
*/
#define pr_fmt(fmt) "stackdepot: " fmt
@@ -50,7 +54,7 @@
(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
(1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
-/* The compact structure to store the reference to stacks. */
+/* Compact structure that stores a reference to a stack. */
union handle_parts {
depot_stack_handle_t handle;
struct {
@@ -62,11 +66,11 @@ union handle_parts {
};
struct stack_record {
- struct stack_record *next; /* Link in the hashtable */
- u32 hash; /* Hash in the hastable */
- u32 size; /* Number of frames in the stack */
+ struct stack_record *next; /* Link in the hash table */
+ u32 hash; /* Hash in the hash table */
+ u32 size; /* Number of stored frames */
union handle_parts handle;
- unsigned long entries[]; /* Variable-sized array of entries. */
+ unsigned long entries[]; /* Variable-sized array of frames */
};
static bool stack_depot_disabled;
@@ -317,7 +321,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
return stack;
}
-/* Calculate hash for a stack */
+/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
return jhash2((u32 *)entries,
@@ -325,9 +329,9 @@ static inline u32 hash_stack(unsigned long *entries, unsigned int size)
STACK_HASH_SEED);
}
-/* Use our own, non-instrumented version of memcmp().
- *
- * We actually don't care about the order, just the equality.
+/*
+ * Non-instrumented version of memcmp().
+ * Does not check the lexicographical order, only the equality.
*/
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
@@ -340,7 +344,7 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
return 0;
}
-/* Find a stack that is equal to the one stored in entries in the hash */
+/* Finds a stack in a bucket of the hash table. */
static inline struct stack_record *find_stack(struct stack_record *bucket,
unsigned long *entries, int size,
u32 hash)
@@ -357,27 +361,27 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
}
/**
- * __stack_depot_save - Save a stack trace from an array
+ * __stack_depot_save - Save a stack trace to stack depot
*
- * @entries: Pointer to storage array
- * @nr_entries: Size of the storage array
- * @alloc_flags: Allocation gfp flags
+ * @entries: Pointer to the stack trace
+ * @nr_entries: Number of frames in the stack
+ * @alloc_flags: Allocation GFP flags
* @can_alloc: Allocate stack pools (increased chance of failure if false)
*
* Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
- * %true, is allowed to replenish the stack pool in case no space is left
+ * %true, stack depot can replenish the stack pools in case no space is left
* (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
- * any allocations and will fail if no space is left to store the stack trace.
+ * any allocations and fails if no space is left to store the stack trace.
*
- * If the stack trace in @entries is from an interrupt, only the portion up to
- * interrupt entry is saved.
+ * If the provided stack trace comes from the interrupt context, only the part
+ * up to the interrupt entry is saved.
*
* Context: Any context, but setting @can_alloc to %false is required if
* alloc_pages() cannot be used from the current context. Currently
- * this is the case from contexts where neither %GFP_ATOMIC nor
+ * this is the case for contexts where neither %GFP_ATOMIC nor
* %GFP_NOWAIT can be used (NMI, raw_spin_lock).
*
- * Return: The handle of the stack struct stored in depot, 0 on failure.
+ * Return: Handle of the stack struct stored in depot, 0 on failure
*/
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
@@ -392,11 +396,11 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
/*
* If this stack trace is from an interrupt, including anything before
- * interrupt entry usually leads to unbounded stackdepot growth.
+ * interrupt entry usually leads to unbounded stack depot growth.
*
- * Because use of filter_irq_stacks() is a requirement to ensure
- * stackdepot can efficiently deduplicate interrupt stacks, always
- * filter_irq_stacks() to simplify all callers' use of stackdepot.
+ * Since use of filter_irq_stacks() is a requirement to ensure stack
+ * depot can efficiently deduplicate interrupt stacks, always
+ * filter_irq_stacks() to simplify all callers' use of stack depot.
*/
nr_entries = filter_irq_stacks(entries, nr_entries);
@@ -411,8 +415,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
* The smp_load_acquire() here pairs with smp_store_release() to
* |bucket| below.
*/
- found = find_stack(smp_load_acquire(bucket), entries,
- nr_entries, hash);
+ found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
if (found)
goto exit;
@@ -441,7 +444,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) {
- struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);
+ struct stack_record *new =
+ depot_alloc_stack(entries, nr_entries, hash, &prealloc);
if (new) {
new->next = *bucket;
@@ -454,8 +458,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
}
} else if (prealloc) {
/*
- * We didn't need to store this stack trace, but let's keep
- * the preallocated memory for the future.
+ * Stack depot already contains this stack trace, but let's
+ * keep the preallocated memory for the future.
*/
depot_init_pool(&prealloc);
}
@@ -463,7 +467,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
if (prealloc) {
- /* Nobody used this memory, ok to free it. */
+ /* Stack depot didn't use this memory, free it. */
free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
}
if (found)
@@ -474,16 +478,16 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
EXPORT_SYMBOL_GPL(__stack_depot_save);
/**
- * stack_depot_save - Save a stack trace from an array
+ * stack_depot_save - Save a stack trace to stack depot
*
- * @entries: Pointer to storage array
- * @nr_entries: Size of the storage array
- * @alloc_flags: Allocation gfp flags
+ * @entries: Pointer to the stack trace
+ * @nr_entries: Number of frames in the stack
+ * @alloc_flags: Allocation GFP flags
*
* Context: Contexts where allocations via alloc_pages() are allowed.
* See __stack_depot_save() for more details.
*
- * Return: The handle of the stack struct stored in depot, 0 on failure.
+ * Return: Handle of the stack trace stored in depot, 0 on failure
*/
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
@@ -494,13 +498,12 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
EXPORT_SYMBOL_GPL(stack_depot_save);
/**
- * stack_depot_fetch - Fetch stack entries from a depot
+ * stack_depot_fetch - Fetch a stack trace from stack depot
*
- * @handle: Stack depot handle which was returned from
- * stack_depot_save().
- * @entries: Pointer to store the entries address
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @entries: Pointer to store the address of the stack trace
*
- * Return: The number of trace entries for this depot.
+ * Return: Number of frames for the fetched stack
*/
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
@@ -535,11 +538,9 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
EXPORT_SYMBOL_GPL(stack_depot_fetch);
/**
- * stack_depot_print - print stack entries from a depot
- *
- * @stack: Stack depot handle which was returned from
- * stack_depot_save().
+ * stack_depot_print - Print a stack trace from stack depot
*
+ * @stack: Stack depot handle returned from stack_depot_save()
*/
void stack_depot_print(depot_stack_handle_t stack)
{
@@ -553,17 +554,14 @@ void stack_depot_print(depot_stack_handle_t stack)
EXPORT_SYMBOL_GPL(stack_depot_print);
/**
- * stack_depot_snprint - print stack entries from a depot into a buffer
+ * stack_depot_snprint - Print a stack trace from stack depot into a buffer
*
- * @handle: Stack depot handle which was returned from
- * stack_depot_save().
+ * @handle: Stack depot handle returned from stack_depot_save()
* @buf: Pointer to the print buffer
- *
* @size: Size of the print buffer
- *
* @spaces: Number of leading spaces to print
*
- * Return: Number of bytes printed.
+ * Return: Number of bytes printed
*/
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces)
--
2.25.1
From: Andrey Konovalov <[email protected]>
Move all interface- and usage-related documentation comments to
include/linux/stackdepot.h.
It makes sense to have them in the header where they are available to
the interface users.
Signed-off-by: Andrey Konovalov <[email protected]>
---
include/linux/stackdepot.h | 87 ++++++++++++++++++++++++++++++++++++++
lib/stackdepot.c | 87 --------------------------------------
2 files changed, 87 insertions(+), 87 deletions(-)
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index afdf8ee7b597..91f038829eaa 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -2,6 +2,17 @@
/*
* Stack depot - a stack trace storage that avoids duplication.
*
+ * Stack depot is intended to be used by subsystems that need to store and
+ * later retrieve many potentially duplicated stack traces without wasting
+ * memory.
+ *
+ * For example, KASAN needs to save allocation and free stack traces for each
+ * object. Storing two stack traces per object requires a lot of memory (e.g.
+ * SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
+ * stack traces often repeat, using stack depot allows to save about 100x space.
+ *
+ * Stack traces are never removed from stack depot.
+ *
* Author: Alexander Potapenko <[email protected]>
* Copyright (C) 2016 Google, Inc.
*
@@ -57,24 +68,100 @@ static inline void stack_depot_request_early_init(void) { }
static inline int stack_depot_early_init(void) { return 0; }
#endif
+/**
+ * __stack_depot_save - Save a stack trace to stack depot
+ *
+ * @entries: Pointer to the stack trace
+ * @nr_entries: Number of frames in the stack
+ * @alloc_flags: Allocation GFP flags
+ * @can_alloc: Allocate stack pools (increased chance of failure if false)
+ *
+ * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
+ * %true, stack depot can replenish the stack pools in case no space is left
+ * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
+ * any allocations and fails if no space is left to store the stack trace.
+ *
+ * If the provided stack trace comes from the interrupt context, only the part
+ * up to the interrupt entry is saved.
+ *
+ * Context: Any context, but setting @can_alloc to %false is required if
+ * alloc_pages() cannot be used from the current context. Currently
+ * this is the case for contexts where neither %GFP_ATOMIC nor
+ * %GFP_NOWAIT can be used (NMI, raw_spin_lock).
+ *
+ * Return: Handle of the stack struct stored in depot, 0 on failure
+ */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t gfp_flags, bool can_alloc);
+/**
+ * stack_depot_save - Save a stack trace to stack depot
+ *
+ * @entries: Pointer to the stack trace
+ * @nr_entries: Number of frames in the stack
+ * @alloc_flags: Allocation GFP flags
+ *
+ * Context: Contexts where allocations via alloc_pages() are allowed.
+ * See __stack_depot_save() for more details.
+ *
+ * Return: Handle of the stack trace stored in depot, 0 on failure
+ */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries, gfp_t gfp_flags);
+/**
+ * stack_depot_fetch - Fetch a stack trace from stack depot
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @entries: Pointer to store the address of the stack trace
+ *
+ * Return: Number of frames for the fetched stack
+ */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries);
+/**
+ * stack_depot_print - Print a stack trace from stack depot
+ *
+ * @stack: Stack depot handle returned from stack_depot_save()
+ */
void stack_depot_print(depot_stack_handle_t stack);
+/**
+ * stack_depot_snprint - Print a stack trace from stack depot into a buffer
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @buf: Pointer to the print buffer
+ * @size: Size of the print buffer
+ * @spaces: Number of leading spaces to print
+ *
+ * Return: Number of bytes printed
+ */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces);
+/**
+ * stack_depot_set_extra_bits - Set extra bits in a stack depot handle
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @extra_bits: Value to set the extra bits
+ *
+ * Return: Stack depot handle with extra bits set
+ *
+ * Stack depot handles have a few unused bits, which can be used for storing
+ * user-specific information. These bits are transparent to the stack depot.
+ */
depot_stack_handle_t __must_check stack_depot_set_extra_bits(
depot_stack_handle_t handle, unsigned int extra_bits);
+/**
+ * stack_depot_get_extra_bits - Retrieve extra bits from a stack depot handle
+ *
+ * @handle: Stack depot handle with extra bits saved
+ *
+ * Return: Extra bits retrieved from the stack depot handle
+ */
unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
#endif
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 02bb6cdb69dc..ec772e78af39 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -2,21 +2,10 @@
/*
* Stack depot - a stack trace storage that avoids duplication.
*
- * Stack depot is intended to be used by subsystems that need to store and
- * later retrieve many potentially duplicated stack traces without wasting
- * memory.
- *
- * For example, KASAN needs to save allocation and free stack traces for each
- * object. Storing two stack traces per object requires a lot of memory (e.g.
- * SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
- * stack traces often repeat, using stack depot allows to save about 100x space.
- *
* Internally, stack depot maintains a hash table of unique stacktraces. The
* stack traces themselves are stored contiguously one after another in a set
* of separate page allocations.
*
- * Stack traces are never removed from stack depot.
- *
* Author: Alexander Potapenko <[email protected]>
* Copyright (C) 2016 Google, Inc.
*
@@ -360,29 +349,6 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
return NULL;
}
-/**
- * __stack_depot_save - Save a stack trace to stack depot
- *
- * @entries: Pointer to the stack trace
- * @nr_entries: Number of frames in the stack
- * @alloc_flags: Allocation GFP flags
- * @can_alloc: Allocate stack pools (increased chance of failure if false)
- *
- * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
- * %true, stack depot can replenish the stack pools in case no space is left
- * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
- * any allocations and fails if no space is left to store the stack trace.
- *
- * If the provided stack trace comes from the interrupt context, only the part
- * up to the interrupt entry is saved.
- *
- * Context: Any context, but setting @can_alloc to %false is required if
- * alloc_pages() cannot be used from the current context. Currently
- * this is the case for contexts where neither %GFP_ATOMIC nor
- * %GFP_NOWAIT can be used (NMI, raw_spin_lock).
- *
- * Return: Handle of the stack struct stored in depot, 0 on failure
- */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags, bool can_alloc)
@@ -477,18 +443,6 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
-/**
- * stack_depot_save - Save a stack trace to stack depot
- *
- * @entries: Pointer to the stack trace
- * @nr_entries: Number of frames in the stack
- * @alloc_flags: Allocation GFP flags
- *
- * Context: Contexts where allocations via alloc_pages() are allowed.
- * See __stack_depot_save() for more details.
- *
- * Return: Handle of the stack trace stored in depot, 0 on failure
- */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags)
@@ -497,14 +451,6 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
}
EXPORT_SYMBOL_GPL(stack_depot_save);
-/**
- * stack_depot_fetch - Fetch a stack trace from stack depot
- *
- * @handle: Stack depot handle returned from stack_depot_save()
- * @entries: Pointer to store the address of the stack trace
- *
- * Return: Number of frames for the fetched stack
- */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
@@ -537,11 +483,6 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
-/**
- * stack_depot_print - Print a stack trace from stack depot
- *
- * @stack: Stack depot handle returned from stack_depot_save()
- */
void stack_depot_print(depot_stack_handle_t stack)
{
unsigned long *entries;
@@ -553,16 +494,6 @@ void stack_depot_print(depot_stack_handle_t stack)
}
EXPORT_SYMBOL_GPL(stack_depot_print);
-/**
- * stack_depot_snprint - Print a stack trace from stack depot into a buffer
- *
- * @handle: Stack depot handle returned from stack_depot_save()
- * @buf: Pointer to the print buffer
- * @size: Size of the print buffer
- * @spaces: Number of leading spaces to print
- *
- * Return: Number of bytes printed
- */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces)
{
@@ -575,17 +506,6 @@ int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);
-/**
- * stack_depot_set_extra_bits - Set extra bits in a stack depot handle
- *
- * @handle: Stack depot handle returned from stack_depot_save()
- * @extra_bits: Value to set the extra bits
- *
- * Return: Stack depot handle with extra bits set
- *
- * Stack depot handles have a few unused bits, which can be used for storing
- * user-specific information. These bits are transparent to the stack depot.
- */
depot_stack_handle_t __must_check stack_depot_set_extra_bits(
depot_stack_handle_t handle, unsigned int extra_bits)
{
@@ -600,13 +520,6 @@ depot_stack_handle_t __must_check stack_depot_set_extra_bits(
}
EXPORT_SYMBOL(stack_depot_set_extra_bits);
-/**
- * stack_depot_get_extra_bits - Retrieve extra bits from a stack depot handle
- *
- * @handle: Stack depot handle with extra bits saved
- *
- * Return: Extra bits retrieved from the stack depot handle
- */
unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
union handle_parts parts = { .handle = handle };
--
2.25.1
On 2/10/23 22:15, [email protected] wrote:
> From: Andrey Konovalov <[email protected]>
>
> Use "pool" instead of "slab" for naming memory regions stack depot
> uses to store stack traces. Using "slab" is confusing, as stack depot
> pools have nothing to do with the slab allocator.
>
> Also give better names to pool-related global variables: change
> "depot_" prefix to "pool_" to point out that these variables are
> related to stack depot pools.
>
> Also rename the slabindex (poolindex) field in handle_parts to pool_index
> to align its name with the pool_index global variable.
>
> No functional changes.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Thanks!
On Fri, Feb 10, 2023 at 10:17 PM <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> Group stack depot global variables by their purpose:
>
> 1. Hash table-related variables,
> 2. Slab-related variables,
>
> and add comments.
>
> Also clean up comments for hash table-related constants.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
On Fri, Feb 10, 2023 at 10:17 PM <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> Give more meaningful names to hash table-related constants and variables:
>
> 1. Rename STACK_HASH_SCALE to STACK_HASH_TABLE_SCALE to point out that it
> is related to scaling the hash table.
>
> 2. Rename STACK_HASH_ORDER_MIN/MAX to STACK_BUCKET_NUMBER_ORDER_MIN/MAX
> to point out that it is related to the number of hash table buckets.
>
> 3. Rename stack_hash_order to stack_bucket_number_order for the same
> reason as #2.
>
> No functional changes.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
On Mon, Feb 13, 2023 at 11:20 AM Vlastimil Babka <[email protected]> wrote:
>
> On 2/10/23 22:15, [email protected] wrote:
> > From: Andrey Konovalov <[email protected]>
> >
> > Use "pool" instead of "slab" for naming memory regions stack depot
> > uses to store stack traces. Using "slab" is confusing, as stack depot
> > pools have nothing to do with the slab allocator.
> >
> > Also give better names to pool-related global variables: change
> > "depot_" prefix to "pool_" to point out that these variables are
> > related to stack depot pools.
> >
> > Also rename the slabindex (poolindex) field in handle_parts to pool_index
> > to align its name with the pool_index global variable.
> >
> > No functional changes.
> >
> > Signed-off-by: Andrey Konovalov <[email protected]>
>
> Acked-by: Vlastimil Babka <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
On Fri, Feb 10, 2023 at 10:18 PM <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> depot_init_pool has two call sites:
>
> 1. In depot_alloc_stack with a potentially NULL prealloc.
> 2. In __stack_depot_save with a non-NULL prealloc.
>
> At the same time depot_init_pool can only return false when prealloc is
> NULL.
>
> As the second call site makes sure that prealloc is not NULL, the WARN_ON
> there can never trigger. Thus, drop the WARN_ON and also move the prealloc
> check from depot_init_pool to its first call site.
>
> Also change the return type of depot_init_pool to void as it now always
> returns true.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
On Fri, Feb 10, 2023 at 10:18 PM <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> Clean up the existing comments and add new ones to depot_init_pool and
> depot_alloc_stack.
>
> As a part of the clean-up, remove mentions of which variable is accessed
> by smp_store_release and smp_load_acquire: it is clear as is from the
> code.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
Thanks for the cleanup!
On Fri, Feb 10, 2023 at 10:18 PM <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> Stack depot uses next_pool_inited to mark that either the next pool is
> initialized or the limit on the number of pools is reached. However,
> the flag name only reflects the former part of its purpose, which is
> confusing.
>
> Rename next_pool_inited to next_pool_required and invert its value.
>
> Also annotate usages of next_pool_required with comments.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
On Fri, Feb 10, 2023 at 10:18 PM <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> The current implementation of the extra_bits interface is confusing:
> passing extra_bits to __stack_depot_save makes it seem that the extra
> bits are somehow stored in stack depot. In reality, they are only
> embedded into a stack depot handle and are not used within stack depot.
>
> Drop the extra_bits argument from __stack_depot_save and instead provide
> a new stack_depot_set_extra_bits function (similar to the existing
> stack_depot_get_extra_bits) that saves extra bits into a stack depot
> handle.
>
> Update the callers of __stack_depot_save to use the new interface.
>
> This change also fixes a minor issue in the old code: with extra_bits used,
> __stack_depot_save could return a non-zero handle even when saving the stack
> trace failed.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
On Fri, Feb 10, 2023 at 10:18 PM <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> Accesses to pool_index are protected by pool_lock everywhere except
> in a sanity check in stack_depot_fetch. The read access there can race
> with the write access in depot_alloc_stack.
>
> Use WRITE/READ_ONCE() to annotate the racy accesses.
>
> As the sanity check is only used to print a warning in case of a
> violation of the stack depot interface usage, it does not make a lot
> of sense to use proper synchronization.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
On Fri, Feb 10, 2023 at 10:18 PM <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> Clean up comments in include/linux/stackdepot.h and lib/stackdepot.c:
>
> 1. Rework the initialization comment in stackdepot.h.
> 2. Rework the header comment in stackdepot.c.
> 3. Various clean-ups for other comments.
>
> Also adjust whitespaces for find_stack and depot_alloc_stack call sites.
>
> No functional changes.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
> - * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
> - * and free stacks repeat a lot, we save about 100x space.
> - * Stacks are never removed from depot, so we store them contiguously one after
> - * another in a contiguous memory allocation.
> + * For example, KASAN needs to save allocation and free stack traces for each
s/free/deallocation, maybe? (Here and below)
> + * object. Storing two stack traces per object requires a lot of memory (e.g.
> + * SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
> + * stack traces often repeat, using stack depot allows to save about 100x space.
On Fri, Feb 10, 2023 at 10:19 PM <[email protected]> wrote:
>
> From: Andrey Konovalov <[email protected]>
>
> Move all interface- and usage-related documentation comments to
> include/linux/stackdepot.h.
>
> It makes sense to have them in the header where they are available to
> the interface users.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
> + * For example, KASAN needs to save allocation and free stack traces for each
> + * object. Storing two stack traces per object requires a lot of memory (e.g.
> + * SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
> + * stack traces often repeat, using stack depot allows to save about 100x space.
> + *
> + * Stack traces are never removed from stack depot.
... from the stack depot?
On Mon, Feb 13, 2023 at 2:26 PM Alexander Potapenko <[email protected]> wrote:
>
> On Fri, Feb 10, 2023 at 10:18 PM <[email protected]> wrote:
> >
> > From: Andrey Konovalov <[email protected]>
> >
> > Clean up comments in include/linux/stackdepot.h and lib/stackdepot.c:
> >
> > 1. Rework the initialization comment in stackdepot.h.
> > 2. Rework the header comment in stackdepot.c.
> > 3. Various clean-ups for other comments.
> >
> > Also adjust whitespaces for find_stack and depot_alloc_stack call sites.
> >
> > No functional changes.
> >
> > Signed-off-by: Andrey Konovalov <[email protected]>
> Reviewed-by: Alexander Potapenko <[email protected]>
>
> > - * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
> > - * and free stacks repeat a lot, we save about 100x space.
> > - * Stacks are never removed from depot, so we store them contiguously one after
> > - * another in a contiguous memory allocation.
> > + * For example, KASAN needs to save allocation and free stack traces for each
>
> s/free/deallocation, maybe? (Here and below)
Either way looks good to me.
The patches are in mm-stable now though, so let's save this change for
another potential set of clean-ups.
Thank you!
On Mon, Feb 13, 2023 at 2:28 PM Alexander Potapenko <[email protected]> wrote:
>
> On Fri, Feb 10, 2023 at 10:19 PM <[email protected]> wrote:
> >
> > From: Andrey Konovalov <[email protected]>
> >
> > Move all interface- and usage-related documentation comments to
> > include/linux/stackdepot.h.
> >
> > It makes sense to have them in the header where they are available to
> > the interface users.
> >
> > Signed-off-by: Andrey Konovalov <[email protected]>
> Reviewed-by: Alexander Potapenko <[email protected]>
>
> > + * For example, KASAN needs to save allocation and free stack traces for each
> > + * object. Storing two stack traces per object requires a lot of memory (e.g.
> > + * SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
> > + * stack traces often repeat, using stack depot allows to save about 100x space.
> > + *
> > + * Stack traces are never removed from stack depot.
> ... from the stack depot?
I avoided using "the" for stack depot everywhere to make comments a
bit shorter, but I don't mind using it.
I see that Andrew already added a fix for this. There are other places
where "stack depot" is used without "the", but lets save this for
future clean-ups too.
Thanks!
+#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
+
+#define DEPOT_VALID_BITS 1
+#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
+#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
+#define DEPOT_STACK_ALIGN 4
+#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
+#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
+ DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
+#define DEPOT_POOLS_CAP 8192
(Proposed change: increase DEPOT_POOLS_CAP to 32768.)
+#define DEPOT_MAX_POOLS \
+ (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
+ (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
Hi Andrey,
I have a question about DEPOT_MAX_POOLS. I wasn't sure where to post it, so here it is.
I'm testing a feature of the UFS 4.0 specification called MCQ. With it enabled,
__stack_depot_save keeps printing warning messages; the call trace looks like this:
[7: OST-Normal-13:17476] ------------[ cut here ]------------
[7: OST-Normal-13:17476] Stack depot reached limit capacity
[7: OST-Normal-13:17476] pc : __stack_depot_save+0x464/0x46c
[7: OST-Normal-13:17476] lr : __stack_depot_save+0x460/0x46c
[7: OST-Normal-13:17476] sp : ffffffc008077730
[7: OST-Normal-13:17476] x29: ffffffc008077750 x28: ffffffd00b78a000 x27: 0000000000000000
[7: OST-Normal-13:17476] x26: 000000000009a7a4 x25: ffffff8914750000 x24: 000000004379a7a4
[7: OST-Normal-13:17476] x23: 00000000000001f8 x22: 0000000000000210 x21: 000000000000003f
[7: OST-Normal-13:17476] x20: ffffffc0080777b0 x19: 0000000000000000 x18: 0000000000001000
[7: OST-Normal-13:17476] x17: 2065726568207475 x16: 00000000000000c3 x15: 2d2d2d2d2d2d2d20
[7: OST-Normal-13:17476] x14: 5d36373437313a33 x13: 000000000059a740 x12: 000000000059a6f8
[7: OST-Normal-13:17476] x11: 00000000ffffffff x10: ffffffb90aba9000 x9 : 008c3feffad60900
[7: OST-Normal-13:17476] x8 : 008c3feffad60900 x7 : 000000000059a740 x6 : 000000000059a6f8
[7: OST-Normal-13:17476] x5 : ffffffc008077438 x4 : ffffffd00b196970 x3 : ffffffd0092b313c
[7: OST-Normal-13:17476] x2 : 0000000000000001 x1 : 0000000000000004 x0 : 0000000000000022
[7: OST-Normal-13:17476] Call trace:
[7: OST-Normal-13:17476] __stack_depot_save+0x464/0x46c
[7: OST-Normal-13:17476] kasan_save_stack+0x58/0x70
[7: OST-Normal-13:17476] save_stack_info+0x34/0x138
[7: OST-Normal-13:17476] kasan_save_free_info+0x18/0x24
[7: OST-Normal-13:17476] ____kasan_slab_free+0x16c/0x170
[7: OST-Normal-13:17476] __kasan_slab_free+0x10/0x20
[7: OST-Normal-13:17476] kmem_cache_free+0x238/0x53c
[7: OST-Normal-13:17476] mempool_free_slab+0x1c/0x28
[7: OST-Normal-13:17476] mempool_free+0x7c/0x1a0
[7: OST-Normal-13:17476] sg_pool_free+0x6c/0x84
[7: OST-Normal-13:17476] __sg_free_table+0x88/0xbc
[7: OST-Normal-13:17476] sg_free_table_chained+0x40/0x4c
[7: OST-Normal-13:17476] scsi_free_sgtables+0x3c/0x7c
[7: OST-Normal-13:17476] scsi_mq_uninit_cmd+0x20/0x7c
[7: OST-Normal-13:17476] scsi_end_request+0xd8/0x304
[7: OST-Normal-13:17476] scsi_io_completion+0x88/0x160
[7: OST-Normal-13:17476] scsi_finish_command+0x17c/0x194
[7: OST-Normal-13:17476] scsi_complete+0xcc/0x158
[7: OST-Normal-13:17476] blk_mq_complete_request+0x4c/0x5c
[7: OST-Normal-13:17476] scsi_done_internal+0xf4/0x1e0
[7: OST-Normal-13:17476] scsi_done+0x14/0x20
[7: OST-Normal-13:17476] ufshcd_compl_one_cqe+0x578/0x71c
[7: OST-Normal-13:17476] ufshcd_mcq_poll_cqe_nolock+0xc8/0x150
[7: OST-Normal-13:17476] vendor_mcq_irq+0x74/0x88 [ufs-core]
[7: OST-Normal-13:17476] __handle_irq_event_percpu+0xd0/0x348
[7: OST-Normal-13:17476] handle_irq_event_percpu+0x24/0x74
[7: OST-Normal-13:17476] handle_irq_event+0x74/0xe0
[7: OST-Normal-13:17476] handle_fasteoi_irq+0x174/0x240
[7: OST-Normal-13:17476] handle_irq_desc+0x7c/0x2c0
[7: OST-Normal-13:17476] generic_handle_domain_irq+0x1c/0x28
[7: OST-Normal-13:17476] gic_handle_irq+0x64/0x158
[7: OST-Normal-13:17476] call_on_irq_stack+0x2c/0x54
[7: OST-Normal-13:17476] do_interrupt_handler+0x70/0xa0
[7: OST-Normal-13:17476] el1_interrupt+0x34/0x68
[7: OST-Normal-13:17476] el1h_64_irq_handler+0x18/0x24
[7: OST-Normal-13:17476] el1h_64_irq+0x68/0x6c
[7: OST-Normal-13:17476] __hwasan_check_x0_67043363+0xc/0x30
[7: OST-Normal-13:17476] ufshcd_queuecommand+0x5f8/0x7b4
[7: OST-Normal-13:17476] scsi_queue_rq+0xb88/0xea4
[7: OST-Normal-13:17476] blk_mq_dispatch_rq_list+0x640/0xe18
[7: OST-Normal-13:17476] blk_mq_do_dispatch_sched+0x47c/0x530
[7: OST-Normal-13:17476] __blk_mq_sched_dispatch_requests+0x158/0x1cc
[7: OST-Normal-13:17476] blk_mq_sched_dispatch_requests+0x68/0x9c
[7: OST-Normal-13:17476] __blk_mq_run_hw_queue+0x9c/0x11c
[7: OST-Normal-13:17476] __blk_mq_delay_run_hw_queue+0xa4/0x234
[7: OST-Normal-13:17476] blk_mq_run_hw_queue+0x130/0x150
[7: OST-Normal-13:17476] blk_mq_sched_insert_requests+0x208/0x3a0
[7: OST-Normal-13:17476] blk_mq_flush_plug_list+0x21c/0x4f0
[7: OST-Normal-13:17476] __blk_flush_plug+0x180/0x1d8
[7: OST-Normal-13:17476] blk_finish_plug+0x40/0x5c
[7: OST-Normal-13:17476] read_pages+0x420/0x4ac
[7: OST-Normal-13:17476] page_cache_ra_unbounded+0xec/0x288
[7: OST-Normal-13:17476] do_page_cache_ra+0x60/0x6c
[7: OST-Normal-13:17476] page_cache_ra_order+0x318/0x364
[7: OST-Normal-13:17476] do_sync_mmap_readahead+0x1a0/0x3c8
[7: OST-Normal-13:17476] filemap_fault+0x260/0x68c
[7: OST-Normal-13:17476] __do_fault+0x80/0x1b4
[7: OST-Normal-13:17476] handle_mm_fault+0x6b4/0x1530
[7: OST-Normal-13:17476] do_page_fault+0x3ec/0x5d4
[7: OST-Normal-13:17476] do_translation_fault+0x44/0x5c
[7: OST-Normal-13:17476] do_mem_abort+0x54/0xd8
[7: OST-Normal-13:17476] el0_ia+0x68/0xf4
[7: OST-Normal-13:17476] el0t_64_sync_handler+0xd0/0x114
[7: OST-Normal-13:17476] el0t_64_sync+0x190/0x194
After analyzing this, it seems that the stack depot pools are running out of
capacity, so what do you think about increasing DEPOT_POOLS_CAP to 32768?
Please let us know your opinion.
Thanks.