2021-04-14 18:07:26

by Oliver Glitta

Subject: [PATCH] mm/slub: use stackdepot to save stack trace in objects

From: Oliver Glitta <[email protected]>

Many stack traces are similar so there are many similar arrays.
Stackdepot saves each unique stack only once.

Replace field addrs in struct track with depot_stack_handle_t handle.
Use stackdepot to save stack trace.

The benefits are smaller memory overhead and possibility to aggregate
per-cache statistics in the future using the stackdepot handle
instead of matching stacks manually.

Signed-off-by: Oliver Glitta <[email protected]>
---
init/Kconfig | 1 +
mm/slub.c | 79 ++++++++++++++++++++++++++++++++--------------------
2 files changed, 50 insertions(+), 30 deletions(-)
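
Not part of the patch, for readers new to stackdepot: a minimal, self-contained
sketch of the save/fetch round-trip that struct track now relies on. The
stacktrace/stackdepot calls are the real kernel APIs; record_stack() and
report_stack() are illustrative names only.

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Save the current call stack; identical stacks map to the same handle. */
static depot_stack_handle_t record_stack(gfp_t flags)
{
	unsigned long entries[16];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	return stack_depot_save(entries, nr_entries, flags);
}

/* Turn a handle back into the saved addresses and print them. */
static void report_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr_entries, 0);
}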

diff --git a/init/Kconfig b/init/Kconfig
index 37a17853433a..a4ed2daa6c41 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1891,6 +1891,7 @@ config SLUB_DEBUG
default y
bool "Enable SLUB debugging support" if EXPERT
depends on SLUB && SYSFS
+ select STACKDEPOT if STACKTRACE_SUPPORT
help
SLUB has extensive debug support features. Disabling these can
result in significant savings in code size. This also disables
diff --git a/mm/slub.c b/mm/slub.c
index 9c0e26ddf300..4b18499726eb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -35,6 +35,7 @@
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
+#include <linux/stackdepot.h>

#include <trace/events/kmem.h>

@@ -203,8 +204,8 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
#define TRACK_ADDRS_COUNT 16
struct track {
unsigned long addr; /* Called from address */
-#ifdef CONFIG_STACKTRACE
- unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
+#ifdef CONFIG_STACKDEPOT
+ depot_stack_handle_t handle;
#endif
int cpu; /* Was running on cpu */
int pid; /* Pid context */
@@ -581,22 +582,27 @@ static struct track *get_track(struct kmem_cache *s, void *object,
return kasan_reset_tag(p + alloc);
}

+#ifdef CONFIG_STACKDEPOT
+static depot_stack_handle_t save_stack_trace(gfp_t flags)
+{
+ unsigned long entries[TRACK_ADDRS_COUNT];
+ depot_stack_handle_t handle;
+ unsigned int nr_entries;
+
+ nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 4);
+ handle = stack_depot_save(entries, nr_entries, flags);
+ return handle;
+}
+#endif
+
static void set_track(struct kmem_cache *s, void *object,
enum track_item alloc, unsigned long addr)
{
struct track *p = get_track(s, object, alloc);

if (addr) {
-#ifdef CONFIG_STACKTRACE
- unsigned int nr_entries;
-
- metadata_access_enable();
- nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
- TRACK_ADDRS_COUNT, 3);
- metadata_access_disable();
-
- if (nr_entries < TRACK_ADDRS_COUNT)
- p->addrs[nr_entries] = 0;
+#ifdef CONFIG_STACKDEPOT
+ p->handle = save_stack_trace(GFP_KERNEL);
#endif
p->addr = addr;
p->cpu = smp_processor_id();
@@ -623,14 +629,19 @@ static void print_track(const char *s, struct track *t, unsigned long pr_time)

pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
-#ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_STACKDEPOT
{
- int i;
- for (i = 0; i < TRACK_ADDRS_COUNT; i++)
- if (t->addrs[i])
- pr_err("\t%pS\n", (void *)t->addrs[i]);
- else
- break;
+ depot_stack_handle_t handle;
+ unsigned long *entries;
+ unsigned int nr_entries;
+
+ handle = READ_ONCE(t->handle);
+ if (!handle) {
+ pr_err("object allocation/free stack trace missing\n");
+ } else {
+ nr_entries = stack_depot_fetch(handle, &entries);
+ stack_trace_print(entries, nr_entries, 0);
+ }
}
#endif
}
@@ -4017,18 +4028,26 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
objp = fixup_red_left(s, objp);
trackp = get_track(s, objp, TRACK_ALLOC);
kpp->kp_ret = (void *)trackp->addr;
-#ifdef CONFIG_STACKTRACE
- for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
- kpp->kp_stack[i] = (void *)trackp->addrs[i];
- if (!kpp->kp_stack[i])
- break;
- }
+#ifdef CONFIG_STACKDEPOT
+ {
+ depot_stack_handle_t handle;
+ unsigned long *entries;
+ unsigned int nr_entries;

- trackp = get_track(s, objp, TRACK_FREE);
- for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
- kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
- if (!kpp->kp_free_stack[i])
- break;
+ handle = READ_ONCE(trackp->handle);
+ if (handle) {
+ nr_entries = stack_depot_fetch(handle, &entries);
+ for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
+ kpp->kp_stack[i] = (void *)entries[i];
+ }
+
+ trackp = get_track(s, objp, TRACK_FREE);
+ handle = READ_ONCE(trackp->handle);
+ if (handle) {
+ nr_entries = stack_depot_fetch(handle, &entries);
+ for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
+ kpp->kp_free_stack[i] = (void *)entries[i];
+ }
}
#endif
#endif
--
2.31.1.272.g89b43f80a5


2021-04-20 16:59:55

by Vlastimil Babka

Subject: Re: [PATCH] mm/slub: use stackdepot to save stack trace in objects

On 4/14/21 6:34 PM, [email protected] wrote:
> From: Oliver Glitta <[email protected]>
>
> Many stack traces are similar so there are many similar arrays.
> Stackdepot saves each unique stack only once.
>
> Replace field addrs in struct track with depot_stack_handle_t handle.
> Use stackdepot to save stack trace.
>
> The benefits are smaller memory overhead and possibility to aggregate
> per-cache statistics in the future using the stackdepot handle
> instead of matching stacks manually.
>
> Signed-off-by: Oliver Glitta <[email protected]>

Reviewed-by: Vlastimil Babka <[email protected]>

(again with a disclaimer that I'm the advisor of Oliver's student project)


2021-04-26 01:12:02

by David Rientjes

Subject: Re: [PATCH] mm/slub: use stackdepot to save stack trace in objects

On Wed, 14 Apr 2021, [email protected] wrote:

> From: Oliver Glitta <[email protected]>
>
> Many stack traces are similar so there are many similar arrays.
> Stackdepot saves each unique stack only once.
>
> Replace field addrs in struct track with depot_stack_handle_t handle.
> Use stackdepot to save stack trace.
>
> The benefits are smaller memory overhead and possibility to aggregate
> per-cache statistics in the future using the stackdepot handle
> instead of matching stacks manually.
>
> Signed-off-by: Oliver Glitta <[email protected]>

Acked-by: David Rientjes <[email protected]>

2021-05-10 05:26:11

by Andrew Morton

Subject: Re: [PATCH] mm/slub: use stackdepot to save stack trace in objects

On Wed, 14 Apr 2021 18:34:34 +0200 [email protected] wrote:

> Many stack traces are similar so there are many similar arrays.
> Stackdepot saves each unique stack only once.
>
> Replace field addrs in struct track with depot_stack_handle_t handle.
> Use stackdepot to save stack trace.
>
> The benefits are smaller memory overhead and possibility to aggregate
> per-cache statistics in the future using the stackdepot handle
> instead of matching stacks manually.

Which tree was this prepared against? 5.12's kmem_obj_info() is
significantly different from the version you were working on.

Please take a look, redo, retest and resend? Thanks.

2021-05-12 14:37:51

by Vlastimil Babka

Subject: Re: [PATCH] mm/slub: use stackdepot to save stack trace in objects

On 5/10/21 6:46 AM, Andrew Morton wrote:
> On Wed, 14 Apr 2021 18:34:34 +0200 [email protected] wrote:
>
>> Many stack traces are similar so there are many similar arrays.
>> Stackdepot saves each unique stack only once.
>>
>> Replace field addrs in struct track with depot_stack_handle_t handle.
>> Use stackdepot to save stack trace.
>>
>> The benefits are smaller memory overhead and possibility to aggregate
>> per-cache statistics in the future using the stackdepot handle
>> instead of matching stacks manually.
>
> Which tree was this prepared against? 5.12's kmem_obj_info() is
> significantly different from the version you were working on.

It was based on -next at the time of submission, which contained a patch in Paul's
tree that expands kmem_obj_info() to also print the free call stack [1], so that
also needs to be switched to stackdepot to work.

> Please take a look, redo, retest and resend? Thanks.

I expected [1] to be in 5.13-rc1, but as Paul explained to me, it's queued for
5.14. So if we (Oliver) rebase on current -next, can you queue it in the section
of the mmotm series that goes after -next?

Vlastimil

[1]
https://lore.kernel.org/linux-arm-kernel/[email protected]/

2021-05-13 00:17:53

by Andrew Morton

Subject: Re: [PATCH] mm/slub: use stackdepot to save stack trace in objects

On Wed, 12 May 2021 16:33:50 +0200 Vlastimil Babka <[email protected]> wrote:

> On 5/10/21 6:46 AM, Andrew Morton wrote:
> > On Wed, 14 Apr 2021 18:34:34 +0200 [email protected] wrote:
> >
> >> Many stack traces are similar so there are many similar arrays.
> >> Stackdepot saves each unique stack only once.
> >>
> >> Replace field addrs in struct track with depot_stack_handle_t handle.
> >> Use stackdepot to save stack trace.
> >>
> >> The benefits are smaller memory overhead and possibility to aggregate
> >> per-cache statistics in the future using the stackdepot handle
> >> instead of matching stacks manually.
> >
> > Which tree was this prepared against? 5.12's kmem_obj_info() is
> > significantly different from the version you were working on.
>
> It was based on -next at the time of submission, which contained a patch in Paul's
> tree that expands kmem_obj_info() to also print the free call stack [1], so that
> also needs to be switched to stackdepot to work.

OK, sorry, I should have checked.

> > Please take a look, redo, retest and resend? Thanks.
>
> I expected [1] to be in 5.13-rc1, but as Paul explained to me, it's queued for
> 5.14. So if we (Oliver) rebase on current -next, can you queue it in the section
> of the mmotm series that goes after -next?

I grabbed this version and queued it after the linux-next patches,
thanks.

2021-05-17 00:27:00

by Vlastimil Babka

Subject: [PATCH] mm/slub: use stackdepot to save stack trace in objects-fix

Paul reports [1] a lockdep splat: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected.
The kernel test robot reports [2] "BUG:sleeping_function_called_from_invalid_context_at_mm/page_alloc.c".

The stack trace might be saved from contexts where we can't block, so GFP_KERNEL
is unsafe. Use GFP_NOWAIT instead. Under memory pressure we might thus fail to
save some new unique stack, but that should be extremely rare.

[1] https://lore.kernel.org/linux-mm/20210515204622.GA2672367@paulmck-ThinkPad-P17-Gen-1/
[2] https://lore.kernel.org/linux-mm/20210516144152.GA25903@xsang-OptiPlex-9020/

Reported-and-tested-by: Paul E. McKenney <[email protected]>
Reported-by: kernel test robot <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
---
mm/slub.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
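
Not part of the fix, just a hypothetical illustration of the kind of context the
reports point at: a timer callback runs in softirq (atomic) context, and with
slub_debug=U tracking enabled the kfree() below ends up in set_track() ->
stack_depot_save(), where a GFP_KERNEL allocation could sleep.

#include <linux/slab.h>
#include <linux/timer.h>

struct foo {
	struct timer_list timer;
	/* ... */
};

/* Timer callbacks may not sleep. */
static void foo_timer_fn(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, timer);

	kfree(f);	/* records a free stack trace when tracking is enabled */
}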

diff --git a/mm/slub.c b/mm/slub.c
index 6b896b8c36f0..04824dae2e32 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -623,7 +623,7 @@ static void set_track(struct kmem_cache *s, void *object,

if (addr) {
#ifdef CONFIG_STACKDEPOT
- p->handle = save_stack_depot_trace(GFP_KERNEL);
+ p->handle = save_stack_depot_trace(GFP_NOWAIT);
#endif
p->addr = addr;
p->cpu = smp_processor_id();
--
2.31.1


2021-07-02 15:45:06

by Guenter Roeck

Subject: Re: [PATCH] mm/slub: use stackdepot to save stack trace in objects

Hi,

On Wed, Apr 14, 2021 at 06:34:34PM +0200, [email protected] wrote:
> From: Oliver Glitta <[email protected]>
>
> Many stack traces are similar so there are many similar arrays.
> Stackdepot saves each unique stack only once.
>
> Replace field addrs in struct track with depot_stack_handle_t handle.
> Use stackdepot to save stack trace.
>
> The benefits are smaller memory overhead and possibility to aggregate
> per-cache statistics in the future using the stackdepot handle
> instead of matching stacks manually.
>
> Signed-off-by: Oliver Glitta <[email protected]>

With arcv2:allnoconfig, this patch results in:

Building arcv2:allnoconfig ... failed
--------------
Error log:
arc-elf-ld: lib/stackdepot.o: in function `filter_irq_stacks':
stackdepot.c:(.text+0x43a): undefined reference to `__irqentry_text_start'
arc-elf-ld: stackdepot.c:(.text+0x43a): undefined reference to `__irqentry_text_start'
arc-elf-ld: stackdepot.c:(.text+0x45a): undefined reference to `__irqentry_text_end'
arc-elf-ld: stackdepot.c:(.text+0x45a): undefined reference to `__irqentry_text_end'
arc-elf-ld: stackdepot.c:(.text+0x468): undefined reference to `__softirqentry_text_start'
arc-elf-ld: stackdepot.c:(.text+0x468): undefined reference to `__softirqentry_text_start'
arc-elf-ld: stackdepot.c:(.text+0x470): undefined reference to `__softirqentry_text_end'
arc-elf-ld: stackdepot.c:(.text+0x470): undefined reference to `__softirqentry_text_end'

Guenter

---
# bad: [fb0ca446157a86b75502c1636b0d81e642fe6bf1] Add linux-next specific files for 20210701
# good: [62fb9874f5da54fdb243003b386128037319b219] Linux 5.13
git bisect start 'HEAD' 'v5.13'
# good: [f63c4fda987a19b1194cc45cb72fd5bf968d9d90] Merge remote-tracking branch 'rdma/for-next'
git bisect good f63c4fda987a19b1194cc45cb72fd5bf968d9d90
# good: [49c8769be0b910d4134eba07cae5d9c71b861c4a] Merge remote-tracking branch 'drm/drm-next'
git bisect good 49c8769be0b910d4134eba07cae5d9c71b861c4a
# good: [3b858fe26f206d3c65adfc06c4db473e2dcaacd2] Merge remote-tracking branch 'char-misc/char-misc-next'
git bisect good 3b858fe26f206d3c65adfc06c4db473e2dcaacd2
# good: [b7289b49bb2edbe261f3f9a554f02996a4d12c11] Merge remote-tracking branch 'cgroup/for-next'
git bisect good b7289b49bb2edbe261f3f9a554f02996a4d12c11
# good: [20bf25c2b863e97a2724092c234e1ce892f83e5c] Merge remote-tracking branch 'pwm/for-next'
git bisect good 20bf25c2b863e97a2724092c234e1ce892f83e5c
# good: [1446f64f402a42c74c60df7f255df666fe302412] linux-next-pre
git bisect good 1446f64f402a42c74c60df7f255df666fe302412
# good: [312d598a2ea9e0927c3ec1decf24d4f3693e06f1] Merge remote-tracking branch 'mhi/mhi-next'
git bisect good 312d598a2ea9e0927c3ec1decf24d4f3693e06f1
# good: [d266180aa2811c7b6a8cf3c44e40a8f02a543a23] Merge remote-tracking branch 'cxl/next'
git bisect good d266180aa2811c7b6a8cf3c44e40a8f02a543a23
# bad: [8cf245ab25c7db5c10e7f63dcff2ccf09ade5880] sh: convert to setup_initial_init_mm()
git bisect bad 8cf245ab25c7db5c10e7f63dcff2ccf09ade5880
# bad: [125069500be687630bcfe6daa80f5408912fc3ef] mm-introduce-memfd_secret-system-call-to-create-secret-memory-areas-fix
git bisect bad 125069500be687630bcfe6daa80f5408912fc3ef
# good: [c6c08f08ff06799b2c84e2a6a6258537a323d584] hexagon: use common DISCARDS macro
git bisect good c6c08f08ff06799b2c84e2a6a6258537a323d584
# bad: [e50e7ac989f6c658fd7b28b14274ae230825b1f9] mm/slub: use stackdepot to save stack trace in objects-fix
git bisect bad e50e7ac989f6c658fd7b28b14274ae230825b1f9
# bad: [d1be1dcc08d3ba68331dd47cfdea155f016c79db] mm/slub: use stackdepot to save stack trace in objects
git bisect bad d1be1dcc08d3ba68331dd47cfdea155f016c79db
# good: [8bf985a45ac528b6bcfbbdec4c3c263240b34264] hexagon: select ARCH_WANT_LD_ORPHAN_WARN
git bisect good 8bf985a45ac528b6bcfbbdec4c3c263240b34264
# first bad commit: [d1be1dcc08d3ba68331dd47cfdea155f016c79db] mm/slub: use stackdepot to save stack trace in objects

2021-07-02 16:05:00

by Vlastimil Babka

Subject: Re: [PATCH] mm/slub: use stackdepot to save stack trace in objects

On 7/2/21 5:37 PM, Guenter Roeck wrote:
> Hi,
>
> On Wed, Apr 14, 2021 at 06:34:34PM +0200, [email protected] wrote:
>> From: Oliver Glitta <[email protected]>
>>
>> Many stack traces are similar so there are many similar arrays.
>> Stackdepot saves each unique stack only once.
>>
>> Replace field addrs in struct track with depot_stack_handle_t handle.
>> Use stackdepot to save stack trace.
>>
>> The benefits are smaller memory overhead and possibility to aggregate
>> per-cache statistics in the future using the stackdepot handle
>> instead of matching stacks manually.
>>
>> Signed-off-by: Oliver Glitta <[email protected]>
>
> With arcv2:allnoconfig, this patch results in:
>
> Building arcv2:allnoconfig ... failed
> --------------
> Error log:
> arc-elf-ld: lib/stackdepot.o: in function `filter_irq_stacks':
> stackdepot.c:(.text+0x43a): undefined reference to `__irqentry_text_start'
> arc-elf-ld: stackdepot.c:(.text+0x43a): undefined reference to `__irqentry_text_start'
> arc-elf-ld: stackdepot.c:(.text+0x45a): undefined reference to `__irqentry_text_end'
> arc-elf-ld: stackdepot.c:(.text+0x45a): undefined reference to `__irqentry_text_end'
> arc-elf-ld: stackdepot.c:(.text+0x468): undefined reference to `__softirqentry_text_start'
> arc-elf-ld: stackdepot.c:(.text+0x468): undefined reference to `__softirqentry_text_start'
> arc-elf-ld: stackdepot.c:(.text+0x470): undefined reference to `__softirqentry_text_end'
> arc-elf-ld: stackdepot.c:(.text+0x470): undefined reference to `__softirqentry_text_end'

Looks to me like this patch exposed an existing problem in stackdepot that's
a result of 505a0ef15f96c ("kasan: stackdepot: move filter_irq_stacks()
to stackdepot.c"). Looks like that commit added the necessary symbols to
a number of arches' vmlinux.lds.S but not arcv2's. Alexander?

What the slub patch does is just to have stackdepot built when
SLUB_DEBUG is enabled:

select STACKDEPOT if STACKTRACE_SUPPORT

But that was already possible with PAGE_OWNER:

config PAGE_OWNER
depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
select STACKDEPOT

AFAICS there's nothing to prevent this config on arcv2.
We could perhaps exclude arcv2, but maybe it's easier to just fix up
505a0ef15f96c there?
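
For context, a paraphrased sketch (not the exact lib/stackdepot.c code) of what
filter_irq_stacks() relies on; the markers are emitted by the IRQENTRY_TEXT and
SOFTIRQENTRY_TEXT macros from include/asm-generic/vmlinux.lds.h, which is why
the link fails when an arch's vmlinux.lds.S never places them:

#include <linux/types.h>

/* Section markers provided by the linker script. */
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __softirqentry_text_start[], __softirqentry_text_end[];

static bool in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)__irqentry_text_start &&
		ptr < (unsigned long)__irqentry_text_end) ||
	       (ptr >= (unsigned long)__softirqentry_text_start &&
		ptr < (unsigned long)__softirqentry_text_end);
}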



2021-07-13 12:05:36

by Geert Uytterhoeven

Subject: Re: [PATCH] mm/slub: use stackdepot to save stack trace in objects

Hi Oliver, Yogesh,

On Wed, Apr 14, 2021 at 8:08 PM <[email protected]> wrote:
> From: Oliver Glitta <[email protected]>
>
> Many stack traces are similar so there are many similar arrays.
> Stackdepot saves each unique stack only once.
>
> Replace field addrs in struct track with depot_stack_handle_t handle.
> Use stackdepot to save stack trace.
>
> The benefits are smaller memory overhead and possibility to aggregate
> per-cache statistics in the future using the stackdepot handle
> instead of matching stacks manually.
>
> Signed-off-by: Oliver Glitta <[email protected]>

Thanks for your patch, which is now commit 788691464c294553 ("mm/slub:
use stackdepot to save stack trace in objects") in v5.14-rc1.

> --- a/init/Kconfig
> +++ b/init/Kconfig
> @@ -1891,6 +1891,7 @@ config SLUB_DEBUG
> default y
> bool "Enable SLUB debugging support" if EXPERT
> depends on SLUB && SYSFS
> + select STACKDEPOT if STACKTRACE_SUPPORT
> help
> SLUB has extensive debug support features. Disabling these can
> result in significant savings in code size. This also disables

This change increases memory consumption by 4 MiB (or more, see below).

Looking at lib/Kconfig:

| config STACK_HASH_ORDER
| int "stack depot hash size (12 => 4KB, 20 => 1024KB)"

The sizes reported here are not correct, as the actual memory consumption
is not 2^STACK_HASH_ORDER bytes, but 2^STACK_HASH_ORDER pointers.
Hence they're off by a factor of 4 or 8.

| range 12 20
| default 20

Does this really have to default to the maximum value?

| depends on STACKDEPOT
| help
| Select the hash size as a power of 2 for the stackdepot hash table.
| Choose a lower value to reduce the memory impact.
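
A quick back-of-the-envelope check of that factor (plain userspace arithmetic,
not kernel code), assuming the depot hash table is an array of one pointer per
bucket, as lib/stackdepot.c's stack_table is:

#include <stdio.h>

int main(void)
{
	unsigned int order = 20;			/* default STACK_HASH_ORDER */
	unsigned long buckets = 1UL << order;		/* number of hash buckets */
	unsigned long bytes = buckets * sizeof(void *);	/* one pointer per bucket */

	/* ~4 MiB with 4-byte pointers, ~8 MiB with 8-byte pointers,
	 * versus the 1024KB suggested by the Kconfig help text. */
	printf("%lu buckets -> %lu KiB\n", buckets, bytes >> 10);
	return 0;
}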

Gr{oetje,eeting}s,

Geert

--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- [email protected]

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds