This feature records the first and last call_rcu() call stacks and
prints both call stacks in the KASAN report.

When call_rcu() is called, we store its call stack in the slub alloc
meta-data, so that the KASAN report can print the RCU stacks.

It does not increase memory consumption, because struct
kasan_alloc_meta is not enlarged:
- add two call_rcu() call stack handles to kasan_alloc_meta (8 bytes).
- remove the free track from kasan_alloc_meta (8 bytes).

[1] https://bugzilla.kernel.org/show_bug.cgi?id=198437
[2] https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ

Signed-off-by: Walter Wu <[email protected]>
Suggested-by: Dmitry Vyukov <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Josh Triplett <[email protected]>
Cc: Mathieu Desnoyers <[email protected]>
Cc: Lai Jiangshan <[email protected]>
Cc: Joel Fernandes <[email protected]>
---
include/linux/kasan.h | 2 ++
kernel/rcu/tree.c | 3 +++
lib/Kconfig.kasan | 2 ++
mm/kasan/common.c | 4 ++--
mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
mm/kasan/kasan.h | 19 +++++++++++++++++++
mm/kasan/report.c | 21 +++++++++++++++++----
7 files changed, 74 insertions(+), 6 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 31314ca7c635..23b7ee00572d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
+void kasan_record_aux_stack(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
+static inline void kasan_record_aux_stack(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 06548e2ebb72..de872b6cc261 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -57,6 +57,7 @@
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
+#include <linux/kasan.h>
#include "../time/tick-internal.h"
#include "tree.h"
@@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
trace_rcu_callback(rcu_state.name, head,
rcu_segcblist_n_cbs(&rdp->cblist));
+ kasan_record_aux_stack(head);
+
/* Go handle any RCU core processing required. */
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 81f5464ea9e1..56a89291f1cc 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -58,6 +58,8 @@ config KASAN_GENERIC
For better error detection enable CONFIG_STACKTRACE.
Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
+ Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
+ call stack. It doesn't increase the cost of memory consumption.
config KASAN_SW_TAGS
bool "Software tag-based mode"
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 2906358e42f0..8bc618289bb1 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -41,7 +41,7 @@
#include "kasan.h"
#include "../slab.h"
-static inline depot_stack_handle_t save_stack(gfp_t flags)
+depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
unsigned long entries[KASAN_STACK_DEPTH];
unsigned int nr_entries;
@@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
track->pid = current->pid;
- track->stack = save_stack(flags);
+ track->stack = kasan_save_stack(flags);
}
void kasan_enable_current(void)
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 56ff8885fe2e..b86880c338e2 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);
+
+void kasan_record_aux_stack(void *addr)
+{
+ struct page *page = kasan_addr_to_page(addr);
+ struct kmem_cache *cache;
+ struct kasan_alloc_meta *alloc_info;
+ void *object;
+
+ if (!(page && PageSlab(page)))
+ return;
+
+ cache = page->slab_cache;
+ object = nearest_obj(cache, page, addr);
+ alloc_info = get_alloc_info(cache, object);
+
+ if (!alloc_info->rcu_stack[0])
+ /* record first call_rcu() call stack */
+ alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
+ else
+ /* record last call_rcu() call stack */
+ alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
+}
+
+struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
+ u8 idx)
+{
+ return container_of(&alloc_info->rcu_stack[idx],
+ struct kasan_track, stack);
+}
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index e8f37199d885..1cc1fb7b0de3 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -96,15 +96,28 @@ struct kasan_track {
depot_stack_handle_t stack;
};
+#ifdef CONFIG_KASAN_GENERIC
+#define SIZEOF_PTR sizeof(void *)
+#define KASAN_NR_RCU_CALL_STACKS 2
+#else /* CONFIG_KASAN_GENERIC */
#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
#define KASAN_NR_FREE_STACKS 5
#else
#define KASAN_NR_FREE_STACKS 1
#endif
+#endif /* CONFIG_KASAN_GENERIC */
struct kasan_alloc_meta {
struct kasan_track alloc_track;
+#ifdef CONFIG_KASAN_GENERIC
+ /*
+ * call_rcu() call stack is stored into struct kasan_alloc_meta.
+ * The free stack is stored into freed object.
+ */
+ depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
+#else
struct kasan_track free_track[KASAN_NR_FREE_STACKS];
+#endif
#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
u8 free_track_idx;
@@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
struct page *kasan_addr_to_page(const void *addr);
+depot_stack_handle_t kasan_save_stack(gfp_t flags);
+
#if defined(CONFIG_KASAN_GENERIC) && \
(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
void quarantine_reduce(void);
void quarantine_remove_cache(struct kmem_cache *cache);
+struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
+ u8 idx);
#else
static inline void quarantine_put(struct kasan_free_meta *info,
struct kmem_cache *cache) { }
static inline void quarantine_reduce(void) { }
static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
+static inline struct kasan_track *kasan_get_aux_stack(
+ struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
#endif
#ifdef CONFIG_KASAN_SW_TAGS
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 80f23c9da6b0..f16a1a210815 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
kasan_enable_current();
}
-static void print_track(struct kasan_track *track, const char *prefix)
+static void print_track(struct kasan_track *track, const char *prefix,
+ bool is_callrcu)
{
- pr_err("%s by task %u:\n", prefix, track->pid);
+ if (is_callrcu)
+ pr_err("%s:\n", prefix);
+ else
+ pr_err("%s by task %u:\n", prefix, track->pid);
if (track->stack) {
unsigned long *entries;
unsigned int nr_entries;
@@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
if (cache->flags & SLAB_KASAN) {
struct kasan_track *free_track;
- print_track(&alloc_info->alloc_track, "Allocated");
+ print_track(&alloc_info->alloc_track, "Allocated", false);
pr_err("\n");
free_track = kasan_get_free_track(cache, object, tag);
- print_track(free_track, "Freed");
+ print_track(free_track, "Freed", false);
pr_err("\n");
+
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ free_track = kasan_get_aux_stack(alloc_info, 0);
+ print_track(free_track, "First call_rcu() call stack", true);
+ pr_err("\n");
+ free_track = kasan_get_aux_stack(alloc_info, 1);
+ print_track(free_track, "Last call_rcu() call stack", true);
+ pr_err("\n");
+ }
}
describe_object_addr(cache, object, addr);
--
2.18.0
On Mon, May 11, 2020 at 4:31 AM Walter Wu <[email protected]> wrote:
>
> This feature will record first and last call_rcu() call stack and
> print two call_rcu() call stack in KASAN report.
>
> When call_rcu() is called, we store the call_rcu() call stack into
> slub alloc meta-data, so that KASAN report can print rcu stack.
>
> It doesn't increase the cost of memory consumption. Because we don't
> enlarge struct kasan_alloc_meta size.
> - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> - remove free track from kasan_alloc_meta, size is 8 bytes.
>
> [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
>
> Signed-off-by: Walter Wu <[email protected]>
> Suggested-by: Dmitry Vyukov <[email protected]>
> Cc: Andrey Ryabinin <[email protected]>
> Cc: Dmitry Vyukov <[email protected]>
> Cc: Alexander Potapenko <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Paul E. McKenney <[email protected]>
> Cc: Josh Triplett <[email protected]>
> Cc: Mathieu Desnoyers <[email protected]>
> Cc: Lai Jiangshan <[email protected]>
> Cc: Joel Fernandes <[email protected]>
> ---
> include/linux/kasan.h | 2 ++
> kernel/rcu/tree.c | 3 +++
> lib/Kconfig.kasan | 2 ++
> mm/kasan/common.c | 4 ++--
> mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> mm/kasan/kasan.h | 19 +++++++++++++++++++
> mm/kasan/report.c | 21 +++++++++++++++++----
> 7 files changed, 74 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 31314ca7c635..23b7ee00572d 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
>
> void kasan_cache_shrink(struct kmem_cache *cache);
> void kasan_cache_shutdown(struct kmem_cache *cache);
> +void kasan_record_aux_stack(void *ptr);
>
> #else /* CONFIG_KASAN_GENERIC */
>
> static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> +static inline void kasan_record_aux_stack(void *ptr) {}
>
> #endif /* CONFIG_KASAN_GENERIC */
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 06548e2ebb72..de872b6cc261 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -57,6 +57,7 @@
> #include <linux/slab.h>
> #include <linux/sched/isolation.h>
> #include <linux/sched/clock.h>
> +#include <linux/kasan.h>
> #include "../time/tick-internal.h"
>
> #include "tree.h"
> @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> trace_rcu_callback(rcu_state.name, head,
> rcu_segcblist_n_cbs(&rdp->cblist));
>
> + kasan_record_aux_stack(head);
This happens after we have queued the object onto some queue and after
some return statements. I think this is wrong.
We need to do this somewhere at the very beginning of the function.

This is what I meant by "hard to review": this is completely invisible
in the diff.
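For illustration, a minimal sketch of the placement being suggested,
with the rest of the __call_rcu() body elided (only the hook position
matters here):

static void __call_rcu(struct rcu_head *head, rcu_callback_t func)
{
        /*
         * Record the call_rcu() call stack before any early return
         * and before the callback is queued, so no caller is missed.
         */
        kasan_record_aux_stack(head);

        /* ... existing debug checks and rcu_segcblist enqueue ... */
}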
> /* Go handle any RCU core processing required. */
> if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> index 81f5464ea9e1..56a89291f1cc 100644
> --- a/lib/Kconfig.kasan
> +++ b/lib/Kconfig.kasan
> @@ -58,6 +58,8 @@ config KASAN_GENERIC
> For better error detection enable CONFIG_STACKTRACE.
> Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> (the resulting kernel does not boot).
> + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> + call stack. It doesn't increase the cost of memory consumption.
>
> config KASAN_SW_TAGS
> bool "Software tag-based mode"
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 2906358e42f0..8bc618289bb1 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -41,7 +41,7 @@
> #include "kasan.h"
> #include "../slab.h"
>
> -static inline depot_stack_handle_t save_stack(gfp_t flags)
> +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> {
> unsigned long entries[KASAN_STACK_DEPTH];
> unsigned int nr_entries;
> @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> static inline void set_track(struct kasan_track *track, gfp_t flags)
> {
> track->pid = current->pid;
> - track->stack = save_stack(flags);
> + track->stack = kasan_save_stack(flags);
> }
>
> void kasan_enable_current(void)
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index 56ff8885fe2e..b86880c338e2 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> DEFINE_ASAN_SET_SHADOW(f3);
> DEFINE_ASAN_SET_SHADOW(f5);
> DEFINE_ASAN_SET_SHADOW(f8);
> +
> +void kasan_record_aux_stack(void *addr)
> +{
> + struct page *page = kasan_addr_to_page(addr);
> + struct kmem_cache *cache;
> + struct kasan_alloc_meta *alloc_info;
> + void *object;
> +
> + if (!(page && PageSlab(page)))
> + return;
> +
> + cache = page->slab_cache;
> + object = nearest_obj(cache, page, addr);
> + alloc_info = get_alloc_info(cache, object);
> +
> + if (!alloc_info->rcu_stack[0])
> + /* record first call_rcu() call stack */
> + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> + else
> + /* record last call_rcu() call stack */
> + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> +}
> +
> +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> + u8 idx)
> +{
> + return container_of(&alloc_info->rcu_stack[idx],
> + struct kasan_track, stack);
> +}
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index e8f37199d885..1cc1fb7b0de3 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -96,15 +96,28 @@ struct kasan_track {
> depot_stack_handle_t stack;
> };
>
> +#ifdef CONFIG_KASAN_GENERIC
> +#define SIZEOF_PTR sizeof(void *)
> +#define KASAN_NR_RCU_CALL_STACKS 2
> +#else /* CONFIG_KASAN_GENERIC */
> #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> #define KASAN_NR_FREE_STACKS 5
> #else
> #define KASAN_NR_FREE_STACKS 1
> #endif
> +#endif /* CONFIG_KASAN_GENERIC */
>
> struct kasan_alloc_meta {
> struct kasan_track alloc_track;
> +#ifdef CONFIG_KASAN_GENERIC
> + /*
> + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> + * The free stack is stored into freed object.
> + */
> + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> +#else
> struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> +#endif
> #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> u8 free_track_idx;
> @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
>
> struct page *kasan_addr_to_page(const void *addr);
>
> +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> +
> #if defined(CONFIG_KASAN_GENERIC) && \
> (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> void quarantine_reduce(void);
> void quarantine_remove_cache(struct kmem_cache *cache);
> +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> + u8 idx);
> #else
> static inline void quarantine_put(struct kasan_free_meta *info,
> struct kmem_cache *cache) { }
> static inline void quarantine_reduce(void) { }
> static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> +static inline struct kasan_track *kasan_get_aux_stack(
> + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> #endif
>
> #ifdef CONFIG_KASAN_SW_TAGS
> diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> index 80f23c9da6b0..f16a1a210815 100644
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> kasan_enable_current();
> }
>
> -static void print_track(struct kasan_track *track, const char *prefix)
> +static void print_track(struct kasan_track *track, const char *prefix,
> + bool is_callrcu)
> {
> - pr_err("%s by task %u:\n", prefix, track->pid);
> + if (is_callrcu)
> + pr_err("%s:\n", prefix);
> + else
> + pr_err("%s by task %u:\n", prefix, track->pid);
> if (track->stack) {
> unsigned long *entries;
> unsigned int nr_entries;
> @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> if (cache->flags & SLAB_KASAN) {
> struct kasan_track *free_track;
>
> - print_track(&alloc_info->alloc_track, "Allocated");
> + print_track(&alloc_info->alloc_track, "Allocated", false);
> pr_err("\n");
> free_track = kasan_get_free_track(cache, object, tag);
> - print_track(free_track, "Freed");
> + print_track(free_track, "Freed", false);
> pr_err("\n");
> +
> + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> + free_track = kasan_get_aux_stack(alloc_info, 0);
> + print_track(free_track, "First call_rcu() call stack", true);
> + pr_err("\n");
> + free_track = kasan_get_aux_stack(alloc_info, 1);
> + print_track(free_track, "Last call_rcu() call stack", true);
> + pr_err("\n");
> + }
> }
>
> describe_object_addr(cache, object, addr);
> --
> 2.18.0
>
On Mon, May 11, 2020 at 4:31 AM Walter Wu <[email protected]> wrote:
>
> This feature will record first and last call_rcu() call stack and
> print two call_rcu() call stack in KASAN report.
>
> When call_rcu() is called, we store the call_rcu() call stack into
> slub alloc meta-data, so that KASAN report can print rcu stack.
>
> It doesn't increase the cost of memory consumption. Because we don't
> enlarge struct kasan_alloc_meta size.
> - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> - remove free track from kasan_alloc_meta, size is 8 bytes.
>
> [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
>
> Signed-off-by: Walter Wu <[email protected]>
> Suggested-by: Dmitry Vyukov <[email protected]>
> Cc: Andrey Ryabinin <[email protected]>
> Cc: Dmitry Vyukov <[email protected]>
> Cc: Alexander Potapenko <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Paul E. McKenney <[email protected]>
> Cc: Josh Triplett <[email protected]>
> Cc: Mathieu Desnoyers <[email protected]>
> Cc: Lai Jiangshan <[email protected]>
> Cc: Joel Fernandes <[email protected]>
> ---
> include/linux/kasan.h | 2 ++
> kernel/rcu/tree.c | 3 +++
> lib/Kconfig.kasan | 2 ++
> mm/kasan/common.c | 4 ++--
> mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> mm/kasan/kasan.h | 19 +++++++++++++++++++
> mm/kasan/report.c | 21 +++++++++++++++++----
> 7 files changed, 74 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 31314ca7c635..23b7ee00572d 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
>
> void kasan_cache_shrink(struct kmem_cache *cache);
> void kasan_cache_shutdown(struct kmem_cache *cache);
> +void kasan_record_aux_stack(void *ptr);
>
> #else /* CONFIG_KASAN_GENERIC */
>
> static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> +static inline void kasan_record_aux_stack(void *ptr) {}
>
> #endif /* CONFIG_KASAN_GENERIC */
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 06548e2ebb72..de872b6cc261 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -57,6 +57,7 @@
> #include <linux/slab.h>
> #include <linux/sched/isolation.h>
> #include <linux/sched/clock.h>
> +#include <linux/kasan.h>
> #include "../time/tick-internal.h"
>
> #include "tree.h"
> @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> trace_rcu_callback(rcu_state.name, head,
> rcu_segcblist_n_cbs(&rdp->cblist));
>
> + kasan_record_aux_stack(head);
> +
> /* Go handle any RCU core processing required. */
> if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> index 81f5464ea9e1..56a89291f1cc 100644
> --- a/lib/Kconfig.kasan
> +++ b/lib/Kconfig.kasan
> @@ -58,6 +58,8 @@ config KASAN_GENERIC
> For better error detection enable CONFIG_STACKTRACE.
> Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> (the resulting kernel does not boot).
> + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> + call stack. It doesn't increase the cost of memory consumption.
We don't plan to change this and this is not a bug, right? So I think
using "Currently" is confusing. What is supposed to change in the future?

s/will print/prints/

Simple present tense is the default for documentation; we are just
stating facts.

The remark about not increasing memory consumption is both false and
not useful (we don't give an option to change this).

I would just say:
"In generic mode KASAN prints first and last call_rcu() call stacks in reports."
> config KASAN_SW_TAGS
> bool "Software tag-based mode"
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 2906358e42f0..8bc618289bb1 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -41,7 +41,7 @@
> #include "kasan.h"
> #include "../slab.h"
>
> -static inline depot_stack_handle_t save_stack(gfp_t flags)
> +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> {
> unsigned long entries[KASAN_STACK_DEPTH];
> unsigned int nr_entries;
> @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> static inline void set_track(struct kasan_track *track, gfp_t flags)
> {
> track->pid = current->pid;
> - track->stack = save_stack(flags);
> + track->stack = kasan_save_stack(flags);
> }
>
> void kasan_enable_current(void)
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index 56ff8885fe2e..b86880c338e2 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> DEFINE_ASAN_SET_SHADOW(f3);
> DEFINE_ASAN_SET_SHADOW(f5);
> DEFINE_ASAN_SET_SHADOW(f8);
> +
> +void kasan_record_aux_stack(void *addr)
> +{
> + struct page *page = kasan_addr_to_page(addr);
> + struct kmem_cache *cache;
> + struct kasan_alloc_meta *alloc_info;
> + void *object;
> +
> + if (!(page && PageSlab(page)))
> + return;
> +
> + cache = page->slab_cache;
> + object = nearest_obj(cache, page, addr);
> + alloc_info = get_alloc_info(cache, object);
> +
> + if (!alloc_info->rcu_stack[0])
> + /* record first call_rcu() call stack */
> + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> + else
> + /* record last call_rcu() call stack */
> + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> +}
> +
> +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> + u8 idx)
> +{
> + return container_of(&alloc_info->rcu_stack[idx],
> + struct kasan_track, stack);
> +}
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index e8f37199d885..1cc1fb7b0de3 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -96,15 +96,28 @@ struct kasan_track {
> depot_stack_handle_t stack;
> };
>
> +#ifdef CONFIG_KASAN_GENERIC
> +#define SIZEOF_PTR sizeof(void *)
> +#define KASAN_NR_RCU_CALL_STACKS 2
> +#else /* CONFIG_KASAN_GENERIC */
> #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> #define KASAN_NR_FREE_STACKS 5
> #else
> #define KASAN_NR_FREE_STACKS 1
> #endif
> +#endif /* CONFIG_KASAN_GENERIC */
>
> struct kasan_alloc_meta {
> struct kasan_track alloc_track;
> +#ifdef CONFIG_KASAN_GENERIC
> + /*
> + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> + * The free stack is stored into freed object.
> + */
> + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> +#else
> struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> +#endif
> #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> u8 free_track_idx;
> @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
>
> struct page *kasan_addr_to_page(const void *addr);
>
> +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> +
> #if defined(CONFIG_KASAN_GENERIC) && \
> (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> void quarantine_reduce(void);
> void quarantine_remove_cache(struct kmem_cache *cache);
> +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> + u8 idx);
> #else
> static inline void quarantine_put(struct kasan_free_meta *info,
> struct kmem_cache *cache) { }
> static inline void quarantine_reduce(void) { }
> static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> +static inline struct kasan_track *kasan_get_aux_stack(
> + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> #endif
>
> #ifdef CONFIG_KASAN_SW_TAGS
> diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> index 80f23c9da6b0..f16a1a210815 100644
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> kasan_enable_current();
> }
>
> -static void print_track(struct kasan_track *track, const char *prefix)
> +static void print_track(struct kasan_track *track, const char *prefix,
> + bool is_callrcu)
> {
> - pr_err("%s by task %u:\n", prefix, track->pid);
> + if (is_callrcu)
> + pr_err("%s:\n", prefix);
> + else
> + pr_err("%s by task %u:\n", prefix, track->pid);
> if (track->stack) {
> unsigned long *entries;
> unsigned int nr_entries;
> @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> if (cache->flags & SLAB_KASAN) {
> struct kasan_track *free_track;
>
> - print_track(&alloc_info->alloc_track, "Allocated");
> + print_track(&alloc_info->alloc_track, "Allocated", false);
> pr_err("\n");
> free_track = kasan_get_free_track(cache, object, tag);
> - print_track(free_track, "Freed");
> + print_track(free_track, "Freed", false);
> pr_err("\n");
> +
> + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> + free_track = kasan_get_aux_stack(alloc_info, 0);
> + print_track(free_track, "First call_rcu() call stack", true);
> + pr_err("\n");
> + free_track = kasan_get_aux_stack(alloc_info, 1);
> + print_track(free_track, "Last call_rcu() call stack", true);
> + pr_err("\n");
> + }
> }
>
> describe_object_addr(cache, object, addr);
> --
> 2.18.0
>
On Mon, May 11, 2020 at 4:31 AM Walter Wu <[email protected]> wrote:
>
> This feature will record first and last call_rcu() call stack and
> print two call_rcu() call stack in KASAN report.
>
> When call_rcu() is called, we store the call_rcu() call stack into
> slub alloc meta-data, so that KASAN report can print rcu stack.
>
> It doesn't increase the cost of memory consumption. Because we don't
> enlarge struct kasan_alloc_meta size.
> - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> - remove free track from kasan_alloc_meta, size is 8 bytes.
>
> [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
>
> Signed-off-by: Walter Wu <[email protected]>
> Suggested-by: Dmitry Vyukov <[email protected]>
> Cc: Andrey Ryabinin <[email protected]>
> Cc: Dmitry Vyukov <[email protected]>
> Cc: Alexander Potapenko <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Paul E. McKenney <[email protected]>
> Cc: Josh Triplett <[email protected]>
> Cc: Mathieu Desnoyers <[email protected]>
> Cc: Lai Jiangshan <[email protected]>
> Cc: Joel Fernandes <[email protected]>
> ---
> include/linux/kasan.h | 2 ++
> kernel/rcu/tree.c | 3 +++
> lib/Kconfig.kasan | 2 ++
> mm/kasan/common.c | 4 ++--
> mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> mm/kasan/kasan.h | 19 +++++++++++++++++++
> mm/kasan/report.c | 21 +++++++++++++++++----
> 7 files changed, 74 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 31314ca7c635..23b7ee00572d 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
>
> void kasan_cache_shrink(struct kmem_cache *cache);
> void kasan_cache_shutdown(struct kmem_cache *cache);
> +void kasan_record_aux_stack(void *ptr);
>
> #else /* CONFIG_KASAN_GENERIC */
>
> static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> +static inline void kasan_record_aux_stack(void *ptr) {}
>
> #endif /* CONFIG_KASAN_GENERIC */
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 06548e2ebb72..de872b6cc261 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -57,6 +57,7 @@
> #include <linux/slab.h>
> #include <linux/sched/isolation.h>
> #include <linux/sched/clock.h>
> +#include <linux/kasan.h>
> #include "../time/tick-internal.h"
>
> #include "tree.h"
> @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> trace_rcu_callback(rcu_state.name, head,
> rcu_segcblist_n_cbs(&rdp->cblist));
>
> + kasan_record_aux_stack(head);
> +
> /* Go handle any RCU core processing required. */
> if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> index 81f5464ea9e1..56a89291f1cc 100644
> --- a/lib/Kconfig.kasan
> +++ b/lib/Kconfig.kasan
> @@ -58,6 +58,8 @@ config KASAN_GENERIC
> For better error detection enable CONFIG_STACKTRACE.
> Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> (the resulting kernel does not boot).
> + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> + call stack. It doesn't increase the cost of memory consumption.
>
> config KASAN_SW_TAGS
> bool "Software tag-based mode"
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 2906358e42f0..8bc618289bb1 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -41,7 +41,7 @@
> #include "kasan.h"
> #include "../slab.h"
>
> -static inline depot_stack_handle_t save_stack(gfp_t flags)
> +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> {
> unsigned long entries[KASAN_STACK_DEPTH];
> unsigned int nr_entries;
> @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> static inline void set_track(struct kasan_track *track, gfp_t flags)
> {
> track->pid = current->pid;
> - track->stack = save_stack(flags);
> + track->stack = kasan_save_stack(flags);
> }
>
> void kasan_enable_current(void)
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index 56ff8885fe2e..b86880c338e2 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> DEFINE_ASAN_SET_SHADOW(f3);
> DEFINE_ASAN_SET_SHADOW(f5);
> DEFINE_ASAN_SET_SHADOW(f8);
> +
> +void kasan_record_aux_stack(void *addr)
> +{
> + struct page *page = kasan_addr_to_page(addr);
> + struct kmem_cache *cache;
> + struct kasan_alloc_meta *alloc_info;
> + void *object;
> +
> + if (!(page && PageSlab(page)))
> + return;
> +
> + cache = page->slab_cache;
> + object = nearest_obj(cache, page, addr);
> + alloc_info = get_alloc_info(cache, object);
> +
> + if (!alloc_info->rcu_stack[0])
> + /* record first call_rcu() call stack */
> + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> + else
> + /* record last call_rcu() call stack */
> + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> +}
> +
> +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> + u8 idx)
> +{
> + return container_of(&alloc_info->rcu_stack[idx],
> + struct kasan_track, stack);
This is not type-safe: there is no kasan_track object here. And we
create a pointer to kasan_track just to carefully not treat it as a
valid kasan_track in print_track.

This adds an unnecessary if to print_track. And it does not seem
useful/nice to print:

First call_rcu() call stack:
(stack is not available)
Last call_rcu() call stack:
(stack is not available)

when no RCU stacks have been recorded.

Your intention seems to be to reuse 2 lines of code from print_track.
I would factor them out into a function:
static void print_stack(depot_stack_handle_t stack)
{
unsigned long *entries;
unsigned int nr_entries;
nr_entries = stack_depot_fetch(stack, &entries);
stack_trace_print(entries, nr_entries, 0);
}
And then this can be expressed as:
if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
stack = alloc_info->rcu_stack[0];
if (stack) {
pr_err("First call_rcu() call stack:\n");
print_stack(stack);
pr_err("\n");
}
stack = alloc_info->rcu_stack[1];
if (stack) {
pr_err("Last call_rcu() call stack:\n");
print_stack(stack);
pr_err("\n");
}
}
Or with another helper function it becomes:
if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
print_aux_stack(alloc_info->rcu_stack[0], "First");
print_aux_stack(alloc_info->rcu_stack[1], "Last");
}
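A possible shape for that helper, just to make the two call sites
above concrete (the name and the "First"/"Last" argument are only
illustrative):

static void print_aux_stack(depot_stack_handle_t stack, const char *which)
{
        /* Skip the section entirely if no stack was recorded. */
        if (!stack)
                return;
        pr_err("%s call_rcu() call stack:\n", which);
        print_stack(stack);
        pr_err("\n");
}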
> +}
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index e8f37199d885..1cc1fb7b0de3 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -96,15 +96,28 @@ struct kasan_track {
> depot_stack_handle_t stack;
> };
>
> +#ifdef CONFIG_KASAN_GENERIC
> +#define SIZEOF_PTR sizeof(void *)
> +#define KASAN_NR_RCU_CALL_STACKS 2
> +#else /* CONFIG_KASAN_GENERIC */
> #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> #define KASAN_NR_FREE_STACKS 5
> #else
> #define KASAN_NR_FREE_STACKS 1
> #endif
> +#endif /* CONFIG_KASAN_GENERIC */
>
> struct kasan_alloc_meta {
> struct kasan_track alloc_track;
> +#ifdef CONFIG_KASAN_GENERIC
> + /*
> + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> + * The free stack is stored into freed object.
> + */
> + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> +#else
> struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> +#endif
> #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> u8 free_track_idx;
> @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
>
> struct page *kasan_addr_to_page(const void *addr);
>
> +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> +
> #if defined(CONFIG_KASAN_GENERIC) && \
> (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> void quarantine_reduce(void);
> void quarantine_remove_cache(struct kmem_cache *cache);
> +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> + u8 idx);
> #else
> static inline void quarantine_put(struct kasan_free_meta *info,
> struct kmem_cache *cache) { }
> static inline void quarantine_reduce(void) { }
> static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> +static inline struct kasan_track *kasan_get_aux_stack(
> + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> #endif
>
> #ifdef CONFIG_KASAN_SW_TAGS
> diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> index 80f23c9da6b0..f16a1a210815 100644
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> kasan_enable_current();
> }
>
> -static void print_track(struct kasan_track *track, const char *prefix)
> +static void print_track(struct kasan_track *track, const char *prefix,
> + bool is_callrcu)
> {
> - pr_err("%s by task %u:\n", prefix, track->pid);
> + if (is_callrcu)
> + pr_err("%s:\n", prefix);
> + else
> + pr_err("%s by task %u:\n", prefix, track->pid);
> if (track->stack) {
> unsigned long *entries;
> unsigned int nr_entries;
> @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> if (cache->flags & SLAB_KASAN) {
> struct kasan_track *free_track;
>
> - print_track(&alloc_info->alloc_track, "Allocated");
> + print_track(&alloc_info->alloc_track, "Allocated", false);
> pr_err("\n");
> free_track = kasan_get_free_track(cache, object, tag);
> - print_track(free_track, "Freed");
> + print_track(free_track, "Freed", false);
> pr_err("\n");
> +
> + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> + free_track = kasan_get_aux_stack(alloc_info, 0);
> + print_track(free_track, "First call_rcu() call stack", true);
> + pr_err("\n");
> + free_track = kasan_get_aux_stack(alloc_info, 1);
> + print_track(free_track, "Last call_rcu() call stack", true);
> + pr_err("\n");
> + }
> }
>
> describe_object_addr(cache, object, addr);
> --
> 2.18.0
>
On Mon, May 11, 2020 at 4:31 AM Walter Wu <[email protected]> wrote:
>
> This feature will record first and last call_rcu() call stack and
> print two call_rcu() call stack in KASAN report.
>
> When call_rcu() is called, we store the call_rcu() call stack into
> slub alloc meta-data, so that KASAN report can print rcu stack.
>
> It doesn't increase the cost of memory consumption. Because we don't
> enlarge struct kasan_alloc_meta size.
> - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> - remove free track from kasan_alloc_meta, size is 8 bytes.
>
> [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
>
> Signed-off-by: Walter Wu <[email protected]>
> Suggested-by: Dmitry Vyukov <[email protected]>
> Cc: Andrey Ryabinin <[email protected]>
> Cc: Dmitry Vyukov <[email protected]>
> Cc: Alexander Potapenko <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Paul E. McKenney <[email protected]>
> Cc: Josh Triplett <[email protected]>
> Cc: Mathieu Desnoyers <[email protected]>
> Cc: Lai Jiangshan <[email protected]>
> Cc: Joel Fernandes <[email protected]>
> ---
> include/linux/kasan.h | 2 ++
> kernel/rcu/tree.c | 3 +++
> lib/Kconfig.kasan | 2 ++
> mm/kasan/common.c | 4 ++--
> mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> mm/kasan/kasan.h | 19 +++++++++++++++++++
> mm/kasan/report.c | 21 +++++++++++++++++----
> 7 files changed, 74 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 31314ca7c635..23b7ee00572d 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
>
> void kasan_cache_shrink(struct kmem_cache *cache);
> void kasan_cache_shutdown(struct kmem_cache *cache);
> +void kasan_record_aux_stack(void *ptr);
>
> #else /* CONFIG_KASAN_GENERIC */
>
> static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> +static inline void kasan_record_aux_stack(void *ptr) {}
>
> #endif /* CONFIG_KASAN_GENERIC */
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 06548e2ebb72..de872b6cc261 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -57,6 +57,7 @@
> #include <linux/slab.h>
> #include <linux/sched/isolation.h>
> #include <linux/sched/clock.h>
> +#include <linux/kasan.h>
> #include "../time/tick-internal.h"
>
> #include "tree.h"
> @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> trace_rcu_callback(rcu_state.name, head,
> rcu_segcblist_n_cbs(&rdp->cblist));
>
> + kasan_record_aux_stack(head);
> +
> /* Go handle any RCU core processing required. */
> if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> index 81f5464ea9e1..56a89291f1cc 100644
> --- a/lib/Kconfig.kasan
> +++ b/lib/Kconfig.kasan
> @@ -58,6 +58,8 @@ config KASAN_GENERIC
> For better error detection enable CONFIG_STACKTRACE.
> Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> (the resulting kernel does not boot).
> + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> + call stack. It doesn't increase the cost of memory consumption.
>
> config KASAN_SW_TAGS
> bool "Software tag-based mode"
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 2906358e42f0..8bc618289bb1 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -41,7 +41,7 @@
> #include "kasan.h"
> #include "../slab.h"
>
> -static inline depot_stack_handle_t save_stack(gfp_t flags)
> +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> {
> unsigned long entries[KASAN_STACK_DEPTH];
> unsigned int nr_entries;
> @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> static inline void set_track(struct kasan_track *track, gfp_t flags)
> {
> track->pid = current->pid;
> - track->stack = save_stack(flags);
> + track->stack = kasan_save_stack(flags);
> }
>
> void kasan_enable_current(void)
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index 56ff8885fe2e..b86880c338e2 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> DEFINE_ASAN_SET_SHADOW(f3);
> DEFINE_ASAN_SET_SHADOW(f5);
> DEFINE_ASAN_SET_SHADOW(f8);
> +
> +void kasan_record_aux_stack(void *addr)
> +{
> + struct page *page = kasan_addr_to_page(addr);
> + struct kmem_cache *cache;
> + struct kasan_alloc_meta *alloc_info;
> + void *object;
> +
> + if (!(page && PageSlab(page)))
> + return;
> +
> + cache = page->slab_cache;
> + object = nearest_obj(cache, page, addr);
> + alloc_info = get_alloc_info(cache, object);
> +
> + if (!alloc_info->rcu_stack[0])
> + /* record first call_rcu() call stack */
> + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> + else
> + /* record last call_rcu() call stack */
> + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> +}
> +
> +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> + u8 idx)
> +{
> + return container_of(&alloc_info->rcu_stack[idx],
> + struct kasan_track, stack);
> +}
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index e8f37199d885..1cc1fb7b0de3 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -96,15 +96,28 @@ struct kasan_track {
> depot_stack_handle_t stack;
> };
>
> +#ifdef CONFIG_KASAN_GENERIC
> +#define SIZEOF_PTR sizeof(void *)
Please move this to generic.c closer to kasan_set_free_info.
Unnecessary in the header.
> +#define KASAN_NR_RCU_CALL_STACKS 2
Since KASAN_NR_RCU_CALL_STACKS is only used once below, you could just
as well use 2 instead of it.
That reduces the level of indirection and cognitive load.
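i.e. the kasan_alloc_meta member below would then simply become
(sketch):

        depot_stack_handle_t rcu_stack[2]; /* first and last call_rcu() stacks */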
> +#else /* CONFIG_KASAN_GENERIC */
> #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> #define KASAN_NR_FREE_STACKS 5
> #else
> #define KASAN_NR_FREE_STACKS 1
> #endif
> +#endif /* CONFIG_KASAN_GENERIC */
>
> struct kasan_alloc_meta {
> struct kasan_track alloc_track;
> +#ifdef CONFIG_KASAN_GENERIC
> + /*
> + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> + * The free stack is stored into freed object.
> + */
> + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> +#else
> struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> +#endif
> #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> u8 free_track_idx;
> @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
>
> struct page *kasan_addr_to_page(const void *addr);
>
> +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> +
> #if defined(CONFIG_KASAN_GENERIC) && \
> (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> void quarantine_reduce(void);
> void quarantine_remove_cache(struct kmem_cache *cache);
> +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> + u8 idx);
> #else
> static inline void quarantine_put(struct kasan_free_meta *info,
> struct kmem_cache *cache) { }
> static inline void quarantine_reduce(void) { }
> static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> +static inline struct kasan_track *kasan_get_aux_stack(
> + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> #endif
>
> #ifdef CONFIG_KASAN_SW_TAGS
> diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> index 80f23c9da6b0..f16a1a210815 100644
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> kasan_enable_current();
> }
>
> -static void print_track(struct kasan_track *track, const char *prefix)
> +static void print_track(struct kasan_track *track, const char *prefix,
> + bool is_callrcu)
> {
> - pr_err("%s by task %u:\n", prefix, track->pid);
> + if (is_callrcu)
> + pr_err("%s:\n", prefix);
> + else
> + pr_err("%s by task %u:\n", prefix, track->pid);
> if (track->stack) {
> unsigned long *entries;
> unsigned int nr_entries;
> @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> if (cache->flags & SLAB_KASAN) {
> struct kasan_track *free_track;
>
> - print_track(&alloc_info->alloc_track, "Allocated");
> + print_track(&alloc_info->alloc_track, "Allocated", false);
> pr_err("\n");
> free_track = kasan_get_free_track(cache, object, tag);
> - print_track(free_track, "Freed");
> + print_track(free_track, "Freed", false);
> pr_err("\n");
> +
> + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> + free_track = kasan_get_aux_stack(alloc_info, 0);
> + print_track(free_track, "First call_rcu() call stack", true);
> + pr_err("\n");
> + free_track = kasan_get_aux_stack(alloc_info, 1);
> + print_track(free_track, "Last call_rcu() call stack", true);
> + pr_err("\n");
> + }
> }
>
> describe_object_addr(cache, object, addr);
> --
> 2.18.0
>
On Mon, 2020-05-11 at 13:08 +0200, Dmitry Vyukov wrote:
> On Mon, May 11, 2020 at 4:31 AM Walter Wu <[email protected]> wrote:
> >
> > This feature will record first and last call_rcu() call stack and
> > print two call_rcu() call stack in KASAN report.
> >
> > When call_rcu() is called, we store the call_rcu() call stack into
> > slub alloc meta-data, so that KASAN report can print rcu stack.
> >
> > It doesn't increase the cost of memory consumption. Because we don't
> > enlarge struct kasan_alloc_meta size.
> > - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> > - remove free track from kasan_alloc_meta, size is 8 bytes.
> >
> > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> > [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
> >
> > Signed-off-by: Walter Wu <[email protected]>
> > Suggested-by: Dmitry Vyukov <[email protected]>
> > Cc: Andrey Ryabinin <[email protected]>
> > Cc: Dmitry Vyukov <[email protected]>
> > Cc: Alexander Potapenko <[email protected]>
> > Cc: Andrew Morton <[email protected]>
> > Cc: Paul E. McKenney <[email protected]>
> > Cc: Josh Triplett <[email protected]>
> > Cc: Mathieu Desnoyers <[email protected]>
> > Cc: Lai Jiangshan <[email protected]>
> > Cc: Joel Fernandes <[email protected]>
> > ---
> > include/linux/kasan.h | 2 ++
> > kernel/rcu/tree.c | 3 +++
> > lib/Kconfig.kasan | 2 ++
> > mm/kasan/common.c | 4 ++--
> > mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> > mm/kasan/kasan.h | 19 +++++++++++++++++++
> > mm/kasan/report.c | 21 +++++++++++++++++----
> > 7 files changed, 74 insertions(+), 6 deletions(-)
> >
> > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > index 31314ca7c635..23b7ee00572d 100644
> > --- a/include/linux/kasan.h
> > +++ b/include/linux/kasan.h
> > @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> >
> > void kasan_cache_shrink(struct kmem_cache *cache);
> > void kasan_cache_shutdown(struct kmem_cache *cache);
> > +void kasan_record_aux_stack(void *ptr);
> >
> > #else /* CONFIG_KASAN_GENERIC */
> >
> > static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> > static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> > +static inline void kasan_record_aux_stack(void *ptr) {}
> >
> > #endif /* CONFIG_KASAN_GENERIC */
> >
> > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > index 06548e2ebb72..de872b6cc261 100644
> > --- a/kernel/rcu/tree.c
> > +++ b/kernel/rcu/tree.c
> > @@ -57,6 +57,7 @@
> > #include <linux/slab.h>
> > #include <linux/sched/isolation.h>
> > #include <linux/sched/clock.h>
> > +#include <linux/kasan.h>
> > #include "../time/tick-internal.h"
> >
> > #include "tree.h"
> > @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> > trace_rcu_callback(rcu_state.name, head,
> > rcu_segcblist_n_cbs(&rdp->cblist));
> >
> > + kasan_record_aux_stack(head);
>
> This happens after we queued the object onto some queue and after some
> return statements. I think this is wrong.
> We need to do this somewhere at the very beginning of the function.
>
Yes, we should make sure that all recordings are done fully and correctly.
> This is what I meant by "hard to review". This is completely invisible
> in the diff.
>
Thanks.
>
> > /* Go handle any RCU core processing required. */
> > if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> > unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> > diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> > index 81f5464ea9e1..56a89291f1cc 100644
> > --- a/lib/Kconfig.kasan
> > +++ b/lib/Kconfig.kasan
> > @@ -58,6 +58,8 @@ config KASAN_GENERIC
> > For better error detection enable CONFIG_STACKTRACE.
> > Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> > (the resulting kernel does not boot).
> > + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> > + call stack. It doesn't increase the cost of memory consumption.
> >
> > config KASAN_SW_TAGS
> > bool "Software tag-based mode"
> > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > index 2906358e42f0..8bc618289bb1 100644
> > --- a/mm/kasan/common.c
> > +++ b/mm/kasan/common.c
> > @@ -41,7 +41,7 @@
> > #include "kasan.h"
> > #include "../slab.h"
> >
> > -static inline depot_stack_handle_t save_stack(gfp_t flags)
> > +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> > {
> > unsigned long entries[KASAN_STACK_DEPTH];
> > unsigned int nr_entries;
> > @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> > static inline void set_track(struct kasan_track *track, gfp_t flags)
> > {
> > track->pid = current->pid;
> > - track->stack = save_stack(flags);
> > + track->stack = kasan_save_stack(flags);
> > }
> >
> > void kasan_enable_current(void)
> > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > index 56ff8885fe2e..b86880c338e2 100644
> > --- a/mm/kasan/generic.c
> > +++ b/mm/kasan/generic.c
> > @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> > DEFINE_ASAN_SET_SHADOW(f3);
> > DEFINE_ASAN_SET_SHADOW(f5);
> > DEFINE_ASAN_SET_SHADOW(f8);
> > +
> > +void kasan_record_aux_stack(void *addr)
> > +{
> > + struct page *page = kasan_addr_to_page(addr);
> > + struct kmem_cache *cache;
> > + struct kasan_alloc_meta *alloc_info;
> > + void *object;
> > +
> > + if (!(page && PageSlab(page)))
> > + return;
> > +
> > + cache = page->slab_cache;
> > + object = nearest_obj(cache, page, addr);
> > + alloc_info = get_alloc_info(cache, object);
> > +
> > + if (!alloc_info->rcu_stack[0])
> > + /* record first call_rcu() call stack */
> > + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> > + else
> > + /* record last call_rcu() call stack */
> > + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> > +}
> > +
> > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > + u8 idx)
> > +{
> > + return container_of(&alloc_info->rcu_stack[idx],
> > + struct kasan_track, stack);
> > +}
> > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> > index e8f37199d885..1cc1fb7b0de3 100644
> > --- a/mm/kasan/kasan.h
> > +++ b/mm/kasan/kasan.h
> > @@ -96,15 +96,28 @@ struct kasan_track {
> > depot_stack_handle_t stack;
> > };
> >
> > +#ifdef CONFIG_KASAN_GENERIC
> > +#define SIZEOF_PTR sizeof(void *)
> > +#define KASAN_NR_RCU_CALL_STACKS 2
> > +#else /* CONFIG_KASAN_GENERIC */
> > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > #define KASAN_NR_FREE_STACKS 5
> > #else
> > #define KASAN_NR_FREE_STACKS 1
> > #endif
> > +#endif /* CONFIG_KASAN_GENERIC */
> >
> > struct kasan_alloc_meta {
> > struct kasan_track alloc_track;
> > +#ifdef CONFIG_KASAN_GENERIC
> > + /*
> > + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> > + * The free stack is stored into freed object.
> > + */
> > + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> > +#else
> > struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> > +#endif
> > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> > u8 free_track_idx;
> > @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
> >
> > struct page *kasan_addr_to_page(const void *addr);
> >
> > +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> > +
> > #if defined(CONFIG_KASAN_GENERIC) && \
> > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> > void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> > void quarantine_reduce(void);
> > void quarantine_remove_cache(struct kmem_cache *cache);
> > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > + u8 idx);
> > #else
> > static inline void quarantine_put(struct kasan_free_meta *info,
> > struct kmem_cache *cache) { }
> > static inline void quarantine_reduce(void) { }
> > static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> > +static inline struct kasan_track *kasan_get_aux_stack(
> > + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> > #endif
> >
> > #ifdef CONFIG_KASAN_SW_TAGS
> > diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> > index 80f23c9da6b0..f16a1a210815 100644
> > --- a/mm/kasan/report.c
> > +++ b/mm/kasan/report.c
> > @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> > kasan_enable_current();
> > }
> >
> > -static void print_track(struct kasan_track *track, const char *prefix)
> > +static void print_track(struct kasan_track *track, const char *prefix,
> > + bool is_callrcu)
> > {
> > - pr_err("%s by task %u:\n", prefix, track->pid);
> > + if (is_callrcu)
> > + pr_err("%s:\n", prefix);
> > + else
> > + pr_err("%s by task %u:\n", prefix, track->pid);
> > if (track->stack) {
> > unsigned long *entries;
> > unsigned int nr_entries;
> > @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> > if (cache->flags & SLAB_KASAN) {
> > struct kasan_track *free_track;
> >
> > - print_track(&alloc_info->alloc_track, "Allocated");
> > + print_track(&alloc_info->alloc_track, "Allocated", false);
> > pr_err("\n");
> > free_track = kasan_get_free_track(cache, object, tag);
> > - print_track(free_track, "Freed");
> > + print_track(free_track, "Freed", false);
> > pr_err("\n");
> > +
> > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > + free_track = kasan_get_aux_stack(alloc_info, 0);
> > + print_track(free_track, "First call_rcu() call stack", true);
> > + pr_err("\n");
> > + free_track = kasan_get_aux_stack(alloc_info, 1);
> > + print_track(free_track, "Last call_rcu() call stack", true);
> > + pr_err("\n");
> > + }
> > }
> >
> > describe_object_addr(cache, object, addr);
> > --
> > 2.18.0
> >
On Mon, May 11, 2020 at 2:31 PM Dmitry Vyukov <[email protected]> wrote:
>
> On Mon, May 11, 2020 at 4:31 AM Walter Wu <[email protected]> wrote:
> >
> > This feature will record first and last call_rcu() call stack and
> > print two call_rcu() call stack in KASAN report.
> >
> > When call_rcu() is called, we store the call_rcu() call stack into
> > slub alloc meta-data, so that KASAN report can print rcu stack.
> >
> > It doesn't increase the cost of memory consumption. Because we don't
> > enlarge struct kasan_alloc_meta size.
> > - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> > - remove free track from kasan_alloc_meta, size is 8 bytes.
> >
> > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> > [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
> >
> > Signed-off-by: Walter Wu <[email protected]>
> > Suggested-by: Dmitry Vyukov <[email protected]>
> > Cc: Andrey Ryabinin <[email protected]>
> > Cc: Dmitry Vyukov <[email protected]>
> > Cc: Alexander Potapenko <[email protected]>
> > Cc: Andrew Morton <[email protected]>
> > Cc: Paul E. McKenney <[email protected]>
> > Cc: Josh Triplett <[email protected]>
> > Cc: Mathieu Desnoyers <[email protected]>
> > Cc: Lai Jiangshan <[email protected]>
> > Cc: Joel Fernandes <[email protected]>
> > ---
> > include/linux/kasan.h | 2 ++
> > kernel/rcu/tree.c | 3 +++
> > lib/Kconfig.kasan | 2 ++
> > mm/kasan/common.c | 4 ++--
> > mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> > mm/kasan/kasan.h | 19 +++++++++++++++++++
> > mm/kasan/report.c | 21 +++++++++++++++++----
> > 7 files changed, 74 insertions(+), 6 deletions(-)
> >
> > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > index 31314ca7c635..23b7ee00572d 100644
> > --- a/include/linux/kasan.h
> > +++ b/include/linux/kasan.h
> > @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> >
> > void kasan_cache_shrink(struct kmem_cache *cache);
> > void kasan_cache_shutdown(struct kmem_cache *cache);
> > +void kasan_record_aux_stack(void *ptr);
> >
> > #else /* CONFIG_KASAN_GENERIC */
> >
> > static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> > static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> > +static inline void kasan_record_aux_stack(void *ptr) {}
> >
> > #endif /* CONFIG_KASAN_GENERIC */
> >
> > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > index 06548e2ebb72..de872b6cc261 100644
> > --- a/kernel/rcu/tree.c
> > +++ b/kernel/rcu/tree.c
> > @@ -57,6 +57,7 @@
> > #include <linux/slab.h>
> > #include <linux/sched/isolation.h>
> > #include <linux/sched/clock.h>
> > +#include <linux/kasan.h>
> > #include "../time/tick-internal.h"
> >
> > #include "tree.h"
> > @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> > trace_rcu_callback(rcu_state.name, head,
> > rcu_segcblist_n_cbs(&rdp->cblist));
> >
> > + kasan_record_aux_stack(head);
> > +
> > /* Go handle any RCU core processing required. */
> > if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> > unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> > diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> > index 81f5464ea9e1..56a89291f1cc 100644
> > --- a/lib/Kconfig.kasan
> > +++ b/lib/Kconfig.kasan
> > @@ -58,6 +58,8 @@ config KASAN_GENERIC
> > For better error detection enable CONFIG_STACKTRACE.
> > Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> > (the resulting kernel does not boot).
> > + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> > + call stack. It doesn't increase the cost of memory consumption.
> >
> > config KASAN_SW_TAGS
> > bool "Software tag-based mode"
> > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > index 2906358e42f0..8bc618289bb1 100644
> > --- a/mm/kasan/common.c
> > +++ b/mm/kasan/common.c
> > @@ -41,7 +41,7 @@
> > #include "kasan.h"
> > #include "../slab.h"
> >
> > -static inline depot_stack_handle_t save_stack(gfp_t flags)
> > +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> > {
> > unsigned long entries[KASAN_STACK_DEPTH];
> > unsigned int nr_entries;
> > @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> > static inline void set_track(struct kasan_track *track, gfp_t flags)
> > {
> > track->pid = current->pid;
> > - track->stack = save_stack(flags);
> > + track->stack = kasan_save_stack(flags);
> > }
> >
> > void kasan_enable_current(void)
> > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > index 56ff8885fe2e..b86880c338e2 100644
> > --- a/mm/kasan/generic.c
> > +++ b/mm/kasan/generic.c
> > @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> > DEFINE_ASAN_SET_SHADOW(f3);
> > DEFINE_ASAN_SET_SHADOW(f5);
> > DEFINE_ASAN_SET_SHADOW(f8);
> > +
> > +void kasan_record_aux_stack(void *addr)
> > +{
> > + struct page *page = kasan_addr_to_page(addr);
> > + struct kmem_cache *cache;
> > + struct kasan_alloc_meta *alloc_info;
> > + void *object;
> > +
> > + if (!(page && PageSlab(page)))
> > + return;
> > +
> > + cache = page->slab_cache;
> > + object = nearest_obj(cache, page, addr);
> > + alloc_info = get_alloc_info(cache, object);
> > +
> > + if (!alloc_info->rcu_stack[0])
> > + /* record first call_rcu() call stack */
> > + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> > + else
> > + /* record last call_rcu() call stack */
> > + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> > +}
> > +
> > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > + u8 idx)
> > +{
> > + return container_of(&alloc_info->rcu_stack[idx],
> > + struct kasan_track, stack);
> > +}
> > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> > index e8f37199d885..1cc1fb7b0de3 100644
> > --- a/mm/kasan/kasan.h
> > +++ b/mm/kasan/kasan.h
> > @@ -96,15 +96,28 @@ struct kasan_track {
> > depot_stack_handle_t stack;
> > };
> >
> > +#ifdef CONFIG_KASAN_GENERIC
> > +#define SIZEOF_PTR sizeof(void *)
>
> Please move this to generic.c closer to kasan_set_free_info.
> Unnecessary in the header.
>
> > +#define KASAN_NR_RCU_CALL_STACKS 2
>
> Since KASAN_NR_RCU_CALL_STACKS is only used once below, you could as
> well use 2 instead of it.
> Reduces level of indirection and cognitive load.
>
> > +#else /* CONFIG_KASAN_GENERIC */
> > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > #define KASAN_NR_FREE_STACKS 5
> > #else
> > #define KASAN_NR_FREE_STACKS 1
> > #endif
> > +#endif /* CONFIG_KASAN_GENERIC */
> >
> > struct kasan_alloc_meta {
> > struct kasan_track alloc_track;
> > +#ifdef CONFIG_KASAN_GENERIC
> > + /*
> > + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> > + * The free stack is stored into freed object.
> > + */
> > + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> > +#else
> > struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> > +#endif
> > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> > u8 free_track_idx;
> > @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
> >
> > struct page *kasan_addr_to_page(const void *addr);
> >
> > +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> > +
> > #if defined(CONFIG_KASAN_GENERIC) && \
> > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> > void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> > void quarantine_reduce(void);
> > void quarantine_remove_cache(struct kmem_cache *cache);
> > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > + u8 idx);
> > #else
> > static inline void quarantine_put(struct kasan_free_meta *info,
> > struct kmem_cache *cache) { }
> > static inline void quarantine_reduce(void) { }
> > static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> > +static inline struct kasan_track *kasan_get_aux_stack(
> > + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> > #endif
> >
> > #ifdef CONFIG_KASAN_SW_TAGS
> > diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> > index 80f23c9da6b0..f16a1a210815 100644
> > --- a/mm/kasan/report.c
> > +++ b/mm/kasan/report.c
> > @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> > kasan_enable_current();
> > }
> >
> > -static void print_track(struct kasan_track *track, const char *prefix)
> > +static void print_track(struct kasan_track *track, const char *prefix,
> > + bool is_callrcu)
> > {
> > - pr_err("%s by task %u:\n", prefix, track->pid);
> > + if (is_callrcu)
> > + pr_err("%s:\n", prefix);
> > + else
> > + pr_err("%s by task %u:\n", prefix, track->pid);
> > if (track->stack) {
> > unsigned long *entries;
> > unsigned int nr_entries;
> > @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> > if (cache->flags & SLAB_KASAN) {
> > struct kasan_track *free_track;
> >
> > - print_track(&alloc_info->alloc_track, "Allocated");
> > + print_track(&alloc_info->alloc_track, "Allocated", false);
> > pr_err("\n");
> > free_track = kasan_get_free_track(cache, object, tag);
> > - print_track(free_track, "Freed");
> > + print_track(free_track, "Freed", false);
> > pr_err("\n");
> > +
> > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > + free_track = kasan_get_aux_stack(alloc_info, 0);
> > + print_track(free_track, "First call_rcu() call stack", true);
> > + pr_err("\n");
> > + free_track = kasan_get_aux_stack(alloc_info, 1);
> > + print_track(free_track, "Last call_rcu() call stack", true);
> > + pr_err("\n");
> > + }
> > }
> >
> > describe_object_addr(cache, object, addr);
Some higher level comments.
1. I think we need to put the free track into kasan_free_meta as it
was before. It looks like exactly the place for it. We have logic to
properly place it and to do the casts.
2. We need to zero the aux stacks when we reallocate the object. Otherwise
we print confusing garbage (see the sketch after this list).
3. __kasan_slab_free now contains a window of inconsistency where it has
marked the object as KASAN_KMALLOC_FREE but has not stored the free
track yet. If another thread prints a report in that window, it will
print random garbage.
4. We need some tests. At least (2) should be visible on tests.
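
For (2), a minimal sketch of what I mean (a hypothetical helper, reusing
the rcu_stack field names from this patch; it would be called from the
allocation hook):

#ifdef CONFIG_KASAN_GENERIC
/* Drop call_rcu() stacks recorded for the previous lifetime of the object. */
static void kasan_clear_aux_stacks(struct kasan_alloc_meta *alloc_info)
{
	alloc_info->rcu_stack[0] = 0;
	alloc_info->rcu_stack[1] = 0;
}
#endif
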
On Mon, 2020-05-11 at 14:20 +0200, 'Dmitry Vyukov' via kasan-dev wrote:
> On Mon, May 11, 2020 at 4:31 AM Walter Wu <[email protected]> wrote:
> >
> > This feature will record first and last call_rcu() call stack and
> > print two call_rcu() call stack in KASAN report.
> >
> > When call_rcu() is called, we store the call_rcu() call stack into
> > slub alloc meta-data, so that KASAN report can print rcu stack.
> >
> > It doesn't increase the cost of memory consumption. Because we don't
> > enlarge struct kasan_alloc_meta size.
> > - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> > - remove free track from kasan_alloc_meta, size is 8 bytes.
> >
> > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> > [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
> >
> > Signed-off-by: Walter Wu <[email protected]>
> > Suggested-by: Dmitry Vyukov <[email protected]>
> > Cc: Andrey Ryabinin <[email protected]>
> > Cc: Dmitry Vyukov <[email protected]>
> > Cc: Alexander Potapenko <[email protected]>
> > Cc: Andrew Morton <[email protected]>
> > Cc: Paul E. McKenney <[email protected]>
> > Cc: Josh Triplett <[email protected]>
> > Cc: Mathieu Desnoyers <[email protected]>
> > Cc: Lai Jiangshan <[email protected]>
> > Cc: Joel Fernandes <[email protected]>
> > ---
> > include/linux/kasan.h | 2 ++
> > kernel/rcu/tree.c | 3 +++
> > lib/Kconfig.kasan | 2 ++
> > mm/kasan/common.c | 4 ++--
> > mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> > mm/kasan/kasan.h | 19 +++++++++++++++++++
> > mm/kasan/report.c | 21 +++++++++++++++++----
> > 7 files changed, 74 insertions(+), 6 deletions(-)
> >
> > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > index 31314ca7c635..23b7ee00572d 100644
> > --- a/include/linux/kasan.h
> > +++ b/include/linux/kasan.h
> > @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> >
> > void kasan_cache_shrink(struct kmem_cache *cache);
> > void kasan_cache_shutdown(struct kmem_cache *cache);
> > +void kasan_record_aux_stack(void *ptr);
> >
> > #else /* CONFIG_KASAN_GENERIC */
> >
> > static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> > static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> > +static inline void kasan_record_aux_stack(void *ptr) {}
> >
> > #endif /* CONFIG_KASAN_GENERIC */
> >
> > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > index 06548e2ebb72..de872b6cc261 100644
> > --- a/kernel/rcu/tree.c
> > +++ b/kernel/rcu/tree.c
> > @@ -57,6 +57,7 @@
> > #include <linux/slab.h>
> > #include <linux/sched/isolation.h>
> > #include <linux/sched/clock.h>
> > +#include <linux/kasan.h>
> > #include "../time/tick-internal.h"
> >
> > #include "tree.h"
> > @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> > trace_rcu_callback(rcu_state.name, head,
> > rcu_segcblist_n_cbs(&rdp->cblist));
> >
> > + kasan_record_aux_stack(head);
> > +
> > /* Go handle any RCU core processing required. */
> > if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> > unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> > diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> > index 81f5464ea9e1..56a89291f1cc 100644
> > --- a/lib/Kconfig.kasan
> > +++ b/lib/Kconfig.kasan
> > @@ -58,6 +58,8 @@ config KASAN_GENERIC
> > For better error detection enable CONFIG_STACKTRACE.
> > Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> > (the resulting kernel does not boot).
> > + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> > + call stack. It doesn't increase the cost of memory consumption.
> >
> > config KASAN_SW_TAGS
> > bool "Software tag-based mode"
> > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > index 2906358e42f0..8bc618289bb1 100644
> > --- a/mm/kasan/common.c
> > +++ b/mm/kasan/common.c
> > @@ -41,7 +41,7 @@
> > #include "kasan.h"
> > #include "../slab.h"
> >
> > -static inline depot_stack_handle_t save_stack(gfp_t flags)
> > +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> > {
> > unsigned long entries[KASAN_STACK_DEPTH];
> > unsigned int nr_entries;
> > @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> > static inline void set_track(struct kasan_track *track, gfp_t flags)
> > {
> > track->pid = current->pid;
> > - track->stack = save_stack(flags);
> > + track->stack = kasan_save_stack(flags);
> > }
> >
> > void kasan_enable_current(void)
> > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > index 56ff8885fe2e..b86880c338e2 100644
> > --- a/mm/kasan/generic.c
> > +++ b/mm/kasan/generic.c
> > @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> > DEFINE_ASAN_SET_SHADOW(f3);
> > DEFINE_ASAN_SET_SHADOW(f5);
> > DEFINE_ASAN_SET_SHADOW(f8);
> > +
> > +void kasan_record_aux_stack(void *addr)
> > +{
> > + struct page *page = kasan_addr_to_page(addr);
> > + struct kmem_cache *cache;
> > + struct kasan_alloc_meta *alloc_info;
> > + void *object;
> > +
> > + if (!(page && PageSlab(page)))
> > + return;
> > +
> > + cache = page->slab_cache;
> > + object = nearest_obj(cache, page, addr);
> > + alloc_info = get_alloc_info(cache, object);
> > +
> > + if (!alloc_info->rcu_stack[0])
> > + /* record first call_rcu() call stack */
> > + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> > + else
> > + /* record last call_rcu() call stack */
> > + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> > +}
> > +
> > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > + u8 idx)
> > +{
> > + return container_of(&alloc_info->rcu_stack[idx],
> > + struct kasan_track, stack);
>
> This is not type safe, there is no kasan_track object. And we create a
> pointer to kasan_track just to carefully not treat it as valid
> kasan_track in print_track.
>
Good catch.
> This adds an unnecessary if to print_track. And does not seem to be
> useful/nice to print:
>
> First call_rcu() call stack:
> (stack is not available)
>
> Last call_rcu() call stack:
> (stack is not available)
>
> when no rcu stacks are memorized.
> Your intention seems to be to reuse 2 lines of code from print_track.
> I would factor them out into a function:
>
> static void print_stack(depot_stack_handle_t stack)
> {
> unsigned long *entries;
> unsigned int nr_entries;
>
> nr_entries = stack_depot_fetch(stack, &entries);
> stack_trace_print(entries, nr_entries, 0);
> }
>
> And then this can expressed as:
>
> if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> stack = alloc_info->rcu_stack[0];
> if (stack) {
> pr_err("First call_rcu() call stack:\n");
> print_stack(stack);
> pr_err("\n");
> }
> stack = alloc_info->rcu_stack[1];
> if (stack) {
> pr_err("Last call_rcu() call stack:\n");
> print_stack(stack);
> pr_err("\n");
> }
> }
>
rcu_stack isn't accessible from report.c; the access needs to stay in
generic.c, otherwise it is a build error unless we add #ifdef
CONFIG_KASAN_GENERIC.

Maybe we can make kasan_get_aux_stack() return NULL, and then have
print_stack() decide whether to print the aux stack?
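
E.g. a rough sketch of one way to do it (kasan_get_aux_stack() stays in
generic.c and just returns the raw handle, 0 meaning nothing recorded, so
report.c never touches rcu_stack directly):

/* mm/kasan/generic.c (sketch) */
depot_stack_handle_t kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
					 u8 idx)
{
	return alloc_info->rcu_stack[idx];
}

/* mm/kasan/report.c, in describe_object() (sketch) */
	depot_stack_handle_t stack;

	stack = kasan_get_aux_stack(alloc_info, 0);
	if (stack) {
		pr_err("First call_rcu() call stack:\n");
		print_stack(stack);
		pr_err("\n");
	}

The !CONFIG_KASAN_GENERIC stub in kasan.h would then return 0 as well, so
report.c still compiles for the tag-based modes without extra #ifdefs.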
>
> Or with another helper function it becomes:
>
> if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> print_aux_stack(alloc_info->rcu_stack[0], "First");
> print_aux_stack(alloc_info->rcu_stack[1], "Last");
> }
>
>
> > +}
> > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> > index e8f37199d885..1cc1fb7b0de3 100644
> > --- a/mm/kasan/kasan.h
> > +++ b/mm/kasan/kasan.h
> > @@ -96,15 +96,28 @@ struct kasan_track {
> > depot_stack_handle_t stack;
> > };
> >
> > +#ifdef CONFIG_KASAN_GENERIC
> > +#define SIZEOF_PTR sizeof(void *)
> > +#define KASAN_NR_RCU_CALL_STACKS 2
> > +#else /* CONFIG_KASAN_GENERIC */
> > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > #define KASAN_NR_FREE_STACKS 5
> > #else
> > #define KASAN_NR_FREE_STACKS 1
> > #endif
> > +#endif /* CONFIG_KASAN_GENERIC */
> >
> > struct kasan_alloc_meta {
> > struct kasan_track alloc_track;
> > +#ifdef CONFIG_KASAN_GENERIC
> > + /*
> > + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> > + * The free stack is stored into freed object.
> > + */
> > + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> > +#else
> > struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> > +#endif
> > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> > u8 free_track_idx;
> > @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
> >
> > struct page *kasan_addr_to_page(const void *addr);
> >
> > +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> > +
> > #if defined(CONFIG_KASAN_GENERIC) && \
> > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> > void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> > void quarantine_reduce(void);
> > void quarantine_remove_cache(struct kmem_cache *cache);
> > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > + u8 idx);
> > #else
> > static inline void quarantine_put(struct kasan_free_meta *info,
> > struct kmem_cache *cache) { }
> > static inline void quarantine_reduce(void) { }
> > static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> > +static inline struct kasan_track *kasan_get_aux_stack(
> > + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> > #endif
> >
> > #ifdef CONFIG_KASAN_SW_TAGS
> > diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> > index 80f23c9da6b0..f16a1a210815 100644
> > --- a/mm/kasan/report.c
> > +++ b/mm/kasan/report.c
> > @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> > kasan_enable_current();
> > }
> >
> > -static void print_track(struct kasan_track *track, const char *prefix)
> > +static void print_track(struct kasan_track *track, const char *prefix,
> > + bool is_callrcu)
> > {
> > - pr_err("%s by task %u:\n", prefix, track->pid);
> > + if (is_callrcu)
> > + pr_err("%s:\n", prefix);
> > + else
> > + pr_err("%s by task %u:\n", prefix, track->pid);
> > if (track->stack) {
> > unsigned long *entries;
> > unsigned int nr_entries;
> > @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> > if (cache->flags & SLAB_KASAN) {
> > struct kasan_track *free_track;
> >
> > - print_track(&alloc_info->alloc_track, "Allocated");
> > + print_track(&alloc_info->alloc_track, "Allocated", false);
> > pr_err("\n");
> > free_track = kasan_get_free_track(cache, object, tag);
> > - print_track(free_track, "Freed");
> > + print_track(free_track, "Freed", false);
> > pr_err("\n");
> > +
> > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > + free_track = kasan_get_aux_stack(alloc_info, 0);
> > + print_track(free_track, "First call_rcu() call stack", true);
> > + pr_err("\n");
> > + free_track = kasan_get_aux_stack(alloc_info, 1);
> > + print_track(free_track, "Last call_rcu() call stack", true);
> > + pr_err("\n");
> > + }
> > }
> >
> > describe_object_addr(cache, object, addr);
> > --
> > 2.18.0
> >
>
On Mon, May 11, 2020 at 2:54 PM Walter Wu <[email protected]> wrote:
>
> On Mon, 2020-05-11 at 14:20 +0200, 'Dmitry Vyukov' via kasan-dev wrote:
> > On Mon, May 11, 2020 at 4:31 AM Walter Wu <[email protected]> wrote:
> > >
> > > This feature will record first and last call_rcu() call stack and
> > > print two call_rcu() call stack in KASAN report.
> > >
> > > When call_rcu() is called, we store the call_rcu() call stack into
> > > slub alloc meta-data, so that KASAN report can print rcu stack.
> > >
> > > It doesn't increase the cost of memory consumption. Because we don't
> > > enlarge struct kasan_alloc_meta size.
> > > - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> > > - remove free track from kasan_alloc_meta, size is 8 bytes.
> > >
> > > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> > > [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
> > >
> > > Signed-off-by: Walter Wu <[email protected]>
> > > Suggested-by: Dmitry Vyukov <[email protected]>
> > > Cc: Andrey Ryabinin <[email protected]>
> > > Cc: Dmitry Vyukov <[email protected]>
> > > Cc: Alexander Potapenko <[email protected]>
> > > Cc: Andrew Morton <[email protected]>
> > > Cc: Paul E. McKenney <[email protected]>
> > > Cc: Josh Triplett <[email protected]>
> > > Cc: Mathieu Desnoyers <[email protected]>
> > > Cc: Lai Jiangshan <[email protected]>
> > > Cc: Joel Fernandes <[email protected]>
> > > ---
> > > include/linux/kasan.h | 2 ++
> > > kernel/rcu/tree.c | 3 +++
> > > lib/Kconfig.kasan | 2 ++
> > > mm/kasan/common.c | 4 ++--
> > > mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> > > mm/kasan/kasan.h | 19 +++++++++++++++++++
> > > mm/kasan/report.c | 21 +++++++++++++++++----
> > > 7 files changed, 74 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > > index 31314ca7c635..23b7ee00572d 100644
> > > --- a/include/linux/kasan.h
> > > +++ b/include/linux/kasan.h
> > > @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> > >
> > > void kasan_cache_shrink(struct kmem_cache *cache);
> > > void kasan_cache_shutdown(struct kmem_cache *cache);
> > > +void kasan_record_aux_stack(void *ptr);
> > >
> > > #else /* CONFIG_KASAN_GENERIC */
> > >
> > > static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> > > static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> > > +static inline void kasan_record_aux_stack(void *ptr) {}
> > >
> > > #endif /* CONFIG_KASAN_GENERIC */
> > >
> > > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > > index 06548e2ebb72..de872b6cc261 100644
> > > --- a/kernel/rcu/tree.c
> > > +++ b/kernel/rcu/tree.c
> > > @@ -57,6 +57,7 @@
> > > #include <linux/slab.h>
> > > #include <linux/sched/isolation.h>
> > > #include <linux/sched/clock.h>
> > > +#include <linux/kasan.h>
> > > #include "../time/tick-internal.h"
> > >
> > > #include "tree.h"
> > > @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> > > trace_rcu_callback(rcu_state.name, head,
> > > rcu_segcblist_n_cbs(&rdp->cblist));
> > >
> > > + kasan_record_aux_stack(head);
> > > +
> > > /* Go handle any RCU core processing required. */
> > > if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> > > unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> > > diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> > > index 81f5464ea9e1..56a89291f1cc 100644
> > > --- a/lib/Kconfig.kasan
> > > +++ b/lib/Kconfig.kasan
> > > @@ -58,6 +58,8 @@ config KASAN_GENERIC
> > > For better error detection enable CONFIG_STACKTRACE.
> > > Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> > > (the resulting kernel does not boot).
> > > + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> > > + call stack. It doesn't increase the cost of memory consumption.
> > >
> > > config KASAN_SW_TAGS
> > > bool "Software tag-based mode"
> > > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > > index 2906358e42f0..8bc618289bb1 100644
> > > --- a/mm/kasan/common.c
> > > +++ b/mm/kasan/common.c
> > > @@ -41,7 +41,7 @@
> > > #include "kasan.h"
> > > #include "../slab.h"
> > >
> > > -static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> > > {
> > > unsigned long entries[KASAN_STACK_DEPTH];
> > > unsigned int nr_entries;
> > > @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > static inline void set_track(struct kasan_track *track, gfp_t flags)
> > > {
> > > track->pid = current->pid;
> > > - track->stack = save_stack(flags);
> > > + track->stack = kasan_save_stack(flags);
> > > }
> > >
> > > void kasan_enable_current(void)
> > > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > > index 56ff8885fe2e..b86880c338e2 100644
> > > --- a/mm/kasan/generic.c
> > > +++ b/mm/kasan/generic.c
> > > @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> > > DEFINE_ASAN_SET_SHADOW(f3);
> > > DEFINE_ASAN_SET_SHADOW(f5);
> > > DEFINE_ASAN_SET_SHADOW(f8);
> > > +
> > > +void kasan_record_aux_stack(void *addr)
> > > +{
> > > + struct page *page = kasan_addr_to_page(addr);
> > > + struct kmem_cache *cache;
> > > + struct kasan_alloc_meta *alloc_info;
> > > + void *object;
> > > +
> > > + if (!(page && PageSlab(page)))
> > > + return;
> > > +
> > > + cache = page->slab_cache;
> > > + object = nearest_obj(cache, page, addr);
> > > + alloc_info = get_alloc_info(cache, object);
> > > +
> > > + if (!alloc_info->rcu_stack[0])
> > > + /* record first call_rcu() call stack */
> > > + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> > > + else
> > > + /* record last call_rcu() call stack */
> > > + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> > > +}
> > > +
> > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > + u8 idx)
> > > +{
> > > + return container_of(&alloc_info->rcu_stack[idx],
> > > + struct kasan_track, stack);
> >
> > This is not type safe, there is no kasan_track object. And we create a
> > pointer to kasan_track just to carefully not treat it as valid
> > kasan_track in print_track.
> >
>
> Good catch.
>
> > This adds an unnecessary if to print_track. And does not seem to be
> > useful/nice to print:
> >
> > First call_rcu() call stack:
> > (stack is not available)
> >
> > Last call_rcu() call stack:
> > (stack is not available)
> >
> > when no rcu stacks are memorized.
> > Your intention seems to be to reuse 2 lines of code from print_track.
> > I would factor them out into a function:
> >
> > static void print_stack(depot_stack_handle_t stack)
> > {
> > unsigned long *entries;
> > unsigned int nr_entries;
> >
> > nr_entries = stack_depot_fetch(stack, &entries);
> > stack_trace_print(entries, nr_entries, 0);
> > }
> >
> > And then this can expressed as:
> >
> > if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > stack = alloc_info->rcu_stack[0];
> > if (stack) {
> > pr_err("First call_rcu() call stack:\n");
> > print_stack(stack);
> > pr_err("\n");
> > }
> > stack = alloc_info->rcu_stack[1];
> > if (stack) {
> > pr_err("Last call_rcu() call stack:\n");
> > print_stack(stack);
> > pr_err("\n");
> > }
> > }
> >
>
> rcu_stack isn't accessible from report.c; the access needs to stay in
> generic.c, otherwise it is a build error unless we add #ifdef
> CONFIG_KASAN_GENERIC.
>
> Maybe we can make kasan_get_aux_stack() return NULL, and then have
> print_stack() decide whether to print the aux stack?
I would put all this under #ifdef CONFIG_KASAN_GENERIC now.
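
I.e. roughly this in describe_object() (sketch only, using the
print_stack() helper from above and the rcu_stack field from the patch):

#ifdef CONFIG_KASAN_GENERIC
	if (alloc_info->rcu_stack[0]) {
		pr_err("First call_rcu() call stack:\n");
		print_stack(alloc_info->rcu_stack[0]);
		pr_err("\n");
	}
	if (alloc_info->rcu_stack[1]) {
		pr_err("Last call_rcu() call stack:\n");
		print_stack(alloc_info->rcu_stack[1]);
		pr_err("\n");
	}
#endif
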
> > Or with another helper function it becomes:
> >
> > if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > print_aux_stack(alloc_info->rcu_stack[0], "First");
> > print_aux_stack(alloc_info->rcu_stack[1], "Last");
> > }
> >
> >
> > > +}
> > > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> > > index e8f37199d885..1cc1fb7b0de3 100644
> > > --- a/mm/kasan/kasan.h
> > > +++ b/mm/kasan/kasan.h
> > > @@ -96,15 +96,28 @@ struct kasan_track {
> > > depot_stack_handle_t stack;
> > > };
> > >
> > > +#ifdef CONFIG_KASAN_GENERIC
> > > +#define SIZEOF_PTR sizeof(void *)
> > > +#define KASAN_NR_RCU_CALL_STACKS 2
> > > +#else /* CONFIG_KASAN_GENERIC */
> > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > #define KASAN_NR_FREE_STACKS 5
> > > #else
> > > #define KASAN_NR_FREE_STACKS 1
> > > #endif
> > > +#endif /* CONFIG_KASAN_GENERIC */
> > >
> > > struct kasan_alloc_meta {
> > > struct kasan_track alloc_track;
> > > +#ifdef CONFIG_KASAN_GENERIC
> > > + /*
> > > + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> > > + * The free stack is stored into freed object.
> > > + */
> > > + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> > > +#else
> > > struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> > > +#endif
> > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> > > u8 free_track_idx;
> > > @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
> > >
> > > struct page *kasan_addr_to_page(const void *addr);
> > >
> > > +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> > > +
> > > #if defined(CONFIG_KASAN_GENERIC) && \
> > > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> > > void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> > > void quarantine_reduce(void);
> > > void quarantine_remove_cache(struct kmem_cache *cache);
> > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > + u8 idx);
> > > #else
> > > static inline void quarantine_put(struct kasan_free_meta *info,
> > > struct kmem_cache *cache) { }
> > > static inline void quarantine_reduce(void) { }
> > > static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> > > +static inline struct kasan_track *kasan_get_aux_stack(
> > > + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> > > #endif
> > >
> > > #ifdef CONFIG_KASAN_SW_TAGS
> > > diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> > > index 80f23c9da6b0..f16a1a210815 100644
> > > --- a/mm/kasan/report.c
> > > +++ b/mm/kasan/report.c
> > > @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> > > kasan_enable_current();
> > > }
> > >
> > > -static void print_track(struct kasan_track *track, const char *prefix)
> > > +static void print_track(struct kasan_track *track, const char *prefix,
> > > + bool is_callrcu)
> > > {
> > > - pr_err("%s by task %u:\n", prefix, track->pid);
> > > + if (is_callrcu)
> > > + pr_err("%s:\n", prefix);
> > > + else
> > > + pr_err("%s by task %u:\n", prefix, track->pid);
> > > if (track->stack) {
> > > unsigned long *entries;
> > > unsigned int nr_entries;
> > > @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> > > if (cache->flags & SLAB_KASAN) {
> > > struct kasan_track *free_track;
> > >
> > > - print_track(&alloc_info->alloc_track, "Allocated");
> > > + print_track(&alloc_info->alloc_track, "Allocated", false);
> > > pr_err("\n");
> > > free_track = kasan_get_free_track(cache, object, tag);
> > > - print_track(free_track, "Freed");
> > > + print_track(free_track, "Freed", false);
> > > pr_err("\n");
> > > +
> > > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > > + free_track = kasan_get_aux_stack(alloc_info, 0);
> > > + print_track(free_track, "First call_rcu() call stack", true);
> > > + pr_err("\n");
> > > + free_track = kasan_get_aux_stack(alloc_info, 1);
> > > + print_track(free_track, "Last call_rcu() call stack", true);
> > > + pr_err("\n");
> > > + }
> > > }
> > >
> > > describe_object_addr(cache, object, addr);
> > > --
> > > 2.18.0
> > >
On Mon, May 11, 2020 at 3:29 PM Walter Wu <[email protected]> wrote:
> > > This feature will record first and last call_rcu() call stack and
> > > print two call_rcu() call stack in KASAN report.
> > >
> > > When call_rcu() is called, we store the call_rcu() call stack into
> > > slub alloc meta-data, so that KASAN report can print rcu stack.
> > >
> > > It doesn't increase the cost of memory consumption. Because we don't
> > > enlarge struct kasan_alloc_meta size.
> > > - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> > > - remove free track from kasan_alloc_meta, size is 8 bytes.
> > >
> > > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> > > [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
> > >
> > > Signed-off-by: Walter Wu <[email protected]>
> > > Suggested-by: Dmitry Vyukov <[email protected]>
> > > Cc: Andrey Ryabinin <[email protected]>
> > > Cc: Dmitry Vyukov <[email protected]>
> > > Cc: Alexander Potapenko <[email protected]>
> > > Cc: Andrew Morton <[email protected]>
> > > Cc: Paul E. McKenney <[email protected]>
> > > Cc: Josh Triplett <[email protected]>
> > > Cc: Mathieu Desnoyers <[email protected]>
> > > Cc: Lai Jiangshan <[email protected]>
> > > Cc: Joel Fernandes <[email protected]>
> > > ---
> > > include/linux/kasan.h | 2 ++
> > > kernel/rcu/tree.c | 3 +++
> > > lib/Kconfig.kasan | 2 ++
> > > mm/kasan/common.c | 4 ++--
> > > mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> > > mm/kasan/kasan.h | 19 +++++++++++++++++++
> > > mm/kasan/report.c | 21 +++++++++++++++++----
> > > 7 files changed, 74 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > > index 31314ca7c635..23b7ee00572d 100644
> > > --- a/include/linux/kasan.h
> > > +++ b/include/linux/kasan.h
> > > @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> > >
> > > void kasan_cache_shrink(struct kmem_cache *cache);
> > > void kasan_cache_shutdown(struct kmem_cache *cache);
> > > +void kasan_record_aux_stack(void *ptr);
> > >
> > > #else /* CONFIG_KASAN_GENERIC */
> > >
> > > static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> > > static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> > > +static inline void kasan_record_aux_stack(void *ptr) {}
> > >
> > > #endif /* CONFIG_KASAN_GENERIC */
> > >
> > > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > > index 06548e2ebb72..de872b6cc261 100644
> > > --- a/kernel/rcu/tree.c
> > > +++ b/kernel/rcu/tree.c
> > > @@ -57,6 +57,7 @@
> > > #include <linux/slab.h>
> > > #include <linux/sched/isolation.h>
> > > #include <linux/sched/clock.h>
> > > +#include <linux/kasan.h>
> > > #include "../time/tick-internal.h"
> > >
> > > #include "tree.h"
> > > @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> > > trace_rcu_callback(rcu_state.name, head,
> > > rcu_segcblist_n_cbs(&rdp->cblist));
> > >
> > > + kasan_record_aux_stack(head);
> > > +
> > > /* Go handle any RCU core processing required. */
> > > if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> > > unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> > > diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> > > index 81f5464ea9e1..56a89291f1cc 100644
> > > --- a/lib/Kconfig.kasan
> > > +++ b/lib/Kconfig.kasan
> > > @@ -58,6 +58,8 @@ config KASAN_GENERIC
> > > For better error detection enable CONFIG_STACKTRACE.
> > > Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> > > (the resulting kernel does not boot).
> > > + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> > > + call stack. It doesn't increase the cost of memory consumption.
> > >
> > > config KASAN_SW_TAGS
> > > bool "Software tag-based mode"
> > > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > > index 2906358e42f0..8bc618289bb1 100644
> > > --- a/mm/kasan/common.c
> > > +++ b/mm/kasan/common.c
> > > @@ -41,7 +41,7 @@
> > > #include "kasan.h"
> > > #include "../slab.h"
> > >
> > > -static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> > > {
> > > unsigned long entries[KASAN_STACK_DEPTH];
> > > unsigned int nr_entries;
> > > @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > static inline void set_track(struct kasan_track *track, gfp_t flags)
> > > {
> > > track->pid = current->pid;
> > > - track->stack = save_stack(flags);
> > > + track->stack = kasan_save_stack(flags);
> > > }
> > >
> > > void kasan_enable_current(void)
> > > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > > index 56ff8885fe2e..b86880c338e2 100644
> > > --- a/mm/kasan/generic.c
> > > +++ b/mm/kasan/generic.c
> > > @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> > > DEFINE_ASAN_SET_SHADOW(f3);
> > > DEFINE_ASAN_SET_SHADOW(f5);
> > > DEFINE_ASAN_SET_SHADOW(f8);
> > > +
> > > +void kasan_record_aux_stack(void *addr)
> > > +{
> > > + struct page *page = kasan_addr_to_page(addr);
> > > + struct kmem_cache *cache;
> > > + struct kasan_alloc_meta *alloc_info;
> > > + void *object;
> > > +
> > > + if (!(page && PageSlab(page)))
> > > + return;
> > > +
> > > + cache = page->slab_cache;
> > > + object = nearest_obj(cache, page, addr);
> > > + alloc_info = get_alloc_info(cache, object);
> > > +
> > > + if (!alloc_info->rcu_stack[0])
> > > + /* record first call_rcu() call stack */
> > > + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> > > + else
> > > + /* record last call_rcu() call stack */
> > > + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> > > +}
> > > +
> > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > + u8 idx)
> > > +{
> > > + return container_of(&alloc_info->rcu_stack[idx],
> > > + struct kasan_track, stack);
> > > +}
> > > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> > > index e8f37199d885..1cc1fb7b0de3 100644
> > > --- a/mm/kasan/kasan.h
> > > +++ b/mm/kasan/kasan.h
> > > @@ -96,15 +96,28 @@ struct kasan_track {
> > > depot_stack_handle_t stack;
> > > };
> > >
> > > +#ifdef CONFIG_KASAN_GENERIC
> > > +#define SIZEOF_PTR sizeof(void *)
> >
> > Please move this to generic.c closer to kasan_set_free_info.
> > Unnecessary in the header.
> >
> > > +#define KASAN_NR_RCU_CALL_STACKS 2
> >
> > Since KASAN_NR_RCU_CALL_STACKS is only used once below, you could as
> > well use 2 instead of it.
> > Reduces level of indirection and cognitive load.
> >
> > > +#else /* CONFIG_KASAN_GENERIC */
> > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > #define KASAN_NR_FREE_STACKS 5
> > > #else
> > > #define KASAN_NR_FREE_STACKS 1
> > > #endif
> > > +#endif /* CONFIG_KASAN_GENERIC */
> > >
> > > struct kasan_alloc_meta {
> > > struct kasan_track alloc_track;
> > > +#ifdef CONFIG_KASAN_GENERIC
> > > + /*
> > > + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> > > + * The free stack is stored into freed object.
> > > + */
> > > + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> > > +#else
> > > struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> > > +#endif
> > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> > > u8 free_track_idx;
> > > @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
> > >
> > > struct page *kasan_addr_to_page(const void *addr);
> > >
> > > +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> > > +
> > > #if defined(CONFIG_KASAN_GENERIC) && \
> > > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> > > void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> > > void quarantine_reduce(void);
> > > void quarantine_remove_cache(struct kmem_cache *cache);
> > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > + u8 idx);
> > > #else
> > > static inline void quarantine_put(struct kasan_free_meta *info,
> > > struct kmem_cache *cache) { }
> > > static inline void quarantine_reduce(void) { }
> > > static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> > > +static inline struct kasan_track *kasan_get_aux_stack(
> > > + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> > > #endif
> > >
> > > #ifdef CONFIG_KASAN_SW_TAGS
> > > diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> > > index 80f23c9da6b0..f16a1a210815 100644
> > > --- a/mm/kasan/report.c
> > > +++ b/mm/kasan/report.c
> > > @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> > > kasan_enable_current();
> > > }
> > >
> > > -static void print_track(struct kasan_track *track, const char *prefix)
> > > +static void print_track(struct kasan_track *track, const char *prefix,
> > > + bool is_callrcu)
> > > {
> > > - pr_err("%s by task %u:\n", prefix, track->pid);
> > > + if (is_callrcu)
> > > + pr_err("%s:\n", prefix);
> > > + else
> > > + pr_err("%s by task %u:\n", prefix, track->pid);
> > > if (track->stack) {
> > > unsigned long *entries;
> > > unsigned int nr_entries;
> > > @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> > > if (cache->flags & SLAB_KASAN) {
> > > struct kasan_track *free_track;
> > >
> > > - print_track(&alloc_info->alloc_track, "Allocated");
> > > + print_track(&alloc_info->alloc_track, "Allocated", false);
> > > pr_err("\n");
> > > free_track = kasan_get_free_track(cache, object, tag);
> > > - print_track(free_track, "Freed");
> > > + print_track(free_track, "Freed", false);
> > > pr_err("\n");
> > > +
> > > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > > + free_track = kasan_get_aux_stack(alloc_info, 0);
> > > + print_track(free_track, "First call_rcu() call stack", true);
> > > + pr_err("\n");
> > > + free_track = kasan_get_aux_stack(alloc_info, 1);
> > > + print_track(free_track, "Last call_rcu() call stack", true);
> > > + pr_err("\n");
> > > + }
> > > }
> > >
> > > describe_object_addr(cache, object, addr);
>
> Some higher level comments.
>
> 1. I think we need to put the free track into kasan_free_meta as it
> was before. It looks like exactly the place for it. We have logic to
> properly place it and to do the casts.
>
>
> If the free track is put into kasan_free_meta, doesn't that increase the
> slab metadata size? Our original goal was not to enlarge it.
Are you sure it will increase the object size?
I think we overlap kasan_free_meta with the object as well. The only
cases where we don't overlap kasan_free_meta with the object are
SLAB_TYPESAFE_BY_RCU || cache->ctor. But these are rare, and it should
only affect small objects with small redzones.
And I think we currently have a bug for these objects: we check
KASAN_KMALLOC_FREE and then assume the object contains a free stack,
but objects with a ctor still contain live object data and we don't
store a free stack in them.
Such objects can be both free and still contain user data.
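
Roughly, the rule we are talking about is (not the exact kernel code,
just the condition; free_meta_fits_in_object is a made-up name):

static bool free_meta_fits_in_object(struct kmem_cache *cache)
{
	return !(cache->flags & SLAB_TYPESAFE_BY_RCU) && !cache->ctor &&
	       cache->object_size >= sizeof(struct kasan_free_meta);
}

So in the common case the free track goes into memory we already have.
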
> 2. We need to zero aux stacks when we reallocate the object. Otherwise
> we print confusing garbage.
>
> I have a local unit test for use-after-free with RCU, but it is hard to test the printing of confusing garbage, because we would need to get back the same object (old pointer == new pointer), and in generic KASAN that is not easy to arrange.
>
> 3. __kasan_slab_free now contains a window of inconsistency when it
> marked the object as KASAN_KMALLOC_FREE, but did not store the free
> track yet. If another thread prints a report now, it will print random
> garbage.
>
>
> It is possible, but the window is tiny. We set the free track immediately after writing KASAN_KMALLOC_FREE.
It is small. But (1) why do we want to allow it at all, and (2) there is
actually a more serious problem: if we mark an object as
KASAN_KMALLOC_FREE but don't do kasan_set_free_info() (because the
object has a ctor), we will treat live object data as a free track. We
need to fix it anyway.
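
One way to close the window would be to store the free info before the
object is marked as freed, e.g. (just a sketch of the reordering in
__kasan_slab_free(); memory ordering between the two stores would still
need thought):

	/* record the free stack first ... */
	kasan_set_free_info(cache, object, tag);
	/* ... and only then mark the object as freed */
	kasan_poison_shadow(object, round_up(cache->object_size,
			KASAN_SHADOW_SCALE_SIZE), KASAN_KMALLOC_FREE);
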
> 4. We need some tests. At least (2) should be visible on tests.
>
>
> Ok.
On Mon, May 11, 2020 at 10:31:11AM +0800, Walter Wu wrote:
> This feature will record first and last call_rcu() call stack and
> print two call_rcu() call stack in KASAN report.
Suppose that a given rcu_head structure is passed to call_rcu(), then
the grace period elapses, the callback is invoked, and the enclosing
data structure is freed. But then that same region of memory is
immediately reallocated as the same type of structure and again
passed to call_rcu(), and that this cycle repeats several times.
Would the first call stack forever be associated with the first
call_rcu() in this series? If so, wouldn't the last two usually
be the most useful? Or am I unclear on the use case?
> When call_rcu() is called, we store the call_rcu() call stack into
> slub alloc meta-data, so that KASAN report can print rcu stack.
>
> It doesn't increase the cost of memory consumption. Because we don't
> enlarge struct kasan_alloc_meta size.
> - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> - remove free track from kasan_alloc_meta, size is 8 bytes.
>
> [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
>
> Signed-off-by: Walter Wu <[email protected]>
> Suggested-by: Dmitry Vyukov <[email protected]>
> Cc: Andrey Ryabinin <[email protected]>
> Cc: Dmitry Vyukov <[email protected]>
> Cc: Alexander Potapenko <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Paul E. McKenney <[email protected]>
> Cc: Josh Triplett <[email protected]>
> Cc: Mathieu Desnoyers <[email protected]>
> Cc: Lai Jiangshan <[email protected]>
> Cc: Joel Fernandes <[email protected]>
> ---
> include/linux/kasan.h | 2 ++
> kernel/rcu/tree.c | 3 +++
> lib/Kconfig.kasan | 2 ++
> mm/kasan/common.c | 4 ++--
> mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> mm/kasan/kasan.h | 19 +++++++++++++++++++
> mm/kasan/report.c | 21 +++++++++++++++++----
> 7 files changed, 74 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 31314ca7c635..23b7ee00572d 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
>
> void kasan_cache_shrink(struct kmem_cache *cache);
> void kasan_cache_shutdown(struct kmem_cache *cache);
> +void kasan_record_aux_stack(void *ptr);
>
> #else /* CONFIG_KASAN_GENERIC */
>
> static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> +static inline void kasan_record_aux_stack(void *ptr) {}
>
> #endif /* CONFIG_KASAN_GENERIC */
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 06548e2ebb72..de872b6cc261 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -57,6 +57,7 @@
> #include <linux/slab.h>
> #include <linux/sched/isolation.h>
> #include <linux/sched/clock.h>
> +#include <linux/kasan.h>
> #include "../time/tick-internal.h"
>
> #include "tree.h"
> @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> trace_rcu_callback(rcu_state.name, head,
> rcu_segcblist_n_cbs(&rdp->cblist));
>
> + kasan_record_aux_stack(head);
Just for the record, at this point we have not yet queued the callback.
We have also not yet disabled interrupts. Which might be OK, but I
figured I should call out the possibility of moving this down a few
lines to follow the local_irq_save().
If someone incorrectly invokes call_rcu() concurrently on this same
region of memory, possibly from an interrupt handler, we are OK with
corrupting the stack traces, right?
But what happens if a given structure has more than one rcu_head
structure? In that case, RCU would be just fine with it being
concurrently passed to different call_rcu() invocations as long as the
two invocations didn't both use the same rcu_head structure. (In that
case, they had best not be both freeing the object, and if even one of
them is freeing the object, coordination is necessary.)
If this is a problem, one approach would be to move the
kasan_record_aux_stack(head) call to kfree_rcu(). After all, it is
definitely illegal to pass the same memory to a pair of kfree_rcu()
invocations! ;-)
Thanx, Paul
> +
> /* Go handle any RCU core processing required. */
> if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> index 81f5464ea9e1..56a89291f1cc 100644
> --- a/lib/Kconfig.kasan
> +++ b/lib/Kconfig.kasan
> @@ -58,6 +58,8 @@ config KASAN_GENERIC
> For better error detection enable CONFIG_STACKTRACE.
> Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> (the resulting kernel does not boot).
> + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> + call stack. It doesn't increase the cost of memory consumption.
>
> config KASAN_SW_TAGS
> bool "Software tag-based mode"
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 2906358e42f0..8bc618289bb1 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -41,7 +41,7 @@
> #include "kasan.h"
> #include "../slab.h"
>
> -static inline depot_stack_handle_t save_stack(gfp_t flags)
> +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> {
> unsigned long entries[KASAN_STACK_DEPTH];
> unsigned int nr_entries;
> @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> static inline void set_track(struct kasan_track *track, gfp_t flags)
> {
> track->pid = current->pid;
> - track->stack = save_stack(flags);
> + track->stack = kasan_save_stack(flags);
> }
>
> void kasan_enable_current(void)
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index 56ff8885fe2e..b86880c338e2 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> DEFINE_ASAN_SET_SHADOW(f3);
> DEFINE_ASAN_SET_SHADOW(f5);
> DEFINE_ASAN_SET_SHADOW(f8);
> +
> +void kasan_record_aux_stack(void *addr)
> +{
> + struct page *page = kasan_addr_to_page(addr);
> + struct kmem_cache *cache;
> + struct kasan_alloc_meta *alloc_info;
> + void *object;
> +
> + if (!(page && PageSlab(page)))
> + return;
> +
> + cache = page->slab_cache;
> + object = nearest_obj(cache, page, addr);
> + alloc_info = get_alloc_info(cache, object);
> +
> + if (!alloc_info->rcu_stack[0])
> + /* record first call_rcu() call stack */
> + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> + else
> + /* record last call_rcu() call stack */
> + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> +}
> +
> +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> + u8 idx)
> +{
> + return container_of(&alloc_info->rcu_stack[idx],
> + struct kasan_track, stack);
> +}
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index e8f37199d885..1cc1fb7b0de3 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -96,15 +96,28 @@ struct kasan_track {
> depot_stack_handle_t stack;
> };
>
> +#ifdef CONFIG_KASAN_GENERIC
> +#define SIZEOF_PTR sizeof(void *)
> +#define KASAN_NR_RCU_CALL_STACKS 2
> +#else /* CONFIG_KASAN_GENERIC */
> #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> #define KASAN_NR_FREE_STACKS 5
> #else
> #define KASAN_NR_FREE_STACKS 1
> #endif
> +#endif /* CONFIG_KASAN_GENERIC */
>
> struct kasan_alloc_meta {
> struct kasan_track alloc_track;
> +#ifdef CONFIG_KASAN_GENERIC
> + /*
> + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> + * The free stack is stored into freed object.
> + */
> + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> +#else
> struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> +#endif
> #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> u8 free_track_idx;
> @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
>
> struct page *kasan_addr_to_page(const void *addr);
>
> +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> +
> #if defined(CONFIG_KASAN_GENERIC) && \
> (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> void quarantine_reduce(void);
> void quarantine_remove_cache(struct kmem_cache *cache);
> +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> + u8 idx);
> #else
> static inline void quarantine_put(struct kasan_free_meta *info,
> struct kmem_cache *cache) { }
> static inline void quarantine_reduce(void) { }
> static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> +static inline struct kasan_track *kasan_get_aux_stack(
> + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> #endif
>
> #ifdef CONFIG_KASAN_SW_TAGS
> diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> index 80f23c9da6b0..f16a1a210815 100644
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> kasan_enable_current();
> }
>
> -static void print_track(struct kasan_track *track, const char *prefix)
> +static void print_track(struct kasan_track *track, const char *prefix,
> + bool is_callrcu)
> {
> - pr_err("%s by task %u:\n", prefix, track->pid);
> + if (is_callrcu)
> + pr_err("%s:\n", prefix);
> + else
> + pr_err("%s by task %u:\n", prefix, track->pid);
> if (track->stack) {
> unsigned long *entries;
> unsigned int nr_entries;
> @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> if (cache->flags & SLAB_KASAN) {
> struct kasan_track *free_track;
>
> - print_track(&alloc_info->alloc_track, "Allocated");
> + print_track(&alloc_info->alloc_track, "Allocated", false);
> pr_err("\n");
> free_track = kasan_get_free_track(cache, object, tag);
> - print_track(free_track, "Freed");
> + print_track(free_track, "Freed", false);
> pr_err("\n");
> +
> + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> + free_track = kasan_get_aux_stack(alloc_info, 0);
> + print_track(free_track, "First call_rcu() call stack", true);
> + pr_err("\n");
> + free_track = kasan_get_aux_stack(alloc_info, 1);
> + print_track(free_track, "Last call_rcu() call stack", true);
> + pr_err("\n");
> + }
> }
>
> describe_object_addr(cache, object, addr);
> --
> 2.18.0
On Mon, 2020-05-11 at 11:05 -0700, Paul E. McKenney wrote:
> On Mon, May 11, 2020 at 10:31:11AM +0800, Walter Wu wrote:
> > This feature will record first and last call_rcu() call stack and
> > print two call_rcu() call stack in KASAN report.
>
> Suppose that a given rcu_head structure is passed to call_rcu(), then
> the grace period elapses, the callback is invoked, and the enclosing
> data structure is freed. But then that same region of memory is
> immediately reallocated as the same type of structure and again
> passed to call_rcu(), and that this cycle repeats several times.
>
> Would the first call stack forever be associated with the first
> call_rcu() in this series? If so, wouldn't the last two usually
> be the most useful? Or am I unclear on the use case?
>
The first call stack is not associated with the first call_rcu() forever;
if the object is freed and reallocated, the first call stack will be
replaced by the one recorded for the new object.
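As a side note, a minimal sketch of what that reset could look like, assuming
the aux stacks are cleared when the slab object is handed out again
(get_alloc_info() and rcu_stack[] are the names from the patch; the exact hook
point in the allocation path is an assumption):

/*
 * Hypothetical sketch: clear the recorded call_rcu() stacks when the
 * object is reallocated, so a reused object starts with empty slots.
 */
static void kasan_clear_aux_stacks(struct kmem_cache *cache, void *object)
{
	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);

	alloc_info->rcu_stack[0] = 0;	/* depot handle 0 means "not recorded" */
	alloc_info->rcu_stack[1] = 0;
}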
> > When call_rcu() is called, we store the call_rcu() call stack into
> > slub alloc meta-data, so that KASAN report can print rcu stack.
> >
> > It doesn't increase the cost of memory consumption. Because we don't
> > enlarge struct kasan_alloc_meta size.
> > - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> > - remove free track from kasan_alloc_meta, size is 8 bytes.
> >
> > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> > [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
> >
> > Signed-off-by: Walter Wu <[email protected]>
> > Suggested-by: Dmitry Vyukov <[email protected]>
> > Cc: Andrey Ryabinin <[email protected]>
> > Cc: Dmitry Vyukov <[email protected]>
> > Cc: Alexander Potapenko <[email protected]>
> > Cc: Andrew Morton <[email protected]>
> > Cc: Paul E. McKenney <[email protected]>
> > Cc: Josh Triplett <[email protected]>
> > Cc: Mathieu Desnoyers <[email protected]>
> > Cc: Lai Jiangshan <[email protected]>
> > Cc: Joel Fernandes <[email protected]>
> > ---
> > include/linux/kasan.h | 2 ++
> > kernel/rcu/tree.c | 3 +++
> > lib/Kconfig.kasan | 2 ++
> > mm/kasan/common.c | 4 ++--
> > mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> > mm/kasan/kasan.h | 19 +++++++++++++++++++
> > mm/kasan/report.c | 21 +++++++++++++++++----
> > 7 files changed, 74 insertions(+), 6 deletions(-)
> >
> > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > index 31314ca7c635..23b7ee00572d 100644
> > --- a/include/linux/kasan.h
> > +++ b/include/linux/kasan.h
> > @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> >
> > void kasan_cache_shrink(struct kmem_cache *cache);
> > void kasan_cache_shutdown(struct kmem_cache *cache);
> > +void kasan_record_aux_stack(void *ptr);
> >
> > #else /* CONFIG_KASAN_GENERIC */
> >
> > static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> > static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> > +static inline void kasan_record_aux_stack(void *ptr) {}
> >
> > #endif /* CONFIG_KASAN_GENERIC */
> >
> > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > index 06548e2ebb72..de872b6cc261 100644
> > --- a/kernel/rcu/tree.c
> > +++ b/kernel/rcu/tree.c
> > @@ -57,6 +57,7 @@
> > #include <linux/slab.h>
> > #include <linux/sched/isolation.h>
> > #include <linux/sched/clock.h>
> > +#include <linux/kasan.h>
> > #include "../time/tick-internal.h"
> >
> > #include "tree.h"
> > @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> > trace_rcu_callback(rcu_state.name, head,
> > rcu_segcblist_n_cbs(&rdp->cblist));
> >
> > + kasan_record_aux_stack(head);
>
> Just for the record, at this point we have not yet queued the callback.
> We have also not yet disabled interrupts. Which might be OK, but I
> figured I should call out the possibility of moving this down a few
> lines to follow the local_irq_save().
>
We intend to do it.
> If someone incorrectly invokes call_rcu() concurrently on this
> same region of memory, possibly from an interrupt handler, we are OK
> corrupting the stack traces, right?
>
Yes, and the incorrect call_rcu() invocation should still be recorded.
> But what happens if a given structure has more than one rcu_head
> structure? In that case, RCU would be just fine with it being
> concurrently passed to different call_rcu() invocations as long as the
> two invocations didn't both use the same rcu_head structure. (In that
> case, they had best not be both freeing the object, and if even one of
> them is freeing the object, coordination is necessary.)
>
> If this is a problem, one approach would be to move the
> kasan_record_aux_stack(head) call to kfree_rcu(). After all, it is
> definitely illegal to pass the same memory to a pair of kfree_rcu()
> invocations! ;-)
>
kasan_record_aux_stack(head) is simple: it only records the call stack
for the object that contains 'head'.
Thanks.
> Thanx, Paul
>
> > +
> > /* Go handle any RCU core processing required. */
> > if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> > unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> > diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> > index 81f5464ea9e1..56a89291f1cc 100644
> > --- a/lib/Kconfig.kasan
> > +++ b/lib/Kconfig.kasan
> > @@ -58,6 +58,8 @@ config KASAN_GENERIC
> > For better error detection enable CONFIG_STACKTRACE.
> > Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> > (the resulting kernel does not boot).
> > + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> > + call stack. It doesn't increase the cost of memory consumption.
> >
> > config KASAN_SW_TAGS
> > bool "Software tag-based mode"
> > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > index 2906358e42f0..8bc618289bb1 100644
> > --- a/mm/kasan/common.c
> > +++ b/mm/kasan/common.c
> > @@ -41,7 +41,7 @@
> > #include "kasan.h"
> > #include "../slab.h"
> >
> > -static inline depot_stack_handle_t save_stack(gfp_t flags)
> > +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> > {
> > unsigned long entries[KASAN_STACK_DEPTH];
> > unsigned int nr_entries;
> > @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> > static inline void set_track(struct kasan_track *track, gfp_t flags)
> > {
> > track->pid = current->pid;
> > - track->stack = save_stack(flags);
> > + track->stack = kasan_save_stack(flags);
> > }
> >
> > void kasan_enable_current(void)
> > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > index 56ff8885fe2e..b86880c338e2 100644
> > --- a/mm/kasan/generic.c
> > +++ b/mm/kasan/generic.c
> > @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> > DEFINE_ASAN_SET_SHADOW(f3);
> > DEFINE_ASAN_SET_SHADOW(f5);
> > DEFINE_ASAN_SET_SHADOW(f8);
> > +
> > +void kasan_record_aux_stack(void *addr)
> > +{
> > + struct page *page = kasan_addr_to_page(addr);
> > + struct kmem_cache *cache;
> > + struct kasan_alloc_meta *alloc_info;
> > + void *object;
> > +
> > + if (!(page && PageSlab(page)))
> > + return;
> > +
> > + cache = page->slab_cache;
> > + object = nearest_obj(cache, page, addr);
> > + alloc_info = get_alloc_info(cache, object);
> > +
> > + if (!alloc_info->rcu_stack[0])
> > + /* record first call_rcu() call stack */
> > + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> > + else
> > + /* record last call_rcu() call stack */
> > + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> > +}
> > +
> > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > + u8 idx)
> > +{
> > + return container_of(&alloc_info->rcu_stack[idx],
> > + struct kasan_track, stack);
> > +}
> > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> > index e8f37199d885..1cc1fb7b0de3 100644
> > --- a/mm/kasan/kasan.h
> > +++ b/mm/kasan/kasan.h
> > @@ -96,15 +96,28 @@ struct kasan_track {
> > depot_stack_handle_t stack;
> > };
> >
> > +#ifdef CONFIG_KASAN_GENERIC
> > +#define SIZEOF_PTR sizeof(void *)
> > +#define KASAN_NR_RCU_CALL_STACKS 2
> > +#else /* CONFIG_KASAN_GENERIC */
> > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > #define KASAN_NR_FREE_STACKS 5
> > #else
> > #define KASAN_NR_FREE_STACKS 1
> > #endif
> > +#endif /* CONFIG_KASAN_GENERIC */
> >
> > struct kasan_alloc_meta {
> > struct kasan_track alloc_track;
> > +#ifdef CONFIG_KASAN_GENERIC
> > + /*
> > + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> > + * The free stack is stored into freed object.
> > + */
> > + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> > +#else
> > struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> > +#endif
> > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> > u8 free_track_idx;
> > @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
> >
> > struct page *kasan_addr_to_page(const void *addr);
> >
> > +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> > +
> > #if defined(CONFIG_KASAN_GENERIC) && \
> > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> > void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> > void quarantine_reduce(void);
> > void quarantine_remove_cache(struct kmem_cache *cache);
> > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > + u8 idx);
> > #else
> > static inline void quarantine_put(struct kasan_free_meta *info,
> > struct kmem_cache *cache) { }
> > static inline void quarantine_reduce(void) { }
> > static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> > +static inline struct kasan_track *kasan_get_aux_stack(
> > + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> > #endif
> >
> > #ifdef CONFIG_KASAN_SW_TAGS
> > diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> > index 80f23c9da6b0..f16a1a210815 100644
> > --- a/mm/kasan/report.c
> > +++ b/mm/kasan/report.c
> > @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> > kasan_enable_current();
> > }
> >
> > -static void print_track(struct kasan_track *track, const char *prefix)
> > +static void print_track(struct kasan_track *track, const char *prefix,
> > + bool is_callrcu)
> > {
> > - pr_err("%s by task %u:\n", prefix, track->pid);
> > + if (is_callrcu)
> > + pr_err("%s:\n", prefix);
> > + else
> > + pr_err("%s by task %u:\n", prefix, track->pid);
> > if (track->stack) {
> > unsigned long *entries;
> > unsigned int nr_entries;
> > @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> > if (cache->flags & SLAB_KASAN) {
> > struct kasan_track *free_track;
> >
> > - print_track(&alloc_info->alloc_track, "Allocated");
> > + print_track(&alloc_info->alloc_track, "Allocated", false);
> > pr_err("\n");
> > free_track = kasan_get_free_track(cache, object, tag);
> > - print_track(free_track, "Freed");
> > + print_track(free_track, "Freed", false);
> > pr_err("\n");
> > +
> > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > + free_track = kasan_get_aux_stack(alloc_info, 0);
> > + print_track(free_track, "First call_rcu() call stack", true);
> > + pr_err("\n");
> > + free_track = kasan_get_aux_stack(alloc_info, 1);
> > + print_track(free_track, "Last call_rcu() call stack", true);
> > + pr_err("\n");
> > + }
> > }
> >
> > describe_object_addr(cache, object, addr);
> > --
> > 2.18.0
On Mon, 2020-05-11 at 16:19 +0200, Dmitry Vyukov wrote:
> On Mon, May 11, 2020 at 3:29 PM Walter Wu <[email protected]> wrote:
> > > > This feature will record first and last call_rcu() call stack and
> > > > print two call_rcu() call stack in KASAN report.
> > > >
> > > > When call_rcu() is called, we store the call_rcu() call stack into
> > > > slub alloc meta-data, so that KASAN report can print rcu stack.
> > > >
> > > > It doesn't increase the cost of memory consumption. Because we don't
> > > > enlarge struct kasan_alloc_meta size.
> > > > - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> > > > - remove free track from kasan_alloc_meta, size is 8 bytes.
> > > >
> > > > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> > > > [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
> > > >
> > > > Signed-off-by: Walter Wu <[email protected]>
> > > > Suggested-by: Dmitry Vyukov <[email protected]>
> > > > Cc: Andrey Ryabinin <[email protected]>
> > > > Cc: Dmitry Vyukov <[email protected]>
> > > > Cc: Alexander Potapenko <[email protected]>
> > > > Cc: Andrew Morton <[email protected]>
> > > > Cc: Paul E. McKenney <[email protected]>
> > > > Cc: Josh Triplett <[email protected]>
> > > > Cc: Mathieu Desnoyers <[email protected]>
> > > > Cc: Lai Jiangshan <[email protected]>
> > > > Cc: Joel Fernandes <[email protected]>
> > > > ---
> > > > include/linux/kasan.h | 2 ++
> > > > kernel/rcu/tree.c | 3 +++
> > > > lib/Kconfig.kasan | 2 ++
> > > > mm/kasan/common.c | 4 ++--
> > > > mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> > > > mm/kasan/kasan.h | 19 +++++++++++++++++++
> > > > mm/kasan/report.c | 21 +++++++++++++++++----
> > > > 7 files changed, 74 insertions(+), 6 deletions(-)
> > > >
> > > > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > > > index 31314ca7c635..23b7ee00572d 100644
> > > > --- a/include/linux/kasan.h
> > > > +++ b/include/linux/kasan.h
> > > > @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> > > >
> > > > void kasan_cache_shrink(struct kmem_cache *cache);
> > > > void kasan_cache_shutdown(struct kmem_cache *cache);
> > > > +void kasan_record_aux_stack(void *ptr);
> > > >
> > > > #else /* CONFIG_KASAN_GENERIC */
> > > >
> > > > static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> > > > static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> > > > +static inline void kasan_record_aux_stack(void *ptr) {}
> > > >
> > > > #endif /* CONFIG_KASAN_GENERIC */
> > > >
> > > > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > > > index 06548e2ebb72..de872b6cc261 100644
> > > > --- a/kernel/rcu/tree.c
> > > > +++ b/kernel/rcu/tree.c
> > > > @@ -57,6 +57,7 @@
> > > > #include <linux/slab.h>
> > > > #include <linux/sched/isolation.h>
> > > > #include <linux/sched/clock.h>
> > > > +#include <linux/kasan.h>
> > > > #include "../time/tick-internal.h"
> > > >
> > > > #include "tree.h"
> > > > @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> > > > trace_rcu_callback(rcu_state.name, head,
> > > > rcu_segcblist_n_cbs(&rdp->cblist));
> > > >
> > > > + kasan_record_aux_stack(head);
> > > > +
> > > > /* Go handle any RCU core processing required. */
> > > > if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> > > > unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> > > > diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> > > > index 81f5464ea9e1..56a89291f1cc 100644
> > > > --- a/lib/Kconfig.kasan
> > > > +++ b/lib/Kconfig.kasan
> > > > @@ -58,6 +58,8 @@ config KASAN_GENERIC
> > > > For better error detection enable CONFIG_STACKTRACE.
> > > > Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> > > > (the resulting kernel does not boot).
> > > > + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> > > > + call stack. It doesn't increase the cost of memory consumption.
> > > >
> > > > config KASAN_SW_TAGS
> > > > bool "Software tag-based mode"
> > > > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > > > index 2906358e42f0..8bc618289bb1 100644
> > > > --- a/mm/kasan/common.c
> > > > +++ b/mm/kasan/common.c
> > > > @@ -41,7 +41,7 @@
> > > > #include "kasan.h"
> > > > #include "../slab.h"
> > > >
> > > > -static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > > +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> > > > {
> > > > unsigned long entries[KASAN_STACK_DEPTH];
> > > > unsigned int nr_entries;
> > > > @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > > static inline void set_track(struct kasan_track *track, gfp_t flags)
> > > > {
> > > > track->pid = current->pid;
> > > > - track->stack = save_stack(flags);
> > > > + track->stack = kasan_save_stack(flags);
> > > > }
> > > >
> > > > void kasan_enable_current(void)
> > > > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > > > index 56ff8885fe2e..b86880c338e2 100644
> > > > --- a/mm/kasan/generic.c
> > > > +++ b/mm/kasan/generic.c
> > > > @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> > > > DEFINE_ASAN_SET_SHADOW(f3);
> > > > DEFINE_ASAN_SET_SHADOW(f5);
> > > > DEFINE_ASAN_SET_SHADOW(f8);
> > > > +
> > > > +void kasan_record_aux_stack(void *addr)
> > > > +{
> > > > + struct page *page = kasan_addr_to_page(addr);
> > > > + struct kmem_cache *cache;
> > > > + struct kasan_alloc_meta *alloc_info;
> > > > + void *object;
> > > > +
> > > > + if (!(page && PageSlab(page)))
> > > > + return;
> > > > +
> > > > + cache = page->slab_cache;
> > > > + object = nearest_obj(cache, page, addr);
> > > > + alloc_info = get_alloc_info(cache, object);
> > > > +
> > > > + if (!alloc_info->rcu_stack[0])
> > > > + /* record first call_rcu() call stack */
> > > > + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> > > > + else
> > > > + /* record last call_rcu() call stack */
> > > > + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> > > > +}
> > > > +
> > > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > > + u8 idx)
> > > > +{
> > > > + return container_of(&alloc_info->rcu_stack[idx],
> > > > + struct kasan_track, stack);
> > > > +}
> > > > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> > > > index e8f37199d885..1cc1fb7b0de3 100644
> > > > --- a/mm/kasan/kasan.h
> > > > +++ b/mm/kasan/kasan.h
> > > > @@ -96,15 +96,28 @@ struct kasan_track {
> > > > depot_stack_handle_t stack;
> > > > };
> > > >
> > > > +#ifdef CONFIG_KASAN_GENERIC
> > > > +#define SIZEOF_PTR sizeof(void *)
> > >
> > > Please move this to generic.c closer to kasan_set_free_info.
> > > Unnecessary in the header.
> > >
> > > > +#define KASAN_NR_RCU_CALL_STACKS 2
> > >
> > > Since KASAN_NR_RCU_CALL_STACKS is only used once below, you could as
> > > well use 2 instead of it.
> > > Reduces level of indirection and cognitive load.
> > >
> > > > +#else /* CONFIG_KASAN_GENERIC */
> > > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > > #define KASAN_NR_FREE_STACKS 5
> > > > #else
> > > > #define KASAN_NR_FREE_STACKS 1
> > > > #endif
> > > > +#endif /* CONFIG_KASAN_GENERIC */
> > > >
> > > > struct kasan_alloc_meta {
> > > > struct kasan_track alloc_track;
> > > > +#ifdef CONFIG_KASAN_GENERIC
> > > > + /*
> > > > + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> > > > + * The free stack is stored into freed object.
> > > > + */
> > > > + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> > > > +#else
> > > > struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> > > > +#endif
> > > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > > u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> > > > u8 free_track_idx;
> > > > @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
> > > >
> > > > struct page *kasan_addr_to_page(const void *addr);
> > > >
> > > > +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> > > > +
> > > > #if defined(CONFIG_KASAN_GENERIC) && \
> > > > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> > > > void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> > > > void quarantine_reduce(void);
> > > > void quarantine_remove_cache(struct kmem_cache *cache);
> > > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > > + u8 idx);
> > > > #else
> > > > static inline void quarantine_put(struct kasan_free_meta *info,
> > > > struct kmem_cache *cache) { }
> > > > static inline void quarantine_reduce(void) { }
> > > > static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> > > > +static inline struct kasan_track *kasan_get_aux_stack(
> > > > + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> > > > #endif
> > > >
> > > > #ifdef CONFIG_KASAN_SW_TAGS
> > > > diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> > > > index 80f23c9da6b0..f16a1a210815 100644
> > > > --- a/mm/kasan/report.c
> > > > +++ b/mm/kasan/report.c
> > > > @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> > > > kasan_enable_current();
> > > > }
> > > >
> > > > -static void print_track(struct kasan_track *track, const char *prefix)
> > > > +static void print_track(struct kasan_track *track, const char *prefix,
> > > > + bool is_callrcu)
> > > > {
> > > > - pr_err("%s by task %u:\n", prefix, track->pid);
> > > > + if (is_callrcu)
> > > > + pr_err("%s:\n", prefix);
> > > > + else
> > > > + pr_err("%s by task %u:\n", prefix, track->pid);
> > > > if (track->stack) {
> > > > unsigned long *entries;
> > > > unsigned int nr_entries;
> > > > @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> > > > if (cache->flags & SLAB_KASAN) {
> > > > struct kasan_track *free_track;
> > > >
> > > > - print_track(&alloc_info->alloc_track, "Allocated");
> > > > + print_track(&alloc_info->alloc_track, "Allocated", false);
> > > > pr_err("\n");
> > > > free_track = kasan_get_free_track(cache, object, tag);
> > > > - print_track(free_track, "Freed");
> > > > + print_track(free_track, "Freed", false);
> > > > pr_err("\n");
> > > > +
> > > > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > > > + free_track = kasan_get_aux_stack(alloc_info, 0);
> > > > + print_track(free_track, "First call_rcu() call stack", true);
> > > > + pr_err("\n");
> > > > + free_track = kasan_get_aux_stack(alloc_info, 1);
> > > > + print_track(free_track, "Last call_rcu() call stack", true);
> > > > + pr_err("\n");
> > > > + }
> > > > }
> > > >
> > > > describe_object_addr(cache, object, addr);
> >
> > Some higher level comments.
> >
> > 1. I think we need to put the free track into kasan_free_meta as it
> > was before. It looks like exactly the place for it. We have logic to
> > properly place it and to do the casts.
> >
> >
> > If the free track is put into kasan_free_meta, doesn't that increase the
> > slab metadata size? Our original goal is not to enlarge it.
>
> Are you sure it will increase object size?
> I think we overlap kasan_free_meta with the object as well. The only
> case we don't overlap kasan_free_meta with the object are
> SLAB_TYPESAFE_BY_RCU || cache->ctor. But these are rare and it should
> only affect small objects with small redzones.
> And I think now we simply have a bug for these objects, we check
> KASAN_KMALLOC_FREE and then assume object contains free stack, but for
> objects with ctor, they still contain live object data, we don't store
> free stack in them.
> Such objects can be both free and still contain user data.
>
Overlaying kasan_free_meta, I see. But it can only be overlaid once the
object is freed, and kasan_free_meta is still needed until the object is
finally released:
1). When the object is put into quarantine, it needs kasan_free_meta.
2). When the object exits the quarantine, it needs kasan_free_meta.
If we choose to overlay kasan_free_meta, the free stack would be stored
very late, so the report may contain no free stack.
>
> > 2. We need to zero aux stacks when we reallocate the object. Otherwise
> > we print confusing garbage.
> >
> > I have a local unit test for use-after-free via RCU, but it is hard to test the printing of confusing garbage, because we would need the reallocation to return the same object (old pointer equals new pointer), which is not easy to arrange under generic KASAN.
> >
> > 3. __kasan_slab_free now contains a window of inconsistency when it
> > marked the object as KASAN_KMALLOC_FREE, but did not store the free
> > track yet. If another thread prints a report now, it will print random
> > garbage.
> >
> >
> > It is possible, but the window is tiny. The free track is set immediately after KASAN_KMALLOC_FREE is written.
>
> It is small. But (1) why do we want to allow it at all, (2) there is
> actually a more serious problem. If we mark an object as
> KASAN_KMALLOC_FREE, but don't do kasan_set_free_info (because object
> has ctor), now we will treat live object data as free track. We need
> to fix it anyway.
>
I see.
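To make the ctor case concrete, a hypothetical guard could look like the
sketch below; cache->ctor and SLAB_TYPESAFE_BY_RCU are the existing slab
fields mentioned above, but the helper name and where it would be checked are
assumptions:

/*
 * Hypothetical check: the freed object's memory can only be reused for
 * the free track when it does not still hold live data.
 */
static bool object_holds_free_track(struct kmem_cache *cache)
{
	return !cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU);
}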
>
>
>
> > 4. We need some tests. At least (2) should be visible on tests.
> >
> >
> > Ok.
On Tue, May 12, 2020 at 4:36 AM Walter Wu <[email protected]> wrote:
>
> On Mon, 2020-05-11 at 11:05 -0700, Paul E. McKenney wrote:
> > On Mon, May 11, 2020 at 10:31:11AM +0800, Walter Wu wrote:
> > > This feature will record first and last call_rcu() call stack and
> > > print two call_rcu() call stack in KASAN report.
> >
> > Suppose that a given rcu_head structure is passed to call_rcu(), then
> > the grace period elapses, the callback is invoked, and the enclosing
> > data structure is freed. But then that same region of memory is
> > immediately reallocated as the same type of structure and again
> > passed to call_rcu(), and that this cycle repeats several times.
> >
> > Would the first call stack forever be associated with the first
> > call_rcu() in this series? If so, wouldn't the last two usually
> > be the most useful? Or am I unclear on the use case?
2 points here:
1. With KASAN the object won't be immediately reallocated. KASAN has
'quarantine' to delay reuse of heap objects. It is assumed that the
object is still in quarantine when we detect a use-after-free. In such
a case we will have proper call_rcu stacks as well.
It is possible that the object is not in quarantine already and was
reused several times (quarantine is not infinite), but then KASAN will
report non-sense stacks for allocation/free as well. So wrong call_rcu
stacks are less of a problem in such cases.
2. We would like to memorize 2 last call_rcu stacks regardless, but we
just don't have a good place for the index (bit which of the 2 is the
one to overwrite). Probably could shove it into some existing field,
but then will require atomic operations, etc.
Nobody knows how well/bad it will work. I think we need to get the
first version in, deploy on syzbot, accumulate some base of example
reports and iterate from there.
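For comparison, a hypothetical "two most recent stacks" scheme would need
roughly the following; it is not what the patch does, and the extra index
field (plus atomics if call_rcu() can race on the same object) is exactly the
cost being avoided:

/*
 * Hypothetical sketch only: keep the two most recent call_rcu() stacks
 * by alternating between two slots. Requires an extra index byte in the
 * per-object metadata, which the patch deliberately avoids.
 */
struct aux_ring {
	depot_stack_handle_t stack[2];
	u8 next;			/* slot to overwrite next */
};

static void record_most_recent(struct aux_ring *r, depot_stack_handle_t s)
{
	r->stack[r->next] = s;
	r->next ^= 1;			/* alternate between the two slots */
}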
> The first call stack is not associated with the first call_rcu() forever;
> if the object is freed and reallocated, the first call stack will be
> replaced by the one recorded for the new object.
>
> > > When call_rcu() is called, we store the call_rcu() call stack into
> > > slub alloc meta-data, so that KASAN report can print rcu stack.
> > >
> > > It doesn't increase the cost of memory consumption. Because we don't
> > > enlarge struct kasan_alloc_meta size.
> > > - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> > > - remove free track from kasan_alloc_meta, size is 8 bytes.
> > >
> > > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> > > [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
> > >
> > > Signed-off-by: Walter Wu <[email protected]>
> > > Suggested-by: Dmitry Vyukov <[email protected]>
> > > Cc: Andrey Ryabinin <[email protected]>
> > > Cc: Dmitry Vyukov <[email protected]>
> > > Cc: Alexander Potapenko <[email protected]>
> > > Cc: Andrew Morton <[email protected]>
> > > Cc: Paul E. McKenney <[email protected]>
> > > Cc: Josh Triplett <[email protected]>
> > > Cc: Mathieu Desnoyers <[email protected]>
> > > Cc: Lai Jiangshan <[email protected]>
> > > Cc: Joel Fernandes <[email protected]>
> > > ---
> > > include/linux/kasan.h | 2 ++
> > > kernel/rcu/tree.c | 3 +++
> > > lib/Kconfig.kasan | 2 ++
> > > mm/kasan/common.c | 4 ++--
> > > mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> > > mm/kasan/kasan.h | 19 +++++++++++++++++++
> > > mm/kasan/report.c | 21 +++++++++++++++++----
> > > 7 files changed, 74 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > > index 31314ca7c635..23b7ee00572d 100644
> > > --- a/include/linux/kasan.h
> > > +++ b/include/linux/kasan.h
> > > @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> > >
> > > void kasan_cache_shrink(struct kmem_cache *cache);
> > > void kasan_cache_shutdown(struct kmem_cache *cache);
> > > +void kasan_record_aux_stack(void *ptr);
> > >
> > > #else /* CONFIG_KASAN_GENERIC */
> > >
> > > static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> > > static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> > > +static inline void kasan_record_aux_stack(void *ptr) {}
> > >
> > > #endif /* CONFIG_KASAN_GENERIC */
> > >
> > > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > > index 06548e2ebb72..de872b6cc261 100644
> > > --- a/kernel/rcu/tree.c
> > > +++ b/kernel/rcu/tree.c
> > > @@ -57,6 +57,7 @@
> > > #include <linux/slab.h>
> > > #include <linux/sched/isolation.h>
> > > #include <linux/sched/clock.h>
> > > +#include <linux/kasan.h>
> > > #include "../time/tick-internal.h"
> > >
> > > #include "tree.h"
> > > @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> > > trace_rcu_callback(rcu_state.name, head,
> > > rcu_segcblist_n_cbs(&rdp->cblist));
> > >
> > > + kasan_record_aux_stack(head);
> >
> > Just for the record, at this point we have not yet queued the callback.
> > We have also not yet disabled interrupts. Which might be OK, but I
> > figured I should call out the possibility of moving this down a few
> > lines to follow the local_irq_save().
> >
>
> We intend to do it.
I will sleep better if we move it up :)
It qualifies as a "debug check", and such checks are generally done on
entry to the function. Or are these all debug checks up to this point?
But if the callback has not leaked anywhere up to this point and we
maintain it that way, then formally it is fine.
> > If someone incorrectly invokes call_rcu() concurrently on this
> > same region of memory, possibly from an interrupt handler, we are OK
> > corrupting the stack traces, right?
> >
>
> Yes, and the incorrect call_rcu() invocation should still be recorded.
>
> > But what happens if a given structure has more than one rcu_head
> > structure? In that case, RCU would be just fine with it being
> > concurrently passed to different call_rcu() invocations as long as the
> > two invocations didn't both use the same rcu_head structure. (In that
> > case, they had best not be both freeing the object, and if even one of
> > them is freeing the object, coordination is necessary.)
> >
> > If this is a problem, one approach would be to move the
> > kasan_record_aux_stack(head) call to kfree_rcu(). After all, it is
> > definitely illegal to pass the same memory to a pair of kfree_rcu()
> > invocations! ;-)
> >
>
> kasan_record_aux_stack(head) is simple: it only records the call stack
> for the object that contains 'head'.
I would say "corrupting" stacks on some races is fine-ish. In the end
we are just storing a u32 stack id.
On syzbot we generally have multiple samples of the same crash, so
even if one is "corrupted" there may be others that are not corrupted.
Just protecting from this looks too complex and expensive. And in the
end there is not much we can do anyway.
Recording all call_rcu stacks (not just kfree_rcu) is intentional. I
think it may be useful to even extend to recording workqueue and timer
stacks as well.
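Purely as an illustration of that possible extension (not part of this patch),
the same hook could be called from other deferred-execution paths; the call
sites below are assumed:

/*
 * Hypothetical extension: record who queued a work item or armed a
 * timer, reusing the aux-stack hook introduced by this patch.
 */
static void record_work_stack(struct work_struct *work)
{
	kasan_record_aux_stack(work);	/* stack of the queueing context */
}

static void record_timer_stack(struct timer_list *timer)
{
	kasan_record_aux_stack(timer);	/* stack of the arming context */
}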
> Thanks.
>
> > Thanx, Paul
> >
> > > +
> > > /* Go handle any RCU core processing required. */
> > > if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> > > unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> > > diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> > > index 81f5464ea9e1..56a89291f1cc 100644
> > > --- a/lib/Kconfig.kasan
> > > +++ b/lib/Kconfig.kasan
> > > @@ -58,6 +58,8 @@ config KASAN_GENERIC
> > > For better error detection enable CONFIG_STACKTRACE.
> > > Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> > > (the resulting kernel does not boot).
> > > + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> > > + call stack. It doesn't increase the cost of memory consumption.
> > >
> > > config KASAN_SW_TAGS
> > > bool "Software tag-based mode"
> > > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > > index 2906358e42f0..8bc618289bb1 100644
> > > --- a/mm/kasan/common.c
> > > +++ b/mm/kasan/common.c
> > > @@ -41,7 +41,7 @@
> > > #include "kasan.h"
> > > #include "../slab.h"
> > >
> > > -static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> > > {
> > > unsigned long entries[KASAN_STACK_DEPTH];
> > > unsigned int nr_entries;
> > > @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > static inline void set_track(struct kasan_track *track, gfp_t flags)
> > > {
> > > track->pid = current->pid;
> > > - track->stack = save_stack(flags);
> > > + track->stack = kasan_save_stack(flags);
> > > }
> > >
> > > void kasan_enable_current(void)
> > > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > > index 56ff8885fe2e..b86880c338e2 100644
> > > --- a/mm/kasan/generic.c
> > > +++ b/mm/kasan/generic.c
> > > @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> > > DEFINE_ASAN_SET_SHADOW(f3);
> > > DEFINE_ASAN_SET_SHADOW(f5);
> > > DEFINE_ASAN_SET_SHADOW(f8);
> > > +
> > > +void kasan_record_aux_stack(void *addr)
> > > +{
> > > + struct page *page = kasan_addr_to_page(addr);
> > > + struct kmem_cache *cache;
> > > + struct kasan_alloc_meta *alloc_info;
> > > + void *object;
> > > +
> > > + if (!(page && PageSlab(page)))
> > > + return;
> > > +
> > > + cache = page->slab_cache;
> > > + object = nearest_obj(cache, page, addr);
> > > + alloc_info = get_alloc_info(cache, object);
> > > +
> > > + if (!alloc_info->rcu_stack[0])
> > > + /* record first call_rcu() call stack */
> > > + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> > > + else
> > > + /* record last call_rcu() call stack */
> > > + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> > > +}
> > > +
> > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > + u8 idx)
> > > +{
> > > + return container_of(&alloc_info->rcu_stack[idx],
> > > + struct kasan_track, stack);
> > > +}
> > > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> > > index e8f37199d885..1cc1fb7b0de3 100644
> > > --- a/mm/kasan/kasan.h
> > > +++ b/mm/kasan/kasan.h
> > > @@ -96,15 +96,28 @@ struct kasan_track {
> > > depot_stack_handle_t stack;
> > > };
> > >
> > > +#ifdef CONFIG_KASAN_GENERIC
> > > +#define SIZEOF_PTR sizeof(void *)
> > > +#define KASAN_NR_RCU_CALL_STACKS 2
> > > +#else /* CONFIG_KASAN_GENERIC */
> > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > #define KASAN_NR_FREE_STACKS 5
> > > #else
> > > #define KASAN_NR_FREE_STACKS 1
> > > #endif
> > > +#endif /* CONFIG_KASAN_GENERIC */
> > >
> > > struct kasan_alloc_meta {
> > > struct kasan_track alloc_track;
> > > +#ifdef CONFIG_KASAN_GENERIC
> > > + /*
> > > + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> > > + * The free stack is stored into freed object.
> > > + */
> > > + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> > > +#else
> > > struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> > > +#endif
> > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> > > u8 free_track_idx;
> > > @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
> > >
> > > struct page *kasan_addr_to_page(const void *addr);
> > >
> > > +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> > > +
> > > #if defined(CONFIG_KASAN_GENERIC) && \
> > > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> > > void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> > > void quarantine_reduce(void);
> > > void quarantine_remove_cache(struct kmem_cache *cache);
> > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > + u8 idx);
> > > #else
> > > static inline void quarantine_put(struct kasan_free_meta *info,
> > > struct kmem_cache *cache) { }
> > > static inline void quarantine_reduce(void) { }
> > > static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> > > +static inline struct kasan_track *kasan_get_aux_stack(
> > > + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> > > #endif
> > >
> > > #ifdef CONFIG_KASAN_SW_TAGS
> > > diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> > > index 80f23c9da6b0..f16a1a210815 100644
> > > --- a/mm/kasan/report.c
> > > +++ b/mm/kasan/report.c
> > > @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> > > kasan_enable_current();
> > > }
> > >
> > > -static void print_track(struct kasan_track *track, const char *prefix)
> > > +static void print_track(struct kasan_track *track, const char *prefix,
> > > + bool is_callrcu)
> > > {
> > > - pr_err("%s by task %u:\n", prefix, track->pid);
> > > + if (is_callrcu)
> > > + pr_err("%s:\n", prefix);
> > > + else
> > > + pr_err("%s by task %u:\n", prefix, track->pid);
> > > if (track->stack) {
> > > unsigned long *entries;
> > > unsigned int nr_entries;
> > > @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> > > if (cache->flags & SLAB_KASAN) {
> > > struct kasan_track *free_track;
> > >
> > > - print_track(&alloc_info->alloc_track, "Allocated");
> > > + print_track(&alloc_info->alloc_track, "Allocated", false);
> > > pr_err("\n");
> > > free_track = kasan_get_free_track(cache, object, tag);
> > > - print_track(free_track, "Freed");
> > > + print_track(free_track, "Freed", false);
> > > pr_err("\n");
> > > +
> > > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > > + free_track = kasan_get_aux_stack(alloc_info, 0);
> > > + print_track(free_track, "First call_rcu() call stack", true);
> > > + pr_err("\n");
> > > + free_track = kasan_get_aux_stack(alloc_info, 1);
> > > + print_track(free_track, "Last call_rcu() call stack", true);
> > > + pr_err("\n");
> > > + }
> > > }
> > >
> > > describe_object_addr(cache, object, addr);
> > > --
> > > 2.18.0
>
On Tue, May 12, 2020 at 5:38 AM Walter Wu <[email protected]> wrote:
> > Are you sure it will increase object size?
> > I think we overlap kasan_free_meta with the object as well. The only
> > case we don't overlap kasan_free_meta with the object are
> > SLAB_TYPESAFE_BY_RCU || cache->ctor. But these are rare and it should
> > only affect small objects with small redzones.
> > And I think now we simply have a bug for these objects, we check
> > KASAN_KMALLOC_FREE and then assume object contains free stack, but for
> > objects with ctor, they still contain live object data, we don't store
> > free stack in them.
> > Such objects can be both free and still contain user data.
> >
>
> Overlaying kasan_free_meta, I see. But it can only be overlaid once the
> object is freed, and kasan_free_meta is still needed until the object is
> finally released:
> 1). When the object is put into quarantine, it needs kasan_free_meta.
> 2). When the object exits the quarantine, it needs kasan_free_meta.
>
> If we choose to overlay kasan_free_meta, the free stack would be stored
> very late, so the report may contain no free stack.
Sorry, I don't understand what you mean.
Why will it be stored too late?
In __kasan_slab_free() putting into quarantine and recording free
stack are literally adjacent lines of code:
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
unsigned long ip, bool quarantine)
{
...
kasan_set_free_info(cache, object, tag);
quarantine_put(get_free_info(cache, object), cache);
Just to make sure, what I meant is that we add free_track to kasan_free_meta:
struct kasan_free_meta {
struct qlist_node quarantine_link;
+ struct kasan_track free_track;
};
And I think its life-time and everything should be exactly what we need.
Also it should help to fix the problem with ctors: kasan_free_meta is
already allocated on the side for such objects, and that's exactly
what we need for objects with ctors.
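A minimal sketch of how the free track would then be filled in, assuming
free_track is added to kasan_free_meta as above; get_free_info() and
kasan_save_stack() are the helpers already visible in the quoted code, while
the function name and the GFP_NOWAIT flag here are assumptions:

/*
 * Hypothetical helper: store the free track in kasan_free_meta right
 * before the object is put into quarantine, next to the existing
 * kasan_set_free_info()/quarantine_put() calls quoted above.
 */
static void set_free_track_in_free_meta(struct kmem_cache *cache, void *object)
{
	struct kasan_free_meta *free_info = get_free_info(cache, object);

	free_info->free_track.pid = current->pid;
	free_info->free_track.stack = kasan_save_stack(GFP_NOWAIT);
}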
On Tue, May 12, 2020 at 03:56:17PM +0200, Dmitry Vyukov wrote:
> On Tue, May 12, 2020 at 4:36 AM Walter Wu <[email protected]> wrote:
> >
> > On Mon, 2020-05-11 at 11:05 -0700, Paul E. McKenney wrote:
> > > On Mon, May 11, 2020 at 10:31:11AM +0800, Walter Wu wrote:
> > > > This feature will record first and last call_rcu() call stack and
> > > > print two call_rcu() call stack in KASAN report.
> > >
> > > Suppose that a given rcu_head structure is passed to call_rcu(), then
> > > the grace period elapses, the callback is invoked, and the enclosing
> > > data structure is freed. But then that same region of memory is
> > > immediately reallocated as the same type of structure and again
> > > passed to call_rcu(), and that this cycle repeats several times.
> > >
> > > Would the first call stack forever be associated with the first
> > > call_rcu() in this series? If so, wouldn't the last two usually
> > > be the most useful? Or am I unclear on the use case?
>
> 2 points here:
>
> 1. With KASAN the object won't be immediately reallocated. KASAN has
> 'quarantine' to delay reuse of heap objects. It is assumed that the
> object is still in quarantine when we detect a use-after-free. In such
> a case we will have proper call_rcu stacks as well.
> It is possible that the object is not in quarantine already and was
> reused several times (quarantine is not infinite), but then KASAN will
> report non-sense stacks for allocation/free as well. So wrong call_rcu
> stacks are less of a problem in such cases.
>
> 2. We would like to memorize 2 last call_rcu stacks regardless, but we
> just don't have a good place for the index (bit which of the 2 is the
> one to overwrite). Probably could shove it into some existing field,
> but then will require atomic operations, etc.
>
> Nobody knows how well/bad it will work. I think we need to get the
> first version in, deploy on syzbot, accumulate some base of example
> reports and iterate from there.
If I understood the stack-index point below, why not just move the
previous stack index to clobber the previous-to-previous stack index,
then put the current stack index into the spot thus opened up?
> > The first call stack is not associated with the first call_rcu() forever;
> > if the object is freed and reallocated, the first call stack will be
> > replaced by the one recorded for the new object.
> >
> > > > When call_rcu() is called, we store the call_rcu() call stack into
> > > > slub alloc meta-data, so that KASAN report can print rcu stack.
> > > >
> > > > It doesn't increase the cost of memory consumption. Because we don't
> > > > enlarge struct kasan_alloc_meta size.
> > > > - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> > > > - remove free track from kasan_alloc_meta, size is 8 bytes.
> > > >
> > > > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> > > > [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
> > > >
> > > > Signed-off-by: Walter Wu <[email protected]>
> > > > Suggested-by: Dmitry Vyukov <[email protected]>
> > > > Cc: Andrey Ryabinin <[email protected]>
> > > > Cc: Dmitry Vyukov <[email protected]>
> > > > Cc: Alexander Potapenko <[email protected]>
> > > > Cc: Andrew Morton <[email protected]>
> > > > Cc: Paul E. McKenney <[email protected]>
> > > > Cc: Josh Triplett <[email protected]>
> > > > Cc: Mathieu Desnoyers <[email protected]>
> > > > Cc: Lai Jiangshan <[email protected]>
> > > > Cc: Joel Fernandes <[email protected]>
> > > > ---
> > > > include/linux/kasan.h | 2 ++
> > > > kernel/rcu/tree.c | 3 +++
> > > > lib/Kconfig.kasan | 2 ++
> > > > mm/kasan/common.c | 4 ++--
> > > > mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> > > > mm/kasan/kasan.h | 19 +++++++++++++++++++
> > > > mm/kasan/report.c | 21 +++++++++++++++++----
> > > > 7 files changed, 74 insertions(+), 6 deletions(-)
> > > >
> > > > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > > > index 31314ca7c635..23b7ee00572d 100644
> > > > --- a/include/linux/kasan.h
> > > > +++ b/include/linux/kasan.h
> > > > @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> > > >
> > > > void kasan_cache_shrink(struct kmem_cache *cache);
> > > > void kasan_cache_shutdown(struct kmem_cache *cache);
> > > > +void kasan_record_aux_stack(void *ptr);
> > > >
> > > > #else /* CONFIG_KASAN_GENERIC */
> > > >
> > > > static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> > > > static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> > > > +static inline void kasan_record_aux_stack(void *ptr) {}
> > > >
> > > > #endif /* CONFIG_KASAN_GENERIC */
> > > >
> > > > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > > > index 06548e2ebb72..de872b6cc261 100644
> > > > --- a/kernel/rcu/tree.c
> > > > +++ b/kernel/rcu/tree.c
> > > > @@ -57,6 +57,7 @@
> > > > #include <linux/slab.h>
> > > > #include <linux/sched/isolation.h>
> > > > #include <linux/sched/clock.h>
> > > > +#include <linux/kasan.h>
> > > > #include "../time/tick-internal.h"
> > > >
> > > > #include "tree.h"
> > > > @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> > > > trace_rcu_callback(rcu_state.name, head,
> > > > rcu_segcblist_n_cbs(&rdp->cblist));
> > > >
> > > > + kasan_record_aux_stack(head);
> > >
> > > Just for the record, at this point we have not yet queued the callback.
> > > We have also not yet disabled interrupts. Which might be OK, but I
> > > figured I should call out the possibility of moving this down a few
> > > lines to follow the local_irq_save().
> > >
> >
> > We intend to do it.
>
> I will sleep better if we move it up :)
> It qualifies as a "debug check", and such checks are generally done on
> entry to the function. Or are these all debug checks up to this point?
> But if the callback has not leaked anywhere up to this point and we
> maintain it that way, then formally it is fine.
There are debug checks, then initialization of presumed private
structures, disabling of interrupts, more checks that are now safe given
that we are pinned to a specific CPU, and so on.
I am OK with it being at the beginning of the function.
> > > If someone incorrectly invokes call_rcu() concurrently on this
> > > same region of memory, possibly from an interrupt handler, we are OK
> > > corrupting the stack traces, right?
> > >
> >
> > Yes, and the incorrect call_rcu() invocation should still be recorded.
> >
> > > But what happens if a given structure has more than one rcu_head
> > > structure? In that case, RCU would be just fine with it being
> > > concurrently passed to different call_rcu() invocations as long as the
> > > two invocations didn't both use the same rcu_head structure. (In that
> > > case, they had best not be both freeing the object, and if even one of
> > > them is freeing the object, coordination is necessary.)
> > >
> > > If this is a problem, one approach would be to move the
> > > kasan_record_aux_stack(head) call to kfree_rcu(). After all, it is
> > > definitely illegal to pass the same memory to a pair of kfree_rcu()
> > > invocations! ;-)
> > >
> >
> > kasan_record_aux_stack(head) is simple: it only records the call stack
> > for the object that contains 'head'.
>
> I would say "corrupting" stacks on some races is fine-ish. In the end
> we are just storing a u32 stack id.
> On syzbot we generally have multiple samples of the same crash, so
> even if one is "corrupted" there may be others that are not corrupted.
> Just protecting from this looks too complex and expensive. And in the
> end there is not much we can do anyway.
>
> Recording all call_rcu stacks (not just kfree_rcu) is intentional. I
> think it may be useful to even extend to recording workqueue and timer
> stacks as well.
Given the u32 nature of the stack ID, I agree that there is no point
in excluding call_rcu(). At least until such time as we start getting
false positives due to multiple rcu_head structures in the same structure.
Thanx, Paul
> > > > +
> > > > /* Go handle any RCU core processing required. */
> > > > if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> > > > unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> > > > diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> > > > index 81f5464ea9e1..56a89291f1cc 100644
> > > > --- a/lib/Kconfig.kasan
> > > > +++ b/lib/Kconfig.kasan
> > > > @@ -58,6 +58,8 @@ config KASAN_GENERIC
> > > > For better error detection enable CONFIG_STACKTRACE.
> > > > Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> > > > (the resulting kernel does not boot).
> > > > + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> > > > + call stack. It doesn't increase the cost of memory consumption.
> > > >
> > > > config KASAN_SW_TAGS
> > > > bool "Software tag-based mode"
> > > > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > > > index 2906358e42f0..8bc618289bb1 100644
> > > > --- a/mm/kasan/common.c
> > > > +++ b/mm/kasan/common.c
> > > > @@ -41,7 +41,7 @@
> > > > #include "kasan.h"
> > > > #include "../slab.h"
> > > >
> > > > -static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > > +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> > > > {
> > > > unsigned long entries[KASAN_STACK_DEPTH];
> > > > unsigned int nr_entries;
> > > > @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > > static inline void set_track(struct kasan_track *track, gfp_t flags)
> > > > {
> > > > track->pid = current->pid;
> > > > - track->stack = save_stack(flags);
> > > > + track->stack = kasan_save_stack(flags);
> > > > }
> > > >
> > > > void kasan_enable_current(void)
> > > > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > > > index 56ff8885fe2e..b86880c338e2 100644
> > > > --- a/mm/kasan/generic.c
> > > > +++ b/mm/kasan/generic.c
> > > > @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> > > > DEFINE_ASAN_SET_SHADOW(f3);
> > > > DEFINE_ASAN_SET_SHADOW(f5);
> > > > DEFINE_ASAN_SET_SHADOW(f8);
> > > > +
> > > > +void kasan_record_aux_stack(void *addr)
> > > > +{
> > > > + struct page *page = kasan_addr_to_page(addr);
> > > > + struct kmem_cache *cache;
> > > > + struct kasan_alloc_meta *alloc_info;
> > > > + void *object;
> > > > +
> > > > + if (!(page && PageSlab(page)))
> > > > + return;
> > > > +
> > > > + cache = page->slab_cache;
> > > > + object = nearest_obj(cache, page, addr);
> > > > + alloc_info = get_alloc_info(cache, object);
> > > > +
> > > > + if (!alloc_info->rcu_stack[0])
> > > > + /* record first call_rcu() call stack */
> > > > + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> > > > + else
> > > > + /* record last call_rcu() call stack */
> > > > + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> > > > +}
> > > > +
> > > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > > + u8 idx)
> > > > +{
> > > > + return container_of(&alloc_info->rcu_stack[idx],
> > > > + struct kasan_track, stack);
> > > > +}
> > > > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> > > > index e8f37199d885..1cc1fb7b0de3 100644
> > > > --- a/mm/kasan/kasan.h
> > > > +++ b/mm/kasan/kasan.h
> > > > @@ -96,15 +96,28 @@ struct kasan_track {
> > > > depot_stack_handle_t stack;
> > > > };
> > > >
> > > > +#ifdef CONFIG_KASAN_GENERIC
> > > > +#define SIZEOF_PTR sizeof(void *)
> > > > +#define KASAN_NR_RCU_CALL_STACKS 2
> > > > +#else /* CONFIG_KASAN_GENERIC */
> > > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > > #define KASAN_NR_FREE_STACKS 5
> > > > #else
> > > > #define KASAN_NR_FREE_STACKS 1
> > > > #endif
> > > > +#endif /* CONFIG_KASAN_GENERIC */
> > > >
> > > > struct kasan_alloc_meta {
> > > > struct kasan_track alloc_track;
> > > > +#ifdef CONFIG_KASAN_GENERIC
> > > > + /*
> > > > + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> > > > + * The free stack is stored into freed object.
> > > > + */
> > > > + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> > > > +#else
> > > > struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> > > > +#endif
> > > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > > u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> > > > u8 free_track_idx;
> > > > @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
> > > >
> > > > struct page *kasan_addr_to_page(const void *addr);
> > > >
> > > > +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> > > > +
> > > > #if defined(CONFIG_KASAN_GENERIC) && \
> > > > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> > > > void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> > > > void quarantine_reduce(void);
> > > > void quarantine_remove_cache(struct kmem_cache *cache);
> > > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > > + u8 idx);
> > > > #else
> > > > static inline void quarantine_put(struct kasan_free_meta *info,
> > > > struct kmem_cache *cache) { }
> > > > static inline void quarantine_reduce(void) { }
> > > > static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> > > > +static inline struct kasan_track *kasan_get_aux_stack(
> > > > + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> > > > #endif
> > > >
> > > > #ifdef CONFIG_KASAN_SW_TAGS
> > > > diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> > > > index 80f23c9da6b0..f16a1a210815 100644
> > > > --- a/mm/kasan/report.c
> > > > +++ b/mm/kasan/report.c
> > > > @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> > > > kasan_enable_current();
> > > > }
> > > >
> > > > -static void print_track(struct kasan_track *track, const char *prefix)
> > > > +static void print_track(struct kasan_track *track, const char *prefix,
> > > > + bool is_callrcu)
> > > > {
> > > > - pr_err("%s by task %u:\n", prefix, track->pid);
> > > > + if (is_callrcu)
> > > > + pr_err("%s:\n", prefix);
> > > > + else
> > > > + pr_err("%s by task %u:\n", prefix, track->pid);
> > > > if (track->stack) {
> > > > unsigned long *entries;
> > > > unsigned int nr_entries;
> > > > @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> > > > if (cache->flags & SLAB_KASAN) {
> > > > struct kasan_track *free_track;
> > > >
> > > > - print_track(&alloc_info->alloc_track, "Allocated");
> > > > + print_track(&alloc_info->alloc_track, "Allocated", false);
> > > > pr_err("\n");
> > > > free_track = kasan_get_free_track(cache, object, tag);
> > > > - print_track(free_track, "Freed");
> > > > + print_track(free_track, "Freed", false);
> > > > pr_err("\n");
> > > > +
> > > > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > > > + free_track = kasan_get_aux_stack(alloc_info, 0);
> > > > + print_track(free_track, "First call_rcu() call stack", true);
> > > > + pr_err("\n");
> > > > + free_track = kasan_get_aux_stack(alloc_info, 1);
> > > > + print_track(free_track, "Last call_rcu() call stack", true);
> > > > + pr_err("\n");
> > > > + }
> > > > }
> > > >
> > > > describe_object_addr(cache, object, addr);
> > > > --
> > > > 2.18.0
> >
On Tue, May 12, 2020 at 4:25 PM Paul E. McKenney <[email protected]> wrote:
>
> On Tue, May 12, 2020 at 03:56:17PM +0200, Dmitry Vyukov wrote:
> > On Tue, May 12, 2020 at 4:36 AM Walter Wu <[email protected]> wrote:
> > >
> > > On Mon, 2020-05-11 at 11:05 -0700, Paul E. McKenney wrote:
> > > > On Mon, May 11, 2020 at 10:31:11AM +0800, Walter Wu wrote:
> > > > > This feature will record first and last call_rcu() call stack and
> > > > > print two call_rcu() call stack in KASAN report.
> > > >
> > > > Suppose that a given rcu_head structure is passed to call_rcu(), then
> > > > the grace period elapses, the callback is invoked, and the enclosing
> > > > data structure is freed. But then that same region of memory is
> > > > immediately reallocated as the same type of structure and again
> > > > passed to call_rcu(), and that this cycle repeats several times.
> > > >
> > > > Would the first call stack forever be associated with the first
> > > > call_rcu() in this series? If so, wouldn't the last two usually
> > > > be the most useful? Or am I unclear on the use case?
> >
> > 2 points here:
> >
> > 1. With KASAN the object won't be immediately reallocated. KASAN has
> > 'quarantine' to delay reuse of heap objects. It is assumed that the
> > object is still in quarantine when we detect a use-after-free. In such
> > a case we will have proper call_rcu stacks as well.
> > It is possible that the object is not in quarantine already and was
> > reused several times (quarantine is not infinite), but then KASAN will
> > report non-sense stacks for allocation/free as well. So wrong call_rcu
> > stacks are less of a problem in such cases.
> >
> > 2. We would like to memorize 2 last call_rcu stacks regardless, but we
> > just don't have a good place for the index (bit which of the 2 is the
> > one to overwrite). Probably could shove it into some existing field,
> > but then will require atomic operations, etc.
> >
> > Nobody knows how well/bad it will work. I think we need to get the
> > first version in, deploy on syzbot, accumulate some base of example
> > reports and iterate from there.
>
> If I understood the stack-index point below, why not just move the
> previous stackm index to clobber the previous-to-previous stack index,
> then put the current stack index into the spot thus opened up?
We don't have any index in this change (there is no memory for such an index).
The pseudo code is:
u32 aux_stacks[2]; // = {0,0}

if (aux_stacks[0] != 0)
	aux_stacks[0] = stack;
else
	aux_stacks[1] = stack;
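For illustration, a minimal userspace sketch of that scheme as implemented
by kasan_record_aux_stack() in the patch: slot 0 is recorded only while it
is still empty (the first call_rcu() stack), slot 1 is overwritten on every
later call (the last call_rcu() stack). Plain uint32_t values stand in for
depot stack handles; this is a sketch, not the kernel code.

#include <stdint.h>
#include <stdio.h>

/* Record "first and last" without any extra index. */
static void record_aux_stack(uint32_t aux_stacks[2], uint32_t stack)
{
	if (aux_stacks[0] == 0)
		aux_stacks[0] = stack;	/* first call_rcu() call stack */
	else
		aux_stacks[1] = stack;	/* last call_rcu() call stack */
}

int main(void)
{
	uint32_t aux_stacks[2] = { 0, 0 };

	for (uint32_t i = 1; i <= 5; i++)
		record_aux_stack(aux_stacks, i);

	/* Prints "first=1 last=5". */
	printf("first=%u last=%u\n",
	       (unsigned)aux_stacks[0], (unsigned)aux_stacks[1]);
	return 0;
}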
> > > The first call stack doesn't forever associate with first call_rcu(),
> > > if someone object freed and reallocated, then the first call stack will
> > > replace with new object.
> > >
> > > > > When call_rcu() is called, we store the call_rcu() call stack into
> > > > > slub alloc meta-data, so that KASAN report can print rcu stack.
> > > > >
> > > > > It doesn't increase the cost of memory consumption. Because we don't
> > > > > enlarge struct kasan_alloc_meta size.
> > > > > - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> > > > > - remove free track from kasan_alloc_meta, size is 8 bytes.
> > > > >
> > > > > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> > > > > [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
> > > > >
> > > > > Signed-off-by: Walter Wu <[email protected]>
> > > > > Suggested-by: Dmitry Vyukov <[email protected]>
> > > > > Cc: Andrey Ryabinin <[email protected]>
> > > > > Cc: Dmitry Vyukov <[email protected]>
> > > > > Cc: Alexander Potapenko <[email protected]>
> > > > > Cc: Andrew Morton <[email protected]>
> > > > > Cc: Paul E. McKenney <[email protected]>
> > > > > Cc: Josh Triplett <[email protected]>
> > > > > Cc: Mathieu Desnoyers <[email protected]>
> > > > > Cc: Lai Jiangshan <[email protected]>
> > > > > Cc: Joel Fernandes <[email protected]>
> > > > > ---
> > > > > include/linux/kasan.h | 2 ++
> > > > > kernel/rcu/tree.c | 3 +++
> > > > > lib/Kconfig.kasan | 2 ++
> > > > > mm/kasan/common.c | 4 ++--
> > > > > mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> > > > > mm/kasan/kasan.h | 19 +++++++++++++++++++
> > > > > mm/kasan/report.c | 21 +++++++++++++++++----
> > > > > 7 files changed, 74 insertions(+), 6 deletions(-)
> > > > >
> > > > > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > > > > index 31314ca7c635..23b7ee00572d 100644
> > > > > --- a/include/linux/kasan.h
> > > > > +++ b/include/linux/kasan.h
> > > > > @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> > > > >
> > > > > void kasan_cache_shrink(struct kmem_cache *cache);
> > > > > void kasan_cache_shutdown(struct kmem_cache *cache);
> > > > > +void kasan_record_aux_stack(void *ptr);
> > > > >
> > > > > #else /* CONFIG_KASAN_GENERIC */
> > > > >
> > > > > static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> > > > > static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> > > > > +static inline void kasan_record_aux_stack(void *ptr) {}
> > > > >
> > > > > #endif /* CONFIG_KASAN_GENERIC */
> > > > >
> > > > > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > > > > index 06548e2ebb72..de872b6cc261 100644
> > > > > --- a/kernel/rcu/tree.c
> > > > > +++ b/kernel/rcu/tree.c
> > > > > @@ -57,6 +57,7 @@
> > > > > #include <linux/slab.h>
> > > > > #include <linux/sched/isolation.h>
> > > > > #include <linux/sched/clock.h>
> > > > > +#include <linux/kasan.h>
> > > > > #include "../time/tick-internal.h"
> > > > >
> > > > > #include "tree.h"
> > > > > @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> > > > > trace_rcu_callback(rcu_state.name, head,
> > > > > rcu_segcblist_n_cbs(&rdp->cblist));
> > > > >
> > > > > + kasan_record_aux_stack(head);
> > > >
> > > > Just for the record, at this point we have not yet queued the callback.
> > > > We have also not yet disabled interrupts. Which might be OK, but I
> > > > figured I should call out the possibility of moving this down a few
> > > > lines to follow the local_irq_save().
> > > >
> > >
> > > We will intend to do it.
> >
> > I will sleep better if we move it up :)
> > It qualifies a "debug check", which are generally done on entrance to
> > the function. Or are these all debug checks up to this point?
> > But if the callback did not leak anywhere up to this point and we will
> > maintain it that way, then formally it is fine.
>
> There are debug checks, then initialization of presumed private
> structures, disabling of interrupts, more check that are now safe given
> that we are pinned to a specific CPU, and so on.
>
> I am OK with it being at the beginning of the function.
>
> > > > If someone incorrectly invokes concurrently invokes call_rcu() on this
> > > > same region of memory, possibly from an interrupt handler, we are OK
> > > > corrupting the stack traces, right?
> > > >
> > >
> > > Yes, and the wrong invoking call_rcu should be recorded.
> > >
> > > > But what happens if a given structure has more than one rcu_head
> > > > structure? In that case, RCU would be just fine with it being
> > > > concurrently passed to different call_rcu() invocations as long as the
> > > > two invocations didn't both use the same rcu_head structure. (In that
> > > > case, they had best not be both freeing the object, and if even one of
> > > > them is freeing the object, coordination is necessary.)
> > > >
> > > > If this is a problem, one approach would be to move the
> > > > kasan_record_aux_stack(head) call to kfree_rcu(). After all, it is
> > > > definitely illegal to pass the same memory to a pair of kfree_rcu()
> > > > invocations! ;-)
> > > >
> > >
> > > The function of kasan_record_aux_stack(head) is simple, it is only to
> > > record call stack by the 'head' object.
> >
> > I would say "corrupting" stacks on some races is fine-ish. In the end
> > we are just storing an u32 stack id.
> > On syzbot we generally have multiple samples of the same crash, so
> > even if one is "corrupted" there may be others that are not corrupted.
> > Just protecting from this looks too complex and expensive. And in the
> > end there is not much we can do anyway.
> >
> > Recording all call_rcu stacks (not just kfree_rcu) is intentional. I
> > think it may be useful to even extend to recording workqueue and timer
> > stacks as well.
>
> Given the u32 nature of the stack ID, I agree that there is no point
> in excluding call_rcu(). At least until such time as we start getting
> false positives due to multiple rcu_head structures in the same structure.
>
> Thanx, Paul
>
> > > > > +
> > > > > /* Go handle any RCU core processing required. */
> > > > > if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> > > > > unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> > > > > diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> > > > > index 81f5464ea9e1..56a89291f1cc 100644
> > > > > --- a/lib/Kconfig.kasan
> > > > > +++ b/lib/Kconfig.kasan
> > > > > @@ -58,6 +58,8 @@ config KASAN_GENERIC
> > > > > For better error detection enable CONFIG_STACKTRACE.
> > > > > Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> > > > > (the resulting kernel does not boot).
> > > > > + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> > > > > + call stack. It doesn't increase the cost of memory consumption.
> > > > >
> > > > > config KASAN_SW_TAGS
> > > > > bool "Software tag-based mode"
> > > > > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > > > > index 2906358e42f0..8bc618289bb1 100644
> > > > > --- a/mm/kasan/common.c
> > > > > +++ b/mm/kasan/common.c
> > > > > @@ -41,7 +41,7 @@
> > > > > #include "kasan.h"
> > > > > #include "../slab.h"
> > > > >
> > > > > -static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > > > +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> > > > > {
> > > > > unsigned long entries[KASAN_STACK_DEPTH];
> > > > > unsigned int nr_entries;
> > > > > @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > > > static inline void set_track(struct kasan_track *track, gfp_t flags)
> > > > > {
> > > > > track->pid = current->pid;
> > > > > - track->stack = save_stack(flags);
> > > > > + track->stack = kasan_save_stack(flags);
> > > > > }
> > > > >
> > > > > void kasan_enable_current(void)
> > > > > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > > > > index 56ff8885fe2e..b86880c338e2 100644
> > > > > --- a/mm/kasan/generic.c
> > > > > +++ b/mm/kasan/generic.c
> > > > > @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> > > > > DEFINE_ASAN_SET_SHADOW(f3);
> > > > > DEFINE_ASAN_SET_SHADOW(f5);
> > > > > DEFINE_ASAN_SET_SHADOW(f8);
> > > > > +
> > > > > +void kasan_record_aux_stack(void *addr)
> > > > > +{
> > > > > + struct page *page = kasan_addr_to_page(addr);
> > > > > + struct kmem_cache *cache;
> > > > > + struct kasan_alloc_meta *alloc_info;
> > > > > + void *object;
> > > > > +
> > > > > + if (!(page && PageSlab(page)))
> > > > > + return;
> > > > > +
> > > > > + cache = page->slab_cache;
> > > > > + object = nearest_obj(cache, page, addr);
> > > > > + alloc_info = get_alloc_info(cache, object);
> > > > > +
> > > > > + if (!alloc_info->rcu_stack[0])
> > > > > + /* record first call_rcu() call stack */
> > > > > + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> > > > > + else
> > > > > + /* record last call_rcu() call stack */
> > > > > + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> > > > > +}
> > > > > +
> > > > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > > > + u8 idx)
> > > > > +{
> > > > > + return container_of(&alloc_info->rcu_stack[idx],
> > > > > + struct kasan_track, stack);
> > > > > +}
> > > > > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> > > > > index e8f37199d885..1cc1fb7b0de3 100644
> > > > > --- a/mm/kasan/kasan.h
> > > > > +++ b/mm/kasan/kasan.h
> > > > > @@ -96,15 +96,28 @@ struct kasan_track {
> > > > > depot_stack_handle_t stack;
> > > > > };
> > > > >
> > > > > +#ifdef CONFIG_KASAN_GENERIC
> > > > > +#define SIZEOF_PTR sizeof(void *)
> > > > > +#define KASAN_NR_RCU_CALL_STACKS 2
> > > > > +#else /* CONFIG_KASAN_GENERIC */
> > > > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > > > #define KASAN_NR_FREE_STACKS 5
> > > > > #else
> > > > > #define KASAN_NR_FREE_STACKS 1
> > > > > #endif
> > > > > +#endif /* CONFIG_KASAN_GENERIC */
> > > > >
> > > > > struct kasan_alloc_meta {
> > > > > struct kasan_track alloc_track;
> > > > > +#ifdef CONFIG_KASAN_GENERIC
> > > > > + /*
> > > > > + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> > > > > + * The free stack is stored into freed object.
> > > > > + */
> > > > > + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> > > > > +#else
> > > > > struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> > > > > +#endif
> > > > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > > > u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> > > > > u8 free_track_idx;
> > > > > @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
> > > > >
> > > > > struct page *kasan_addr_to_page(const void *addr);
> > > > >
> > > > > +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> > > > > +
> > > > > #if defined(CONFIG_KASAN_GENERIC) && \
> > > > > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> > > > > void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> > > > > void quarantine_reduce(void);
> > > > > void quarantine_remove_cache(struct kmem_cache *cache);
> > > > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > > > + u8 idx);
> > > > > #else
> > > > > static inline void quarantine_put(struct kasan_free_meta *info,
> > > > > struct kmem_cache *cache) { }
> > > > > static inline void quarantine_reduce(void) { }
> > > > > static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> > > > > +static inline struct kasan_track *kasan_get_aux_stack(
> > > > > + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> > > > > #endif
> > > > >
> > > > > #ifdef CONFIG_KASAN_SW_TAGS
> > > > > diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> > > > > index 80f23c9da6b0..f16a1a210815 100644
> > > > > --- a/mm/kasan/report.c
> > > > > +++ b/mm/kasan/report.c
> > > > > @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> > > > > kasan_enable_current();
> > > > > }
> > > > >
> > > > > -static void print_track(struct kasan_track *track, const char *prefix)
> > > > > +static void print_track(struct kasan_track *track, const char *prefix,
> > > > > + bool is_callrcu)
> > > > > {
> > > > > - pr_err("%s by task %u:\n", prefix, track->pid);
> > > > > + if (is_callrcu)
> > > > > + pr_err("%s:\n", prefix);
> > > > > + else
> > > > > + pr_err("%s by task %u:\n", prefix, track->pid);
> > > > > if (track->stack) {
> > > > > unsigned long *entries;
> > > > > unsigned int nr_entries;
> > > > > @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> > > > > if (cache->flags & SLAB_KASAN) {
> > > > > struct kasan_track *free_track;
> > > > >
> > > > > - print_track(&alloc_info->alloc_track, "Allocated");
> > > > > + print_track(&alloc_info->alloc_track, "Allocated", false);
> > > > > pr_err("\n");
> > > > > free_track = kasan_get_free_track(cache, object, tag);
> > > > > - print_track(free_track, "Freed");
> > > > > + print_track(free_track, "Freed", false);
> > > > > pr_err("\n");
> > > > > +
> > > > > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > > > > + free_track = kasan_get_aux_stack(alloc_info, 0);
> > > > > + print_track(free_track, "First call_rcu() call stack", true);
> > > > > + pr_err("\n");
> > > > > + free_track = kasan_get_aux_stack(alloc_info, 1);
> > > > > + print_track(free_track, "Last call_rcu() call stack", true);
> > > > > + pr_err("\n");
> > > > > + }
> > > > > }
> > > > >
> > > > > describe_object_addr(cache, object, addr);
> > > > > --
> > > > > 2.18.0
> > >
On Tue, May 12, 2020 at 05:50:28PM +0200, Dmitry Vyukov wrote:
> On Tue, May 12, 2020 at 4:25 PM Paul E. McKenney <[email protected]> wrote:
> >
> > On Tue, May 12, 2020 at 03:56:17PM +0200, Dmitry Vyukov wrote:
> > > On Tue, May 12, 2020 at 4:36 AM Walter Wu <[email protected]> wrote:
> > > >
> > > > On Mon, 2020-05-11 at 11:05 -0700, Paul E. McKenney wrote:
> > > > > On Mon, May 11, 2020 at 10:31:11AM +0800, Walter Wu wrote:
> > > > > > This feature will record first and last call_rcu() call stack and
> > > > > > print two call_rcu() call stack in KASAN report.
> > > > >
> > > > > Suppose that a given rcu_head structure is passed to call_rcu(), then
> > > > > the grace period elapses, the callback is invoked, and the enclosing
> > > > > data structure is freed. But then that same region of memory is
> > > > > immediately reallocated as the same type of structure and again
> > > > > passed to call_rcu(), and that this cycle repeats several times.
> > > > >
> > > > > Would the first call stack forever be associated with the first
> > > > > call_rcu() in this series? If so, wouldn't the last two usually
> > > > > be the most useful? Or am I unclear on the use case?
> > >
> > > 2 points here:
> > >
> > > 1. With KASAN the object won't be immediately reallocated. KASAN has
> > > 'quarantine' to delay reuse of heap objects. It is assumed that the
> > > object is still in quarantine when we detect a use-after-free. In such
> > > a case we will have proper call_rcu stacks as well.
> > > It is possible that the object is not in quarantine already and was
> > > reused several times (quarantine is not infinite), but then KASAN will
> > > report non-sense stacks for allocation/free as well. So wrong call_rcu
> > > stacks are less of a problem in such cases.
> > >
> > > 2. We would like to memorize 2 last call_rcu stacks regardless, but we
> > > just don't have a good place for the index (bit which of the 2 is the
> > > one to overwrite). Probably could shove it into some existing field,
> > > but then will require atomic operations, etc.
> > >
> > > Nobody knows how well/bad it will work. I think we need to get the
> > > first version in, deploy on syzbot, accumulate some base of example
> > > reports and iterate from there.
> >
> > If I understood the stack-index point below, why not just move the
> > previous stackm index to clobber the previous-to-previous stack index,
> > then put the current stack index into the spot thus opened up?
>
> We don't have any index in this change (don't have memory for such index).
> The pseudo code is"
>
> u32 aux_stacks[2]; // = {0,0}
>
> if (aux_stacks[0] != 0)
> aux_stacks[0] = stack;
> else
> aux_stacks[1] = stack;
I was thinking in terms of something like this:
u32 aux_stacks[2]; // = {0,0}

if (aux_stacks[0] != 0) {
	aux_stacks[0] = stack;
} else {
	if (aux_stacks[1])
		aux_stacks[0] = aux_stacks[1];
	aux_stacks[1] = stack;
}
Whether this actually makes sense in real life, I have no idea.
The theory is that you want the last two stacks. However, if these
elements get cleared at kfree() time, then I could easily believe that
the approach you already have (first and last) is the way to go.
Just asking the question, not arguing for a change!
Thanx, Paul
> > > > The first call stack doesn't forever associate with first call_rcu(),
> > > > if someone object freed and reallocated, then the first call stack will
> > > > replace with new object.
> > > >
> > > > > > When call_rcu() is called, we store the call_rcu() call stack into
> > > > > > slub alloc meta-data, so that KASAN report can print rcu stack.
> > > > > >
> > > > > > It doesn't increase the cost of memory consumption. Because we don't
> > > > > > enlarge struct kasan_alloc_meta size.
> > > > > > - add two call_rcu() call stack into kasan_alloc_meta, size is 8 bytes.
> > > > > > - remove free track from kasan_alloc_meta, size is 8 bytes.
> > > > > >
> > > > > > [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437
> > > > > > [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ
> > > > > >
> > > > > > Signed-off-by: Walter Wu <[email protected]>
> > > > > > Suggested-by: Dmitry Vyukov <[email protected]>
> > > > > > Cc: Andrey Ryabinin <[email protected]>
> > > > > > Cc: Dmitry Vyukov <[email protected]>
> > > > > > Cc: Alexander Potapenko <[email protected]>
> > > > > > Cc: Andrew Morton <[email protected]>
> > > > > > Cc: Paul E. McKenney <[email protected]>
> > > > > > Cc: Josh Triplett <[email protected]>
> > > > > > Cc: Mathieu Desnoyers <[email protected]>
> > > > > > Cc: Lai Jiangshan <[email protected]>
> > > > > > Cc: Joel Fernandes <[email protected]>
> > > > > > ---
> > > > > > include/linux/kasan.h | 2 ++
> > > > > > kernel/rcu/tree.c | 3 +++
> > > > > > lib/Kconfig.kasan | 2 ++
> > > > > > mm/kasan/common.c | 4 ++--
> > > > > > mm/kasan/generic.c | 29 +++++++++++++++++++++++++++++
> > > > > > mm/kasan/kasan.h | 19 +++++++++++++++++++
> > > > > > mm/kasan/report.c | 21 +++++++++++++++++----
> > > > > > 7 files changed, 74 insertions(+), 6 deletions(-)
> > > > > >
> > > > > > diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> > > > > > index 31314ca7c635..23b7ee00572d 100644
> > > > > > --- a/include/linux/kasan.h
> > > > > > +++ b/include/linux/kasan.h
> > > > > > @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> > > > > >
> > > > > > void kasan_cache_shrink(struct kmem_cache *cache);
> > > > > > void kasan_cache_shutdown(struct kmem_cache *cache);
> > > > > > +void kasan_record_aux_stack(void *ptr);
> > > > > >
> > > > > > #else /* CONFIG_KASAN_GENERIC */
> > > > > >
> > > > > > static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
> > > > > > static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> > > > > > +static inline void kasan_record_aux_stack(void *ptr) {}
> > > > > >
> > > > > > #endif /* CONFIG_KASAN_GENERIC */
> > > > > >
> > > > > > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > > > > > index 06548e2ebb72..de872b6cc261 100644
> > > > > > --- a/kernel/rcu/tree.c
> > > > > > +++ b/kernel/rcu/tree.c
> > > > > > @@ -57,6 +57,7 @@
> > > > > > #include <linux/slab.h>
> > > > > > #include <linux/sched/isolation.h>
> > > > > > #include <linux/sched/clock.h>
> > > > > > +#include <linux/kasan.h>
> > > > > > #include "../time/tick-internal.h"
> > > > > >
> > > > > > #include "tree.h"
> > > > > > @@ -2694,6 +2695,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
> > > > > > trace_rcu_callback(rcu_state.name, head,
> > > > > > rcu_segcblist_n_cbs(&rdp->cblist));
> > > > > >
> > > > > > + kasan_record_aux_stack(head);
> > > > >
> > > > > Just for the record, at this point we have not yet queued the callback.
> > > > > We have also not yet disabled interrupts. Which might be OK, but I
> > > > > figured I should call out the possibility of moving this down a few
> > > > > lines to follow the local_irq_save().
> > > > >
> > > >
> > > > We will intend to do it.
> > >
> > > I will sleep better if we move it up :)
> > > It qualifies a "debug check", which are generally done on entrance to
> > > the function. Or are these all debug checks up to this point?
> > > But if the callback did not leak anywhere up to this point and we will
> > > maintain it that way, then formally it is fine.
> >
> > There are debug checks, then initialization of presumed private
> > structures, disabling of interrupts, more check that are now safe given
> > that we are pinned to a specific CPU, and so on.
> >
> > I am OK with it being at the beginning of the function.
> >
> > > > > If someone incorrectly invokes concurrently invokes call_rcu() on this
> > > > > same region of memory, possibly from an interrupt handler, we are OK
> > > > > corrupting the stack traces, right?
> > > > >
> > > >
> > > > Yes, and the wrong invoking call_rcu should be recorded.
> > > >
> > > > > But what happens if a given structure has more than one rcu_head
> > > > > structure? In that case, RCU would be just fine with it being
> > > > > concurrently passed to different call_rcu() invocations as long as the
> > > > > two invocations didn't both use the same rcu_head structure. (In that
> > > > > case, they had best not be both freeing the object, and if even one of
> > > > > them is freeing the object, coordination is necessary.)
> > > > >
> > > > > If this is a problem, one approach would be to move the
> > > > > kasan_record_aux_stack(head) call to kfree_rcu(). After all, it is
> > > > > definitely illegal to pass the same memory to a pair of kfree_rcu()
> > > > > invocations! ;-)
> > > > >
> > > >
> > > > The function of kasan_record_aux_stack(head) is simple, it is only to
> > > > record call stack by the 'head' object.
> > >
> > > I would say "corrupting" stacks on some races is fine-ish. In the end
> > > we are just storing an u32 stack id.
> > > On syzbot we generally have multiple samples of the same crash, so
> > > even if one is "corrupted" there may be others that are not corrupted.
> > > Just protecting from this looks too complex and expensive. And in the
> > > end there is not much we can do anyway.
> > >
> > > Recording all call_rcu stacks (not just kfree_rcu) is intentional. I
> > > think it may be useful to even extend to recording workqueue and timer
> > > stacks as well.
> >
> > Given the u32 nature of the stack ID, I agree that there is no point
> > in excluding call_rcu(). At least until such time as we start getting
> > false positives due to multiple rcu_head structures in the same structure.
> >
> > Thanx, Paul
> >
> > > > > > +
> > > > > > /* Go handle any RCU core processing required. */
> > > > > > if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
> > > > > > unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
> > > > > > diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> > > > > > index 81f5464ea9e1..56a89291f1cc 100644
> > > > > > --- a/lib/Kconfig.kasan
> > > > > > +++ b/lib/Kconfig.kasan
> > > > > > @@ -58,6 +58,8 @@ config KASAN_GENERIC
> > > > > > For better error detection enable CONFIG_STACKTRACE.
> > > > > > Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
> > > > > > (the resulting kernel does not boot).
> > > > > > + Currently CONFIG_KASAN_GENERIC will print first and last call_rcu()
> > > > > > + call stack. It doesn't increase the cost of memory consumption.
> > > > > >
> > > > > > config KASAN_SW_TAGS
> > > > > > bool "Software tag-based mode"
> > > > > > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > > > > > index 2906358e42f0..8bc618289bb1 100644
> > > > > > --- a/mm/kasan/common.c
> > > > > > +++ b/mm/kasan/common.c
> > > > > > @@ -41,7 +41,7 @@
> > > > > > #include "kasan.h"
> > > > > > #include "../slab.h"
> > > > > >
> > > > > > -static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > > > > +depot_stack_handle_t kasan_save_stack(gfp_t flags)
> > > > > > {
> > > > > > unsigned long entries[KASAN_STACK_DEPTH];
> > > > > > unsigned int nr_entries;
> > > > > > @@ -54,7 +54,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags)
> > > > > > static inline void set_track(struct kasan_track *track, gfp_t flags)
> > > > > > {
> > > > > > track->pid = current->pid;
> > > > > > - track->stack = save_stack(flags);
> > > > > > + track->stack = kasan_save_stack(flags);
> > > > > > }
> > > > > >
> > > > > > void kasan_enable_current(void)
> > > > > > diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> > > > > > index 56ff8885fe2e..b86880c338e2 100644
> > > > > > --- a/mm/kasan/generic.c
> > > > > > +++ b/mm/kasan/generic.c
> > > > > > @@ -325,3 +325,32 @@ DEFINE_ASAN_SET_SHADOW(f2);
> > > > > > DEFINE_ASAN_SET_SHADOW(f3);
> > > > > > DEFINE_ASAN_SET_SHADOW(f5);
> > > > > > DEFINE_ASAN_SET_SHADOW(f8);
> > > > > > +
> > > > > > +void kasan_record_aux_stack(void *addr)
> > > > > > +{
> > > > > > + struct page *page = kasan_addr_to_page(addr);
> > > > > > + struct kmem_cache *cache;
> > > > > > + struct kasan_alloc_meta *alloc_info;
> > > > > > + void *object;
> > > > > > +
> > > > > > + if (!(page && PageSlab(page)))
> > > > > > + return;
> > > > > > +
> > > > > > + cache = page->slab_cache;
> > > > > > + object = nearest_obj(cache, page, addr);
> > > > > > + alloc_info = get_alloc_info(cache, object);
> > > > > > +
> > > > > > + if (!alloc_info->rcu_stack[0])
> > > > > > + /* record first call_rcu() call stack */
> > > > > > + alloc_info->rcu_stack[0] = kasan_save_stack(GFP_NOWAIT);
> > > > > > + else
> > > > > > + /* record last call_rcu() call stack */
> > > > > > + alloc_info->rcu_stack[1] = kasan_save_stack(GFP_NOWAIT);
> > > > > > +}
> > > > > > +
> > > > > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > > > > + u8 idx)
> > > > > > +{
> > > > > > + return container_of(&alloc_info->rcu_stack[idx],
> > > > > > + struct kasan_track, stack);
> > > > > > +}
> > > > > > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> > > > > > index e8f37199d885..1cc1fb7b0de3 100644
> > > > > > --- a/mm/kasan/kasan.h
> > > > > > +++ b/mm/kasan/kasan.h
> > > > > > @@ -96,15 +96,28 @@ struct kasan_track {
> > > > > > depot_stack_handle_t stack;
> > > > > > };
> > > > > >
> > > > > > +#ifdef CONFIG_KASAN_GENERIC
> > > > > > +#define SIZEOF_PTR sizeof(void *)
> > > > > > +#define KASAN_NR_RCU_CALL_STACKS 2
> > > > > > +#else /* CONFIG_KASAN_GENERIC */
> > > > > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > > > > #define KASAN_NR_FREE_STACKS 5
> > > > > > #else
> > > > > > #define KASAN_NR_FREE_STACKS 1
> > > > > > #endif
> > > > > > +#endif /* CONFIG_KASAN_GENERIC */
> > > > > >
> > > > > > struct kasan_alloc_meta {
> > > > > > struct kasan_track alloc_track;
> > > > > > +#ifdef CONFIG_KASAN_GENERIC
> > > > > > + /*
> > > > > > + * call_rcu() call stack is stored into struct kasan_alloc_meta.
> > > > > > + * The free stack is stored into freed object.
> > > > > > + */
> > > > > > + depot_stack_handle_t rcu_stack[KASAN_NR_RCU_CALL_STACKS];
> > > > > > +#else
> > > > > > struct kasan_track free_track[KASAN_NR_FREE_STACKS];
> > > > > > +#endif
> > > > > > #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> > > > > > u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
> > > > > > u8 free_track_idx;
> > > > > > @@ -159,16 +172,22 @@ void kasan_report_invalid_free(void *object, unsigned long ip);
> > > > > >
> > > > > > struct page *kasan_addr_to_page(const void *addr);
> > > > > >
> > > > > > +depot_stack_handle_t kasan_save_stack(gfp_t flags);
> > > > > > +
> > > > > > #if defined(CONFIG_KASAN_GENERIC) && \
> > > > > > (defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
> > > > > > void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
> > > > > > void quarantine_reduce(void);
> > > > > > void quarantine_remove_cache(struct kmem_cache *cache);
> > > > > > +struct kasan_track *kasan_get_aux_stack(struct kasan_alloc_meta *alloc_info,
> > > > > > + u8 idx);
> > > > > > #else
> > > > > > static inline void quarantine_put(struct kasan_free_meta *info,
> > > > > > struct kmem_cache *cache) { }
> > > > > > static inline void quarantine_reduce(void) { }
> > > > > > static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
> > > > > > +static inline struct kasan_track *kasan_get_aux_stack(
> > > > > > + struct kasan_alloc_meta *alloc_info, u8 idx) { return NULL; }
> > > > > > #endif
> > > > > >
> > > > > > #ifdef CONFIG_KASAN_SW_TAGS
> > > > > > diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> > > > > > index 80f23c9da6b0..f16a1a210815 100644
> > > > > > --- a/mm/kasan/report.c
> > > > > > +++ b/mm/kasan/report.c
> > > > > > @@ -105,9 +105,13 @@ static void end_report(unsigned long *flags)
> > > > > > kasan_enable_current();
> > > > > > }
> > > > > >
> > > > > > -static void print_track(struct kasan_track *track, const char *prefix)
> > > > > > +static void print_track(struct kasan_track *track, const char *prefix,
> > > > > > + bool is_callrcu)
> > > > > > {
> > > > > > - pr_err("%s by task %u:\n", prefix, track->pid);
> > > > > > + if (is_callrcu)
> > > > > > + pr_err("%s:\n", prefix);
> > > > > > + else
> > > > > > + pr_err("%s by task %u:\n", prefix, track->pid);
> > > > > > if (track->stack) {
> > > > > > unsigned long *entries;
> > > > > > unsigned int nr_entries;
> > > > > > @@ -187,11 +191,20 @@ static void describe_object(struct kmem_cache *cache, void *object,
> > > > > > if (cache->flags & SLAB_KASAN) {
> > > > > > struct kasan_track *free_track;
> > > > > >
> > > > > > - print_track(&alloc_info->alloc_track, "Allocated");
> > > > > > + print_track(&alloc_info->alloc_track, "Allocated", false);
> > > > > > pr_err("\n");
> > > > > > free_track = kasan_get_free_track(cache, object, tag);
> > > > > > - print_track(free_track, "Freed");
> > > > > > + print_track(free_track, "Freed", false);
> > > > > > pr_err("\n");
> > > > > > +
> > > > > > + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> > > > > > + free_track = kasan_get_aux_stack(alloc_info, 0);
> > > > > > + print_track(free_track, "First call_rcu() call stack", true);
> > > > > > + pr_err("\n");
> > > > > > + free_track = kasan_get_aux_stack(alloc_info, 1);
> > > > > > + print_track(free_track, "Last call_rcu() call stack", true);
> > > > > > + pr_err("\n");
> > > > > > + }
> > > > > > }
> > > > > >
> > > > > > describe_object_addr(cache, object, addr);
> > > > > > --
> > > > > > 2.18.0
> > > >
On Tue, May 12, 2020 at 6:14 PM Paul E. McKenney <[email protected]> wrote:
> > > > > > > This feature will record first and last call_rcu() call stack and
> > > > > > > print two call_rcu() call stack in KASAN report.
> > > > > >
> > > > > > Suppose that a given rcu_head structure is passed to call_rcu(), then
> > > > > > the grace period elapses, the callback is invoked, and the enclosing
> > > > > > data structure is freed. But then that same region of memory is
> > > > > > immediately reallocated as the same type of structure and again
> > > > > > passed to call_rcu(), and that this cycle repeats several times.
> > > > > >
> > > > > > Would the first call stack forever be associated with the first
> > > > > > call_rcu() in this series? If so, wouldn't the last two usually
> > > > > > be the most useful? Or am I unclear on the use case?
> > > >
> > > > 2 points here:
> > > >
> > > > 1. With KASAN the object won't be immediately reallocated. KASAN has
> > > > 'quarantine' to delay reuse of heap objects. It is assumed that the
> > > > object is still in quarantine when we detect a use-after-free. In such
> > > > a case we will have proper call_rcu stacks as well.
> > > > It is possible that the object is not in quarantine already and was
> > > > reused several times (quarantine is not infinite), but then KASAN will
> > > > report non-sense stacks for allocation/free as well. So wrong call_rcu
> > > > stacks are less of a problem in such cases.
> > > >
> > > > 2. We would like to memorize 2 last call_rcu stacks regardless, but we
> > > > just don't have a good place for the index (bit which of the 2 is the
> > > > one to overwrite). Probably could shove it into some existing field,
> > > > but then will require atomic operations, etc.
> > > >
> > > > Nobody knows how well/bad it will work. I think we need to get the
> > > > first version in, deploy on syzbot, accumulate some base of example
> > > > reports and iterate from there.
> > >
> > > If I understood the stack-index point below, why not just move the
> > > previous stackm index to clobber the previous-to-previous stack index,
> > > then put the current stack index into the spot thus opened up?
> >
> > We don't have any index in this change (don't have memory for such index).
> > The pseudo code is"
> >
> > u32 aux_stacks[2]; // = {0,0}
> >
> > if (aux_stacks[0] != 0)
> > aux_stacks[0] = stack;
> > else
> > aux_stacks[1] = stack;
>
> I was thinking in terms of something like this:
>
> u32 aux_stacks[2]; // = {0,0}
>
> if (aux_stacks[0] != 0) {
> aux_stacks[0] = stack;
> } else {
> if (aux_stacks[1])
> aux_stacks[0] = aux_stacks[1];
> aux_stacks[1] = stack;
> }
>
> Whether this actually makes sense in real life, I have no idea.
> The theory is that you want the last two stacks. However, if these
> elements get cleared at kfree() time, then I could easily believe that
> the approach you already have (first and last) is the way to go.
>
> Just asking the question, not arguing for a change!
Oh, this is so obvious... in hindsight! :)
Walter, what do you think?
I would do this. I think the later stacks are generally more interesting
wrt shedding light on a bug. The first stack may even be "statically
known" (e.g. if the object is always queued into a workqueue for some lazy
initialization during construction).
On Tue, 2020-05-12 at 16:03 +0200, Dmitry Vyukov wrote:
> On Tue, May 12, 2020 at 5:38 AM Walter Wu <[email protected]> wrote:
> > > Are you sure it will increase object size?
> > > I think we overlap kasan_free_meta with the object as well. The only
> > > case we don't overlap kasan_free_meta with the object are
> > > SLAB_TYPESAFE_BY_RCU || cache->ctor. But these are rare and it should
> > > only affect small objects with small redzones.
> > > And I think now we simply have a bug for these objects, we check
> > > KASAN_KMALLOC_FREE and then assume object contains free stack, but for
> > > objects with ctor, they still contain live object data, we don't store
> > > free stack in them.
> > > Such objects can be both free and still contain user data.
> > >
> >
> > Overlay kasan_free_meta. I see. but overlay it only when the object was
> > freed. kasan_free_meta will be used until free object.
> > 1). When put object into quarantine, it need kasan_free_meta.
> > 2). When the object exit from quarantine, it need kasan_free_meta
> >
> > If we choose to overlay kasan_free_meta, then the free stack will be
> > stored very late. It may has no free stack in report.
>
> Sorry, I don't understand what you mean.
>
> Why will it be stored too late?
> In __kasan_slab_free() putting into quarantine and recording free
> stack are literally adjacent lines of code:
>
> static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
> unsigned long ip, bool quarantine)
> {
> ...
> kasan_set_free_info(cache, object, tag);
> quarantine_put(get_free_info(cache, object), cache);
>
>
> Just to make sure, what I meant is that we add free_track to kasan_free_meta:
>
> struct kasan_free_meta {
> struct qlist_node quarantine_link;
> + struct kasan_track free_track;
> };
>
When I see the struct kasan_free_meta above, I understand why you didn't
follow my meaning: I thought you were going to overlay quarantine_link
with free_track, but it seems you intend to add free_track to
kasan_free_meta. Doesn't that enlarge the meta-data size?
> And I think its life-time and everything should be exactly what we need.
>
> Also it should help to fix the problem with ctors: kasan_free_meta is
> already allocated on the side for such objects, and that's exactly
> what we need for objects with ctor's.
I see.
On Tue, 2020-05-12 at 18:22 +0200, Dmitry Vyukov wrote:
> On Tue, May 12, 2020 at 6:14 PM Paul E. McKenney <[email protected]> wrote:
> > > > > > > > This feature will record first and last call_rcu() call stack and
> > > > > > > > print two call_rcu() call stack in KASAN report.
> > > > > > >
> > > > > > > Suppose that a given rcu_head structure is passed to call_rcu(), then
> > > > > > > the grace period elapses, the callback is invoked, and the enclosing
> > > > > > > data structure is freed. But then that same region of memory is
> > > > > > > immediately reallocated as the same type of structure and again
> > > > > > > passed to call_rcu(), and that this cycle repeats several times.
> > > > > > >
> > > > > > > Would the first call stack forever be associated with the first
> > > > > > > call_rcu() in this series? If so, wouldn't the last two usually
> > > > > > > be the most useful? Or am I unclear on the use case?
> > > > >
> > > > > 2 points here:
> > > > >
> > > > > 1. With KASAN the object won't be immediately reallocated. KASAN has
> > > > > 'quarantine' to delay reuse of heap objects. It is assumed that the
> > > > > object is still in quarantine when we detect a use-after-free. In such
> > > > > a case we will have proper call_rcu stacks as well.
> > > > > It is possible that the object is not in quarantine already and was
> > > > > reused several times (quarantine is not infinite), but then KASAN will
> > > > > report non-sense stacks for allocation/free as well. So wrong call_rcu
> > > > > stacks are less of a problem in such cases.
> > > > >
> > > > > 2. We would like to memorize 2 last call_rcu stacks regardless, but we
> > > > > just don't have a good place for the index (bit which of the 2 is the
> > > > > one to overwrite). Probably could shove it into some existing field,
> > > > > but then will require atomic operations, etc.
> > > > >
> > > > > Nobody knows how well/bad it will work. I think we need to get the
> > > > > first version in, deploy on syzbot, accumulate some base of example
> > > > > reports and iterate from there.
> > > >
> > > > If I understood the stack-index point below, why not just move the
> > > > previous stackm index to clobber the previous-to-previous stack index,
> > > > then put the current stack index into the spot thus opened up?
> > >
> > > We don't have any index in this change (don't have memory for such index).
> > > The pseudo code is"
> > >
> > > u32 aux_stacks[2]; // = {0,0}
> > >
> > > if (aux_stacks[0] != 0)
> > > aux_stacks[0] = stack;
> > > else
> > > aux_stacks[1] = stack;
> >
> > I was thinking in terms of something like this:
> >
> > u32 aux_stacks[2]; // = {0,0}
> >
> > if (aux_stacks[0] != 0) {
> > aux_stacks[0] = stack;
> > } else {
> > if (aux_stacks[1])
> > aux_stacks[0] = aux_stacks[1];
> > aux_stacks[1] = stack;
> > }
> >
> > Whether this actually makes sense in real life, I have no idea.
> > The theory is that you want the last two stacks. However, if these
> > elements get cleared at kfree() time, then I could easily believe that
> > the approach you already have (first and last) is the way to go.
> >
> > Just asking the question, not arguing for a change!
>
> Oh, this is so obvious... in hindsight! :)
>
> Walter, what do you think?
>
u32 aux_stacks[2]; // = {0,0}

if (aux_stacks[0] != 0) {
	aux_stacks[0] = stack;
} else {
	if (aux_stacks[1])
		aux_stacks[0] = aux_stacks[1];
	aux_stacks[1] = stack;
}
Hmm... I think this will always overwrite aux_stacks[0] once aux_stacks[0]
already holds a stack, so it would not record the last two stacks, would it?
How about this:
u32 aux_stacks[2]; // = {0,0}

if (aux_stacks[1])
	aux_stacks[0] = aux_stacks[1];
aux_stacks[1] = stack;
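As a quick userspace check of that variant (uint32_t values standing in
for depot stack handles; a sketch only), after five calls the array ends
up holding the last two recorded stacks:

#include <stdint.h>
#include <stdio.h>

/* Shift the previous "last" stack into slot 0, then record the new one
 * in slot 1, so the two most recent stacks are kept without an index. */
static void record_last_two(uint32_t aux_stacks[2], uint32_t stack)
{
	if (aux_stacks[1])
		aux_stacks[0] = aux_stacks[1];
	aux_stacks[1] = stack;
}

int main(void)
{
	uint32_t aux_stacks[2] = { 0, 0 };

	for (uint32_t i = 1; i <= 5; i++)
		record_last_two(aux_stacks, i);

	/* Prints "aux_stacks = {4, 5}". */
	printf("aux_stacks = {%u, %u}\n",
	       (unsigned)aux_stacks[0], (unsigned)aux_stacks[1]);
	return 0;
}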
> I would do this. I think latter stacks are generally more interesting
> wrt shedding light on a bug. The first stack may even be "statically
> known" (e.g. if object is always queued into a workqueue for some lazy
> initialization during construction).
I think it makes more sense to record the later stacks, too.
Thanks for your and Paul's suggestion.
On Wed, May 13, 2020 at 10:05:31AM +0800, Walter Wu wrote:
> On Tue, 2020-05-12 at 18:22 +0200, Dmitry Vyukov wrote:
> > On Tue, May 12, 2020 at 6:14 PM Paul E. McKenney <[email protected]> wrote:
> > > > > > > > > This feature will record first and last call_rcu() call stack and
> > > > > > > > > print two call_rcu() call stack in KASAN report.
> > > > > > > >
> > > > > > > > Suppose that a given rcu_head structure is passed to call_rcu(), then
> > > > > > > > the grace period elapses, the callback is invoked, and the enclosing
> > > > > > > > data structure is freed. But then that same region of memory is
> > > > > > > > immediately reallocated as the same type of structure and again
> > > > > > > > passed to call_rcu(), and that this cycle repeats several times.
> > > > > > > >
> > > > > > > > Would the first call stack forever be associated with the first
> > > > > > > > call_rcu() in this series? If so, wouldn't the last two usually
> > > > > > > > be the most useful? Or am I unclear on the use case?
> > > > > >
> > > > > > 2 points here:
> > > > > >
> > > > > > 1. With KASAN the object won't be immediately reallocated. KASAN has
> > > > > > 'quarantine' to delay reuse of heap objects. It is assumed that the
> > > > > > object is still in quarantine when we detect a use-after-free. In such
> > > > > > a case we will have proper call_rcu stacks as well.
> > > > > > It is possible that the object is not in quarantine already and was
> > > > > > reused several times (quarantine is not infinite), but then KASAN will
> > > > > > report non-sense stacks for allocation/free as well. So wrong call_rcu
> > > > > > stacks are less of a problem in such cases.
> > > > > >
> > > > > > 2. We would like to memorize 2 last call_rcu stacks regardless, but we
> > > > > > just don't have a good place for the index (bit which of the 2 is the
> > > > > > one to overwrite). Probably could shove it into some existing field,
> > > > > > but then will require atomic operations, etc.
> > > > > >
> > > > > > Nobody knows how well/bad it will work. I think we need to get the
> > > > > > first version in, deploy on syzbot, accumulate some base of example
> > > > > > reports and iterate from there.
> > > > >
> > > > > If I understood the stack-index point below, why not just move the
> > > > > previous stackm index to clobber the previous-to-previous stack index,
> > > > > then put the current stack index into the spot thus opened up?
> > > >
> > > > We don't have any index in this change (don't have memory for such index).
> > > > The pseudo code is"
> > > >
> > > > u32 aux_stacks[2]; // = {0,0}
> > > >
> > > > if (aux_stacks[0] != 0)
> > > > aux_stacks[0] = stack;
> > > > else
> > > > aux_stacks[1] = stack;
> > >
> > > I was thinking in terms of something like this:
> > >
> > > u32 aux_stacks[2]; // = {0,0}
> > >
> > > if (aux_stacks[0] != 0) {
> > > aux_stacks[0] = stack;
> > > } else {
> > > if (aux_stacks[1])
> > > aux_stacks[0] = aux_stacks[1];
> > > aux_stacks[1] = stack;
> > > }
> > >
> > > Whether this actually makes sense in real life, I have no idea.
> > > The theory is that you want the last two stacks. However, if these
> > > elements get cleared at kfree() time, then I could easily believe that
> > > the approach you already have (first and last) is the way to go.
> > >
> > > Just asking the question, not arguing for a change!
> >
> > Oh, this is so obvious... in hindsight! :)
> >
> > Walter, what do you think?
> >
>
> u32 aux_stacks[2]; // = {0,0}
>
> if (aux_stacks[0] != 0) {
> aux_stacks[0] = stack;
> } else {
> if (aux_stacks[1])
> aux_stacks[0] = aux_stacks[1];
> aux_stacks[1] = stack;
> }
>
> Hmm...why I think it will always cover aux_stacks[0] after aux_stacks[0]
> has stack, it should not record last two stacks?
>
> How about this:
>
> u32 aux_stacks[2]; // = {0,0}
>
> if (aux_stacks[1])
> aux_stacks[0] = aux_stacks[1];
> aux_stacks[1] = stack;
Even better! ;-)
Thanx, Paul
> > I would do this. I think latter stacks are generally more interesting
> > wrt shedding light on a bug. The first stack may even be "statically
> > known" (e.g. if object is always queued into a workqueue for some lazy
> > initialization during construction).
>
> I think it make more sense to record latter stack, too.
>
> Thanks for your and Paul's suggestion.
>
>
On Wed, May 13, 2020 at 3:48 AM Walter Wu <[email protected]> wrote:
> > > > Are you sure it will increase object size?
> > > > I think we overlap kasan_free_meta with the object as well. The only
> > > > case we don't overlap kasan_free_meta with the object are
> > > > SLAB_TYPESAFE_BY_RCU || cache->ctor. But these are rare and it should
> > > > only affect small objects with small redzones.
> > > > And I think now we simply have a bug for these objects, we check
> > > > KASAN_KMALLOC_FREE and then assume object contains free stack, but for
> > > > objects with ctor, they still contain live object data, we don't store
> > > > free stack in them.
> > > > Such objects can be both free and still contain user data.
> > > >
> > >
> > > Overlay kasan_free_meta. I see. but overlay it only when the object was
> > > freed. kasan_free_meta will be used until free object.
> > > 1). When put object into quarantine, it need kasan_free_meta.
> > > 2). When the object exit from quarantine, it need kasan_free_meta
> > >
> > > If we choose to overlay kasan_free_meta, then the free stack will be
> > > stored very late. It may has no free stack in report.
> >
> > Sorry, I don't understand what you mean.
> >
> > Why will it be stored too late?
> > In __kasan_slab_free() putting into quarantine and recording free
> > stack are literally adjacent lines of code:
> >
> > static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
> > unsigned long ip, bool quarantine)
> > {
> > ...
> > kasan_set_free_info(cache, object, tag);
> > quarantine_put(get_free_info(cache, object), cache);
> >
> >
> > Just to make sure, what I meant is that we add free_track to kasan_free_meta:
> >
> > struct kasan_free_meta {
> > struct qlist_node quarantine_link;
> > + struct kasan_track free_track;
> > };
> >
>
> When I see above struct kasan_free_meta, I know why you don't understand
> my meaning, because I thought you were going to overlay the
> quarantine_link by free_track, but it seems like to add free_track to
> kasan_free_meta. Does it enlarge meta-data size?
I would assume it should not increase meta-data size. In both cases we
store exactly the same information inside of the object: quarantine
link and free track.
I see it more as a question of code organization. We already have a
concept of "this data is placed inside of the freed object", we
already have a name for it (kasan_free_meta), we already have code to
choose where to place it, we already have helper functions to access
it. And your change effectively duplicates all of this to place the
free track.
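For illustration only, here is a small userspace model of that idea, with
stand-in types rather than the real kernel definitions: the free meta-data,
including the proposed free_track, is overlaid on the freed object itself,
so the on-the-side meta-data does not grow.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel types, for illustration only. */
typedef uint32_t depot_stack_handle_t;
struct qlist_node { struct qlist_node *next; };

struct kasan_track {
	uint32_t pid;
	depot_stack_handle_t stack;
};

/* Proposed layout from the snippet quoted above: free_track sits next to
 * quarantine_link, and the whole structure lives inside the freed object. */
struct kasan_free_meta {
	struct qlist_node quarantine_link;
	struct kasan_track free_track;
};

/* A freed object and its meta-data view share the same memory. */
union freed_object {
	unsigned char bytes[64];
	struct kasan_free_meta meta;
};

int main(void)
{
	union freed_object obj;

	/* "Freeing" the object: the free track is stored inside it. */
	obj.meta.free_track.pid = 1;
	obj.meta.free_track.stack = 0x1234;

	printf("kasan_free_meta: %zu bytes, stored inside a %zu-byte object\n",
	       sizeof(struct kasan_free_meta), sizeof(obj.bytes));
	return 0;
}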
> > And I think its life-time and everything should be exactly what we need.
> >
> > Also it should help to fix the problem with ctors: kasan_free_meta is
> > already allocated on the side for such objects, and that's exactly
> > what we need for objects with ctor's.
>
> I see.
On Wed, 2020-05-13 at 08:51 +0200, 'Dmitry Vyukov' via kasan-dev wrote:
> On Wed, May 13, 2020 at 3:48 AM Walter Wu <[email protected]> wrote:
> > > > > Are you sure it will increase object size?
> > > > > I think we overlap kasan_free_meta with the object as well. The only
> > > > > case we don't overlap kasan_free_meta with the object are
> > > > > SLAB_TYPESAFE_BY_RCU || cache->ctor. But these are rare and it should
> > > > > only affect small objects with small redzones.
> > > > > And I think now we simply have a bug for these objects, we check
> > > > > KASAN_KMALLOC_FREE and then assume object contains free stack, but for
> > > > > objects with ctor, they still contain live object data, we don't store
> > > > > free stack in them.
> > > > > Such objects can be both free and still contain user data.
> > > > >
> > > >
> > > > Overlay kasan_free_meta. I see. but overlay it only when the object was
> > > > freed. kasan_free_meta will be used until free object.
> > > > 1). When put object into quarantine, it need kasan_free_meta.
> > > > 2). When the object exit from quarantine, it need kasan_free_meta
> > > >
> > > > If we choose to overlay kasan_free_meta, then the free stack will be
> > > > stored very late. It may has no free stack in report.
> > >
> > > Sorry, I don't understand what you mean.
> > >
> > > Why will it be stored too late?
> > > In __kasan_slab_free() putting into quarantine and recording free
> > > stack are literally adjacent lines of code:
> > >
> > > static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
> > > unsigned long ip, bool quarantine)
> > > {
> > > ...
> > > kasan_set_free_info(cache, object, tag);
> > > quarantine_put(get_free_info(cache, object), cache);
> > >
> > >
> > > Just to make sure, what I meant is that we add free_track to kasan_free_meta:
> > >
> > > struct kasan_free_meta {
> > > struct qlist_node quarantine_link;
> > > + struct kasan_track free_track;
> > > };
> > >
> >
> > When I see above struct kasan_free_meta, I know why you don't understand
> > my meaning, because I thought you were going to overlay the
> > quarantine_link by free_track, but it seems like to add free_track to
> > kasan_free_meta. Does it enlarge meta-data size?
>
> I would assume it should not increase meta-data size. In both cases we
> store exactly the same information inside of the object: quarantine
> link and free track.
> I see it more as a question of code organization. We already have a
> concept of "this data is placed inside of the freed object", we
> already have a name for it (kasan_free_meta), we already have code to
> choose where to place it, we already have helper functions to access
> it. And your change effectively duplicates all of this to place the
> free track.
>
I want to make a summary. Which of the following approaches do we want?
If I have misunderstood something, please correct me. Thanks.
1) Handle objects in two different ways:
1.a) When the object is SLAB_TYPESAFE_BY_RCU || cache->ctor, store the free
stack in the free track of struct kasan_free_meta.
1.b) Otherwise, store the free stack in the freed object.
or
2) We always store the free stack in the free track of struct kasan_free_meta
> > > And I think its life-time and everything should be exactly what we need.
> > >
> > > Also it should help to fix the problem with ctors: kasan_free_meta is
> > > already allocated on the side for such objects, and that's exactly
> > > what we need for objects with ctor's.
> >
> > I see.
>
On Wed, May 13, 2020 at 11:05 AM Walter Wu <[email protected]> wrote:
>
> On Wed, 2020-05-13 at 08:51 +0200, 'Dmitry Vyukov' via kasan-dev wrote:
> > On Wed, May 13, 2020 at 3:48 AM Walter Wu <[email protected]> wrote:
> > > > > > Are you sure it will increase object size?
> > > > > > I think we overlap kasan_free_meta with the object as well. The only
> > > > > > case we don't overlap kasan_free_meta with the object are
> > > > > > SLAB_TYPESAFE_BY_RCU || cache->ctor. But these are rare and it should
> > > > > > only affect small objects with small redzones.
> > > > > > And I think now we simply have a bug for these objects, we check
> > > > > > KASAN_KMALLOC_FREE and then assume object contains free stack, but for
> > > > > > objects with ctor, they still contain live object data, we don't store
> > > > > > free stack in them.
> > > > > > Such objects can be both free and still contain user data.
> > > > > >
> > > > >
> > > > > Overlay kasan_free_meta. I see. but overlay it only when the object was
> > > > > freed. kasan_free_meta will be used until free object.
> > > > > 1). When put object into quarantine, it need kasan_free_meta.
> > > > > 2). When the object exit from quarantine, it need kasan_free_meta
> > > > >
> > > > > If we choose to overlay kasan_free_meta, then the free stack will be
> > > > > stored very late. It may has no free stack in report.
> > > >
> > > > Sorry, I don't understand what you mean.
> > > >
> > > > Why will it be stored too late?
> > > > In __kasan_slab_free() putting into quarantine and recording free
> > > > stack are literally adjacent lines of code:
> > > >
> > > > static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
> > > > unsigned long ip, bool quarantine)
> > > > {
> > > > ...
> > > > kasan_set_free_info(cache, object, tag);
> > > > quarantine_put(get_free_info(cache, object), cache);
> > > >
> > > >
> > > > Just to make sure, what I meant is that we add free_track to kasan_free_meta:
> > > >
> > > > struct kasan_free_meta {
> > > > struct qlist_node quarantine_link;
> > > > + struct kasan_track free_track;
> > > > };
> > > >
> > >
> > > When I see above struct kasan_free_meta, I know why you don't understand
> > > my meaning, because I thought you were going to overlay the
> > > quarantine_link by free_track, but it seems like to add free_track to
> > > kasan_free_meta. Does it enlarge meta-data size?
> >
> > I would assume it should not increase meta-data size. In both cases we
> > store exactly the same information inside of the object: quarantine
> > link and free track.
> > I see it more as a question of code organization. We already have a
> > concept of "this data is placed inside of the freed object", we
> > already have a name for it (kasan_free_meta), we already have code to
> > choose where to place it, we already have helper functions to access
> > it. And your change effectively duplicates all of this to place the
> > free track.
> >
>
> I want to make a summary. Which of the following is the approach we
> want? or if I have some misunderstandings, please help me to correct.
> Thanks.
>
> 1) For different object, then it will has two ways.
> 1.a) When object are LAB_TYPESAFE_BY_RCU || cache->ctor, then store free
> stack into free track of struct kasan_free_meta.
> 2.b) Except 1.a), store free stack into freed object.
>
> or
>
> 2) We always store free stack into free track of struct kasan_free_meta
I meant 2): we always store the free stack in the free track of struct
kasan_free_meta.
I think it will do the same as the other options, just with less code
(and simpler code).
Maybe I am missing something here?
> > > > And I think its life-time and everything should be exactly what we need.
> > > >
> > > > Also it should help to fix the problem with ctors: kasan_free_meta is
> > > > already allocated on the side for such objects, and that's exactly
> > > > what we need for objects with ctor's.
> > >
> > > I see.
On Wed, 2020-05-13 at 11:16 +0200, Dmitry Vyukov wrote:
> On Wed, May 13, 2020 at 11:05 AM Walter Wu <[email protected]> wrote:
> >
> > On Wed, 2020-05-13 at 08:51 +0200, 'Dmitry Vyukov' via kasan-dev wrote:
> > > On Wed, May 13, 2020 at 3:48 AM Walter Wu <[email protected]> wrote:
> > > > > > > Are you sure it will increase the object size?
> > > > > > > I think we overlap kasan_free_meta with the object as well. The only
> > > > > > > cases where we don't overlap kasan_free_meta with the object are
> > > > > > > SLAB_TYPESAFE_BY_RCU || cache->ctor. But these are rare and it should
> > > > > > > only affect small objects with small redzones.
> > > > > > > And I think we currently have a bug for these objects: we check
> > > > > > > KASAN_KMALLOC_FREE and then assume the object contains the free stack,
> > > > > > > but objects with a ctor still contain live object data and we don't
> > > > > > > store the free stack in them.
> > > > > > > Such objects can be both free and still contain user data.
> > > > > > >
> > > > > >
> > > > > > Overlay kasan_free_meta. I see, but we can only overlay it after the
> > > > > > object was freed, and kasan_free_meta is still used until the object is
> > > > > > actually freed:
> > > > > > 1). When the object is put into quarantine, it needs kasan_free_meta.
> > > > > > 2). When the object leaves quarantine, it needs kasan_free_meta.
> > > > > >
> > > > > > If we choose to overlay kasan_free_meta, then the free stack will be
> > > > > > stored very late. The report may have no free stack.
> > > > >
> > > > > Sorry, I don't understand what you mean.
> > > > >
> > > > > Why will it be stored too late?
> > > > > In __kasan_slab_free() putting into quarantine and recording free
> > > > > stack are literally adjacent lines of code:
> > > > >
> > > > > static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
> > > > > unsigned long ip, bool quarantine)
> > > > > {
> > > > > ...
> > > > > kasan_set_free_info(cache, object, tag);
> > > > > quarantine_put(get_free_info(cache, object), cache);
> > > > >
> > > > >
> > > > > Just to make sure, what I meant is that we add free_track to kasan_free_meta:
> > > > >
> > > > > struct kasan_free_meta {
> > > > > struct qlist_node quarantine_link;
> > > > > + struct kasan_track free_track;
> > > > > };
> > > > >
> > > >
> > > > When I see the struct kasan_free_meta above, I understand why you didn't
> > > > follow my meaning: I thought you were going to overlay quarantine_link
> > > > with free_track, but it seems you want to add free_track to
> > > > kasan_free_meta. Doesn't that enlarge the meta-data size?
> > >
> > > I would assume it should not increase meta-data size. In both cases we
> > > store exactly the same information inside of the object: quarantine
> > > link and free track.
> > > I see it more as a question of code organization. We already have a
> > > concept of "this data is placed inside of the freed object", we
> > > already have a name for it (kasan_free_meta), we already have code to
> > > choose where to place it, we already have helper functions to access
> > > it. And your change effectively duplicates all of this to place the
> > > free track.
> > >
> >
> > Let me summarize. Which of the following approaches do we want? If I
> > have misunderstood something, please correct me. Thanks.
> >
> > 1) Handle objects in two different ways:
> > 1.a) When the object is SLAB_TYPESAFE_BY_RCU || cache->ctor, store the
> > free stack in the free track of struct kasan_free_meta.
> > 1.b) Otherwise, store the free stack in the freed object itself.
> >
> > or
> >
> > 2) We always store the free stack in the free track of struct kasan_free_meta.
>
> I meant 2): We always store the free stack in the free track of struct
> kasan_free_meta.
> I think it will do the same as the other option, just with less code
> (and simpler code).
>
> Maybe I am missing something here?
>
You are right, I just wanted a final confirmation from you. There
should be no problems now; I will try to implement it.
Thank you for your good suggestion.
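
A rough sketch of what the report side could then do once the free track
always lives in kasan_free_meta; the helper names below are hypothetical,
only the stack_depot/stack_trace calls are the existing kernel APIs:

	/* Sketch only: fetch and print the free stack kept in kasan_free_meta. */
	static struct kasan_track *get_free_track(struct kmem_cache *cache,
						  void *object)
	{
		return &get_free_info(cache, object)->free_track;
	}

	static void print_free_track(struct kmem_cache *cache, void *object)
	{
		struct kasan_track *track = get_free_track(cache, object);
		unsigned long *entries;
		unsigned int nr_entries;

		pr_err("Freed by task %u:\n", track->pid);
		if (!track->stack)
			return;
		nr_entries = stack_depot_fetch(track->stack, &entries);
		stack_trace_print(entries, nr_entries, 0);
	}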
>
>
>
> > > > > And I think its life-time and everything should be exactly what we need.
> > > > >
> > > > > Also it should help to fix the problem with ctors: kasan_free_meta is
> > > > > already allocated on the side for such objects, and that's exactly
> > > > > what we need for objects with ctor's.
> > > >
> > > > I see.