When KASAN multi-shot is ON and some buggy code hits the same KASAN
error path repetitively, it can flood the console logs.

Check the allocation, free and backtrace paths at the time of a KASAN
error; if they all match an earlier report, it is a duplicate error and
KASAN skips printing it.
Co-developed-by: Vaneet Narang <[email protected]>
Signed-off-by: Vaneet Narang <[email protected]>
Signed-off-by: Maninder Singh <[email protected]>
---
mm/kasan/kasan.h | 6 +++++
mm/kasan/report.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 73 insertions(+)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 78cf99247139..d14ccce246ba 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -102,6 +102,12 @@ struct kasan_access_info {
unsigned long ip;
};
+struct kasan_record {
+ depot_stack_handle_t bt_handle;
+ depot_stack_handle_t alloc_handle;
+ depot_stack_handle_t free_handle;
+};
+
/* The layout of struct dictated by compiler */
struct kasan_source_location {
const char *filename;
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 87b271206163..4576de76991b 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -39,6 +39,10 @@ static unsigned long kasan_flags;
#define KASAN_BIT_REPORTED 0
#define KASAN_BIT_MULTI_SHOT 1
+#define MAX_RECORDS (200)
+static struct kasan_record kasan_records[MAX_RECORDS];
+static int stored_kasan_records;
+
bool kasan_save_enable_multi_shot(void)
{
return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
@@ -360,6 +364,65 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
end_report(&flags, (unsigned long)object);
}
+/*
+ * @save_report()
+ *
+ * returns false if same record is already saved.
+ * returns true if its new record and saved in database of KASAN.
+ */
+static bool save_report(void *addr, struct kasan_access_info *info, u8 tag, unsigned long *flags)
+{
+ struct kasan_record record = {0};
+ depot_stack_handle_t bt_handle;
+ int i = 0;
+ const char *bug_type;
+ struct kasan_alloc_meta *alloc_meta;
+ struct kasan_track *free_track;
+ struct page *page;
+ bool ret = true;
+
+ kasan_disable_current();
+ spin_lock_irqsave(&report_lock, *flags);
+
+ bug_type = kasan_get_bug_type(info);
+ page = kasan_addr_to_page(addr);
+ bt_handle = kasan_save_stack(GFP_KERNEL);
+
+ if (page && PageSlab(page)) {
+ struct kmem_cache *cache = page->slab_cache;
+ void *object = nearest_obj(cache, page, addr);
+
+ alloc_meta = kasan_get_alloc_meta(cache, object);
+ free_track = kasan_get_free_track(cache, object, tag);
+ record.alloc_handle = alloc_meta->alloc_track.stack;
+ if (free_track)
+ record.free_handle = free_track->stack;
+ }
+
+ record.bt_handle = bt_handle;
+
+ for (i = 0; i < stored_kasan_records; i++) {
+ if (record.bt_handle != kasan_records[i].bt_handle)
+ continue;
+ if (record.alloc_handle != kasan_records[i].alloc_handle)
+ continue;
+ if (!strncmp("use-after-free", bug_type, 15) &&
+ (record.free_handle != kasan_records[i].free_handle))
+ continue;
+
+ ret = false;
+ goto done;
+ }
+
+ memcpy(&kasan_records[stored_kasan_records], &record, sizeof(struct kasan_record));
+ stored_kasan_records++;
+
+done:
+ spin_unlock_irqrestore(&report_lock, *flags);
+ kasan_enable_current();
+ return ret;
+}
+
static void __kasan_report(unsigned long addr, size_t size, bool is_write,
unsigned long ip)
{
@@ -388,6 +451,10 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write,
info.is_write = is_write;
info.ip = ip;
+ if (addr_has_metadata(untagged_addr) &&
+ !save_report(untagged_addr, &info, get_tag(tagged_addr), &flags))
+ return;
+
start_report(&flags);
print_error_description(&info);
--
2.17.1
Store the minimal information required to regenerate KASAN logs
when the proc interface is read.

A 4 KB kernel buffer is allocated to avoid sanity checks on every
snprintf return value; before the data is copied to user space it is
checked whether the user buffer has enough capacity.

During long aging tests of targets it is difficult to check for
KASAN-reported issues, so it is better to have a proc interface that
lists the unique KASAN errors reported so far.
sample output and verification for ARM64:
Run sample TC's of KASAN:
[ 25.450749] kasan test: kmalloc_oob_right out-of-bounds to right
[ 25.452851] ==================================================================
[ 25.453779] BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0x80/0xa4
[ 25.454800] Write of size 1 at addr ffff0000c3f1c27b by task cat/125
[ 25.455891]
....
[ 25.488283] kasan test: kmalloc_oob_left out-of-bounds to left
[ 25.488819] ==================================================================
[ 25.489189] BUG: KASAN: slab-out-of-bounds in kmalloc_oob_left+0x80/0xac
[ 25.489601] Read of size 1 at addr ffff0000c3f1c2ff by task cat/125
...
After first reporting, NO KASAN reports for same issues:
[ 115.078095] kasan test: kmalloc_oob_right out-of-bounds to right
[ 115.078773] kasan test: kmalloc_oob_left out-of-bounds to left
[ 115.079237] kasan test: kmalloc_node_oob_right kmalloc_node(): out-of-bounds to right
[ 115.080056] kasan test: kmalloc_pagealloc_oob_right kmalloc pagealloc allocation: out-of-bounds to right
[ 115.080683] kasan test: kmalloc_pagealloc_uaf kmalloc pagealloc allocation: use-after-free
[ 115.081209] kasan test: kmalloc_pagealloc_invalid_free kmalloc pagealloc allocation: invalid-free
Check same from /proc/kasan_log:
KASAN Issue no. 1
==================================================================
BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0x80/0xa4 at addr ffff0000c3f1c27b
Write of size 1 by task cat/125
...
...
Memory state around the buggy address:
ffff0000c3f1c100: 00 00 00 00 fc fc fc fc fc fc fc fc fc fc fc fc
ffff0000c3f1c180: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
>ffff0000c3f1c200: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 03
^
ffff0000c3f1c280: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff0000c3f1c300: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
==================================================================
KASAN Issue no. 2
==================================================================
BUG: KASAN: slab-out-of-bounds in kmalloc_oob_left+0x80/0xac at addr ffff0000c3f1c2ff
Read of size 1 by task cat/125
...
...
Memory state around the buggy address:
ffff0000c3f1c180: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff0000c3f1c200: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>ffff0000c3f1c280: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
^
ffff0000c3f1c300: 00 07 fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff0000c3f1c380: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
==================================================================
KASAN Issue no. 3
==================================================================
...
...
Co-developed-by: Vaneet Narang <[email protected]>
Signed-off-by: Vaneet Narang <[email protected]>
Signed-off-by: Maninder Singh <[email protected]>
---
mm/kasan/kasan.h | 32 +++-
mm/kasan/report.c | 377 ++++++++++++++++++++++++++++++++++----
mm/kasan/report_generic.c | 42 ++++-
mm/kasan/report_hw_tags.c | 5 +-
mm/kasan/report_sw_tags.c | 30 ++-
5 files changed, 431 insertions(+), 55 deletions(-)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index d14ccce246ba..2c2c79551cbd 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -94,6 +94,25 @@ extern bool kasan_flag_panic __ro_after_init;
#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
#define META_ROWS_AROUND_ADDR 2
+#define SHADOW_ROWS (2 * META_ROWS_AROUND_ADDR + 1)
+#define CACHE_NAME_LEN (20)
+
+typedef enum {
+ UNKNOWN,
+ OUT_OF_BOUNDS,
+ OUT_OF_BOUNDS_SLAB,
+ OUT_OF_BOUNDS_GLOBAL,
+ OUT_OF_BOUNDS_STACK,
+ USE_AFTER_FREE,
+ OUT_OF_BOUNDS_ALLOCA,
+ OUT_OF_BOUNDS_VMALLOC,
+ INVALID_ACCESS,
+ NULL_PTR_DEREFER,
+ USER_MEMORY_ACCESS,
+ WILD_MEMORY_ACCESS,
+ DOUBLE_INVALID_FREE
+} kasan_bug_type;
+
struct kasan_access_info {
const void *access_addr;
const void *first_bad_addr;
@@ -106,6 +125,17 @@ struct kasan_record {
depot_stack_handle_t bt_handle;
depot_stack_handle_t alloc_handle;
depot_stack_handle_t free_handle;
+ const void *access_addr;
+ const void *first_bad_addr;
+ unsigned long ip;
+ size_t access_size;
+ char comm[TASK_COMM_LEN];
+ char cache_name[CACHE_NAME_LEN];
+ int cache_size;
+ pid_t pid;
+ kasan_bug_type bug_type;
+ u8 buf[SHADOW_ROWS][META_BYTES_PER_ROW];
+ bool is_write;
};
/* The layout of struct dictated by compiler */
@@ -234,7 +264,7 @@ static inline void kasan_print_tags(u8 addr_tag, const void *addr) { }
#endif
void *kasan_find_first_bad_addr(void *addr, size_t size);
-const char *kasan_get_bug_type(struct kasan_access_info *info);
+const char *kasan_get_bug_type(struct kasan_access_info *info, kasan_bug_type *bug);
void kasan_metadata_fetch_row(char *buffer, void *row);
#if defined(CONFIG_KASAN_GENERIC) && defined(CONFIG_KASAN_STACK)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 4576de76991b..b0cc95fedc29 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
+#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
@@ -66,7 +67,7 @@ __setup("kasan_multi_shot", kasan_set_multi_shot);
static void print_error_description(struct kasan_access_info *info)
{
pr_err("BUG: KASAN: %s in %pS\n",
- kasan_get_bug_type(info), (void *)info->ip);
+ kasan_get_bug_type(info, NULL), (void *)info->ip);
if (info->access_size)
pr_err("%s of size %zu at addr %px by task %s/%d\n",
info->is_write ? "Write" : "Read", info->access_size,
@@ -342,26 +343,50 @@ static void kasan_update_kunit_status(struct kunit *cur_test)
}
#endif /* IS_ENABLED(CONFIG_KUNIT) */
-void kasan_report_invalid_free(void *object, unsigned long ip)
+static void copy_error_description(struct kasan_access_info *info,
+ struct kasan_record *record)
{
- unsigned long flags;
- u8 tag = get_tag(object);
+ record->ip = info->ip;
+ record->first_bad_addr = info->first_bad_addr;
+ record->access_addr = info->access_addr;
+ record->is_write = info->is_write;
+ record->access_size = info->access_size;
+ record->pid = task_pid_nr(current);
+ strncpy(record->comm, current->comm, TASK_COMM_LEN);
+}
- object = kasan_reset_tag(object);
+static void copy_shadow_for_address(struct kasan_record *record)
+{
+ int i;
+ void *addr = (void *)record->first_bad_addr;
+ void *row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
+ - META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;
-#if IS_ENABLED(CONFIG_KUNIT)
- if (current->kunit_test)
- kasan_update_kunit_status(current->kunit_test);
-#endif /* IS_ENABLED(CONFIG_KUNIT) */
- start_report(&flags);
- pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
- kasan_print_tags(tag, object);
- pr_err("\n");
- print_address_description(object, tag);
- pr_err("\n");
- print_memory_metadata(object);
- end_report(&flags, (unsigned long)object);
+ for (i = 0; i < SHADOW_ROWS; i++) {
+ kasan_metadata_fetch_row((char *)&record->buf[i], row);
+ row += META_MEM_BYTES_PER_ROW;
+ }
+}
+
+static bool match_handles(struct kasan_record *record)
+{
+ int i = 0;
+
+ for (i = 0; i < stored_kasan_records; i++) {
+ if (record->bt_handle != kasan_records[i].bt_handle)
+ continue;
+ if (record->alloc_handle != kasan_records[i].alloc_handle)
+ continue;
+ if ((record->bug_type == USE_AFTER_FREE ||
+ record->bug_type == DOUBLE_INVALID_FREE) &&
+ (record->free_handle != kasan_records[i].free_handle))
+ continue;
+
+ return true;
+ }
+
+ return false;
}
/*
@@ -370,21 +395,26 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
* returns false if same record is already saved.
* returns true if its new record and saved in database of KASAN.
*/
-static bool save_report(void *addr, struct kasan_access_info *info, u8 tag, unsigned long *flags)
+static bool save_report(void *addr, struct kasan_access_info *info, u8 tag,
+ unsigned long *flags, kasan_bug_type *bug)
{
struct kasan_record record = {0};
depot_stack_handle_t bt_handle;
- int i = 0;
- const char *bug_type;
struct kasan_alloc_meta *alloc_meta;
struct kasan_track *free_track;
struct page *page;
+ kasan_bug_type bug_enum = UNKNOWN;
bool ret = true;
kasan_disable_current();
spin_lock_irqsave(&report_lock, *flags);
- bug_type = kasan_get_bug_type(info);
+ if (!bug) {
+ kasan_get_bug_type(info, &bug_enum);
+ record.bug_type = bug_enum;
+ } else
+ record.bug_type = *bug;
+
page = kasan_addr_to_page(addr);
bt_handle = kasan_save_stack(GFP_KERNEL);
@@ -397,23 +427,27 @@ static bool save_report(void *addr, struct kasan_access_info *info, u8 tag, unsi
record.alloc_handle = alloc_meta->alloc_track.stack;
if (free_track)
record.free_handle = free_track->stack;
+
+ strncpy(record.cache_name, cache->name, CACHE_NAME_LEN - 1);
+ record.cache_name[CACHE_NAME_LEN - 1] = '\0';
+ record.cache_size = cache->object_size;
}
record.bt_handle = bt_handle;
- for (i = 0; i < stored_kasan_records; i++) {
- if (record.bt_handle != kasan_records[i].bt_handle)
- continue;
- if (record.alloc_handle != kasan_records[i].alloc_handle)
- continue;
- if (!strncmp("use-after-free", bug_type, 15) &&
- (record.free_handle != kasan_records[i].free_handle))
- continue;
+ if (match_handles(&record)) {
+ ret = false;
+ goto done;
+ }
+ if (stored_kasan_records >= MAX_RECORDS) {
+ WARN_ONCE(1, "KASAN database reached capacity");
ret = false;
goto done;
}
+ copy_error_description(info, &record);
+ copy_shadow_for_address(&record);
memcpy(&kasan_records[stored_kasan_records], &record, sizeof(struct kasan_record));
stored_kasan_records++;
@@ -423,6 +457,38 @@ static bool save_report(void *addr, struct kasan_access_info *info, u8 tag, unsi
return ret;
}
+void kasan_report_invalid_free(void *object, unsigned long ip)
+{
+ unsigned long flags;
+ u8 tag = get_tag(object);
+ struct kasan_access_info info;
+ kasan_bug_type bug_enum = DOUBLE_INVALID_FREE;
+
+ object = kasan_reset_tag(object);
+
+#if IS_ENABLED(CONFIG_KUNIT)
+ if (current->kunit_test)
+ kasan_update_kunit_status(current->kunit_test);
+#endif /* IS_ENABLED(CONFIG_KUNIT) */
+
+ info.ip = ip;
+ info.first_bad_addr = object;
+ info.access_addr = 0;
+ info.is_write = 0;
+ info.access_size = 0;
+ if (!save_report(object, &info, tag, &flags, &bug_enum))
+ return;
+
+ start_report(&flags);
+ pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
+ kasan_print_tags(tag, object);
+ pr_err("\n");
+ print_address_description(object, tag);
+ pr_err("\n");
+ print_memory_metadata(object);
+ end_report(&flags, (unsigned long)object);
+}
+
static void __kasan_report(unsigned long addr, size_t size, bool is_write,
unsigned long ip)
{
@@ -442,18 +508,17 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write,
untagged_addr = kasan_reset_tag(tagged_addr);
info.access_addr = tagged_addr;
- if (addr_has_metadata(untagged_addr))
- info.first_bad_addr =
- kasan_find_first_bad_addr(tagged_addr, size);
- else
- info.first_bad_addr = untagged_addr;
info.access_size = size;
info.is_write = is_write;
info.ip = ip;
- if (addr_has_metadata(untagged_addr) &&
- !save_report(untagged_addr, &info, get_tag(tagged_addr), &flags))
- return;
+ if (addr_has_metadata(untagged_addr)) {
+ info.first_bad_addr = kasan_find_first_bad_addr(tagged_addr, size);
+
+ if (!save_report(untagged_addr, &info, get_tag(tagged_addr), &flags, NULL))
+ return;
+ } else
+ info.first_bad_addr = untagged_addr;
start_report(&flags);
@@ -528,3 +593,241 @@ void kasan_non_canonical_hook(unsigned long addr)
orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
}
#endif
+
+#define READ_SIZE (4096)
+static ssize_t print_kasan_error(char __user *buf, size_t count,
+ struct kasan_record *record, loff_t *ppos, char *kbuf)
+{
+ int ret = 0;
+ unsigned long *entries;
+ unsigned long nr_entries;
+ const char *bug_type = "unknown-crash";
+ int i, j;
+ void *row;
+ void *addr = (void *)record->first_bad_addr;
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ switch (record->bug_type) {
+ case OUT_OF_BOUNDS:
+ bug_type = "out-of-bounds";
+ break;
+ case OUT_OF_BOUNDS_SLAB:
+ bug_type = "slab-out-of-bounds";
+ break;
+ case OUT_OF_BOUNDS_GLOBAL:
+ bug_type = "global-out-of-bounds";
+ break;
+ case OUT_OF_BOUNDS_STACK:
+ bug_type = "stack-out-of-bounds";
+ break;
+ case USE_AFTER_FREE:
+ bug_type = "use-after-free";
+ break;
+ case OUT_OF_BOUNDS_ALLOCA:
+ bug_type = "alloca-out-of-bounds";
+ break;
+ case OUT_OF_BOUNDS_VMALLOC:
+ bug_type = "vmalloc-out-of-bounds";
+ break;
+ case INVALID_ACCESS:
+ bug_type = "invalid-access";
+ break;
+ case NULL_PTR_DEREFER:
+ bug_type = "null-ptr-deref";
+ break;
+ case USER_MEMORY_ACCESS:
+ bug_type = "user-memory-access";
+ break;
+ case WILD_MEMORY_ACCESS:
+ bug_type = "wild-memory-access";
+ break;
+ case DOUBLE_INVALID_FREE:
+ bug_type = "double-free or invalid-free";
+ break;
+ default:
+ break;
+ }
+
+ ret += snprintf(kbuf + ret, count - ret,
+ "KASAN Issue no. %lld\n", *ppos);
+ ret += snprintf(kbuf + ret, count - ret,
+ "==============================="
+ "===================================\n");
+
+ if (record->bug_type != DOUBLE_INVALID_FREE) {
+ ret += snprintf(kbuf + ret, count - ret,
+ "\nBUG: KASAN: %s in %pS at addr %px\n",
+ bug_type, (void *)record->ip, record->access_addr);
+ ret += snprintf(kbuf + ret, count - ret,
+ "%s of size %zu by task %s/%d\n",
+ record->is_write ? "Write" : "Read",
+ record->access_size, record->comm, record->pid);
+ } else {
+ ret += snprintf(kbuf + ret, count - ret,
+ "\nBUG: KASAN: %s in %pS\n",
+ bug_type, (void *)record->ip);
+ }
+
+ ret += snprintf(kbuf + ret, count - ret, "\nBacktrace:\n");
+ nr_entries = stack_depot_fetch(record->bt_handle, &entries);
+
+ ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
+
+ if (record->alloc_handle) {
+ ret += snprintf(kbuf + ret, count - ret,
+ "\nBelongs to the cache %s of size: %d\n",
+ record->cache_name, record->cache_size);
+ ret += snprintf(kbuf + ret, count - ret,
+ "------------------------------------------"
+ "-----------------------------------\n");
+
+ nr_entries = stack_depot_fetch(record->alloc_handle, &entries);
+ ret += snprintf(kbuf + ret, count - ret, "INFO Allocation path:\n");
+
+ ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
+
+ if (record->free_handle) {
+ ret += snprintf(kbuf + ret, count - ret, "\nINFO Free path:\n");
+
+ nr_entries = stack_depot_fetch(record->free_handle, &entries);
+ ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
+ }
+ }
+
+ if (kernel_or_module_addr(addr)) {
+ if (!init_task_stack_addr(addr))
+ ret += snprintf(kbuf + ret, count - ret,
+ "The buggy address belongs to the variable %pS\n",
+ (void *)record->access_addr);
+ }
+
+ ret += snprintf(kbuf + ret, count - ret,
+ "Memory state around the buggy address:\n");
+
+ row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
+ - META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;
+
+ for (i = 0; i < SHADOW_ROWS; i++) {
+ if (i)
+ ret += snprintf(kbuf + ret, count - ret, "\n");
+
+ ret += snprintf(kbuf + ret, count - ret,
+ (i == 2) ? ">%px: " : " %px: ", row);
+
+ for (j = 0; j < META_BYTES_PER_ROW; j++) {
+ u8 value = record->buf[i][j];
+ ret += snprintf(kbuf + ret, count - ret, "%02x ", value);
+ }
+
+ if (meta_row_is_guilty(row, addr))
+ ret += snprintf(kbuf + ret, count - ret, "\n%*c",
+ meta_pointer_offset(row, addr),
+ '^');
+
+ row += META_MEM_BYTES_PER_ROW;
+ }
+
+ ret += snprintf(kbuf + ret, count - ret,
+ "\n==============================="
+ "===================================\n");
+
+ /*
+ * checking for space in buffer only when copying to user,
+ * otherwise if overflow'ed in kernel buffer, it will
+ * lead to kernel crash and then size of vmalloc'ed
+ * memory can be increased.
+ *
+ * Benefit: checks on each snprintf avoided.
+ */
+ if (ret >= count) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (copy_to_user(buf, kbuf, ret))
+ ret = -EFAULT;
+
+err:
+ return ret;
+}
+
+/*
+ * read_kasan_errors()
+ *
+ * function to print all the entries present
+ * in KASAN depot_stack database currently in system.
+ */
+static ssize_t read_kasan_errors(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ /*
+ * No need of lock here for reading stored_kasan_records,
+ * As it is an integer variable, we can read it one value less
+ * if it is getting updated simultaneously.
+ */
+ int total_records = stored_kasan_records;
+ char *kbuf = (char *)file->private_data;
+
+ while (*ppos < total_records) {
+ struct kasan_record *record;
+
+ record = &kasan_records[*ppos];
+ *ppos = *ppos + 1;
+ return print_kasan_error(buf, count, record, ppos, kbuf);
+ }
+
+ return 0;
+}
+
+int read_kasan_open(struct inode *inode, struct file *file)
+{
+ char *kasan_kbuf;
+
+ /*
+ * One KASAN error will always be less than 4 KB,
+ * without page dump info.
+ *
+ * Thus allocate buffer of READ_SIZE, rather than
+ * count to avoid return checks of snprintfs.
+ */
+ kasan_kbuf = vzalloc(READ_SIZE);
+
+ if (!kasan_kbuf)
+ return -ENOMEM;
+
+ file->private_data = (void *)kasan_kbuf;
+
+ return 0;
+}
+
+int read_kasan_release(struct inode *inode, struct file *file)
+{
+ char *kasan_kbuf = (char *)file->private_data;
+
+ if (kasan_kbuf)
+ vfree(kasan_kbuf);
+
+ return 0;
+}
+
+static const struct proc_ops proc_kasan_ops = {
+ .proc_open = read_kasan_open,
+ .proc_read = read_kasan_errors,
+ .proc_release = read_kasan_release,
+};
+
+static int __init register_kasan_proc(void)
+{
+ struct proc_dir_entry *entry;
+
+ entry = proc_create("kasan_log", 0400,
+ NULL, &proc_kasan_ops);
+
+ if (!entry)
+ pr_err("registration of KASAN proc interface failed\n");
+
+ return 0;
+}
+fs_initcall(register_kasan_proc);
diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c
index 139615ef326b..0206d5f9b486 100644
--- a/mm/kasan/report_generic.c
+++ b/mm/kasan/report_generic.c
@@ -39,10 +39,12 @@ void *kasan_find_first_bad_addr(void *addr, size_t size)
return p;
}
-static const char *get_shadow_bug_type(struct kasan_access_info *info)
+static const char *get_shadow_bug_type(struct kasan_access_info *info,
+ kasan_bug_type *bug_save)
{
const char *bug_type = "unknown-crash";
u8 *shadow_addr;
+ kasan_bug_type bug = UNKNOWN;
shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
@@ -60,52 +62,70 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info)
* due to a data race in the kernel code.
*/
bug_type = "out-of-bounds";
+ bug = OUT_OF_BOUNDS;
break;
case KASAN_PAGE_REDZONE:
case KASAN_KMALLOC_REDZONE:
bug_type = "slab-out-of-bounds";
+ bug = OUT_OF_BOUNDS_SLAB;
break;
case KASAN_GLOBAL_REDZONE:
bug_type = "global-out-of-bounds";
+ bug = OUT_OF_BOUNDS_GLOBAL;
break;
case KASAN_STACK_LEFT:
case KASAN_STACK_MID:
case KASAN_STACK_RIGHT:
case KASAN_STACK_PARTIAL:
bug_type = "stack-out-of-bounds";
+ bug = OUT_OF_BOUNDS_STACK;
break;
case KASAN_FREE_PAGE:
case KASAN_KMALLOC_FREE:
case KASAN_KMALLOC_FREETRACK:
bug_type = "use-after-free";
+ bug = USE_AFTER_FREE;
break;
case KASAN_ALLOCA_LEFT:
case KASAN_ALLOCA_RIGHT:
bug_type = "alloca-out-of-bounds";
+ bug = OUT_OF_BOUNDS_ALLOCA;
break;
case KASAN_VMALLOC_INVALID:
bug_type = "vmalloc-out-of-bounds";
+ bug = OUT_OF_BOUNDS_VMALLOC;
break;
}
+ if (bug_save)
+ *bug_save = bug;
+
return bug_type;
}
-static const char *get_wild_bug_type(struct kasan_access_info *info)
+static const char *get_wild_bug_type(struct kasan_access_info *info, kasan_bug_type *bug)
{
const char *bug_type = "unknown-crash";
+ kasan_bug_type bug_enum = UNKNOWN;
- if ((unsigned long)info->access_addr < PAGE_SIZE)
+ if ((unsigned long)info->access_addr < PAGE_SIZE) {
bug_type = "null-ptr-deref";
- else if ((unsigned long)info->access_addr < TASK_SIZE)
+ bug_enum = NULL_PTR_DEREFER;
+ } else if ((unsigned long)info->access_addr < TASK_SIZE) {
bug_type = "user-memory-access";
- else
+ bug_enum = USER_MEMORY_ACCESS;
+ } else {
bug_type = "wild-memory-access";
+ bug_enum = WILD_MEMORY_ACCESS;
+ }
+
+ if (bug)
+ *bug = bug_enum;
return bug_type;
}
-const char *kasan_get_bug_type(struct kasan_access_info *info)
+const char *kasan_get_bug_type(struct kasan_access_info *info, kasan_bug_type *bug)
{
/*
* If access_size is a negative number, then it has reason to be
@@ -115,12 +135,16 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
* a large size_t and its value will be larger than ULONG_MAX/2,
* so that this can qualify as out-of-bounds.
*/
- if (info->access_addr + info->access_size < info->access_addr)
+ if (info->access_addr + info->access_size < info->access_addr) {
+ if (bug)
+ *bug = OUT_OF_BOUNDS;
+
return "out-of-bounds";
+ }
if (addr_has_metadata(info->access_addr))
- return get_shadow_bug_type(info);
- return get_wild_bug_type(info);
+ return get_shadow_bug_type(info, bug);
+ return get_wild_bug_type(info, bug);
}
void kasan_metadata_fetch_row(char *buffer, void *row)
diff --git a/mm/kasan/report_hw_tags.c b/mm/kasan/report_hw_tags.c
index 42b2168755d6..ae516e92f9f3 100644
--- a/mm/kasan/report_hw_tags.c
+++ b/mm/kasan/report_hw_tags.c
@@ -15,8 +15,11 @@
#include "kasan.h"
-const char *kasan_get_bug_type(struct kasan_access_info *info)
+const char *kasan_get_bug_type(struct kasan_access_info *info, kasan_bug_type *bug)
{
+ if (bug)
+ *bug = INVALID_ACCESS;
+
return "invalid-access";
}
diff --git a/mm/kasan/report_sw_tags.c b/mm/kasan/report_sw_tags.c
index 3d20d3451d9e..11c869d4ad3c 100644
--- a/mm/kasan/report_sw_tags.c
+++ b/mm/kasan/report_sw_tags.c
@@ -29,8 +29,10 @@
#include "kasan.h"
#include "../slab.h"
-const char *kasan_get_bug_type(struct kasan_access_info *info)
+const char *kasan_get_bug_type(struct kasan_access_info *info, kasan_bug_type *bug)
{
+ kasan_bug_type bug_enum;
+ const char *bug_type;
#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
struct kasan_alloc_meta *alloc_meta;
struct kmem_cache *cache;
@@ -50,11 +52,16 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
if (alloc_meta) {
for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
- if (alloc_meta->free_pointer_tag[i] == tag)
- return "use-after-free";
+ if (alloc_meta->free_pointer_tag[i] == tag) {
+ bug_type = "use-after-free";
+ bug_enum = USE_AFTER_FREE;
+ goto done;
+ }
}
}
- return "out-of-bounds";
+ bug_type = "out-of-bounds";
+ bug_enum = OUT_OF_BOUNDS;
+ goto done;
}
#endif
@@ -66,10 +73,19 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
* a large size_t and its value will be larger than ULONG_MAX/2,
* so that this can qualify as out-of-bounds.
*/
- if (info->access_addr + info->access_size < info->access_addr)
- return "out-of-bounds";
+ if (info->access_addr + info->access_size < info->access_addr) {
+ bug_enum = OUT_OF_BOUNDS;
+ bug_type = "out-of-bounds";
+ goto done;
+ }
+
+ bug_enum = INVALID_ACCESS;
+ bug_type = "invalid-access";
+done:
+ if (bug)
+ *bug = bug_enum;
- return "invalid-access";
+ return bug_type;
}
void *kasan_find_first_bad_addr(void *addr, size_t size)
--
2.17.1
On Thu, Apr 22, 2021 at 11:17 AM Maninder Singh <[email protected]> wrote:
>
> When KASAN multi-shot is ON and some buggy code hits the same KASAN
> error path repetitively, it can flood the console logs.
>
> Check the allocation, free and backtrace paths at the time of a KASAN
> error; if they all match an earlier report, it is a duplicate error and
> KASAN skips printing it.
>
> Co-developed-by: Vaneet Narang <[email protected]>
> Signed-off-by: Vaneet Narang <[email protected]>
> Signed-off-by: Maninder Singh <[email protected]>
> ---
> mm/kasan/kasan.h | 6 +++++
> mm/kasan/report.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 73 insertions(+)
>
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index 78cf99247139..d14ccce246ba 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -102,6 +102,12 @@ struct kasan_access_info {
> unsigned long ip;
> };
>
> +struct kasan_record {
> + depot_stack_handle_t bt_handle;
> + depot_stack_handle_t alloc_handle;
> + depot_stack_handle_t free_handle;
> +};
Hi Maninder,
There is no need to declare this in the header, it can be declared
more locally in report.h.
> +
> /* The layout of struct dictated by compiler */
> struct kasan_source_location {
> const char *filename;
> diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> index 87b271206163..4576de76991b 100644
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -39,6 +39,10 @@ static unsigned long kasan_flags;
> #define KASAN_BIT_REPORTED 0
> #define KASAN_BIT_MULTI_SHOT 1
>
> +#define MAX_RECORDS (200)
s/MAX_RECORDS/KASAN_MAX_RECORDS/
> +static struct kasan_record kasan_records[MAX_RECORDS];
Since all fields in kasan_record are stack handles, the code will be
simpler and more uniform, if we store just an array of handles w/o
distinguishing between alloc/free/access.
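Something like this, for illustration only (untested sketch; the enum and
helper names below are made up, not existing KASAN identifiers):

enum kasan_record_handle {
	KASAN_RECORD_BT,
	KASAN_RECORD_ALLOC,
	KASAN_RECORD_FREE,
	KASAN_RECORD_NR_HANDLES,
};

struct kasan_record {
	depot_stack_handle_t handles[KASAN_RECORD_NR_HANDLES];
};

static bool kasan_record_equal(const struct kasan_record *a,
			       const struct kasan_record *b)
{
	return !memcmp(a->handles, b->handles, sizeof(a->handles));
}

Then the dedup loop becomes a single memcmp() per stored record.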
> +static int stored_kasan_records;
> +
> bool kasan_save_enable_multi_shot(void)
> {
> return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
> @@ -360,6 +364,65 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
> end_report(&flags, (unsigned long)object);
> }
>
> +/*
> + * @save_report()
> + *
> + * returns false if same record is already saved.
s/same/the same/
> + * returns true if its new record and saved in database of KASAN.
s/its/it's/
s/database/the database/
> + */
> +static bool save_report(void *addr, struct kasan_access_info *info, u8 tag, unsigned long *flags)
> +{
> + struct kasan_record record = {0};
> + depot_stack_handle_t bt_handle;
> + int i = 0;
> + const char *bug_type;
> + struct kasan_alloc_meta *alloc_meta;
> + struct kasan_track *free_track;
> + struct page *page;
> + bool ret = true;
> +
> + kasan_disable_current();
> + spin_lock_irqsave(&report_lock, *flags);
Reusing the caller flags looks strange, do we need it?
But also the very next function start_report() also does the same
dance: kasan_disable_current/spin_lock_irqsave. It feels reasonable to
lock once, check for dups and return early if it's a dup.
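A sketch of how that could look if start_report() itself did the check
(untested; is_duplicate_report() is only a placeholder for the matching
logic from save_report() above, and the return type of start_report()
would have to change):

static bool start_report(unsigned long *flags, void *addr,
			 struct kasan_access_info *info, u8 tag)
{
	kasan_disable_current();
	spin_lock_irqsave(&report_lock, *flags);

	if (is_duplicate_report(addr, info, tag)) {
		/* Already seen: drop the lock again and print nothing. */
		spin_unlock_irqrestore(&report_lock, *flags);
		kasan_enable_current();
		return false;
	}

	pr_err("==================================================================\n");
	return true;
}

Callers would then simply bail out when start_report() returns false.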
> + bug_type = kasan_get_bug_type(info);
> + page = kasan_addr_to_page(addr);
> + bt_handle = kasan_save_stack(GFP_KERNEL);
Assign directly to record.bt_handle.
> + if (page && PageSlab(page)) {
> + struct kmem_cache *cache = page->slab_cache;
> + void *object = nearest_obj(cache, page, addr);
Since you already declare new var in this block, move
alloc_meta/free_track here as well.
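i.e. (same statements as in the patch, just with the declarations moved
into the block):

	if (page && PageSlab(page)) {
		struct kmem_cache *cache = page->slab_cache;
		void *object = nearest_obj(cache, page, addr);
		struct kasan_alloc_meta *alloc_meta =
			kasan_get_alloc_meta(cache, object);
		struct kasan_track *free_track =
			kasan_get_free_track(cache, object, tag);

		record.alloc_handle = alloc_meta->alloc_track.stack;
		if (free_track)
			record.free_handle = free_track->stack;
	}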
> +
> + alloc_meta = kasan_get_alloc_meta(cache, object);
> + free_track = kasan_get_free_track(cache, object, tag);
> + record.alloc_handle = alloc_meta->alloc_track.stack;
> + if (free_track)
> + record.free_handle = free_track->stack;
> + }
> +
> + record.bt_handle = bt_handle;
> +
> + for (i = 0; i < stored_kasan_records; i++) {
> + if (record.bt_handle != kasan_records[i].bt_handle)
> + continue;
> + if (record.alloc_handle != kasan_records[i].alloc_handle)
> + continue;
> + if (!strncmp("use-after-free", bug_type, 15) &&
Comparing strings is unreliable and will break in future. Compare
handle with 0 instead, you already assume that 0 handle is "no
handle".
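e.g. (sketch):

		/* A zero free_handle already means "no free stack saved". */
		if (record.free_handle &&
		    record.free_handle != kasan_records[i].free_handle)
			continue;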
> + (record.free_handle != kasan_records[i].free_handle))
> + continue;
> +
> + ret = false;
> + goto done;
> + }
> +
> + memcpy(&kasan_records[stored_kasan_records], &record, sizeof(struct kasan_record));
> + stored_kasan_records++;
I think you just introduced an out-of-bounds write into KASAN, check
for MAX_RECORDS ;)
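One way to handle it (sketch; whether a full table should also suppress
printing is a separate policy choice):

	if (stored_kasan_records >= MAX_RECORDS) {
		/* Table full: keep reporting, just stop recording. */
		goto done;
	}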
> +
> +done:
> + spin_unlock_irqrestore(&report_lock, *flags);
> + kasan_enable_current();
> + return ret;
> +}
> +
> static void __kasan_report(unsigned long addr, size_t size, bool is_write,
> unsigned long ip)
> {
> @@ -388,6 +451,10 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write,
> info.is_write = is_write;
> info.ip = ip;
>
> + if (addr_has_metadata(untagged_addr) &&
Why addr_has_metadata check?
The kernel will probably crash later anyway, but from point of view of
this code, I don't see reasons to not dedup wild accesses.
> + !save_report(untagged_addr, &info, get_tag(tagged_addr), &flags))
> + return;
> +
> start_report(&flags);
>
> print_error_description(&info);
> --
> 2.17.1
>
On Thu, Apr 22, 2021 at 11:58 AM Dmitry Vyukov <[email protected]> wrote:
>
> On Thu, Apr 22, 2021 at 11:17 AM Maninder Singh <[email protected]> wrote:
> >
> > When KASAN multi-shot is ON and some buggy code hits the same KASAN
> > error path repetitively, it can flood the console logs.
> >
> > Check the allocation, free and backtrace paths at the time of a KASAN
> > error; if they all match an earlier report, it is a duplicate error and
> > KASAN skips printing it.
Can this be tested with the kunit kasan tests? If yes, please add a
test for this new code.
> > Co-developed-by: Vaneet Narang <[email protected]>
> > Signed-off-by: Vaneet Narang <[email protected]>
> > Signed-off-by: Maninder Singh <[email protected]>
> > ---
> > mm/kasan/kasan.h | 6 +++++
> > mm/kasan/report.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++
> > 2 files changed, 73 insertions(+)
> >
> > diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> > index 78cf99247139..d14ccce246ba 100644
> > --- a/mm/kasan/kasan.h
> > +++ b/mm/kasan/kasan.h
> > @@ -102,6 +102,12 @@ struct kasan_access_info {
> > unsigned long ip;
> > };
> >
> > +struct kasan_record {
> > + depot_stack_handle_t bt_handle;
> > + depot_stack_handle_t alloc_handle;
> > + depot_stack_handle_t free_handle;
> > +};
>
> Hi Maninder,
>
> There is no need to declare this in the header, it can be declared
> more locally in report.h.
>
> > +
> > /* The layout of struct dictated by compiler */
> > struct kasan_source_location {
> > const char *filename;
> > diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> > index 87b271206163..4576de76991b 100644
> > --- a/mm/kasan/report.c
> > +++ b/mm/kasan/report.c
> > @@ -39,6 +39,10 @@ static unsigned long kasan_flags;
> > #define KASAN_BIT_REPORTED 0
> > #define KASAN_BIT_MULTI_SHOT 1
> >
> > +#define MAX_RECORDS (200)
>
> s/MAX_RECORDS/KASAN_MAX_RECORDS/
>
> > +static struct kasan_record kasan_records[MAX_RECORDS];
>
> Since all fields in kasan_record are stack handles, the code will be
> simpler and more uniform, if we store just an array of handles w/o
> distinguishing between alloc/free/access.
>
> > +static int stored_kasan_records;
> > +
> > bool kasan_save_enable_multi_shot(void)
> > {
> > return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
> > @@ -360,6 +364,65 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
> > end_report(&flags, (unsigned long)object);
> > }
> >
> > +/*
> > + * @save_report()
> > + *
> > + * returns false if same record is already saved.
>
> s/same/the same/
>
> > + * returns true if its new record and saved in database of KASAN.
>
> s/its/it's/
> s/database/the database/
>
> > + */
> > +static bool save_report(void *addr, struct kasan_access_info *info, u8 tag, unsigned long *flags)
> > +{
> > + struct kasan_record record = {0};
> > + depot_stack_handle_t bt_handle;
> > + int i = 0;
> > + const char *bug_type;
> > + struct kasan_alloc_meta *alloc_meta;
> > + struct kasan_track *free_track;
> > + struct page *page;
> > + bool ret = true;
> > +
> > + kasan_disable_current();
> > + spin_lock_irqsave(&report_lock, *flags);
>
> Reusing the caller flags looks strange, do we need it?
> But also the very next function start_report() also does the same
> dance: kasan_disable_current/spin_lock_irqsave. It feels reasonable to
> lock once, check for dups and return early if it's a dup.
>
> > + bug_type = kasan_get_bug_type(info);
> > + page = kasan_addr_to_page(addr);
> > + bt_handle = kasan_save_stack(GFP_KERNEL);
>
> Assign directly to record.bt_handle.
>
> > + if (page && PageSlab(page)) {
> > + struct kmem_cache *cache = page->slab_cache;
> > + void *object = nearest_obj(cache, page, addr);
>
> Since you already declare new var in this block, move
> alloc_meta/free_track here as well.
>
> > +
> > + alloc_meta = kasan_get_alloc_meta(cache, object);
> > + free_track = kasan_get_free_track(cache, object, tag);
> > + record.alloc_handle = alloc_meta->alloc_track.stack;
> > + if (free_track)
> > + record.free_handle = free_track->stack;
> > + }
> > +
> > + record.bt_handle = bt_handle;
> > +
> > + for (i = 0; i < stored_kasan_records; i++) {
> > + if (record.bt_handle != kasan_records[i].bt_handle)
> > + continue;
> > + if (record.alloc_handle != kasan_records[i].alloc_handle)
> > + continue;
> > + if (!strncmp("use-after-free", bug_type, 15) &&
>
> Comparing strings is unreliable and will break in future. Compare
> handle with 0 instead, you already assume that 0 handle is "no
> handle".
>
> > + (record.free_handle != kasan_records[i].free_handle))
> > + continue;
> > +
> > + ret = false;
> > + goto done;
> > + }
> > +
> > + memcpy(&kasan_records[stored_kasan_records], &record, sizeof(struct kasan_record));
> > + stored_kasan_records++;
>
> I think you just introduced an out-of-bounds write into KASAN, check
> for MAX_RECORDS ;)
>
>
> > +
> > +done:
> > + spin_unlock_irqrestore(&report_lock, *flags);
> > + kasan_enable_current();
> > + return ret;
> > +}
> > +
> > static void __kasan_report(unsigned long addr, size_t size, bool is_write,
> > unsigned long ip)
> > {
> > @@ -388,6 +451,10 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write,
> > info.is_write = is_write;
> > info.ip = ip;
> >
> > + if (addr_has_metadata(untagged_addr) &&
>
> Why addr_has_metadata check?
> The kernel will probably crash later anyway, but from point of view of
> this code, I don't see reasons to not dedup wild accesses.
>
> > + !save_report(untagged_addr, &info, get_tag(tagged_addr), &flags))
> > + return;
> > +
> > start_report(&flags);
> >
> > print_error_description(&info);
> > --
> > 2.17.1
> >
On Thu, Apr 22, 2021 at 11:17 AM Maninder Singh <[email protected]> wrote:
>
> Store the minimal information required to regenerate KASAN logs
> when the proc interface is read.
>
> A 4 KB kernel buffer is allocated to avoid sanity checks on every
> snprintf return value; before the data is copied to user space it is
> checked whether the user buffer has enough capacity.
>
> During long aging tests of targets it is difficult to check for
> KASAN-reported issues, so it is better to have a proc interface that
> lists the unique KASAN errors reported so far.
>
> sample output and verification for ARM64:
> Run sample TC's of KASAN:
Alex, Marco, can the recently added error_report_notify interface be
used for this? Looks like they are doing roughly the same thing with
the same intentions.
> [ 25.450749] kasan test: kmalloc_oob_right out-of-bounds to right
> [ 25.452851] ==================================================================
> [ 25.453779] BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0x80/0xa4
> [ 25.454800] Write of size 1 at addr ffff0000c3f1c27b by task cat/125
> [ 25.455891]
> ....
> [ 25.488283] kasan test: kmalloc_oob_left out-of-bounds to left
> [ 25.488819] ==================================================================
> [ 25.489189] BUG: KASAN: slab-out-of-bounds in kmalloc_oob_left+0x80/0xac
> [ 25.489601] Read of size 1 at addr ffff0000c3f1c2ff by task cat/125
> ...
>
> After first reporting, NO KASAN reports for same issues:
>
> [ 115.078095] kasan test: kmalloc_oob_right out-of-bounds to right
> [ 115.078773] kasan test: kmalloc_oob_left out-of-bounds to left
> [ 115.079237] kasan test: kmalloc_node_oob_right kmalloc_node(): out-of-bounds to right
> [ 115.080056] kasan test: kmalloc_pagealloc_oob_right kmalloc pagealloc allocation: out-of-bounds to right
> [ 115.080683] kasan test: kmalloc_pagealloc_uaf kmalloc pagealloc allocation: use-after-free
> [ 115.081209] kasan test: kmalloc_pagealloc_invalid_free kmalloc pagealloc allocation: invalid-free
>
> Check same from /proc/kasan_log:
> KASAN Issue no. 1
> ==================================================================
>
> BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0x80/0xa4 at addr ffff0000c3f1c27b
> Write of size 1 by task cat/125
> ...
> ...
> Memory state around the buggy address:
> ffff0000c3f1c100: 00 00 00 00 fc fc fc fc fc fc fc fc fc fc fc fc
> ffff0000c3f1c180: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
> >ffff0000c3f1c200: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 03
> ^
> ffff0000c3f1c280: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
> ffff0000c3f1c300: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
> ==================================================================
> KASAN Issue no. 2
> ==================================================================
>
> BUG: KASAN: slab-out-of-bounds in kmalloc_oob_left+0x80/0xac at addr ffff0000c3f1c2ff
> Read of size 1 by task cat/125
> ...
> ...
> Memory state around the buggy address:
> ffff0000c3f1c180: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
> ffff0000c3f1c200: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
> >ffff0000c3f1c280: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
> ^
> ffff0000c3f1c300: 00 07 fc fc fc fc fc fc fc fc fc fc fc fc fc fc
> ffff0000c3f1c380: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
> ==================================================================
> KASAN Issue no. 3
> ==================================================================
> ...
> ...
>
> Co-developed-by: Vaneet Narang <[email protected]>
> Signed-off-by: Vaneet Narang <[email protected]>
> Signed-off-by: Maninder Singh <[email protected]>
> ---
> mm/kasan/kasan.h | 32 +++-
> mm/kasan/report.c | 377 ++++++++++++++++++++++++++++++++++----
> mm/kasan/report_generic.c | 42 ++++-
> mm/kasan/report_hw_tags.c | 5 +-
> mm/kasan/report_sw_tags.c | 30 ++-
> 5 files changed, 431 insertions(+), 55 deletions(-)
>
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index d14ccce246ba..2c2c79551cbd 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -94,6 +94,25 @@ extern bool kasan_flag_panic __ro_after_init;
> #define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
> #define META_ROWS_AROUND_ADDR 2
>
> +#define SHADOW_ROWS (2 * META_ROWS_AROUND_ADDR + 1)
> +#define CACHE_NAME_LEN (20)
> +
> +typedef enum {
> + UNKNOWN,
> + OUT_OF_BOUNDS,
> + OUT_OF_BOUNDS_SLAB,
> + OUT_OF_BOUNDS_GLOBAL,
> + OUT_OF_BOUNDS_STACK,
> + USE_AFTER_FREE,
> + OUT_OF_BOUNDS_ALLOCA,
> + OUT_OF_BOUNDS_VMALLOC,
> + INVALID_ACCESS,
> + NULL_PTR_DEREFER,
> + USER_MEMORY_ACCESS,
> + WILD_MEMORY_ACCESS,
> + DOUBLE_INVALID_FREE
> +} kasan_bug_type;
> +
> struct kasan_access_info {
> const void *access_addr;
> const void *first_bad_addr;
> @@ -106,6 +125,17 @@ struct kasan_record {
> depot_stack_handle_t bt_handle;
> depot_stack_handle_t alloc_handle;
> depot_stack_handle_t free_handle;
> + const void *access_addr;
> + const void *first_bad_addr;
> + unsigned long ip;
> + size_t access_size;
> + char comm[TASK_COMM_LEN];
> + char cache_name[CACHE_NAME_LEN];
> + int cache_size;
> + pid_t pid;
> + kasan_bug_type bug_type;
> + u8 buf[SHADOW_ROWS][META_BYTES_PER_ROW];
> + bool is_write;
> };
>
> /* The layout of struct dictated by compiler */
> @@ -234,7 +264,7 @@ static inline void kasan_print_tags(u8 addr_tag, const void *addr) { }
> #endif
>
> void *kasan_find_first_bad_addr(void *addr, size_t size);
> -const char *kasan_get_bug_type(struct kasan_access_info *info);
> +const char *kasan_get_bug_type(struct kasan_access_info *info, kasan_bug_type *bug);
> void kasan_metadata_fetch_row(char *buffer, void *row);
>
> #if defined(CONFIG_KASAN_GENERIC) && defined(CONFIG_KASAN_STACK)
> diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> index 4576de76991b..b0cc95fedc29 100644
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -15,6 +15,7 @@
> #include <linux/kernel.h>
> #include <linux/mm.h>
> #include <linux/printk.h>
> +#include <linux/proc_fs.h>
> #include <linux/sched.h>
> #include <linux/slab.h>
> #include <linux/stackdepot.h>
> @@ -66,7 +67,7 @@ __setup("kasan_multi_shot", kasan_set_multi_shot);
> static void print_error_description(struct kasan_access_info *info)
> {
> pr_err("BUG: KASAN: %s in %pS\n",
> - kasan_get_bug_type(info), (void *)info->ip);
> + kasan_get_bug_type(info, NULL), (void *)info->ip);
> if (info->access_size)
> pr_err("%s of size %zu at addr %px by task %s/%d\n",
> info->is_write ? "Write" : "Read", info->access_size,
> @@ -342,26 +343,50 @@ static void kasan_update_kunit_status(struct kunit *cur_test)
> }
> #endif /* IS_ENABLED(CONFIG_KUNIT) */
>
> -void kasan_report_invalid_free(void *object, unsigned long ip)
> +static void copy_error_description(struct kasan_access_info *info,
> + struct kasan_record *record)
> {
> - unsigned long flags;
> - u8 tag = get_tag(object);
> + record->ip = info->ip;
> + record->first_bad_addr = info->first_bad_addr;
> + record->access_addr = info->access_addr;
> + record->is_write = info->is_write;
> + record->access_size = info->access_size;
> + record->pid = task_pid_nr(current);
> + strncpy(record->comm, current->comm, TASK_COMM_LEN);
> +}
>
> - object = kasan_reset_tag(object);
> +static void copy_shadow_for_address(struct kasan_record *record)
> +{
> + int i;
> + void *addr = (void *)record->first_bad_addr;
> + void *row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
> + - META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;
>
> -#if IS_ENABLED(CONFIG_KUNIT)
> - if (current->kunit_test)
> - kasan_update_kunit_status(current->kunit_test);
> -#endif /* IS_ENABLED(CONFIG_KUNIT) */
>
> - start_report(&flags);
> - pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
> - kasan_print_tags(tag, object);
> - pr_err("\n");
> - print_address_description(object, tag);
> - pr_err("\n");
> - print_memory_metadata(object);
> - end_report(&flags, (unsigned long)object);
> + for (i = 0; i < SHADOW_ROWS; i++) {
> + kasan_metadata_fetch_row((char *)&record->buf[i], row);
> + row += META_MEM_BYTES_PER_ROW;
> + }
> +}
> +
> +static bool match_handles(struct kasan_record *record)
> +{
> + int i = 0;
> +
> + for (i = 0; i < stored_kasan_records; i++) {
> + if (record->bt_handle != kasan_records[i].bt_handle)
> + continue;
> + if (record->alloc_handle != kasan_records[i].alloc_handle)
> + continue;
> + if ((record->bug_type == USE_AFTER_FREE ||
> + record->bug_type == DOUBLE_INVALID_FREE) &&
> + (record->free_handle != kasan_records[i].free_handle))
> + continue;
> +
> + return true;
> + }
> +
> + return false;
> }
>
> /*
> @@ -370,21 +395,26 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
> * returns false if same record is already saved.
> * returns true if its new record and saved in database of KASAN.
> */
> -static bool save_report(void *addr, struct kasan_access_info *info, u8 tag, unsigned long *flags)
> +static bool save_report(void *addr, struct kasan_access_info *info, u8 tag,
> + unsigned long *flags, kasan_bug_type *bug)
> {
> struct kasan_record record = {0};
> depot_stack_handle_t bt_handle;
> - int i = 0;
> - const char *bug_type;
> struct kasan_alloc_meta *alloc_meta;
> struct kasan_track *free_track;
> struct page *page;
> + kasan_bug_type bug_enum = UNKNOWN;
> bool ret = true;
>
> kasan_disable_current();
> spin_lock_irqsave(&report_lock, *flags);
>
> - bug_type = kasan_get_bug_type(info);
> + if (!bug) {
> + kasan_get_bug_type(info, &bug_enum);
> + record.bug_type = bug_enum;
> + } else
> + record.bug_type = *bug;
> +
> page = kasan_addr_to_page(addr);
> bt_handle = kasan_save_stack(GFP_KERNEL);
>
> @@ -397,23 +427,27 @@ static bool save_report(void *addr, struct kasan_access_info *info, u8 tag, unsi
> record.alloc_handle = alloc_meta->alloc_track.stack;
> if (free_track)
> record.free_handle = free_track->stack;
> +
> + strncpy(record.cache_name, cache->name, CACHE_NAME_LEN - 1);
> + record.cache_name[CACHE_NAME_LEN - 1] = '\0';
> + record.cache_size = cache->object_size;
> }
>
> record.bt_handle = bt_handle;
>
> - for (i = 0; i < stored_kasan_records; i++) {
> - if (record.bt_handle != kasan_records[i].bt_handle)
> - continue;
> - if (record.alloc_handle != kasan_records[i].alloc_handle)
> - continue;
> - if (!strncmp("use-after-free", bug_type, 15) &&
> - (record.free_handle != kasan_records[i].free_handle))
> - continue;
> + if (match_handles(&record)) {
> + ret = false;
> + goto done;
> + }
>
> + if (stored_kasan_records >= MAX_RECORDS) {
> + WARN_ONCE(1, "KASAN database reached capacity");
> ret = false;
> goto done;
> }
>
> + copy_error_description(info, &record);
> + copy_shadow_for_address(&record);
> memcpy(&kasan_records[stored_kasan_records], &record, sizeof(struct kasan_record));
> stored_kasan_records++;
>
> @@ -423,6 +457,38 @@ static bool save_report(void *addr, struct kasan_access_info *info, u8 tag, unsi
> return ret;
> }
>
> +void kasan_report_invalid_free(void *object, unsigned long ip)
> +{
> + unsigned long flags;
> + u8 tag = get_tag(object);
> + struct kasan_access_info info;
> + kasan_bug_type bug_enum = DOUBLE_INVALID_FREE;
> +
> + object = kasan_reset_tag(object);
> +
> +#if IS_ENABLED(CONFIG_KUNIT)
> + if (current->kunit_test)
> + kasan_update_kunit_status(current->kunit_test);
> +#endif /* IS_ENABLED(CONFIG_KUNIT) */
> +
> + info.ip = ip;
> + info.first_bad_addr = object;
> + info.access_addr = 0;
> + info.is_write = 0;
> + info.access_size = 0;
> + if (!save_report(object, &info, tag, &flags, &bug_enum))
> + return;
> +
> + start_report(&flags);
> + pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
> + kasan_print_tags(tag, object);
> + pr_err("\n");
> + print_address_description(object, tag);
> + pr_err("\n");
> + print_memory_metadata(object);
> + end_report(&flags, (unsigned long)object);
> +}
> +
> static void __kasan_report(unsigned long addr, size_t size, bool is_write,
> unsigned long ip)
> {
> @@ -442,18 +508,17 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write,
> untagged_addr = kasan_reset_tag(tagged_addr);
>
> info.access_addr = tagged_addr;
> - if (addr_has_metadata(untagged_addr))
> - info.first_bad_addr =
> - kasan_find_first_bad_addr(tagged_addr, size);
> - else
> - info.first_bad_addr = untagged_addr;
> info.access_size = size;
> info.is_write = is_write;
> info.ip = ip;
>
> - if (addr_has_metadata(untagged_addr) &&
> - !save_report(untagged_addr, &info, get_tag(tagged_addr), &flags))
> - return;
> + if (addr_has_metadata(untagged_addr)) {
> + info.first_bad_addr = kasan_find_first_bad_addr(tagged_addr, size);
> +
> + if (!save_report(untagged_addr, &info, get_tag(tagged_addr), &flags, NULL))
> + return;
> + } else
> + info.first_bad_addr = untagged_addr;
>
> start_report(&flags);
>
> @@ -528,3 +593,241 @@ void kasan_non_canonical_hook(unsigned long addr)
> orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
> }
> #endif
> +
> +#define READ_SIZE (4096)
> +static ssize_t print_kasan_error(char __user *buf, size_t count,
> + struct kasan_record *record, loff_t *ppos, char *kbuf)
> +{
> + int ret = 0;
> + unsigned long *entries;
> + unsigned long nr_entries;
> + const char *bug_type = "unknown-crash";
> + int i, j;
> + void *row;
> + void *addr = (void *)record->first_bad_addr;
> +
> + if (!kbuf)
> + return -ENOMEM;
> +
> + switch (record->bug_type) {
> + case OUT_OF_BOUNDS:
> + bug_type = "out-of-bounds";
> + break;
> + case OUT_OF_BOUNDS_SLAB:
> + bug_type = "slab-out-of-bounds";
> + break;
> + case OUT_OF_BOUNDS_GLOBAL:
> + bug_type = "global-out-of-bounds";
> + break;
> + case OUT_OF_BOUNDS_STACK:
> + bug_type = "stack-out-of-bounds";
> + break;
> + case USE_AFTER_FREE:
> + bug_type = "use-after-free";
> + break;
> + case OUT_OF_BOUNDS_ALLOCA:
> + bug_type = "alloca-out-of-bounds";
> + break;
> + case OUT_OF_BOUNDS_VMALLOC:
> + bug_type = "vmalloc-out-of-bounds";
> + break;
> + case INVALID_ACCESS:
> + bug_type = "invalid-access";
> + break;
> + case NULL_PTR_DEREFER:
> + bug_type = "null-ptr-deref";
> + break;
> + case USER_MEMORY_ACCESS:
> + bug_type = "user-memory-access";
> + break;
> + case WILD_MEMORY_ACCESS:
> + bug_type = "wild-memory-access";
> + break;
> + case DOUBLE_INVALID_FREE:
> + bug_type = "double-free or invalid-free";
> + break;
> + default:
> + break;
> + }
> +
> + ret += snprintf(kbuf + ret, count - ret,
> + "KASAN Issue no. %lld\n", *ppos);
> + ret += snprintf(kbuf + ret, count - ret,
> + "==============================="
> + "===================================\n");
> +
> + if (record->bug_type != DOUBLE_INVALID_FREE) {
> + ret += snprintf(kbuf + ret, count - ret,
> + "\nBUG: KASAN: %s in %pS at addr %px\n",
> + bug_type, (void *)record->ip, record->access_addr);
> + ret += snprintf(kbuf + ret, count - ret,
> + "%s of size %zu by task %s/%d\n",
> + record->is_write ? "Write" : "Read",
> + record->access_size, record->comm, record->pid);
> + } else {
> + ret += snprintf(kbuf + ret, count - ret,
> + "\nBUG: KASAN: %s in %pS\n",
> + bug_type, (void *)record->ip);
> + }
> +
> + ret += snprintf(kbuf + ret, count - ret, "\nBacktrace:\n");
> + nr_entries = stack_depot_fetch(record->bt_handle, &entries);
> +
> + ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
> +
> + if (record->alloc_handle) {
> + ret += snprintf(kbuf + ret, count - ret,
> + "\nBelongs to the cache %s of size: %d\n",
> + record->cache_name, record->cache_size);
> + ret += snprintf(kbuf + ret, count - ret,
> + "------------------------------------------"
> + "-----------------------------------\n");
> +
> + nr_entries = stack_depot_fetch(record->alloc_handle, &entries);
> + ret += snprintf(kbuf + ret, count - ret, "INFO Allocation path:\n");
> +
> + ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
> +
> + if (record->free_handle) {
> + ret += snprintf(kbuf + ret, count - ret, "\nINFO Free path:\n");
> +
> + nr_entries = stack_depot_fetch(record->free_handle, &entries);
> + ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
> + }
> + }
> +
> + if (kernel_or_module_addr(addr)) {
> + if (!init_task_stack_addr(addr))
> + ret += snprintf(kbuf + ret, count - ret,
> + "The buggy address belongs to the variable %pS\n",
> + (void *)record->access_addr);
> + }
> +
> + ret += snprintf(kbuf + ret, count - ret,
> + "Memory state around the buggy address:\n");
> +
> + row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
> + - META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;
> +
> + for (i = 0; i < SHADOW_ROWS; i++) {
> + if (i)
> + ret += snprintf(kbuf + ret, count - ret, "\n");
> +
> + ret += snprintf(kbuf + ret, count - ret,
> + (i == 2) ? ">%px: " : " %px: ", row);
> +
> + for (j = 0; j < META_BYTES_PER_ROW; j++) {
> + u8 value = record->buf[i][j];
> + ret += snprintf(kbuf + ret, count - ret, "%02x ", value);
> + }
> +
> + if (meta_row_is_guilty(row, addr))
> + ret += snprintf(kbuf + ret, count - ret, "\n%*c",
> + meta_pointer_offset(row, addr),
> + '^');
> +
> + row += META_MEM_BYTES_PER_ROW;
> + }
> +
> + ret += snprintf(kbuf + ret, count - ret,
> + "\n==============================="
> + "===================================\n");
> +
> + /*
> + * checking for space in buffer only when copying to user,
> + * otherwise if overflow'ed in kernel buffer, it will
> + * lead to kernel crash and then size of vmalloc'ed
> + * memory can be increased.
> + *
> + * Benefit: checks on each snprintf avoided.
> + */
> + if (ret >= count) {
> + ret = -ENOMEM;
> + goto err;
> + }
> +
> + if (copy_to_user(buf, kbuf, ret))
> + ret = -EFAULT;
> +
> +err:
> + return ret;
> +}
> +
> +/*
> + * read_kasan_errors()
> + *
> + * function to print all the entries present
> + * in KASAN depot_stack database currently in system.
> + */
> +static ssize_t read_kasan_errors(struct file *file, char __user *buf,
> + size_t count, loff_t *ppos)
> +{
> + /*
> + * No need of lock here for reading stored_kasan_records,
> + * As it is an integer variable, we can read it one value less
> + * if it is getting updated simultaneously.
> + */
> + int total_records = stored_kasan_records;
> + char *kbuf = (char *)file->private_data;
> +
> + while (*ppos < total_records) {
> + struct kasan_record *record;
> +
> + record = &kasan_records[*ppos];
> + *ppos = *ppos + 1;
> + return print_kasan_error(buf, count, record, ppos, kbuf);
> + }
> +
> + return 0;
> +}
> +
> +int read_kasan_open(struct inode *inode, struct file *file)
> +{
> + char *kasan_kbuf;
> +
> + /*
> + * One KASAN error will always be less than 4 KB,
> + * without page dump info.
> + *
> + * Thus allocate buffer of READ_SIZE, rather than
> + * count to avoid return checks of snprintfs.
> + */
> + kasan_kbuf = vzalloc(READ_SIZE);
> +
> + if (!kasan_kbuf)
> + return -ENOMEM;
> +
> + file->private_data = (void *)kasan_kbuf;
> +
> + return 0;
> +}
> +
> +int read_kasan_release(struct inode *inode, struct file *file)
> +{
> + char *kasan_kbuf = (char *)file->private_data;
> +
> + if (kasan_kbuf)
> + vfree(kasan_kbuf);
> +
> + return 0;
> +}
> +
> +static const struct proc_ops proc_kasan_ops = {
> + .proc_open = read_kasan_open,
> + .proc_read = read_kasan_errors,
> + .proc_release = read_kasan_release,
> +};
> +
> +static int __init register_kasan_proc(void)
> +{
> + struct proc_dir_entry *entry;
> +
> + entry = proc_create("kasan_log", 0400,
> + NULL, &proc_kasan_ops);
> +
> + if (!entry)
> + pr_err("registration of KASAN proc interface failed\n");
> +
> + return 0;
> +}
> +fs_initcall(register_kasan_proc);
> diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c
> index 139615ef326b..0206d5f9b486 100644
> --- a/mm/kasan/report_generic.c
> +++ b/mm/kasan/report_generic.c
> @@ -39,10 +39,12 @@ void *kasan_find_first_bad_addr(void *addr, size_t size)
> return p;
> }
>
> -static const char *get_shadow_bug_type(struct kasan_access_info *info)
> +static const char *get_shadow_bug_type(struct kasan_access_info *info,
> + kasan_bug_type *bug_save)
> {
> const char *bug_type = "unknown-crash";
> u8 *shadow_addr;
> + kasan_bug_type bug = UNKNOWN;
>
> shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
>
> @@ -60,52 +62,70 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info)
> * due to a data race in the kernel code.
> */
> bug_type = "out-of-bounds";
> + bug = OUT_OF_BOUNDS;
> break;
> case KASAN_PAGE_REDZONE:
> case KASAN_KMALLOC_REDZONE:
> bug_type = "slab-out-of-bounds";
> + bug = OUT_OF_BOUNDS_SLAB;
> break;
> case KASAN_GLOBAL_REDZONE:
> bug_type = "global-out-of-bounds";
> + bug = OUT_OF_BOUNDS_GLOBAL;
> break;
> case KASAN_STACK_LEFT:
> case KASAN_STACK_MID:
> case KASAN_STACK_RIGHT:
> case KASAN_STACK_PARTIAL:
> bug_type = "stack-out-of-bounds";
> + bug = OUT_OF_BOUNDS_STACK;
> break;
> case KASAN_FREE_PAGE:
> case KASAN_KMALLOC_FREE:
> case KASAN_KMALLOC_FREETRACK:
> bug_type = "use-after-free";
> + bug = USE_AFTER_FREE;
> break;
> case KASAN_ALLOCA_LEFT:
> case KASAN_ALLOCA_RIGHT:
> bug_type = "alloca-out-of-bounds";
> + bug = OUT_OF_BOUNDS_ALLOCA;
> break;
> case KASAN_VMALLOC_INVALID:
> bug_type = "vmalloc-out-of-bounds";
> + bug = OUT_OF_BOUNDS_VMALLOC;
> break;
> }
>
> + if (bug_save)
> + *bug_save = bug;
> +
> return bug_type;
> }
>
> -static const char *get_wild_bug_type(struct kasan_access_info *info)
> +static const char *get_wild_bug_type(struct kasan_access_info *info, kasan_bug_type *bug)
> {
> const char *bug_type = "unknown-crash";
> + kasan_bug_type bug_enum = UNKNOWN;
>
> - if ((unsigned long)info->access_addr < PAGE_SIZE)
> + if ((unsigned long)info->access_addr < PAGE_SIZE) {
> bug_type = "null-ptr-deref";
> - else if ((unsigned long)info->access_addr < TASK_SIZE)
> + bug_enum = NULL_PTR_DEREFER;
> + } else if ((unsigned long)info->access_addr < TASK_SIZE) {
> bug_type = "user-memory-access";
> - else
> + bug_enum = USER_MEMORY_ACCESS;
> + } else {
> bug_type = "wild-memory-access";
> + bug_enum = WILD_MEMORY_ACCESS;
> + }
> +
> + if (bug)
> + *bug = bug_enum;
>
> return bug_type;
> }
>
> -const char *kasan_get_bug_type(struct kasan_access_info *info)
> +const char *kasan_get_bug_type(struct kasan_access_info *info, kasan_bug_type *bug)
> {
> /*
> * If access_size is a negative number, then it has reason to be
> @@ -115,12 +135,16 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
> * a large size_t and its value will be larger than ULONG_MAX/2,
> * so that this can qualify as out-of-bounds.
> */
> - if (info->access_addr + info->access_size < info->access_addr)
> + if (info->access_addr + info->access_size < info->access_addr) {
> + if (bug)
> + *bug = OUT_OF_BOUNDS;
> +
> return "out-of-bounds";
> + }
>
> if (addr_has_metadata(info->access_addr))
> - return get_shadow_bug_type(info);
> - return get_wild_bug_type(info);
> + return get_shadow_bug_type(info, bug);
> + return get_wild_bug_type(info, bug);
> }
>
> void kasan_metadata_fetch_row(char *buffer, void *row)
> diff --git a/mm/kasan/report_hw_tags.c b/mm/kasan/report_hw_tags.c
> index 42b2168755d6..ae516e92f9f3 100644
> --- a/mm/kasan/report_hw_tags.c
> +++ b/mm/kasan/report_hw_tags.c
> @@ -15,8 +15,11 @@
>
> #include "kasan.h"
>
> -const char *kasan_get_bug_type(struct kasan_access_info *info)
> +const char *kasan_get_bug_type(struct kasan_access_info *info, kasan_bug_type *bug)
> {
> + if (bug)
> + *bug = INVALID_ACCESS;
> +
> return "invalid-access";
> }
>
> diff --git a/mm/kasan/report_sw_tags.c b/mm/kasan/report_sw_tags.c
> index 3d20d3451d9e..11c869d4ad3c 100644
> --- a/mm/kasan/report_sw_tags.c
> +++ b/mm/kasan/report_sw_tags.c
> @@ -29,8 +29,10 @@
> #include "kasan.h"
> #include "../slab.h"
>
> -const char *kasan_get_bug_type(struct kasan_access_info *info)
> +const char *kasan_get_bug_type(struct kasan_access_info *info, kasan_bug_type *bug)
> {
> + kasan_bug_type bug_enum;
> + const char *bug_type;
> #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
> struct kasan_alloc_meta *alloc_meta;
> struct kmem_cache *cache;
> @@ -50,11 +52,16 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
>
> if (alloc_meta) {
> for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
> - if (alloc_meta->free_pointer_tag[i] == tag)
> - return "use-after-free";
> + if (alloc_meta->free_pointer_tag[i] == tag) {
> + bug_type = "use-after-free";
> + bug_enum = USE_AFTER_FREE;
> + goto done;
> + }
> }
> }
> - return "out-of-bounds";
> + bug_type = "out-of-bounds";
> + bug_enum = OUT_OF_BOUNDS;
> + goto done;
> }
>
> #endif
> @@ -66,10 +73,19 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
> * a large size_t and its value will be larger than ULONG_MAX/2,
> * so that this can qualify as out-of-bounds.
> */
> - if (info->access_addr + info->access_size < info->access_addr)
> - return "out-of-bounds";
> + if (info->access_addr + info->access_size < info->access_addr) {
> + bug_enum = OUT_OF_BOUNDS;
> + bug_type = "out-of-bounds";
> + goto done;
> + }
> +
> + bug_enum = INVALID_ACCESS;
> + bug_type = "invalid-access";
> +done:
> + if (bug)
> + *bug = bug_enum;
>
> - return "invalid-access";
> + return bug_type;
> }
>
> void *kasan_find_first_bad_addr(void *addr, size_t size)
> --
> 2.17.1
>
> Alex, Marco, can the recently added error_report_notify interface be
> used for this? Looks like they are doing roughly the same thing with
> the same intentions.
We've recently attempted to build a universal library capturing every
error report, but then were pointed to tracefs, which was just enough
for our purpose (https://lkml.org/lkml/2021/1/15/609).
Greg also stated that procfs is a bad place for storing reports:
https://lkml.org/lkml/2021/1/15/929.
Maninder, which problem exactly are you trying to solve?
Note that KASAN already triggers a trace_error_report_end tracepoint
on every error report:
https://elixir.bootlin.com/linux/v5.12-rc8/source/mm/kasan/report.c#L90
Would it help if you used that one? It could probably be extended with
more parameters.
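For illustration only, a built-in probe on that tracepoint might look
roughly like this (assuming the prototype from
include/trace/events/error_report.h; the function name and the counter
are made up here):

#include <linux/init.h>
#include <linux/types.h>
#include <trace/events/error_report.h>

/* Sketch: count KASAN reports via the error_report_end tracepoint. */
static unsigned long kasan_reports_seen;

static void kasan_report_probe(void *ignore, enum error_detector detector,
			       unsigned long id)
{
	if (detector != ERROR_DETECTOR_KASAN)
		return;
	/* 'id' is the address the report was about; dedup/count it here. */
	kasan_reports_seen++;
}

static int __init kasan_report_probe_init(void)
{
	return register_trace_error_report_end(kasan_report_probe, NULL);
}
late_initcall(kasan_report_probe_init);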
Another option if you want verbatim reports is to use the console
tracepoints, as this is done in
https://elixir.bootlin.com/linux/v5.12-rc8/source/mm/kfence/kfence_test.c
Note that there are many caveats with error report collection (see the
links above), but for testing purpose it might be enough.
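A rough sketch of that approach (not code taken from kfence_test.c
itself; the buffer size and names are arbitrary) could be:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <trace/events/printk.h>

/* Sketch: grab KASAN report header lines verbatim from the console. */
static char observed[1024];
static size_t observed_len;

static void probe_console(void *ignore, const char *buf, size_t len)
{
	/* Keep only the report header; a real collector would buffer more. */
	if (strnstr(buf, "BUG: KASAN: ", len) && observed_len < sizeof(observed))
		observed_len += scnprintf(observed + observed_len,
					  sizeof(observed) - observed_len,
					  "%.*s", (int)len, buf);
}

static int __init console_probe_init(void)
{
	return register_trace_console(probe_console, NULL);
}
late_initcall(console_probe_init);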
Alex
On Thu, 22 Apr 2021 at 11:17, Maninder Singh <[email protected]> wrote:
>
> when KASAN multishot is ON and some buggy code hits same code path
> of KASAN issue repetetively, it can flood logs on console.
>
> Check for allocaton, free and backtrace path at time of KASAN error,
> if these are same then it is duplicate error and avoid these prints
> from KASAN.
On a more fundamental level, I think this sort of filtering is the
wrong solution to your problem. One reason why it's good that
multishot is off by default is, because _every_ KASAN report is
critical and can destabilize the system. Therefore, any report after
the first one might be completely bogus, because the system is in a
potentially bad state and its behaviour might be completely random.
The correct solution is to not leave the system running, fix the first
bug found, continue; rinse and repeat. Therefore, this patch adds a
lot of code for little benefit.
The much simpler solution that will likely yield a similar result is
to simply define an upper bound on the number of reports if multishot
is on. Because if I've seen 1000 reports, I already know the system is
completely trashed and whatever else it's reporting might just be
random.
Thanks,
-- Marco
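(For illustration, such a cap could be little more than a counter
checked at the top of __kasan_report(); the limit and the helper name
below are made up:)

/* Sketch against mm/kasan/report.c: cap reports while multi-shot is on. */
#define KASAN_MAX_MULTISHOT_REPORTS 100	/* hypothetical limit */

static atomic_t kasan_report_count = ATOMIC_INIT(0);

static bool kasan_report_limit_reached(void)
{
	if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return false;	/* single-shot already reports only once */

	return atomic_inc_return(&kasan_report_count) >
	       KASAN_MAX_MULTISHOT_REPORTS;
}

/* __kasan_report() would then simply bail out early:
 *	if (kasan_report_limit_reached())
 *		return;
 */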
On Thu, Apr 22, 2021 at 4:10 PM Marco Elver <[email protected]> wrote:
>
> On Thu, 22 Apr 2021 at 11:17, Maninder Singh <[email protected]> wrote:
> >
> > when KASAN multishot is ON and some buggy code hits same code path
> > of KASAN issue repetetively, it can flood logs on console.
> >
> > Check for allocaton, free and backtrace path at time of KASAN error,
> > if these are same then it is duplicate error and avoid these prints
> > from KASAN.
>
> On a more fundamental level, I think this sort of filtering is the
> wrong solution to your problem. One reason why it's good that
> multishot is off by default is, because _every_ KASAN report is
> critical and can destabilize the system. Therefore, any report after
> the first one might be completely bogus, because the system is in a
> potentially bad state and its behaviour might be completely random.
>
> The correct solution is to not leave the system running, fix the first
> bug found, continue; rinse and repeat. Therefore, this patch adds a
> lot of code for little benefit.
I agree with Marco here.
It doesn't make sense to have this deduplication code in the kernel
anyway. If you want unique reports, write a userspace script that
parses dmesg and groups the reports.
Thanks!
Hi Alex,
>We've recently attempted to build a universal library capturing every
>error report, but then were pointed to tracefs, which was just enough
>for our purpose (https://lkml.org/lkml/2021/1/15/609).
>Greg also stated that procfs is a bad place for storing reports:
>https://lkml.org/lkml/2021/1/15/929.
>
>Maninder, which exactly problem are you trying to solve?
We focused on two problems: the first is to remove duplicate error
reporting from KASAN when multishot is ON, and the second is to save
minimal KASAN metadata so that the same KASAN warnings can be
regenerated when the user reads the new proc interface.
>Note that KASAN already triggers a trace_error_report_end tracepoint
>on every error report:
>https://elixir.bootlin.com/linux/v5.12-rc8/source/mm/kasan/report.c#L90
>Would it help if you used that one? It could probably be extended with
>more parameters.
>
>Another option if you want verbatim reports is to use the console
>tracepoints, as this is done in
>https://elixir.bootlin.com/linux/v5.12-rc8/source/mm/kfence/kfence_test.c
>Note that there are many caveats with error report collection (see the
>links above), but for testing purpose it might be enough.
>
OK, we will check these tracing methods as well.
Thanks
Maninder Singh
Hi Dmitry,
Sorry for late response.
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -102,6 +102,12 @@ struct kasan_access_info {
> unsigned long ip;
> };
>
>> +struct kasan_record {
>> + depot_stack_handle_t bt_handle;
>> + depot_stack_handle_t alloc_handle;
>> + depot_stack_handle_t free_handle;
>> +};
>
>Hi Maninder,
>
>There is no need to declare this in the header, it can be declared
>more locally in report.h.
>
Actually, we wanted to send both changes as a single patch, but then
thought it better to split them into two ideas for easier review: the
first gives the idea of removing duplicate KASAN errors, and the
second saves the KASAN metadata. The structure is needed in other
files by the second patch, which is why it was declared in the header.
>> +
>> /* The layout of struct dictated by compiler */
>> struct kasan_source_location {
>> const char *filename;
>> diff --git a/mm/kasan/report.c b/mm/kasan/report.c
>> index 87b271206163..4576de76991b 100644
>> --- a/mm/kasan/report.c
>> +++ b/mm/kasan/report.c
>> @@ -39,6 +39,10 @@ static unsigned long kasan_flags;
>> #define KASAN_BIT_REPORTED 0
>> #define KASAN_BIT_MULTI_SHOT 1
>>
>> +#define MAX_RECORDS (200)
>
>s/MAX_RECORDS/KASAN_MAX_RECORDS/
OK
>> +static struct kasan_record kasan_records[MAX_RECORDS];
>
>Since all fields in kasan_record are stack handles, the code will be
>simpler and more uniform, if we store just an array of handles w/o
>distinguishing between alloc/free/access.
Ok got your point.
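Presumably something along these lines (names are only illustrative):

/* Sketch: one record is just an array of stack handles. */
#define KASAN_RECORD_NR_HANDLES 3	/* access, alloc, free */

struct kasan_record {
	depot_stack_handle_t handles[KASAN_RECORD_NR_HANDLES];
};

static bool kasan_record_equal(const struct kasan_record *a,
			       const struct kasan_record *b)
{
	int i;

	/* Two records are duplicates when all stored handles match. */
	for (i = 0; i < KASAN_RECORD_NR_HANDLES; i++)
		if (a->handles[i] != b->handles[i])
			return false;
	return true;
}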
>> +static int stored_kasan_records;
>> +
>> bool kasan_save_enable_multi_shot(void)
>> {
>> return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
>> @@ -360,6 +364,65 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
>> end_report(&flags, (unsigned long)object);
>> }
>>
>> +/*
>> + * @save_report()
>> + *
>> + * returns false if same record is already saved.
>
>s/same/the same/
>
>> + * returns true if its new record and saved in database of KASAN.
>
>s/its/it's/
>s/database/the database/
ok
>> +static bool save_report(void *addr, struct kasan_access_info *info, u8 tag, unsigned long *flags)
>> +{
>> + struct kasan_record record = {0};
>> + depot_stack_handle_t bt_handle;
>> + int i = 0;
>> + const char *bug_type;
>> + struct kasan_alloc_meta *alloc_meta;
>> + struct kasan_track *free_track;
>> + struct page *page;
>> + bool ret = true;
>> +
>> + kasan_disable_current();
>> + spin_lock_irqsave(&report_lock, *flags);
>
>Reusing the caller flags looks strange, do we need it?
>But also the very next function start_report() also does the same
>dance: kasan_disable_current/spin_lock_irqsave. It feels reasonable to
>lock once, check for dups and return early if it's a dup.
OK, will check that (if only the first patch turns out to be suitable for mainline).
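For reference, the folded-in check could look roughly like this
(kasan_record_is_duplicate() is a hypothetical helper, and the changed
start_report() signature is only a sketch):

static bool start_report(unsigned long *flags, struct kasan_record *record)
{
	kasan_disable_current();
	spin_lock_irqsave(&report_lock, *flags);

	if (kasan_record_is_duplicate(record)) {	/* hypothetical helper */
		spin_unlock_irqrestore(&report_lock, *flags);
		kasan_enable_current();
		return false;	/* caller skips the whole report */
	}

	pr_err("==================================================================\n");
	return true;
}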
>> + bug_type = kasan_get_bug_type(info);
>> + page = kasan_addr_to_page(addr);
>> + bt_handle = kasan_save_stack(GFP_KERNEL);
>
OK
>
>> + if (page && PageSlab(page)) {
>> + struct kmem_cache *cache = page->slab_cache;
>> + void *object = nearest_obj(cache, page, addr);
>
>Since you already declare new var in this block, move
>alloc_meta/free_track here as well.
ok
>> + alloc_meta = kasan_get_alloc_meta(cache, object);
>> + free_track = kasan_get_free_track(cache, object, tag);
>> + record.alloc_handle = alloc_meta->alloc_track.stack;
>> + if (free_track)
>> + record.free_handle = free_track->stack;
>> + }
>> +
>> + record.bt_handle = bt_handle;
>> +
>> + for (i = 0; i < stored_kasan_records; i++) {
>> + if (record.bt_handle != kasan_records[i].bt_handle)
>> + continue;
>> + if (record.alloc_handle != kasan_records[i].alloc_handle)
>> + continue;
>> + if (!strncmp("use-after-free", bug_type, 15) &&
>
>Comparing strings is unreliable and will break in future. Compare
>handle with 0 instead, you already assume that 0 handle is "no
>handle".
OK, will check that as well.
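That is, something like the following in place of the strncmp() check
(sketch only):

for (i = 0; i < stored_kasan_records; i++) {
	if (record.bt_handle != kasan_records[i].bt_handle)
		continue;
	if (record.alloc_handle != kasan_records[i].alloc_handle)
		continue;
	/*
	 * Only compare free stacks when one was recorded at all;
	 * a 0 handle already means "no free stack" here.
	 */
	if (record.free_handle &&
	    record.free_handle != kasan_records[i].free_handle)
		continue;

	ret = false;
	goto done;
}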
>> + (record.free_handle != kasan_records[i].free_handle))
>> + continue;
>> +
>> + ret = false;
>> + goto done;
>> + }
>> +
>> + memcpy(&kasan_records[stored_kasan_records], &record, sizeof(struct kasan_record));
>> + stored_kasan_records++;
>
>I think you just introduced an out-of-bounds write into KASAN, check
>for MAX_RECORDS ;)
:), it was taken care of in the second patch [2/2].
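For completeness, with patch 1/2 applied alone the check would just be
(sketch):

/* Never write past the records array. */
if (stored_kasan_records >= MAX_RECORDS)
	goto done;	/* database full: report is printed but not stored */

memcpy(&kasan_records[stored_kasan_records], &record, sizeof(record));
stored_kasan_records++;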
>
>> +
>> +done:
>> + spin_unlock_irqrestore(&report_lock, *flags);
>> + kasan_enable_current();
>> + return ret;
>> +}
>> +
>> static void __kasan_report(unsigned long addr, size_t size, bool is_write,
>> unsigned long ip)
>> {
>> @@ -388,6 +451,10 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write,
>> info.is_write = is_write;
>> info.ip = ip;
>>
>> + if (addr_has_metadata(untagged_addr) &&
>
>Why addr_has_metadata check?
>The kernel will probably crash later anyway, but from point of view of
>this code, I don't see reasons to not dedup wild accesses.
Just to align with current code.
>> + !save_report(untagged_addr, &info, get_tag(tagged_addr), &flags))
>> + return;
>> +
>> start_report(&flags);
>>
>> print_error_description(&info);
>> --
>> 2.17.1
>>
I will reply on the other thread [2/2] as well; please let me know
whether the first patch alone would be acceptable for mainline.
Thanks,
Maninder Singh
Hi,
>
> > On Thu, 22 Apr 2021 at 11:17, Maninder Singh <[email protected]> wrote:
> > >
> > > when KASAN multishot is ON and some buggy code hits same code path
> > > of KASAN issue repetetively, it can flood logs on console.
> > >
> > > Check for allocaton, free and backtrace path at time of KASAN error,
> > > if these are same then it is duplicate error and avoid these prints
> > > from KASAN.
> >
> > On a more fundamental level, I think this sort of filtering is the
> > wrong solution to your problem. One reason why it's good that
> > multishot is off by default is, because _every_ KASAN report is
> > critical and can destabilize the system. Therefore, any report after
> > the first one might be completely bogus, because the system is in a
> > potentially bad state and its behaviour might be completely random.
Yes, that's a valid point. But in some scenarios testing in production
takes time, and waiting for each issue to be fixed also takes time
because there are multiple stakeholders in the modules. So we thought
it was better to catch multiple issues in one long run.
> > The correct solution is to not leave the system running, fix the first
> > bug found, continue; rinse and repeat. Therefore, this patch adds a
> > lot of code for little benefit.
>
> I agree with Marco here.
>
> It doesn't make sense to have this deduplication code in the kernel
> anyway. If you want unique reports, write a userspace script that
> parses dmesg and groups the reports.
>
Yes, agreed, but the issue is that when multishot is ON, the same
KASAN error is reported multiple times and can flood the serial logs,
which can be avoided with patch [1/2].
Thanks,
Maninder Singh