2021-01-14 19:39:22

by Andrey Konovalov

Subject: [PATCH v3 00/15] kasan: HW_TAGS tests support and fixes

This patchset adds support for running the KASAN KUnit tests with the
hardware tag-based mode and also contains a few fixes.

Changes v2->v3:
- Don't call kmalloc(0) when generating a random size.
- Use ARRAY_SIZE() in the kmem_cache_bulk_alloc() test.
- Print an error message when tests are being run with kasan.mode=off.
- Move _RET_IP_ to inline wrappers for kasan annotations.

Andrey Konovalov (15):
kasan: prefix global functions with kasan_
kasan: clarify HW_TAGS impact on TBI
kasan: clean up comments in tests
kasan: add macros to simplify checking test constraints
kasan: add match-all tag tests
kasan, arm64: allow using KUnit tests with HW_TAGS mode
kasan: rename CONFIG_TEST_KASAN_MODULE
kasan: add compiler barriers to KUNIT_EXPECT_KASAN_FAIL
kasan: adapt kmalloc_uaf2 test to HW_TAGS mode
kasan: fix memory corruption in kasan_bitops_tags test
kasan: move _RET_IP_ to inline wrappers
kasan: fix bug detection via ksize for HW_TAGS mode
kasan: add proper page allocator tests
kasan: add a test for kmem_cache_alloc/free_bulk
kasan: don't run tests when KASAN is not enabled

Documentation/dev-tools/kasan.rst | 24 +-
arch/arm64/include/asm/memory.h | 1 +
arch/arm64/include/asm/mte-kasan.h | 12 +
arch/arm64/kernel/mte.c | 12 +
arch/arm64/mm/fault.c | 16 +-
include/linux/kasan-checks.h | 6 +
include/linux/kasan.h | 37 ++-
lib/Kconfig.kasan | 6 +-
lib/Makefile | 2 +-
lib/test_kasan.c | 424 +++++++++++++++++++++--------
lib/test_kasan_module.c | 5 +-
mm/kasan/common.c | 56 ++--
mm/kasan/generic.c | 38 +--
mm/kasan/kasan.h | 69 +++--
mm/kasan/quarantine.c | 22 +-
mm/kasan/report.c | 15 +-
mm/kasan/report_generic.c | 8 +-
mm/kasan/report_hw_tags.c | 8 +-
mm/kasan/report_sw_tags.c | 8 +-
mm/kasan/shadow.c | 26 +-
mm/kasan/sw_tags.c | 20 +-
mm/mempool.c | 2 +-
mm/slab.c | 2 +-
mm/slab_common.c | 16 +-
mm/slub.c | 4 +-
tools/objtool/check.c | 2 +-
26 files changed, 559 insertions(+), 282 deletions(-)

--
2.30.0.284.gd98b1dd5eaa7-goog


2021-01-14 19:39:49

by Andrey Konovalov

Subject: [PATCH v3 04/15] kasan: add macros to simplify checking test constraints

Some KASAN tests require specific kernel configs to be enabled.
Instead of copy-pasting the checks for these configs, add a few helper
macros and use them.
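
For illustration, a minimal sketch of the resulting pattern, with a
hypothetical test body (the macros themselves appear in the diff below):

    static void example_generic_only_test(struct kunit *test)
    {
            /*
             * Logs a kunit_info() message and returns early unless
             * CONFIG_KASAN_GENERIC is enabled.
             */
            KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

            /* ... test body that only makes sense for the generic mode ... */
    }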

Link: https://linux-review.googlesource.com/id/I237484a7fddfedf4a4aae9cc61ecbcdbe85a0a63
Suggested-by: Alexander Potapenko <[email protected]>
Reviewed-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/test_kasan.c | 101 +++++++++++++++--------------------------------
1 file changed, 31 insertions(+), 70 deletions(-)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 6f46e27c2af7..714ea27fcc3e 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -73,6 +73,20 @@ static void kasan_test_exit(struct kunit *test)
fail_data.report_found); \
} while (0)

+#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
+ if (!IS_ENABLED(config)) { \
+ kunit_info((test), "skipping, " #config " required"); \
+ return; \
+ } \
+} while (0)
+
+#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
+ if (IS_ENABLED(config)) { \
+ kunit_info((test), "skipping, " #config " enabled"); \
+ return; \
+ } \
+} while (0)
+
static void kmalloc_oob_right(struct kunit *test)
{
char *ptr;
@@ -114,10 +128,7 @@ static void kmalloc_pagealloc_oob_right(struct kunit *test)
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

- if (!IS_ENABLED(CONFIG_SLUB)) {
- kunit_info(test, "CONFIG_SLUB is not enabled.");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

/*
* Allocate a chunk that does not fit into a SLUB cache to trigger
@@ -135,10 +146,7 @@ static void kmalloc_pagealloc_uaf(struct kunit *test)
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

- if (!IS_ENABLED(CONFIG_SLUB)) {
- kunit_info(test, "CONFIG_SLUB is not enabled.");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -152,10 +160,7 @@ static void kmalloc_pagealloc_invalid_free(struct kunit *test)
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

- if (!IS_ENABLED(CONFIG_SLUB)) {
- kunit_info(test, "CONFIG_SLUB is not enabled.");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -218,10 +223,7 @@ static void kmalloc_oob_16(struct kunit *test)
} *ptr1, *ptr2;

/* This test is specifically crafted for the generic mode. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -454,10 +456,7 @@ static void kasan_global_oob(struct kunit *test)
char *p = &global_array[ARRAY_SIZE(global_array) + i];

/* Only generic mode instruments globals. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
@@ -486,10 +485,7 @@ static void kasan_stack_oob(struct kunit *test)
volatile int i = OOB_TAG_OFF;
char *p = &stack_array[ARRAY_SIZE(stack_array) + i];

- if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
- kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
@@ -501,15 +497,8 @@ static void kasan_alloca_oob_left(struct kunit *test)
char *p = alloca_array - 1;

/* Only generic mode instruments dynamic allocas. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required");
- return;
- }
-
- if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
- kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
@@ -521,15 +510,8 @@ static void kasan_alloca_oob_right(struct kunit *test)
char *p = alloca_array + i;

/* Only generic mode instruments dynamic allocas. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required");
- return;
- }
-
- if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
- kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
@@ -593,11 +575,7 @@ static void kasan_memchr(struct kunit *test)
* str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
*/
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
- kunit_info(test,
- "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

if (OOB_TAG_OFF)
size = round_up(size, OOB_TAG_OFF);
@@ -621,11 +599,7 @@ static void kasan_memcmp(struct kunit *test)
* str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
*/
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
- kunit_info(test,
- "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

if (OOB_TAG_OFF)
size = round_up(size, OOB_TAG_OFF);
@@ -648,11 +622,7 @@ static void kasan_strings(struct kunit *test)
* str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
*/
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
- kunit_info(test,
- "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -713,10 +683,7 @@ static void kasan_bitops_generic(struct kunit *test)
long *bits;

/* This test is specifically crafted for the generic mode. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

/*
* Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
@@ -744,11 +711,8 @@ static void kasan_bitops_tags(struct kunit *test)
{
long *bits;

- /* This test is specifically crafted for the tag-based mode. */
- if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n");
- return;
- }
+ /* This test is specifically crafted for tag-based modes. */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

/* Allocation size will be rounded to up granule size, which is 16. */
bits = kzalloc(sizeof(*bits), GFP_KERNEL);
@@ -777,10 +741,7 @@ static void vmalloc_oob(struct kunit *test)
{
void *area;

- if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
- kunit_info(test, "CONFIG_KASAN_VMALLOC is not enabled.");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

/*
* We have to be careful not to hit the guard page.
--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-14 19:40:09

by Andrey Konovalov

Subject: [PATCH v3 10/15] kasan: fix memory corruption in kasan_bitops_tags test

Since the hardware tag-based KASAN mode might not have a redzone that
comes after an allocated object (when kasan.mode=prod is enabled), the
kasan_bitops_tags() test ends up corrupting the next object in memory.

Change the test so it always accesses the redzone that lies within the
allocated object's boundaries.
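
A rough sketch of the slot layout the fixed test relies on (the sizes are
the ones used in the diff below):

    /*
     * bits = kzalloc(48, GFP_KERNEL) is served from the kmalloc-64 cache:
     *
     *   bytes  0..47: the allocated object
     *   bytes 48..63: in-object redzone
     *
     * Accesses at offset 48 hit this redzone but stay inside the 64-byte
     * slot, so no neighboring object gets corrupted even when
     * kasan.mode=prod places no redzone after the object.
     */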

Link: https://linux-review.googlesource.com/id/I67f51d1ee48f0a8d0fe2658c2a39e4879fe0832a
Reviewed-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/test_kasan.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 0cda4a1ff394..a06e7946f581 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -749,13 +749,13 @@ static void kasan_bitops_tags(struct kunit *test)
/* This test is specifically crafted for tag-based modes. */
KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

- /* Allocation size will be rounded to up granule size, which is 16. */
- bits = kzalloc(sizeof(*bits), GFP_KERNEL);
+ /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
+ bits = kzalloc(48, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

- /* Do the accesses past the 16 allocated bytes. */
- kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
- kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);
+ /* Do the accesses past the 48 allocated bytes, but within the redzone. */
+ kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
+ kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

kfree(bits);
}
--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-14 19:40:14

by Andrey Konovalov

Subject: [PATCH v3 02/15] kasan: clarify HW_TAGS impact on TBI

Mention in the documentation that enabling CONFIG_KASAN_HW_TAGS
always results in in-kernel TBI (Top Byte Ignore) being enabled.

Also do a few minor documentation cleanups.

Link: https://linux-review.googlesource.com/id/Iba2a6697e3c6304cb53f89ec61dedc77fa29e3ae
Reviewed-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
Documentation/dev-tools/kasan.rst | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index 0fc3fb1860c4..26c99852a852 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -147,15 +147,14 @@ negative values to distinguish between different kinds of inaccessible memory
like redzones or freed memory (see mm/kasan/kasan.h).

In the report above the arrows point to the shadow byte 03, which means that
-the accessed address is partially accessible.
-
-For tag-based KASAN this last report section shows the memory tags around the
-accessed address (see `Implementation details`_ section).
+the accessed address is partially accessible. For tag-based KASAN modes this
+last report section shows the memory tags around the accessed address
+(see the `Implementation details`_ section).

Boot parameters
~~~~~~~~~~~~~~~

-Hardware tag-based KASAN mode (see the section about different mode below) is
+Hardware tag-based KASAN mode (see the section about various modes below) is
intended for use in production as a security mitigation. Therefore it supports
boot parameters that allow to disable KASAN competely or otherwise control
particular KASAN features.
@@ -305,6 +304,13 @@ reserved to tag freed memory regions.
Hardware tag-based KASAN currently only supports tagging of
kmem_cache_alloc/kmalloc and page_alloc memory.

+If the hardware doesn't support MTE (pre ARMv8.5), hardware tag-based KASAN
+won't be enabled. In this case, all boot parameters are ignored.
+
+Note that enabling CONFIG_KASAN_HW_TAGS always results in in-kernel TBI being
+enabled, even when kasan.mode=off is provided or when the hardware doesn't
+support MTE (but supports TBI).
+
What memory accesses are sanitised by KASAN?
--------------------------------------------

--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-14 19:40:18

by Andrey Konovalov

Subject: [PATCH v3 06/15] kasan, arm64: allow using KUnit tests with HW_TAGS mode

At a high level, this patch allows running KUnit KASAN tests with the
hardware tag-based KASAN mode.

Internally, this change reenables tag checking at the end of each KASAN
test that triggers a tag fault and leads to tag checking being disabled.

With this patch, KASAN tests still fail for the hardware tag-based
mode; fixes come in the next few patches.
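
Roughly, each KUNIT_EXPECT_KASAN_FAIL() invocation now follows this flow
(a condensed sketch of the macro changes in the diff, not the literal code):

    migrate_disable();              /* tag checking is a per-CPU state */
    /*
     * ... run the faulting expression; the fault gets reported (tests
     * disable report-once) and MTE auto-disables tag checking ...
     */
    if (fail_data.report_found)
            hw_enable_tagging();    /* re-arm checking for the next test */
    migrate_enable();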

Link: https://linux-review.googlesource.com/id/Id94dc9eccd33b23cda4950be408c27f879e474c8
Signed-off-by: Andrey Konovalov <[email protected]>
---
arch/arm64/include/asm/memory.h | 1 +
arch/arm64/include/asm/mte-kasan.h | 12 +++++++++
arch/arm64/kernel/mte.c | 12 +++++++++
arch/arm64/mm/fault.c | 16 +++++++-----
lib/Kconfig.kasan | 4 +--
lib/test_kasan.c | 42 +++++++++++++++++++++---------
mm/kasan/kasan.h | 9 +++++++
7 files changed, 75 insertions(+), 21 deletions(-)

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 18fce223b67b..cedfc9e97bcc 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -232,6 +232,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)

#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging() mte_enable_kernel()
+#define arch_set_tagging_report_once(state) mte_set_report_once(state)
#define arch_init_tags(max_tag) mte_init_tags(max_tag)
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 26349a4b5e2e..3748d5bb88c0 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -32,6 +32,9 @@ void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
void mte_enable_kernel(void);
void mte_init_tags(u64 max_tag);

+void mte_set_report_once(bool state);
+bool mte_report_once(void);
+
#else /* CONFIG_ARM64_MTE */

static inline u8 mte_get_ptr_tag(void *ptr)
@@ -60,6 +63,15 @@ static inline void mte_init_tags(u64 max_tag)
{
}

+static inline void mte_set_report_once(bool state)
+{
+}
+
+static inline bool mte_report_once(void)
+{
+ return false;
+}
+
#endif /* CONFIG_ARM64_MTE */

#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index dc9ada64feed..c63b3d7a3cd9 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -25,6 +25,8 @@

u64 gcr_kernel_excl __ro_after_init;

+static bool report_fault_once = true;
+
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
pte_t old_pte = READ_ONCE(*ptep);
@@ -158,6 +160,16 @@ void mte_enable_kernel(void)
isb();
}

+void mte_set_report_once(bool state)
+{
+ WRITE_ONCE(report_fault_once, state);
+}
+
+bool mte_report_once(void)
+{
+ return READ_ONCE(report_fault_once);
+}
+
static void update_sctlr_el1_tcf0(u64 tcf0)
{
/* ISB required for the kernel uaccess routines */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index a218f6f2fdc8..f1b77dc79948 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -302,7 +302,14 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
static void report_tag_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- bool is_write = ((esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT) != 0;
+ static bool reported;
+ bool is_write;
+
+ if (READ_ONCE(reported))
+ return;
+
+ if (mte_report_once())
+ WRITE_ONCE(reported, true);

/* The format of KASAN tags is 0xF<x>. */
addr |= (0xF0UL << MTE_TAG_SHIFT);
@@ -310,6 +317,7 @@ static void report_tag_fault(unsigned long addr, unsigned int esr,
* SAS bits aren't set for all faults reported in EL1, so we can't
* find out access size.
*/
+ is_write = ((esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT) != 0;
kasan_report(addr, 0, is_write, regs->pc);
}
#else
@@ -321,12 +329,8 @@ static inline void report_tag_fault(unsigned long addr, unsigned int esr,
static void do_tag_recovery(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- static bool reported;

- if (!READ_ONCE(reported)) {
- report_tag_fault(addr, esr, regs);
- WRITE_ONCE(reported, true);
- }
+ report_tag_fault(addr, esr, regs);

/*
* Disable MTE Tag Checking on the local CPU for the current EL.
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index f5fa4ba126bf..3091432acb0a 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -190,11 +190,11 @@ config KASAN_KUNIT_TEST
kernel debugging features like KASAN.

For more information on KUnit and unit tests in general, please refer
- to the KUnit documentation in Documentation/dev-tools/kunit
+ to the KUnit documentation in Documentation/dev-tools/kunit.

config TEST_KASAN_MODULE
tristate "KUnit-incompatible tests of KASAN bug detection capabilities"
- depends on m && KASAN
+ depends on m && KASAN && !KASAN_HW_TAGS
help
This is a part of the KASAN test suite that is incompatible with
KUnit. Currently includes tests that do bad copy_from/to_user
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index c344fe506ffc..ef663bcf83e5 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -41,16 +41,20 @@ static bool multishot;

/*
* Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
- * first detected bug and panic the kernel if panic_on_warn is enabled.
+ * first detected bug and panic the kernel if panic_on_warn is enabled. For
+ * hardware tag-based KASAN, also allow tag checking to be reenabled for each
+ * test; see the comment for KUNIT_EXPECT_KASAN_FAIL().
*/
static int kasan_test_init(struct kunit *test)
{
multishot = kasan_save_enable_multi_shot();
+ hw_set_tagging_report_once(false);
return 0;
}

static void kasan_test_exit(struct kunit *test)
{
+ hw_set_tagging_report_once(true);
kasan_restore_multi_shot(multishot);
}

@@ -59,19 +63,31 @@ static void kasan_test_exit(struct kunit *test)
* KASAN report; causes a test failure otherwise. This relies on a KUnit
* resource named "kasan_data". Do not use this name for KUnit resources
* outside of KASAN tests.
+ *
+ * For hardware tag-based KASAN, when a tag fault happens, tag checking is
+ * normally auto-disabled. When this happens, this test handler reenables
+ * tag checking. As tag checking can be only disabled or enabled per CPU, this
+ * handler disables migration (preemption).
*/
-#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
- fail_data.report_expected = true; \
- fail_data.report_found = false; \
- kunit_add_named_resource(test, \
- NULL, \
- NULL, \
- &resource, \
- "kasan_data", &fail_data); \
- expression; \
- KUNIT_EXPECT_EQ(test, \
- fail_data.report_expected, \
- fail_data.report_found); \
+#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
+ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \
+ migrate_disable(); \
+ fail_data.report_expected = true; \
+ fail_data.report_found = false; \
+ kunit_add_named_resource(test, \
+ NULL, \
+ NULL, \
+ &resource, \
+ "kasan_data", &fail_data); \
+ expression; \
+ KUNIT_EXPECT_EQ(test, \
+ fail_data.report_expected, \
+ fail_data.report_found); \
+ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \
+ if (fail_data.report_found) \
+ hw_enable_tagging(); \
+ migrate_enable(); \
+ } \
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index c3fb9bf241d3..292dfbc37deb 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -280,6 +280,9 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#ifndef arch_init_tags
#define arch_init_tags(max_tag)
#endif
+#ifndef arch_set_tagging_report_once
+#define arch_set_tagging_report_once(state)
+#endif
#ifndef arch_get_random_tag
#define arch_get_random_tag() (0xFF)
#endif
@@ -292,10 +295,16 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)

#define hw_enable_tagging() arch_enable_tagging()
#define hw_init_tags(max_tag) arch_init_tags(max_tag)
+#define hw_set_tagging_report_once(state) arch_set_tagging_report_once(state)
#define hw_get_random_tag() arch_get_random_tag()
#define hw_get_mem_tag(addr) arch_get_mem_tag(addr)
#define hw_set_mem_tag_range(addr, size, tag) arch_set_mem_tag_range((addr), (size), (tag))

+#else /* CONFIG_KASAN_HW_TAGS */
+
+#define hw_enable_tagging()
+#define hw_set_tagging_report_once(state)
+
#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-14 19:40:25

by Andrey Konovalov

Subject: [PATCH v3 05/15] kasan: add match-all tag tests

Add 3 new tests for tag-based KASAN modes:

1. Check that match-all pointer tag is not assigned randomly.
2. Check that 0xff works as a match-all pointer tag.
3. Check that there are no match-all memory tags.

Note that test #3 causes a significant number (255) of KASAN reports
to be printed during execution for the SW_TAGS mode.
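
Test #2 relies on the following property (a fragment mirroring
match_all_ptr_tag() in the diff below):

    /*
     * A pointer retagged with the match-all tag 0xFF (KASAN_TAG_KERNEL)
     * passes the tag check regardless of the memory tag.
     */
    ptr = set_tag(ptr, KASAN_TAG_KERNEL);
    *ptr = 0;       /* must not trigger a KASAN report */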

Link: https://linux-review.googlesource.com/id/I78f1375efafa162b37f3abcb2c5bc2f3955dfd8e
Reviewed-by: Marco Elver <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/test_kasan.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++
mm/kasan/kasan.h | 6 ++++
2 files changed, 98 insertions(+)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 714ea27fcc3e..c344fe506ffc 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -13,6 +13,7 @@
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
+#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@@ -754,6 +755,94 @@ static void vmalloc_oob(struct kunit *test)
vfree(area);
}

+/*
+ * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
+ * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
+ * modes.
+ */
+static void match_all_not_assigned(struct kunit *test)
+{
+ char *ptr;
+ struct page *pages;
+ int i, size, order;
+
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ for (i = 0; i < 256; i++) {
+ size = (get_random_int() % 1024) + 1;
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+ KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+ kfree(ptr);
+ }
+
+ for (i = 0; i < 256; i++) {
+ order = (get_random_int() % 4) + 1;
+ pages = alloc_pages(GFP_KERNEL, order);
+ ptr = page_address(pages);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+ KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+ free_pages((unsigned long)ptr, order);
+ }
+}
+
+/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
+static void match_all_ptr_tag(struct kunit *test)
+{
+ char *ptr;
+ u8 tag;
+
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ ptr = kmalloc(128, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ /* Backup the assigned tag. */
+ tag = get_tag(ptr);
+ KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);
+
+ /* Reset the tag to 0xff. */
+ ptr = set_tag(ptr, KASAN_TAG_KERNEL);
+
+ /* This access shouldn't trigger a KASAN report. */
+ *ptr = 0;
+
+ /* Recover the pointer tag and free. */
+ ptr = set_tag(ptr, tag);
+ kfree(ptr);
+}
+
+/* Check that there are no match-all memory tags for tag-based modes. */
+static void match_all_mem_tag(struct kunit *test)
+{
+ char *ptr;
+ int tag;
+
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ ptr = kmalloc(128, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+
+ /* For each possible tag value not matching the pointer tag. */
+ for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
+ if (tag == get_tag(ptr))
+ continue;
+
+ /* Mark the first memory granule with the chosen memory tag. */
+ kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag);
+
+ /* This access must cause a KASAN report. */
+ KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
+ }
+
+ /* Recover the memory tag and free. */
+ kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr));
+ kfree(ptr);
+}
+
static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_right),
KUNIT_CASE(kmalloc_oob_left),
@@ -793,6 +882,9 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kasan_bitops_tags),
KUNIT_CASE(kmalloc_double_kzfree),
KUNIT_CASE(vmalloc_oob),
+ KUNIT_CASE(match_all_not_assigned),
+ KUNIT_CASE(match_all_ptr_tag),
+ KUNIT_CASE(match_all_mem_tag),
{}
};

diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 3b38baddec47..c3fb9bf241d3 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -36,6 +36,12 @@ extern bool kasan_flag_panic __ro_after_init;
#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */

+#ifdef CONFIG_KASAN_HW_TAGS
+#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */
+#else
+#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */
+#endif
+
#ifdef CONFIG_KASAN_GENERIC
#define KASAN_FREE_PAGE 0xFF /* page was freed */
#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-14 19:40:55

by Andrey Konovalov

Subject: [PATCH v3 15/15] kasan: don't run tests when KASAN is not enabled

Don't run KASAN tests when it's disabled with kasan.mode=off to avoid
corrupting kernel memory.

Link: https://linux-review.googlesource.com/id/I6447af436a69a94bfc35477f6bf4e2122948355e
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/test_kasan.c | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index a96376aa7293..6238b56127f8 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -47,6 +47,11 @@ static bool multishot;
*/
static int kasan_test_init(struct kunit *test)
{
+ if (!kasan_enabled()) {
+ kunit_err(test, "can't run KASAN tests with KASAN disabled");
+ return -1;
+ }
+
multishot = kasan_save_enable_multi_shot();
hw_set_tagging_report_once(false);
return 0;
--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-14 19:41:26

by Andrey Konovalov

Subject: [PATCH v3 13/15] kasan: add proper page allocator tests

The existing page allocator tests rely on the kmalloc fallback to the
page allocator for large sizes, which is only present for SLUB. Add
proper tests that
use alloc/free_pages().

Link: https://linux-review.googlesource.com/id/Ia173d5a1b215fe6b2548d814ef0f4433cf983570
Reviewed-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/test_kasan.c | 51 +++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 46 insertions(+), 5 deletions(-)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 566d894ba20b..ab22a653762e 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -147,6 +147,12 @@ static void kmalloc_node_oob_right(struct kunit *test)
kfree(ptr);
}

+/*
+ * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
+ * fit into a slab cache and therefore is allocated via the page allocator
+ * fallback. Since this kind of fallback is only implemented for SLUB, these
+ * tests are limited to that allocator.
+ */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
char *ptr;
@@ -154,14 +160,11 @@ static void kmalloc_pagealloc_oob_right(struct kunit *test)

KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

- /*
- * Allocate a chunk that does not fit into a SLUB cache to trigger
- * the page allocator fallback.
- */
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
+
kfree(ptr);
}

@@ -174,8 +177,8 @@ static void kmalloc_pagealloc_uaf(struct kunit *test)

ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
kfree(ptr);
+
KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

@@ -192,6 +195,42 @@ static void kmalloc_pagealloc_invalid_free(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

+static void pagealloc_oob_right(struct kunit *test)
+{
+ char *ptr;
+ struct page *pages;
+ size_t order = 4;
+ size_t size = (1UL << (PAGE_SHIFT + order));
+
+ /*
+ * With generic KASAN page allocations have no redzones, thus
+ * out-of-bounds detection is not guaranteed.
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
+ */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ pages = alloc_pages(GFP_KERNEL, order);
+ ptr = page_address(pages);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
+ free_pages((unsigned long)ptr, order);
+}
+
+static void pagealloc_uaf(struct kunit *test)
+{
+ char *ptr;
+ struct page *pages;
+ size_t order = 4;
+
+ pages = alloc_pages(GFP_KERNEL, order);
+ ptr = page_address(pages);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ free_pages((unsigned long)ptr, order);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
+}
+
static void kmalloc_large_oob_right(struct kunit *test)
{
char *ptr;
@@ -903,6 +942,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_pagealloc_oob_right),
KUNIT_CASE(kmalloc_pagealloc_uaf),
KUNIT_CASE(kmalloc_pagealloc_invalid_free),
+ KUNIT_CASE(pagealloc_oob_right),
+ KUNIT_CASE(pagealloc_uaf),
KUNIT_CASE(kmalloc_large_oob_right),
KUNIT_CASE(kmalloc_oob_krealloc_more),
KUNIT_CASE(kmalloc_oob_krealloc_less),
--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-14 19:41:37

by Andrey Konovalov

Subject: [PATCH v3 11/15] kasan: move _RET_IP_ to inline wrappers

Generic mm functions that call KASAN annotations that might report a bug
pass _RET_IP_ to them as an argument. This allows KASAN to include the
name of the function that called the mm function in its report's header.

Now that KASAN has inline wrappers for all of its annotations, move
_RET_IP_ to those wrappers to simplify annotation call sites.
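
The resulting simplification at a typical call site (the before/after
pattern from the mm/slab.c hunk below):

    /* Before: every caller had to pass _RET_IP_ explicitly. */
    kasan_slab_free(cachep, objp, _RET_IP_);

    /* After: the __always_inline wrapper captures _RET_IP_ itself. */
    kasan_slab_free(cachep, objp);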

Link: https://linux-review.googlesource.com/id/I8fb3c06d49671305ee184175a39591bc26647a67
Signed-off-by: Andrey Konovalov <[email protected]>
---
include/linux/kasan.h | 20 +++++++++-----------
mm/mempool.c | 2 +-
mm/slab.c | 2 +-
mm/slub.c | 4 ++--
4 files changed, 13 insertions(+), 15 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5e0655fb2a6f..bba1637827c3 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -181,19 +181,18 @@ static __always_inline void * __must_check kasan_init_slab_obj(
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
-static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
- unsigned long ip)
+static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
if (kasan_enabled())
- return __kasan_slab_free(s, object, ip);
+ return __kasan_slab_free(s, object, _RET_IP_);
return false;
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
-static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
+static __always_inline void kasan_slab_free_mempool(void *ptr)
{
if (kasan_enabled())
- __kasan_slab_free_mempool(ptr, ip);
+ __kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
@@ -237,10 +236,10 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
-static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
+static __always_inline void kasan_kfree_large(void *ptr)
{
if (kasan_enabled())
- __kasan_kfree_large(ptr, ip);
+ __kasan_kfree_large(ptr, _RET_IP_);
}

bool kasan_save_enable_multi_shot(void);
@@ -273,12 +272,11 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
{
return (void *)object;
}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
- unsigned long ip)
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
return false;
}
-static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
+static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags)
{
@@ -298,7 +296,7 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
{
return (void *)object;
}
-static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
+static inline void kasan_kfree_large(void *ptr) {}

#endif /* CONFIG_KASAN */

diff --git a/mm/mempool.c b/mm/mempool.c
index 624ed51b060f..79959fac27d7 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element)
static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
- kasan_slab_free_mempool(element, _RET_IP_);
+ kasan_slab_free_mempool(element);
else if (pool->alloc == mempool_alloc_pages)
kasan_free_pages(element, (unsigned long)pool->pool_data);
}
diff --git a/mm/slab.c b/mm/slab.c
index d7c8da9319c7..afeb6191fb1e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3421,7 +3421,7 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
memset(objp, 0, cachep->object_size);

/* Put the object into the quarantine, don't touch it for now. */
- if (kasan_slab_free(cachep, objp, _RET_IP_))
+ if (kasan_slab_free(cachep, objp))
return;

/* Use KCSAN to help debug racy use-after-free. */
diff --git a/mm/slub.c b/mm/slub.c
index 75fb097d990d..0afb53488238 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1514,7 +1514,7 @@ static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
static __always_inline void kfree_hook(void *x)
{
kmemleak_free(x);
- kasan_kfree_large(x, _RET_IP_);
+ kasan_kfree_large(x);
}

static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
@@ -1544,7 +1544,7 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);

/* KASAN might put x into memory quarantine, delaying its reuse */
- return kasan_slab_free(s, x, _RET_IP_);
+ return kasan_slab_free(s, x);
}

static inline bool slab_free_freelist_hook(struct kmem_cache *s,
--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-14 19:41:44

by Andrey Konovalov

Subject: [PATCH v3 09/15] kasan: adapt kmalloc_uaf2 test to HW_TAGS mode

In the kmalloc_uaf2() test, the pointers to the two allocated memory
blocks might happen to be the same, and the test will fail. With the
software tag-based mode, the probability of that is 1/254, so the
failure is hard to observe. For the hardware tag-based mode, though,
the probability is 1/14, which is quite noticeable.

Allow up to 16 attempts at generating different tags for the tag-based
modes.

Link: https://linux-review.googlesource.com/id/Ibfa458ef2804ff465d8eb07434a300bf36388d55
Reviewed-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/test_kasan.c | 11 +++++++++++
1 file changed, 11 insertions(+)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 2419e36e117b..0cda4a1ff394 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -382,7 +382,9 @@ static void kmalloc_uaf2(struct kunit *test)
{
char *ptr1, *ptr2;
size_t size = 43;
+ int counter = 0;

+again:
ptr1 = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

@@ -391,6 +393,15 @@ static void kmalloc_uaf2(struct kunit *test)
ptr2 = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

+ /*
+ * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
+ * Allow up to 16 attempts at generating different tags.
+ */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
+ kfree(ptr2);
+ goto again;
+ }
+
KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-14 19:41:53

by Andrey Konovalov

Subject: [PATCH v3 08/15] kasan: add compiler barriers to KUNIT_EXPECT_KASAN_FAIL

It might not be obvious to the compiler that the expression must be
executed between the writes to and reads from fail_data. In this case, the
compiler might reorder or optimize away some of the accesses, and
the tests will fail.

Add compiler barriers around the expression in KUNIT_EXPECT_KASAN_FAIL
and use READ/WRITE_ONCE() for accessing fail_data fields.
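
The resulting ordering, annotated (a condensed sketch of the macro after
this patch):

    WRITE_ONCE(fail_data.report_found, false);
    barrier();      /* the expression must not be hoisted above the write */
    expression;     /* may fault; the KASAN report sets report_found */
    barrier();      /* the read below must not be moved before the fault */
    KUNIT_EXPECT_EQ(test, READ_ONCE(fail_data.report_expected),
                    READ_ONCE(fail_data.report_found));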

Link: https://linux-review.googlesource.com/id/I046079f48641a1d36fe627fc8827a9249102fd50
Reviewed-by: Marco Elver <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/test_kasan.c | 17 ++++++++++++-----
mm/kasan/report.c | 2 +-
2 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index ef663bcf83e5..2419e36e117b 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -68,23 +68,30 @@ static void kasan_test_exit(struct kunit *test)
* normally auto-disabled. When this happens, this test handler reenables
* tag checking. As tag checking can be only disabled or enabled per CPU, this
* handler disables migration (preemption).
+ *
+ * Since the compiler doesn't see that the expression can change the fail_data
+ * fields, it can reorder or optimize away the accesses to those fields.
+ * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
+ * expression to prevent that.
*/
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \
migrate_disable(); \
- fail_data.report_expected = true; \
- fail_data.report_found = false; \
+ WRITE_ONCE(fail_data.report_expected, true); \
+ WRITE_ONCE(fail_data.report_found, false); \
kunit_add_named_resource(test, \
NULL, \
NULL, \
&resource, \
"kasan_data", &fail_data); \
+ barrier(); \
expression; \
+ barrier(); \
KUNIT_EXPECT_EQ(test, \
- fail_data.report_expected, \
- fail_data.report_found); \
+ READ_ONCE(fail_data.report_expected), \
+ READ_ONCE(fail_data.report_found)); \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \
- if (fail_data.report_found) \
+ if (READ_ONCE(fail_data.report_found)) \
hw_enable_tagging(); \
migrate_enable(); \
} \
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index e93d7973792e..234f35a84f19 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -331,7 +331,7 @@ static void kasan_update_kunit_status(struct kunit *cur_test)
}

kasan_data = (struct kunit_kasan_expectation *)resource->data;
- kasan_data->report_found = true;
+ WRITE_ONCE(kasan_data->report_found, true);
kunit_put_resource(resource);
}
#endif /* IS_ENABLED(CONFIG_KUNIT) */
--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-14 19:42:10

by Andrey Konovalov

Subject: [PATCH v3 07/15] kasan: rename CONFIG_TEST_KASAN_MODULE

Rename CONFIG_TEST_KASAN_MODULE to CONFIG_KASAN_MODULE_TEST.

This naming is more consistent with the existing CONFIG_KASAN_KUNIT_TEST.

Link: https://linux-review.googlesource.com/id/Id347dfa5fe8788b7a1a189863e039f409da0ae5f
Reviewed-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
---
Documentation/dev-tools/kasan.rst | 8 ++++----
lib/Kconfig.kasan | 2 +-
lib/Makefile | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index 26c99852a852..b25ae43d683e 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -374,17 +374,17 @@ unmapped. This will require changes in arch-specific code.
This allows ``VMAP_STACK`` support on x86, and can simplify support of
architectures that do not have a fixed module region.

-CONFIG_KASAN_KUNIT_TEST & CONFIG_TEST_KASAN_MODULE
---------------------------------------------------
+CONFIG_KASAN_KUNIT_TEST and CONFIG_KASAN_MODULE_TEST
+----------------------------------------------------

-KASAN tests consist on two parts:
+KASAN tests consist of two parts:

1. Tests that are integrated with the KUnit Test Framework. Enabled with
``CONFIG_KASAN_KUNIT_TEST``. These tests can be run and partially verified
automatically in a few different ways, see the instructions below.

2. Tests that are currently incompatible with KUnit. Enabled with
-``CONFIG_TEST_KASAN_MODULE`` and can only be run as a module. These tests can
+``CONFIG_KASAN_MODULE_TEST`` and can only be run as a module. These tests can
only be verified manually, by loading the kernel module and inspecting the
kernel log for KASAN reports.

diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 3091432acb0a..624ae1df7984 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -192,7 +192,7 @@ config KASAN_KUNIT_TEST
For more information on KUnit and unit tests in general, please refer
to the KUnit documentation in Documentation/dev-tools/kunit.

-config TEST_KASAN_MODULE
+config KASAN_MODULE_TEST
tristate "KUnit-incompatible tests of KASAN bug detection capabilities"
depends on m && KASAN && !KASAN_HW_TAGS
help
diff --git a/lib/Makefile b/lib/Makefile
index afeff05fa8c5..122f25d6407e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -68,7 +68,7 @@ obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_KASAN_KUNIT_TEST) += test_kasan.o
CFLAGS_test_kasan.o += -fno-builtin
CFLAGS_test_kasan.o += $(call cc-disable-warning, vla)
-obj-$(CONFIG_TEST_KASAN_MODULE) += test_kasan_module.o
+obj-$(CONFIG_KASAN_MODULE_TEST) += test_kasan_module.o
CFLAGS_test_kasan_module.o += -fno-builtin
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-14 19:42:35

by Andrey Konovalov

Subject: [PATCH v3 12/15] kasan: fix bug detection via ksize for HW_TAGS mode

The currently existing kasan_check_read/write() annotations are intended
to be used for kernel modules that have KASAN compiler instrumentation
disabled. Thus, they are only relevant for the software KASAN modes that
rely on compiler instrumentation.

However, there's another use case for these annotations: ksize() checks
that the object passed to it is indeed accessible before unpoisoning the
whole object. This is currently done via __kasan_check_read(), which is
compiled away for the hardware tag-based mode that doesn't rely on
compiler instrumentation. This leads to KASAN failing to detect some
memory corruptions.

Provide another annotation called kasan_check_byte() that is available
for all KASAN modes. As the implementation, rename and reuse
kasan_check_invalid_free(). Use this new annotation in ksize().
To avoid having ksize() as the top frame in the reported stack trace,
pass _RET_IP_ to __kasan_check_byte().

Also add a new ksize_uaf() test that checks that a use-after-free is
detected via ksize() itself, and via plain accesses that happen later.
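
Condensed, the ksize() check in the diff below becomes:

    /*
     * Validate a single byte before unpoisoning the whole object. The
     * inline wrapper passes _RET_IP_, so the report names ksize()'s caller.
     */
    if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
            return 0;       /* invalid object: a KASAN report was printed */
    size = __ksize(objp);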

Link: https://linux-review.googlesource.com/id/Iaabf771881d0f9ce1b969f2a62938e99d3308ec5
Signed-off-by: Andrey Konovalov <[email protected]>
---
include/linux/kasan-checks.h | 6 ++++++
include/linux/kasan.h | 17 +++++++++++++++++
lib/test_kasan.c | 20 ++++++++++++++++++++
mm/kasan/common.c | 11 ++++++++++-
mm/kasan/generic.c | 4 ++--
mm/kasan/kasan.h | 10 +++++-----
mm/kasan/sw_tags.c | 6 +++---
mm/slab_common.c | 16 +++++++++-------
8 files changed, 72 insertions(+), 18 deletions(-)

diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
index ca5e89fb10d3..3d6d22a25bdc 100644
--- a/include/linux/kasan-checks.h
+++ b/include/linux/kasan-checks.h
@@ -4,6 +4,12 @@

#include <linux/types.h>

+/*
+ * The annotations present in this file are only relevant for the software
+ * KASAN modes that rely on compiler instrumentation, and will be optimized
+ * away for the hardware tag-based KASAN mode. Use kasan_check_byte() instead.
+ */
+
/*
* __kasan_check_*: Always available when KASAN is enabled. This may be used
* even in compilation units that selectively disable KASAN, but must use KASAN
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index bba1637827c3..5bedd5ee481f 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -242,6 +242,19 @@ static __always_inline void kasan_kfree_large(void *ptr)
__kasan_kfree_large(ptr, _RET_IP_);
}

+/*
+ * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
+ * the hardware tag-based mode that doesn't rely on compiler instrumentation.
+ */
+bool __kasan_check_byte(const void *addr, unsigned long ip);
+static __always_inline bool kasan_check_byte(const void *addr)
+{
+ if (kasan_enabled())
+ return __kasan_check_byte(addr, _RET_IP_);
+ return true;
+}
+
+
bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

@@ -297,6 +310,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
return (void *)object;
}
static inline void kasan_kfree_large(void *ptr) {}
+static inline bool kasan_check_byte(const void *address)
+{
+ return true;
+}

#endif /* CONFIG_KASAN */

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index a06e7946f581..566d894ba20b 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -496,6 +496,7 @@ static void kasan_global_oob(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

+/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
char *ptr;
@@ -514,6 +515,24 @@ static void ksize_unpoisons_memory(struct kunit *test)
kfree(ptr);
}

+/*
+ * Check that a use-after-free is detected by ksize() and via normal accesses
+ * after it.
+ */
+static void ksize_uaf(struct kunit *test)
+{
+ char *ptr;
+ int size = 128 - KASAN_GRANULE_SIZE;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ kfree(ptr);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
+}
+
static void kasan_stack_oob(struct kunit *test)
{
char stack_array[10];
@@ -907,6 +926,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kasan_alloca_oob_left),
KUNIT_CASE(kasan_alloca_oob_right),
KUNIT_CASE(ksize_unpoisons_memory),
+ KUNIT_CASE(ksize_uaf),
KUNIT_CASE(kmem_cache_double_free),
KUNIT_CASE(kmem_cache_invalid_free),
KUNIT_CASE(kasan_memchr),
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index eedc3e0fe365..b18189ef3a92 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -345,7 +345,7 @@ static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
return false;

- if (kasan_check_invalid_free(tagged_object)) {
+ if (!kasan_byte_accessible(tagged_object)) {
kasan_report_invalid_free(tagged_object, ip);
return true;
}
@@ -490,3 +490,12 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
kasan_report_invalid_free(ptr, ip);
/* The object will be poisoned by kasan_free_pages(). */
}
+
+bool __kasan_check_byte(const void *address, unsigned long ip)
+{
+ if (!kasan_byte_accessible(address)) {
+ kasan_report((unsigned long)address, 1, false, ip);
+ return false;
+ }
+ return true;
+}
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index acab8862dc67..3f17a1218055 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -185,11 +185,11 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
return check_region_inline(addr, size, write, ret_ip);
}

-bool kasan_check_invalid_free(void *addr)
+bool kasan_byte_accessible(const void *addr)
{
s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

- return shadow_byte < 0 || shadow_byte >= KASAN_GRANULE_SIZE;
+ return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

void kasan_cache_shrink(struct kmem_cache *cache)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 292dfbc37deb..bd4ee6fab648 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -329,20 +329,20 @@ static inline void kasan_unpoison(const void *address, size_t size)
round_up(size, KASAN_GRANULE_SIZE), get_tag(address));
}

-static inline bool kasan_check_invalid_free(void *addr)
+static inline bool kasan_byte_accessible(const void *addr)
{
u8 ptr_tag = get_tag(addr);
- u8 mem_tag = hw_get_mem_tag(addr);
+ u8 mem_tag = hw_get_mem_tag((void *)addr);

- return (mem_tag == KASAN_TAG_INVALID) ||
- (ptr_tag != KASAN_TAG_KERNEL && ptr_tag != mem_tag);
+ return (mem_tag != KASAN_TAG_INVALID) &&
+ (ptr_tag == KASAN_TAG_KERNEL || ptr_tag == mem_tag);
}

#else /* CONFIG_KASAN_HW_TAGS */

void kasan_poison(const void *address, size_t size, u8 value);
void kasan_unpoison(const void *address, size_t size);
-bool kasan_check_invalid_free(void *addr);
+bool kasan_byte_accessible(const void *addr);

#endif /* CONFIG_KASAN_HW_TAGS */

diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index cc271fceb5d5..94c2d33be333 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -118,13 +118,13 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
return true;
}

-bool kasan_check_invalid_free(void *addr)
+bool kasan_byte_accessible(const void *addr)
{
u8 tag = get_tag(addr);
u8 shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(kasan_reset_tag(addr)));

- return (shadow_byte == KASAN_TAG_INVALID) ||
- (tag != KASAN_TAG_KERNEL && tag != shadow_byte);
+ return (shadow_byte != KASAN_TAG_INVALID) &&
+ (tag == KASAN_TAG_KERNEL || tag == shadow_byte);
}

#define DEFINE_HWASAN_LOAD_STORE(size) \
diff --git a/mm/slab_common.c b/mm/slab_common.c
index e981c80d216c..9c12cf4212ea 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1157,19 +1157,21 @@ size_t ksize(const void *objp)
size_t size;

/*
- * We need to check that the pointed to object is valid, and only then
- * unpoison the shadow memory below. We use __kasan_check_read(), to
- * generate a more useful report at the time ksize() is called (rather
- * than later where behaviour is undefined due to potential
- * use-after-free or double-free).
+ * We need to first check that the pointer to the object is valid, and
+ * only then unpoison the memory. The report printed from ksize() is
+ * more useful than one printed later, when the behaviour could
+ * be undefined due to a potential use-after-free or double-free.
*
- * If the pointed to memory is invalid we return 0, to avoid users of
+ * We use kasan_check_byte(), which is supported for the hardware
+ * tag-based KASAN mode, unlike kasan_check_read/write().
+ *
+ * If the pointed to memory is invalid, we return 0 to avoid users of
* ksize() writing to and potentially corrupting the memory region.
*
* We want to perform the check before __ksize(), to avoid potentially
* crashing in __ksize() due to accessing invalid metadata.
*/
- if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
+ if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
return 0;

size = __ksize(objp);
--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-14 19:43:12

by Andrey Konovalov

Subject: [PATCH v3 14/15] kasan: add a test for kmem_cache_alloc/free_bulk

Add a test for kmem_cache_alloc/free_bulk to make sure there are no
false-positives when these functions are used.

Link: https://linux-review.googlesource.com/id/I2a8bf797aecf81baeac61380c567308f319e263d
Signed-off-by: Andrey Konovalov <[email protected]>
---
lib/test_kasan.c | 38 +++++++++++++++++++++++++++++++++-----
1 file changed, 33 insertions(+), 5 deletions(-)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index ab22a653762e..a96376aa7293 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -479,10 +479,11 @@ static void kmem_cache_oob(struct kunit *test)
{
char *p;
size_t size = 200;
- struct kmem_cache *cache = kmem_cache_create("test_cache",
- size, 0,
- 0, NULL);
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
p = kmem_cache_alloc(cache, GFP_KERNEL);
if (!p) {
kunit_err(test, "Allocation failed: %s\n", __func__);
@@ -491,11 +492,12 @@ static void kmem_cache_oob(struct kunit *test)
}

KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
+
kmem_cache_free(cache, p);
kmem_cache_destroy(cache);
}

-static void memcg_accounted_kmem_cache(struct kunit *test)
+static void kmem_cache_accounted(struct kunit *test)
{
int i;
char *p;
@@ -522,6 +524,31 @@ static void memcg_accounted_kmem_cache(struct kunit *test)
kmem_cache_destroy(cache);
}

+static void kmem_cache_bulk(struct kunit *test)
+{
+ struct kmem_cache *cache;
+ size_t size = 200;
+ char *p[10];
+ bool ret;
+ int i;
+
+ cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+ ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
+ if (!ret) {
+ kunit_err(test, "Allocation failed: %s\n", __func__);
+ kmem_cache_destroy(cache);
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(p); i++)
+ p[i][0] = p[i][size - 1] = 42;
+
+ kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
+ kmem_cache_destroy(cache);
+}
+
static char global_array[10];

static void kasan_global_oob(struct kunit *test)
@@ -961,7 +988,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kfree_via_page),
KUNIT_CASE(kfree_via_phys),
KUNIT_CASE(kmem_cache_oob),
- KUNIT_CASE(memcg_accounted_kmem_cache),
+ KUNIT_CASE(kmem_cache_accounted),
+ KUNIT_CASE(kmem_cache_bulk),
KUNIT_CASE(kasan_global_oob),
KUNIT_CASE(kasan_stack_oob),
KUNIT_CASE(kasan_alloca_oob_left),
--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-15 13:15:58

by Marco Elver

Subject: Re: [PATCH v3 15/15] kasan: don't run tests when KASAN is not enabled

On Thu, Jan 14, 2021 at 08:36PM +0100, Andrey Konovalov wrote:
> Don't run KASAN tests when it's disabled with kasan.mode=off to avoid
> corrupting kernel memory.
>
> Link: https://linux-review.googlesource.com/id/I6447af436a69a94bfc35477f6bf4e2122948355e
> Signed-off-by: Andrey Konovalov <[email protected]>

Reviewed-by: Marco Elver <[email protected]>


2021-01-15 13:21:08

by Marco Elver

Subject: Re: [PATCH v3 14/15] kasan: add a test for kmem_cache_alloc/free_bulk

On Thu, Jan 14, 2021 at 08:36PM +0100, Andrey Konovalov wrote:
> Add a test for kmem_cache_alloc/free_bulk to make sure there are no
> false positives when these functions are used.
>
> Link: https://linux-review.googlesource.com/id/I2a8bf797aecf81baeac61380c567308f319e263d
> Signed-off-by: Andrey Konovalov <[email protected]>

Reviewed-by: Marco Elver <[email protected]>

> ---
> lib/test_kasan.c | 38 +++++++++++++++++++++++++++++++++-----
> 1 file changed, 33 insertions(+), 5 deletions(-)
>
> diff --git a/lib/test_kasan.c b/lib/test_kasan.c
> index ab22a653762e..a96376aa7293 100644
> --- a/lib/test_kasan.c
> +++ b/lib/test_kasan.c
> @@ -479,10 +479,11 @@ static void kmem_cache_oob(struct kunit *test)
> {
> char *p;
> size_t size = 200;
> - struct kmem_cache *cache = kmem_cache_create("test_cache",
> - size, 0,
> - 0, NULL);
> + struct kmem_cache *cache;
> +
> + cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
> KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
> +
> p = kmem_cache_alloc(cache, GFP_KERNEL);
> if (!p) {
> kunit_err(test, "Allocation failed: %s\n", __func__);
> @@ -491,11 +492,12 @@ static void kmem_cache_oob(struct kunit *test)
> }
>
> KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
> +
> kmem_cache_free(cache, p);
> kmem_cache_destroy(cache);
> }
>
> -static void memcg_accounted_kmem_cache(struct kunit *test)
> +static void kmem_cache_accounted(struct kunit *test)
> {
> int i;
> char *p;
> @@ -522,6 +524,31 @@ static void memcg_accounted_kmem_cache(struct kunit *test)
> kmem_cache_destroy(cache);
> }
>
> +static void kmem_cache_bulk(struct kunit *test)
> +{
> + struct kmem_cache *cache;
> + size_t size = 200;
> + char *p[10];
> + bool ret;
> + int i;
> +
> + cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
> + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
> +
> + ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
> + if (!ret) {
> + kunit_err(test, "Allocation failed: %s\n", __func__);
> + kmem_cache_destroy(cache);
> + return;
> + }
> +
> + for (i = 0; i < ARRAY_SIZE(p); i++)
> + p[i][0] = p[i][size - 1] = 42;
> +
> + kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
> + kmem_cache_destroy(cache);
> +}
> +
> static char global_array[10];
>
> static void kasan_global_oob(struct kunit *test)
> @@ -961,7 +988,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
> KUNIT_CASE(kfree_via_page),
> KUNIT_CASE(kfree_via_phys),
> KUNIT_CASE(kmem_cache_oob),
> - KUNIT_CASE(memcg_accounted_kmem_cache),
> + KUNIT_CASE(kmem_cache_accounted),
> + KUNIT_CASE(kmem_cache_bulk),
> KUNIT_CASE(kasan_global_oob),
> KUNIT_CASE(kasan_stack_oob),
> KUNIT_CASE(kasan_alloca_oob_left),
> --
> 2.30.0.284.gd98b1dd5eaa7-goog
>

2021-01-15 13:21:26

by Marco Elver

[permalink] [raw]
Subject: Re: [PATCH v3 12/15] kasan: fix bug detection via ksize for HW_TAGS mode

On Thu, Jan 14, 2021 at 08:36PM +0100, Andrey Konovalov wrote:
> The currently existing kasan_check_read/write() annotations are intended
> to be used for kernel modules that have KASAN compiler instrumentation
> disabled. Thus, they are only relevant for the software KASAN modes that
> rely on compiler instrumentation.
>
> However there's another use case for these annotations: ksize() checks
> that the object passed to it is indeed accessible before unpoisoning the
> whole object. This is currently done via __kasan_check_read(), which is
> compiled away for the hardware tag-based mode that doesn't rely on
> compiler instrumentation. This leads to KASAN failing to detect some
> memory corruptions.
>
> Provide another annotation called kasan_check_byte() that is available
> for all KASAN modes. As the implementation, rename and reuse
> kasan_check_invalid_free(). Use this new annotation in ksize().
> To avoid having ksize() as the top frame in the reported stack trace,
> pass _RET_IP_ to __kasan_check_byte().
>
> Also add a new ksize_uaf() test that checks that a use-after-free is
> detected via ksize() itself, and via plain accesses that happen later.
>
> Link: https://linux-review.googlesource.com/id/Iaabf771881d0f9ce1b969f2a62938e99d3308ec5
> Signed-off-by: Andrey Konovalov <[email protected]>

Reviewed-by: Marco Elver <[email protected]>

> ---
> include/linux/kasan-checks.h | 6 ++++++
> include/linux/kasan.h | 17 +++++++++++++++++
> lib/test_kasan.c | 20 ++++++++++++++++++++
> mm/kasan/common.c | 11 ++++++++++-
> mm/kasan/generic.c | 4 ++--
> mm/kasan/kasan.h | 10 +++++-----
> mm/kasan/sw_tags.c | 6 +++---
> mm/slab_common.c | 16 +++++++++-------
> 8 files changed, 72 insertions(+), 18 deletions(-)
>
> diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
> index ca5e89fb10d3..3d6d22a25bdc 100644
> --- a/include/linux/kasan-checks.h
> +++ b/include/linux/kasan-checks.h
> @@ -4,6 +4,12 @@
>
> #include <linux/types.h>
>
> +/*
> + * The annotations present in this file are only relevant for the software
> + * KASAN modes that rely on compiler instrumentation, and will be optimized
> + * away for the hardware tag-based KASAN mode. Use kasan_check_byte() instead.
> + */
> +
> /*
> * __kasan_check_*: Always available when KASAN is enabled. This may be used
> * even in compilation units that selectively disable KASAN, but must use KASAN
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index bba1637827c3..5bedd5ee481f 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -242,6 +242,19 @@ static __always_inline void kasan_kfree_large(void *ptr)
> __kasan_kfree_large(ptr, _RET_IP_);
> }
>
> +/*
> + * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
> + * the hardware tag-based mode that doesn't rely on compiler instrumentation.
> + */
> +bool __kasan_check_byte(const void *addr, unsigned long ip);
> +static __always_inline bool kasan_check_byte(const void *addr)
> +{
> + if (kasan_enabled())
> + return __kasan_check_byte(addr, _RET_IP_);
> + return true;
> +}
> +
> +
> bool kasan_save_enable_multi_shot(void);
> void kasan_restore_multi_shot(bool enabled);
>
> @@ -297,6 +310,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
> return (void *)object;
> }
> static inline void kasan_kfree_large(void *ptr) {}
> +static inline bool kasan_check_byte(const void *address)
> +{
> + return true;
> +}
>
> #endif /* CONFIG_KASAN */
>
> diff --git a/lib/test_kasan.c b/lib/test_kasan.c
> index a06e7946f581..566d894ba20b 100644
> --- a/lib/test_kasan.c
> +++ b/lib/test_kasan.c
> @@ -496,6 +496,7 @@ static void kasan_global_oob(struct kunit *test)
> KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
> }
>
> +/* Check that ksize() makes the whole object accessible. */
> static void ksize_unpoisons_memory(struct kunit *test)
> {
> char *ptr;
> @@ -514,6 +515,24 @@ static void ksize_unpoisons_memory(struct kunit *test)
> kfree(ptr);
> }
>
> +/*
> + * Check that a use-after-free is detected by ksize() and via normal accesses
> + * after it.
> + */
> +static void ksize_uaf(struct kunit *test)
> +{
> + char *ptr;
> + int size = 128 - KASAN_GRANULE_SIZE;
> +
> + ptr = kmalloc(size, GFP_KERNEL);
> + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> + kfree(ptr);
> +
> + KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
> + KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
> + KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
> +}
> +
> static void kasan_stack_oob(struct kunit *test)
> {
> char stack_array[10];
> @@ -907,6 +926,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
> KUNIT_CASE(kasan_alloca_oob_left),
> KUNIT_CASE(kasan_alloca_oob_right),
> KUNIT_CASE(ksize_unpoisons_memory),
> + KUNIT_CASE(ksize_uaf),
> KUNIT_CASE(kmem_cache_double_free),
> KUNIT_CASE(kmem_cache_invalid_free),
> KUNIT_CASE(kasan_memchr),
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index eedc3e0fe365..b18189ef3a92 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -345,7 +345,7 @@ static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
> if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
> return false;
>
> - if (kasan_check_invalid_free(tagged_object)) {
> + if (!kasan_byte_accessible(tagged_object)) {
> kasan_report_invalid_free(tagged_object, ip);
> return true;
> }
> @@ -490,3 +490,12 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
> kasan_report_invalid_free(ptr, ip);
> /* The object will be poisoned by kasan_free_pages(). */
> }
> +
> +bool __kasan_check_byte(const void *address, unsigned long ip)
> +{
> + if (!kasan_byte_accessible(address)) {
> + kasan_report((unsigned long)address, 1, false, ip);
> + return false;
> + }
> + return true;
> +}
> diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
> index acab8862dc67..3f17a1218055 100644
> --- a/mm/kasan/generic.c
> +++ b/mm/kasan/generic.c
> @@ -185,11 +185,11 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
> return check_region_inline(addr, size, write, ret_ip);
> }
>
> -bool kasan_check_invalid_free(void *addr)
> +bool kasan_byte_accessible(const void *addr)
> {
> s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
>
> - return shadow_byte < 0 || shadow_byte >= KASAN_GRANULE_SIZE;
> + return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
> }
>
> void kasan_cache_shrink(struct kmem_cache *cache)
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index 292dfbc37deb..bd4ee6fab648 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -329,20 +329,20 @@ static inline void kasan_unpoison(const void *address, size_t size)
> round_up(size, KASAN_GRANULE_SIZE), get_tag(address));
> }
>
> -static inline bool kasan_check_invalid_free(void *addr)
> +static inline bool kasan_byte_accessible(const void *addr)
> {
> u8 ptr_tag = get_tag(addr);
> - u8 mem_tag = hw_get_mem_tag(addr);
> + u8 mem_tag = hw_get_mem_tag((void *)addr);
>
> - return (mem_tag == KASAN_TAG_INVALID) ||
> - (ptr_tag != KASAN_TAG_KERNEL && ptr_tag != mem_tag);
> + return (mem_tag != KASAN_TAG_INVALID) &&
> + (ptr_tag == KASAN_TAG_KERNEL || ptr_tag == mem_tag);
> }
>
> #else /* CONFIG_KASAN_HW_TAGS */
>
> void kasan_poison(const void *address, size_t size, u8 value);
> void kasan_unpoison(const void *address, size_t size);
> -bool kasan_check_invalid_free(void *addr);
> +bool kasan_byte_accessible(const void *addr);
>
> #endif /* CONFIG_KASAN_HW_TAGS */
>
> diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
> index cc271fceb5d5..94c2d33be333 100644
> --- a/mm/kasan/sw_tags.c
> +++ b/mm/kasan/sw_tags.c
> @@ -118,13 +118,13 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
> return true;
> }
>
> -bool kasan_check_invalid_free(void *addr)
> +bool kasan_byte_accessible(const void *addr)
> {
> u8 tag = get_tag(addr);
> u8 shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(kasan_reset_tag(addr)));
>
> - return (shadow_byte == KASAN_TAG_INVALID) ||
> - (tag != KASAN_TAG_KERNEL && tag != shadow_byte);
> + return (shadow_byte != KASAN_TAG_INVALID) &&
> + (tag == KASAN_TAG_KERNEL || tag == shadow_byte);
> }
>
> #define DEFINE_HWASAN_LOAD_STORE(size) \
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index e981c80d216c..9c12cf4212ea 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1157,19 +1157,21 @@ size_t ksize(const void *objp)
> size_t size;
>
> /*
> - * We need to check that the pointed to object is valid, and only then
> - * unpoison the shadow memory below. We use __kasan_check_read(), to
> - * generate a more useful report at the time ksize() is called (rather
> - * than later where behaviour is undefined due to potential
> - * use-after-free or double-free).
> + * We need to first check that the pointer to the object is valid, and
> + * only then unpoison the memory. The report printed from ksize() is
> + * more useful than when it's printed later, when the behaviour could
> + * be undefined due to a potential use-after-free or double-free.
> *
> - * If the pointed to memory is invalid we return 0, to avoid users of
> + * We use kasan_check_byte(), which is supported for the hardware
> + * tag-based KASAN mode, unlike kasan_check_read/write().
> + *
> + * If the pointed to memory is invalid, we return 0 to avoid users of
> * ksize() writing to and potentially corrupting the memory region.
> *
> * We want to perform the check before __ksize(), to avoid potentially
> * crashing in __ksize() due to accessing invalid metadata.
> */
> - if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
> + if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
> return 0;
>
> size = __ksize(objp);
> --
> 2.30.0.284.gd98b1dd5eaa7-goog
>
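
To make the intended use of the new annotation concrete, here is a
hypothetical call site in a file compiled with KASAN instrumentation
disabled (the helper name and context are invented for illustration;
only kasan_check_byte() comes from the patch):

#include <linux/kasan.h>

/* Hypothetical helper in a file built with KASAN_SANITIZE := n. */
static int peek_first_byte(const char *obj)
{
	/*
	 * Unlike kasan_check_read(), this works in all KASAN modes,
	 * including HW_TAGS: it returns false and prints a KASAN
	 * report if the first byte of *obj is inaccessible, e.g.
	 * because the object was already freed.
	 */
	if (!kasan_check_byte(obj))
		return -1;

	return *obj;
}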

2021-01-15 13:22:21

by Marco Elver

[permalink] [raw]
Subject: Re: [PATCH v3 11/15] kasan: move _RET_IP_ to inline wrappers

On Thu, Jan 14, 2021 at 08:36PM +0100, Andrey Konovalov wrote:
> Generic mm functions that call KASAN annotations that might report a bug
> pass _RET_IP_ to them as an argument. This allows KASAN to include the
> name of the function that called the mm function in its report's header.
>
> Now that KASAN has inline wrappers for all of its annotations, move
> _RET_IP_ to those wrappers to simplify annotation call sites.
>
> Link: https://linux-review.googlesource.com/id/I8fb3c06d49671305ee184175a39591bc26647a67
> Signed-off-by: Andrey Konovalov <[email protected]>

Much nicer!

Reviewed-by: Marco Elver <[email protected]>

> ---
> include/linux/kasan.h | 20 +++++++++-----------
> mm/mempool.c | 2 +-
> mm/slab.c | 2 +-
> mm/slub.c | 4 ++--
> 4 files changed, 13 insertions(+), 15 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 5e0655fb2a6f..bba1637827c3 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -181,19 +181,18 @@ static __always_inline void * __must_check kasan_init_slab_obj(
> }
>
> bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
> -static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
> - unsigned long ip)
> +static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
> {
> if (kasan_enabled())
> - return __kasan_slab_free(s, object, ip);
> + return __kasan_slab_free(s, object, _RET_IP_);
> return false;
> }
>
> void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
> -static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
> +static __always_inline void kasan_slab_free_mempool(void *ptr)
> {
> if (kasan_enabled())
> - __kasan_slab_free_mempool(ptr, ip);
> + __kasan_slab_free_mempool(ptr, _RET_IP_);
> }
>
> void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
> @@ -237,10 +236,10 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
> }
>
> void __kasan_kfree_large(void *ptr, unsigned long ip);
> -static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
> +static __always_inline void kasan_kfree_large(void *ptr)
> {
> if (kasan_enabled())
> - __kasan_kfree_large(ptr, ip);
> + __kasan_kfree_large(ptr, _RET_IP_);
> }
>
> bool kasan_save_enable_multi_shot(void);
> @@ -273,12 +272,11 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
> {
> return (void *)object;
> }
> -static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
> - unsigned long ip)
> +static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
> {
> return false;
> }
> -static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
> +static inline void kasan_slab_free_mempool(void *ptr) {}
> static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
> gfp_t flags)
> {
> @@ -298,7 +296,7 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
> {
> return (void *)object;
> }
> -static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
> +static inline void kasan_kfree_large(void *ptr) {}
>
> #endif /* CONFIG_KASAN */
>
> diff --git a/mm/mempool.c b/mm/mempool.c
> index 624ed51b060f..79959fac27d7 100644
> --- a/mm/mempool.c
> +++ b/mm/mempool.c
> @@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element)
> static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
> {
> if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
> - kasan_slab_free_mempool(element, _RET_IP_);
> + kasan_slab_free_mempool(element);
> else if (pool->alloc == mempool_alloc_pages)
> kasan_free_pages(element, (unsigned long)pool->pool_data);
> }
> diff --git a/mm/slab.c b/mm/slab.c
> index d7c8da9319c7..afeb6191fb1e 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -3421,7 +3421,7 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
> memset(objp, 0, cachep->object_size);
>
> /* Put the object into the quarantine, don't touch it for now. */
> - if (kasan_slab_free(cachep, objp, _RET_IP_))
> + if (kasan_slab_free(cachep, objp))
> return;
>
> /* Use KCSAN to help debug racy use-after-free. */
> diff --git a/mm/slub.c b/mm/slub.c
> index 75fb097d990d..0afb53488238 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -1514,7 +1514,7 @@ static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
> static __always_inline void kfree_hook(void *x)
> {
> kmemleak_free(x);
> - kasan_kfree_large(x, _RET_IP_);
> + kasan_kfree_large(x);
> }
>
> static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
> @@ -1544,7 +1544,7 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
> KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
>
> /* KASAN might put x into memory quarantine, delaying its reuse */
> - return kasan_slab_free(s, x, _RET_IP_);
> + return kasan_slab_free(s, x);
> }
>
> static inline bool slab_free_freelist_hook(struct kmem_cache *s,
> --
> 2.30.0.284.gd98b1dd5eaa7-goog
>
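
For readers wondering why this doesn't change the report headers:
_RET_IP_ is defined in include/linux/kernel.h as
(unsigned long)__builtin_return_address(0), and the wrappers are
__always_inline, so they have no stack frame of their own. The macro
is therefore evaluated in the frame of whatever mm function the
wrapper is inlined into, and the report still names that function's
caller. Restating one wrapper from the patch with a comment to that
effect:

/*
 * Inlined into e.g. slab's __cache_free(): _RET_IP_ below yields the
 * return address of the (non-inlined) function containing this call,
 * exactly what the out-of-line version received explicitly before.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_);
	return false;
}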

2021-01-15 13:36:28

by Alexander Potapenko

[permalink] [raw]
Subject: Re: [PATCH v3 05/15] kasan: add match-all tag tests

On Thu, Jan 14, 2021 at 8:36 PM Andrey Konovalov <[email protected]> wrote:
>
> Add 3 new tests for tag-based KASAN modes:
>
> 1. Check that match-all pointer tag is not assigned randomly.
> 2. Check that 0xff works as a match-all pointer tag.
> 3. Check that there are no match-all memory tags.
>
> Note that test #3 causes a significant number (255) of KASAN reports
> to be printed during execution for the SW_TAGS mode.
>
> Link: https://linux-review.googlesource.com/id/I78f1375efafa162b37f3abcb2c5bc2f3955dfd8e
> Reviewed-by: Marco Elver <[email protected]>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
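
As an illustration of test #2 from the list above, a minimal sketch of
the idea (get_tag()/set_tag() stand in for whatever tag-manipulation
helpers the test file uses; KASAN_TAG_KERNEL is the 0xff match-all
tag):

static void match_all_ptr_tag_sketch(struct kunit *test)
{
	char *ptr;
	u8 tag;

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Back up the randomly assigned pointer tag. */
	tag = get_tag(ptr);

	/* 0xff matches any memory tag: this access must not report. */
	ptr = (char *)set_tag(ptr, KASAN_TAG_KERNEL);
	*ptr = 0;

	/* Restore the tag so that kfree() sees a matching pointer. */
	ptr = (char *)set_tag(ptr, tag);
	kfree(ptr);
}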

2021-01-15 13:47:03

by Alexander Potapenko

[permalink] [raw]
Subject: Re: [PATCH v3 15/15] kasan: don't run tests when KASAN is not enabled

On Fri, Jan 15, 2021 at 2:13 PM Marco Elver <[email protected]> wrote:
>
> On Thu, Jan 14, 2021 at 08:36PM +0100, Andrey Konovalov wrote:
> > Don't run KASAN tests when KASAN is disabled with kasan.mode=off to avoid
> > corrupting kernel memory.
> >
> > Link: https://linux-review.googlesource.com/id/I6447af436a69a94bfc35477f6bf4e2122948355e
> > Signed-off-by: Andrey Konovalov <[email protected]>
>
> Reviewed-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>

2021-01-15 13:54:22

by Alexander Potapenko

[permalink] [raw]
Subject: Re: [PATCH v3 14/15] kasan: add a test for kmem_cache_alloc/free_bulk

On Fri, Jan 15, 2021 at 2:16 PM Marco Elver <[email protected]> wrote:
>
> On Thu, Jan 14, 2021 at 08:36PM +0100, Andrey Konovalov wrote:
> > Add a test for kmem_cache_alloc/free_bulk to make sure there are no
> > false-positives when these functions are used.
> >
> > Link: https://linux-review.googlesource.com/id/I2a8bf797aecf81baeac61380c567308f319e263d
> > Signed-off-by: Andrey Konovalov <[email protected]>
>
> Reviewed-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>

(see a nit below)

> > + cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
Looks like there's a tab between "test_cache" and size; please double-check.

2021-01-15 14:06:30

by Alexander Potapenko

[permalink] [raw]
Subject: Re: [PATCH v3 12/15] kasan: fix bug detection via ksize for HW_TAGS mode

On Fri, Jan 15, 2021 at 2:18 PM Marco Elver <[email protected]> wrote:
>
> On Thu, Jan 14, 2021 at 08:36PM +0100, Andrey Konovalov wrote:
> > The currently existing kasan_check_read/write() annotations are intended
> > to be used for kernel modules that have KASAN compiler instrumentation
> > disabled. Thus, they are only relevant for the software KASAN modes that
> > rely on compiler instrumentation.
> >
> > However there's another use case for these annotations: ksize() checks
> > that the object passed to it is indeed accessible before unpoisoning the
> > whole object. This is currently done via __kasan_check_read(), which is
> > compiled away for the hardware tag-based mode that doesn't rely on
> > compiler instrumentation. This leads to KASAN failing to detect some
> > memory corruptions.
> >
> > Provide another annotation called kasan_check_byte() that is available
> > for all KASAN modes. As the implementation, rename and reuse
> > kasan_check_invalid_free(). Use this new annotation in ksize().
> > To avoid having ksize() as the top frame in the reported stack trace,
> > pass _RET_IP_ to __kasan_check_byte().
> >
> > Also add a new ksize_uaf() test that checks that a use-after-free is
> > detected via ksize() itself, and via plain accesses that happen later.
> >
> > Link: https://linux-review.googlesource.com/id/Iaabf771881d0f9ce1b969f2a62938e99d3308ec5
> > Signed-off-by: Andrey Konovalov <[email protected]>
>
> Reviewed-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>

2021-01-15 14:09:58

by Alexander Potapenko

[permalink] [raw]
Subject: Re: [PATCH v3 11/15] kasan: move _RET_IP_ to inline wrappers

On Fri, Jan 15, 2021 at 2:19 PM Marco Elver <[email protected]> wrote:
>
> On Thu, Jan 14, 2021 at 08:36PM +0100, Andrey Konovalov wrote:
> > Generic mm functions that call KASAN annotations that might report a bug
> > pass _RET_IP_ to them as an argument. This allows KASAN to include the
> > name of the function that called the mm function in its report's header.
> >
> > Now that KASAN has inline wrappers for all of its annotations, move
> > _RET_IP_ to those wrappers to simplify annotation call sites.
> >
> > Link: https://linux-review.googlesource.com/id/I8fb3c06d49671305ee184175a39591bc26647a67
> > Signed-off-by: Andrey Konovalov <[email protected]>
>
> Much nicer!
>
> Reviewed-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>

2021-01-15 14:14:42

by Alexander Potapenko

[permalink] [raw]
Subject: Re: [PATCH v3 08/15] kasan: add compiler barriers to KUNIT_EXPECT_KASAN_FAIL

On Thu, Jan 14, 2021 at 8:36 PM Andrey Konovalov <[email protected]> wrote:
>
> It might not be obvious to the compiler that the expression must be
> executed between the writes and reads of fail_data. In this case, the
> compiler might reorder or optimize away some of the accesses, and
> the tests will fail.
>
> Add compiler barriers around the expression in KUNIT_EXPECT_KASAN_FAIL
> and use READ/WRITE_ONCE() for accessing fail_data fields.
>
> Link: https://linux-review.googlesource.com/id/I046079f48641a1d36fe627fc8827a9249102fd50
> Reviewed-by: Marco Elver <[email protected]>
> Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
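
In simplified form, the pattern this patch introduces looks roughly
like the following (a sketch of the idea, not the verbatim macro from
lib/test_kasan.c; fail_data is the test file's global expectation
struct):

#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	/* Publish the expectation before running the expression. */	\
	WRITE_ONCE(fail_data.report_expected, true);			\
	WRITE_ONCE(fail_data.report_found, false);			\
	/* Keep the compiler from reordering the expression. */	\
	barrier();							\
	expression;							\
	barrier();							\
	KUNIT_EXPECT_EQ(test,						\
			READ_ONCE(fail_data.report_expected),		\
			READ_ONCE(fail_data.report_found));		\
} while (0)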

2021-01-15 17:27:10

by Andrey Konovalov

[permalink] [raw]
Subject: Re: [PATCH v3 14/15] kasan: add a test for kmem_cache_alloc/free_bulk

On Fri, Jan 15, 2021 at 2:49 PM Alexander Potapenko <[email protected]> wrote:
>
> On Fri, Jan 15, 2021 at 2:16 PM Marco Elver <[email protected]> wrote:
> >
> > On Thu, Jan 14, 2021 at 08:36PM +0100, Andrey Konovalov wrote:
> > > Add a test for kmem_cache_alloc/free_bulk to make sure there are no
> > > false-positives when these functions are used.
> > >
> > > Link: https://linux-review.googlesource.com/id/I2a8bf797aecf81baeac61380c567308f319e263d
> > > Signed-off-by: Andrey Konovalov <[email protected]>
> >
> > Reviewed-by: Marco Elver <[email protected]>
> Reviewed-by: Alexander Potapenko <[email protected]>
>
> (see a nit below)
>
> > > + cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
> Looks like there's a tab between "test_cache" and size; please double-check.

Indeed, thanks for noticing! Will fix.