2021-01-05 22:11:11

by Andrey Konovalov

Subject: [PATCH 00/11] kasan: HW_TAGS tests support and fixes

This patchset adds support for running KASAN-KUnit tests with the
hardware tag-based mode and also contains a few fixes.
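
As background for the KUnit-related patches in the list below (the HW_TAGS
enablement and the compiler-barrier fix), the tests detect an expected report
through a pair of flags rather than by parsing the kernel log. A minimal
sketch of the idea, deliberately simplified and renamed _SKETCH (the flag
struct mirrors struct kunit_kasan_expectation; this is not the exact in-tree
macro):

/* Mirrors the in-tree expectation struct: two flags shared with the report path. */
static struct kunit_kasan_expectation fail_data;

#define KUNIT_EXPECT_KASAN_FAIL_SKETCH(test, expression) do {		\
	/* Tell the KASAN report path that a report is expected. */	\
	fail_data.report_expected = true;				\
	fail_data.report_found = false;					\
	/* Barriers keep the bad access from being reordered or elided. */ \
	barrier();							\
	expression;							\
	barrier();							\
	/* The report path sets report_found; the two flags must agree. */ \
	KUNIT_EXPECT_EQ(test, fail_data.report_expected,		\
			fail_data.report_found);			\
	fail_data.report_expected = false;				\
} while (0)

With HW_TAGS the bad access traps synchronously, so the arm64 changes in this
series teach the tag fault handler to record the report and recover, letting a
test continue past an expected fault.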

Andrey Konovalov (11):
kasan: prefix exported functions with kasan_
kasan: clarify HW_TAGS impact on TBI
kasan: clean up comments in tests
kasan: add match-all tag tests
kasan, arm64: allow using KUnit tests with HW_TAGS mode
kasan: rename CONFIG_TEST_KASAN_MODULE
kasan: add compiler barriers to KUNIT_EXPECT_KASAN_FAIL
kasan: adopt kmalloc_uaf2 test to HW_TAGS mode
kasan: fix memory corruption in kasan_bitops_tags test
kasan: fix bug detection via ksize for HW_TAGS mode
kasan: add proper page allocator tests

Documentation/dev-tools/kasan.rst  |  22 +-
arch/arm64/include/asm/memory.h    |   1 +
arch/arm64/include/asm/mte-kasan.h |  12 ++
arch/arm64/kernel/mte.c            |  12 ++
arch/arm64/mm/fault.c              |  16 +-
include/linux/kasan-checks.h       |   6 +
include/linux/kasan.h              |  13 ++
lib/Kconfig.kasan                  |   6 +-
lib/Makefile                       |   2 +-
lib/test_kasan.c                   | 312 +++++++++++++++++++++++------
lib/test_kasan_module.c            |   5 +-
mm/kasan/common.c                  |  56 +++---
mm/kasan/generic.c                 |  38 ++--
mm/kasan/kasan.h                   |  69 ++++---
mm/kasan/quarantine.c              |  22 +-
mm/kasan/report.c                  |  13 +-
mm/kasan/report_generic.c          |   8 +-
mm/kasan/report_hw_tags.c          |   8 +-
mm/kasan/report_sw_tags.c          |   8 +-
mm/kasan/shadow.c                  |  26 +--
mm/kasan/sw_tags.c                 |  20 +-
mm/slab_common.c                   |  15 +-
tools/objtool/check.c              |   2 +-
23 files changed, 484 insertions(+), 208 deletions(-)

--
2.29.2.729.g45daf8777d-goog


2021-01-05 22:11:21

by Andrey Konovalov

Subject: [PATCH 11/11] kasan: add proper page allocator tests

The existing page allocator tests rely on the kmalloc fallback for
large sizes, which is only implemented for SLUB. Add proper tests that
use alloc_pages() and free_pages() directly.
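
For reference, a minimal sketch (illustrative only, not part of this patch;
the function name is made up) of the SLUB-only path the existing
kmalloc_pagealloc_* tests depend on. The size follows those tests: it goes
above KMALLOC_MAX_CACHE_SIZE so that SLUB hands the request straight to the
page allocator, something SLAB never does:

/* Illustrative only: how the old tests reach the page allocator via SLUB. */
static void kmalloc_pagealloc_sketch(struct kunit *test)
{
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;	/* too big for any kmalloc cache */
	char *ptr = kmalloc(size, GFP_KERNEL);		/* SLUB: served by the page allocator */

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);
}

The new pagealloc_* tests below call alloc_pages() directly, so they exercise
the page allocator regardless of the slab implementation.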

Signed-off-by: Andrey Konovalov <[email protected]>
Link: https://linux-review.googlesource.com/id/Ia173d5a1b215fe6b2548d814ef0f4433cf983570
---
lib/test_kasan.c | 54 +++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 49 insertions(+), 5 deletions(-)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 6261521e57ad..24798c034d05 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -128,6 +128,12 @@ static void kmalloc_node_oob_right(struct kunit *test)
kfree(ptr);
}

+/*
+ * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
+ * fit into a slab cache and therefore is allocated via the page allocator
+ * fallback. Since this kind of fallback is only implemented for SLUB, these
+ * tests are limited to that allocator.
+ */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
char *ptr;
@@ -138,14 +144,11 @@ static void kmalloc_pagealloc_oob_right(struct kunit *test)
return;
}

- /*
- * Allocate a chunk that does not fit into a SLUB cache to trigger
- * the page allocator fallback.
- */
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
+
kfree(ptr);
}

@@ -161,8 +164,8 @@ static void kmalloc_pagealloc_uaf(struct kunit *test)

ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
kfree(ptr);
+
KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

@@ -182,6 +185,45 @@ static void kmalloc_pagealloc_invalid_free(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

+static void pagealloc_oob_right(struct kunit *test)
+{
+ char *ptr;
+ struct page *pages;
+ size_t order = 4;
+ size_t size = (1UL << (PAGE_SHIFT + order));
+
+ /*
+ * With generic KASAN page allocations have no redzones, thus
+ * out-of-bounds detection is not guaranteed.
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
+ */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "skipping, CONFIG_KASAN_GENERIC enabled");
+ return;
+ }
+
+ pages = alloc_pages(GFP_KERNEL, order);
+ ptr = page_address(pages);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
+ free_pages((unsigned long)ptr, order);
+}
+
+static void pagealloc_uaf(struct kunit *test)
+{
+ char *ptr;
+ struct page *pages;
+ size_t order = 4;
+
+ pages = alloc_pages(GFP_KERNEL, order);
+ ptr = page_address(pages);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ free_pages((unsigned long)ptr, order);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
+}
+
static void kmalloc_large_oob_right(struct kunit *test)
{
char *ptr;
@@ -933,6 +975,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_pagealloc_oob_right),
KUNIT_CASE(kmalloc_pagealloc_uaf),
KUNIT_CASE(kmalloc_pagealloc_invalid_free),
+ KUNIT_CASE(pagealloc_oob_right),
+ KUNIT_CASE(pagealloc_uaf),
KUNIT_CASE(kmalloc_large_oob_right),
KUNIT_CASE(kmalloc_oob_krealloc_more),
KUNIT_CASE(kmalloc_oob_krealloc_less),
--
2.29.2.729.g45daf8777d-goog

2021-01-12 14:41:27

by Marco Elver

Subject: Re: [PATCH 11/11] kasan: add proper page allocator tests

On Tue, Jan 05, 2021 at 07:27PM +0100, Andrey Konovalov wrote:
> The existing page allocator tests rely on the kmalloc fallback for
> large sizes, which is only implemented for SLUB. Add proper tests that
> use alloc_pages() and free_pages() directly.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
> Link: https://linux-review.googlesource.com/id/Ia173d5a1b215fe6b2548d814ef0f4433cf983570

Reviewed-by: Marco Elver <[email protected]>

> ---
> lib/test_kasan.c | 54 +++++++++++++++++++++++++++++++++++++++++++-----
> 1 file changed, 49 insertions(+), 5 deletions(-)
>
> diff --git a/lib/test_kasan.c b/lib/test_kasan.c
> index 6261521e57ad..24798c034d05 100644
> --- a/lib/test_kasan.c
> +++ b/lib/test_kasan.c
> @@ -128,6 +128,12 @@ static void kmalloc_node_oob_right(struct kunit *test)
> kfree(ptr);
> }
>
> +/*
> + * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
> + * fit into a slab cache and therefore is allocated via the page allocator
> + * fallback. Since this kind of fallback is only implemented for SLUB, these
> + * tests are limited to that allocator.
> + */
> static void kmalloc_pagealloc_oob_right(struct kunit *test)
> {
> char *ptr;
> @@ -138,14 +144,11 @@ static void kmalloc_pagealloc_oob_right(struct kunit *test)
> return;
> }
>
> - /*
> - * Allocate a chunk that does not fit into a SLUB cache to trigger
> - * the page allocator fallback.
> - */
> ptr = kmalloc(size, GFP_KERNEL);
> KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
>
> KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
> +
> kfree(ptr);
> }
>
> @@ -161,8 +164,8 @@ static void kmalloc_pagealloc_uaf(struct kunit *test)
>
> ptr = kmalloc(size, GFP_KERNEL);
> KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> -
> kfree(ptr);
> +
> KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
> }
>
> @@ -182,6 +185,45 @@ static void kmalloc_pagealloc_invalid_free(struct kunit *test)
> KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
> }
>
> +static void pagealloc_oob_right(struct kunit *test)
> +{
> + char *ptr;
> + struct page *pages;
> + size_t order = 4;
> + size_t size = (1UL << (PAGE_SHIFT + order));
> +
> + /*
> + * With generic KASAN page allocations have no redzones, thus
> + * out-of-bounds detection is not guaranteed.
> + * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
> + */
> + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
> + kunit_info(test, "skipping, CONFIG_KASAN_GENERIC enabled");
> + return;
> + }
> +
> + pages = alloc_pages(GFP_KERNEL, order);
> + ptr = page_address(pages);
> + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> +
> + KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
> + free_pages((unsigned long)ptr, order);
> +}
> +
> +static void pagealloc_uaf(struct kunit *test)
> +{
> + char *ptr;
> + struct page *pages;
> + size_t order = 4;
> +
> + pages = alloc_pages(GFP_KERNEL, order);
> + ptr = page_address(pages);
> + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> + free_pages((unsigned long)ptr, order);
> +
> + KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
> +}
> +
> static void kmalloc_large_oob_right(struct kunit *test)
> {
> char *ptr;
> @@ -933,6 +975,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
> KUNIT_CASE(kmalloc_pagealloc_oob_right),
> KUNIT_CASE(kmalloc_pagealloc_uaf),
> KUNIT_CASE(kmalloc_pagealloc_invalid_free),
> + KUNIT_CASE(pagealloc_oob_right),
> + KUNIT_CASE(pagealloc_uaf),
> KUNIT_CASE(kmalloc_large_oob_right),
> KUNIT_CASE(kmalloc_oob_krealloc_more),
> KUNIT_CASE(kmalloc_oob_krealloc_less),
> --
> 2.29.2.729.g45daf8777d-goog
>

2021-01-12 23:31:29

by Alexander Potapenko

Subject: Re: [PATCH 11/11] kasan: add proper page allocator tests

On Tue, Jan 5, 2021 at 7:28 PM Andrey Konovalov <[email protected]> wrote:
>
> The existing page allocator tests rely on the kmalloc fallback for
> large sizes, which is only implemented for SLUB. Add proper tests that
> use alloc_pages() and free_pages() directly.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
> Link: https://linux-review.googlesource.com/id/Ia173d5a1b215fe6b2548d814ef0f4433cf983570
Reviewed-by: Alexander Potapenko <[email protected]>