2024-02-23 20:35:51

by Paul Heidekrüger

Subject: Re: [merged mm-stable] kasan-add-atomic-tests.patch removed from -mm tree

On 21.02.2024 16:03, Andrew Morton wrote:
>
> The quilt patch titled
> Subject: kasan: add atomic tests
> has been removed from the -mm tree. Its filename was
> kasan-add-atomic-tests.patch
>
> This patch was dropped because it was merged into the mm-stable branch
> of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
>
> ------------------------------------------------------
> From: Paul Heidekrüger <[email protected]>
> Subject: kasan: add atomic tests
> Date: Fri, 2 Feb 2024 11:32:59 +0000
>
> Test that KASan can detect some unsafe atomic accesses.
>
> As discussed in the linked thread below, these tests attempt to cover
> the most common uses of atomics and, therefore, aren't exhaustive.
>
> Link: https://lkml.kernel.org/r/[email protected]
> Link: https://lore.kernel.org/all/[email protected]/T/#u
> Signed-off-by: Paul Heidekrüger <[email protected]>
> Closes: https://bugzilla.kernel.org/show_bug.cgi?id=214055
> Acked-by: Mark Rutland <[email protected]>
> Cc: Marco Elver <[email protected]>
> Cc: Andrey Konovalov <[email protected]>
> Cc: Alexander Potapenko <[email protected]>
> Cc: Andrey Ryabinin <[email protected]>
> Cc: Dmitry Vyukov <[email protected]>
> Cc: Vincenzo Frascino <[email protected]>
> Signed-off-by: Andrew Morton <[email protected]>
> ---
>
> mm/kasan/kasan_test.c | 79 ++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 79 insertions(+)
>
> --- a/mm/kasan/kasan_test.c~kasan-add-atomic-tests
> +++ a/mm/kasan/kasan_test.c
> @@ -697,6 +697,84 @@ static void kmalloc_uaf3(struct kunit *t
> KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
> }
>
> +static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
> +{
> + int *i_unsafe = (int *)unsafe;
> +
> + KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));
> +
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
> +
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
> + KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
> +}
> +
> +static void kasan_atomics(struct kunit *test)
> +{
> + void *a1, *a2;
> +
> + /*
> + * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
> + * that the following 16 bytes will make up the redzone.
> + */
> + a1 = kzalloc(48, GFP_KERNEL);
> + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
> + a2 = kzalloc(sizeof(int), GFP_KERNEL);
> + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
> +
> + /* Use atomics to access the redzone. */
> + kasan_atomics_helper(test, a1 + 48, a2);
> +
> + kfree(a1);
> + kfree(a2);
> +}
> +
> static void kmalloc_double_kzfree(struct kunit *test)
> {
> char *ptr;
> @@ -1883,6 +1961,7 @@ static struct kunit_case kasan_kunit_tes
> KUNIT_CASE(kasan_strings),
> KUNIT_CASE(kasan_bitops_generic),
> KUNIT_CASE(kasan_bitops_tags),
> + KUNIT_CASE(kasan_atomics),
> KUNIT_CASE(vmalloc_helpers_tags),
> KUNIT_CASE(vmalloc_oob),
> KUNIT_CASE(vmap_tags),
> _
>
> Patches currently in -mm which might be from [email protected] are
>
>

Hi Andrew!

There was further discussion around this patch [1], which led to a v3 of the
patch above; it may have gotten lost in the wave of emails.

I'm unsure what the protocol is now: should I send you a new patch with the
diff between the patch above and v3, or can you just use v3 instead?

I hope this doesn't cause too much trouble.

Many thanks,
Paul

[1]:
https://lore.kernel.org/all/[email protected]/



2024-02-24 00:10:27

by Andrew Morton

Subject: Re: [merged mm-stable] kasan-add-atomic-tests.patch removed from -mm tree

On Fri, 23 Feb 2024 21:25:04 +0100 Paul Heidekrüger <[email protected]> wrote:

> I'm unsure what the protocol is now; do I send you a new patch for the diff
> between the above patch and the v3 patch, or can you just use v3 instead of the
> above patch?

Yes please.

2024-02-24 11:00:38

by Paul Heidekrüger

Subject: [PATCH] kasan: fix a2 allocation and remove explicit cast in atomic tests

Address the additional feedback on commit 4e76c8cc3378 ("kasan: add atomic
tests") by removing an explicit cast and by fixing both the size and the
post-allocation check of the `a2` allocation.

CC: Marco Elver <[email protected]>
CC: Andrey Konovalov <[email protected]>
Link: https://lore.kernel.org/all/[email protected]/T/#u
Fixes: 4e76c8cc3378 ("kasan: add atomic tests")
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=214055
Reviewed-by: Marco Elver <[email protected]>
Tested-by: Marco Elver <[email protected]>
Acked-by: Mark Rutland <[email protected]>
Reviewed-by: Andrey Konovalov <[email protected]>
Signed-off-by: Paul Heidekrüger <[email protected]>
---
@Andrew:
I wasn't sure whether you'd squash this patch into v1 or whether it would
end up as a separate commit. I hope this works either way!

Changes PATCH v2 -> PATCH v3:
* Fix the wrong variable being used when checking a2 after allocation
* Add Andrey's reviewed-by tag

Changes PATCH v1 -> PATCH v2:
* Make explicit cast implicit as per Mark's feedback
* Increase the size of the "a2" allocation as per Andrey's feedback (see the
  sketch after this changelog)
* Add tags

Changes PATCH RFC v2 -> PATCH v1:
* Remove casts to void*
* Remove i_safe variable
* Add atomic_long_* test cases
* Carry over comment from kasan_bitops_tags()

Changes PATCH RFC v1 -> PATCH RFC v2:
* Adjust size of allocations to make kasan_atomics() work with all KASan modes
* Remove comments and move tests closer to the bitops tests
* For functions taking two addresses as input, test each address in a separate call.
* Rename variables for clarity
* Add tests for READ_ONCE(), WRITE_ONCE(), smp_load_acquire() and smp_store_release()
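
Since a couple of the items above concern allocation sizes, here is an
annotated sketch of the reasoning. It is an illustration only, not part of
the diff below; the (made-up) function simply mirrors kasan_atomics() with
this fix applied:

static void kasan_atomics_sizing_sketch(struct kunit *test)
{
	void *a1, *a2;

	/*
	 * 48 bytes come from the kmalloc-64 bucket and are a multiple of
	 * the KASAN granule size in every mode (8 bytes for generic KASAN,
	 * 16 bytes for the tag-based modes), so a1 + 48 points at redzone
	 * rather than at the object itself.
	 */
	a1 = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);

	/*
	 * The helper also runs atomic_long_* ops against the "safe"
	 * pointer, e.g. atomic_long_try_cmpxchg(safe, unsafe, 42), which
	 * accesses sizeof(long) bytes. An allocation of sizeof(int)
	 * (4 bytes) would itself be out of bounds on 64-bit, hence
	 * sizeof(atomic_long_t); and the assertion must of course check
	 * a2, not a1.
	 */
	a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);

	/* Every unsafe access in the helper lands in a1's redzone. */
	kasan_atomics_helper(test, a1 + 48, a2);

	kfree(a1);
	kfree(a2);
}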

mm/kasan/kasan_test.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 4ef2280c322c..7f0f87a2c3c4 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -1152,7 +1152,7 @@ static void kasan_bitops_tags(struct kunit *test)

static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
{
- int *i_unsafe = (int *)unsafe;
+ int *i_unsafe = unsafe;

KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
@@ -1218,8 +1218,8 @@ static void kasan_atomics(struct kunit *test)
*/
a1 = kzalloc(48, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
- a2 = kzalloc(sizeof(int), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
+ a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);

/* Use atomics to access the redzone. */
kasan_atomics_helper(test, a1 + 48, a2);
--
2.40.1