With all "silently resizing" callers of ksize() refactored, remove the
logic in ksize() that would allow it to be used to effectively change
the size of an allocation (bypassing __alloc_size hints, etc). Users
wanting this feature need to either use kmalloc_size_roundup() before an
allocation, or use krealloc() directly.
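For example, the new pattern looks like this (a minimal sketch; the
names and sizes are illustrative, not taken from an in-tree caller):

	size_t want = 100;
	size_t full = kmalloc_size_roundup(want);
	char *buf = kmalloc(full, GFP_KERNEL);

	if (buf) {
		/*
		 * All "full" bytes are usable up front; there is no
		 * need to grow into the ksize() slack after the fact.
		 */
	}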
For kfree_sensitive(), move the unpoisoning logic inline. Replace the
partially open-coded ksize() in __do_krealloc() with a call to ksize()
now that it doesn't perform unpoisoning.
Adjust the KUnit tests to match the new ksize() behavior.
Cc: Andrey Konovalov <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Hyeonggon Yoo <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Vincenzo Frascino <[email protected]>
Cc: [email protected]
Cc: [email protected]
Acked-by: Vlastimil Babka <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
---
v2:
- improve kunit test precision (andreyknvl)
- add Ack (vbabka)
v1: https://lore.kernel.org/all/[email protected]
---
mm/kasan/kasan_test.c | 14 +++++++++-----
mm/slab_common.c | 26 ++++++++++----------------
2 files changed, 19 insertions(+), 21 deletions(-)
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 7502f03c807c..fc4b22916587 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -821,7 +821,7 @@ static void kasan_global_oob_left(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
-/* Check that ksize() makes the whole object accessible. */
+/* Check that ksize() does NOT unpoison whole object. */
static void ksize_unpoisons_memory(struct kunit *test)
{
char *ptr;
@@ -829,15 +829,19 @@ static void ksize_unpoisons_memory(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
real_size = ksize(ptr);
+ KUNIT_EXPECT_GT(test, real_size, size);
OPTIMIZER_HIDE_VAR(ptr);
- /* This access shouldn't trigger a KASAN report. */
- ptr[size] = 'x';
+ /* These accesses shouldn't trigger a KASAN report. */
+ ptr[0] = 'x';
+ ptr[size - 1] = 'x';
- /* This one must. */
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
+ /* These must trigger a KASAN report. */
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
kfree(ptr);
}
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8276022f0da4..27caa57af070 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1335,11 +1335,11 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
void *ret;
size_t ks;
- /* Don't use instrumented ksize to allow precise KASAN poisoning. */
+ /* Check for double-free before calling ksize. */
if (likely(!ZERO_OR_NULL_PTR(p))) {
if (!kasan_check_byte(p))
return NULL;
- ks = kfence_ksize(p) ?: __ksize(p);
+ ks = ksize(p);
} else
ks = 0;
@@ -1407,21 +1407,21 @@ void kfree_sensitive(const void *p)
void *mem = (void *)p;
ks = ksize(mem);
- if (ks)
+ if (ks) {
+ kasan_unpoison_range(mem, ks);
memzero_explicit(mem, ks);
+ }
kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);
size_t ksize(const void *objp)
{
- size_t size;
-
/*
- * We need to first check that the pointer to the object is valid, and
- * only then unpoison the memory. The report printed from ksize() is
- * more useful, then when it's printed later when the behaviour could
- * be undefined due to a potential use-after-free or double-free.
+ * We need to first check that the pointer to the object is valid.
+ * The KASAN report printed from ksize() is more useful than a
+ * report printed later, when the behaviour could be undefined due
+ * to a potential use-after-free or double-free.
*
* We use kasan_check_byte(), which is supported for the hardware
* tag-based KASAN mode, unlike kasan_check_read/write().
@@ -1435,13 +1435,7 @@ size_t ksize(const void *objp)
if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
return 0;
- size = kfence_ksize(objp) ?: __ksize(objp);
- /*
- * We assume that ksize callers could use whole allocated area,
- * so we need to unpoison this area.
- */
- kasan_unpoison_range(objp, size);
- return size;
+ return kfence_ksize(objp) ?: __ksize(objp);
}
EXPORT_SYMBOL(ksize);
--
2.34.1
On 11/18/22 04:56, Kees Cook wrote:
> With all "silently resizing" callers of ksize() refactored, remove the
At a cursory look it seems to be true now in -next (but not mainline?).
Can you confirm?
That would probably be safe enough to have slab.git expose this to -next now,
and to time a PR appropriately for the next merge window?
> logic in ksize() that would allow it to be used to effectively change
> the size of an allocation (bypassing __alloc_size hints, etc). Users
> wanting this feature need to either use kmalloc_size_roundup() before an
> allocation, or use krealloc() directly.
>
> For kfree_sensitive(), move the unpoisoning logic inline. Replace the
> partially open-coded ksize() in __do_krealloc() with a call to ksize()
> now that it doesn't perform unpoisoning.
>
> Adjust the KUnit tests to match the new ksize() behavior.
>
> Cc: Andrey Konovalov <[email protected]>
> Cc: Christoph Lameter <[email protected]>
> Cc: Pekka Enberg <[email protected]>
> Cc: David Rientjes <[email protected]>
> Cc: Joonsoo Kim <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Roman Gushchin <[email protected]>
> Cc: Hyeonggon Yoo <[email protected]>
> Cc: Andrey Ryabinin <[email protected]>
> Cc: Alexander Potapenko <[email protected]>
> Cc: Dmitry Vyukov <[email protected]>
> Cc: Vincenzo Frascino <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> Acked-by: Vlastimil Babka <[email protected]>
> Signed-off-by: Kees Cook <[email protected]>
> ---
> v2:
> - improve kunit test precision (andreyknvl)
> - add Ack (vbabka)
> v1: https://lore.kernel.org/all/[email protected]
> ---
> mm/kasan/kasan_test.c | 14 +++++++++-----
> mm/slab_common.c | 26 ++++++++++----------------
> 2 files changed, 19 insertions(+), 21 deletions(-)
>
> diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
> index 7502f03c807c..fc4b22916587 100644
> --- a/mm/kasan/kasan_test.c
> +++ b/mm/kasan/kasan_test.c
> @@ -821,7 +821,7 @@ static void kasan_global_oob_left(struct kunit *test)
> KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
> }
>
> -/* Check that ksize() makes the whole object accessible. */
> +/* Check that ksize() does NOT unpoison whole object. */
> static void ksize_unpoisons_memory(struct kunit *test)
> {
> char *ptr;
> @@ -829,15 +829,19 @@ static void ksize_unpoisons_memory(struct kunit *test)
>
> ptr = kmalloc(size, GFP_KERNEL);
> KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> +
> real_size = ksize(ptr);
> + KUNIT_EXPECT_GT(test, real_size, size);
>
> OPTIMIZER_HIDE_VAR(ptr);
>
> - /* This access shouldn't trigger a KASAN report. */
> - ptr[size] = 'x';
> + /* These accesses shouldn't trigger a KASAN report. */
> + ptr[0] = 'x';
> + ptr[size - 1] = 'x';
>
> - /* This one must. */
> - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
> + /* These must trigger a KASAN report. */
> + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
> + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
>
> kfree(ptr);
> }
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 8276022f0da4..27caa57af070 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1335,11 +1335,11 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
> void *ret;
> size_t ks;
>
> - /* Don't use instrumented ksize to allow precise KASAN poisoning. */
> + /* Check for double-free before calling ksize. */
> if (likely(!ZERO_OR_NULL_PTR(p))) {
> if (!kasan_check_byte(p))
> return NULL;
> - ks = kfence_ksize(p) ?: __ksize(p);
> + ks = ksize(p);
> } else
> ks = 0;
>
> @@ -1407,21 +1407,21 @@ void kfree_sensitive(const void *p)
> void *mem = (void *)p;
>
> ks = ksize(mem);
> - if (ks)
> + if (ks) {
> + kasan_unpoison_range(mem, ks);
> memzero_explicit(mem, ks);
> + }
> kfree(mem);
> }
> EXPORT_SYMBOL(kfree_sensitive);
>
> size_t ksize(const void *objp)
> {
> - size_t size;
> -
> /*
> - * We need to first check that the pointer to the object is valid, and
> - * only then unpoison the memory. The report printed from ksize() is
> - * more useful, then when it's printed later when the behaviour could
> - * be undefined due to a potential use-after-free or double-free.
> + * We need to first check that the pointer to the object is valid.
> + * The KASAN report printed from ksize() is more useful than a
> + * report printed later, when the behaviour could be undefined due
> + * to a potential use-after-free or double-free.
> *
> * We use kasan_check_byte(), which is supported for the hardware
> * tag-based KASAN mode, unlike kasan_check_read/write().
> @@ -1435,13 +1435,7 @@ size_t ksize(const void *objp)
> if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
> return 0;
>
> - size = kfence_ksize(objp) ?: __ksize(objp);
> - /*
> - * We assume that ksize callers could use whole allocated area,
> - * so we need to unpoison this area.
> - */
> - kasan_unpoison_range(objp, size);
> - return size;
> + return kfence_ksize(objp) ?: __ksize(objp);
> }
> EXPORT_SYMBOL(ksize);
>
On Fri, Nov 18, 2022 at 11:32:36AM +0100, Vlastimil Babka wrote:
> On 11/18/22 04:56, Kees Cook wrote:
> > With all "silently resizing" callers of ksize() refactored, remove the
>
> At a cursory look it seems to be true now in -next (but not mainline?).
> Can you confirm?
Almost, yes. I realized there is 1 case in the BPF verifier that
remains. (I thought it was picked up, but only a prereq patch was.) I'm
going to resend that one today, but I would expect it to be picked
up soon. (But, yes, definitely not for mainline.)
> That would probably be safe enough to have slab.git expose this to -next now,
> and to time a PR appropriately for the next merge window?
Possibly. I suspect syzkaller might trip KASAN on any larger BPF tests
until I get the last one landed. And if you don't want to do the timing
of the PR, I can carry this patch in my hardening tree, since I already
have to do a two-part early/late-merge-window PR there.
--
Kees Cook
On 11/18/22 18:11, Kees Cook wrote:
> On Fri, Nov 18, 2022 at 11:32:36AM +0100, Vlastimil Babka wrote:
>> On 11/18/22 04:56, Kees Cook wrote:
>> > With all "silently resizing" callers of ksize() refactored, remove the
>>
>> At a cursory look it seems to be true now in -next (but not mainline?).
>> Can you confirm?
>
> Almost, yes. I realized there is 1 case in the BPF verifier that
> remains. (I thought it was picked up, but only a prereq patch was.) I'm
> going to resend that one today, but I would expect it to be picked
> up soon. (But, yes, definitely not for mainline.)
>
>> That would probably be safe enough to have slab.git expose this to -next now,
>> and to time a PR appropriately for the next merge window?
>
> Possibly. I suspect syzkaller might trip KASAN on any larger BPF tests
> until I get the last one landed. And if you don't want to do the timing
> of the PR, I can carry this patch in my hardening tree, since I already
> have to do a two-part early/late-merge-window PR there.
OK, I'm fine with you doing that; there's my ack already, and hopefully
Andrey is now also happy :)
Vlastimil
On Thu, 17 Nov 2022, Kees Cook wrote:
> With all "silently resizing" callers of ksize() refactored, remove the
> logic in ksize() that would allow it to be used to effectively change
> the size of an allocation (bypassing __alloc_size hints, etc). Users
> wanting this feature need to either use kmalloc_size_roundup() before an
> allocation, or use krealloc() directly.
>
> For kfree_sensitive(), move the unpoisoning logic inline. Replace the
> partially open-coded ksize() in __do_krealloc() with a call to ksize()
> now that it doesn't perform unpoisoning.
>
> Adjust the KUnit tests to match the new ksize() behavior.
>
> Cc: Andrey Konovalov <[email protected]>
> Cc: Christoph Lameter <[email protected]>
> Cc: Pekka Enberg <[email protected]>
> Cc: David Rientjes <[email protected]>
> Cc: Joonsoo Kim <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Roman Gushchin <[email protected]>
> Cc: Hyeonggon Yoo <[email protected]>
> Cc: Andrey Ryabinin <[email protected]>
> Cc: Alexander Potapenko <[email protected]>
> Cc: Dmitry Vyukov <[email protected]>
> Cc: Vincenzo Frascino <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> Acked-by: Vlastimil Babka <[email protected]>
> Signed-off-by: Kees Cook <[email protected]>
Acked-by: David Rientjes <[email protected]>
On Fri, Nov 18, 2022 at 4:57 AM Kees Cook <[email protected]> wrote:
>
> With all "silently resizing" callers of ksize() refactored, remove the
> logic in ksize() that would allow it to be used to effectively change
> the size of an allocation (bypassing __alloc_size hints, etc). Users
> wanting this feature need to either use kmalloc_size_roundup() before an
> allocation, or use krealloc() directly.
>
> For kfree_sensitive(), move the unpoisoning logic inline. Replace the
> partially open-coded ksize() in __do_krealloc() with a call to ksize()
> now that it doesn't perform unpoisoning.
>
> Adjust the KUnit tests to match the new ksize() behavior.
>
> Cc: Andrey Konovalov <[email protected]>
> Cc: Christoph Lameter <[email protected]>
> Cc: Pekka Enberg <[email protected]>
> Cc: David Rientjes <[email protected]>
> Cc: Joonsoo Kim <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Roman Gushchin <[email protected]>
> Cc: Hyeonggon Yoo <[email protected]>
> Cc: Andrey Ryabinin <[email protected]>
> Cc: Alexander Potapenko <[email protected]>
> Cc: Dmitry Vyukov <[email protected]>
> Cc: Vincenzo Frascino <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> Acked-by: Vlastimil Babka <[email protected]>
> Signed-off-by: Kees Cook <[email protected]>
> ---
> v2:
> - improve kunit test precision (andreyknvl)
> - add Ack (vbabka)
> v1: https://lore.kernel.org/all/[email protected]
> ---
> mm/kasan/kasan_test.c | 14 +++++++++-----
> mm/slab_common.c | 26 ++++++++++----------------
> 2 files changed, 19 insertions(+), 21 deletions(-)
>
> diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
> index 7502f03c807c..fc4b22916587 100644
> --- a/mm/kasan/kasan_test.c
> +++ b/mm/kasan/kasan_test.c
> @@ -821,7 +821,7 @@ static void kasan_global_oob_left(struct kunit *test)
> KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
> }
>
> -/* Check that ksize() makes the whole object accessible. */
> +/* Check that ksize() does NOT unpoison whole object. */
> static void ksize_unpoisons_memory(struct kunit *test)
> {
> char *ptr;
> @@ -829,15 +829,19 @@ static void ksize_unpoisons_memory(struct kunit *test)
>
> ptr = kmalloc(size, GFP_KERNEL);
> KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> +
> real_size = ksize(ptr);
> + KUNIT_EXPECT_GT(test, real_size, size);
>
> OPTIMIZER_HIDE_VAR(ptr);
>
> - /* This access shouldn't trigger a KASAN report. */
> - ptr[size] = 'x';
> + /* These accesses shouldn't trigger a KASAN report. */
> + ptr[0] = 'x';
> + ptr[size - 1] = 'x';
>
> - /* This one must. */
> - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
> + /* These must trigger a KASAN report. */
> + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
> + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
Hi Kees,
I just realized there's an issue here with the tag-based modes, as
they align the unpoisoned area to 16 bytes.
One solution would be to change the allocation size to 128 -
KASAN_GRANULE_SIZE - 5, the same way the kmalloc_oob_right test does it,
so that the last 16-byte granule won't get unpoisoned for the
tag-based modes. And then check that the ptr[size] access fails only
for the Generic mode.
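(To spell out the arithmetic: with KASAN_GRANULE_SIZE == 16 in the
tag-based modes, size would be 128 - 16 - 5 == 107, and the allocation
unpoisons round_up(107, 16) == 112 bytes. So ptr[size] through
ptr[size + 4] stay accessible there, while ptr[size + 5] == ptr[112]
lands in the still-poisoned last granule. The Generic mode unpoisons
exactly size bytes, so ptr[size] already triggers a report for it.)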
Thanks!
>
> kfree(ptr);
> }
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 8276022f0da4..27caa57af070 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -1335,11 +1335,11 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
> void *ret;
> size_t ks;
>
> - /* Don't use instrumented ksize to allow precise KASAN poisoning. */
> + /* Check for double-free before calling ksize. */
> if (likely(!ZERO_OR_NULL_PTR(p))) {
> if (!kasan_check_byte(p))
> return NULL;
> - ks = kfence_ksize(p) ?: __ksize(p);
> + ks = ksize(p);
> } else
> ks = 0;
>
> @@ -1407,21 +1407,21 @@ void kfree_sensitive(const void *p)
> void *mem = (void *)p;
>
> ks = ksize(mem);
> - if (ks)
> + if (ks) {
> + kasan_unpoison_range(mem, ks);
> memzero_explicit(mem, ks);
> + }
> kfree(mem);
> }
> EXPORT_SYMBOL(kfree_sensitive);
>
> size_t ksize(const void *objp)
> {
> - size_t size;
> -
> /*
> - * We need to first check that the pointer to the object is valid, and
> - * only then unpoison the memory. The report printed from ksize() is
> - * more useful, then when it's printed later when the behaviour could
> - * be undefined due to a potential use-after-free or double-free.
> + * We need to first check that the pointer to the object is valid.
> + * The KASAN report printed from ksize() is more useful than a
> + * report printed later, when the behaviour could be undefined due
> + * to a potential use-after-free or double-free.
> *
> * We use kasan_check_byte(), which is supported for the hardware
> * tag-based KASAN mode, unlike kasan_check_read/write().
> @@ -1435,13 +1435,7 @@ size_t ksize(const void *objp)
> if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
> return 0;
>
> - size = kfence_ksize(objp) ?: __ksize(objp);
> - /*
> - * We assume that ksize callers could use whole allocated area,
> - * so we need to unpoison this area.
> - */
> - kasan_unpoison_range(objp, size);
> - return size;
> + return kfence_ksize(objp) ?: __ksize(objp);
> }
> EXPORT_SYMBOL(ksize);
>
> --
> 2.34.1
>
On Sat, Nov 26, 2022 at 06:04:39PM +0100, Andrey Konovalov wrote:
> On Fri, Nov 18, 2022 at 4:57 AM Kees Cook <[email protected]> wrote:
> >
> > With all "silently resizing" callers of ksize() refactored, remove the
> > logic in ksize() that would allow it to be used to effectively change
> > the size of an allocation (bypassing __alloc_size hints, etc). Users
> > wanting this feature need to either use kmalloc_size_roundup() before an
> > allocation, or use krealloc() directly.
> >
> > For kfree_sensitive(), move the unpoisoning logic inline. Replace the
> > partially open-coded ksize() in __do_krealloc() with a call to ksize()
> > now that it doesn't perform unpoisoning.
> >
> > Adjust the KUnit tests to match the new ksize() behavior.
> >
> > Cc: Andrey Konovalov <[email protected]>
> > Cc: Christoph Lameter <[email protected]>
> > Cc: Pekka Enberg <[email protected]>
> > Cc: David Rientjes <[email protected]>
> > Cc: Joonsoo Kim <[email protected]>
> > Cc: Andrew Morton <[email protected]>
> > Cc: Roman Gushchin <[email protected]>
> > Cc: Hyeonggon Yoo <[email protected]>
> > Cc: Andrey Ryabinin <[email protected]>
> > Cc: Alexander Potapenko <[email protected]>
> > Cc: Dmitry Vyukov <[email protected]>
> > Cc: Vincenzo Frascino <[email protected]>
> > Cc: [email protected]
> > Cc: [email protected]
> > Acked-by: Vlastimil Babka <[email protected]>
> > Signed-off-by: Kees Cook <[email protected]>
> > ---
> > v2:
> > - improve kunit test precision (andreyknvl)
> > - add Ack (vbabka)
> > v1: https://lore.kernel.org/all/[email protected]
> > ---
> > mm/kasan/kasan_test.c | 14 +++++++++-----
> > mm/slab_common.c | 26 ++++++++++----------------
> > 2 files changed, 19 insertions(+), 21 deletions(-)
> >
> > diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
> > index 7502f03c807c..fc4b22916587 100644
> > --- a/mm/kasan/kasan_test.c
> > +++ b/mm/kasan/kasan_test.c
> > @@ -821,7 +821,7 @@ static void kasan_global_oob_left(struct kunit *test)
> > KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
> > }
> >
> > -/* Check that ksize() makes the whole object accessible. */
> > +/* Check that ksize() does NOT unpoison whole object. */
> > static void ksize_unpoisons_memory(struct kunit *test)
> > {
> > char *ptr;
> > @@ -829,15 +829,19 @@ static void ksize_unpoisons_memory(struct kunit *test)
> >
> > ptr = kmalloc(size, GFP_KERNEL);
> > KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
> > +
> > real_size = ksize(ptr);
> > + KUNIT_EXPECT_GT(test, real_size, size);
> >
> > OPTIMIZER_HIDE_VAR(ptr);
> >
> > - /* This access shouldn't trigger a KASAN report. */
> > - ptr[size] = 'x';
> > + /* These accesses shouldn't trigger a KASAN report. */
> > + ptr[0] = 'x';
> > + ptr[size - 1] = 'x';
> >
> > - /* This one must. */
> > - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
> > + /* These must trigger a KASAN report. */
> > + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
> > + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
>
> Hi Kees,
>
> I just realized there's an issue here with the tag-based modes, as
> they align the unpoisoned area to 16 bytes.
>
> One solution would be to change the allocation size to 128 -
> KASAN_GRANULE_SIZE - 5, the same way the kmalloc_oob_right test does it,
> so that the last 16-byte granule won't get unpoisoned for the
> tag-based modes. And then check that the ptr[size] access fails only
> for the Generic mode.
Ah! Good point. Are you able to send a patch? I suspect you know exactly
what to change; it might take me a bit longer to double-check all of
those details.
-Kees
--
Kees Cook
On Sun, Nov 27, 2022 at 1:55 AM Kees Cook <[email protected]> wrote:
>
> > I just realized there's an issue here with the tag-based modes, as
> > they align the unpoisoned area to 16 bytes.
> >
> > One solution would be to change the allocation size to 128 -
> > KASAN_GRANULE_SIZE - 5, the same way the kmalloc_oob_right test does it,
> > so that the last 16-byte granule won't get unpoisoned for the
> > tag-based modes. And then check that the ptr[size] access fails only
> > for the Generic mode.
>
> Ah! Good point. Are you able to send a patch? I suspect you know exactly
> what to change; it might take me a bit longer to double-check all of
> those details.
Let's do it like this:
size_t size = 128 - KASAN_GRANULE_SIZE - 5, real_size;
...
/* These must trigger a KASAN report. */
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
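Putting it together, the whole test would then look roughly like this
(an untested sketch; KASAN_GRANULE_SIZE comes from mm/kasan/kasan.h):

static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	real_size = ksize(ptr);
	KUNIT_EXPECT_GT(test, real_size, size);

	OPTIMIZER_HIDE_VAR(ptr);

	/* These accesses shouldn't trigger a KASAN report. */
	ptr[0] = 'x';
	ptr[size - 1] = 'x';

	/* These must trigger a KASAN report. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

	kfree(ptr);
}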
Thanks!
On Wed, Nov 30, 2022 at 03:11:35PM +0100, Andrey Konovalov wrote:
> On Sun, Nov 27, 2022 at 1:55 AM Kees Cook <[email protected]> wrote:
> >
> > > I just realized there's an issue here with the tag-based modes, as
> > > they align the unpoisoned area to 16 bytes.
> > >
> > > One solution would be to change the allocation size to 128 -
> > > KASAN_GRANULE_SIZE - 5, the same way the kmalloc_oob_right test does it,
> > > so that the last 16-byte granule won't get unpoisoned for the
> > > tag-based modes. And then check that the ptr[size] access fails only
> > > for the Generic mode.
> >
> > Ah! Good point. Are you able to send a patch? I suspect you know exactly
> > what to change; it might take me a bit longer to double-check all of
> > those details.
>
> Let's do it like this:
>
> size_t size = 128 - KASAN_GRANULE_SIZE - 5, real_size;
>
> ...
>
> /* These must trigger a KASAN report. */
> if (IS_ENABLED(CONFIG_KASAN_GENERIC))
> KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
> KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
> KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
Done, and tested! Thanks :)
--
Kees Cook