2021-02-01 19:47:00

by Andrey Konovalov

Subject: [PATCH 03/12] kasan: optimize large kmalloc poisoning

Similarly to kasan_kmalloc(), kasan_kmalloc_large() doesn't need
to unpoison the object, as it was already unpoisoned by alloc_pages()
(or by ksize() for krealloc()).

This patch changes kasan_kmalloc_large() to only poison the redzone.
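
For illustration, a minimal standalone sketch (not part of the patch) of
the resulting redzone math, assuming an 8-byte KASAN granule and a
hypothetical 4096-byte page; ptr and size are made-up example values:

#include <stdio.h>

#define KASAN_GRANULE_SIZE 8UL
#define EXAMPLE_PAGE_SIZE 4096UL

/* Simplified round_up() for power-of-two alignments, as in the kernel. */
#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
        unsigned long ptr = 0x1000;     /* hypothetical page-aligned object */
        unsigned long size = 100;       /* hypothetical requested size */

        unsigned long redzone_start = round_up(ptr + size, KASAN_GRANULE_SIZE);
        unsigned long redzone_end = ptr + EXAMPLE_PAGE_SIZE;

        /* Generic mode also covers the bytes in [ptr + size, redzone_start). */
        printf("unaligned tail: %lu byte(s)\n", redzone_start - (ptr + size));
        printf("aligned redzone: [0x%lx, 0x%lx)\n", redzone_start, redzone_end);
        return 0;
}

For size == 100, 4 tail bytes fall into the last granule and are handled
by kasan_poison_last_granule(); the aligned remainder of the page is
poisoned as KASAN_PAGE_REDZONE.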

Signed-off-by: Andrey Konovalov <[email protected]>
---
mm/kasan/common.c | 20 +++++++++++++++-----
1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 128cb330ca73..a7eb553c8e91 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -494,7 +494,6 @@ EXPORT_SYMBOL(__kasan_kmalloc);
void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
gfp_t flags)
{
- struct page *page;
unsigned long redzone_start;
unsigned long redzone_end;

@@ -504,12 +503,23 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
if (unlikely(ptr == NULL))
return NULL;

- page = virt_to_page(ptr);
+ /*
+ * The object has already been unpoisoned by kasan_alloc_pages() for
+ * alloc_pages() or by ksize() for krealloc().
+ */
+
+ /*
+ * The redzone has byte-level precision for the generic mode.
+ * Partially poison the last object granule to cover the unaligned
+ * part of the redzone.
+ */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ kasan_poison_last_granule(ptr, size);
+
+ /* Poison the aligned part of the redzone. */
redzone_start = round_up((unsigned long)(ptr + size),
KASAN_GRANULE_SIZE);
- redzone_end = (unsigned long)ptr + page_size(page);
-
- kasan_unpoison(ptr, size);
+ redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
kasan_poison((void *)redzone_start, redzone_end - redzone_start,
KASAN_PAGE_REDZONE);

--
2.30.0.365.g02bc693789-goog
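
As background for the partial-granule poisoning above: in generic KASAN,
the shadow byte for a granule that is only partially accessible stores
the number of accessible leading bytes. A minimal illustrative model
(not kernel code) of that encoding, assuming the standard 8-byte granule:

#include <stdio.h>

#define KASAN_GRANULE_SIZE 8UL

/*
 * Model of the value a kasan_poison_last_granule()-style helper writes
 * to the last granule's shadow byte: 0 means fully accessible, 1..7
 * means only the first N bytes of the granule may be touched.
 */
static signed char last_granule_shadow(unsigned long size)
{
        unsigned long tail = size % KASAN_GRANULE_SIZE;

        return tail ? (signed char)tail : 0;
}

int main(void)
{
        printf("size 100 -> shadow byte %d\n", last_granule_shadow(100)); /* 4 */
        printf("size 96  -> shadow byte %d\n", last_granule_shadow(96));  /* 0 */
        return 0;
}

This is also why the patch guards the step with
IS_ENABLED(CONFIG_KASAN_GENERIC): the tag-based modes tag whole granules
and have no byte-level precision to encode.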


2021-02-02 23:22:04

by Marco Elver

Subject: Re: [PATCH 03/12] kasan: optimize large kmalloc poisoning

On Mon, Feb 01, 2021 at 08:43PM +0100, Andrey Konovalov wrote:
> Similarly to kasan_kmalloc(), kasan_kmalloc_large() doesn't need
> to unpoison the object, as it was already unpoisoned by alloc_pages()
> (or by ksize() for krealloc()).
>
> This patch changes kasan_kmalloc_large() to only poison the redzone.
>
> Signed-off-by: Andrey Konovalov <[email protected]>

Reviewed-by: Marco Elver <[email protected]>

> ---
> mm/kasan/common.c | 20 +++++++++++++++-----
> 1 file changed, 15 insertions(+), 5 deletions(-)
>
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 128cb330ca73..a7eb553c8e91 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -494,7 +494,6 @@ EXPORT_SYMBOL(__kasan_kmalloc);
> void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
> gfp_t flags)
> {
> - struct page *page;
> unsigned long redzone_start;
> unsigned long redzone_end;
>
> @@ -504,12 +503,23 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
> if (unlikely(ptr == NULL))
> return NULL;
>
> - page = virt_to_page(ptr);
> + /*
> + * The object has already been unpoisoned by kasan_alloc_pages() for
> + * alloc_pages() or by ksize() for krealloc().
> + */
> +
> + /*
> + * The redzone has byte-level precision for the generic mode.
> + * Partially poison the last object granule to cover the unaligned
> + * part of the redzone.
> + */
> + if (IS_ENABLED(CONFIG_KASAN_GENERIC))
> + kasan_poison_last_granule(ptr, size);
> +
> + /* Poison the aligned part of the redzone. */
> redzone_start = round_up((unsigned long)(ptr + size),
> KASAN_GRANULE_SIZE);
> - redzone_end = (unsigned long)ptr + page_size(page);
> -
> - kasan_unpoison(ptr, size);
> + redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
> kasan_poison((void *)redzone_start, redzone_end - redzone_start,
> KASAN_PAGE_REDZONE);
>
> --
> 2.30.0.365.g02bc693789-goog
>