2020-11-10 22:19:03

by Andrey Konovalov

Subject: [PATCH v9 03/44] kasan: group vmalloc code

This is a preparatory commit for the upcoming addition of a new hardware
tag-based (MTE-based) KASAN mode.

Group all vmalloc-related function declarations in include/linux/kasan.h,
and their implementations in mm/kasan/common.c.

No functional changes.
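
For reference, the resulting grouping in include/linux/kasan.h looks roughly
like this (abridged sketch based on the hunks below; the !CONFIG_KASAN_VMALLOC
stubs for the vmalloc helpers are elided):

#ifdef CONFIG_KASAN_VMALLOC
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end);
#else /* CONFIG_KASAN_VMALLOC */
/* ... static inline no-op stubs ... */
#endif /* CONFIG_KASAN_VMALLOC */

/* Module shadow backing is only needed without KASAN_VMALLOC. */
#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
#else
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
#endif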

Signed-off-by: Andrey Konovalov <[email protected]>
Signed-off-by: Vincenzo Frascino <[email protected]>
Reviewed-by: Marco Elver <[email protected]>
---
Change-Id: Ie20b6c689203cd6de4fd7f2c465ec081c00c5f15
---
include/linux/kasan.h | 41 +++++++++++++----------
mm/kasan/common.c     | 78 ++++++++++++++++++++++---------------------
2 files changed, 63 insertions(+), 56 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 30d343b4a40a..59538e795df4 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -75,19 +75,6 @@ struct kasan_cache {
int free_meta_offset;
};

-/*
- * These functions provide a special case to support backing module
- * allocations with real shadow memory. With KASAN vmalloc, the special
- * case is unnecessary, as the work is handled in the generic case.
- */
-#ifndef CONFIG_KASAN_VMALLOC
-int kasan_module_alloc(void *addr, size_t size);
-void kasan_free_shadow(const struct vm_struct *vm);
-#else
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
-#endif
-
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

@@ -156,9 +143,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
return false;
}

-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
-
static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
return 0;
@@ -211,13 +195,16 @@ static inline void *kasan_reset_tag(const void *addr)
#endif /* CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_VMALLOC
+
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end);
-#else
+
+#else /* CONFIG_KASAN_VMALLOC */
+
static inline int kasan_populate_vmalloc(unsigned long start,
unsigned long size)
{
@@ -232,7 +219,25 @@ static inline void kasan_release_vmalloc(unsigned long start,
unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end) {}
-#endif
+
+#endif /* CONFIG_KASAN_VMALLOC */
+
+#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
+
+/*
+ * These functions provide a special case to support backing module
+ * allocations with real shadow memory. With KASAN vmalloc, the special
+ * case is unnecessary, as the work is handled in the generic case.
+ */
+int kasan_module_alloc(void *addr, size_t size);
+void kasan_free_shadow(const struct vm_struct *vm);
+
+#else /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */
+
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+
+#endif /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 33d863f55db1..89e5ef9417a7 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -536,44 +536,6 @@ void kasan_kfree_large(void *ptr, unsigned long ip)
/* The object will be poisoned by page_alloc. */
}

-#ifndef CONFIG_KASAN_VMALLOC
-int kasan_module_alloc(void *addr, size_t size)
-{
- void *ret;
- size_t scaled_size;
- size_t shadow_size;
- unsigned long shadow_start;
-
- shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
- scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
- shadow_size = round_up(scaled_size, PAGE_SIZE);
-
- if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
- return -EINVAL;
-
- ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
- shadow_start + shadow_size,
- GFP_KERNEL,
- PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
- __builtin_return_address(0));
-
- if (ret) {
- __memset(ret, KASAN_SHADOW_INIT, shadow_size);
- find_vm_area(addr)->flags |= VM_KASAN;
- kmemleak_ignore(ret);
- return 0;
- }
-
- return -ENOMEM;
-}
-
-void kasan_free_shadow(const struct vm_struct *vm)
-{
- if (vm->flags & VM_KASAN)
- vfree(kasan_mem_to_shadow(vm->addr));
-}
-#endif
-
#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
@@ -685,6 +647,7 @@ core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC
+
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
void *unused)
{
@@ -923,4 +886,43 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
(unsigned long)shadow_end);
}
}
+
+#else /* CONFIG_KASAN_VMALLOC */
+
+int kasan_module_alloc(void *addr, size_t size)
+{
+ void *ret;
+ size_t scaled_size;
+ size_t shadow_size;
+ unsigned long shadow_start;
+
+ shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
+ scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+ shadow_size = round_up(scaled_size, PAGE_SIZE);
+
+ if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
+ return -EINVAL;
+
+ ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
+ shadow_start + shadow_size,
+ GFP_KERNEL,
+ PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
+ __builtin_return_address(0));
+
+ if (ret) {
+ __memset(ret, KASAN_SHADOW_INIT, shadow_size);
+ find_vm_area(addr)->flags |= VM_KASAN;
+ kmemleak_ignore(ret);
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+void kasan_free_shadow(const struct vm_struct *vm)
+{
+ if (vm->flags & VM_KASAN)
+ vfree(kasan_mem_to_shadow(vm->addr));
+}
+
#endif
--
2.29.2.222.g5d2a92d10f8-goog


2020-11-11 13:25:05

by Alexander Potapenko

Subject: Re: [PATCH v9 03/44] kasan: group vmalloc code

On Tue, Nov 10, 2020 at 11:11 PM Andrey Konovalov <[email protected]> wrote:
>
> This is a preparatory commit for the upcoming addition of a new hardware
> tag-based (MTE-based) KASAN mode.
>
> Group all vmalloc-related function declarations in include/linux/kasan.h,
> and their implementations in mm/kasan/common.c.
>
> No functional changes.
>
> Signed-off-by: Andrey Konovalov <[email protected]>
> Signed-off-by: Vincenzo Frascino <[email protected]>
> Reviewed-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>


--
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Managing directors: Paul Manicle, Halimah DeLaine Prado
Registration court and number: Hamburg, HRB 86891
Registered office: Hamburg