2019-08-15 00:43:09

by Daniel Axtens

Subject: [PATCH v4 3/3] x86/kasan: support KASAN_VMALLOC

In the case where KASAN directly allocates memory to back vmalloc
space, don't map the early shadow page over it.

We prepopulate the pgds/p4ds for the range that would otherwise be empty.
This is required to get them synced to hardware on boot, allowing the
lower levels of the page tables to be filled dynamically.
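
For context (not part of this patch): the shadow addresses in the ranges
below come from the generic KASAN address translation, which is roughly
the following (one shadow byte tracks 8 bytes of kernel memory):

static inline void *kasan_mem_to_shadow(const void *addr)
{
        /* scale the address down by 8 and rebase it into the shadow region */
        return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                + KASAN_SHADOW_OFFSET;
}

So prepopulating only the pgd/p4d levels of the shadow for this region
should be cheap; the page-sized shadow memory underneath is only allocated
later, as vmalloc mappings appear.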

Acked-by: Dmitry Vyukov <[email protected]>
Signed-off-by: Daniel Axtens <[email protected]>

---

v2: move from faulting in shadow pgds to prepopulating
---
 arch/x86/Kconfig            |  1 +
 arch/x86/mm/kasan_init_64.c | 61 +++++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 222855cc0158..40562cc3771f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -134,6 +134,7 @@ config X86
         select HAVE_ARCH_JUMP_LABEL
         select HAVE_ARCH_JUMP_LABEL_RELATIVE
         select HAVE_ARCH_KASAN if X86_64
+        select HAVE_ARCH_KASAN_VMALLOC if X86_64
         select HAVE_ARCH_KGDB
         select HAVE_ARCH_MMAP_RND_BITS if MMU
         select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 296da58f3013..2f57c4ddff61 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -245,6 +245,52 @@ static void __init kasan_map_early_shadow(pgd_t *pgd)
         } while (pgd++, addr = next, addr != end);
 }

+static void __init kasan_shallow_populate_p4ds(pgd_t *pgd,
+                                               unsigned long addr,
+                                               unsigned long end,
+                                               int nid)
+{
+        p4d_t *p4d;
+        unsigned long next;
+        void *p;
+
+        p4d = p4d_offset(pgd, addr);
+        do {
+                next = p4d_addr_end(addr, end);
+
+                if (p4d_none(*p4d)) {
+                        p = early_alloc(PAGE_SIZE, nid, true);
+                        p4d_populate(&init_mm, p4d, p);
+                }
+        } while (p4d++, addr = next, addr != end);
+}
+
+static void __init kasan_shallow_populate_pgds(void *start, void *end)
+{
+        unsigned long addr, next;
+        pgd_t *pgd;
+        void *p;
+        int nid = early_pfn_to_nid((unsigned long)start);
+
+        addr = (unsigned long)start;
+        pgd = pgd_offset_k(addr);
+        do {
+                next = pgd_addr_end(addr, (unsigned long)end);
+
+                if (pgd_none(*pgd)) {
+                        p = early_alloc(PAGE_SIZE, nid, true);
+                        pgd_populate(&init_mm, pgd, p);
+                }
+
+                /*
+                 * we need to populate p4ds to be synced when running in
+                 * four level mode - see sync_global_pgds_l4()
+                 */
+                kasan_shallow_populate_p4ds(pgd, addr, next, nid);
+        } while (pgd++, addr = next, addr != (unsigned long)end);
+}
+
+
 #ifdef CONFIG_KASAN_INLINE
 static int kasan_die_handler(struct notifier_block *self,
                              unsigned long val,
@@ -352,9 +398,24 @@ void __init kasan_init(void)
         shadow_cpu_entry_end = (void *)round_up(
                 (unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

+        /*
+         * If we're in full vmalloc mode, don't back vmalloc space with early
+         * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
+         * the global table and we can populate the lower levels on demand.
+         */
+#ifdef CONFIG_KASAN_VMALLOC
+        kasan_shallow_populate_pgds(
+                kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+                kasan_mem_to_shadow((void *)VMALLOC_END));
+
+        kasan_populate_early_shadow(
+                kasan_mem_to_shadow((void *)VMALLOC_END + 1),
+                shadow_cpu_entry_begin);
+#else
         kasan_populate_early_shadow(
                 kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                 shadow_cpu_entry_begin);
+#endif

         kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
                               (unsigned long)shadow_cpu_entry_end, 0);
--
2.20.1


2019-08-16 08:05:38

by Christophe Leroy

Subject: Re: [PATCH v4 3/3] x86/kasan: support KASAN_VMALLOC



On 15/08/2019 at 02:16, Daniel Axtens wrote:
> In the case where KASAN directly allocates memory to back vmalloc
> space, don't map the early shadow page over it.

If the early shadow page is not mapped, any bad memory access will Oops on
the shadow access instead of Oopsing on the real bad access.

You should still map the early shadow page, and replace it with a real page
when needed.
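
Roughly, as an illustrative sketch only (replace_early_shadow() is a
hypothetical helper; locking and init-order details are glossed over),
something like:

#include <linux/kasan.h>
#include <linux/mm.h>
#include <asm/pgtable_types.h>  /* lookup_address(), PG_LEVEL_4K */
#include <asm/tlbflush.h>

/*
 * Keep the whole vmalloc shadow mapped (read-only) to the shared early
 * shadow page, and swap in a freshly zeroed page the first time real
 * shadow is needed at this address.
 */
static void replace_early_shadow(unsigned long shadow_addr)
{
        unsigned int level;
        pte_t *ptep = lookup_address(shadow_addr, &level);
        struct page *page;

        if (!ptep || level != PG_LEVEL_4K)
                return;
        if (pte_pfn(*ptep) != PHYS_PFN(__pa(kasan_early_shadow_page)))
                return;         /* already backed by a real page */

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return;

        set_pte(ptep, pfn_pte(page_to_pfn(page), PAGE_KERNEL));
        flush_tlb_kernel_range(shadow_addr, shadow_addr + PAGE_SIZE);
}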

Christophe
