2019-03-28 14:22:35

by Christophe Leroy

[permalink] [raw]
Subject: [RFC PATCH] powerpc/book3e: KASAN Full support for 64bit

The KASAN shadow area is mapped into vmemmap space:
0x8000 0400 0000 0000 to 0x8000 0600 0000 0000.
For this vmemmap has to be disabled.

Cc: Daniel Axtens <[email protected]>
Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/Kconfig | 1 +
arch/powerpc/Kconfig.debug | 3 +-
arch/powerpc/include/asm/kasan.h | 11 +++
arch/powerpc/kernel/Makefile | 2 +
arch/powerpc/kernel/head_64.S | 3 +
arch/powerpc/kernel/setup_64.c | 20 +++---
arch/powerpc/mm/kasan/Makefile | 1 +
arch/powerpc/mm/kasan/kasan_init_64.c | 129 ++++++++++++++++++++++++++++++++++
8 files changed, 159 insertions(+), 11 deletions(-)
create mode 100644 arch/powerpc/mm/kasan/kasan_init_64.c

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1a2fb50126b2..e0b7c45e4dc7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -174,6 +174,7 @@ config PPC
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if PPC32
+ select HAVE_ARCH_KASAN if PPC_BOOK3E_64 && !SPARSEMEM_VMEMMAP
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 61febbbdd02b..b4140dd6b4e4 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -370,4 +370,5 @@ config PPC_FAST_ENDIAN_SWITCH
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
- default 0xe0000000
+ default 0xe0000000 if PPC32
+ default 0x6800040000000000 if PPC64
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index 296e51c2f066..756b3d58f921 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -23,10 +23,21 @@

#define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)

+#ifdef CONFIG_PPC32
#define KASAN_SHADOW_END 0UL

#define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)

+#else
+
+#include <asm/pgtable.h>
+
+#define KASAN_SHADOW_SIZE (KERN_VIRT_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+
+#endif /* CONFIG_PPC32 */
+
#ifdef CONFIG_KASAN
void kasan_early_init(void);
void kasan_mmu_init(void);
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 0ea6c4aa3a20..7f232c06f11d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -35,6 +35,8 @@ KASAN_SANITIZE_early_32.o := n
KASAN_SANITIZE_cputable.o := n
KASAN_SANITIZE_prom_init.o := n
KASAN_SANITIZE_btext.o := n
+KASAN_SANITIZE_paca.o := n
+KASAN_SANITIZE_setup_64.o := n

ifdef CONFIG_KASAN
CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 3fad8d499767..80fbd8024fb2 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -966,6 +966,9 @@ start_here_multiplatform:
* and SLB setup before we turn on relocation.
*/

+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
/* Restore parameters passed from prom_init/kexec */
mr r3,r31
bl early_setup /* also sets r13 and SPRG_PACA */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index ba404dd9ce1d..d2bf860dd966 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -311,6 +311,16 @@ void __init early_setup(unsigned long dt_ptr)
DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

/*
+ * Configure exception handlers. This include setting up trampolines
+ * if needed, setting exception endian mode, etc...
+ */
+ configure_exceptions();
+
+ /* Apply all the dynamic patching */
+ apply_feature_fixups();
+ setup_feature_keys();
+
+ /*
* Do early initialization using the flattened device
* tree, such as retrieving the physical memory map or
* calculating/retrieving the hash table size.
@@ -325,16 +335,6 @@ void __init early_setup(unsigned long dt_ptr)
setup_paca(paca_ptrs[boot_cpuid]);
fixup_boot_paca();

- /*
- * Configure exception handlers. This include setting up trampolines
- * if needed, setting exception endian mode, etc...
- */
- configure_exceptions();
-
- /* Apply all the dynamic patching */
- apply_feature_fixups();
- setup_feature_keys();
-
/* Initialize the hash table or TLB handling */
early_init_mmu();

diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
index 6577897673dd..0bfbe3892808 100644
--- a/arch/powerpc/mm/kasan/Makefile
+++ b/arch/powerpc/mm/kasan/Makefile
@@ -3,3 +3,4 @@
KASAN_SANITIZE := n

obj-$(CONFIG_PPC32) += kasan_init_32.o
+obj-$(CONFIG_PPC64) += kasan_init_64.o
diff --git a/arch/powerpc/mm/kasan/kasan_init_64.c b/arch/powerpc/mm/kasan/kasan_init_64.c
new file mode 100644
index 000000000000..7fd71b8e883b
--- /dev/null
+++ b/arch/powerpc/mm/kasan/kasan_init_64.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/printk.h>
+#include <linux/memblock.h>
+#include <linux/sched/task.h>
+#include <asm/pgalloc.h>
+
+static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
+{
+ unsigned long va = (unsigned long)kasan_early_shadow_page;
+ phys_addr_t pa = __pa(kasan_early_shadow_page);
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
+ __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
+}
+
+static void __init kasan_populate_pmd(pmd_t *pmdp)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ pmd_populate_kernel(&init_mm, pmdp + i, kasan_early_shadow_pte);
+}
+
+static void __init kasan_populate_pud(pud_t *pudp)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PUD; i++)
+ pud_populate(&init_mm, pudp + i, kasan_early_shadow_pmd);
+}
+
+static void __init *kasan_alloc_pgtable(unsigned long size)
+{
+ void *ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
+ __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
+ __func__, size, size, __pa(MAX_DMA_ADDRESS));
+
+ return ptr;
+}
+
+static int __init kasan_map_page(unsigned long va, unsigned long pa, pgprot_t prot)
+{
+ pgd_t *pgdp = pgd_offset_k(va);
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ if (pgd_none(*pgdp) || (void *)pgd_page_vaddr(*pgdp) == kasan_early_shadow_pud) {
+ pudp = kasan_alloc_pgtable(PUD_TABLE_SIZE);
+ kasan_populate_pud(pudp);
+ pgd_populate(&init_mm, pgdp, pudp);
+ }
+ pudp = pud_offset(pgdp, va);
+ if (pud_none(*pudp) || (void *)pud_page_vaddr(*pudp) == kasan_early_shadow_pmd) {
+ pmdp = kasan_alloc_pgtable(PMD_TABLE_SIZE);
+ kasan_populate_pmd(pmdp);
+ pud_populate(&init_mm, pudp, pmdp);
+ }
+ pmdp = pmd_offset(pudp, va);
+ if (!pmd_present(*pmdp) || (void *)pmd_page_vaddr(*pmdp) == kasan_early_shadow_pte) {
+ ptep = kasan_alloc_pgtable(PTE_TABLE_SIZE);
+ kasan_populate_pte(ptep, PAGE_KERNEL);
+ pmd_populate_kernel(&init_mm, pmdp, ptep);
+ }
+ ptep = pte_offset_kernel(pmdp, va);
+
+ __set_pte_at(&init_mm, va, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0);
+
+ return 0;
+}
+
+static void __init kasan_init_region(struct memblock_region *reg)
+{
+ void *start = __va(reg->base);
+ void *end = __va(reg->base + reg->size);
+ unsigned long k_start, k_end, k_cur;
+
+ if (start >= end)
+ return;
+
+ k_start = (unsigned long)kasan_mem_to_shadow(start);
+ k_end = (unsigned long)kasan_mem_to_shadow(end);
+
+ for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) {
+ void *va = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+
+ kasan_map_page(k_cur, __pa(va), PAGE_KERNEL);
+ }
+ flush_tlb_kernel_range(k_start, k_end);
+}
+
+void __init kasan_init(void)
+{
+ struct memblock_region *reg;
+
+ for_each_memblock(memory, reg)
+ kasan_init_region(reg);
+
+ /* It's too early to use clear_page() ! */
+ memset(kasan_early_shadow_page, 0, sizeof(kasan_early_shadow_page));
+
+ /* Enable error messages */
+ init_task.kasan_depth = 0;
+ pr_info("KASAN init done\n");
+}
+
+/* The early shadow maps everything to a single page of zeroes */
+asmlinkage void __init kasan_early_init(void)
+{
+ unsigned long addr = KASAN_SHADOW_START;
+ unsigned long end = KASAN_SHADOW_END;
+ pgd_t *pgdp = pgd_offset_k(addr);
+
+ kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);
+ kasan_populate_pmd(kasan_early_shadow_pmd);
+ kasan_populate_pud(kasan_early_shadow_pud);
+
+ do {
+ pgd_populate(&init_mm, pgdp, kasan_early_shadow_pud);
+ } while (pgdp++, addr = pgd_addr_end(addr, end), addr != end);
+}
--
2.13.3



2019-03-28 14:41:33

by Christophe Leroy

[permalink] [raw]
Subject: Re: [RFC PATCH] powerpc/book3e: KASAN Full support for 64bit

Daniel,

This patch applies on top of my series.

With this patch, I've managed to enable KASAN without the changes you
proposed on the KASAN core. This allows a full support of KASAN, ie not
limited to KASAN_MINIMAL.

There are still some details to address, but it boots OK on qemu-e500.

Can you have a try and tell me if it works on your side too ?

It will likely fail for modules for the time being as I did nothing about it.

Tell me if you continue working on it. I don't plan to spend more time
on it for the time being.

Like I did on PPC32, it would be good to create an early_64.c file and
move into it the few functions from setup_64.c and paca.c that are
called before feature_fixups are done, in order to not disable KASAN on
the entire paca.c and setup_64.c

I guess we could reduce a bit the size of the IOREMAP_AREA and put KASAN
on top of it instead of using VMEMMAP space, although I don't have a
clear view of how it would cooperate with VMEMMAP if we keep it enabled.

I've not been able to identify what is the largest size of the linear
mapping (ie the 0xc000000000000000 region).

Christophe

Le 28/03/2019 à 15:21, Christophe Leroy a écrit :
> The KASAN shadow area is mapped into vmemmap space:
> 0x8000 0400 0000 0000 to 0x8000 0600 0000 0000.
> For this vmemmap has to be disabled.
>
> Cc: Daniel Axtens <[email protected]>
> Signed-off-by: Christophe Leroy <[email protected]>
> ---
> arch/powerpc/Kconfig | 1 +
> arch/powerpc/Kconfig.debug | 3 +-
> arch/powerpc/include/asm/kasan.h | 11 +++
> arch/powerpc/kernel/Makefile | 2 +
> arch/powerpc/kernel/head_64.S | 3 +
> arch/powerpc/kernel/setup_64.c | 20 +++---
> arch/powerpc/mm/kasan/Makefile | 1 +
> arch/powerpc/mm/kasan/kasan_init_64.c | 129 ++++++++++++++++++++++++++++++++++
> 8 files changed, 159 insertions(+), 11 deletions(-)
> create mode 100644 arch/powerpc/mm/kasan/kasan_init_64.c
>
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 1a2fb50126b2..e0b7c45e4dc7 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -174,6 +174,7 @@ config PPC
> select HAVE_ARCH_AUDITSYSCALL
> select HAVE_ARCH_JUMP_LABEL
> select HAVE_ARCH_KASAN if PPC32
> + select HAVE_ARCH_KASAN if PPC_BOOK3E_64 && !SPARSEMEM_VMEMMAP
> select HAVE_ARCH_KGDB
> select HAVE_ARCH_MMAP_RND_BITS
> select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
> diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
> index 61febbbdd02b..b4140dd6b4e4 100644
> --- a/arch/powerpc/Kconfig.debug
> +++ b/arch/powerpc/Kconfig.debug
> @@ -370,4 +370,5 @@ config PPC_FAST_ENDIAN_SWITCH
> config KASAN_SHADOW_OFFSET
> hex
> depends on KASAN
> - default 0xe0000000
> + default 0xe0000000 if PPC32
> + default 0x6800040000000000 if PPC64
> diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
> index 296e51c2f066..756b3d58f921 100644
> --- a/arch/powerpc/include/asm/kasan.h
> +++ b/arch/powerpc/include/asm/kasan.h
> @@ -23,10 +23,21 @@
>
> #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
>
> +#ifdef CONFIG_PPC32
> #define KASAN_SHADOW_END 0UL
>
> #define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)
>
> +#else
> +
> +#include <asm/pgtable.h>
> +
> +#define KASAN_SHADOW_SIZE (KERN_VIRT_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
> +
> +#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
> +
> +#endif /* CONFIG_PPC32 */
> +
> #ifdef CONFIG_KASAN
> void kasan_early_init(void);
> void kasan_mmu_init(void);
> diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
> index 0ea6c4aa3a20..7f232c06f11d 100644
> --- a/arch/powerpc/kernel/Makefile
> +++ b/arch/powerpc/kernel/Makefile
> @@ -35,6 +35,8 @@ KASAN_SANITIZE_early_32.o := n
> KASAN_SANITIZE_cputable.o := n
> KASAN_SANITIZE_prom_init.o := n
> KASAN_SANITIZE_btext.o := n
> +KASAN_SANITIZE_paca.o := n
> +KASAN_SANITIZE_setup_64.o := n
>
> ifdef CONFIG_KASAN
> CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
> diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
> index 3fad8d499767..80fbd8024fb2 100644
> --- a/arch/powerpc/kernel/head_64.S
> +++ b/arch/powerpc/kernel/head_64.S
> @@ -966,6 +966,9 @@ start_here_multiplatform:
> * and SLB setup before we turn on relocation.
> */
>
> +#ifdef CONFIG_KASAN
> + bl kasan_early_init
> +#endif
> /* Restore parameters passed from prom_init/kexec */
> mr r3,r31
> bl early_setup /* also sets r13 and SPRG_PACA */
> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
> index ba404dd9ce1d..d2bf860dd966 100644
> --- a/arch/powerpc/kernel/setup_64.c
> +++ b/arch/powerpc/kernel/setup_64.c
> @@ -311,6 +311,16 @@ void __init early_setup(unsigned long dt_ptr)
> DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
>
> /*
> + * Configure exception handlers. This include setting up trampolines
> + * if needed, setting exception endian mode, etc...
> + */
> + configure_exceptions();
> +
> + /* Apply all the dynamic patching */
> + apply_feature_fixups();
> + setup_feature_keys();
> +
> + /*
> * Do early initialization using the flattened device
> * tree, such as retrieving the physical memory map or
> * calculating/retrieving the hash table size.
> @@ -325,16 +335,6 @@ void __init early_setup(unsigned long dt_ptr)
> setup_paca(paca_ptrs[boot_cpuid]);
> fixup_boot_paca();
>
> - /*
> - * Configure exception handlers. This include setting up trampolines
> - * if needed, setting exception endian mode, etc...
> - */
> - configure_exceptions();
> -
> - /* Apply all the dynamic patching */
> - apply_feature_fixups();
> - setup_feature_keys();
> -
> /* Initialize the hash table or TLB handling */
> early_init_mmu();
>
> diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
> index 6577897673dd..0bfbe3892808 100644
> --- a/arch/powerpc/mm/kasan/Makefile
> +++ b/arch/powerpc/mm/kasan/Makefile
> @@ -3,3 +3,4 @@
> KASAN_SANITIZE := n
>
> obj-$(CONFIG_PPC32) += kasan_init_32.o
> +obj-$(CONFIG_PPC64) += kasan_init_64.o
> diff --git a/arch/powerpc/mm/kasan/kasan_init_64.c b/arch/powerpc/mm/kasan/kasan_init_64.c
> new file mode 100644
> index 000000000000..7fd71b8e883b
> --- /dev/null
> +++ b/arch/powerpc/mm/kasan/kasan_init_64.c
> @@ -0,0 +1,129 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#define DISABLE_BRANCH_PROFILING
> +
> +#include <linux/kasan.h>
> +#include <linux/printk.h>
> +#include <linux/memblock.h>
> +#include <linux/sched/task.h>
> +#include <asm/pgalloc.h>
> +
> +static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
> +{
> + unsigned long va = (unsigned long)kasan_early_shadow_page;
> + phys_addr_t pa = __pa(kasan_early_shadow_page);
> + int i;
> +
> + for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
> + __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
> +}
> +
> +static void __init kasan_populate_pmd(pmd_t *pmdp)
> +{
> + int i;
> +
> + for (i = 0; i < PTRS_PER_PMD; i++)
> + pmd_populate_kernel(&init_mm, pmdp + i, kasan_early_shadow_pte);
> +}
> +
> +static void __init kasan_populate_pud(pud_t *pudp)
> +{
> + int i;
> +
> + for (i = 0; i < PTRS_PER_PUD; i++)
> + pud_populate(&init_mm, pudp + i, kasan_early_shadow_pmd);
> +}
> +
> +static void __init *kasan_alloc_pgtable(unsigned long size)
> +{
> + void *ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
> + __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
> +
> + if (!ptr)
> + panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
> + __func__, size, size, __pa(MAX_DMA_ADDRESS));
> +
> + return ptr;
> +}
> +
> +static int __init kasan_map_page(unsigned long va, unsigned long pa, pgprot_t prot)
> +{
> + pgd_t *pgdp = pgd_offset_k(va);
> + pud_t *pudp;
> + pmd_t *pmdp;
> + pte_t *ptep;
> +
> + if (pgd_none(*pgdp) || (void *)pgd_page_vaddr(*pgdp) == kasan_early_shadow_pud) {
> + pudp = kasan_alloc_pgtable(PUD_TABLE_SIZE);
> + kasan_populate_pud(pudp);
> + pgd_populate(&init_mm, pgdp, pudp);
> + }
> + pudp = pud_offset(pgdp, va);
> + if (pud_none(*pudp) || (void *)pud_page_vaddr(*pudp) == kasan_early_shadow_pmd) {
> + pmdp = kasan_alloc_pgtable(PMD_TABLE_SIZE);
> + kasan_populate_pmd(pmdp);
> + pud_populate(&init_mm, pudp, pmdp);
> + }
> + pmdp = pmd_offset(pudp, va);
> + if (!pmd_present(*pmdp) || (void *)pmd_page_vaddr(*pmdp) == kasan_early_shadow_pte) {
> + ptep = kasan_alloc_pgtable(PTE_TABLE_SIZE);
> + kasan_populate_pte(ptep, PAGE_KERNEL);
> + pmd_populate_kernel(&init_mm, pmdp, ptep);
> + }
> + ptep = pte_offset_kernel(pmdp, va);
> +
> + __set_pte_at(&init_mm, va, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0);
> +
> + return 0;
> +}
> +
> +static void __init kasan_init_region(struct memblock_region *reg)
> +{
> + void *start = __va(reg->base);
> + void *end = __va(reg->base + reg->size);
> + unsigned long k_start, k_end, k_cur;
> +
> + if (start >= end)
> + return;
> +
> + k_start = (unsigned long)kasan_mem_to_shadow(start);
> + k_end = (unsigned long)kasan_mem_to_shadow(end);
> +
> + for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) {
> + void *va = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
> +
> + kasan_map_page(k_cur, __pa(va), PAGE_KERNEL);
> + }
> + flush_tlb_kernel_range(k_start, k_end);
> +}
> +
> +void __init kasan_init(void)
> +{
> + struct memblock_region *reg;
> +
> + for_each_memblock(memory, reg)
> + kasan_init_region(reg);
> +
> + /* It's too early to use clear_page() ! */
> + memset(kasan_early_shadow_page, 0, sizeof(kasan_early_shadow_page));
> +
> + /* Enable error messages */
> + init_task.kasan_depth = 0;
> + pr_info("KASAN init done\n");
> +}
> +
> +/* The early shadow maps everything to a single page of zeroes */
> +asmlinkage void __init kasan_early_init(void)
> +{
> + unsigned long addr = KASAN_SHADOW_START;
> + unsigned long end = KASAN_SHADOW_END;
> + pgd_t *pgdp = pgd_offset_k(addr);
> +
> + kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);
> + kasan_populate_pmd(kasan_early_shadow_pmd);
> + kasan_populate_pud(kasan_early_shadow_pud);
> +
> + do {
> + pgd_populate(&init_mm, pgdp, kasan_early_shadow_pud);
> + } while (pgdp++, addr = pgd_addr_end(addr, end), addr != end);
> +}
>

2019-03-28 23:42:26

by Daniel Axtens

[permalink] [raw]
Subject: Re: [RFC PATCH] powerpc/book3e: KASAN Full support for 64bit

Hi Christophe,

> Daniel,
>
> This patch applies on top of my series.
>
> With this patch, I've managed to enable KASAN without the changes you
> proposed on the KASAN core. This allows a full support of KASAN, ie not
> limited to KASAN_MINIMAL.
>
> There is still some details to address, but it boots OK on qemu-e500.
>
> Can you have a try and tell me if it works on your side too ?
>
> It will likely fail for modules at the time being as I did nothing about it.
>
> Tell me if you continue working on it. I don't plan to spend more time
> on it for the time being.

Thanks!! I will have a look - I've been trying to do something on 3s
Radix and was hoping to apply lessons learned from that to the 3e case.
So it may take a little while but I will eventually get back to this.

Regards,
Daniel
>
> Like I did on PPC32, it would be good to create an early_64.c file and
> move into it the few functions from setup_64.c and paca.c that are
> called before feature_fixups are done, in order to not disable KASAN on
> the entire paca.c and setup_64.c
>
> I guess we could reduce a bit the size of the IOREMAP_AREA and put KASAN
> on top of it instead of using VMEMMAP space, allthough I don't have a
> clean view of how it would cooperate with VMEMMMAP if we keep it enabled.
>
> I've not been able to identify what is the largest size of the linear
> mapping (ie the 0xc000000000000000 region).
>
> Christophe
>
> Le 28/03/2019 à 15:21, Christophe Leroy a écrit :
>> The KASAN shadow area is mapped into vmemmap space:
>> 0x8000 0400 0000 0000 to 0x8000 0600 0000 0000.
>> For this vmemmap has to be disabled.
>>
>> Cc: Daniel Axtens <[email protected]>
>> Signed-off-by: Christophe Leroy <[email protected]>
>> ---
>> arch/powerpc/Kconfig | 1 +
>> arch/powerpc/Kconfig.debug | 3 +-
>> arch/powerpc/include/asm/kasan.h | 11 +++
>> arch/powerpc/kernel/Makefile | 2 +
>> arch/powerpc/kernel/head_64.S | 3 +
>> arch/powerpc/kernel/setup_64.c | 20 +++---
>> arch/powerpc/mm/kasan/Makefile | 1 +
>> arch/powerpc/mm/kasan/kasan_init_64.c | 129 ++++++++++++++++++++++++++++++++++
>> 8 files changed, 159 insertions(+), 11 deletions(-)
>> create mode 100644 arch/powerpc/mm/kasan/kasan_init_64.c
>>
>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>> index 1a2fb50126b2..e0b7c45e4dc7 100644
>> --- a/arch/powerpc/Kconfig
>> +++ b/arch/powerpc/Kconfig
>> @@ -174,6 +174,7 @@ config PPC
>> select HAVE_ARCH_AUDITSYSCALL
>> select HAVE_ARCH_JUMP_LABEL
>> select HAVE_ARCH_KASAN if PPC32
>> + select HAVE_ARCH_KASAN if PPC_BOOK3E_64 && !SPARSEMEM_VMEMMAP
>> select HAVE_ARCH_KGDB
>> select HAVE_ARCH_MMAP_RND_BITS
>> select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
>> diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
>> index 61febbbdd02b..b4140dd6b4e4 100644
>> --- a/arch/powerpc/Kconfig.debug
>> +++ b/arch/powerpc/Kconfig.debug
>> @@ -370,4 +370,5 @@ config PPC_FAST_ENDIAN_SWITCH
>> config KASAN_SHADOW_OFFSET
>> hex
>> depends on KASAN
>> - default 0xe0000000
>> + default 0xe0000000 if PPC32
>> + default 0x6800040000000000 if PPC64
>> diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
>> index 296e51c2f066..756b3d58f921 100644
>> --- a/arch/powerpc/include/asm/kasan.h
>> +++ b/arch/powerpc/include/asm/kasan.h
>> @@ -23,10 +23,21 @@
>>
>> #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
>>
>> +#ifdef CONFIG_PPC32
>> #define KASAN_SHADOW_END 0UL
>>
>> #define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)
>>
>> +#else
>> +
>> +#include <asm/pgtable.h>
>> +
>> +#define KASAN_SHADOW_SIZE (KERN_VIRT_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
>> +
>> +#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
>> +
>> +#endif /* CONFIG_PPC32 */
>> +
>> #ifdef CONFIG_KASAN
>> void kasan_early_init(void);
>> void kasan_mmu_init(void);
>> diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
>> index 0ea6c4aa3a20..7f232c06f11d 100644
>> --- a/arch/powerpc/kernel/Makefile
>> +++ b/arch/powerpc/kernel/Makefile
>> @@ -35,6 +35,8 @@ KASAN_SANITIZE_early_32.o := n
>> KASAN_SANITIZE_cputable.o := n
>> KASAN_SANITIZE_prom_init.o := n
>> KASAN_SANITIZE_btext.o := n
>> +KASAN_SANITIZE_paca.o := n
>> +KASAN_SANITIZE_setup_64.o := n
>>
>> ifdef CONFIG_KASAN
>> CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
>> diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
>> index 3fad8d499767..80fbd8024fb2 100644
>> --- a/arch/powerpc/kernel/head_64.S
>> +++ b/arch/powerpc/kernel/head_64.S
>> @@ -966,6 +966,9 @@ start_here_multiplatform:
>> * and SLB setup before we turn on relocation.
>> */
>>
>> +#ifdef CONFIG_KASAN
>> + bl kasan_early_init
>> +#endif
>> /* Restore parameters passed from prom_init/kexec */
>> mr r3,r31
>> bl early_setup /* also sets r13 and SPRG_PACA */
>> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
>> index ba404dd9ce1d..d2bf860dd966 100644
>> --- a/arch/powerpc/kernel/setup_64.c
>> +++ b/arch/powerpc/kernel/setup_64.c
>> @@ -311,6 +311,16 @@ void __init early_setup(unsigned long dt_ptr)
>> DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
>>
>> /*
>> + * Configure exception handlers. This include setting up trampolines
>> + * if needed, setting exception endian mode, etc...
>> + */
>> + configure_exceptions();
>> +
>> + /* Apply all the dynamic patching */
>> + apply_feature_fixups();
>> + setup_feature_keys();
>> +
>> + /*
>> * Do early initialization using the flattened device
>> * tree, such as retrieving the physical memory map or
>> * calculating/retrieving the hash table size.
>> @@ -325,16 +335,6 @@ void __init early_setup(unsigned long dt_ptr)
>> setup_paca(paca_ptrs[boot_cpuid]);
>> fixup_boot_paca();
>>
>> - /*
>> - * Configure exception handlers. This include setting up trampolines
>> - * if needed, setting exception endian mode, etc...
>> - */
>> - configure_exceptions();
>> -
>> - /* Apply all the dynamic patching */
>> - apply_feature_fixups();
>> - setup_feature_keys();
>> -
>> /* Initialize the hash table or TLB handling */
>> early_init_mmu();
>>
>> diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
>> index 6577897673dd..0bfbe3892808 100644
>> --- a/arch/powerpc/mm/kasan/Makefile
>> +++ b/arch/powerpc/mm/kasan/Makefile
>> @@ -3,3 +3,4 @@
>> KASAN_SANITIZE := n
>>
>> obj-$(CONFIG_PPC32) += kasan_init_32.o
>> +obj-$(CONFIG_PPC64) += kasan_init_64.o
>> diff --git a/arch/powerpc/mm/kasan/kasan_init_64.c b/arch/powerpc/mm/kasan/kasan_init_64.c
>> new file mode 100644
>> index 000000000000..7fd71b8e883b
>> --- /dev/null
>> +++ b/arch/powerpc/mm/kasan/kasan_init_64.c
>> @@ -0,0 +1,129 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +
>> +#define DISABLE_BRANCH_PROFILING
>> +
>> +#include <linux/kasan.h>
>> +#include <linux/printk.h>
>> +#include <linux/memblock.h>
>> +#include <linux/sched/task.h>
>> +#include <asm/pgalloc.h>
>> +
>> +static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
>> +{
>> + unsigned long va = (unsigned long)kasan_early_shadow_page;
>> + phys_addr_t pa = __pa(kasan_early_shadow_page);
>> + int i;
>> +
>> + for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
>> + __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
>> +}
>> +
>> +static void __init kasan_populate_pmd(pmd_t *pmdp)
>> +{
>> + int i;
>> +
>> + for (i = 0; i < PTRS_PER_PMD; i++)
>> + pmd_populate_kernel(&init_mm, pmdp + i, kasan_early_shadow_pte);
>> +}
>> +
>> +static void __init kasan_populate_pud(pud_t *pudp)
>> +{
>> + int i;
>> +
>> + for (i = 0; i < PTRS_PER_PUD; i++)
>> + pud_populate(&init_mm, pudp + i, kasan_early_shadow_pmd);
>> +}
>> +
>> +static void __init *kasan_alloc_pgtable(unsigned long size)
>> +{
>> + void *ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
>> + __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
>> +
>> + if (!ptr)
>> + panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
>> + __func__, size, size, __pa(MAX_DMA_ADDRESS));
>> +
>> + return ptr;
>> +}
>> +
>> +static int __init kasan_map_page(unsigned long va, unsigned long pa, pgprot_t prot)
>> +{
>> + pgd_t *pgdp = pgd_offset_k(va);
>> + pud_t *pudp;
>> + pmd_t *pmdp;
>> + pte_t *ptep;
>> +
>> + if (pgd_none(*pgdp) || (void *)pgd_page_vaddr(*pgdp) == kasan_early_shadow_pud) {
>> + pudp = kasan_alloc_pgtable(PUD_TABLE_SIZE);
>> + kasan_populate_pud(pudp);
>> + pgd_populate(&init_mm, pgdp, pudp);
>> + }
>> + pudp = pud_offset(pgdp, va);
>> + if (pud_none(*pudp) || (void *)pud_page_vaddr(*pudp) == kasan_early_shadow_pmd) {
>> + pmdp = kasan_alloc_pgtable(PMD_TABLE_SIZE);
>> + kasan_populate_pmd(pmdp);
>> + pud_populate(&init_mm, pudp, pmdp);
>> + }
>> + pmdp = pmd_offset(pudp, va);
>> + if (!pmd_present(*pmdp) || (void *)pmd_page_vaddr(*pmdp) == kasan_early_shadow_pte) {
>> + ptep = kasan_alloc_pgtable(PTE_TABLE_SIZE);
>> + kasan_populate_pte(ptep, PAGE_KERNEL);
>> + pmd_populate_kernel(&init_mm, pmdp, ptep);
>> + }
>> + ptep = pte_offset_kernel(pmdp, va);
>> +
>> + __set_pte_at(&init_mm, va, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0);
>> +
>> + return 0;
>> +}
>> +
>> +static void __init kasan_init_region(struct memblock_region *reg)
>> +{
>> + void *start = __va(reg->base);
>> + void *end = __va(reg->base + reg->size);
>> + unsigned long k_start, k_end, k_cur;
>> +
>> + if (start >= end)
>> + return;
>> +
>> + k_start = (unsigned long)kasan_mem_to_shadow(start);
>> + k_end = (unsigned long)kasan_mem_to_shadow(end);
>> +
>> + for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) {
>> + void *va = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
>> +
>> + kasan_map_page(k_cur, __pa(va), PAGE_KERNEL);
>> + }
>> + flush_tlb_kernel_range(k_start, k_end);
>> +}
>> +
>> +void __init kasan_init(void)
>> +{
>> + struct memblock_region *reg;
>> +
>> + for_each_memblock(memory, reg)
>> + kasan_init_region(reg);
>> +
>> + /* It's too early to use clear_page() ! */
>> + memset(kasan_early_shadow_page, 0, sizeof(kasan_early_shadow_page));
>> +
>> + /* Enable error messages */
>> + init_task.kasan_depth = 0;
>> + pr_info("KASAN init done\n");
>> +}
>> +
>> +/* The early shadow maps everything to a single page of zeroes */
>> +asmlinkage void __init kasan_early_init(void)
>> +{
>> + unsigned long addr = KASAN_SHADOW_START;
>> + unsigned long end = KASAN_SHADOW_END;
>> + pgd_t *pgdp = pgd_offset_k(addr);
>> +
>> + kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);
>> + kasan_populate_pmd(kasan_early_shadow_pmd);
>> + kasan_populate_pud(kasan_early_shadow_pud);
>> +
>> + do {
>> + pgd_populate(&init_mm, pgdp, kasan_early_shadow_pud);
>> + } while (pgdp++, addr = pgd_addr_end(addr, end), addr != end);
>> +}
>>

2019-05-31 01:32:29

by Daniel Axtens

[permalink] [raw]
Subject: Re: [RFC PATCH] powerpc/book3e: KASAN Full support for 64bit

Hi Christophe,

I tried this on the t4240rdb and it fails to boot if KASAN is
enabled. It does boot with the patch applied but KASAN disabled, so that
narrows it down a little bit.

I need to focus on 3s first so I'll just drop 3e from my patch set for
now.

Regards,
Daniel

> The KASAN shadow area is mapped into vmemmap space:
> 0x8000 0400 0000 0000 to 0x8000 0600 0000 0000.
> For this vmemmap has to be disabled.
>
> Cc: Daniel Axtens <[email protected]>
> Signed-off-by: Christophe Leroy <[email protected]>
> ---
> arch/powerpc/Kconfig | 1 +
> arch/powerpc/Kconfig.debug | 3 +-
> arch/powerpc/include/asm/kasan.h | 11 +++
> arch/powerpc/kernel/Makefile | 2 +
> arch/powerpc/kernel/head_64.S | 3 +
> arch/powerpc/kernel/setup_64.c | 20 +++---
> arch/powerpc/mm/kasan/Makefile | 1 +
> arch/powerpc/mm/kasan/kasan_init_64.c | 129 ++++++++++++++++++++++++++++++++++
> 8 files changed, 159 insertions(+), 11 deletions(-)
> create mode 100644 arch/powerpc/mm/kasan/kasan_init_64.c
>
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 1a2fb50126b2..e0b7c45e4dc7 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -174,6 +174,7 @@ config PPC
> select HAVE_ARCH_AUDITSYSCALL
> select HAVE_ARCH_JUMP_LABEL
> select HAVE_ARCH_KASAN if PPC32
> + select HAVE_ARCH_KASAN if PPC_BOOK3E_64 && !SPARSEMEM_VMEMMAP
> select HAVE_ARCH_KGDB
> select HAVE_ARCH_MMAP_RND_BITS
> select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
> diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
> index 61febbbdd02b..b4140dd6b4e4 100644
> --- a/arch/powerpc/Kconfig.debug
> +++ b/arch/powerpc/Kconfig.debug
> @@ -370,4 +370,5 @@ config PPC_FAST_ENDIAN_SWITCH
> config KASAN_SHADOW_OFFSET
> hex
> depends on KASAN
> - default 0xe0000000
> + default 0xe0000000 if PPC32
> + default 0x6800040000000000 if PPC64
> diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
> index 296e51c2f066..756b3d58f921 100644
> --- a/arch/powerpc/include/asm/kasan.h
> +++ b/arch/powerpc/include/asm/kasan.h
> @@ -23,10 +23,21 @@
>
> #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
>
> +#ifdef CONFIG_PPC32
> #define KASAN_SHADOW_END 0UL
>
> #define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)
>
> +#else
> +
> +#include <asm/pgtable.h>
> +
> +#define KASAN_SHADOW_SIZE (KERN_VIRT_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
> +
> +#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
> +
> +#endif /* CONFIG_PPC32 */
> +
> #ifdef CONFIG_KASAN
> void kasan_early_init(void);
> void kasan_mmu_init(void);
> diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
> index 0ea6c4aa3a20..7f232c06f11d 100644
> --- a/arch/powerpc/kernel/Makefile
> +++ b/arch/powerpc/kernel/Makefile
> @@ -35,6 +35,8 @@ KASAN_SANITIZE_early_32.o := n
> KASAN_SANITIZE_cputable.o := n
> KASAN_SANITIZE_prom_init.o := n
> KASAN_SANITIZE_btext.o := n
> +KASAN_SANITIZE_paca.o := n
> +KASAN_SANITIZE_setup_64.o := n
>
> ifdef CONFIG_KASAN
> CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
> diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
> index 3fad8d499767..80fbd8024fb2 100644
> --- a/arch/powerpc/kernel/head_64.S
> +++ b/arch/powerpc/kernel/head_64.S
> @@ -966,6 +966,9 @@ start_here_multiplatform:
> * and SLB setup before we turn on relocation.
> */
>
> +#ifdef CONFIG_KASAN
> + bl kasan_early_init
> +#endif
> /* Restore parameters passed from prom_init/kexec */
> mr r3,r31
> bl early_setup /* also sets r13 and SPRG_PACA */
> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
> index ba404dd9ce1d..d2bf860dd966 100644
> --- a/arch/powerpc/kernel/setup_64.c
> +++ b/arch/powerpc/kernel/setup_64.c
> @@ -311,6 +311,16 @@ void __init early_setup(unsigned long dt_ptr)
> DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
>
> /*
> + * Configure exception handlers. This include setting up trampolines
> + * if needed, setting exception endian mode, etc...
> + */
> + configure_exceptions();
> +
> + /* Apply all the dynamic patching */
> + apply_feature_fixups();
> + setup_feature_keys();
> +
> + /*
> * Do early initialization using the flattened device
> * tree, such as retrieving the physical memory map or
> * calculating/retrieving the hash table size.
> @@ -325,16 +335,6 @@ void __init early_setup(unsigned long dt_ptr)
> setup_paca(paca_ptrs[boot_cpuid]);
> fixup_boot_paca();
>
> - /*
> - * Configure exception handlers. This include setting up trampolines
> - * if needed, setting exception endian mode, etc...
> - */
> - configure_exceptions();
> -
> - /* Apply all the dynamic patching */
> - apply_feature_fixups();
> - setup_feature_keys();
> -
> /* Initialize the hash table or TLB handling */
> early_init_mmu();
>
> diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
> index 6577897673dd..0bfbe3892808 100644
> --- a/arch/powerpc/mm/kasan/Makefile
> +++ b/arch/powerpc/mm/kasan/Makefile
> @@ -3,3 +3,4 @@
> KASAN_SANITIZE := n
>
> obj-$(CONFIG_PPC32) += kasan_init_32.o
> +obj-$(CONFIG_PPC64) += kasan_init_64.o
> diff --git a/arch/powerpc/mm/kasan/kasan_init_64.c b/arch/powerpc/mm/kasan/kasan_init_64.c
> new file mode 100644
> index 000000000000..7fd71b8e883b
> --- /dev/null
> +++ b/arch/powerpc/mm/kasan/kasan_init_64.c
> @@ -0,0 +1,129 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#define DISABLE_BRANCH_PROFILING
> +
> +#include <linux/kasan.h>
> +#include <linux/printk.h>
> +#include <linux/memblock.h>
> +#include <linux/sched/task.h>
> +#include <asm/pgalloc.h>
> +
> +static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
> +{
> + unsigned long va = (unsigned long)kasan_early_shadow_page;
> + phys_addr_t pa = __pa(kasan_early_shadow_page);
> + int i;
> +
> + for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
> + __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
> +}
> +
> +static void __init kasan_populate_pmd(pmd_t *pmdp)
> +{
> + int i;
> +
> + for (i = 0; i < PTRS_PER_PMD; i++)
> + pmd_populate_kernel(&init_mm, pmdp + i, kasan_early_shadow_pte);
> +}
> +
> +static void __init kasan_populate_pud(pud_t *pudp)
> +{
> + int i;
> +
> + for (i = 0; i < PTRS_PER_PUD; i++)
> + pud_populate(&init_mm, pudp + i, kasan_early_shadow_pmd);
> +}
> +
> +static void __init *kasan_alloc_pgtable(unsigned long size)
> +{
> + void *ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
> + __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
> +
> + if (!ptr)
> + panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
> + __func__, size, size, __pa(MAX_DMA_ADDRESS));
> +
> + return ptr;
> +}
> +
> +static int __init kasan_map_page(unsigned long va, unsigned long pa, pgprot_t prot)
> +{
> + pgd_t *pgdp = pgd_offset_k(va);
> + pud_t *pudp;
> + pmd_t *pmdp;
> + pte_t *ptep;
> +
> + if (pgd_none(*pgdp) || (void *)pgd_page_vaddr(*pgdp) == kasan_early_shadow_pud) {
> + pudp = kasan_alloc_pgtable(PUD_TABLE_SIZE);
> + kasan_populate_pud(pudp);
> + pgd_populate(&init_mm, pgdp, pudp);
> + }
> + pudp = pud_offset(pgdp, va);
> + if (pud_none(*pudp) || (void *)pud_page_vaddr(*pudp) == kasan_early_shadow_pmd) {
> + pmdp = kasan_alloc_pgtable(PMD_TABLE_SIZE);
> + kasan_populate_pmd(pmdp);
> + pud_populate(&init_mm, pudp, pmdp);
> + }
> + pmdp = pmd_offset(pudp, va);
> + if (!pmd_present(*pmdp) || (void *)pmd_page_vaddr(*pmdp) == kasan_early_shadow_pte) {
> + ptep = kasan_alloc_pgtable(PTE_TABLE_SIZE);
> + kasan_populate_pte(ptep, PAGE_KERNEL);
> + pmd_populate_kernel(&init_mm, pmdp, ptep);
> + }
> + ptep = pte_offset_kernel(pmdp, va);
> +
> + __set_pte_at(&init_mm, va, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0);
> +
> + return 0;
> +}
> +
> +static void __init kasan_init_region(struct memblock_region *reg)
> +{
> + void *start = __va(reg->base);
> + void *end = __va(reg->base + reg->size);
> + unsigned long k_start, k_end, k_cur;
> +
> + if (start >= end)
> + return;
> +
> + k_start = (unsigned long)kasan_mem_to_shadow(start);
> + k_end = (unsigned long)kasan_mem_to_shadow(end);
> +
> + for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) {
> + void *va = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
> +
> + kasan_map_page(k_cur, __pa(va), PAGE_KERNEL);
> + }
> + flush_tlb_kernel_range(k_start, k_end);
> +}
> +
> +void __init kasan_init(void)
> +{
> + struct memblock_region *reg;
> +
> + for_each_memblock(memory, reg)
> + kasan_init_region(reg);
> +
> + /* It's too early to use clear_page() ! */
> + memset(kasan_early_shadow_page, 0, sizeof(kasan_early_shadow_page));
> +
> + /* Enable error messages */
> + init_task.kasan_depth = 0;
> + pr_info("KASAN init done\n");
> +}
> +
> +/* The early shadow maps everything to a single page of zeroes */
> +asmlinkage void __init kasan_early_init(void)
> +{
> + unsigned long addr = KASAN_SHADOW_START;
> + unsigned long end = KASAN_SHADOW_END;
> + pgd_t *pgdp = pgd_offset_k(addr);
> +
> + kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);
> + kasan_populate_pmd(kasan_early_shadow_pmd);
> + kasan_populate_pud(kasan_early_shadow_pud);
> +
> + do {
> + pgd_populate(&init_mm, pgdp, kasan_early_shadow_pud);
> + } while (pgdp++, addr = pgd_addr_end(addr, end), addr != end);
> +}
> --
> 2.13.3

2019-06-03 07:26:53

by Christophe Leroy

[permalink] [raw]
Subject: Re: [RFC PATCH] powerpc/book3e: KASAN Full support for 64bit

Hi,

Ok, can you share your .config?

Christophe

Le 31/05/2019 à 03:29, Daniel Axtens a écrit :
> Hi Christophe,
>
> I tried this on the t4240rdb and it fails to boot if KASAN is
> enabled. It does boot with the patch applied but KASAN disabled, so that
> narrows it down a little bit.
>
> I need to focus on 3s first so I'll just drop 3e from my patch set for
> now.
>
> Regards,
> Daniel
>
>> The KASAN shadow area is mapped into vmemmap space:
>> 0x8000 0400 0000 0000 to 0x8000 0600 0000 0000.
>> For this vmemmap has to be disabled.
>>
>> Cc: Daniel Axtens <[email protected]>
>> Signed-off-by: Christophe Leroy <[email protected]>
>> ---
>> arch/powerpc/Kconfig | 1 +
>> arch/powerpc/Kconfig.debug | 3 +-
>> arch/powerpc/include/asm/kasan.h | 11 +++
>> arch/powerpc/kernel/Makefile | 2 +
>> arch/powerpc/kernel/head_64.S | 3 +
>> arch/powerpc/kernel/setup_64.c | 20 +++---
>> arch/powerpc/mm/kasan/Makefile | 1 +
>> arch/powerpc/mm/kasan/kasan_init_64.c | 129 ++++++++++++++++++++++++++++++++++
>> 8 files changed, 159 insertions(+), 11 deletions(-)
>> create mode 100644 arch/powerpc/mm/kasan/kasan_init_64.c
>>
>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>> index 1a2fb50126b2..e0b7c45e4dc7 100644
>> --- a/arch/powerpc/Kconfig
>> +++ b/arch/powerpc/Kconfig
>> @@ -174,6 +174,7 @@ config PPC
>> select HAVE_ARCH_AUDITSYSCALL
>> select HAVE_ARCH_JUMP_LABEL
>> select HAVE_ARCH_KASAN if PPC32
>> + select HAVE_ARCH_KASAN if PPC_BOOK3E_64 && !SPARSEMEM_VMEMMAP
>> select HAVE_ARCH_KGDB
>> select HAVE_ARCH_MMAP_RND_BITS
>> select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
>> diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
>> index 61febbbdd02b..b4140dd6b4e4 100644
>> --- a/arch/powerpc/Kconfig.debug
>> +++ b/arch/powerpc/Kconfig.debug
>> @@ -370,4 +370,5 @@ config PPC_FAST_ENDIAN_SWITCH
>> config KASAN_SHADOW_OFFSET
>> hex
>> depends on KASAN
>> - default 0xe0000000
>> + default 0xe0000000 if PPC32
>> + default 0x6800040000000000 if PPC64
>> diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
>> index 296e51c2f066..756b3d58f921 100644
>> --- a/arch/powerpc/include/asm/kasan.h
>> +++ b/arch/powerpc/include/asm/kasan.h
>> @@ -23,10 +23,21 @@
>>
>> #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
>>
>> +#ifdef CONFIG_PPC32
>> #define KASAN_SHADOW_END 0UL
>>
>> #define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)
>>
>> +#else
>> +
>> +#include <asm/pgtable.h>
>> +
>> +#define KASAN_SHADOW_SIZE (KERN_VIRT_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
>> +
>> +#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
>> +
>> +#endif /* CONFIG_PPC32 */
>> +
>> #ifdef CONFIG_KASAN
>> void kasan_early_init(void);
>> void kasan_mmu_init(void);
>> diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
>> index 0ea6c4aa3a20..7f232c06f11d 100644
>> --- a/arch/powerpc/kernel/Makefile
>> +++ b/arch/powerpc/kernel/Makefile
>> @@ -35,6 +35,8 @@ KASAN_SANITIZE_early_32.o := n
>> KASAN_SANITIZE_cputable.o := n
>> KASAN_SANITIZE_prom_init.o := n
>> KASAN_SANITIZE_btext.o := n
>> +KASAN_SANITIZE_paca.o := n
>> +KASAN_SANITIZE_setup_64.o := n
>>
>> ifdef CONFIG_KASAN
>> CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
>> diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
>> index 3fad8d499767..80fbd8024fb2 100644
>> --- a/arch/powerpc/kernel/head_64.S
>> +++ b/arch/powerpc/kernel/head_64.S
>> @@ -966,6 +966,9 @@ start_here_multiplatform:
>> * and SLB setup before we turn on relocation.
>> */
>>
>> +#ifdef CONFIG_KASAN
>> + bl kasan_early_init
>> +#endif
>> /* Restore parameters passed from prom_init/kexec */
>> mr r3,r31
>> bl early_setup /* also sets r13 and SPRG_PACA */
>> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
>> index ba404dd9ce1d..d2bf860dd966 100644
>> --- a/arch/powerpc/kernel/setup_64.c
>> +++ b/arch/powerpc/kernel/setup_64.c
>> @@ -311,6 +311,16 @@ void __init early_setup(unsigned long dt_ptr)
>> DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
>>
>> /*
>> + * Configure exception handlers. This include setting up trampolines
>> + * if needed, setting exception endian mode, etc...
>> + */
>> + configure_exceptions();
>> +
>> + /* Apply all the dynamic patching */
>> + apply_feature_fixups();
>> + setup_feature_keys();
>> +
>> + /*
>> * Do early initialization using the flattened device
>> * tree, such as retrieving the physical memory map or
>> * calculating/retrieving the hash table size.
>> @@ -325,16 +335,6 @@ void __init early_setup(unsigned long dt_ptr)
>> setup_paca(paca_ptrs[boot_cpuid]);
>> fixup_boot_paca();
>>
>> - /*
>> - * Configure exception handlers. This include setting up trampolines
>> - * if needed, setting exception endian mode, etc...
>> - */
>> - configure_exceptions();
>> -
>> - /* Apply all the dynamic patching */
>> - apply_feature_fixups();
>> - setup_feature_keys();
>> -
>> /* Initialize the hash table or TLB handling */
>> early_init_mmu();
>>
>> diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
>> index 6577897673dd..0bfbe3892808 100644
>> --- a/arch/powerpc/mm/kasan/Makefile
>> +++ b/arch/powerpc/mm/kasan/Makefile
>> @@ -3,3 +3,4 @@
>> KASAN_SANITIZE := n
>>
>> obj-$(CONFIG_PPC32) += kasan_init_32.o
>> +obj-$(CONFIG_PPC64) += kasan_init_64.o
>> diff --git a/arch/powerpc/mm/kasan/kasan_init_64.c b/arch/powerpc/mm/kasan/kasan_init_64.c
>> new file mode 100644
>> index 000000000000..7fd71b8e883b
>> --- /dev/null
>> +++ b/arch/powerpc/mm/kasan/kasan_init_64.c
>> @@ -0,0 +1,129 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +
>> +#define DISABLE_BRANCH_PROFILING
>> +
>> +#include <linux/kasan.h>
>> +#include <linux/printk.h>
>> +#include <linux/memblock.h>
>> +#include <linux/sched/task.h>
>> +#include <asm/pgalloc.h>
>> +
>> +static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
>> +{
>> + unsigned long va = (unsigned long)kasan_early_shadow_page;
>> + phys_addr_t pa = __pa(kasan_early_shadow_page);
>> + int i;
>> +
>> + for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
>> + __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
>> +}
>> +
>> +static void __init kasan_populate_pmd(pmd_t *pmdp)
>> +{
>> + int i;
>> +
>> + for (i = 0; i < PTRS_PER_PMD; i++)
>> + pmd_populate_kernel(&init_mm, pmdp + i, kasan_early_shadow_pte);
>> +}
>> +
>> +static void __init kasan_populate_pud(pud_t *pudp)
>> +{
>> + int i;
>> +
>> + for (i = 0; i < PTRS_PER_PUD; i++)
>> + pud_populate(&init_mm, pudp + i, kasan_early_shadow_pmd);
>> +}
>> +
>> +static void __init *kasan_alloc_pgtable(unsigned long size)
>> +{
>> + void *ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
>> + __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
>> +
>> + if (!ptr)
>> + panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
>> + __func__, size, size, __pa(MAX_DMA_ADDRESS));
>> +
>> + return ptr;
>> +}
>> +
>> +static int __init kasan_map_page(unsigned long va, unsigned long pa, pgprot_t prot)
>> +{
>> + pgd_t *pgdp = pgd_offset_k(va);
>> + pud_t *pudp;
>> + pmd_t *pmdp;
>> + pte_t *ptep;
>> +
>> + if (pgd_none(*pgdp) || (void *)pgd_page_vaddr(*pgdp) == kasan_early_shadow_pud) {
>> + pudp = kasan_alloc_pgtable(PUD_TABLE_SIZE);
>> + kasan_populate_pud(pudp);
>> + pgd_populate(&init_mm, pgdp, pudp);
>> + }
>> + pudp = pud_offset(pgdp, va);
>> + if (pud_none(*pudp) || (void *)pud_page_vaddr(*pudp) == kasan_early_shadow_pmd) {
>> + pmdp = kasan_alloc_pgtable(PMD_TABLE_SIZE);
>> + kasan_populate_pmd(pmdp);
>> + pud_populate(&init_mm, pudp, pmdp);
>> + }
>> + pmdp = pmd_offset(pudp, va);
>> + if (!pmd_present(*pmdp) || (void *)pmd_page_vaddr(*pmdp) == kasan_early_shadow_pte) {
>> + ptep = kasan_alloc_pgtable(PTE_TABLE_SIZE);
>> + kasan_populate_pte(ptep, PAGE_KERNEL);
>> + pmd_populate_kernel(&init_mm, pmdp, ptep);
>> + }
>> + ptep = pte_offset_kernel(pmdp, va);
>> +
>> + __set_pte_at(&init_mm, va, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0);
>> +
>> + return 0;
>> +}
>> +
>> +static void __init kasan_init_region(struct memblock_region *reg)
>> +{
>> + void *start = __va(reg->base);
>> + void *end = __va(reg->base + reg->size);
>> + unsigned long k_start, k_end, k_cur;
>> +
>> + if (start >= end)
>> + return;
>> +
>> + k_start = (unsigned long)kasan_mem_to_shadow(start);
>> + k_end = (unsigned long)kasan_mem_to_shadow(end);
>> +
>> + for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) {
>> + void *va = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
>> +
>> + kasan_map_page(k_cur, __pa(va), PAGE_KERNEL);
>> + }
>> + flush_tlb_kernel_range(k_start, k_end);
>> +}
>> +
>> +void __init kasan_init(void)
>> +{
>> + struct memblock_region *reg;
>> +
>> + for_each_memblock(memory, reg)
>> + kasan_init_region(reg);
>> +
>> + /* It's too early to use clear_page() ! */
>> + memset(kasan_early_shadow_page, 0, sizeof(kasan_early_shadow_page));
>> +
>> + /* Enable error messages */
>> + init_task.kasan_depth = 0;
>> + pr_info("KASAN init done\n");
>> +}
>> +
>> +/* The early shadow maps everything to a single page of zeroes */
>> +asmlinkage void __init kasan_early_init(void)
>> +{
>> + unsigned long addr = KASAN_SHADOW_START;
>> + unsigned long end = KASAN_SHADOW_END;
>> + pgd_t *pgdp = pgd_offset_k(addr);
>> +
>> + kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);
>> + kasan_populate_pmd(kasan_early_shadow_pmd);
>> + kasan_populate_pud(kasan_early_shadow_pud);
>> +
>> + do {
>> + pgd_populate(&init_mm, pgdp, kasan_early_shadow_pud);
>> + } while (pgdp++, addr = pgd_addr_end(addr, end), addr != end);
>> +}
>> --
>> 2.13.3

2019-06-03 23:51:59

by Daniel Axtens

[permalink] [raw]
Subject: Re: [RFC PATCH] powerpc/book3e: KASAN Full support for 64bit


Regards,
Daniel

>
> Christophe
>
> Le 31/05/2019 à 03:29, Daniel Axtens a écrit :
>> Hi Christophe,
>>
>> I tried this on the t4240rdb and it fails to boot if KASAN is
>> enabled. It does boot with the patch applied but KASAN disabled, so that
>> narrows it down a little bit.
>>
>> I need to focus on 3s first so I'll just drop 3e from my patch set for
>> now.
>>
>> Regards,
>> Daniel
>>
>>> The KASAN shadow area is mapped into vmemmap space:
>>> 0x8000 0400 0000 0000 to 0x8000 0600 0000 0000.
>>> For this vmemmap has to be disabled.
>>>
>>> Cc: Daniel Axtens <[email protected]>
>>> Signed-off-by: Christophe Leroy <[email protected]>
>>> ---
>>> arch/powerpc/Kconfig | 1 +
>>> arch/powerpc/Kconfig.debug | 3 +-
>>> arch/powerpc/include/asm/kasan.h | 11 +++
>>> arch/powerpc/kernel/Makefile | 2 +
>>> arch/powerpc/kernel/head_64.S | 3 +
>>> arch/powerpc/kernel/setup_64.c | 20 +++---
>>> arch/powerpc/mm/kasan/Makefile | 1 +
>>> arch/powerpc/mm/kasan/kasan_init_64.c | 129 ++++++++++++++++++++++++++++++++++
>>> 8 files changed, 159 insertions(+), 11 deletions(-)
>>> create mode 100644 arch/powerpc/mm/kasan/kasan_init_64.c
>>>
>>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>>> index 1a2fb50126b2..e0b7c45e4dc7 100644
>>> --- a/arch/powerpc/Kconfig
>>> +++ b/arch/powerpc/Kconfig
>>> @@ -174,6 +174,7 @@ config PPC
>>> select HAVE_ARCH_AUDITSYSCALL
>>> select HAVE_ARCH_JUMP_LABEL
>>> select HAVE_ARCH_KASAN if PPC32
>>> + select HAVE_ARCH_KASAN if PPC_BOOK3E_64 && !SPARSEMEM_VMEMMAP
>>> select HAVE_ARCH_KGDB
>>> select HAVE_ARCH_MMAP_RND_BITS
>>> select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
>>> diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
>>> index 61febbbdd02b..b4140dd6b4e4 100644
>>> --- a/arch/powerpc/Kconfig.debug
>>> +++ b/arch/powerpc/Kconfig.debug
>>> @@ -370,4 +370,5 @@ config PPC_FAST_ENDIAN_SWITCH
>>> config KASAN_SHADOW_OFFSET
>>> hex
>>> depends on KASAN
>>> - default 0xe0000000
>>> + default 0xe0000000 if PPC32
>>> + default 0x6800040000000000 if PPC64
>>> diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
>>> index 296e51c2f066..756b3d58f921 100644
>>> --- a/arch/powerpc/include/asm/kasan.h
>>> +++ b/arch/powerpc/include/asm/kasan.h
>>> @@ -23,10 +23,21 @@
>>>
>>> #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
>>>
>>> +#ifdef CONFIG_PPC32
>>> #define KASAN_SHADOW_END 0UL
>>>
>>> #define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)
>>>
>>> +#else
>>> +
>>> +#include <asm/pgtable.h>
>>> +
>>> +#define KASAN_SHADOW_SIZE (KERN_VIRT_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
>>> +
>>> +#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
>>> +
>>> +#endif /* CONFIG_PPC32 */
>>> +
>>> #ifdef CONFIG_KASAN
>>> void kasan_early_init(void);
>>> void kasan_mmu_init(void);
>>> diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
>>> index 0ea6c4aa3a20..7f232c06f11d 100644
>>> --- a/arch/powerpc/kernel/Makefile
>>> +++ b/arch/powerpc/kernel/Makefile
>>> @@ -35,6 +35,8 @@ KASAN_SANITIZE_early_32.o := n
>>> KASAN_SANITIZE_cputable.o := n
>>> KASAN_SANITIZE_prom_init.o := n
>>> KASAN_SANITIZE_btext.o := n
>>> +KASAN_SANITIZE_paca.o := n
>>> +KASAN_SANITIZE_setup_64.o := n
>>>
>>> ifdef CONFIG_KASAN
>>> CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
>>> diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
>>> index 3fad8d499767..80fbd8024fb2 100644
>>> --- a/arch/powerpc/kernel/head_64.S
>>> +++ b/arch/powerpc/kernel/head_64.S
>>> @@ -966,6 +966,9 @@ start_here_multiplatform:
>>> * and SLB setup before we turn on relocation.
>>> */
>>>
>>> +#ifdef CONFIG_KASAN
>>> + bl kasan_early_init
>>> +#endif
>>> /* Restore parameters passed from prom_init/kexec */
>>> mr r3,r31
>>> bl early_setup /* also sets r13 and SPRG_PACA */
>>> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
>>> index ba404dd9ce1d..d2bf860dd966 100644
>>> --- a/arch/powerpc/kernel/setup_64.c
>>> +++ b/arch/powerpc/kernel/setup_64.c
>>> @@ -311,6 +311,16 @@ void __init early_setup(unsigned long dt_ptr)
>>> DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
>>>
>>> /*
>>> + * Configure exception handlers. This include setting up trampolines
>>> + * if needed, setting exception endian mode, etc...
>>> + */
>>> + configure_exceptions();
>>> +
>>> + /* Apply all the dynamic patching */
>>> + apply_feature_fixups();
>>> + setup_feature_keys();
>>> +
>>> + /*
>>> * Do early initialization using the flattened device
>>> * tree, such as retrieving the physical memory map or
>>> * calculating/retrieving the hash table size.
>>> @@ -325,16 +335,6 @@ void __init early_setup(unsigned long dt_ptr)
>>> setup_paca(paca_ptrs[boot_cpuid]);
>>> fixup_boot_paca();
>>>
>>> - /*
>>> - * Configure exception handlers. This include setting up trampolines
>>> - * if needed, setting exception endian mode, etc...
>>> - */
>>> - configure_exceptions();
>>> -
>>> - /* Apply all the dynamic patching */
>>> - apply_feature_fixups();
>>> - setup_feature_keys();
>>> -
>>> /* Initialize the hash table or TLB handling */
>>> early_init_mmu();
>>>
>>> diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
>>> index 6577897673dd..0bfbe3892808 100644
>>> --- a/arch/powerpc/mm/kasan/Makefile
>>> +++ b/arch/powerpc/mm/kasan/Makefile
>>> @@ -3,3 +3,4 @@
>>> KASAN_SANITIZE := n
>>>
>>> obj-$(CONFIG_PPC32) += kasan_init_32.o
>>> +obj-$(CONFIG_PPC64) += kasan_init_64.o
>>> diff --git a/arch/powerpc/mm/kasan/kasan_init_64.c b/arch/powerpc/mm/kasan/kasan_init_64.c
>>> new file mode 100644
>>> index 000000000000..7fd71b8e883b
>>> --- /dev/null
>>> +++ b/arch/powerpc/mm/kasan/kasan_init_64.c
>>> @@ -0,0 +1,129 @@
>>> +// SPDX-License-Identifier: GPL-2.0
>>> +
>>> +#define DISABLE_BRANCH_PROFILING
>>> +
>>> +#include <linux/kasan.h>
>>> +#include <linux/printk.h>
>>> +#include <linux/memblock.h>
>>> +#include <linux/sched/task.h>
>>> +#include <asm/pgalloc.h>
>>> +
>>> +static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
>>> +{
>>> + unsigned long va = (unsigned long)kasan_early_shadow_page;
>>> + phys_addr_t pa = __pa(kasan_early_shadow_page);
>>> + int i;
>>> +
>>> + for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
>>> + __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
>>> +}
>>> +
>>> +static void __init kasan_populate_pmd(pmd_t *pmdp)
>>> +{
>>> + int i;
>>> +
>>> + for (i = 0; i < PTRS_PER_PMD; i++)
>>> + pmd_populate_kernel(&init_mm, pmdp + i, kasan_early_shadow_pte);
>>> +}
>>> +
>>> +static void __init kasan_populate_pud(pud_t *pudp)
>>> +{
>>> + int i;
>>> +
>>> + for (i = 0; i < PTRS_PER_PUD; i++)
>>> + pud_populate(&init_mm, pudp + i, kasan_early_shadow_pmd);
>>> +}
>>> +
>>> +static void __init *kasan_alloc_pgtable(unsigned long size)
>>> +{
>>> + void *ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
>>> + __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
>>> +
>>> + if (!ptr)
>>> + panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
>>> + __func__, size, size, __pa(MAX_DMA_ADDRESS));
>>> +
>>> + return ptr;
>>> +}
>>> +
>>> +static int __init kasan_map_page(unsigned long va, unsigned long pa, pgprot_t prot)
>>> +{
>>> + pgd_t *pgdp = pgd_offset_k(va);
>>> + pud_t *pudp;
>>> + pmd_t *pmdp;
>>> + pte_t *ptep;
>>> +
>>> + if (pgd_none(*pgdp) || (void *)pgd_page_vaddr(*pgdp) == kasan_early_shadow_pud) {
>>> + pudp = kasan_alloc_pgtable(PUD_TABLE_SIZE);
>>> + kasan_populate_pud(pudp);
>>> + pgd_populate(&init_mm, pgdp, pudp);
>>> + }
>>> + pudp = pud_offset(pgdp, va);
>>> + if (pud_none(*pudp) || (void *)pud_page_vaddr(*pudp) == kasan_early_shadow_pmd) {
>>> + pmdp = kasan_alloc_pgtable(PMD_TABLE_SIZE);
>>> + kasan_populate_pmd(pmdp);
>>> + pud_populate(&init_mm, pudp, pmdp);
>>> + }
>>> + pmdp = pmd_offset(pudp, va);
>>> + if (!pmd_present(*pmdp) || (void *)pmd_page_vaddr(*pmdp) == kasan_early_shadow_pte) {
>>> + ptep = kasan_alloc_pgtable(PTE_TABLE_SIZE);
>>> + kasan_populate_pte(ptep, PAGE_KERNEL);
>>> + pmd_populate_kernel(&init_mm, pmdp, ptep);
>>> + }
>>> + ptep = pte_offset_kernel(pmdp, va);
>>> +
>>> + __set_pte_at(&init_mm, va, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0);
>>> +
>>> + return 0;
>>> +}
>>> +
>>> +static void __init kasan_init_region(struct memblock_region *reg)
>>> +{
>>> + void *start = __va(reg->base);
>>> + void *end = __va(reg->base + reg->size);
>>> + unsigned long k_start, k_end, k_cur;
>>> +
>>> + if (start >= end)
>>> + return;
>>> +
>>> + k_start = (unsigned long)kasan_mem_to_shadow(start);
>>> + k_end = (unsigned long)kasan_mem_to_shadow(end);
>>> +
>>> + for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) {
>>> + void *va = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
>>> +
>>> + kasan_map_page(k_cur, __pa(va), PAGE_KERNEL);
>>> + }
>>> + flush_tlb_kernel_range(k_start, k_end);
>>> +}
>>> +
>>> +void __init kasan_init(void)
>>> +{
>>> + struct memblock_region *reg;
>>> +
>>> + for_each_memblock(memory, reg)
>>> + kasan_init_region(reg);
>>> +
>>> + /* It's too early to use clear_page() ! */
>>> + memset(kasan_early_shadow_page, 0, sizeof(kasan_early_shadow_page));
>>> +
>>> + /* Enable error messages */
>>> + init_task.kasan_depth = 0;
>>> + pr_info("KASAN init done\n");
>>> +}
>>> +
>>> +/* The early shadow maps everything to a single page of zeroes */
>>> +asmlinkage void __init kasan_early_init(void)
>>> +{
>>> + unsigned long addr = KASAN_SHADOW_START;
>>> + unsigned long end = KASAN_SHADOW_END;
>>> + pgd_t *pgdp = pgd_offset_k(addr);
>>> +
>>> + kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);
>>> + kasan_populate_pmd(kasan_early_shadow_pmd);
>>> + kasan_populate_pud(kasan_early_shadow_pud);
>>> +
>>> + do {
>>> + pgd_populate(&init_mm, pgdp, kasan_early_shadow_pud);
>>> + } while (pgdp++, addr = pgd_addr_end(addr, end), addr != end);
>>> +}
>>> --
>>> 2.13.3


Attachments:
.config (89.24 kB)
.config
.config.old (89.41 kB)
.config
Download all attachments

2019-06-04 12:49:06

by Christophe Leroy

[permalink] [raw]
Subject: Re: [RFC PATCH] powerpc/book3e: KASAN Full support for 64bit



On 06/03/2019 11:50 PM, Daniel Axtens wrote:
> Christophe Leroy <[email protected]> writes:
>
>> Hi,
>>
>> Ok, can you share your .config ?
>
> Sure! This one is with kasan off as the last build I did was testing to
> see if the code reorganisation was the cause of the issues. (it was not)
>
>
>
>
> This was the kasan-enabled config that failed to boot:
>
>

Same issue with your .config under QEMU:

A go with gdb shows:

Breakpoint 3, 0xc000000000027b6c in exc_0x700_common ()
=> 0xc000000000027b6c <exc_0x700_common+0>: f8 01 00 70 std r0,112(r1)
(gdb) bt
#0 0xc000000000027b6c in exc_0x700_common ()
#1 0xc00000000136f80c in .udbg_init_memcons ()


Without CONFIG_PPC_EARLY_DEBUG, it boots fine for me. Can you check on
your side ?

Deactivating KASAN for arch/powerpc/kernel/udbg.o and
arch/powerpc/sysdev/udbg_memcons.o is not enough, we hit a call to
strstr() in register_early_udbg_console(), and once we get rid of it (in
the same way as in prom_init.c) the next issue is register_console() and
I don't know what to do about that one.

Christophe

>
>
> Regards,
> Daniel
>
>>
>> Christophe
>>
>> Le 31/05/2019 à 03:29, Daniel Axtens a écrit :
>>> Hi Christophe,
>>>
>>> I tried this on the t4240rdb and it fails to boot if KASAN is
>>> enabled. It does boot with the patch applied but KASAN disabled, so that
>>> narrows it down a little bit.
>>>
>>> I need to focus on 3s first so I'll just drop 3e from my patch set for
>>> now.
>>>
>>> Regards,
>>> Daniel
>>>
>>>> The KASAN shadow area is mapped into vmemmap space:
>>>> 0x8000 0400 0000 0000 to 0x8000 0600 0000 0000.
>>>> For this vmemmap has to be disabled.
>>>>
>>>> Cc: Daniel Axtens <[email protected]>
>>>> Signed-off-by: Christophe Leroy <[email protected]>
>>>> ---
>>>> arch/powerpc/Kconfig | 1 +
>>>> arch/powerpc/Kconfig.debug | 3 +-
>>>> arch/powerpc/include/asm/kasan.h | 11 +++
>>>> arch/powerpc/kernel/Makefile | 2 +
>>>> arch/powerpc/kernel/head_64.S | 3 +
>>>> arch/powerpc/kernel/setup_64.c | 20 +++---
>>>> arch/powerpc/mm/kasan/Makefile | 1 +
>>>> arch/powerpc/mm/kasan/kasan_init_64.c | 129 ++++++++++++++++++++++++++++++++++
>>>> 8 files changed, 159 insertions(+), 11 deletions(-)
>>>> create mode 100644 arch/powerpc/mm/kasan/kasan_init_64.c
>>>>
>>>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>>>> index 1a2fb50126b2..e0b7c45e4dc7 100644
>>>> --- a/arch/powerpc/Kconfig
>>>> +++ b/arch/powerpc/Kconfig
>>>> @@ -174,6 +174,7 @@ config PPC
>>>> select HAVE_ARCH_AUDITSYSCALL
>>>> select HAVE_ARCH_JUMP_LABEL
>>>> select HAVE_ARCH_KASAN if PPC32
>>>> + select HAVE_ARCH_KASAN if PPC_BOOK3E_64 && !SPARSEMEM_VMEMMAP
>>>> select HAVE_ARCH_KGDB
>>>> select HAVE_ARCH_MMAP_RND_BITS
>>>> select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
>>>> diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
>>>> index 61febbbdd02b..b4140dd6b4e4 100644
>>>> --- a/arch/powerpc/Kconfig.debug
>>>> +++ b/arch/powerpc/Kconfig.debug
>>>> @@ -370,4 +370,5 @@ config PPC_FAST_ENDIAN_SWITCH
>>>> config KASAN_SHADOW_OFFSET
>>>> hex
>>>> depends on KASAN
>>>> - default 0xe0000000
>>>> + default 0xe0000000 if PPC32
>>>> + default 0x6800040000000000 if PPC64
>>>> diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
>>>> index 296e51c2f066..756b3d58f921 100644
>>>> --- a/arch/powerpc/include/asm/kasan.h
>>>> +++ b/arch/powerpc/include/asm/kasan.h
>>>> @@ -23,10 +23,21 @@
>>>>
>>>> #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
>>>>
>>>> +#ifdef CONFIG_PPC32
>>>> #define KASAN_SHADOW_END 0UL
>>>>
>>>> #define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)
>>>>
>>>> +#else
>>>> +
>>>> +#include <asm/pgtable.h>
>>>> +
>>>> +#define KASAN_SHADOW_SIZE (KERN_VIRT_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
>>>> +
>>>> +#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
>>>> +
>>>> +#endif /* CONFIG_PPC32 */
>>>> +
>>>> #ifdef CONFIG_KASAN
>>>> void kasan_early_init(void);
>>>> void kasan_mmu_init(void);
>>>> diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
>>>> index 0ea6c4aa3a20..7f232c06f11d 100644
>>>> --- a/arch/powerpc/kernel/Makefile
>>>> +++ b/arch/powerpc/kernel/Makefile
>>>> @@ -35,6 +35,8 @@ KASAN_SANITIZE_early_32.o := n
>>>> KASAN_SANITIZE_cputable.o := n
>>>> KASAN_SANITIZE_prom_init.o := n
>>>> KASAN_SANITIZE_btext.o := n
>>>> +KASAN_SANITIZE_paca.o := n
>>>> +KASAN_SANITIZE_setup_64.o := n
>>>>
>>>> ifdef CONFIG_KASAN
>>>> CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
>>>> diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
>>>> index 3fad8d499767..80fbd8024fb2 100644
>>>> --- a/arch/powerpc/kernel/head_64.S
>>>> +++ b/arch/powerpc/kernel/head_64.S
>>>> @@ -966,6 +966,9 @@ start_here_multiplatform:
>>>> * and SLB setup before we turn on relocation.
>>>> */
>>>>
>>>> +#ifdef CONFIG_KASAN
>>>> + bl kasan_early_init
>>>> +#endif
>>>> /* Restore parameters passed from prom_init/kexec */
>>>> mr r3,r31
>>>> bl early_setup /* also sets r13 and SPRG_PACA */
>>>> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
>>>> index ba404dd9ce1d..d2bf860dd966 100644
>>>> --- a/arch/powerpc/kernel/setup_64.c
>>>> +++ b/arch/powerpc/kernel/setup_64.c
>>>> @@ -311,6 +311,16 @@ void __init early_setup(unsigned long dt_ptr)
>>>> DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
>>>>
>>>> /*
>>>> + * Configure exception handlers. This include setting up trampolines
>>>> + * if needed, setting exception endian mode, etc...
>>>> + */
>>>> + configure_exceptions();
>>>> +
>>>> + /* Apply all the dynamic patching */
>>>> + apply_feature_fixups();
>>>> + setup_feature_keys();
>>>> +
>>>> + /*
>>>> * Do early initialization using the flattened device
>>>> * tree, such as retrieving the physical memory map or
>>>> * calculating/retrieving the hash table size.
>>>> @@ -325,16 +335,6 @@ void __init early_setup(unsigned long dt_ptr)
>>>> setup_paca(paca_ptrs[boot_cpuid]);
>>>> fixup_boot_paca();
>>>>
>>>> - /*
>>>> - * Configure exception handlers. This include setting up trampolines
>>>> - * if needed, setting exception endian mode, etc...
>>>> - */
>>>> - configure_exceptions();
>>>> -
>>>> - /* Apply all the dynamic patching */
>>>> - apply_feature_fixups();
>>>> - setup_feature_keys();
>>>> -
>>>> /* Initialize the hash table or TLB handling */
>>>> early_init_mmu();
>>>>
>>>> diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
>>>> index 6577897673dd..0bfbe3892808 100644
>>>> --- a/arch/powerpc/mm/kasan/Makefile
>>>> +++ b/arch/powerpc/mm/kasan/Makefile
>>>> @@ -3,3 +3,4 @@
>>>> KASAN_SANITIZE := n
>>>>
>>>> obj-$(CONFIG_PPC32) += kasan_init_32.o
>>>> +obj-$(CONFIG_PPC64) += kasan_init_64.o
>>>> diff --git a/arch/powerpc/mm/kasan/kasan_init_64.c b/arch/powerpc/mm/kasan/kasan_init_64.c
>>>> new file mode 100644
>>>> index 000000000000..7fd71b8e883b
>>>> --- /dev/null
>>>> +++ b/arch/powerpc/mm/kasan/kasan_init_64.c
>>>> @@ -0,0 +1,129 @@
>>>> +// SPDX-License-Identifier: GPL-2.0
>>>> +
>>>> +#define DISABLE_BRANCH_PROFILING
>>>> +
>>>> +#include <linux/kasan.h>
>>>> +#include <linux/printk.h>
>>>> +#include <linux/memblock.h>
>>>> +#include <linux/sched/task.h>
>>>> +#include <asm/pgalloc.h>
>>>> +
>>>> +static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
>>>> +{
>>>> + unsigned long va = (unsigned long)kasan_early_shadow_page;
>>>> + phys_addr_t pa = __pa(kasan_early_shadow_page);
>>>> + int i;
>>>> +
>>>> + for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
>>>> + __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
>>>> +}
>>>> +
>>>> +static void __init kasan_populate_pmd(pmd_t *pmdp)
>>>> +{
>>>> + int i;
>>>> +
>>>> + for (i = 0; i < PTRS_PER_PMD; i++)
>>>> + pmd_populate_kernel(&init_mm, pmdp + i, kasan_early_shadow_pte);
>>>> +}
>>>> +
>>>> +static void __init kasan_populate_pud(pud_t *pudp)
>>>> +{
>>>> + int i;
>>>> +
>>>> + for (i = 0; i < PTRS_PER_PUD; i++)
>>>> + pud_populate(&init_mm, pudp + i, kasan_early_shadow_pmd);
>>>> +}
>>>> +
>>>> +static void __init *kasan_alloc_pgtable(unsigned long size)
>>>> +{
>>>> + void *ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
>>>> + __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
>>>> +
>>>> + if (!ptr)
>>>> + panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
>>>> + __func__, size, size, __pa(MAX_DMA_ADDRESS));
>>>> +
>>>> + return ptr;
>>>> +}
>>>> +
>>>> +static int __init kasan_map_page(unsigned long va, unsigned long pa, pgprot_t prot)
>>>> +{
>>>> + pgd_t *pgdp = pgd_offset_k(va);
>>>> + pud_t *pudp;
>>>> + pmd_t *pmdp;
>>>> + pte_t *ptep;
>>>> +
>>>> + if (pgd_none(*pgdp) || (void *)pgd_page_vaddr(*pgdp) == kasan_early_shadow_pud) {
>>>> + pudp = kasan_alloc_pgtable(PUD_TABLE_SIZE);
>>>> + kasan_populate_pud(pudp);
>>>> + pgd_populate(&init_mm, pgdp, pudp);
>>>> + }
>>>> + pudp = pud_offset(pgdp, va);
>>>> + if (pud_none(*pudp) || (void *)pud_page_vaddr(*pudp) == kasan_early_shadow_pmd) {
>>>> + pmdp = kasan_alloc_pgtable(PMD_TABLE_SIZE);
>>>> + kasan_populate_pmd(pmdp);
>>>> + pud_populate(&init_mm, pudp, pmdp);
>>>> + }
>>>> + pmdp = pmd_offset(pudp, va);
>>>> + if (!pmd_present(*pmdp) || (void *)pmd_page_vaddr(*pmdp) == kasan_early_shadow_pte) {
>>>> + ptep = kasan_alloc_pgtable(PTE_TABLE_SIZE);
>>>> + kasan_populate_pte(ptep, PAGE_KERNEL);
>>>> + pmd_populate_kernel(&init_mm, pmdp, ptep);
>>>> + }
>>>> + ptep = pte_offset_kernel(pmdp, va);
>>>> +
>>>> + __set_pte_at(&init_mm, va, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0);
>>>> +
>>>> + return 0;
>>>> +}
>>>> +
>>>> +static void __init kasan_init_region(struct memblock_region *reg)
>>>> +{
>>>> + void *start = __va(reg->base);
>>>> + void *end = __va(reg->base + reg->size);
>>>> + unsigned long k_start, k_end, k_cur;
>>>> +
>>>> + if (start >= end)
>>>> + return;
>>>> +
>>>> + k_start = (unsigned long)kasan_mem_to_shadow(start);
>>>> + k_end = (unsigned long)kasan_mem_to_shadow(end);
>>>> +
>>>> + for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) {
>>>> + void *va = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
>>>> +
>>>> + kasan_map_page(k_cur, __pa(va), PAGE_KERNEL);
>>>> + }
>>>> + flush_tlb_kernel_range(k_start, k_end);
>>>> +}
>>>> +
>>>> +void __init kasan_init(void)
>>>> +{
>>>> + struct memblock_region *reg;
>>>> +
>>>> + for_each_memblock(memory, reg)
>>>> + kasan_init_region(reg);
>>>> +
>>>> + /* It's too early to use clear_page() ! */
>>>> + memset(kasan_early_shadow_page, 0, sizeof(kasan_early_shadow_page));
>>>> +
>>>> + /* Enable error messages */
>>>> + init_task.kasan_depth = 0;
>>>> + pr_info("KASAN init done\n");
>>>> +}
>>>> +
>>>> +/* The early shadow maps everything to a single page of zeroes */
>>>> +asmlinkage void __init kasan_early_init(void)
>>>> +{
>>>> + unsigned long addr = KASAN_SHADOW_START;
>>>> + unsigned long end = KASAN_SHADOW_END;
>>>> + pgd_t *pgdp = pgd_offset_k(addr);
>>>> +
>>>> + kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);
>>>> + kasan_populate_pmd(kasan_early_shadow_pmd);
>>>> + kasan_populate_pud(kasan_early_shadow_pud);
>>>> +
>>>> + do {
>>>> + pgd_populate(&init_mm, pgdp, kasan_early_shadow_pud);
>>>> + } while (pgdp++, addr = pgd_addr_end(addr, end), addr != end);
>>>> +}
>>>> --
>>>> 2.13.3

2019-06-11 01:22:56

by Daniel Axtens

[permalink] [raw]
Subject: Re: [RFC PATCH] powerpc/book3e: KASAN Full support for 64bit

Christophe Leroy <[email protected]> writes:

> On 06/03/2019 11:50 PM, Daniel Axtens wrote:
>> Christophe Leroy <[email protected]> writes:
>>
>>> Hi,
>>>
>>> Ok, can you share your .config ?
>>
>> Sure! This one is with kasan off as the last build I did was testing to
>> see if the code reorganisation was the cause of the issues. (it was not)
>>
>>
>>
>>
>> This was the kasan-enabled config that failed to boot:
>>
>>
>
> Same issue with your .config under QEMU:
>
> A go with gdb shows:
>
> Breakpoint 3, 0xc000000000027b6c in exc_0x700_common ()
> => 0xc000000000027b6c <exc_0x700_common+0>: f8 01 00 70 std r0,112(r1)
> (gdb) bt
> #0 0xc000000000027b6c in exc_0x700_common ()
> #1 0xc00000000136f80c in .udbg_init_memcons ()
>

Thanks for debugging this!

> Without CONFIG_PPC_EARLY_DEBUG, it boots fine for me. Can you check on
> your side ?

Yes, that works on my side.

> Deactivating KASAN for arch/powerpc/kernel/udbg.o and
> arch/powerpc/sysdev/udbg_memcons.o is not enough, we hit a call to
> strstr() in register_early_udbg_console(), and once we get rid of it (in
> the same way as in prom_init.c) the next issue is register_console() and
> I don't know what to do about that one.

Disabling early debug seems like a reasonable restriction to add.

I'll have a look at modules across this and book3s next.

Regards,
Daniel

>
> Christophe
>
>>
>>
>> Regards,
>> Daniel
>>
>>>
>>> Christophe
>>>
>>> Le 31/05/2019 à 03:29, Daniel Axtens a écrit :
>>>> Hi Christophe,
>>>>
>>>> I tried this on the t4240rdb and it fails to boot if KASAN is
>>>> enabled. It does boot with the patch applied but KASAN disabled, so that
>>>> narrows it down a little bit.
>>>>
>>>> I need to focus on 3s first so I'll just drop 3e from my patch set for
>>>> now.
>>>>
>>>> Regards,
>>>> Daniel
>>>>
>>>>> The KASAN shadow area is mapped into vmemmap space:
>>>>> 0x8000 0400 0000 0000 to 0x8000 0600 0000 0000.
>>>>> For this vmemmap has to be disabled.
>>>>>
>>>>> Cc: Daniel Axtens <[email protected]>
>>>>> Signed-off-by: Christophe Leroy <[email protected]>
>>>>> ---
>>>>> arch/powerpc/Kconfig | 1 +
>>>>> arch/powerpc/Kconfig.debug | 3 +-
>>>>> arch/powerpc/include/asm/kasan.h | 11 +++
>>>>> arch/powerpc/kernel/Makefile | 2 +
>>>>> arch/powerpc/kernel/head_64.S | 3 +
>>>>> arch/powerpc/kernel/setup_64.c | 20 +++---
>>>>> arch/powerpc/mm/kasan/Makefile | 1 +
>>>>> arch/powerpc/mm/kasan/kasan_init_64.c | 129 ++++++++++++++++++++++++++++++++++
>>>>> 8 files changed, 159 insertions(+), 11 deletions(-)
>>>>> create mode 100644 arch/powerpc/mm/kasan/kasan_init_64.c
>>>>>
>>>>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>>>>> index 1a2fb50126b2..e0b7c45e4dc7 100644
>>>>> --- a/arch/powerpc/Kconfig
>>>>> +++ b/arch/powerpc/Kconfig
>>>>> @@ -174,6 +174,7 @@ config PPC
>>>>> select HAVE_ARCH_AUDITSYSCALL
>>>>> select HAVE_ARCH_JUMP_LABEL
>>>>> select HAVE_ARCH_KASAN if PPC32
>>>>> + select HAVE_ARCH_KASAN if PPC_BOOK3E_64 && !SPARSEMEM_VMEMMAP
>>>>> select HAVE_ARCH_KGDB
>>>>> select HAVE_ARCH_MMAP_RND_BITS
>>>>> select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
>>>>> diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
>>>>> index 61febbbdd02b..b4140dd6b4e4 100644
>>>>> --- a/arch/powerpc/Kconfig.debug
>>>>> +++ b/arch/powerpc/Kconfig.debug
>>>>> @@ -370,4 +370,5 @@ config PPC_FAST_ENDIAN_SWITCH
>>>>> config KASAN_SHADOW_OFFSET
>>>>> hex
>>>>> depends on KASAN
>>>>> - default 0xe0000000
>>>>> + default 0xe0000000 if PPC32
>>>>> + default 0x6800040000000000 if PPC64
>>>>> diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
>>>>> index 296e51c2f066..756b3d58f921 100644
>>>>> --- a/arch/powerpc/include/asm/kasan.h
>>>>> +++ b/arch/powerpc/include/asm/kasan.h
>>>>> @@ -23,10 +23,21 @@
>>>>>
>>>>> #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
>>>>>
>>>>> +#ifdef CONFIG_PPC32
>>>>> #define KASAN_SHADOW_END 0UL
>>>>>
>>>>> #define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)
>>>>>
>>>>> +#else
>>>>> +
>>>>> +#include <asm/pgtable.h>
>>>>> +
>>>>> +#define KASAN_SHADOW_SIZE (KERN_VIRT_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
>>>>> +
>>>>> +#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
>>>>> +
>>>>> +#endif /* CONFIG_PPC32 */
>>>>> +
>>>>> #ifdef CONFIG_KASAN
>>>>> void kasan_early_init(void);
>>>>> void kasan_mmu_init(void);
>>>>> diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
>>>>> index 0ea6c4aa3a20..7f232c06f11d 100644
>>>>> --- a/arch/powerpc/kernel/Makefile
>>>>> +++ b/arch/powerpc/kernel/Makefile
>>>>> @@ -35,6 +35,8 @@ KASAN_SANITIZE_early_32.o := n
>>>>> KASAN_SANITIZE_cputable.o := n
>>>>> KASAN_SANITIZE_prom_init.o := n
>>>>> KASAN_SANITIZE_btext.o := n
>>>>> +KASAN_SANITIZE_paca.o := n
>>>>> +KASAN_SANITIZE_setup_64.o := n
>>>>>
>>>>> ifdef CONFIG_KASAN
>>>>> CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
>>>>> diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
>>>>> index 3fad8d499767..80fbd8024fb2 100644
>>>>> --- a/arch/powerpc/kernel/head_64.S
>>>>> +++ b/arch/powerpc/kernel/head_64.S
>>>>> @@ -966,6 +966,9 @@ start_here_multiplatform:
>>>>> * and SLB setup before we turn on relocation.
>>>>> */
>>>>>
>>>>> +#ifdef CONFIG_KASAN
>>>>> + bl kasan_early_init
>>>>> +#endif
>>>>> /* Restore parameters passed from prom_init/kexec */
>>>>> mr r3,r31
>>>>> bl early_setup /* also sets r13 and SPRG_PACA */
>>>>> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
>>>>> index ba404dd9ce1d..d2bf860dd966 100644
>>>>> --- a/arch/powerpc/kernel/setup_64.c
>>>>> +++ b/arch/powerpc/kernel/setup_64.c
>>>>> @@ -311,6 +311,16 @@ void __init early_setup(unsigned long dt_ptr)
>>>>> DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
>>>>>
>>>>> /*
>>>>> + * Configure exception handlers. This include setting up trampolines
>>>>> + * if needed, setting exception endian mode, etc...
>>>>> + */
>>>>> + configure_exceptions();
>>>>> +
>>>>> + /* Apply all the dynamic patching */
>>>>> + apply_feature_fixups();
>>>>> + setup_feature_keys();
>>>>> +
>>>>> + /*
>>>>> * Do early initialization using the flattened device
>>>>> * tree, such as retrieving the physical memory map or
>>>>> * calculating/retrieving the hash table size.
>>>>> @@ -325,16 +335,6 @@ void __init early_setup(unsigned long dt_ptr)
>>>>> setup_paca(paca_ptrs[boot_cpuid]);
>>>>> fixup_boot_paca();
>>>>>
>>>>> - /*
>>>>> - * Configure exception handlers. This include setting up trampolines
>>>>> - * if needed, setting exception endian mode, etc...
>>>>> - */
>>>>> - configure_exceptions();
>>>>> -
>>>>> - /* Apply all the dynamic patching */
>>>>> - apply_feature_fixups();
>>>>> - setup_feature_keys();
>>>>> -
>>>>> /* Initialize the hash table or TLB handling */
>>>>> early_init_mmu();
>>>>>
>>>>> diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
>>>>> index 6577897673dd..0bfbe3892808 100644
>>>>> --- a/arch/powerpc/mm/kasan/Makefile
>>>>> +++ b/arch/powerpc/mm/kasan/Makefile
>>>>> @@ -3,3 +3,4 @@
>>>>> KASAN_SANITIZE := n
>>>>>
>>>>> obj-$(CONFIG_PPC32) += kasan_init_32.o
>>>>> +obj-$(CONFIG_PPC64) += kasan_init_64.o
>>>>> diff --git a/arch/powerpc/mm/kasan/kasan_init_64.c b/arch/powerpc/mm/kasan/kasan_init_64.c
>>>>> new file mode 100644
>>>>> index 000000000000..7fd71b8e883b
>>>>> --- /dev/null
>>>>> +++ b/arch/powerpc/mm/kasan/kasan_init_64.c
>>>>> @@ -0,0 +1,129 @@
>>>>> +// SPDX-License-Identifier: GPL-2.0
>>>>> +
>>>>> +#define DISABLE_BRANCH_PROFILING
>>>>> +
>>>>> +#include <linux/kasan.h>
>>>>> +#include <linux/printk.h>
>>>>> +#include <linux/memblock.h>
>>>>> +#include <linux/sched/task.h>
>>>>> +#include <asm/pgalloc.h>
>>>>> +
>>>>> +static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
>>>>> +{
>>>>> + unsigned long va = (unsigned long)kasan_early_shadow_page;
>>>>> + phys_addr_t pa = __pa(kasan_early_shadow_page);
>>>>> + int i;
>>>>> +
>>>>> + for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
>>>>> + __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
>>>>> +}
>>>>> +
>>>>> +static void __init kasan_populate_pmd(pmd_t *pmdp)
>>>>> +{
>>>>> + int i;
>>>>> +
>>>>> + for (i = 0; i < PTRS_PER_PMD; i++)
>>>>> + pmd_populate_kernel(&init_mm, pmdp + i, kasan_early_shadow_pte);
>>>>> +}
>>>>> +
>>>>> +static void __init kasan_populate_pud(pud_t *pudp)
>>>>> +{
>>>>> + int i;
>>>>> +
>>>>> + for (i = 0; i < PTRS_PER_PUD; i++)
>>>>> + pud_populate(&init_mm, pudp + i, kasan_early_shadow_pmd);
>>>>> +}
>>>>> +
>>>>> +static void __init *kasan_alloc_pgtable(unsigned long size)
>>>>> +{
>>>>> + void *ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
>>>>> + __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
>>>>> +
>>>>> + if (!ptr)
>>>>> + panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
>>>>> + __func__, size, size, __pa(MAX_DMA_ADDRESS));
>>>>> +
>>>>> + return ptr;
>>>>> +}
>>>>> +
>>>>> +static int __init kasan_map_page(unsigned long va, unsigned long pa, pgprot_t prot)
>>>>> +{
>>>>> + pgd_t *pgdp = pgd_offset_k(va);
>>>>> + pud_t *pudp;
>>>>> + pmd_t *pmdp;
>>>>> + pte_t *ptep;
>>>>> +
>>>>> + if (pgd_none(*pgdp) || (void *)pgd_page_vaddr(*pgdp) == kasan_early_shadow_pud) {
>>>>> + pudp = kasan_alloc_pgtable(PUD_TABLE_SIZE);
>>>>> + kasan_populate_pud(pudp);
>>>>> + pgd_populate(&init_mm, pgdp, pudp);
>>>>> + }
>>>>> + pudp = pud_offset(pgdp, va);
>>>>> + if (pud_none(*pudp) || (void *)pud_page_vaddr(*pudp) == kasan_early_shadow_pmd) {
>>>>> + pmdp = kasan_alloc_pgtable(PMD_TABLE_SIZE);
>>>>> + kasan_populate_pmd(pmdp);
>>>>> + pud_populate(&init_mm, pudp, pmdp);
>>>>> + }
>>>>> + pmdp = pmd_offset(pudp, va);
>>>>> + if (!pmd_present(*pmdp) || (void *)pmd_page_vaddr(*pmdp) == kasan_early_shadow_pte) {
>>>>> + ptep = kasan_alloc_pgtable(PTE_TABLE_SIZE);
>>>>> + kasan_populate_pte(ptep, PAGE_KERNEL);
>>>>> + pmd_populate_kernel(&init_mm, pmdp, ptep);
>>>>> + }
>>>>> + ptep = pte_offset_kernel(pmdp, va);
>>>>> +
>>>>> + __set_pte_at(&init_mm, va, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0);
>>>>> +
>>>>> + return 0;
>>>>> +}
>>>>> +
>>>>> +static void __init kasan_init_region(struct memblock_region *reg)
>>>>> +{
>>>>> + void *start = __va(reg->base);
>>>>> + void *end = __va(reg->base + reg->size);
>>>>> + unsigned long k_start, k_end, k_cur;
>>>>> +
>>>>> + if (start >= end)
>>>>> + return;
>>>>> +
>>>>> + k_start = (unsigned long)kasan_mem_to_shadow(start);
>>>>> + k_end = (unsigned long)kasan_mem_to_shadow(end);
>>>>> +
>>>>> + for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) {
>>>>> + void *va = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
>>>>> +
>>>>> + kasan_map_page(k_cur, __pa(va), PAGE_KERNEL);
>>>>> + }
>>>>> + flush_tlb_kernel_range(k_start, k_end);
>>>>> +}
>>>>> +
>>>>> +void __init kasan_init(void)
>>>>> +{
>>>>> + struct memblock_region *reg;
>>>>> +
>>>>> + for_each_memblock(memory, reg)
>>>>> + kasan_init_region(reg);
>>>>> +
>>>>> + /* It's too early to use clear_page() ! */
>>>>> + memset(kasan_early_shadow_page, 0, sizeof(kasan_early_shadow_page));
>>>>> +
>>>>> + /* Enable error messages */
>>>>> + init_task.kasan_depth = 0;
>>>>> + pr_info("KASAN init done\n");
>>>>> +}
>>>>> +
>>>>> +/* The early shadow maps everything to a single page of zeroes */
>>>>> +asmlinkage void __init kasan_early_init(void)
>>>>> +{
>>>>> + unsigned long addr = KASAN_SHADOW_START;
>>>>> + unsigned long end = KASAN_SHADOW_END;
>>>>> + pgd_t *pgdp = pgd_offset_k(addr);
>>>>> +
>>>>> + kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);
>>>>> + kasan_populate_pmd(kasan_early_shadow_pmd);
>>>>> + kasan_populate_pud(kasan_early_shadow_pud);
>>>>> +
>>>>> + do {
>>>>> + pgd_populate(&init_mm, pgdp, kasan_early_shadow_pud);
>>>>> + } while (pgdp++, addr = pgd_addr_end(addr, end), addr != end);
>>>>> +}
>>>>> --
>>>>> 2.13.3