2020-01-14 17:55:07

by Christophe Leroy

Subject: [PATCH 1/5] powerpc/32: add support of KASAN_VMALLOC

Add support for KASAN_VMALLOC on PPC32.

To allow this, the early shadow covering the VMALLOC space
needs to be removed once the high_memory variable is set and
before memblock is freed.

The VMALLOC area also needs to be aligned so that its
boundaries fall on addresses covered by a full shadow page.
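
As an illustration of the alignment constraint: with 4k pages and the
generic KASAN_SHADOW_SCALE_SHIFT of 3, each shadow byte covers 8 bytes,
so one shadow page covers PAGE_SIZE << 3 = 32k of address space. A
minimal standalone sketch of the arithmetic (userspace C, not kernel
code; the ioremap_bot value is only an example):

	#include <stdio.h>

	#define PAGE_SIZE			4096UL
	#define KASAN_SHADOW_SCALE_SHIFT	3
	#define _ALIGN_DOWN(x, a)		((x) & ~((a) - 1))

	int main(void)
	{
		/* example value; the real one comes from MMU setup */
		unsigned long ioremap_bot = 0xff79f000UL;
		unsigned long span = PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT;

		printf("one shadow page covers %lu bytes\n", span);
		printf("VMALLOC_END: %#lx -> %#lx\n",
		       ioremap_bot, _ALIGN_DOWN(ioremap_bot, span));
		return 0;
	}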

Signed-off-by: Christophe Leroy <[email protected]>

---
v3: added missing inclusion of asm/kasan.h, needed when CONFIG_KASAN is not set.

v2: rebased; exclude specific module handling when CONFIG_KASAN_VMALLOC is set.
---
arch/powerpc/Kconfig                         |  1 +
arch/powerpc/include/asm/book3s/32/pgtable.h |  5 +++++
arch/powerpc/include/asm/kasan.h             |  2 ++
arch/powerpc/include/asm/nohash/32/pgtable.h |  5 +++++
arch/powerpc/mm/kasan/kasan_init_32.c        | 33 +++++++++++++++++++++++++++-
arch/powerpc/mm/mem.c                        |  4 ++++
6 files changed, 49 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1ec34e16ed65..a247bbfb03d4 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -173,6 +173,7 @@ config PPC
select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if PPC32
+ select HAVE_ARCH_KASAN_VMALLOC if PPC32
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 0796533d37dd..5b39c11e884a 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -193,7 +193,12 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
#define VMALLOC_END ioremap_bot
+#endif

#ifndef __ASSEMBLY__
#include <linux/sched.h>
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index 296e51c2f066..fbff9ff9032e 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -31,9 +31,11 @@
void kasan_early_init(void);
void kasan_mmu_init(void);
void kasan_init(void);
+void kasan_late_init(void);
#else
static inline void kasan_init(void) { }
static inline void kasan_mmu_init(void) { }
+static inline void kasan_late_init(void) { }
#endif

#endif /* __ASSEMBLY */
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 552b96eef0c8..60c4d829152e 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -114,7 +114,12 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
#define VMALLOC_END ioremap_bot
+#endif

/*
* Bits in a linux-style PTE. These match the bits in the
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 0e6ed4413eea..88036fb88350 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -129,6 +129,31 @@ static void __init kasan_remap_early_shadow_ro(void)
flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
}

+static void __init kasan_unmap_early_shadow_vmalloc(void)
+{
+ unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
+ unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);
+ unsigned long k_cur;
+ phys_addr_t pa = __pa(kasan_early_shadow_page);
+
+ if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+ int ret = kasan_init_shadow_page_tables(k_start, k_end);
+
+ if (ret)
+ panic("kasan: kasan_init_shadow_page_tables() failed");
+ }
+ for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+ pte_t *ptep = pte_offset_kernel(pmd, k_cur);
+
+ if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
+ continue;
+
+ __set_pte_at(&init_mm, k_cur, ptep, __pte(0), 0);
+ }
+ flush_tlb_kernel_range(k_start, k_end);
+}
+
void __init kasan_mmu_init(void)
{
int ret;
@@ -165,7 +190,13 @@ void __init kasan_init(void)
pr_info("KASAN init done\n");
}

-#ifdef CONFIG_MODULES
+void __init kasan_late_init(void)
+{
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+ kasan_unmap_early_shadow_vmalloc();
+}
+
+#if defined(CONFIG_MODULES) && !defined(CONFIG_KASAN_VMALLOC)
void *module_alloc(unsigned long size)
{
void *base;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f5535eae637f..ef7b1119b2e2 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -49,6 +49,7 @@
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>
+#include <asm/kasan.h>

#include <mm/mmu_decl.h>

@@ -301,6 +302,9 @@ void __init mem_init(void)

high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
set_max_mapnr(max_pfn);
+
+ kasan_late_init();
+
memblock_free_all();

#ifdef CONFIG_HIGHMEM
--
2.13.3


2020-01-14 17:55:15

by Christophe Leroy

Subject: [PATCH 3/5] powerpc/32: force KASAN_VMALLOC for modules

Unloading/reloading of modules seems to fail without KASAN_VMALLOC
but works properly with it.

Force selection of KASAN_VMALLOC when MODULES is selected, and drop
the module_alloc() override that existed only to set up the KASAN
shadow for modules.
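
For reference, the PPC32-specific module_alloc() removed below boiled
down to backing the shadow by hand (condensed from the removed lines):

	base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
				    VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC,
				    VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
				    __builtin_return_address(0));
	if (base && kasan_init_region(base, size)) {
		/* shadow allocation failed */
		vfree(base);
		base = NULL;
	}

With KASAN_VMALLOC, the generic vmalloc code allocates and releases the
shadow together with the mapping itself, which keeps module unload and
reload consistent.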

Reported-by: <[email protected]>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=205283
Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/mm/kasan/kasan_init_32.c  | 31 +++++--------------------------
arch/powerpc/platforms/Kconfig.cputype |  1 +
2 files changed, 6 insertions(+), 26 deletions(-)

diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 88036fb88350..b782d92622b4 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -12,7 +12,7 @@
#include <asm/code-patching.h>
#include <mm/mmu_decl.h>

-static pgprot_t kasan_prot_ro(void)
+static pgprot_t __init kasan_prot_ro(void)
{
if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
return PAGE_READONLY;
@@ -20,7 +20,7 @@ static pgprot_t kasan_prot_ro(void)
return PAGE_KERNEL_RO;
}

-static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
+static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
{
unsigned long va = (unsigned long)kasan_early_shadow_page;
phys_addr_t pa = __pa(kasan_early_shadow_page);
@@ -30,7 +30,7 @@ static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
}

-static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
pmd_t *pmd;
unsigned long k_cur, k_next;
@@ -70,7 +70,7 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
return 0;
}

-static void __ref *kasan_get_one_page(void)
+static void __init *kasan_get_one_page(void)
{
if (slab_is_available())
return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
@@ -78,7 +78,7 @@ static void __ref *kasan_get_one_page(void)
return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
}

-static int __ref kasan_init_region(void *start, size_t size)
+static int __init kasan_init_region(void *start, size_t size)
{
unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
@@ -196,27 +196,6 @@ void __init kasan_late_init(void)
kasan_unmap_early_shadow_vmalloc();
}

-#if defined(CONFIG_MODULES) && !defined(CONFIG_KASAN_VMALLOC)
-void *module_alloc(unsigned long size)
-{
- void *base;
-
- base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
- NUMA_NO_NODE, __builtin_return_address(0));
-
- if (!base)
- return NULL;
-
- if (!kasan_init_region(base, size))
- return base;
-
- vfree(base);
-
- return NULL;
-}
-#endif
-
#ifdef CONFIG_PPC_BOOK3S_32
u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};

diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 536a2efcb7f0..851a92ffe290 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -2,6 +2,7 @@
config PPC32
bool
default y if !PPC64
+ select KASAN_VMALLOC if KASAN && MODULES

config PPC64
bool "64-bit kernel"
--
2.13.3

2020-01-14 17:55:23

by Christophe Leroy

Subject: [PATCH 4/5] powerpc/32: Simplify KASAN init

Since kasan_init_region() is no longer used for modules, KASAN init
is always done while slab_is_available() is false, so the slab-based
allocation paths can be dropped.
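
Since every caller now runs before slab is up, the new invariant could
even be asserted; a hypothetical hardening (not part of this patch),
e.g. at the top of kasan_init_shadow_page_tables():

	if (WARN_ON(slab_is_available()))
		return -EINVAL;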

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/mm/kasan/kasan_init_32.c | 26 +++++---------------------
1 file changed, 5 insertions(+), 21 deletions(-)

diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index b782d92622b4..c4bf9ed04f88 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -34,7 +34,6 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned
{
pmd_t *pmd;
unsigned long k_cur, k_next;
- pgprot_t prot = slab_is_available() ? kasan_prot_ro() : PAGE_KERNEL;

pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);

@@ -45,14 +44,11 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned
if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
continue;

- if (slab_is_available())
- new = pte_alloc_one_kernel(&init_mm);
- else
- new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
+ new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);

if (!new)
return -ENOMEM;
- kasan_populate_pte(new, prot);
+ kasan_populate_pte(new, PAGE_KERNEL);

smp_wmb(); /* See comment in __pte_alloc */

@@ -63,39 +59,27 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned
new = NULL;
}
spin_unlock(&init_mm.page_table_lock);
-
- if (new && slab_is_available())
- pte_free_kernel(&init_mm, new);
}
return 0;
}

-static void __init *kasan_get_one_page(void)
-{
- if (slab_is_available())
- return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-
- return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-}
-
static int __init kasan_init_region(void *start, size_t size)
{
unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
unsigned long k_cur;
int ret;
- void *block = NULL;
+ void *block;

ret = kasan_init_shadow_page_tables(k_start, k_end);
if (ret)
return ret;

- if (!slab_is_available())
- block = memblock_alloc(k_end - k_start, PAGE_SIZE);
+ block = memblock_alloc(k_end - k_start, PAGE_SIZE);

for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
- void *va = block ? block + k_cur - k_start : kasan_get_one_page();
+ void *va = block + k_cur - k_start;
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

if (!va)
--
2.13.3

2020-01-14 17:55:56

by Christophe Leroy

Subject: [PATCH 2/5] powerpc/kconfig: move CONFIG_PPC32 into Kconfig.cputype

Move CONFIG_PPC32 to the same place as CONFIG_PPC64 for consistency.

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/Kconfig                   | 4 ----
arch/powerpc/platforms/Kconfig.cputype | 4 ++++
2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a247bbfb03d4..c2a604b9592b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -1,10 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
source "arch/powerpc/platforms/Kconfig.cputype"

-config PPC32
- bool
- default y if !PPC64
-
config 32BIT
bool
default y if PPC32
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 8d7f9c3dc771..536a2efcb7f0 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -1,4 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
+config PPC32
+ bool
+ default y if !PPC64
+
config PPC64
bool "64-bit kernel"
select ZLIB_DEFLATE
--
2.13.3

2020-01-14 17:56:46

by Christophe Leroy

Subject: [PATCH 5/5] powerpc/32: reuse orphaned memblocks in kasan_init_shadow_page_tables()

If concurrent PMD population has happened, the freshly allocated
memblock is orphaned; re-use it for the next PMD instead of leaking it.
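
The pattern is: keep the allocated block across loop iterations and
only allocate again once the previous block has actually been consumed
by pmd_populate_kernel(). A standalone sketch of the idea (userspace C,
not kernel code; the lost races are simulated with a stub):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>

	/* pretend someone else populated slots 0 and 2 in the meantime */
	static bool try_install(int i)
	{
		return i != 0 && i != 2;
	}

	int main(void)
	{
		void *new = NULL;
		int allocs = 0;

		for (int i = 0; i < 5; i++) {
			if (!new) {
				new = malloc(4096);
				allocs++;
			}
			if (!new)
				return 1;
			if (try_install(i))
				new = NULL;	/* consumed */
		}
		free(new);	/* at most one orphan remains */
		printf("%d allocations for 5 slots\n", allocs);
		return 0;
	}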

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/mm/kasan/kasan_init_32.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index c4bf9ed04f88..d3cacd462560 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -34,17 +34,17 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned
{
pmd_t *pmd;
unsigned long k_cur, k_next;
+ pte_t *new = NULL;

pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);

for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
- pte_t *new;
-
k_next = pgd_addr_end(k_cur, k_end);
if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
continue;

- new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
+ if (!new)
+ new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);

if (!new)
return -ENOMEM;
--
2.13.3

2020-01-29 05:19:27

by Michael Ellerman

Subject: Re: [PATCH 1/5] powerpc/32: add support of KASAN_VMALLOC

On Tue, 2020-01-14 at 17:54:00 UTC, Christophe Leroy wrote:
> Add support for KASAN_VMALLOC on PPC32.
>
> To allow this, the early shadow covering the VMALLOC space
> needs to be removed once the high_memory variable is set and
> before memblock is freed.
>
> The VMALLOC area also needs to be aligned so that its
> boundaries fall on addresses covered by a full shadow page.
>
> Signed-off-by: Christophe Leroy <[email protected]>

Series applied to powerpc next, thanks.

https://git.kernel.org/powerpc/c/3d4247fcc938d0ab5cf6fdb752dae07fdeab9736

cheers