2021-10-15 18:22:00

by Christophe Leroy

Subject: [PATCH v1 1/8] powerpc/booke: Disable STRICT_KERNEL_RWX, DEBUG_PAGEALLOC and KFENCE

fsl_booke and 44x are not able to map kernel linear memory with
pages, so they can't support DEBUG_PAGEALLOC and KFENCE, and
STRICT_KERNEL_RWX is also a problem for now.

Enable them only on book3s (both 32 and 64, except KFENCE which stays
32-bit only), 8xx and 40x.
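
For context, DEBUG_PAGEALLOC (and likewise KFENCE) relies on the
architecture being able to change the protection of individual pages
of the linear map. A reminder of the generic contract (this is the
existing kernel interface, not something added by this series):

    /* include/linux/mm.h: an arch selecting ARCH_SUPPORTS_DEBUG_PAGEALLOC
     * must be able to map/unmap linear-map pages one by one.
     */
    void __kernel_map_pages(struct page *page, int numpages, int enable);

A platform whose linear memory is only covered by huge TLB entries
(TLBCAMs on fsl_booke, pinned TLB on 44x) cannot honour this at page
granularity.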

Fixes: 88df6e90fa97 ("[POWERPC] DEBUG_PAGEALLOC for 32-bit")
Fixes: 95902e6c8864 ("powerpc/mm: Implement STRICT_KERNEL_RWX on PPC32")
Fixes: 90cbac0e995d ("powerpc: Enable KFENCE for PPC32")
Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/Kconfig | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ba5b66189358..6b9f523882c5 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -138,7 +138,7 @@ config PPC
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
select ARCH_HAS_SET_MEMORY
- select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
+ select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx || 40x) && !HIBERNATION
select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX && !PPC_BOOK3S_32
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
@@ -150,7 +150,7 @@ config PPC
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
- select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC32 || PPC_BOOK3S_64
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_USE_MEMTEST
@@ -190,7 +190,7 @@ config PPC
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14
- select HAVE_ARCH_KFENCE if PPC32
+ select HAVE_ARCH_KFENCE if PPC_BOOK3S_32 || PPC_8xx || 40x
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
--
2.31.1


2021-10-15 18:22:21

by Christophe Leroy

Subject: [PATCH v1 4/8] powerpc/fsl_booke: Enable reloading of TLBCAM without switching to AS1

Avoid switching to AS1 when reloading the TLBCAMs after init for
STRICT_KERNEL_RWX.

When we set up AS1 we expect the entire accessible memory to be mapped
through a single entry; this is no longer the case at the end of init.

We are not changing the size of the TLBCAMs, only their flags, so there
is no need to switch to AS1.

So change loadcam_multi() to not switch to AS1 when the given
temporary tlb entry is 0.
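
To illustrate the resulting convention, a sketch based on the call
sites used later in this series (assuming the prototype declared in
mmu_decl.h):

    /* void loadcam_multi(int first_idx, int num, int tmp_idx); */

    loadcam_multi(0, i, max_cam_idx);  /* init: go through AS1, using
                                          entry max_cam_idx as the
                                          temporary mapping */
    loadcam_multi(0, i, 0);            /* after init: flags-only update,
                                          stay in AS0 */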

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/mm/nohash/tlb_low.S | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index 5add4a51e51f..dd39074de9af 100644
--- a/arch/powerpc/mm/nohash/tlb_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
@@ -369,7 +369,7 @@ _GLOBAL(_tlbivax_bcast)
* extern void loadcam_entry(unsigned int index)
*
* Load TLBCAM[index] entry in to the L2 CAM MMU
- * Must preserve r7, r8, r9, r10 and r11
+ * Must preserve r7, r8, r9, r10, r11, r12
*/
_GLOBAL(loadcam_entry)
mflr r5
@@ -401,7 +401,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
*
* r3 = first entry to write
* r4 = number of entries to write
- * r5 = temporary tlb entry
+ * r5 = temporary tlb entry (0 means no switch to AS1)
*/
_GLOBAL(loadcam_multi)
mflr r8
@@ -409,6 +409,8 @@ _GLOBAL(loadcam_multi)
mfmsr r11
andi. r11,r11,MSR_IS
bne 10f
+ mr. r12, r5
+ beq 10f

/*
* Set up temporary TLB entry that is the same as what we're
@@ -446,6 +448,8 @@ _GLOBAL(loadcam_multi)
/* Don't return to AS=0 if we were in AS=1 at function start */
andi. r11,r11,MSR_IS
bne 3f
+ cmpwi r12, 0
+ beq 3f

/* Return to AS=0 and clear the temporary entry */
mfmsr r6
--
2.31.1

2021-10-15 18:26:53

by Christophe Leroy

Subject: [PATCH v1 7/8] powerpc/fsl_booke: Update of TLBCAMs after init

After init, set read-only memory as ROX and set read-write
memory as RW, if STRICT_KERNEL_RWX is enabled.
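
For reference, a rough sketch of when this runs (the dispatch lives in
the generic powerpc code, outside this diff; shown only to illustrate
the intended flow):

    /*
     * kernel_init()
     *   free_initmem()
     *     mark_initmem_nx()
     *       mmu_mark_initmem_nx()   -> remaps all TLBCAMs (this patch)
     *   mark_readonly()
     *     mark_rodata_ro()
     *       mmu_mark_rodata_ro()    -> nothing left to do here
     */

Because free_initmem() runs before mark_readonly(), doing all the work
in mmu_mark_initmem_nx() is sufficient, which is why
mmu_mark_rodata_ro() is an empty stub below.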

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/mm/mmu_decl.h | 2 +-
arch/powerpc/mm/nohash/fsl_book3e.c | 32 +++++++++++++++++++++++++----
2 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index e13a3c0caa02..0dd4c18f8363 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -168,7 +168,7 @@ static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; }
static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
#endif

-#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_FSL_BOOK3E)
void mmu_mark_initmem_nx(void);
void mmu_mark_rodata_ro(void);
#else
diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/fsl_book3e.c
index c1bc11f46344..978e0bcdfa2c 100644
--- a/arch/powerpc/mm/nohash/fsl_book3e.c
+++ b/arch/powerpc/mm/nohash/fsl_book3e.c
@@ -181,7 +181,7 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
/* Calculate CAM values */
for (i = 0; boundary && i < max_cam_idx; i++) {
unsigned long cam_sz;
- pgprot_t prot = PAGE_KERNEL_X;
+ pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX;

cam_sz = calc_cam_sz(boundary, virt, phys);
if (!dryrun)
@@ -194,7 +194,7 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
}
for (ram -= amount_mapped; ram && i < max_cam_idx; i++) {
unsigned long cam_sz;
- pgprot_t prot = PAGE_KERNEL_X;
+ pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL;

cam_sz = calc_cam_sz(ram, virt, phys);
if (!dryrun)
@@ -209,8 +209,13 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
if (dryrun)
return amount_mapped;

- loadcam_multi(0, i, max_cam_idx);
- tlbcam_index = i;
+ if (init) {
+ loadcam_multi(0, i, max_cam_idx);
+ tlbcam_index = i;
+ } else {
+ loadcam_multi(0, i, 0);
+ WARN_ON(i > tlbcam_index);
+ }

#ifdef CONFIG_PPC64
get_paca()->tcd.esel_next = i;
@@ -279,6 +284,25 @@ void __init adjust_total_lowmem(void)
memblock_set_current_limit(memstart_addr + __max_low_memory);
}

+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mmu_mark_rodata_ro(void)
+{
+ /* Everything is done in mmu_mark_initmem_nx() */
+}
+#endif
+
+void mmu_mark_initmem_nx(void)
+{
+ unsigned long remapped;
+
+ if (!strict_kernel_rwx_enabled())
+ return;
+
+ remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
+
+ WARN_ON(__max_low_memory != remapped);
+}
+
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
--
2.31.1

2021-10-15 18:26:54

by Christophe Leroy

Subject: [PATCH v1 5/8] powerpc/fsl_booke: Tell map_mem_in_cams() if init is done

In order to be able to call map_mem_in_cams() once more after init
for STRICT_KERNEL_RWX, add an 'init' argument.

For now, map_mem_in_cams() is only ever called during init.
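
A short sketch of the resulting interface and of the conventions used
at the call sites below (the two flags are independent):

    unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx,
                                  bool dryrun, bool init);

    /* boot-time mapping: really write the TLBCAMs */
    map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);

    /* KASLR sizing pass: dry run, nothing is written */
    map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, false);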

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/mm/mmu_decl.h | 2 +-
arch/powerpc/mm/nohash/fsl_book3e.c | 12 ++++++------
arch/powerpc/mm/nohash/kaslr_booke.c | 2 +-
arch/powerpc/mm/nohash/tlb.c | 4 ++--
4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index dd1cabc2ea0f..e13a3c0caa02 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -126,7 +126,7 @@ unsigned long mmu_mapin_ram(unsigned long base, unsigned long top);

#ifdef CONFIG_PPC_FSL_BOOK3E
extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx,
- bool dryrun);
+ bool dryrun, bool init);
extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
phys_addr_t phys);
#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/fsl_book3e.c
index fdf1029e62f0..375b2b8238c1 100644
--- a/arch/powerpc/mm/nohash/fsl_book3e.c
+++ b/arch/powerpc/mm/nohash/fsl_book3e.c
@@ -167,7 +167,7 @@ unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,

static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
unsigned long ram, int max_cam_idx,
- bool dryrun)
+ bool dryrun, bool init)
{
int i;
unsigned long amount_mapped = 0;
@@ -202,12 +202,12 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
return amount_mapped;
}

-unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun)
+unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init)
{
unsigned long virt = PAGE_OFFSET;
phys_addr_t phys = memstart_addr;

- return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun);
+ return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init);
}

#ifdef CONFIG_PPC32
@@ -248,7 +248,7 @@ void __init adjust_total_lowmem(void)
ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);

i = switch_to_as1();
- __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false);
+ __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);
restore_to_as0(i, 0, 0, 1);

pr_info("Memory CAM mapping: ");
@@ -319,11 +319,11 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
/* map a 64M area for the second relocation */
if (memstart_addr > start)
map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM,
- false);
+ false, true);
else
map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
0x4000000, CONFIG_LOWMEM_CAM_NUM,
- false);
+ false, true);
restore_to_as0(n, offset, __va(dt_ptr), 1);
/* We should never reach here */
panic("Relocation error");
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 4c74e8a5482b..8fc49b1b4a91 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -314,7 +314,7 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");

ram = min_t(phys_addr_t, __max_low_memory, size);
- ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true);
+ ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, false);
linear_sz = min_t(unsigned long, ram, SZ_512M);

/* If the linear size is smaller than 64M, do not randmize */
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index 5872f69141d5..fc195b9f524b 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -643,7 +643,7 @@ static void early_init_this_mmu(void)

if (map)
linear_map_top = map_mem_in_cams(linear_map_top,
- num_cams, false);
+ num_cams, false, true);
}
#endif

@@ -764,7 +764,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
- true);
+ true, true);

ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
} else
--
2.31.1

2021-10-15 18:26:54

by Christophe Leroy

Subject: [PATCH v1 8/8] powerpc/fsl_booke: Enable STRICT_KERNEL_RWX

Enable STRICT_KERNEL_RWX on fsl_booke.

For that, we need additional TLBCAMs dedicated to linear mapping,
based on the alignment of _sinittext.

By default, up to 768 Mbytes of memory are mapped, using 3 TLBCAMs of
256 Mbytes each.

With a data alignment of 16 Mbytes, we need up to 9 TLBCAMs:
16/16/16/16/64/64/64/256/256

With a data alignment of 4 Mbytes, we need up to 12 TLBCAMs:
4/4/4/4/16/16/16/64/64/64/256/256

With a data alignment of 1 Mbyte, we need up to 15 TLBCAMs:
1/1/1/1/4/4/4/16/16/16/64/64/64/256/256

By default, set a 16 Mbytes alignment as a compromise between memory
usage and the number of TLBCAMs. This can be adjusted manually when
needed; the sketch below shows how the worst-case count is obtained.
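
The 9-entry worst case above can be reproduced with a small
stand-alone demo (hypothetical helper, not kernel code; it only
mirrors the power-of-4 size/alignment constraint that calc_cam_sz()
enforces, here with the ROX/RW boundary at 16 Mbytes):

    #include <stdio.h>

    /* Largest power-of-4 block (in Mbytes) that fits both the current
     * address alignment and the remaining size, as calc_cam_sz()
     * requires for a TLBCAM entry.
     */
    static unsigned int cam_sz(unsigned int addr, unsigned int remaining)
    {
            unsigned int sz;

            for (sz = 256; sz > 1; sz /= 4)
                    if (addr % sz == 0 && sz <= remaining)
                            break;
            return sz;
    }

    int main(void)
    {
            unsigned int boundary = 16, total = 768, addr = 0;

            while (addr < total) {
                    /* map up to the ROX/RW boundary first, then the rest */
                    unsigned int limit = addr < boundary ? boundary : total;
                    unsigned int sz = cam_sz(addr, limit - addr);

                    printf("%u ", sz);  /* 16 16 16 16 64 64 64 256 256 */
                    addr += sz;
            }
            printf("\n");
            return 0;
    }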

For the time being, it doesn't work when the base is randomised
(RANDOMIZE_BASE).

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/Kconfig | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6b9f523882c5..939a47642a9c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -139,6 +139,7 @@ config PPC
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx || 40x) && !HIBERNATION
+ select ARCH_HAS_STRICT_KERNEL_RWX if FSL_BOOKE && !HIBERNATION && !RANDOMIZE_BASE
select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX && !PPC_BOOK3S_32
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
@@ -778,7 +779,8 @@ config DATA_SHIFT_BOOL
bool "Set custom data alignment"
depends on ADVANCED_OPTIONS
depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE
- depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX)
+ depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX) || \
+ FSL_BOOKE
help
This option allows you to set the kernel data alignment. When
RAM is mapped by blocks, the alignment needs to fit the size and
@@ -791,11 +793,13 @@ config DATA_SHIFT
default 24 if STRICT_KERNEL_RWX && PPC64
range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
+ range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && FSL_BOOKE
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
default 23 if STRICT_KERNEL_RWX && PPC_8xx
default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
+ default 24 if STRICT_KERNEL_RWX && FSL_BOOKE
default PPC_PAGE_SHIFT
help
On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.
@@ -1123,7 +1127,10 @@ config LOWMEM_CAM_NUM_BOOL
config LOWMEM_CAM_NUM
depends on FSL_BOOKE
int "Number of CAMs to use to map low memory" if LOWMEM_CAM_NUM_BOOL
- default 3
+ default 3 if !STRICT_KERNEL_RWX
+ default 9 if DATA_SHIFT >= 24
+ default 12 if DATA_SHIFT >= 22
+ default 15

config DYNAMIC_MEMSTART
bool "Enable page aligned dynamic load address for kernel"
--
2.31.1

2021-11-02 11:41:38

by Michael Ellerman

Subject: Re: [PATCH v1 1/8] powerpc/booke: Disable STRICT_KERNEL_RWX, DEBUG_PAGEALLOC and KFENCE

On Fri, 15 Oct 2021 12:02:42 +0200, Christophe Leroy wrote:
> fsl_booke and 44x are not able to map kernel linear memory with
> pages, so they can't support DEBUG_PAGEALLOC and KFENCE, and
> STRICT_KERNEL_RWX is also a problem for now.
>
> Enable those only on book3s (both 32 and 64 except KFENCE), 8xx and 40x.
>
>
> [...]

Applied to powerpc/next.

[1/8] powerpc/booke: Disable STRICT_KERNEL_RWX, DEBUG_PAGEALLOC and KFENCE
https://git.kernel.org/powerpc/c/68b44f94d6370e2c6c790fedd28e637fa9964a93
[2/8] powerpc/fsl_booke: Rename fsl_booke.c to fsl_book3e.c
https://git.kernel.org/powerpc/c/3a75fd709c89cb45b8b1044b8ef0d15027a69f9b
[3/8] powerpc/fsl_booke: Take exec flag into account when setting TLBCAMs
https://git.kernel.org/powerpc/c/01116e6e98b08ab0641fa516ddafb1b1b2088e64
[4/8] powerpc/fsl_booke: Enable reloading of TLBCAM without switching to AS1
https://git.kernel.org/powerpc/c/a97dd9e2f760c6996a8f1385ddab0bfef325b364
[5/8] powerpc/fsl_booke: Tell map_mem_in_cams() if init is done
https://git.kernel.org/powerpc/c/52bda69ae8b5102fe08c9db10f4a1514478e07d3
[6/8] powerpc/fsl_booke: Allocate separate TLBCAMs for readonly memory
https://git.kernel.org/powerpc/c/0b2859a74306b2b89f6e77c216fe0992ff890fa6
[7/8] powerpc/fsl_booke: Update of TLBCAMs after init
https://git.kernel.org/powerpc/c/d5970045cf9e266d9a43941ac0866865fd22a36a
[8/8] powerpc/fsl_booke: Enable STRICT_KERNEL_RWX
https://git.kernel.org/powerpc/c/49e3d8ea62482625c3835f0a33ae9c1dda39ea8f

cheers