Some debug setups like CONFIG_KASAN generate huge
kernels with a text size over the 8M limit.
This patch maps a second 8M page when _einittext is over 8M.
Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/kernel/head_8xx.S | 27 +++++++++++++++++++++++++--
arch/powerpc/mm/8xx_mmu.c | 4 ++++
2 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index b171b7c0a0e7..f6bc4392ea9f 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -334,8 +334,8 @@ InstructionTLBMiss:
rlwinm r10, r10, 16, 0xfff8
cmpli cr0, r10, PAGE_OFFSET@h
#ifndef CONFIG_PIN_TLB_TEXT
- /* It is assumed that kernel code fits into the first 8M page */
-0: cmpli cr7, r10, (PAGE_OFFSET + 0x0800000)@h
+ /* It is assumed that kernel code fits into the first two 8M pages */
+0: cmpli cr7, r10, (PAGE_OFFSET + 0x1000000)@h
patch_site 0b, patch__itlbmiss_linmem_top
#endif
#endif
@@ -904,6 +904,29 @@ initial_mmu:
li r8, MI_BOOTINIT /* Create RPN for address 0 */
mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
+ /* Map a second 8M page if needed */
+ lis r9, _einittext@h
+ oris r9, r9, _einittext@l
+ cmpli cr0, r9, (PAGE_OFFSET + 0x8000000)@h
+ blt 1f
+
+#ifdef CONFIG_PIN_TLB_TEXT
+ lis r8, MI_RSV4I@h
+ ori r8, r8, 0x1d00
+
+ mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
+#endif
+
+ lis r8, (KERNELBASE + 0x800000)@h /* Create vaddr for TLB */
+ ori r8, r8, MI_EVALID /* Mark it valid */
+ mtspr SPRN_MI_EPN, r8
+ li r8, MI_PS8MEG /* Set 8M byte page */
+ ori r8, r8, MI_SVALID /* Make it valid */
+ mtspr SPRN_MI_TWC, r8
+ li r8, MI_BOOTINIT /* Create RPN for address 0 */
+ addis r8, r8, 0x80
+ mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
+1:
lis r8, MI_APG_INIT@h /* Set protection modes */
ori r8, r8, MI_APG_INIT@l
mtspr SPRN_MI_AP, r8
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index e2b6687ebb50..1bdbfbf9fe16 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -122,6 +122,10 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
#endif
} else {
mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
+#ifndef CONFIG_PIN_TLB_TEXT
+ mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top,
+ _ALIGN(__pa(_einittext), 8 << 20));
+#endif
}
mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
--
2.13.3
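For reference on the 8xx_mmu.c hunk above: _ALIGN(__pa(_einittext),
8 << 20) rounds the physical address of _einittext up to the next 8M
boundary, so the patched ITLB miss comparison limit covers the whole of
the second page. A standalone sketch of that rounding, where align_8m()
is a hypothetical helper and not kernel code:

#include <stdio.h>

#define SZ_8M	(8u << 20)

/* Round addr up to the next 8M boundary, like _ALIGN(addr, 8 << 20). */
static unsigned int align_8m(unsigned int addr)
{
	return (addr + SZ_8M - 1) & ~(SZ_8M - 1);
}

int main(void)
{
	/* init text ending at 9.5M physical rounds up to 16M */
	printf("0x%08x\n", align_8m(0x00980000));	/* prints 0x01000000 */
	return 0;
}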
On 20/12/2018 at 09:24, Christoph Hellwig wrote:
> On Thu, Dec 20, 2018 at 05:48:25AM +0000, Christophe Leroy wrote:
>> Some debug setup like CONFIG_KASAN generate huge
>> kernels with text size over the 8M limit.
>>
>> This patch maps a second 8M page when _einittext is over 8M.
>
> Do we also need a check to generate a useful warning if we ever overflow
> the 16MB?
>
I don't think any other platform does that (the 40x also maps 16MB,
book3s/601 maps 24MB).
But why not; that could be done in another patch.
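Something like the following at the end of mmu_mapin_ram() could do it;
an untested sketch only, reusing the LARGE_PAGE_SIZE_8M constant already
used in 8xx_mmu.c, and not part of this patch:

	/* Untested sketch: warn if init text spills past the two 8M
	 * pages (16M) mapped at boot.
	 */
	WARN(__pa(_einittext) > 2 * LARGE_PAGE_SIZE_8M,
	     "kernel text extends beyond the 16M mapped at boot\n");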
Is there an easy way to get the link to fail when CONFIG_PIN_TLB_TEXT is
set and _einittext is higher than 16MB?
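One way might be an ASSERT() in the linker script; vmlinux.lds.S is
preprocessed, so it could be gated on CONFIG_PIN_TLB_TEXT. A
hypothetical, untested sketch using the existing _stext and _einittext
symbols:

#ifdef CONFIG_PIN_TLB_TEXT
/* Fail the link if kernel text does not fit in two pinned 8M pages */
ASSERT(_einittext - _stext <= 0x1000000,
       "kernel text is bigger than the 16M pinned ITLB mapping")
#endif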
Or should we just map up to 24MB on the 8xx and consider ourselves on
the safe side with that much?
Christophe