2020-11-24 15:27:56

by Christophe Leroy

Subject: [PATCH v1 1/6] powerpc/8xx: DEBUG_PAGEALLOC doesn't require an ITLB miss exception handler

Since commit e611939fc8ec ("powerpc/mm: Ensure change_page_attr()
doesn't invalidate pinned TLBs"), pinned TLBs are no longer
invalidated by __kernel_map_pages() when CONFIG_DEBUG_PAGEALLOC is
selected.

Remove the dependency on CONFIG_DEBUG_PAGEALLOC.

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/kernel/head_8xx.S | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index ee0bfebc375f..66ee62f30d36 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -47,8 +47,7 @@
* - Either we have modules
* - Or we have not pinned the first 8M
*/
-#if defined(CONFIG_MODULES) || !defined(CONFIG_PIN_TLB_TEXT) || \
- defined(CONFIG_DEBUG_PAGEALLOC)
+#if defined(CONFIG_MODULES) || !defined(CONFIG_PIN_TLB_TEXT)
#define ITLB_MISS_KERNEL 1
#endif

--
2.25.0


2020-11-24 15:28:03

by Christophe Leroy

Subject: [PATCH v1 2/6] powerpc/8xx: Always pin kernel text TLB

There is no longer any point in not pinning kernel text, as pinned
TLBs can now be kept even with things like DEBUG_PAGEALLOC.

Remove CONFIG_PIN_TLB_TEXT, making kernel text pinning unconditional.

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/Kconfig | 3 +--
arch/powerpc/kernel/head_8xx.S | 20 +++-----------------
arch/powerpc/mm/nohash/8xx.c | 3 +--
arch/powerpc/platforms/8xx/Kconfig | 7 -------
4 files changed, 5 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e9f13fe08492..bf088b5b0a89 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -795,8 +795,7 @@ config DATA_SHIFT_BOOL
bool "Set custom data alignment"
depends on ADVANCED_OPTIONS
depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC
- depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && \
- (!PIN_TLB_TEXT || !STRICT_KERNEL_RWX))
+ depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX)
help
This option allows you to set the kernel data alignment. When
RAM is mapped by blocks, the alignment needs to fit the size and
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 66ee62f30d36..775b4f4d011e 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -42,15 +42,6 @@
#endif
.endm

-/*
- * We need an ITLB miss handler for kernel addresses if:
- * - Either we have modules
- * - Or we have not pinned the first 8M
- */
-#if defined(CONFIG_MODULES) || !defined(CONFIG_PIN_TLB_TEXT)
-#define ITLB_MISS_KERNEL 1
-#endif
-
/*
* Value for the bits that have fixed value in RPN entries.
* Also used for tagging DAR for DTLBerror.
@@ -209,12 +200,12 @@ InstructionTLBMiss:
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
INVALIDATE_ADJACENT_PAGES_CPU15(r10)
mtspr SPRN_MD_EPN, r10
-#ifdef ITLB_MISS_KERNEL
+#ifdef CONFIG_MODULES
mfcr r11
compare_to_kernel_boundary r10, r10
#endif
mfspr r10, SPRN_M_TWB /* Get level 1 table */
-#ifdef ITLB_MISS_KERNEL
+#ifdef CONFIG_MODULES
blt+ 3f
rlwinm r10, r10, 0, 20, 31
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
@@ -618,10 +609,6 @@ start_here:
lis r0, (MD_TWAM | MD_RSV4I)@h
mtspr SPRN_MD_CTR, r0
#endif
-#ifndef CONFIG_PIN_TLB_TEXT
- li r0, 0
- mtspr SPRN_MI_CTR, r0
-#endif
#if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR)
lis r0, MD_TWAM@h
mtspr SPRN_MD_CTR, r0
@@ -739,7 +726,6 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_MD_CTR, r6
tlbia

-#ifdef CONFIG_PIN_TLB_TEXT
LOAD_REG_IMMEDIATE(r5, 28 << 8)
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
@@ -760,7 +746,7 @@ _GLOBAL(mmu_pin_tlb)
bdnzt lt, 2b
lis r0, MI_RSV4I@h
mtspr SPRN_MI_CTR, r0
-#endif
+
LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
#ifdef CONFIG_PIN_TLB_DATA
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 231ca95f9ffb..19a3eec1d8c5 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -186,8 +186,7 @@ void mmu_mark_initmem_nx(void)
mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);

- if (IS_ENABLED(CONFIG_PIN_TLB_TEXT))
- mmu_pin_tlb(block_mapped_ram, false);
+ mmu_pin_tlb(block_mapped_ram, false);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index cdda034733ff..1a8400bfbe82 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -202,13 +202,6 @@ config PIN_TLB_IMMR
CONFIG_PIN_TLB_DATA is also selected, it will reduce
CONFIG_PIN_TLB_DATA to 24 Mbytes.

-config PIN_TLB_TEXT
- bool "Pinned TLB for TEXT"
- depends on PIN_TLB
- default y
- help
- This pins kernel text with 8M pages.
-
endmenu

endmenu
--
2.25.0

2020-11-24 15:28:11

by Christophe Leroy

Subject: [PATCH v1 5/6] powerpc/8xx: Use SPRN_SPRG_SCRATCH2 in DTLB miss exception

Use SPRN_SPRG_SCRATCH2 instead of DAR as the scratch register in the
DTLB miss exception, so that it matches the ITLB miss exception.

This also simplifies mpc8xx_pmu_del(): as both TLB miss exit paths now
restore r10 from SPRN_SPRG_SCRATCH2, the same instruction can be used
to patch both exit sites.

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/kernel/head_8xx.S | 9 ++++-----
arch/powerpc/perf/8xx-pmu.c | 19 +++++++------------
2 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 45239b06b6ce..35707e86c5f3 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -247,7 +247,7 @@ InstructionTLBMiss:

. = 0x1200
DataStoreTLBMiss:
- mtspr SPRN_DAR, r10
+ mtspr SPRN_SPRG_SCRATCH2, r10
mtspr SPRN_M_TW, r11
mfcr r11

@@ -286,11 +286,11 @@ DataStoreTLBMiss:
li r11, RPN_PATTERN
rlwimi r10, r11, 0, 24, 27 /* Set 24-27 */
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
+ mtspr SPRN_DAR, r11 /* Tag DAR */

/* Restore registers */

-0: mfspr r10, SPRN_DAR
- mtspr SPRN_DAR, r11 /* Tag DAR */
+0: mfspr r10, SPRN_SPRG_SCRATCH2
mfspr r11, SPRN_M_TW
rfi
patch_site 0b, patch__dtlbmiss_exit_1
@@ -300,8 +300,7 @@ DataStoreTLBMiss:
0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
addi r10, r10, 1
stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
- mfspr r10, SPRN_DAR
- mtspr SPRN_DAR, r11 /* Tag DAR */
+ mfspr r10, SPRN_SPRG_SCRATCH2
mfspr r11, SPRN_M_TW
rfi
#endif
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c
index 02db58c7427a..93004ee586a1 100644
--- a/arch/powerpc/perf/8xx-pmu.c
+++ b/arch/powerpc/perf/8xx-pmu.c
@@ -153,6 +153,11 @@ static void mpc8xx_pmu_read(struct perf_event *event)

static void mpc8xx_pmu_del(struct perf_event *event, int flags)
{
+ struct ppc_inst insn;
+
+ /* mfspr r10, SPRN_SPRG_SCRATCH2 */
+ insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) | __PPC_SPR(SPRN_SPRG_SCRATCH2));
+
mpc8xx_pmu_read(event);

/* If it was the last user, stop counting to avoid useles overhead */
@@ -164,22 +169,12 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
mtspr(SPRN_ICTRL, 7);
break;
case PERF_8xx_ID_ITLB_LOAD_MISS:
- if (atomic_dec_return(&itlb_miss_ref) == 0) {
- /* mfspr r10, SPRN_SPRG_SCRATCH2 */
- struct ppc_inst insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) |
- __PPC_SPR(SPRN_SPRG_SCRATCH2));
-
+ if (atomic_dec_return(&itlb_miss_ref) == 0)
patch_instruction_site(&patch__itlbmiss_exit_1, insn);
- }
break;
case PERF_8xx_ID_DTLB_LOAD_MISS:
- if (atomic_dec_return(&dtlb_miss_ref) == 0) {
- /* mfspr r10, SPRN_DAR */
- struct ppc_inst insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) |
- __PPC_SPR(SPRN_DAR));
-
+ if (atomic_dec_return(&dtlb_miss_ref) == 0)
patch_instruction_site(&patch__dtlbmiss_exit_1, insn);
- }
break;
}
}
--
2.25.0

2020-11-24 15:28:40

by Christophe Leroy

Subject: [PATCH v1 4/6] powerpc/8xx: Use SPRN_SPRG_SCRATCH2 in ITLB miss exception

In order to re-enable the MMU earlier, ensure the ITLB miss exception
cannot clobber SPRN_SPRG_SCRATCH0 and SPRN_SPRG_SCRATCH1.
Do so by using SPRN_SPRG_SCRATCH2 and SPRN_M_TW instead, as the
DTLB miss exception already does.

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/kernel/head_8xx.S | 12 ++++++------
arch/powerpc/perf/8xx-pmu.c | 4 ++--
2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 558c8e615ef9..45239b06b6ce 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -190,8 +190,8 @@ SystemCall:
#endif

InstructionTLBMiss:
- mtspr SPRN_SPRG_SCRATCH0, r10
- mtspr SPRN_SPRG_SCRATCH1, r11
+ mtspr SPRN_SPRG_SCRATCH2, r10
+ mtspr SPRN_M_TW, r11

/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@@ -230,8 +230,8 @@ InstructionTLBMiss:
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */

/* Restore registers */
-0: mfspr r10, SPRN_SPRG_SCRATCH0
- mfspr r11, SPRN_SPRG_SCRATCH1
+0: mfspr r10, SPRN_SPRG_SCRATCH2
+ mfspr r11, SPRN_M_TW
rfi
patch_site 0b, patch__itlbmiss_exit_1

@@ -240,8 +240,8 @@ InstructionTLBMiss:
0: lwz r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
addi r10, r10, 1
stw r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
- mfspr r10, SPRN_SPRG_SCRATCH0
- mfspr r11, SPRN_SPRG_SCRATCH1
+ mfspr r10, SPRN_SPRG_SCRATCH2
+ mfspr r11, SPRN_M_TW
rfi
#endif

diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c
index e53c3c161257..02db58c7427a 100644
--- a/arch/powerpc/perf/8xx-pmu.c
+++ b/arch/powerpc/perf/8xx-pmu.c
@@ -165,9 +165,9 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
break;
case PERF_8xx_ID_ITLB_LOAD_MISS:
if (atomic_dec_return(&itlb_miss_ref) == 0) {
- /* mfspr r10, SPRN_SPRG_SCRATCH0 */
+ /* mfspr r10, SPRN_SPRG_SCRATCH2 */
struct ppc_inst insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) |
- __PPC_SPR(SPRN_SPRG_SCRATCH0));
+ __PPC_SPR(SPRN_SPRG_SCRATCH2));

patch_instruction_site(&patch__itlbmiss_exit_1, insn);
}
--
2.25.0

2020-11-24 15:30:20

by Christophe Leroy

Subject: [PATCH v1 6/6] powerpc/ppc-opcode: Add PPC_RAW_MFSPR()

Add PPC_RAW_MFSPR() to replace the open-coded mfspr instruction
building done in 8xx-pmu.c.

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/include/asm/ppc-opcode.h | 3 ++-
arch/powerpc/perf/8xx-pmu.c | 5 +----
2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index a6e3700c4566..da6f300e9788 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -230,7 +230,6 @@
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
#define PPC_INST_RFEBB 0x4c000124
#define PPC_INST_RFID 0x4c000024
-#define PPC_INST_MFSPR 0x7c0002a6
#define PPC_INST_MFSPR_DSCR 0x7c1102a6
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
@@ -507,6 +506,8 @@

#define PPC_RAW_NEG(d, a) (0x7c0000d0 | ___PPC_RT(d) | ___PPC_RA(a))

+#define PPC_RAW_MFSPR(d, spr) (0x7c0002a6 | ___PPC_RT(d) | __PPC_SPR(spr))
+
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
#define PPC_CP_ABORT stringify_in_c(.long PPC_RAW_CP_ABORT)
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c
index 93004ee586a1..f970d1510d3d 100644
--- a/arch/powerpc/perf/8xx-pmu.c
+++ b/arch/powerpc/perf/8xx-pmu.c
@@ -153,10 +153,7 @@ static void mpc8xx_pmu_read(struct perf_event *event)

static void mpc8xx_pmu_del(struct perf_event *event, int flags)
{
- struct ppc_inst insn;
-
- /* mfspr r10, SPRN_SPRG_SCRATCH2 */
- insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) | __PPC_SPR(SPRN_SPRG_SCRATCH2));
+ struct ppc_inst insn = ppc_inst(PPC_RAW_MFSPR(10, SPRN_SPRG_SCRATCH2));

mpc8xx_pmu_read(event);

--
2.25.0
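
For readers unfamiliar with the encoding, PPC_RAW_MFSPR() simply assembles an
mfspr instruction word. Below is a minimal standalone C sketch of the same bit
layout; the RT()/SPR() helpers are simplified stand-ins for the kernel's
___PPC_RT() and __PPC_SPR() macros, and the SPR number 274 used for
SPRN_SPRG_SCRATCH2 is an assumption for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Destination GPR goes in the RT field of the instruction word. */
#define RT(t)		(((uint32_t)(t) & 0x1f) << 21)
/* The 10-bit SPR number is encoded as two swapped 5-bit halves. */
#define SPR(s)		((((uint32_t)(s) & 0x1f) << 16) | ((((uint32_t)(s) >> 5) & 0x1f) << 11))
/* 0x7c0002a6 is the mfspr opcode, as in the hunk above. */
#define RAW_MFSPR(d, spr)	(0x7c0002a6 | RT(d) | SPR(spr))

int main(void)
{
	/* Assuming SPRN_SPRG_SCRATCH2 is SPRG2, i.e. SPR 274, on the 8xx. */
	printf("mfspr r10, 274 -> 0x%08x\n", RAW_MFSPR(10, 274));
	return 0;
}

With the macro in place, building the patched instruction in mpc8xx_pmu_del()
reduces to the single ppc_inst(PPC_RAW_MFSPR(10, SPRN_SPRG_SCRATCH2)) call
shown in the diff.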

2020-12-09 11:57:19

by Christophe Leroy

Subject: Re: [PATCH v1 2/6] powerpc/8xx: Always pin kernel text TLB



On 09/12/2020 at 11:43, Michael Ellerman wrote:
> Christophe Leroy <[email protected]> writes:
>> There is no longer any point in not pinning kernel text, as pinned
>> TLBs can now be kept even with things like DEBUG_PAGEALLOC.
>>
>> Remove CONFIG_PIN_TLB_TEXT, making kernel text pinning unconditional.
>>
>> Signed-off-by: Christophe Leroy <[email protected]>
>> ---
>> arch/powerpc/Kconfig | 3 +--
>> arch/powerpc/kernel/head_8xx.S | 20 +++-----------------
>> arch/powerpc/mm/nohash/8xx.c | 3 +--
>> arch/powerpc/platforms/8xx/Kconfig | 7 -------
>> 4 files changed, 5 insertions(+), 28 deletions(-)
>>
> ...
>> diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
>> index 231ca95f9ffb..19a3eec1d8c5 100644
>> --- a/arch/powerpc/mm/nohash/8xx.c
>> +++ b/arch/powerpc/mm/nohash/8xx.c
>> @@ -186,8 +186,7 @@ void mmu_mark_initmem_nx(void)
>> mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
>> mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
>>
>> - if (IS_ENABLED(CONFIG_PIN_TLB_TEXT))
>> - mmu_pin_tlb(block_mapped_ram, false);
>> + mmu_pin_tlb(block_mapped_ram, false);
>> }
>
> This broke mpc885_ads_defconfig with:

:surprise:

How did I get it working? Anyway, thanks for fixing it.

Christophe


>
> ld: arch/powerpc/mm/nohash/8xx.o: in function `mmu_mark_initmem_nx':
> /home/michael/linux/arch/powerpc/mm/nohash/8xx.c:189: undefined reference to `mmu_pin_tlb'
> make[1]: *** [/home/michael/linux/Makefile:1164: vmlinux] Error 1
> make: *** [Makefile:185: __sub-make] Error 2
>
> Fixed by:
>
> diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
> index 35707e86c5f3..52702f3db6df 100644
> --- a/arch/powerpc/kernel/head_8xx.S
> +++ b/arch/powerpc/kernel/head_8xx.S
> @@ -702,7 +702,6 @@ FixupDAR:/* Entry point for dcbx workaround. */
> mtspr SPRN_DER, r8
> blr
>
> -#ifdef CONFIG_PIN_TLB
> _GLOBAL(mmu_pin_tlb)
> lis r9, (1f - PAGE_OFFSET)@h
> ori r9, r9, (1f - PAGE_OFFSET)@l
> @@ -802,7 +801,6 @@ _GLOBAL(mmu_pin_tlb)
> mtspr SPRN_SRR1, r10
> mtspr SPRN_SRR0, r11
> rfi
> -#endif /* CONFIG_PIN_TLB */
>
> /*
> * We put a few things here that have to be page-aligned.
>
>
> cheers
>

2020-12-09 23:08:35

by Michael Ellerman

Subject: Re: [PATCH v1 2/6] powerpc/8xx: Always pin kernel text TLB

Christophe Leroy <[email protected]> writes:
> There is no longer any point in not pinning kernel text, as pinned
> TLBs can now be kept even with things like DEBUG_PAGEALLOC.
>
> Remove CONFIG_PIN_TLB_TEXT, making kernel text pinning unconditional.
>
> Signed-off-by: Christophe Leroy <[email protected]>
> ---
> arch/powerpc/Kconfig | 3 +--
> arch/powerpc/kernel/head_8xx.S | 20 +++-----------------
> arch/powerpc/mm/nohash/8xx.c | 3 +--
> arch/powerpc/platforms/8xx/Kconfig | 7 -------
> 4 files changed, 5 insertions(+), 28 deletions(-)
>
...
> diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
> index 231ca95f9ffb..19a3eec1d8c5 100644
> --- a/arch/powerpc/mm/nohash/8xx.c
> +++ b/arch/powerpc/mm/nohash/8xx.c
> @@ -186,8 +186,7 @@ void mmu_mark_initmem_nx(void)
> mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
> mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
>
> - if (IS_ENABLED(CONFIG_PIN_TLB_TEXT))
> - mmu_pin_tlb(block_mapped_ram, false);
> + mmu_pin_tlb(block_mapped_ram, false);
> }

This broke mpc885_ads_defconfig with:

ld: arch/powerpc/mm/nohash/8xx.o: in function `mmu_mark_initmem_nx':
/home/michael/linux/arch/powerpc/mm/nohash/8xx.c:189: undefined reference to `mmu_pin_tlb'
make[1]: *** [/home/michael/linux/Makefile:1164: vmlinux] Error 1
make: *** [Makefile:185: __sub-make] Error 2

Fixed by:

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 35707e86c5f3..52702f3db6df 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -702,7 +702,6 @@ FixupDAR:/* Entry point for dcbx workaround. */
mtspr SPRN_DER, r8
blr

-#ifdef CONFIG_PIN_TLB
_GLOBAL(mmu_pin_tlb)
lis r9, (1f - PAGE_OFFSET)@h
ori r9, r9, (1f - PAGE_OFFSET)@l
@@ -802,7 +801,6 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_SRR1, r10
mtspr SPRN_SRR0, r11
rfi
-#endif /* CONFIG_PIN_TLB */

/*
* We put a few things here that have to be page-aligned.


cheers

2020-12-10 05:41:22

by Michael Ellerman

Subject: Re: [PATCH v1 2/6] powerpc/8xx: Always pin kernel text TLB

Christophe Leroy <[email protected]> writes:
> On 09/12/2020 at 11:43, Michael Ellerman wrote:
>> Christophe Leroy <[email protected]> writes:
>>> There is no longer any point in not pinning kernel text, as pinned
>>> TLBs can now be kept even with things like DEBUG_PAGEALLOC.
>>>
>>> Remove CONFIG_PIN_TLB_TEXT, making kernel text pinning unconditional.
>>>
>>> Signed-off-by: Christophe Leroy <[email protected]>
>>> ---
>>> arch/powerpc/Kconfig | 3 +--
>>> arch/powerpc/kernel/head_8xx.S | 20 +++-----------------
>>> arch/powerpc/mm/nohash/8xx.c | 3 +--
>>> arch/powerpc/platforms/8xx/Kconfig | 7 -------
>>> 4 files changed, 5 insertions(+), 28 deletions(-)
>>>
>> ...
>>> diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
>>> index 231ca95f9ffb..19a3eec1d8c5 100644
>>> --- a/arch/powerpc/mm/nohash/8xx.c
>>> +++ b/arch/powerpc/mm/nohash/8xx.c
>>> @@ -186,8 +186,7 @@ void mmu_mark_initmem_nx(void)
>>> mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
>>> mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
>>>
>>> - if (IS_ENABLED(CONFIG_PIN_TLB_TEXT))
>>> - mmu_pin_tlb(block_mapped_ram, false);
>>> + mmu_pin_tlb(block_mapped_ram, false);
>>> }
>>
>> This broke mpc885_ads_defconfig with:
>
> :surprise:
>
> How did I get it working ? Anyway, thanks for fixing it.

No worries. I figured you must have tested with some other series
applied and/or with different configs, it happens :)

cheers

2020-12-15 10:53:11

by Michael Ellerman

Subject: Re: [PATCH v1 1/6] powerpc/8xx: DEBUG_PAGEALLOC doesn't require an ITLB miss exception handler

On Tue, 24 Nov 2020 15:24:54 +0000 (UTC), Christophe Leroy wrote:
> Since commit e611939fc8ec ("powerpc/mm: Ensure change_page_attr()
> doesn't invalidate pinned TLBs"), pinned TLBs are no longer
> invalidated by __kernel_map_pages() when CONFIG_DEBUG_PAGEALLOC is
> selected.
>
> Remove the dependency on CONFIG_DEBUG_PAGEALLOC.

Applied to powerpc/next.

[1/6] powerpc/8xx: DEBUG_PAGEALLOC doesn't require an ITLB miss exception handler
https://git.kernel.org/powerpc/c/613df979da6c032cbe6fa273fb8ca21af022157e
[2/6] powerpc/8xx: Always pin kernel text TLB
https://git.kernel.org/powerpc/c/bccc58986a2f98e3af349c85c5f49aac7fb19ef2
[3/6] powerpc/8xx: Simplify INVALIDATE_ADJACENT_PAGES_CPU15
https://git.kernel.org/powerpc/c/576e02bbf1062b9118d5bbb96a40ed3b6b359f22
[4/6] powerpc/8xx: Use SPRN_SPRG_SCRATCH2 in ITLB miss exception
https://git.kernel.org/powerpc/c/a314ea5abf6dbaf35f14c9bd1d93105260fb9336
[5/6] powerpc/8xx: Use SPRN_SPRG_SCRATCH2 in DTLB miss exception
https://git.kernel.org/powerpc/c/89eecd938cab80f0da18abbd2ed997a521f83f01
[6/6] powerpc/ppc-opcode: Add PPC_RAW_MFSPR()
https://git.kernel.org/powerpc/c/70b588a068668dd7a92ed19cf0373ba92847957c

cheers