2020-11-24 23:56:40

by Christophe Leroy

Subject: [PATCH v1 1/3] powerpc/32s: Remove unused counters incremented by create_hpte()

primary_pteg_full and htab_hash_searches are not used.

Remove them.
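
For reference, each removed stanza amounts to roughly the following
illustrative C (a sketch only; the real code is the lis/lwz/addi/stw
sequence visible in the diff below). Each counter was a plain,
non-atomic increment of a data word that nothing ever read:

	static unsigned int primary_pteg_full;		/* never read */
	static unsigned int htab_hash_searches;	/* never read */

	static void count_hash_search(void)
	{
		htab_hash_searches++;	/* lwz / addi / stw, not atomic */
	}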

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/mm/book3s32/hash_low.S | 15 ---------------
1 file changed, 15 deletions(-)

diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 9a56ba4f68f2..f964fd34dad9 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -359,11 +359,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
beq+ 10f /* no PTE: go look for an empty slot */
tlbie r4

- lis r4, (htab_hash_searches - PAGE_OFFSET)@ha
- lwz r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
- addi r6,r6,1 /* count how many searches we do */
- stw r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
-
/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
mtctr r0
addi r4,r3,-HPTE_SIZE
@@ -393,12 +388,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
beq+ .Lfound_empty

- /* update counter of times that the primary PTEG is full */
- lis r4, (primary_pteg_full - PAGE_OFFSET)@ha
- lwz r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
- addi r6,r6,1
- stw r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
-
patch_site 0f, patch__hash_page_C
/* Search the secondary PTEG for an empty slot */
ori r5,r5,PTE_H /* set H (secondary hash) bit */
@@ -491,10 +480,6 @@ _ASM_NOKPROBE_SYMBOL(create_hpte)
.align 2
next_slot:
.space 4
-primary_pteg_full:
- .space 4
-htab_hash_searches:
- .space 4
.previous

/*
--
2.25.0


2020-11-24 23:56:41

by Christophe Leroy

Subject: [PATCH v1 2/3] powerpc/32s: In add_hash_page(), calculate VSID later

The VSID is only needed by create_hpte(). When _PAGE_HASHPTE is
already set, add_hash_page() bails out without calling create_hpte()
and doesn't need the VSID at all, so calculate it just before the
call to create_hpte() instead of at function entry.
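
For reference, the computation being moved is roughly the following
C (a sketch only, with the skew constants taken from the assembly;
as the code comments note, create_hpte() trims the result to 24 bits):

	static unsigned long vsid(unsigned long context, unsigned long va)
	{
		unsigned long esid = va >> 28;	/* top 4 bits of va */

		return context * (897 * 16) + esid * 0x111;
	}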

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/mm/book3s32/hash_low.S | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index f964fd34dad9..1366e8e4fc05 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -188,12 +188,6 @@ _GLOBAL(add_hash_page)
mflr r0
stw r0,4(r1)

- /* Convert context and va to VSID */
- mulli r3,r3,897*16 /* multiply context by context skew */
- rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
- mulli r0,r0,0x111 /* multiply by ESID skew */
- add r3,r3,r0 /* note create_hpte trims to 24 bits */
-
#ifdef CONFIG_SMP
lwz r8,TASK_CPU(r2) /* to go in mmu_hash_lock */
oris r8,r8,12
@@ -257,6 +251,12 @@ _GLOBAL(add_hash_page)
stwcx. r5,0,r8
bne- 1b

+ /* Convert context and va to VSID */
+ mulli r3,r3,897*16 /* multiply context by context skew */
+ rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
+ mulli r0,r0,0x111 /* multiply by ESID skew */
+ add r3,r3,r0 /* note create_hpte trims to 24 bits */
+
bl create_hpte

9:
--
2.25.0

2020-11-25 02:11:42

by Christophe Leroy

Subject: [PATCH v1 3/3] powerpc/32s: Cleanup around PTE_FLAGS_OFFSET in hash_low.S

PTE_FLAGS_OFFSET is defined in asm/page_32.h but used only
in hash_low.S.

And whether PTE_FLAGS_OFFSET is zero depends on CONFIG_PTE_64BIT.

Instead of tests like #if (PTE_FLAGS_OFFSET != 0), use code
conditional on CONFIG_PTE_64BIT.

Also move the definition of PTE_FLAGS_OFFSET directly into
hash_low.S, which improves readability.
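
For reference, the offset exists because with CONFIG_PTE_64BIT on
big-endian 32-bit powerpc the flag bits live in the low word of the
64-bit pte, i.e. the second 32-bit word in memory, which is the word
used for locking. A hypothetical C layout (illustrative only, not
the kernel's actual pte_t definition):

	struct pte64 {			/* CONFIG_PTE_64BIT */
		unsigned int high;	/* upper physical address bits */
		unsigned int flags;	/* _PAGE_HASHPTE etc., offset 4 */
	};

	struct pte32 {			/* !CONFIG_PTE_64BIT */
		unsigned int flags;	/* whole pte, offset 0 */
	};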

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/include/asm/page_32.h | 6 ------
arch/powerpc/mm/book3s32/hash_low.S | 23 +++++++++++++----------
2 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index d64dfe3ac712..56f217606327 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -16,12 +16,6 @@
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif

-#ifdef CONFIG_PTE_64BIT
-#define PTE_FLAGS_OFFSET 4 /* offset of PTE flags, in bytes */
-#else
-#define PTE_FLAGS_OFFSET 0
-#endif
-
#if defined(CONFIG_PPC_256K_PAGES) || \
(defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2) /* 1/4 of a page */
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 1366e8e4fc05..f559a931b9a8 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -26,6 +26,12 @@
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>

+#ifdef CONFIG_PTE_64BIT
+#define PTE_FLAGS_OFFSET 4 /* offset of PTE flags, in bytes */
+#else
+#define PTE_FLAGS_OFFSET 0
+#endif
+
#ifdef CONFIG_SMP
.section .bss
.align 2
@@ -94,6 +100,11 @@ _GLOBAL(hash_page)
rlwimi r8,r4,22,20,29 /* insert next 10 bits of address */
#else
rlwimi r8,r4,23,20,28 /* compute pte address */
+ /*
+ * If PTE_64BIT is set, the low word is the flags word; use that
+ * word for locking since it contains all the interesting bits.
+ */
+ addi r8,r8,PTE_FLAGS_OFFSET
#endif

/*
@@ -101,13 +112,7 @@ _GLOBAL(hash_page)
* because almost always, there won't be a permission violation
* and there won't already be an HPTE, and thus we will have
* to update the PTE to set _PAGE_HASHPTE. -- paulus.
- *
- * If PTE_64BIT is set, the low word is the flags word; use that
- * word for locking since it contains all the interesting bits.
*/
-#if (PTE_FLAGS_OFFSET != 0)
- addi r8,r8,PTE_FLAGS_OFFSET
-#endif
.Lretry:
lwarx r6,0,r8 /* get linux-style pte, flag word */
#ifdef CONFIG_PPC_KUAP
@@ -511,8 +516,9 @@ _GLOBAL(flush_hash_pages)
rlwimi r5,r4,22,20,29
#else
rlwimi r5,r4,23,20,28
+ addi r5,r5,PTE_FLAGS_OFFSET
#endif
-1: lwz r0,PTE_FLAGS_OFFSET(r5)
+1: lwz r0,0(r5)
cmpwi cr1,r6,1
andi. r0,r0,_PAGE_HASHPTE
bne 2f
@@ -556,9 +562,6 @@ _GLOBAL(flush_hash_pages)
* already clear, we're done (for this pte). If not,
* clear it (atomically) and proceed. -- paulus.
*/
-#if (PTE_FLAGS_OFFSET != 0)
- addi r5,r5,PTE_FLAGS_OFFSET
-#endif
33: lwarx r8,0,r5 /* fetch the pte flags word */
andi. r0,r8,_PAGE_HASHPTE
beq 8f /* done if HASHPTE is already clear */
--
2.25.0

2020-12-15 10:54:28

by Michael Ellerman

Subject: Re: [PATCH v1 1/3] powerpc/32s: Remove unused counters incremented by create_hpte()

On Tue, 24 Nov 2020 19:51:55 +0000 (UTC), Christophe Leroy wrote:
> primary_pteg_full and htab_hash_searches are not used.
>
> Remove them.

Applied to powerpc/next.

[1/3] powerpc/32s: Remove unused counters incremented by create_hpte()
https://git.kernel.org/powerpc/c/c5ccb4e78968fbe64f938a5a012fc8ec25cafabf
[2/3] powerpc/32s: In add_hash_page(), calculate VSID later
https://git.kernel.org/powerpc/c/fec6166b44ded68e68144144a02e498580118f1a
[3/3] powerpc/32s: Cleanup around PTE_FLAGS_OFFSET in hash_low.S
https://git.kernel.org/powerpc/c/da481c4fe0e485cdab5cf4d2761be8b8fb38d3d1

cheers