2023-12-07 15:06:03

by Alexandre Ghiti

Subject: [PATCH RFC/RFT 0/4] Remove preventive sfence.vma

In RISC-V, after a new mapping is established, a sfence.vma needs to be
emitted, for one of two reasons:

- if the uarch caches invalid entries, we need to invalidate them, otherwise
we would trap on a stale invalid entry,
- if the uarch does not cache invalid entries, a reordered access could fail
to see the new mapping and then trap (sfence.vma acts as a fence).
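
For reference, the preventive flush that this series removes is the one
currently emitted by riscv's update_mmu_cache_range() (simplified from the
hunk deleted in patch 3):

	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);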

We can actually avoid emitting those (mostly) useless and costly sfence.vma
by handling the traps instead (the idea is sketched after the list below):

- for new kernel mappings: only vmalloc mappings need to be taken care of,
other new mappings are rare and already emit the required sfence.vma if
needed.
That must be achieved very early in the exception path as explained in
patch 1, and this also fixes our fragile way of dealing with vmalloc faults.

- for new user mappings: that can be handled in the page fault path as done
in patch 3.
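
As a sketch only (the helper name is hypothetical, this is not the actual
code from patches 1 and 3), the lazy strategy boils down to the following
shape in the trap handler:

	/*
	 * Illustrative sketch: if the page table already holds a valid
	 * entry for the faulting address, the trap was only caused by a
	 * stale or missing TLB entry, so a local fence is enough and the
	 * faulting access can simply be retried.
	 */
	static bool fixup_stale_tlb_fault(unsigned long addr, pte_t *ptep)
	{
		if (pte_present(ptep_get(ptep))) {
			local_flush_tlb_page(addr);	/* sfence.vma on addr */
			return true;			/* retry the access */
		}
		return false;	/* genuine fault, handle it as usual */
	}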

Patch 2 is very likely a TEMP patch: it allows detecting at runtime whether
a uarch caches invalid TLB entries.

Patch 4 is a TEMP patch which exposes through debugfs the number of
sfence.vma emitted, which can be used for benchmarking.
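
Since the counters are created with debugfs_create_u64() and a NULL parent,
they should show up at the debugfs root: assuming debugfs is mounted at
/sys/kernel/debug, reading e.g. /sys/kernel/debug/nr_sfence_vma before and
after a workload gives the number of sfence.vma it emitted.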

On our uarch, which does not cache invalid entries, with a 6.5 kernel, the
gains are measurable:

* Kernel boot: 6%
* ltp - mmapstress01: 8%
* lmbench - lat_pagefault: 20%
* lmbench - lat_mmap: 5%

On uarchs that cache invalid entries, the results are more mixed and need
to be explored more thoroughly (if anyone is interested!): that can be
explained by the extra page faults, which, depending on "how much" the
uarch caches invalid entries, could cancel the benefits of removing the
preventive sfence.vma.
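
As a back-of-the-envelope model (an assumption, not something measured
here), removing the preventive flushes is a win as long as

	nr_new_mappings * cost(sfence.vma)
		> nr_extra_faults * (cost(trap) + cost(sfence.vma))

and the more aggressively a uarch caches invalid entries, the larger
nr_extra_faults gets.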

Ved Shanbhogue has prepared a new extension to be used by uarchs that do
not cache invalid entries, which will most likely be used instead of patch 2.

Thanks to Ved and Matt Evans for triggering the discussion that led to
this patchset!

That's an RFC, so please don't mind the checkpatch warnings and dirty
comments. It applies on top of v6.6.

Any feedback, tests or relevant benchmarks are welcome :)

Alexandre Ghiti (4):
riscv: Stop emitting preventive sfence.vma for new vmalloc mappings
riscv: Add a runtime detection of invalid TLB entries caching
riscv: Stop emitting preventive sfence.vma for new userspace mappings
TEMP: riscv: Add debugfs interface to retrieve #sfence.vma

arch/arm64/include/asm/pgtable.h | 2 +-
arch/mips/include/asm/pgtable.h | 6 +-
arch/powerpc/include/asm/book3s/64/tlbflush.h | 8 +-
arch/riscv/include/asm/cacheflush.h | 19 ++-
arch/riscv/include/asm/pgtable.h | 45 ++++---
arch/riscv/include/asm/thread_info.h | 5 +
arch/riscv/include/asm/tlbflush.h | 4 +
arch/riscv/kernel/asm-offsets.c | 5 +
arch/riscv/kernel/entry.S | 94 +++++++++++++
arch/riscv/kernel/sbi.c | 12 ++
arch/riscv/mm/init.c | 126 ++++++++++++++++++
arch/riscv/mm/tlbflush.c | 17 +++
include/linux/pgtable.h | 8 +-
mm/memory.c | 12 +-
14 files changed, 331 insertions(+), 32 deletions(-)

--
2.39.2


2023-12-07 15:06:22

by Alexandre Ghiti

Subject: [PATCH RFC/RFT 2/4] riscv: Add a runtime detection of invalid TLB entries caching

This mechanism allows uarchs that do not cache invalid TLB entries to
completely bypass the sfence.vma introduced by the previous commit.
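
In outline, the detection below works like this (this is only a paraphrase
of the code, no logic is added):

	/*
	 * 1. Map a PMD-sized region with an invalid entry and touch it:
	 *    we trap by design, and a uarch that caches invalid entries
	 *    now holds that entry in its TLB.
	 * 2. Replace it with a valid entry, deliberately *without*
	 *    emitting a sfence.vma.
	 * 3. Touch the region again, retrying a few times since accesses
	 *    may be reordered: still trapping after the retries means the
	 *    uarch cached the invalid entry.
	 */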

Signed-off-by: Alexandre Ghiti <[email protected]>
---
arch/riscv/mm/init.c | 124 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 124 insertions(+)

diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 379403de6c6f..2e854613740c 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -56,6 +56,8 @@ bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KER
EXPORT_SYMBOL(pgtable_l4_enabled);
EXPORT_SYMBOL(pgtable_l5_enabled);

+bool tlb_caching_invalid_entries;
+
phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);

@@ -750,6 +752,18 @@ static void __init disable_pgtable_l4(void)
satp_mode = SATP_MODE_39;
}

+static void __init enable_pgtable_l5(void)
+{
+ pgtable_l5_enabled = true;
+ satp_mode = SATP_MODE_57;
+}
+
+static void __init enable_pgtable_l4(void)
+{
+ pgtable_l4_enabled = true;
+ satp_mode = SATP_MODE_48;
+}
+
static int __init print_no4lvl(char *p)
{
pr_info("Disabled 4-level and 5-level paging");
@@ -826,6 +840,112 @@ static __init void set_satp_mode(uintptr_t dtb_pa)
memset(early_pud, 0, PAGE_SIZE);
memset(early_pmd, 0, PAGE_SIZE);
}
+
+/* Determine at runtime if the uarch caches invalid TLB entries */
+static __init void set_tlb_caching_invalid_entries(void)
+{
+#define NR_RETRIES_CACHING_INVALID_ENTRIES 50
+ uintptr_t set_tlb_caching_invalid_entries_pmd = ((unsigned long)set_tlb_caching_invalid_entries) & PMD_MASK;
+ // TODO the test_addr as defined below could go into another pud...
+ uintptr_t test_addr = set_tlb_caching_invalid_entries_pmd + 2 * PMD_SIZE;
+ pmd_t valid_pmd;
+ u64 satp;
+ int i = 0;
+
+ /* To ease the page table creation */
+ disable_pgtable_l5();
+ disable_pgtable_l4();
+
+ /* Establish a mapping for set_tlb_caching_invalid_entries() in sv39 */
+ create_pgd_mapping(early_pg_dir,
+ set_tlb_caching_invalid_entries_pmd,
+ (uintptr_t)early_pmd,
+ PGDIR_SIZE, PAGE_TABLE);
+
+ /* Handle the case where set_tlb_caching_invalid_entries straddles 2 PMDs */
+ create_pmd_mapping(early_pmd,
+ set_tlb_caching_invalid_entries_pmd,
+ set_tlb_caching_invalid_entries_pmd,
+ PMD_SIZE, PAGE_KERNEL_EXEC);
+ create_pmd_mapping(early_pmd,
+ set_tlb_caching_invalid_entries_pmd + PMD_SIZE,
+ set_tlb_caching_invalid_entries_pmd + PMD_SIZE,
+ PMD_SIZE, PAGE_KERNEL_EXEC);
+
+ /* Establish an invalid mapping */
+ create_pmd_mapping(early_pmd, test_addr, 0, PMD_SIZE, __pgprot(0));
+
+ /* Precompute the valid pmd here because the mapping for pfn_pmd() won't exist */
+ valid_pmd = pfn_pmd(PFN_DOWN(set_tlb_caching_invalid_entries_pmd), PAGE_KERNEL);
+
+ local_flush_tlb_all();
+ satp = PFN_DOWN((uintptr_t)&early_pg_dir) | SATP_MODE_39;
+ csr_write(CSR_SATP, satp);
+
+ /*
+ * Set stvec to after the trapping access, access this invalid mapping
+ * and legitimately trap
+ */
+ // TODO: Should I save the previous stvec?
+#define ASM_STR(x) __ASM_STR(x)
+ asm volatile(
+ "la a0, 1f \n"
+ "csrw " ASM_STR(CSR_TVEC) ", a0 \n"
+ "ld a0, 0(%0) \n"
+ ".align 2 \n"
+ "1: \n"
+ :
+ : "r" (test_addr)
+ : "a0"
+ );
+
+ /* Now establish a valid mapping to check if the invalid one is cached */
+ early_pmd[pmd_index(test_addr)] = valid_pmd;
+
+ /*
+ * Access the valid mapping multiple times: indeed, we can't use
+ * sfence.vma as a barrier to make sure the cpu did not reorder accesses
+ * so we may trap even if the uarch does not cache invalid entries. By
+ * trying a few times, we make sure that those uarchs will see the right
+ * mapping at some point.
+ */
+
+ i = NR_RETRIES_CACHING_INVALID_ENTRIES;
+
+#define ASM_STR(x) __ASM_STR(x)
+ asm_volatile_goto(
+ "la a0, 1f \n"
+ "csrw " ASM_STR(CSR_TVEC) ", a0 \n"
+ ".align 2 \n"
+ "1: \n"
+ "addi %0, %0, -1 \n"
+ "blt %0, zero, %l[caching_invalid_entries] \n"
+ "ld a0, 0(%1) \n"
+ :
+ : "r" (i), "r" (test_addr)
+ : "a0"
+ : caching_invalid_entries
+ );
+
+ csr_write(CSR_SATP, 0ULL);
+ local_flush_tlb_all();
+
+ /* If we don't trap, the uarch does not cache invalid entries! */
+ tlb_caching_invalid_entries = false;
+ goto clean;
+
+caching_invalid_entries:
+ csr_write(CSR_SATP, 0ULL);
+ local_flush_tlb_all();
+
+ tlb_caching_invalid_entries = true;
+clean:
+ memset(early_pg_dir, 0, PAGE_SIZE);
+ memset(early_pmd, 0, PAGE_SIZE);
+
+ enable_pgtable_l4();
+ enable_pgtable_l5();
+}
#endif

/*
@@ -1072,6 +1192,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#endif

#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
+ set_tlb_caching_invalid_entries();
set_satp_mode(dtb_pa);
#endif

@@ -1322,6 +1443,9 @@ static void __init setup_vm_final(void)
local_flush_tlb_all();

pt_ops_set_late();
+
+ pr_info("uarch caches invalid entries: %s",
+ tlb_caching_invalid_entries ? "yes" : "no");
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
--
2.39.2

2023-12-07 15:07:24

by Alexandre Ghiti

Subject: [PATCH RFC/RFT 3/4] riscv: Stop emitting preventive sfence.vma for new userspace mappings

The preventive sfence.vma were emitted because new mappings must be made
visible to the page table walker, whether or not the uarch caches invalid
entries.

Actually, there is no need to preventively emit a sfence.vma for new
userspace mappings: this can be handled in the page fault path instead.

This drastically reduces the number of sfence.vma emitted:

* Ubuntu boot to login:
Before: ~630k sfence.vma
After: ~200k sfence.vma

* ltp - mmapstress01
Before: ~45k
After: ~6.3k

* lmbench - lat_pagefault
Before: ~665k
After: 832 (!)

* lmbench - lat_mmap
Before: ~546k
After: 718 (!)

The only issue with the removal of the sfence.vma in update_mmu_cache() is
that, on uarchs that cache invalid entries, those entries won't be
invalidated until the process takes a fault: so that's an additional fault
in those cases.
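
Condensed from the hunks below, once handle_pte_fault() finds the PTE
unchanged, the fault path now behaves as follows (see mm/memory.c):

	if (vmf->flags & FAULT_FLAG_WRITE)
		flush_tlb_fix_spurious_write_fault(vmf->vma, vmf->address,
						   vmf->pte);
	else
		/* on riscv: flushes only if the uarch caches invalid entries */
		flush_tlb_fix_spurious_read_fault(vmf->vma, vmf->address,
						  vmf->pte);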

Signed-off-by: Alexandre Ghiti <[email protected]>
---
arch/arm64/include/asm/pgtable.h | 2 +-
arch/mips/include/asm/pgtable.h | 6 +--
arch/powerpc/include/asm/book3s/64/tlbflush.h | 8 ++--
arch/riscv/include/asm/pgtable.h | 43 +++++++++++--------
include/linux/pgtable.h | 8 +++-
mm/memory.c | 12 +++++-
6 files changed, 48 insertions(+), 31 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7f7d9b1df4e5..728f25f529a5 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -57,7 +57,7 @@ static inline bool arch_thp_swp_supported(void)
* fault on one CPU which has been handled concurrently by another CPU
* does not need to perform additional invalidation.
*/
-#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
+#define flush_tlb_fix_spurious_write_fault(vma, address, ptep) do { } while (0)

/*
* ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 430b208c0130..84439fe6ed29 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -478,9 +478,9 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
return __pgprot(prot);
}

-static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep)
+static inline void flush_tlb_fix_spurious_write_fault(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
{
}

diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 1950c1b825b4..7166d56f90db 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -128,10 +128,10 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */

-#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
-static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep)
+#define flush_tlb_fix_spurious_write_fault flush_tlb_fix_spurious_write_fault
+static inline void flush_tlb_fix_spurious_write_fault(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
{
/*
* Book3S 64 does not require spurious fault flushes because the PTE
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index b2ba3f79cfe9..89aa5650f104 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -472,28 +472,20 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, unsigned int nr)
{
- /*
- * The kernel assumes that TLBs don't cache invalid entries, but
- * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
- * cache flush; it is necessary even after writing invalid entries.
- * Relying on flush_tlb_fix_spurious_fault would suffice, but
- * the extra traps reduce performance. So, eagerly SFENCE.VMA.
- */
- while (nr--)
- local_flush_tlb_page(address + nr * PAGE_SIZE);
}
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define __HAVE_ARCH_UPDATE_MMU_TLB
-#define update_mmu_tlb update_mmu_cache
+static inline void update_mmu_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ flush_tlb_range(vma, address, address + PAGE_SIZE);
+}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
- pte_t *ptep = (pte_t *)pmdp;
-
- update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
@@ -548,13 +540,26 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
- if (!pte_same(*ptep, entry))
+ if (!pte_same(*ptep, entry)) {
__set_pte_at(ptep, entry);
- /*
- * update_mmu_cache will unconditionally execute, handling both
- * the case that the PTE changed and the spurious fault case.
- */
- return true;
+ /* Only uarchs without Svadu are impacted here */
+ flush_tlb_page(vma, address);
+ return true;
+ }
+
+ return false;
+}
+
+extern u64 nr_sfence_vma_handle_exception;
+extern bool tlb_caching_invalid_entries;
+
+#define flush_tlb_fix_spurious_read_fault flush_tlb_fix_spurious_read_fault
+static inline void flush_tlb_fix_spurious_read_fault(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ if (tlb_caching_invalid_entries)
+ flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index af7639c3b0a3..7abaf42ef612 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -931,8 +931,12 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
# define pte_accessible(mm, pte) ((void)(pte), 1)
#endif

-#ifndef flush_tlb_fix_spurious_fault
-#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
+#ifndef flush_tlb_fix_spurious_write_fault
+#define flush_tlb_fix_spurious_write_fault(vma, address, ptep) flush_tlb_page(vma, address)
+#endif
+
+#ifndef flush_tlb_fix_spurious_read_fault
+#define flush_tlb_fix_spurious_read_fault(vma, address, ptep)
#endif

/*
diff --git a/mm/memory.c b/mm/memory.c
index 517221f01303..5cb0ccf0c03f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5014,8 +5014,16 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
* with threads.
*/
if (vmf->flags & FAULT_FLAG_WRITE)
- flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
- vmf->pte);
+ flush_tlb_fix_spurious_write_fault(vmf->vma, vmf->address,
+ vmf->pte);
+ else
+ /*
+ * With the pte_same(ptep_get(vmf->pte), entry) check
+ * that calls update_mmu_tlb() above, multiple threads
+ * faulting at the same time won't get there.
+ */
+ flush_tlb_fix_spurious_read_fault(vmf->vma, vmf->address,
+ vmf->pte);
}
unlock:
pte_unmap_unlock(vmf->pte, vmf->ptl);
--
2.39.2

2023-12-07 15:08:14

by Alexandre Ghiti

Subject: [PATCH RFC/RFT 4/4] TEMP: riscv: Add debugfs interface to retrieve #sfence.vma

This is useful for testing/benchmarking.

Signed-off-by: Alexandre Ghiti <[email protected]>
---
arch/riscv/include/asm/pgtable.h | 6 ++++--
arch/riscv/include/asm/tlbflush.h | 4 ++++
arch/riscv/kernel/sbi.c | 12 ++++++++++++
arch/riscv/mm/tlbflush.c | 17 +++++++++++++++++
4 files changed, 37 insertions(+), 2 deletions(-)

diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 89aa5650f104..b0855a620cfd 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -550,7 +550,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
return false;
}

-extern u64 nr_sfence_vma_handle_exception;
+extern u64 nr_sfence_vma_spurious_read;
extern bool tlb_caching_invalid_entries;

#define flush_tlb_fix_spurious_read_fault flush_tlb_fix_spurious_read_fault
@@ -558,8 +558,10 @@ static inline void flush_tlb_fix_spurious_read_fault(struct vm_area_struct *vma,
unsigned long address,
pte_t *ptep)
{
- if (tlb_caching_invalid_entries)
+ if (tlb_caching_invalid_entries) {
+ __sync_fetch_and_add(&nr_sfence_vma_spurious_read, 1UL);
flush_tlb_page(vma, address);
+ }
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index a09196f8de68..f419ec9d2207 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -14,14 +14,18 @@
#ifdef CONFIG_MMU
extern unsigned long asid_mask;

+extern u64 nr_sfence_vma, nr_sfence_vma_all, nr_sfence_vma_all_asid;
+
static inline void local_flush_tlb_all(void)
{
+ __sync_fetch_and_add(&nr_sfence_vma_all, 1UL);
__asm__ __volatile__ ("sfence.vma" : : : "memory");
}

/* Flush one page from local TLB */
static inline void local_flush_tlb_page(unsigned long addr)
{
+ __sync_fetch_and_add(&nr_sfence_vma, 1UL);
ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
}
#else /* CONFIG_MMU */
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index c672c8ba9a2a..ac1617759583 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -376,6 +376,8 @@ int sbi_remote_fence_i(const struct cpumask *cpu_mask)
}
EXPORT_SYMBOL(sbi_remote_fence_i);

+extern u64 nr_sfence_vma, nr_sfence_vma_all, nr_sfence_vma_all_asid;
+
/**
* sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote
* harts for the specified virtual address range.
@@ -389,6 +391,11 @@ int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size)
{
+ if (size == (unsigned long)-1)
+ __sync_fetch_and_add(&nr_sfence_vma_all, 1UL);
+ else
+ __sync_fetch_and_add(&nr_sfence_vma, ALIGN(size, PAGE_SIZE) / PAGE_SIZE);
+
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
cpu_mask, start, size, 0, 0);
}
@@ -410,6 +417,11 @@ int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
unsigned long size,
unsigned long asid)
{
+ if (size == (unsigned long)-1)
+ __sync_fetch_and_add(&nr_sfence_vma_all_asid, 1UL);
+ else
+ __sync_fetch_and_add(&nr_sfence_vma, ALIGN(size, PAGE_SIZE) / PAGE_SIZE);
+
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
cpu_mask, start, size, asid, 0);
}
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 77be59aadc73..75a3e2dff16a 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -3,11 +3,16 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
+#include <linux/debugfs.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

+u64 nr_sfence_vma, nr_sfence_vma_all, nr_sfence_vma_all_asid,
+ nr_sfence_vma_handle_exception, nr_sfence_vma_spurious_read;
+
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
+ __sync_fetch_and_add(&nr_sfence_vma_all_asid, 1);
__asm__ __volatile__ ("sfence.vma x0, %0"
:
: "r" (asid)
@@ -17,6 +22,7 @@ static inline void local_flush_tlb_all_asid(unsigned long asid)
static inline void local_flush_tlb_page_asid(unsigned long addr,
unsigned long asid)
{
+ __sync_fetch_and_add(&nr_sfence_vma, 1);
__asm__ __volatile__ ("sfence.vma %0, %1"
:
: "r" (addr), "r" (asid)
@@ -149,3 +155,14 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif
+
+static int debugfs_nr_sfence_vma(void)
+{
+ debugfs_create_u64("nr_sfence_vma", 0444, NULL, &nr_sfence_vma);
+ debugfs_create_u64("nr_sfence_vma_all", 0444, NULL, &nr_sfence_vma_all);
+ debugfs_create_u64("nr_sfence_vma_all_asid", 0444, NULL, &nr_sfence_vma_all_asid);
+ debugfs_create_u64("nr_sfence_vma_handle_exception", 0444, NULL, &nr_sfence_vma_handle_exception);
+ debugfs_create_u64("nr_sfence_vma_spurious_read", 0444, NULL, &nr_sfence_vma_spurious_read);
+ return 0;
+}
+device_initcall(debugfs_nr_sfence_vma);
--
2.39.2

2023-12-07 15:57:13

by Christophe Leroy

Subject: Re: [PATCH RFC/RFT 2/4] riscv: Add a runtime detection of invalid TLB entries caching



Le 07/12/2023 à 16:03, Alexandre Ghiti a écrit :
> This mechanism allows uarchs that do not cache invalid TLB entries to
> completely bypass the sfence.vma introduced by the previous commit.
>
> Signed-off-by: Alexandre Ghiti <[email protected]>
> ---
> arch/riscv/mm/init.c | 124 +++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 124 insertions(+)
>
> diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> index 379403de6c6f..2e854613740c 100644
> --- a/arch/riscv/mm/init.c
> +++ b/arch/riscv/mm/init.c
> @@ -56,6 +56,8 @@ bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KER
> EXPORT_SYMBOL(pgtable_l4_enabled);
> EXPORT_SYMBOL(pgtable_l5_enabled);
>
> +bool tlb_caching_invalid_entries;
> +
> phys_addr_t phys_ram_base __ro_after_init;
> EXPORT_SYMBOL(phys_ram_base);
>
> @@ -750,6 +752,18 @@ static void __init disable_pgtable_l4(void)
> satp_mode = SATP_MODE_39;
> }
>
> +static void __init enable_pgtable_l5(void)
> +{
> + pgtable_l5_enabled = true;
> + satp_mode = SATP_MODE_57;
> +}
> +
> +static void __init enable_pgtable_l4(void)
> +{
> + pgtable_l4_enabled = true;
> + satp_mode = SATP_MODE_48;
> +}
> +
> static int __init print_no4lvl(char *p)
> {
> pr_info("Disabled 4-level and 5-level paging");
> @@ -826,6 +840,112 @@ static __init void set_satp_mode(uintptr_t dtb_pa)
> memset(early_pud, 0, PAGE_SIZE);
> memset(early_pmd, 0, PAGE_SIZE);
> }
> +
> +/* Determine at runtime if the uarch caches invalid TLB entries */
> +static __init void set_tlb_caching_invalid_entries(void)
> +{
> +#define NR_RETRIES_CACHING_INVALID_ENTRIES 50

Looks odd to have macros nested in the middle of a function.

> + uintptr_t set_tlb_caching_invalid_entries_pmd = ((unsigned long)set_tlb_caching_invalid_entries) & PMD_MASK;
> + // TODO the test_addr as defined below could go into another pud...
> + uintptr_t test_addr = set_tlb_caching_invalid_entries_pmd + 2 * PMD_SIZE;
> + pmd_t valid_pmd;
> + u64 satp;
> + int i = 0;
> +
> + /* To ease the page table creation */
> + disable_pgtable_l5();
> + disable_pgtable_l4();
> +
> + /* Establish a mapping for set_tlb_caching_invalid_entries() in sv39 */
> + create_pgd_mapping(early_pg_dir,
> + set_tlb_caching_invalid_entries_pmd,
> + (uintptr_t)early_pmd,
> + PGDIR_SIZE, PAGE_TABLE);
> +
> + /* Handle the case where set_tlb_caching_invalid_entries straddles 2 PMDs */
> + create_pmd_mapping(early_pmd,
> + set_tlb_caching_invalid_entries_pmd,
> + set_tlb_caching_invalid_entries_pmd,
> + PMD_SIZE, PAGE_KERNEL_EXEC);
> + create_pmd_mapping(early_pmd,
> + set_tlb_caching_invalid_entries_pmd + PMD_SIZE,
> + set_tlb_caching_invalid_entries_pmd + PMD_SIZE,
> + PMD_SIZE, PAGE_KERNEL_EXEC);
> +
> + /* Establish an invalid mapping */
> + create_pmd_mapping(early_pmd, test_addr, 0, PMD_SIZE, __pgprot(0));
> +
> + /* Precompute the valid pmd here because the mapping for pfn_pmd() won't exist */
> + valid_pmd = pfn_pmd(PFN_DOWN(set_tlb_caching_invalid_entries_pmd), PAGE_KERNEL);
> +
> + local_flush_tlb_all();
> + satp = PFN_DOWN((uintptr_t)&early_pg_dir) | SATP_MODE_39;
> + csr_write(CSR_SATP, satp);
> +
> + /*
> + * Set stvec to after the trapping access, access this invalid mapping
> + * and legitimately trap
> + */
> + // TODO: Should I save the previous stvec?
> +#define ASM_STR(x) __ASM_STR(x)

Looks odd to have macros nested in the middle of a function.


> + asm volatile(
> + "la a0, 1f \n"
> + "csrw " ASM_STR(CSR_TVEC) ", a0 \n"
> + "ld a0, 0(%0) \n"
> + ".align 2 \n"
> + "1: \n"
> + :
> + : "r" (test_addr)
> + : "a0"
> + );
> +
> + /* Now establish a valid mapping to check if the invalid one is cached */
> + early_pmd[pmd_index(test_addr)] = valid_pmd;
> +
> + /*
> + * Access the valid mapping multiple times: indeed, we can't use
> + * sfence.vma as a barrier to make sure the cpu did not reorder accesses
> + * so we may trap even if the uarch does not cache invalid entries. By
> + * trying a few times, we make sure that those uarchs will see the right
> + * mapping at some point.
> + */
> +
> + i = NR_RETRIES_CACHING_INVALID_ENTRIES;
> +
> +#define ASM_STR(x) __ASM_STR(x)

Duplicate define ?

> + asm_volatile_goto(
> + "la a0, 1f \n"
> + "csrw " ASM_STR(CSR_TVEC) ", a0 \n"
> + ".align 2 \n"
> + "1: \n"
> + "addi %0, %0, -1 \n"
> + "blt %0, zero, %l[caching_invalid_entries] \n"
> + "ld a0, 0(%1) \n"
> + :
> + : "r" (i), "r" (test_addr)
> + : "a0"
> + : caching_invalid_entries
> + );
> +
> + csr_write(CSR_SATP, 0ULL);
> + local_flush_tlb_all();
> +
> + /* If we don't trap, the uarch does not cache invalid entries! */
> + tlb_caching_invalid_entries = false;
> + goto clean;
> +
> +caching_invalid_entries:
> + csr_write(CSR_SATP, 0ULL);
> + local_flush_tlb_all();
> +
> + tlb_caching_invalid_entries = true;
> +clean:
> + memset(early_pg_dir, 0, PAGE_SIZE);
> + memset(early_pmd, 0, PAGE_SIZE);

Use clear_page() instead ?

> +
> + enable_pgtable_l4();
> + enable_pgtable_l5();
> +}
> #endif
>
> /*
> @@ -1072,6 +1192,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
> #endif
>
> #if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
> + set_tlb_caching_invalid_entries();
> set_satp_mode(dtb_pa);
> #endif
>
> @@ -1322,6 +1443,9 @@ static void __init setup_vm_final(void)
> local_flush_tlb_all();
>
> pt_ops_set_late();
> +
> + pr_info("uarch caches invalid entries: %s",
> + tlb_caching_invalid_entries ? "yes" : "no");
> }
> #else
> asmlinkage void __init setup_vm(uintptr_t dtb_pa)

2023-12-07 16:37:27

by Christophe Leroy

Subject: Re: [PATCH RFC/RFT 3/4] riscv: Stop emitting preventive sfence.vma for new userspace mappings

The subject says "riscv:" but it changes core parts and several arches.
Maybe this commit should be split in two commits: one for the API change
that renames flush_tlb_fix_spurious_fault() to
flush_tlb_fix_spurious_write_fault() and adds
flush_tlb_fix_spurious_read_fault(), including the change in memory.c,
then a second patch with the changes to riscv.

Le 07/12/2023 à 16:03, Alexandre Ghiti a écrit :
> The preventive sfence.vma were emitted because new mappings must be made
> visible to the page table walker, whether or not the uarch caches invalid
> entries.
>
> Actually, there is no need to preventively emit a sfence.vma for new
> userspace mappings: this can be handled in the page fault path instead.
>
> This drastically reduces the number of sfence.vma emitted:
>
> * Ubuntu boot to login:
> Before: ~630k sfence.vma
> After: ~200k sfence.vma
>
> * ltp - mmapstress01
> Before: ~45k
> After: ~6.3k
>
> * lmbench - lat_pagefault
> Before: ~665k
> After: 832 (!)
>
> * lmbench - lat_mmap
> Before: ~546k
> After: 718 (!)
>
> The only issue with the removal of the sfence.vma in update_mmu_cache() is
> that, on uarchs that cache invalid entries, those entries won't be
> invalidated until the process takes a fault: so that's an additional fault
> in those cases.
>
> Signed-off-by: Alexandre Ghiti <[email protected]>
> ---
> arch/arm64/include/asm/pgtable.h | 2 +-
> arch/mips/include/asm/pgtable.h | 6 +--
> arch/powerpc/include/asm/book3s/64/tlbflush.h | 8 ++--
> arch/riscv/include/asm/pgtable.h | 43 +++++++++++--------
> include/linux/pgtable.h | 8 +++-
> mm/memory.c | 12 +++++-
> 6 files changed, 48 insertions(+), 31 deletions(-)

Did you forget mm/pgtable-generic.c ?

>
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 7f7d9b1df4e5..728f25f529a5 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -57,7 +57,7 @@ static inline bool arch_thp_swp_supported(void)
> * fault on one CPU which has been handled concurrently by another CPU
> * does not need to perform additional invalidation.
> */
> -#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
> +#define flush_tlb_fix_spurious_write_fault(vma, address, ptep) do { } while (0)

Why do you need to make that change ? Nothing is explained about it in
the commit message.

>
> /*
> * ZERO_PAGE is a global shared page that is always zero: used
> diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
> index 430b208c0130..84439fe6ed29 100644
> --- a/arch/mips/include/asm/pgtable.h
> +++ b/arch/mips/include/asm/pgtable.h
> @@ -478,9 +478,9 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
> return __pgprot(prot);
> }
>
> -static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
> - unsigned long address,
> - pte_t *ptep)
> +static inline void flush_tlb_fix_spurious_write_fault(struct vm_area_struct *vma,
> + unsigned long address,
> + pte_t *ptep)
> {
> }
>
> diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
> index 1950c1b825b4..7166d56f90db 100644
> --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
> +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
> @@ -128,10 +128,10 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
> #define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
> #endif /* CONFIG_SMP */
>
> -#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
> -static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
> - unsigned long address,
> - pte_t *ptep)
> +#define flush_tlb_fix_spurious_write_fault flush_tlb_fix_spurious_write_fault
> +static inline void flush_tlb_fix_spurious_write_fault(struct vm_area_struct *vma,
> + unsigned long address,
> + pte_t *ptep)
> {
> /*
> * Book3S 64 does not require spurious fault flushes because the PTE
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index b2ba3f79cfe9..89aa5650f104 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -472,28 +472,20 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
> struct vm_area_struct *vma, unsigned long address,
> pte_t *ptep, unsigned int nr)
> {
> - /*
> - * The kernel assumes that TLBs don't cache invalid entries, but
> - * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
> - * cache flush; it is necessary even after writing invalid entries.
> - * Relying on flush_tlb_fix_spurious_fault would suffice, but
> - * the extra traps reduce performance. So, eagerly SFENCE.VMA.
> - */
> - while (nr--)
> - local_flush_tlb_page(address + nr * PAGE_SIZE);
> }
> #define update_mmu_cache(vma, addr, ptep) \
> update_mmu_cache_range(NULL, vma, addr, ptep, 1)
>
> #define __HAVE_ARCH_UPDATE_MMU_TLB
> -#define update_mmu_tlb update_mmu_cache
> +static inline void update_mmu_tlb(struct vm_area_struct *vma,
> + unsigned long address, pte_t *ptep)
> +{
> + flush_tlb_range(vma, address, address + PAGE_SIZE);
> +}
>
> static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
> unsigned long address, pmd_t *pmdp)
> {
> - pte_t *ptep = (pte_t *)pmdp;
> -
> - update_mmu_cache(vma, address, ptep);
> }
>
> #define __HAVE_ARCH_PTE_SAME
> @@ -548,13 +540,26 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
> unsigned long address, pte_t *ptep,
> pte_t entry, int dirty)
> {
> - if (!pte_same(*ptep, entry))
> + if (!pte_same(*ptep, entry)) {
> __set_pte_at(ptep, entry);
> - /*
> - * update_mmu_cache will unconditionally execute, handling both
> - * the case that the PTE changed and the spurious fault case.
> - */
> - return true;
> + /* Only uarchs without Svadu are impacted here */
> + flush_tlb_page(vma, address);
> + return true;
> + }
> +
> + return false;
> +}
> +
> +extern u64 nr_sfence_vma_handle_exception;
> +extern bool tlb_caching_invalid_entries;
> +
> +#define flush_tlb_fix_spurious_read_fault flush_tlb_fix_spurious_read_fault
> +static inline void flush_tlb_fix_spurious_read_fault(struct vm_area_struct *vma,
> + unsigned long address,
> + pte_t *ptep)
> +{
> + if (tlb_caching_invalid_entries)
> + flush_tlb_page(vma, address);
> }
>
> #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index af7639c3b0a3..7abaf42ef612 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -931,8 +931,12 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
> # define pte_accessible(mm, pte) ((void)(pte), 1)
> #endif
>
> -#ifndef flush_tlb_fix_spurious_fault
> -#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
> +#ifndef flush_tlb_fix_spurious_write_fault
> +#define flush_tlb_fix_spurious_write_fault(vma, address, ptep) flush_tlb_page(vma, address)
> +#endif
> +
> +#ifndef flush_tlb_fix_spurious_read_fault
> +#define flush_tlb_fix_spurious_read_fault(vma, address, ptep)
> #endif
>
> /*
> diff --git a/mm/memory.c b/mm/memory.c
> index 517221f01303..5cb0ccf0c03f 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -5014,8 +5014,16 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
> * with threads.
> */
> if (vmf->flags & FAULT_FLAG_WRITE)
> - flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
> - vmf->pte);
> + flush_tlb_fix_spurious_write_fault(vmf->vma, vmf->address,
> + vmf->pte);
> + else
> + /*
> + * With the pte_same(ptep_get(vmf->pte), entry) check
> + * that calls update_mmu_tlb() above, multiple threads
> + * faulting at the same time won't get there.
> + */
> + flush_tlb_fix_spurious_read_fault(vmf->vma, vmf->address,
> + vmf->pte);
> }
> unlock:
> pte_unmap_unlock(vmf->pte, vmf->ptl);

2023-12-08 14:30:35

by Alexandre Ghiti

Subject: Re: [PATCH RFC/RFT 2/4] riscv: Add a runtime detection of invalid TLB entries caching

On Thu, Dec 7, 2023 at 4:55 PM Christophe Leroy
<[email protected]> wrote:
>
>
>
> Le 07/12/2023 à 16:03, Alexandre Ghiti a écrit :
> > This mechanism allows uarchs that do not cache invalid TLB entries to
> > completely bypass the sfence.vma introduced by the previous commit.
> >
> > Signed-off-by: Alexandre Ghiti <[email protected]>
> > ---
> > arch/riscv/mm/init.c | 124 +++++++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 124 insertions(+)
> >
> > diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> > index 379403de6c6f..2e854613740c 100644
> > --- a/arch/riscv/mm/init.c
> > +++ b/arch/riscv/mm/init.c
> > @@ -56,6 +56,8 @@ bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KER
> > EXPORT_SYMBOL(pgtable_l4_enabled);
> > EXPORT_SYMBOL(pgtable_l5_enabled);
> >
> > +bool tlb_caching_invalid_entries;
> > +
> > phys_addr_t phys_ram_base __ro_after_init;
> > EXPORT_SYMBOL(phys_ram_base);
> >
> > @@ -750,6 +752,18 @@ static void __init disable_pgtable_l4(void)
> > satp_mode = SATP_MODE_39;
> > }
> >
> > +static void __init enable_pgtable_l5(void)
> > +{
> > + pgtable_l5_enabled = true;
> > + satp_mode = SATP_MODE_57;
> > +}
> > +
> > +static void __init enable_pgtable_l4(void)
> > +{
> > + pgtable_l4_enabled = true;
> > + satp_mode = SATP_MODE_48;
> > +}
> > +
> > static int __init print_no4lvl(char *p)
> > {
> > pr_info("Disabled 4-level and 5-level paging");
> > @@ -826,6 +840,112 @@ static __init void set_satp_mode(uintptr_t dtb_pa)
> > memset(early_pud, 0, PAGE_SIZE);
> > memset(early_pmd, 0, PAGE_SIZE);
> > }
> > +
> > +/* Determine at runtime if the uarch caches invalid TLB entries */
> > +static __init void set_tlb_caching_invalid_entries(void)
> > +{
> > +#define NR_RETRIES_CACHING_INVALID_ENTRIES 50
>
> Looks odd to have macros nested in the middle of a function.
>
> > + uintptr_t set_tlb_caching_invalid_entries_pmd = ((unsigned long)set_tlb_caching_invalid_entries) & PMD_MASK;
> > + // TODO the test_addr as defined below could go into another pud...
> > + uintptr_t test_addr = set_tlb_caching_invalid_entries_pmd + 2 * PMD_SIZE;
> > + pmd_t valid_pmd;
> > + u64 satp;
> > + int i = 0;
> > +
> > + /* To ease the page table creation */
> > + disable_pgtable_l5();
> > + disable_pgtable_l4();
> > +
> > + /* Establish a mapping for set_tlb_caching_invalid_entries() in sv39 */
> > + create_pgd_mapping(early_pg_dir,
> > + set_tlb_caching_invalid_entries_pmd,
> > + (uintptr_t)early_pmd,
> > + PGDIR_SIZE, PAGE_TABLE);
> > +
> > + /* Handle the case where set_tlb_caching_invalid_entries straddles 2 PMDs */
> > + create_pmd_mapping(early_pmd,
> > + set_tlb_caching_invalid_entries_pmd,
> > + set_tlb_caching_invalid_entries_pmd,
> > + PMD_SIZE, PAGE_KERNEL_EXEC);
> > + create_pmd_mapping(early_pmd,
> > + set_tlb_caching_invalid_entries_pmd + PMD_SIZE,
> > + set_tlb_caching_invalid_entries_pmd + PMD_SIZE,
> > + PMD_SIZE, PAGE_KERNEL_EXEC);
> > +
> > + /* Establish an invalid mapping */
> > + create_pmd_mapping(early_pmd, test_addr, 0, PMD_SIZE, __pgprot(0));
> > +
> > + /* Precompute the valid pmd here because the mapping for pfn_pmd() won't exist */
> > + valid_pmd = pfn_pmd(PFN_DOWN(set_tlb_caching_invalid_entries_pmd), PAGE_KERNEL);
> > +
> > + local_flush_tlb_all();
> > + satp = PFN_DOWN((uintptr_t)&early_pg_dir) | SATP_MODE_39;
> > + csr_write(CSR_SATP, satp);
> > +
> > + /*
> > + * Set stvec to after the trapping access, access this invalid mapping
> > + * and legitimately trap
> > + */
> > + // TODO: Should I save the previous stvec?
> > +#define ASM_STR(x) __ASM_STR(x)
>
> Looks odd to have macros nested in the middle of a function.
>
>
> > + asm volatile(
> > + "la a0, 1f \n"
> > + "csrw " ASM_STR(CSR_TVEC) ", a0 \n"
> > + "ld a0, 0(%0) \n"
> > + ".align 2 \n"
> > + "1: \n"
> > + :
> > + : "r" (test_addr)
> > + : "a0"
> > + );
> > +
> > + /* Now establish a valid mapping to check if the invalid one is cached */
> > + early_pmd[pmd_index(test_addr)] = valid_pmd;
> > +
> > + /*
> > + * Access the valid mapping multiple times: indeed, we can't use
> > + * sfence.vma as a barrier to make sure the cpu did not reorder accesses
> > + * so we may trap even if the uarch does not cache invalid entries. By
> > + * trying a few times, we make sure that those uarchs will see the right
> > + * mapping at some point.
> > + */
> > +
> > + i = NR_RETRIES_CACHING_INVALID_ENTRIES;
> > +
> > +#define ASM_STR(x) __ASM_STR(x)
>
> Duplicate define ?
>
> > + asm_volatile_goto(
> > + "la a0, 1f \n"
> > + "csrw " ASM_STR(CSR_TVEC) ", a0 \n"
> > + ".align 2 \n"
> > + "1: \n"
> > + "addi %0, %0, -1 \n"
> > + "blt %0, zero, %l[caching_invalid_entries] \n"
> > + "ld a0, 0(%1) \n"
> > + :
> > + : "r" (i), "r" (test_addr)
> > + : "a0"
> > + : caching_invalid_entries
> > + );
> > +
> > + csr_write(CSR_SATP, 0ULL);
> > + local_flush_tlb_all();
> > +
> > + /* If we don't trap, the uarch does not cache invalid entries! */
> > + tlb_caching_invalid_entries = false;
> > + goto clean;
> > +
> > +caching_invalid_entries:
> > + csr_write(CSR_SATP, 0ULL);
> > + local_flush_tlb_all();
> > +
> > + tlb_caching_invalid_entries = true;
> > +clean:
> > + memset(early_pg_dir, 0, PAGE_SIZE);
> > + memset(early_pmd, 0, PAGE_SIZE);
>
> Use clear_page() instead ?
>
> > +
> > + enable_pgtable_l4();
> > + enable_pgtable_l5();
> > +}
> > #endif
> >
> > /*
> > @@ -1072,6 +1192,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
> > #endif
> >
> > #if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
> > + set_tlb_caching_invalid_entries();
> > set_satp_mode(dtb_pa);
> > #endif
> >
> > @@ -1322,6 +1443,9 @@ static void __init setup_vm_final(void)
> > local_flush_tlb_all();
> >
> > pt_ops_set_late();
> > +
> > + pr_info("uarch caches invalid entries: %s",
> > + tlb_caching_invalid_entries ? "yes" : "no");
> > }
> > #else
> > asmlinkage void __init setup_vm(uintptr_t dtb_pa)

I left this patch in so that people can easily test the series without
knowing what their uarch actually does, but it will very likely be
dropped, as a new extension has just been proposed for that.

Thanks anyway, I should have been clearer in the patch title,

Alex

2023-12-08 14:40:12

by Alexandre Ghiti

Subject: Re: [PATCH RFC/RFT 3/4] riscv: Stop emitting preventive sfence.vma for new userspace mappings

On Thu, Dec 7, 2023 at 5:37 PM Christophe Leroy
<[email protected]> wrote:
>
> The subject says "riscv:" but it changes core parts and several arches.
> Maybe this commit should be split in two commits: one for the API change
> that renames flush_tlb_fix_spurious_fault() to
> flush_tlb_fix_spurious_write_fault() and adds
> flush_tlb_fix_spurious_read_fault(), including the change in memory.c,
> then a second patch with the changes to riscv.

You're right, I'll do that, thanks.

>
> Le 07/12/2023 à 16:03, Alexandre Ghiti a écrit :
> > The preventive sfence.vma were emitted because new mappings must be made
> > visible to the page table walker, whether or not the uarch caches invalid
> > entries.
> >
> > Actually, there is no need to preventively emit a sfence.vma for new
> > userspace mappings: this can be handled in the page fault path instead.
> >
> > This drastically reduces the number of sfence.vma emitted:
> >
> > * Ubuntu boot to login:
> > Before: ~630k sfence.vma
> > After: ~200k sfence.vma
> >
> > * ltp - mmapstress01
> > Before: ~45k
> > After: ~6.3k
> >
> > * lmbench - lat_pagefault
> > Before: ~665k
> > After: 832 (!)
> >
> > * lmbench - lat_mmap
> > Before: ~546k
> > After: 718 (!)
> >
> > The only issue with the removal of the sfence.vma in update_mmu_cache() is
> > that, on uarchs that cache invalid entries, those entries won't be
> > invalidated until the process takes a fault: so that's an additional fault
> > in those cases.
> >
> > Signed-off-by: Alexandre Ghiti <[email protected]>
> > ---
> > arch/arm64/include/asm/pgtable.h | 2 +-
> > arch/mips/include/asm/pgtable.h | 6 +--
> > arch/powerpc/include/asm/book3s/64/tlbflush.h | 8 ++--
> > arch/riscv/include/asm/pgtable.h | 43 +++++++++++--------
> > include/linux/pgtable.h | 8 +++-
> > mm/memory.c | 12 +++++-
> > 6 files changed, 48 insertions(+), 31 deletions(-)
>
> Did you forget mm/pgtable-generic.c ?

Indeed, I "missed" the occurrence of flush_tlb_fix_spurious_fault()
there, thanks.

>
> >
> > diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> > index 7f7d9b1df4e5..728f25f529a5 100644
> > --- a/arch/arm64/include/asm/pgtable.h
> > +++ b/arch/arm64/include/asm/pgtable.h
> > @@ -57,7 +57,7 @@ static inline bool arch_thp_swp_supported(void)
> > * fault on one CPU which has been handled concurrently by another CPU
> > * does not need to perform additional invalidation.
> > */
> > -#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
> > +#define flush_tlb_fix_spurious_write_fault(vma, address, ptep) do { } while (0)
>
> Why do you need to make that change ? Nothing is explained about it in
> the commit message.

I renamed this macro because in the page fault path,
flush_tlb_fix_spurious_fault() is called only when the fault is a
write fault (see
https://elixir.bootlin.com/linux/latest/source/mm/memory.c#L5016).
I'll check if that fits the occurrence in mm/pgtable-generic.c too.

Thanks again for the review,

Alex

>
> >
> > /*
> > * ZERO_PAGE is a global shared page that is always zero: used
> > diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
> > index 430b208c0130..84439fe6ed29 100644
> > --- a/arch/mips/include/asm/pgtable.h
> > +++ b/arch/mips/include/asm/pgtable.h
> > @@ -478,9 +478,9 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
> > return __pgprot(prot);
> > }
> >
> > -static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
> > - unsigned long address,
> > - pte_t *ptep)
> > +static inline void flush_tlb_fix_spurious_write_fault(struct vm_area_struct *vma,
> > + unsigned long address,
> > + pte_t *ptep)
> > {
> > }
> >
> > diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
> > index 1950c1b825b4..7166d56f90db 100644
> > --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
> > +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
> > @@ -128,10 +128,10 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
> > #define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
> > #endif /* CONFIG_SMP */
> >
> > -#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
> > -static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
> > - unsigned long address,
> > - pte_t *ptep)
> > +#define flush_tlb_fix_spurious_write_fault flush_tlb_fix_spurious_write_fault
> > +static inline void flush_tlb_fix_spurious_write_fault(struct vm_area_struct *vma,
> > + unsigned long address,
> > + pte_t *ptep)
> > {
> > /*
> > * Book3S 64 does not require spurious fault flushes because the PTE
> > diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> > index b2ba3f79cfe9..89aa5650f104 100644
> > --- a/arch/riscv/include/asm/pgtable.h
> > +++ b/arch/riscv/include/asm/pgtable.h
> > @@ -472,28 +472,20 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
> > struct vm_area_struct *vma, unsigned long address,
> > pte_t *ptep, unsigned int nr)
> > {
> > - /*
> > - * The kernel assumes that TLBs don't cache invalid entries, but
> > - * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
> > - * cache flush; it is necessary even after writing invalid entries.
> > - * Relying on flush_tlb_fix_spurious_fault would suffice, but
> > - * the extra traps reduce performance. So, eagerly SFENCE.VMA.
> > - */
> > - while (nr--)
> > - local_flush_tlb_page(address + nr * PAGE_SIZE);
> > }
> > #define update_mmu_cache(vma, addr, ptep) \
> > update_mmu_cache_range(NULL, vma, addr, ptep, 1)
> >
> > #define __HAVE_ARCH_UPDATE_MMU_TLB
> > -#define update_mmu_tlb update_mmu_cache
> > +static inline void update_mmu_tlb(struct vm_area_struct *vma,
> > + unsigned long address, pte_t *ptep)
> > +{
> > + flush_tlb_range(vma, address, address + PAGE_SIZE);
> > +}
> >
> > static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
> > unsigned long address, pmd_t *pmdp)
> > {
> > - pte_t *ptep = (pte_t *)pmdp;
> > -
> > - update_mmu_cache(vma, address, ptep);
> > }
> >
> > #define __HAVE_ARCH_PTE_SAME
> > @@ -548,13 +540,26 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
> > unsigned long address, pte_t *ptep,
> > pte_t entry, int dirty)
> > {
> > - if (!pte_same(*ptep, entry))
> > + if (!pte_same(*ptep, entry)) {
> > __set_pte_at(ptep, entry);
> > - /*
> > - * update_mmu_cache will unconditionally execute, handling both
> > - * the case that the PTE changed and the spurious fault case.
> > - */
> > - return true;
> > + /* Only uarchs without Svadu are impacted here */
> > + flush_tlb_page(vma, address);
> > + return true;
> > + }
> > +
> > + return false;
> > +}
> > +
> > +extern u64 nr_sfence_vma_handle_exception;
> > +extern bool tlb_caching_invalid_entries;
> > +
> > +#define flush_tlb_fix_spurious_read_fault flush_tlb_fix_spurious_read_fault
> > +static inline void flush_tlb_fix_spurious_read_fault(struct vm_area_struct *vma,
> > + unsigned long address,
> > + pte_t *ptep)
> > +{
> > + if (tlb_caching_invalid_entries)
> > + flush_tlb_page(vma, address);
> > }
> >
> > #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
> > diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> > index af7639c3b0a3..7abaf42ef612 100644
> > --- a/include/linux/pgtable.h
> > +++ b/include/linux/pgtable.h
> > @@ -931,8 +931,12 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
> > # define pte_accessible(mm, pte) ((void)(pte), 1)
> > #endif
> >
> > -#ifndef flush_tlb_fix_spurious_fault
> > -#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
> > +#ifndef flush_tlb_fix_spurious_write_fault
> > +#define flush_tlb_fix_spurious_write_fault(vma, address, ptep) flush_tlb_page(vma, address)
> > +#endif
> > +
> > +#ifndef flush_tlb_fix_spurious_read_fault
> > +#define flush_tlb_fix_spurious_read_fault(vma, address, ptep)
> > #endif
> >
> > /*
> > diff --git a/mm/memory.c b/mm/memory.c
> > index 517221f01303..5cb0ccf0c03f 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -5014,8 +5014,16 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
> > * with threads.
> > */
> > if (vmf->flags & FAULT_FLAG_WRITE)
> > - flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
> > - vmf->pte);
> > + flush_tlb_fix_spurious_write_fault(vmf->vma, vmf->address,
> > + vmf->pte);
> > + else
> > + /*
> > + * With the pte_same(ptep_get(vmf->pte), entry) check
> > + * that calls update_mmu_tlb() above, multiple threads
> > + * faulting at the same time won't get there.
> > + */
> > + flush_tlb_fix_spurious_read_fault(vmf->vma, vmf->address,
> > + vmf->pte);
> > }
> > unlock:
> > pte_unmap_unlock(vmf->pte, vmf->ptl);