This series adds update_mmu_tlb_range() to batch-update the TLB for an
address range and refactors update_mmu_tlb() on top of it.
Since commit 19eaf44954df ("mm: thp: support allocation of anonymous
multi-size THP"), we may need to update the TLB for a range of
addresses by calling update_mmu_tlb() in a loop. Using
update_mmu_tlb_range() instead simplifies the callers and can avoid
some unnecessary per-page work on some architectures.
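For example, a caller that currently issues one update_mmu_tlb() per
page of the fault range could be collapsed into a single call. This is
only a rough illustration, not the exact hunk from patch #3, and the
variable names below are made up:

	/* before: one TLB update per page of the range */
	for (i = 0; i < nr_pages; i++)
		update_mmu_tlb(vma, addr + i * PAGE_SIZE, ptep + i);

	/* after: a single batched update covering the whole range */
	update_mmu_tlb_range(vma, addr, ptep, nr_pages);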
Thanks,
Bang
Changes since v2 [2]:
- Use a generic implementation for update_mmu_tlb() (per Ryan)
Changes since v1 [1]:
- Add __HAVE_ARCH_UPDATE_MMU_TLB_RANGE macro (per Lance Yang)
[1] https://lore.kernel.org/linux-mm/[email protected]/
[2] https://lore.kernel.org/linux-mm/[email protected]/
Bang Li (3):
mm: Add update_mmu_tlb_range()
mm: Refactor update_mmu_tlb()
mm: Use update_mmu_tlb_range() to simplify code
arch/loongarch/include/asm/pgtable.h | 4 ++--
arch/mips/include/asm/pgtable.h | 4 ++--
arch/riscv/include/asm/pgtable.h | 4 ++--
arch/xtensa/include/asm/pgtable.h | 6 +++---
arch/xtensa/mm/tlb.c | 6 +++---
include/linux/pgtable.h | 11 ++++++++---
mm/memory.c | 4 +---
7 files changed, 21 insertions(+), 18 deletions(-)
--
2.19.1.6.gb485710b
Add update_mmu_tlb_range(), which allows the TLB to be updated in
batch for an address range.
Signed-off-by: Bang Li <[email protected]>
---
arch/loongarch/include/asm/pgtable.h | 2 ++
arch/mips/include/asm/pgtable.h | 2 ++
arch/riscv/include/asm/pgtable.h | 2 ++
arch/xtensa/include/asm/pgtable.h | 3 +++
arch/xtensa/mm/tlb.c | 6 ++++++
5 files changed, 15 insertions(+)
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index af3acdf3481a..5ccc2a3a6f7a 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -469,6 +469,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache
+#define update_mmu_tlb_range(vma, addr, ptep, nr) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, nr)
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index e27a4c83c548..0891ad7d43b6 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -596,6 +596,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache
+#define update_mmu_tlb_range(vma, address, ptep, nr) \
+ update_mmu_cache_range(NULL, vma, address, ptep, nr)
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 661b2b4fe758..fc07b829ac4a 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -488,6 +488,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache
+#define update_mmu_tlb_range(vma, addr, ptep, nr) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, nr)
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 9a7e5e57ee9a..436158bd9030 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -413,6 +413,9 @@ typedef pte_t *pte_addr_t;
void update_mmu_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep);
#define __HAVE_ARCH_UPDATE_MMU_TLB
+void update_mmu_tlb_range(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, unsigned int nr);
+#define update_mmu_tlb_range update_mmu_tlb_range
#endif /* !defined (__ASSEMBLY__) */
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index d8b60d6e50a8..05efba86b870 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -169,6 +169,12 @@ void update_mmu_tlb(struct vm_area_struct *vma,
local_flush_tlb_page(vma, address);
}
+void update_mmu_tlb_range(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, unsigned int nr)
+{
+ local_flush_tlb_range(vma, address, address + PAGE_SIZE * nr);
+}
+
#ifdef CONFIG_DEBUG_TLB_SANITY
static unsigned get_pte_for_vaddr(unsigned vaddr)
--
2.19.1.6.gb485710b
On 18.05.24 09:49, Bang Li wrote:
> Add update_mmu_tlb_range(), which allows the TLB to be updated in
> batch for an address range.
>
> Signed-off-by: Bang Li <[email protected]>
> ---
The generic variant that is a NOP from patch #2 should go into this
patch as well.
Apart from that LGTM
--
Cheers,
David / dhildenb
Hi David,
Thanks for your review!
On 2024/5/21 17:32, David Hildenbrand wrote:
> On 18.05.24 09:49, Bang Li wrote:
>> Add update_mmu_tlb_range(), which allows the TLB to be updated in
>> batch for an address range.
>>
>> Signed-off-by: Bang Li <[email protected]>
>> ---
>
> The generic variant that is a NOP from patch #2 should go into this
> patch as well.
Good point. Thanks for pointing this out.
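I will move the generic no-op fallback into this patch in the next
version, roughly something like the below in include/linux/pgtable.h
(just a sketch, the final form may differ):

#ifndef update_mmu_tlb_range
static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
				unsigned long address, pte_t *ptep, unsigned int nr)
{
}
#endif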
Thanks,
Bang
>
> Apart from that LGTM
>