The `update_mmu_cache` function on riscv currently flushes the TLB
without ASID information, which flushes TLB entries belonging to other
tasks' address spaces even when the processor supports ASIDs. So add a
new function, `flush_tlb_local_one_page`, to flush a single page on the
local CPU whether or not the processor supports ASIDs; if ASIDs are
supported, the function uses them.

Signed-off-by: Jinyu Tang <[email protected]>
---
arch/riscv/include/asm/pgtable.h | 2 +-
arch/riscv/include/asm/tlbflush.h | 2 ++
arch/riscv/mm/tlbflush.c | 11 +++++++++++
3 files changed, 14 insertions(+), 1 deletion(-)
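
Note for reviewers, kept below the --- so it stays out of the commit
log: the new helper leans on local_flush_tlb_page_asid(). As a rough
sketch, assuming the standard SFENCE.VMA encoding (rs1 = virtual
address, rs2 = ASID), that primitive has roughly the shape of the
existing static helper in arch/riscv/mm/tlbflush.c:

/*
 * Minimal sketch: SFENCE.VMA with a nonzero rs2 invalidates only the
 * translations for @addr tagged with @asid, so TLB entries belonging
 * to other address spaces survive the flush.
 */
static inline void local_flush_tlb_page_asid(unsigned long addr,
					     unsigned long asid)
{
	__asm__ __volatile__ ("sfence.vma %0, %1"
			      : : "r" (addr), "r" (asid)
			      : "memory");
}

By contrast, local_flush_tlb_page() issues "sfence.vma addr" with
rs2 = x0, which invalidates @addr across all address spaces; that
over-flush is what this patch avoids on ASID-capable processors.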
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7ec936910a96..09ccefa6b6c7 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
* Relying on flush_tlb_fix_spurious_fault would suffice, but
* the extra traps reduce performance. So, eagerly SFENCE.VMA.
*/
- local_flush_tlb_page(address);
+ flush_tlb_local_one_page(vma, address);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 801019381dea..120aeb1c6ecf 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -30,6 +30,7 @@ static inline void local_flush_tlb_page(unsigned long addr)
#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
@@ -42,6 +43,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,

#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
+#define flush_tlb_local_one_page(vma, addr) local_flush_tlb_page(addr)

static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 37ed760d007c..a2634ce55626 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -64,6 +64,17 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
put_cpu();
}

+void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ if (static_branch_unlikely(&use_asid_allocator)) {
+ unsigned long asid = atomic_long_read(&vma->vm_mm->context.id);
+
+ local_flush_tlb_page_asid(addr, asid);
+ } else {
+ local_flush_tlb_page(addr);
+ }
+}
+
void flush_tlb_mm(struct mm_struct *mm)
{
__sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
--
2.30.2
On Sun, Aug 21, 2022 at 7:09 AM Jinyu Tang <[email protected]> wrote:
>
> The `update_mmu_cache` function on riscv currently flushes the TLB
> without ASID information, which flushes TLB entries belonging to other
> tasks' address spaces even when the processor supports ASIDs. So add a
> new function, `flush_tlb_local_one_page`, to flush a single page on the
> local CPU whether or not the processor supports ASIDs; if ASIDs are
> supported, the function uses them.
>
> Signed-off-by: Jinyu Tang <[email protected]>
This is already covered by PATCH6 of the IPI improvement series.
https://www.spinics.net/lists/kernel/msg4481428.html
Regards,
Anup
On Sun, Aug 21, 2022 at 10:35 AM Anup Patel <[email protected]> wrote:
>
> On Sun, Aug 21, 2022 at 7:09 AM Jinyu Tang <[email protected]> wrote:
> >
> > The `update_mmu_cache` function on riscv currently flushes the TLB
> > without ASID information, which flushes TLB entries belonging to other
> > tasks' address spaces even when the processor supports ASIDs. So add a
> > new function, `flush_tlb_local_one_page`, to flush a single page on the
> > local CPU whether or not the processor supports ASIDs; if ASIDs are
> > supported, the function uses them.
> >
> > Signed-off-by: Jinyu Tang <[email protected]>
>
> This is already covered by PATCH6 of the IPI improvement series.
> https://www.spinics.net/lists/kernel/msg4481428.html
My bad. This patch does not overlap with PATCH6 of the
IPI improvement series, but it does conflict with that series.
Maybe you can rebase this patch on top of the IPI improvement
series?
Regards,
Anup