2023-09-11 22:16:52

by Alexandre Ghiti

Subject: [PATCH v4 1/4] riscv: Improve flush_tlb()

For now, flush_tlb() simply calls flush_tlb_mm() which results in a
flush of the whole TLB. So let's use mmu_gather fields to provide a more
fine-grained flush of the TLB.

Signed-off-by: Alexandre Ghiti <[email protected]>
Reviewed-by: Andrew Jones <[email protected]>
---
 arch/riscv/include/asm/tlb.h      | 8 +++++++-
 arch/riscv/include/asm/tlbflush.h | 3 +++
 arch/riscv/mm/tlbflush.c          | 7 +++++++
 3 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 120bcf2ed8a8..1eb5682b2af6 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-        flush_tlb_mm(tlb->mm);
+#ifdef CONFIG_MMU
+        if (tlb->fullmm || tlb->need_flush_all)
+                flush_tlb_mm(tlb->mm);
+        else
+                flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
+                                   tlb_get_unmap_size(tlb));
+#endif
 }
 
 #endif /* _ASM_RISCV_TLB_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index a09196f8de68..f5c4fb0ae642 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -32,6 +32,8 @@ static inline void local_flush_tlb_page(unsigned long addr)
 #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+                        unsigned long end, unsigned int page_size);
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                      unsigned long end);
@@ -52,6 +54,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 }
 
 #define flush_tlb_mm(mm) flush_tlb_all()
+#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
 #endif /* !CONFIG_SMP || !CONFIG_MMU */
 
 /* Flush a range of kernel pages */
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 77be59aadc73..fa03289853d8 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -132,6 +132,13 @@ void flush_tlb_mm(struct mm_struct *mm)
         __flush_tlb_range(mm, 0, -1, PAGE_SIZE);
 }
 
+void flush_tlb_mm_range(struct mm_struct *mm,
+                        unsigned long start, unsigned long end,
+                        unsigned int page_size)
+{
+        __flush_tlb_range(mm, start, end - start, page_size);
+}
+
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
         __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
--
2.39.2
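
As a minimal illustration of the dispatch the tlb.h hunk introduces,
here is a standalone sketch. It is a mock, not kernel code: the structs
are reduced to the fields the patch reads, and tlb_get_unmap_size() is
stubbed to return a fixed 4 KiB stride instead of deriving it from the
gathered pages as the real helper does.

/*
 * Standalone sketch (not kernel code) of the tlb_flush() dispatch
 * added above. Types are mocked so it builds as a plain C program.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096U

struct mm_struct { int id; };

struct mmu_gather {
        struct mm_struct *mm;
        unsigned long start, end;   /* range gathered while unmapping */
        bool fullmm;                /* the whole mm is being torn down */
        bool need_flush_all;        /* a caller forced a full flush */
};

static void flush_tlb_mm(struct mm_struct *mm)
{
        printf("mm %d: flush entire TLB\n", mm->id);
}

static void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end, unsigned int page_size)
{
        printf("mm %d: flush [%#lx, %#lx) with stride %u\n",
               mm->id, start, end, page_size);
}

/* Stub: the real helper derives the stride from the unmapped page size. */
static unsigned int tlb_get_unmap_size(struct mmu_gather *tlb)
{
        (void)tlb;
        return PAGE_SIZE;
}

/* Mirrors the branch structure of the patched tlb_flush(). */
static void tlb_flush(struct mmu_gather *tlb)
{
        if (tlb->fullmm || tlb->need_flush_all)
                flush_tlb_mm(tlb->mm);
        else
                flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
                                   tlb_get_unmap_size(tlb));
}

int main(void)
{
        struct mm_struct mm = { .id = 1 };
        struct mmu_gather unmap = {
                .mm = &mm, .start = 0x10000, .end = 0x14000,
        };
        struct mmu_gather teardown = { .mm = &mm, .fullmm = true };

        tlb_flush(&unmap);     /* partial: flushes only four pages */
        tlb_flush(&teardown);  /* full: the whole address space */
        return 0;
}

The output makes the intent of the branch visible: a full teardown
(fullmm) still takes the global flush path, while unmapping a few pages
only flushes the gathered [start, end) range.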


2023-09-19 16:38:19

by Lad, Prabhakar

Subject: Re: [PATCH v4 1/4] riscv: Improve flush_tlb()

On Mon, Sep 11, 2023 at 2:13 PM Alexandre Ghiti <[email protected]> wrote:
>
> For now, flush_tlb() simply calls flush_tlb_mm() which results in a
> flush of the whole TLB. So let's use mmu_gather fields to provide a more
> fine-grained flush of the TLB.
>
> Signed-off-by: Alexandre Ghiti <[email protected]>
> Reviewed-by: Andrew Jones <[email protected]>
> ---
> arch/riscv/include/asm/tlb.h      | 8 +++++++-
> arch/riscv/include/asm/tlbflush.h | 3 +++
> arch/riscv/mm/tlbflush.c          | 7 +++++++
> 3 files changed, 17 insertions(+), 1 deletion(-)
>
Tested-by: Lad Prabhakar <[email protected]> # On RZ/Five SMARC

Cheers,
Prabhakar

> diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
> index 120bcf2ed8a8..1eb5682b2af6 100644
> --- a/arch/riscv/include/asm/tlb.h
> +++ b/arch/riscv/include/asm/tlb.h
> @@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
>
>  static inline void tlb_flush(struct mmu_gather *tlb)
>  {
> -        flush_tlb_mm(tlb->mm);
> +#ifdef CONFIG_MMU
> +        if (tlb->fullmm || tlb->need_flush_all)
> +                flush_tlb_mm(tlb->mm);
> +        else
> +                flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
> +                                   tlb_get_unmap_size(tlb));
> +#endif
>  }
>
>  #endif /* _ASM_RISCV_TLB_H */
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index a09196f8de68..f5c4fb0ae642 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -32,6 +32,8 @@ static inline void local_flush_tlb_page(unsigned long addr)
>  #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
>  void flush_tlb_all(void);
>  void flush_tlb_mm(struct mm_struct *mm);
> +void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
> +                        unsigned long end, unsigned int page_size);
>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>                       unsigned long end);
> @@ -52,6 +54,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
>  }
>
>  #define flush_tlb_mm(mm) flush_tlb_all()
> +#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
>  #endif /* !CONFIG_SMP || !CONFIG_MMU */
>
>  /* Flush a range of kernel pages */
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index 77be59aadc73..fa03289853d8 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -132,6 +132,13 @@ void flush_tlb_mm(struct mm_struct *mm)
>          __flush_tlb_range(mm, 0, -1, PAGE_SIZE);
>  }
>
> +void flush_tlb_mm_range(struct mm_struct *mm,
> +                        unsigned long start, unsigned long end,
> +                        unsigned int page_size)
> +{
> +        __flush_tlb_range(mm, start, end - start, page_size);
> +}
> +
>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
>  {
>          __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
> --
> 2.39.2
>

2023-10-09 17:53:41

by Samuel Holland

Subject: Re: [PATCH v4 1/4] riscv: Improve flush_tlb()

On 2023-09-11 8:12 AM, Alexandre Ghiti wrote:
> For now, flush_tlb() simply calls flush_tlb_mm() which results in a

s/flush_tlb/tlb_flush/ here and in the subject.

Otherwise:
Reviewed-by: Samuel Holland <[email protected]>

> flush of the whole TLB. So let's use mmu_gather fields to provide a more
> fine-grained flush of the TLB.
>
> Signed-off-by: Alexandre Ghiti <[email protected]>
> Reviewed-by: Andrew Jones <[email protected]>
> ---
> arch/riscv/include/asm/tlb.h      | 8 +++++++-
> arch/riscv/include/asm/tlbflush.h | 3 +++
> arch/riscv/mm/tlbflush.c          | 7 +++++++
> 3 files changed, 17 insertions(+), 1 deletion(-)
>
> diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
> index 120bcf2ed8a8..1eb5682b2af6 100644
> --- a/arch/riscv/include/asm/tlb.h
> +++ b/arch/riscv/include/asm/tlb.h
> @@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
>
>  static inline void tlb_flush(struct mmu_gather *tlb)
>  {
> -        flush_tlb_mm(tlb->mm);
> +#ifdef CONFIG_MMU
> +        if (tlb->fullmm || tlb->need_flush_all)
> +                flush_tlb_mm(tlb->mm);
> +        else
> +                flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
> +                                   tlb_get_unmap_size(tlb));
> +#endif
>  }
>
>  #endif /* _ASM_RISCV_TLB_H */
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index a09196f8de68..f5c4fb0ae642 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -32,6 +32,8 @@ static inline void local_flush_tlb_page(unsigned long addr)
>  #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
>  void flush_tlb_all(void);
>  void flush_tlb_mm(struct mm_struct *mm);
> +void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
> +                        unsigned long end, unsigned int page_size);
>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>                       unsigned long end);
> @@ -52,6 +54,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
>  }
>
>  #define flush_tlb_mm(mm) flush_tlb_all()
> +#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
>  #endif /* !CONFIG_SMP || !CONFIG_MMU */
>
>  /* Flush a range of kernel pages */
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index 77be59aadc73..fa03289853d8 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -132,6 +132,13 @@ void flush_tlb_mm(struct mm_struct *mm)
>          __flush_tlb_range(mm, 0, -1, PAGE_SIZE);
>  }
>
> +void flush_tlb_mm_range(struct mm_struct *mm,
> +                        unsigned long start, unsigned long end,
> +                        unsigned int page_size)
> +{
> +        __flush_tlb_range(mm, start, end - start, page_size);
> +}
> +
>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
>  {
>          __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);

2023-10-18 11:27:03

by Alexandre Ghiti

Subject: Re: [PATCH v4 1/4] riscv: Improve flush_tlb()

Hi Samuel,

On Mon, Oct 9, 2023 at 7:53 PM Samuel Holland <[email protected]> wrote:
>
> On 2023-09-11 8:12 AM, Alexandre Ghiti wrote:
> > For now, flush_tlb() simply calls flush_tlb_mm() which results in a
>
> s/flush_tlb/tlb_flush/ here and in the subject.
>
> Otherwise:
> Reviewed-by: Samuel Holland <[email protected]>

Ahah good catch, thanks for that and the RB!

Alex

>
> > flush of the whole TLB. So let's use mmu_gather fields to provide a more
> > fine-grained flush of the TLB.
> >
> > Signed-off-by: Alexandre Ghiti <[email protected]>
> > Reviewed-by: Andrew Jones <[email protected]>
> > ---
> > arch/riscv/include/asm/tlb.h      | 8 +++++++-
> > arch/riscv/include/asm/tlbflush.h | 3 +++
> > arch/riscv/mm/tlbflush.c          | 7 +++++++
> > 3 files changed, 17 insertions(+), 1 deletion(-)
> >
> > diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
> > index 120bcf2ed8a8..1eb5682b2af6 100644
> > --- a/arch/riscv/include/asm/tlb.h
> > +++ b/arch/riscv/include/asm/tlb.h
> > @@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
> >
> >  static inline void tlb_flush(struct mmu_gather *tlb)
> >  {
> > -        flush_tlb_mm(tlb->mm);
> > +#ifdef CONFIG_MMU
> > +        if (tlb->fullmm || tlb->need_flush_all)
> > +                flush_tlb_mm(tlb->mm);
> > +        else
> > +                flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
> > +                                   tlb_get_unmap_size(tlb));
> > +#endif
> >  }
> >
> >  #endif /* _ASM_RISCV_TLB_H */
> > diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> > index a09196f8de68..f5c4fb0ae642 100644
> > --- a/arch/riscv/include/asm/tlbflush.h
> > +++ b/arch/riscv/include/asm/tlbflush.h
> > @@ -32,6 +32,8 @@ static inline void local_flush_tlb_page(unsigned long addr)
> >  #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
> >  void flush_tlb_all(void);
> >  void flush_tlb_mm(struct mm_struct *mm);
> > +void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
> > +                        unsigned long end, unsigned int page_size);
> >  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
> >  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
> >                       unsigned long end);
> > @@ -52,6 +54,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
> >  }
> >
> >  #define flush_tlb_mm(mm) flush_tlb_all()
> > +#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
> >  #endif /* !CONFIG_SMP || !CONFIG_MMU */
> >
> >  /* Flush a range of kernel pages */
> > diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> > index 77be59aadc73..fa03289853d8 100644
> > --- a/arch/riscv/mm/tlbflush.c
> > +++ b/arch/riscv/mm/tlbflush.c
> > @@ -132,6 +132,13 @@ void flush_tlb_mm(struct mm_struct *mm)
> >          __flush_tlb_range(mm, 0, -1, PAGE_SIZE);
> >  }
> >
> > +void flush_tlb_mm_range(struct mm_struct *mm,
> > +                        unsigned long start, unsigned long end,
> > +                        unsigned int page_size)
> > +{
> > +        __flush_tlb_range(mm, start, end - start, page_size);
> > +}
> > +
> >  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
> >  {
> >          __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
>