2024-04-17 14:15:41

by Lance Yang

Subject: [PATCH v8 3/3] mm/madvise: optimize lazyfreeing with mTHP in madvise_free

This patch optimizes lazyfreeing with PTE-mapped mTHP[1]
(Inspired by David Hildenbrand[2]). We aim to avoid unnecessary folio
splitting if the large folio is fully mapped within the target range.

If a large folio is locked or shared, or if we fail to split it, we just
leave it in place and advance to the next PTE in the range. But note that
the behavior is changed; previously, any failure of this sort would cause
the entire operation to give up. As large folios become more common,
sticking to the old way could result in wasted opportunities.

On an Intel I5 CPU, lazyfreeing a 1GiB VMA backed by PTE-mapped folios of
the same size results in the following runtimes for madvise(MADV_FREE) in
seconds (shorter is better):

Folio Size | Old | New | Change
------------------------------------------
4KiB | 0.590251 | 0.590259 | 0%
16KiB | 2.990447 | 0.185655 | -94%
32KiB | 2.547831 | 0.104870 | -95%
64KiB | 2.457796 | 0.052812 | -97%
128KiB | 2.281034 | 0.032777 | -99%
256KiB | 2.230387 | 0.017496 | -99%
512KiB | 2.189106 | 0.010781 | -99%
1024KiB | 2.183949 | 0.007753 | -99%
2048KiB | 0.002799 | 0.002804 | 0%

[1] https://lkml.kernel.org/r/[email protected]
[2] https://lore.kernel.org/linux-mm/[email protected]

Signed-off-by: Lance Yang <[email protected]>
---
mm/internal.h | 12 ++++-
mm/madvise.c | 141 ++++++++++++++++++++++++++++----------------------
mm/memory.c | 4 +-
3 files changed, 91 insertions(+), 66 deletions(-)
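
For reference, the numbers above can be reproduced with a small userspace
harness along the lines of the sketch below. This is an assumed setup (the
exact benchmark used for the table is not part of this series): it maps a
1GiB anonymous region, faults it in, and times a single madvise(MADV_FREE)
over the whole range, with the mTHP size selected beforehand via
/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/enabled.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <time.h>

#define VMA_SIZE (1UL << 30)    /* 1GiB */

int main(void)
{
        struct timespec t0, t1;
        char *buf;

        buf = mmap(NULL, VMA_SIZE, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* Fault in every page so the VMA is backed by (m)THP folios. */
        memset(buf, 1, VMA_SIZE);

        clock_gettime(CLOCK_MONOTONIC, &t0);
        if (madvise(buf, VMA_SIZE, MADV_FREE)) {
                perror("madvise");
                return 1;
        }
        clock_gettime(CLOCK_MONOTONIC, &t1);

        printf("MADV_FREE: %.6f s\n",
               (t1.tv_sec - t0.tv_sec) +
               (t1.tv_nsec - t0.tv_nsec) / 1e9);
        return 0;
}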

diff --git a/mm/internal.h b/mm/internal.h
index c6483f73ec13..daa59cef85d7 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -134,6 +134,8 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
* first one is writable.
* @any_young: Optional pointer to indicate whether any entry except the
* first one is young.
+ * @any_dirty: Optional pointer to indicate whether any entry except the
+ * first one is dirty.
*
* Detect a PTE batch: consecutive (present) PTEs that map consecutive
* pages of the same large folio.
@@ -149,18 +151,20 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
*/
static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
- bool *any_writable, bool *any_young)
+ bool *any_writable, bool *any_young, bool *any_dirty)
{
unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
const pte_t *end_ptep = start_ptep + max_nr;
pte_t expected_pte, *ptep;
- bool writable, young;
+ bool writable, young, dirty;
int nr;

if (any_writable)
*any_writable = false;
if (any_young)
*any_young = false;
+ if (any_dirty)
+ *any_dirty = false;

VM_WARN_ON_FOLIO(!pte_present(pte), folio);
VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
@@ -176,6 +180,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
writable = !!pte_write(pte);
if (any_young)
young = !!pte_young(pte);
+ if (any_dirty)
+ dirty = !!pte_dirty(pte);
pte = __pte_batch_clear_ignored(pte, flags);

if (!pte_same(pte, expected_pte))
@@ -193,6 +199,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
*any_writable |= writable;
if (any_young)
*any_young |= young;
+ if (any_dirty)
+ *any_dirty |= dirty;

nr = pte_batch_hint(ptep, pte);
expected_pte = pte_advance_pfn(expected_pte, nr);
diff --git a/mm/madvise.c b/mm/madvise.c
index f5e3699e7b54..d6f1889d6308 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -321,6 +321,39 @@ static inline bool can_do_file_pageout(struct vm_area_struct *vma)
file_permission(vma->vm_file, MAY_WRITE) == 0;
}

+static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
+ struct folio *folio, pte_t *ptep,
+ pte_t pte, bool *any_young,
+ bool *any_dirty)
+{
+ int max_nr = (end - addr) / PAGE_SIZE;
+ const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
+
+ return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
+ any_young, any_dirty);
+}
+
+static inline bool madvise_pte_split_folio(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr,
+ struct folio *folio, pte_t **pte,
+ spinlock_t **ptl)
+{
+ int err;
+
+ if (!folio_trylock(folio))
+ return false;
+
+ folio_get(folio);
+ pte_unmap_unlock(*pte, *ptl);
+ err = split_folio(folio);
+ folio_unlock(folio);
+ folio_put(folio);
+
+ *pte = pte_offset_map_lock(mm, pmd, addr, ptl);
+
+ return err == 0;
+}
+
static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
@@ -456,41 +489,30 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
* next pte in the range.
*/
if (folio_test_large(folio)) {
- const fpb_t fpb_flags = FPB_IGNORE_DIRTY |
- FPB_IGNORE_SOFT_DIRTY;
- int max_nr = (end - addr) / PAGE_SIZE;
bool any_young;

- nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
- fpb_flags, NULL, &any_young);
- if (any_young)
- ptent = pte_mkyoung(ptent);
+ nr = madvise_folio_pte_batch(addr, end, folio, pte,
+ ptent, &any_young, NULL);

if (nr < folio_nr_pages(folio)) {
- int err;
-
if (folio_likely_mapped_shared(folio))
continue;
if (pageout_anon_only_filter && !folio_test_anon(folio))
continue;
- if (!folio_trylock(folio))
- continue;
- folio_get(folio);
+
arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(start_pte, ptl);
- start_pte = NULL;
- err = split_folio(folio);
- folio_unlock(folio);
- folio_put(folio);
- start_pte = pte =
- pte_offset_map_lock(mm, pmd, addr, &ptl);
+ if (madvise_pte_split_folio(mm, pmd, addr,
+ folio, &start_pte, &ptl))
+ nr = 0;
if (!start_pte)
break;
+ pte = start_pte;
arch_enter_lazy_mmu_mode();
- if (!err)
- nr = 0;
continue;
}
+
+ if (any_young)
+ ptent = pte_mkyoung(ptent);
}

/*
@@ -688,44 +710,48 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
continue;

/*
- * If pmd isn't transhuge but the folio is large and
- * is owned by only this process, split it and
- * deactivate all pages.
+ * If we encounter a large folio, only split it if it is not
+ * fully mapped within the range we are operating on. Otherwise
+ * leave it as is so that it can be marked as lazyfree. If we
+ * fail to split a folio, leave it in place and advance to the
+ * next pte in the range.
*/
if (folio_test_large(folio)) {
- int err;
+ bool any_young, any_dirty;

- if (folio_likely_mapped_shared(folio))
- break;
- if (!folio_trylock(folio))
- break;
- folio_get(folio);
- arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(start_pte, ptl);
- start_pte = NULL;
- err = split_folio(folio);
- folio_unlock(folio);
- folio_put(folio);
- if (err)
- break;
- start_pte = pte =
- pte_offset_map_lock(mm, pmd, addr, &ptl);
- if (!start_pte)
- break;
- arch_enter_lazy_mmu_mode();
- pte--;
- addr -= PAGE_SIZE;
- continue;
+ nr = madvise_folio_pte_batch(addr, end, folio, pte,
+ ptent, &any_young, &any_dirty);
+
+ if (nr < folio_nr_pages(folio)) {
+ if (folio_likely_mapped_shared(folio))
+ continue;
+
+ arch_leave_lazy_mmu_mode();
+ if (madvise_pte_split_folio(mm, pmd, addr,
+ folio, &start_pte, &ptl))
+ nr = 0;
+ if (!start_pte)
+ break;
+ pte = start_pte;
+ arch_enter_lazy_mmu_mode();
+ continue;
+ }
+
+ if (any_young)
+ ptent = pte_mkyoung(ptent);
+ if (any_dirty)
+ ptent = pte_mkdirty(ptent);
}

if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
if (!folio_trylock(folio))
continue;
/*
- * If folio is shared with others, we mustn't clear
- * the folio's dirty flag.
+ * If we have a large folio at this point, we know it is
+ * fully mapped so if its mapcount is the same as its
+ * number of pages, it must be exclusive.
*/
- if (folio_mapcount(folio) != 1) {
+ if (folio_mapcount(folio) != folio_nr_pages(folio)) {
folio_unlock(folio);
continue;
}
@@ -741,19 +767,10 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
}

if (pte_young(ptent) || pte_dirty(ptent)) {
- /*
- * Some of architecture(ex, PPC) don't update TLB
- * with set_pte_at and tlb_remove_tlb_entry so for
- * the portability, remap the pte with old|clean
- * after pte clearing.
- */
- ptent = ptep_get_and_clear_full(mm, addr, pte,
- tlb->fullmm);
-
- ptent = pte_mkold(ptent);
- ptent = pte_mkclean(ptent);
- set_pte_at(mm, addr, pte, ptent);
- tlb_remove_tlb_entry(tlb, pte, addr);
+ clear_young_dirty_ptes(vma, addr, pte, nr,
+ CYDP_CLEAR_YOUNG |
+ CYDP_CLEAR_DIRTY);
+ tlb_remove_tlb_entries(tlb, pte, nr, addr);
}
folio_mark_lazyfree(folio);
}
diff --git a/mm/memory.c b/mm/memory.c
index 33d87b64d15d..9e07d1b9020c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -989,7 +989,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
flags |= FPB_IGNORE_SOFT_DIRTY;

nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
- &any_writable, NULL);
+ &any_writable, NULL, NULL);
folio_ref_add(folio, nr);
if (folio_test_anon(folio)) {
if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
@@ -1558,7 +1558,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
*/
if (unlikely(folio_test_large(folio) && max_nr != 1)) {
nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
- NULL, NULL);
+ NULL, NULL, NULL);

zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
addr, details, rss, force_flush,
--
2.33.1



2024-04-17 16:25:45

by Ryan Roberts

Subject: Re: [PATCH v8 3/3] mm/madvise: optimize lazyfreeing with mTHP in madvise_free

On 17/04/2024 15:14, Lance Yang wrote:
> This patch optimizes lazyfreeing with PTE-mapped mTHP[1]
> (Inspired by David Hildenbrand[2]). We aim to avoid unnecessary folio
> splitting if the large folio is fully mapped within the target range.
>
> If a large folio is locked or shared, or if we fail to split it, we just
> leave it in place and advance to the next PTE in the range. But note that
> the behavior is changed; previously, any failure of this sort would cause
> the entire operation to give up. As large folios become more common,
> sticking to the old way could result in wasted opportunities.
>
> On an Intel I5 CPU, lazyfreeing a 1GiB VMA backed by PTE-mapped folios of
> the same size results in the following runtimes for madvise(MADV_FREE) in
> seconds (shorter is better):
>
> Folio Size | Old | New | Change
> ------------------------------------------
> 4KiB | 0.590251 | 0.590259 | 0%
> 16KiB | 2.990447 | 0.185655 | -94%
> 32KiB | 2.547831 | 0.104870 | -95%
> 64KiB | 2.457796 | 0.052812 | -97%
> 128KiB | 2.281034 | 0.032777 | -99%
> 256KiB | 2.230387 | 0.017496 | -99%
> 512KiB | 2.189106 | 0.010781 | -99%
> 1024KiB | 2.183949 | 0.007753 | -99%
> 2048KiB | 0.002799 | 0.002804 | 0%
>
> [1] https://lkml.kernel.org/r/[email protected]
> [2] https://lore.kernel.org/linux-mm/[email protected]
>
> Signed-off-by: Lance Yang <[email protected]>

LGTM!

Reviewed-by: Ryan Roberts <[email protected]>

> ---
> mm/internal.h | 12 ++++-
> mm/madvise.c | 141 ++++++++++++++++++++++++++++----------------------
> mm/memory.c | 4 +-
> 3 files changed, 91 insertions(+), 66 deletions(-)
>
> diff --git a/mm/internal.h b/mm/internal.h
> index c6483f73ec13..daa59cef85d7 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -134,6 +134,8 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
> * first one is writable.
> * @any_young: Optional pointer to indicate whether any entry except the
> * first one is young.
> + * @any_dirty: Optional pointer to indicate whether any entry except the
> + * first one is dirty.
> *
> * Detect a PTE batch: consecutive (present) PTEs that map consecutive
> * pages of the same large folio.
> @@ -149,18 +151,20 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
> */
> static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
> - bool *any_writable, bool *any_young)
> + bool *any_writable, bool *any_young, bool *any_dirty)
> {
> unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
> const pte_t *end_ptep = start_ptep + max_nr;
> pte_t expected_pte, *ptep;
> - bool writable, young;
> + bool writable, young, dirty;
> int nr;
>
> if (any_writable)
> *any_writable = false;
> if (any_young)
> *any_young = false;
> + if (any_dirty)
> + *any_dirty = false;
>
> VM_WARN_ON_FOLIO(!pte_present(pte), folio);
> VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
> @@ -176,6 +180,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> writable = !!pte_write(pte);
> if (any_young)
> young = !!pte_young(pte);
> + if (any_dirty)
> + dirty = !!pte_dirty(pte);
> pte = __pte_batch_clear_ignored(pte, flags);
>
> if (!pte_same(pte, expected_pte))
> @@ -193,6 +199,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> *any_writable |= writable;
> if (any_young)
> *any_young |= young;
> + if (any_dirty)
> + *any_dirty |= dirty;
>
> nr = pte_batch_hint(ptep, pte);
> expected_pte = pte_advance_pfn(expected_pte, nr);
> diff --git a/mm/madvise.c b/mm/madvise.c
> index f5e3699e7b54..d6f1889d6308 100644
> --- a/mm/madvise.c
> +++ b/mm/madvise.c
> @@ -321,6 +321,39 @@ static inline bool can_do_file_pageout(struct vm_area_struct *vma)
> file_permission(vma->vm_file, MAY_WRITE) == 0;
> }
>
> +static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
> + struct folio *folio, pte_t *ptep,
> + pte_t pte, bool *any_young,
> + bool *any_dirty)
> +{
> + int max_nr = (end - addr) / PAGE_SIZE;
> + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
> +
> + return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
> + any_young, any_dirty);
> +}
> +
> +static inline bool madvise_pte_split_folio(struct mm_struct *mm, pmd_t *pmd,
> + unsigned long addr,
> + struct folio *folio, pte_t **pte,
> + spinlock_t **ptl)
> +{
> + int err;
> +
> + if (!folio_trylock(folio))
> + return false;
> +
> + folio_get(folio);
> + pte_unmap_unlock(*pte, *ptl);
> + err = split_folio(folio);
> + folio_unlock(folio);
> + folio_put(folio);
> +
> + *pte = pte_offset_map_lock(mm, pmd, addr, ptl);
> +
> + return err == 0;
> +}
> +
> static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
> unsigned long addr, unsigned long end,
> struct mm_walk *walk)
> @@ -456,41 +489,30 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
> * next pte in the range.
> */
> if (folio_test_large(folio)) {
> - const fpb_t fpb_flags = FPB_IGNORE_DIRTY |
> - FPB_IGNORE_SOFT_DIRTY;
> - int max_nr = (end - addr) / PAGE_SIZE;
> bool any_young;
>
> - nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
> - fpb_flags, NULL, &any_young);
> - if (any_young)
> - ptent = pte_mkyoung(ptent);
> + nr = madvise_folio_pte_batch(addr, end, folio, pte,
> + ptent, &any_young, NULL);
>
> if (nr < folio_nr_pages(folio)) {
> - int err;
> -
> if (folio_likely_mapped_shared(folio))
> continue;
> if (pageout_anon_only_filter && !folio_test_anon(folio))
> continue;
> - if (!folio_trylock(folio))
> - continue;
> - folio_get(folio);
> +
> arch_leave_lazy_mmu_mode();
> - pte_unmap_unlock(start_pte, ptl);
> - start_pte = NULL;
> - err = split_folio(folio);
> - folio_unlock(folio);
> - folio_put(folio);
> - start_pte = pte =
> - pte_offset_map_lock(mm, pmd, addr, &ptl);
> + if (madvise_pte_split_folio(mm, pmd, addr,
> + folio, &start_pte, &ptl))
> + nr = 0;
> if (!start_pte)
> break;
> + pte = start_pte;
> arch_enter_lazy_mmu_mode();
> - if (!err)
> - nr = 0;
> continue;
> }
> +
> + if (any_young)
> + ptent = pte_mkyoung(ptent);
> }
>
> /*
> @@ -688,44 +710,48 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
> continue;
>
> /*
> - * If pmd isn't transhuge but the folio is large and
> - * is owned by only this process, split it and
> - * deactivate all pages.
> + * If we encounter a large folio, only split it if it is not
> + * fully mapped within the range we are operating on. Otherwise
> + * leave it as is so that it can be marked as lazyfree. If we
> + * fail to split a folio, leave it in place and advance to the
> + * next pte in the range.
> */
> if (folio_test_large(folio)) {
> - int err;
> + bool any_young, any_dirty;
>
> - if (folio_likely_mapped_shared(folio))
> - break;
> - if (!folio_trylock(folio))
> - break;
> - folio_get(folio);
> - arch_leave_lazy_mmu_mode();
> - pte_unmap_unlock(start_pte, ptl);
> - start_pte = NULL;
> - err = split_folio(folio);
> - folio_unlock(folio);
> - folio_put(folio);
> - if (err)
> - break;
> - start_pte = pte =
> - pte_offset_map_lock(mm, pmd, addr, &ptl);
> - if (!start_pte)
> - break;
> - arch_enter_lazy_mmu_mode();
> - pte--;
> - addr -= PAGE_SIZE;
> - continue;
> + nr = madvise_folio_pte_batch(addr, end, folio, pte,
> + ptent, &any_young, &any_dirty);
> +
> + if (nr < folio_nr_pages(folio)) {
> + if (folio_likely_mapped_shared(folio))
> + continue;
> +
> + arch_leave_lazy_mmu_mode();
> + if (madvise_pte_split_folio(mm, pmd, addr,
> + folio, &start_pte, &ptl))
> + nr = 0;
> + if (!start_pte)
> + break;
> + pte = start_pte;
> + arch_enter_lazy_mmu_mode();
> + continue;
> + }
> +
> + if (any_young)
> + ptent = pte_mkyoung(ptent);
> + if (any_dirty)
> + ptent = pte_mkdirty(ptent);
> }
>
> if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
> if (!folio_trylock(folio))
> continue;
> /*
> - * If folio is shared with others, we mustn't clear
> - * the folio's dirty flag.
> + * If we have a large folio at this point, we know it is
> + * fully mapped so if its mapcount is the same as its
> + * number of pages, it must be exclusive.
> */
> - if (folio_mapcount(folio) != 1) {
> + if (folio_mapcount(folio) != folio_nr_pages(folio)) {
> folio_unlock(folio);
> continue;
> }
> @@ -741,19 +767,10 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
> }
>
> if (pte_young(ptent) || pte_dirty(ptent)) {
> - /*
> - * Some of architecture(ex, PPC) don't update TLB
> - * with set_pte_at and tlb_remove_tlb_entry so for
> - * the portability, remap the pte with old|clean
> - * after pte clearing.
> - */
> - ptent = ptep_get_and_clear_full(mm, addr, pte,
> - tlb->fullmm);
> -
> - ptent = pte_mkold(ptent);
> - ptent = pte_mkclean(ptent);
> - set_pte_at(mm, addr, pte, ptent);
> - tlb_remove_tlb_entry(tlb, pte, addr);
> + clear_young_dirty_ptes(vma, addr, pte, nr,
> + CYDP_CLEAR_YOUNG |
> + CYDP_CLEAR_DIRTY);
> + tlb_remove_tlb_entries(tlb, pte, nr, addr);
> }
> folio_mark_lazyfree(folio);
> }
> diff --git a/mm/memory.c b/mm/memory.c
> index 33d87b64d15d..9e07d1b9020c 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -989,7 +989,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
> flags |= FPB_IGNORE_SOFT_DIRTY;
>
> nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
> - &any_writable, NULL);
> + &any_writable, NULL, NULL);
> folio_ref_add(folio, nr);
> if (folio_test_anon(folio)) {
> if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
> @@ -1558,7 +1558,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
> */
> if (unlikely(folio_test_large(folio) && max_nr != 1)) {
> nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
> - NULL, NULL);
> + NULL, NULL, NULL);
>
> zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
> addr, details, rss, force_flush,


2024-04-17 17:07:06

by David Hildenbrand

Subject: Re: [PATCH v8 3/3] mm/madvise: optimize lazyfreeing with mTHP in madvise_free

On 17.04.24 16:14, Lance Yang wrote:
> This patch optimizes lazyfreeing with PTE-mapped mTHP[1]
> (Inspired by David Hildenbrand[2]). We aim to avoid unnecessary folio
> splitting if the large folio is fully mapped within the target range.
>
> If a large folio is locked or shared, or if we fail to split it, we just
> leave it in place and advance to the next PTE in the range. But note that
> the behavior is changed; previously, any failure of this sort would cause
> the entire operation to give up. As large folios become more common,
> sticking to the old way could result in wasted opportunities.
>
> On an Intel I5 CPU, lazyfreeing a 1GiB VMA backed by PTE-mapped folios of
> the same size results in the following runtimes for madvise(MADV_FREE) in
> seconds (shorter is better):
>
> Folio Size | Old | New | Change
> ------------------------------------------
> 4KiB | 0.590251 | 0.590259 | 0%
> 16KiB | 2.990447 | 0.185655 | -94%
> 32KiB | 2.547831 | 0.104870 | -95%
> 64KiB | 2.457796 | 0.052812 | -97%
> 128KiB | 2.281034 | 0.032777 | -99%
> 256KiB | 2.230387 | 0.017496 | -99%
> 512KiB | 2.189106 | 0.010781 | -99%
> 1024KiB | 2.183949 | 0.007753 | -99%
> 2048KiB | 0.002799 | 0.002804 | 0%
>
> [1] https://lkml.kernel.org/r/[email protected]
> [2] https://lore.kernel.org/linux-mm/[email protected]
>
> Signed-off-by: Lance Yang <[email protected]>

Some of the changes could have been moved into separate patches to ease
review ;)

At least the folio_pte_batch() change and factoring out some stuff from
madvise_cold_or_pageout_pte_range(). But see below on the latter.

> ---
> mm/internal.h | 12 ++++-
> mm/madvise.c | 141 ++++++++++++++++++++++++++++----------------------
> mm/memory.c | 4 +-
> 3 files changed, 91 insertions(+), 66 deletions(-)

[...]

> diff --git a/mm/madvise.c b/mm/madvise.c
> index f5e3699e7b54..d6f1889d6308 100644
> --- a/mm/madvise.c
> +++ b/mm/madvise.c
> @@ -321,6 +321,39 @@ static inline bool can_do_file_pageout(struct vm_area_struct *vma)
> file_permission(vma->vm_file, MAY_WRITE) == 0;
> }
>
> +static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
> + struct folio *folio, pte_t *ptep,
> + pte_t pte, bool *any_young,
> + bool *any_dirty)
> +{
> + int max_nr = (end - addr) / PAGE_SIZE;
> + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;

Reverse Christmas tree looks nicer ;)
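
That is, order the locals longest line first, roughly:

        const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
        int max_nr = (end - addr) / PAGE_SIZE;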

> +
> + return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
> + any_young, any_dirty);
> +}
> +
> +static inline bool madvise_pte_split_folio(struct mm_struct *mm, pmd_t *pmd,
> + unsigned long addr,
> + struct folio *folio, pte_t **pte,
> + spinlock_t **ptl)
> +{
> + int err;
> +
> + if (!folio_trylock(folio))
> + return false;
> +
> + folio_get(folio);
> + pte_unmap_unlock(*pte, *ptl);
> + err = split_folio(folio);
> + folio_unlock(folio);
> + folio_put(folio);
> +
> + *pte = pte_offset_map_lock(mm, pmd, addr, ptl);

Staring at this helper again, I am really not sure if we should have it.
Calling semantics are "special" and that pte_t **pte is just ...
"special" as well ;)

Can we just leave that part as is, in the caller? That would also mean
less madvise_cold_or_pageout_pte_range() churn ... which I would welcome
as part of this patch.
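
I.e., keep the trylock + split + remap-and-relock sequence open-coded at the
call site, roughly as it is today (sketch only, based on the code this patch
removes):

        if (!folio_trylock(folio))
                continue;
        folio_get(folio);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(start_pte, ptl);
        start_pte = NULL;
        err = split_folio(folio);
        folio_unlock(folio);
        folio_put(folio);
        start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!start_pte)
                break;
        arch_enter_lazy_mmu_mode();
        if (!err)
                nr = 0;
        continue;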

[...]

> @@ -741,19 +767,10 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
> }
>
> if (pte_young(ptent) || pte_dirty(ptent)) {
> - /*
> - * Some of architecture(ex, PPC) don't update TLB
> - * with set_pte_at and tlb_remove_tlb_entry so for
> - * the portability, remap the pte with old|clean
> - * after pte clearing.
> - */
> - ptent = ptep_get_and_clear_full(mm, addr, pte,
> - tlb->fullmm);
> -
> - ptent = pte_mkold(ptent);
> - ptent = pte_mkclean(ptent);
> - set_pte_at(mm, addr, pte, ptent);
> - tlb_remove_tlb_entry(tlb, pte, addr);
> + clear_young_dirty_ptes(vma, addr, pte, nr,
> + CYDP_CLEAR_YOUNG |
> + CYDP_CLEAR_DIRTY);

That indent looks odd. I suggest simply having a local variable

const cydp_t cydp_flags = CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY;

and then use cydp_flags here; that will make this easier to read.
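
Something like this at the call site (sketch, with cydp_flags declared with
the other locals):

        const cydp_t cydp_flags = CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY;
        ...
        if (pte_young(ptent) || pte_dirty(ptent)) {
                clear_young_dirty_ptes(vma, addr, pte, nr, cydp_flags);
                tlb_remove_tlb_entries(tlb, pte, nr, addr);
        }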

--
Cheers,

David / dhildenb


2024-04-18 00:10:13

by Lance Yang

Subject: Re: [PATCH v8 3/3] mm/madvise: optimize lazyfreeing with mTHP in madvise_free

On Thu, Apr 18, 2024 at 12:25 AM Ryan Roberts <[email protected]> wrote:
>
> On 17/04/2024 15:14, Lance Yang wrote:
> > This patch optimizes lazyfreeing with PTE-mapped mTHP[1]
> > (Inspired by David Hildenbrand[2]). We aim to avoid unnecessary folio
> > splitting if the large folio is fully mapped within the target range.
> >
> > If a large folio is locked or shared, or if we fail to split it, we just
> > leave it in place and advance to the next PTE in the range. But note that
> > the behavior is changed; previously, any failure of this sort would cause
> > the entire operation to give up. As large folios become more common,
> > sticking to the old way could result in wasted opportunities.
> >
> > On an Intel I5 CPU, lazyfreeing a 1GiB VMA backed by PTE-mapped folios of
> > the same size results in the following runtimes for madvise(MADV_FREE) in
> > seconds (shorter is better):
> >
> > Folio Size | Old | New | Change
> > ------------------------------------------
> > 4KiB | 0.590251 | 0.590259 | 0%
> > 16KiB | 2.990447 | 0.185655 | -94%
> > 32KiB | 2.547831 | 0.104870 | -95%
> > 64KiB | 2.457796 | 0.052812 | -97%
> > 128KiB | 2.281034 | 0.032777 | -99%
> > 256KiB | 2.230387 | 0.017496 | -99%
> > 512KiB | 2.189106 | 0.010781 | -99%
> > 1024KiB | 2.183949 | 0.007753 | -99%
> > 2048KiB | 0.002799 | 0.002804 | 0%
> >
> > [1] https://lkml.kernel.org/r/20231207161211.2374093-5-ryan.roberts@arm.com
> > [2] https://lore.kernel.org/linux-mm/[email protected]
> >
> > Signed-off-by: Lance Yang <[email protected]>
>
> LGTM!
>
> Reviewed-by: Ryan Roberts <[email protected]>

Hey Ryan,

Thanks a lot for taking time to review!
Lance

>
> > ---
> > mm/internal.h | 12 ++++-
> > mm/madvise.c | 141 ++++++++++++++++++++++++++++----------------------
> > mm/memory.c | 4 +-
> > 3 files changed, 91 insertions(+), 66 deletions(-)
> >
> > diff --git a/mm/internal.h b/mm/internal.h
> > index c6483f73ec13..daa59cef85d7 100644
> > --- a/mm/internal.h
> > +++ b/mm/internal.h
> > @@ -134,6 +134,8 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
> > * first one is writable.
> > * @any_young: Optional pointer to indicate whether any entry except the
> > * first one is young.
> > + * @any_dirty: Optional pointer to indicate whether any entry except the
> > + * first one is dirty.
> > *
> > * Detect a PTE batch: consecutive (present) PTEs that map consecutive
> > * pages of the same large folio.
> > @@ -149,18 +151,20 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
> > */
> > static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> > pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
> > - bool *any_writable, bool *any_young)
> > + bool *any_writable, bool *any_young, bool *any_dirty)
> > {
> > unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
> > const pte_t *end_ptep = start_ptep + max_nr;
> > pte_t expected_pte, *ptep;
> > - bool writable, young;
> > + bool writable, young, dirty;
> > int nr;
> >
> > if (any_writable)
> > *any_writable = false;
> > if (any_young)
> > *any_young = false;
> > + if (any_dirty)
> > + *any_dirty = false;
> >
> > VM_WARN_ON_FOLIO(!pte_present(pte), folio);
> > VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
> > @@ -176,6 +180,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> > writable = !!pte_write(pte);
> > if (any_young)
> > young = !!pte_young(pte);
> > + if (any_dirty)
> > + dirty = !!pte_dirty(pte);
> > pte = __pte_batch_clear_ignored(pte, flags);
> >
> > if (!pte_same(pte, expected_pte))
> > @@ -193,6 +199,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> > *any_writable |= writable;
> > if (any_young)
> > *any_young |= young;
> > + if (any_dirty)
> > + *any_dirty |= dirty;
> >
> > nr = pte_batch_hint(ptep, pte);
> > expected_pte = pte_advance_pfn(expected_pte, nr);
> > diff --git a/mm/madvise.c b/mm/madvise.c
> > index f5e3699e7b54..d6f1889d6308 100644
> > --- a/mm/madvise.c
> > +++ b/mm/madvise.c
> > @@ -321,6 +321,39 @@ static inline bool can_do_file_pageout(struct vm_area_struct *vma)
> > file_permission(vma->vm_file, MAY_WRITE) == 0;
> > }
> >
> > +static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
> > + struct folio *folio, pte_t *ptep,
> > + pte_t pte, bool *any_young,
> > + bool *any_dirty)
> > +{
> > + int max_nr = (end - addr) / PAGE_SIZE;
> > + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
> > +
> > + return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
> > + any_young, any_dirty);
> > +}
> > +
> > +static inline bool madvise_pte_split_folio(struct mm_struct *mm, pmd_t *pmd,
> > + unsigned long addr,
> > + struct folio *folio, pte_t **pte,
> > + spinlock_t **ptl)
> > +{
> > + int err;
> > +
> > + if (!folio_trylock(folio))
> > + return false;
> > +
> > + folio_get(folio);
> > + pte_unmap_unlock(*pte, *ptl);
> > + err = split_folio(folio);
> > + folio_unlock(folio);
> > + folio_put(folio);
> > +
> > + *pte = pte_offset_map_lock(mm, pmd, addr, ptl);
> > +
> > + return err == 0;
> > +}
> > +
> > static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
> > unsigned long addr, unsigned long end,
> > struct mm_walk *walk)
> > @@ -456,41 +489,30 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
> > * next pte in the range.
> > */
> > if (folio_test_large(folio)) {
> > - const fpb_t fpb_flags = FPB_IGNORE_DIRTY |
> > - FPB_IGNORE_SOFT_DIRTY;
> > - int max_nr = (end - addr) / PAGE_SIZE;
> > bool any_young;
> >
> > - nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
> > - fpb_flags, NULL, &any_young);
> > - if (any_young)
> > - ptent = pte_mkyoung(ptent);
> > + nr = madvise_folio_pte_batch(addr, end, folio, pte,
> > + ptent, &any_young, NULL);
> >
> > if (nr < folio_nr_pages(folio)) {
> > - int err;
> > -
> > if (folio_likely_mapped_shared(folio))
> > continue;
> > if (pageout_anon_only_filter && !folio_test_anon(folio))
> > continue;
> > - if (!folio_trylock(folio))
> > - continue;
> > - folio_get(folio);
> > +
> > arch_leave_lazy_mmu_mode();
> > - pte_unmap_unlock(start_pte, ptl);
> > - start_pte = NULL;
> > - err = split_folio(folio);
> > - folio_unlock(folio);
> > - folio_put(folio);
> > - start_pte = pte =
> > - pte_offset_map_lock(mm, pmd, addr, &ptl);
> > + if (madvise_pte_split_folio(mm, pmd, addr,
> > + folio, &start_pte, &ptl))
> > + nr = 0;
> > if (!start_pte)
> > break;
> > + pte = start_pte;
> > arch_enter_lazy_mmu_mode();
> > - if (!err)
> > - nr = 0;
> > continue;
> > }
> > +
> > + if (any_young)
> > + ptent = pte_mkyoung(ptent);
> > }
> >
> > /*
> > @@ -688,44 +710,48 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
> > continue;
> >
> > /*
> > - * If pmd isn't transhuge but the folio is large and
> > - * is owned by only this process, split it and
> > - * deactivate all pages.
> > + * If we encounter a large folio, only split it if it is not
> > + * fully mapped within the range we are operating on. Otherwise
> > + * leave it as is so that it can be marked as lazyfree. If we
> > + * fail to split a folio, leave it in place and advance to the
> > + * next pte in the range.
> > */
> > if (folio_test_large(folio)) {
> > - int err;
> > + bool any_young, any_dirty;
> >
> > - if (folio_likely_mapped_shared(folio))
> > - break;
> > - if (!folio_trylock(folio))
> > - break;
> > - folio_get(folio);
> > - arch_leave_lazy_mmu_mode();
> > - pte_unmap_unlock(start_pte, ptl);
> > - start_pte = NULL;
> > - err = split_folio(folio);
> > - folio_unlock(folio);
> > - folio_put(folio);
> > - if (err)
> > - break;
> > - start_pte = pte =
> > - pte_offset_map_lock(mm, pmd, addr, &ptl);
> > - if (!start_pte)
> > - break;
> > - arch_enter_lazy_mmu_mode();
> > - pte--;
> > - addr -= PAGE_SIZE;
> > - continue;
> > + nr = madvise_folio_pte_batch(addr, end, folio, pte,
> > + ptent, &any_young, &any_dirty);
> > +
> > + if (nr < folio_nr_pages(folio)) {
> > + if (folio_likely_mapped_shared(folio))
> > + continue;
> > +
> > + arch_leave_lazy_mmu_mode();
> > + if (madvise_pte_split_folio(mm, pmd, addr,
> > + folio, &start_pte, &ptl))
> > + nr = 0;
> > + if (!start_pte)
> > + break;
> > + pte = start_pte;
> > + arch_enter_lazy_mmu_mode();
> > + continue;
> > + }
> > +
> > + if (any_young)
> > + ptent = pte_mkyoung(ptent);
> > + if (any_dirty)
> > + ptent = pte_mkdirty(ptent);
> > }
> >
> > if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
> > if (!folio_trylock(folio))
> > continue;
> > /*
> > - * If folio is shared with others, we mustn't clear
> > - * the folio's dirty flag.
> > + * If we have a large folio at this point, we know it is
> > + * fully mapped so if its mapcount is the same as its
> > + * number of pages, it must be exclusive.
> > */
> > - if (folio_mapcount(folio) != 1) {
> > + if (folio_mapcount(folio) != folio_nr_pages(folio)) {
> > folio_unlock(folio);
> > continue;
> > }
> > @@ -741,19 +767,10 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
> > }
> >
> > if (pte_young(ptent) || pte_dirty(ptent)) {
> > - /*
> > - * Some of architecture(ex, PPC) don't update TLB
> > - * with set_pte_at and tlb_remove_tlb_entry so for
> > - * the portability, remap the pte with old|clean
> > - * after pte clearing.
> > - */
> > - ptent = ptep_get_and_clear_full(mm, addr, pte,
> > - tlb->fullmm);
> > -
> > - ptent = pte_mkold(ptent);
> > - ptent = pte_mkclean(ptent);
> > - set_pte_at(mm, addr, pte, ptent);
> > - tlb_remove_tlb_entry(tlb, pte, addr);
> > + clear_young_dirty_ptes(vma, addr, pte, nr,
> > + CYDP_CLEAR_YOUNG |
> > + CYDP_CLEAR_DIRTY);
> > + tlb_remove_tlb_entries(tlb, pte, nr, addr);
> > }
> > folio_mark_lazyfree(folio);
> > }
> > diff --git a/mm/memory.c b/mm/memory.c
> > index 33d87b64d15d..9e07d1b9020c 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -989,7 +989,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
> > flags |= FPB_IGNORE_SOFT_DIRTY;
> >
> > nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
> > - &any_writable, NULL);
> > + &any_writable, NULL, NULL);
> > folio_ref_add(folio, nr);
> > if (folio_test_anon(folio)) {
> > if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
> > @@ -1558,7 +1558,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
> > */
> > if (unlikely(folio_test_large(folio) && max_nr != 1)) {
> > nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
> > - NULL, NULL);
> > + NULL, NULL, NULL);
> >
> > zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
> > addr, details, rss, force_flush,
>

2024-04-18 00:18:46

by Lance Yang

Subject: Re: [PATCH v8 3/3] mm/madvise: optimize lazyfreeing with mTHP in madvise_free

Hey David,

Thanks a lot for taking time to review!

On Thu, Apr 18, 2024 at 1:05 AM David Hildenbrand <[email protected]> wrote:
>
> On 17.04.24 16:14, Lance Yang wrote:
> > This patch optimizes lazyfreeing with PTE-mapped mTHP[1]
> > (Inspired by David Hildenbrand[2]). We aim to avoid unnecessary folio
> > splitting if the large folio is fully mapped within the target range.
> >
> > If a large folio is locked or shared, or if we fail to split it, we just
> > leave it in place and advance to the next PTE in the range. But note that
> > the behavior is changed; previously, any failure of this sort would cause
> > the entire operation to give up. As large folios become more common,
> > sticking to the old way could result in wasted opportunities.
> >
> > On an Intel I5 CPU, lazyfreeing a 1GiB VMA backed by PTE-mapped folios of
> > the same size results in the following runtimes for madvise(MADV_FREE) in
> > seconds (shorter is better):
> >
> > Folio Size | Old | New | Change
> > ------------------------------------------
> > 4KiB | 0.590251 | 0.590259 | 0%
> > 16KiB | 2.990447 | 0.185655 | -94%
> > 32KiB | 2.547831 | 0.104870 | -95%
> > 64KiB | 2.457796 | 0.052812 | -97%
> > 128KiB | 2.281034 | 0.032777 | -99%
> > 256KiB | 2.230387 | 0.017496 | -99%
> > 512KiB | 2.189106 | 0.010781 | -99%
> > 1024KiB | 2.183949 | 0.007753 | -99%
> > 2048KiB | 0.002799 | 0.002804 | 0%
> >
> > [1] https://lkml.kernel.org/r/20231207161211.2374093-5-ryan.roberts@arm.com
> > [2] https://lore.kernel.org/linux-mm/[email protected]
> >
> > Signed-off-by: Lance Yang <[email protected]>
>
> Some of the changes could have been moved into separate patches to ease
> review ;)
>
> At least the folio_pte_batch() change and factoring out some stuff from
> madvise_cold_or_pageout_pte_range(). But see below on the latter.

Thanks for your suggestion! It makes sense to split some of the changes
into separate patches for easier review :)

>
> > ---
> > mm/internal.h | 12 ++++-
> > mm/madvise.c | 141 ++++++++++++++++++++++++++++----------------------
> > mm/memory.c | 4 +-
> > 3 files changed, 91 insertions(+), 66 deletions(-)
>
> [...]
>
> > diff --git a/mm/madvise.c b/mm/madvise.c
> > index f5e3699e7b54..d6f1889d6308 100644
> > --- a/mm/madvise.c
> > +++ b/mm/madvise.c
> > @@ -321,6 +321,39 @@ static inline bool can_do_file_pageout(struct vm_area_struct *vma)
> > file_permission(vma->vm_file, MAY_WRITE) == 0;
> > }
> >
> > +static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
> > + struct folio *folio, pte_t *ptep,
> > + pte_t pte, bool *any_young,
> > + bool *any_dirty)
> > +{
> > + int max_nr = (end - addr) / PAGE_SIZE;
> > + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
>
> Reverse Christmas tree looks nicer ;)

Yep, I understand.

>
> > +
> > + return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
> > + any_young, any_dirty);
> > +}
> > +
> > +static inline bool madvise_pte_split_folio(struct mm_struct *mm, pmd_t *pmd,
> > + unsigned long addr,
> > + struct folio *folio, pte_t **pte,
> > + spinlock_t **ptl)
> > +{
> > + int err;
> > +
> > + if (!folio_trylock(folio))
> > + return false;
> > +
> > + folio_get(folio);
> > + pte_unmap_unlock(*pte, *ptl);
> > + err = split_folio(folio);
> > + folio_unlock(folio);
> > + folio_put(folio);
> > +
> > + *pte = pte_offset_map_lock(mm, pmd, addr, ptl);
>
> Staring at this helper again, I am really not sure if we should have it.
> Calling semantics are "special" and that pte_t **pte is just ...
> "special" as well ;)
>
> Can we just leave that part as is, in the caller? That would also mean
> less madvise_cold_or_pageout_pte_range() churn ... which i would welcome
> as part of this patch.

Yep, let's leave that part as it is in the caller :)

>
> [...]
>
> > @@ -741,19 +767,10 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
> > }
> >
> > if (pte_young(ptent) || pte_dirty(ptent)) {
> > - /*
> > - * Some of architecture(ex, PPC) don't update TLB
> > - * with set_pte_at and tlb_remove_tlb_entry so for
> > - * the portability, remap the pte with old|clean
> > - * after pte clearing.
> > - */
> > - ptent = ptep_get_and_clear_full(mm, addr, pte,
> > - tlb->fullmm);
> > -
> > - ptent = pte_mkold(ptent);
> > - ptent = pte_mkclean(ptent);
> > - set_pte_at(mm, addr, pte, ptent);
> > - tlb_remove_tlb_entry(tlb, pte, addr);
> > + clear_young_dirty_ptes(vma, addr, pte, nr,
> > + CYDP_CLEAR_YOUNG |
> > + CYDP_CLEAR_DIRTY);
>
> That indent looks odd. I suggest simply having a local variable
>
> const cydp_t cydp_flags = CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY;
>
> and then use cydp_flags here that will make this easier to read.

Nice. I'll use cydp_flags here in the next version.

Thanks again for the review!
Lance

>
> --
> Cheers,
>
> David / dhildenb
>