This patch optimizes lazyfreeing with PTE-mapped mTHP[1] (inspired by
David Hildenbrand[2]). We aim to avoid unnecessary folio splitting if the
large folio is fully mapped within the target range.

If a large folio is locked or shared, or if we fail to split it, we just
leave it in place and advance to the next PTE in the range. Note that this
changes the previous behavior: any such failure used to cause the entire
operation to give up. As large folios become more common, sticking to the
old way could result in wasted opportunities.

On an Intel i5 CPU, lazyfreeing a 1GiB VMA backed by PTE-mapped folios of
the same size results in the following runtimes for madvise(MADV_FREE) in
seconds (shorter is better):
Folio Size | Old | New | Change
------------------------------------------
4KiB | 0.590251 | 0.590259 | 0%
16KiB | 2.990447 | 0.185655 | -94%
32KiB | 2.547831 | 0.104870 | -95%
64KiB | 2.457796 | 0.052812 | -97%
128KiB | 2.281034 | 0.032777 | -99%
256KiB | 2.230387 | 0.017496 | -99%
512KiB | 2.189106 | 0.010781 | -99%
1024KiB | 2.183949 | 0.007753 | -99%
2048KiB | 0.002799 | 0.002804 | 0%
[1] https://lkml.kernel.org/r/[email protected]
[2] https://lore.kernel.org/linux-mm/[email protected]
Signed-off-by: Lance Yang <[email protected]>
---
include/linux/pgtable.h | 34 +++++++++
mm/internal.h | 12 +++-
mm/madvise.c | 149 ++++++++++++++++++++++------------------
mm/memory.c | 4 +-
4 files changed, 129 insertions(+), 70 deletions(-)
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 0f4b2faa1d71..4dd442787420 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -489,6 +489,40 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
}
#endif
+#ifndef mkold_clean_ptes
+/**
+ * mkold_clean_ptes - Mark PTEs that map consecutive pages of the same folio
+ * as old and clean.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to mark old and clean.
+ *
+ * May be overridden by the architecture; otherwise, implemented by
+ * get_and_clear/modify/set for each pte in the range.
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void mkold_clean_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr)
+{
+ pte_t pte;
+
+ for (;;) {
+ pte = ptep_get_and_clear(mm, addr, ptep);
+ set_pte_at(mm, addr, ptep, pte_mkclean(pte_mkold(pte)));
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
+}
+#endif
+
static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
diff --git a/mm/internal.h b/mm/internal.h
index 57c1055d5568..792a9baf0d14 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -132,6 +132,8 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
* first one is writable.
* @any_young: Optional pointer to indicate whether any entry except the
* first one is young.
+ * @any_dirty: Optional pointer to indicate whether any entry except the
+ * first one is dirty.
*
* Detect a PTE batch: consecutive (present) PTEs that map consecutive
* pages of the same large folio.
@@ -147,18 +149,20 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
*/
static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
- bool *any_writable, bool *any_young)
+ bool *any_writable, bool *any_young, bool *any_dirty)
{
unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
const pte_t *end_ptep = start_ptep + max_nr;
pte_t expected_pte, *ptep;
- bool writable, young;
+ bool writable, young, dirty;
int nr;
if (any_writable)
*any_writable = false;
if (any_young)
*any_young = false;
+ if (any_dirty)
+ *any_dirty = false;
VM_WARN_ON_FOLIO(!pte_present(pte), folio);
VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
@@ -174,6 +178,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
writable = !!pte_write(pte);
if (any_young)
young = !!pte_young(pte);
+ if (any_dirty)
+ dirty = !!pte_dirty(pte);
pte = __pte_batch_clear_ignored(pte, flags);
if (!pte_same(pte, expected_pte))
@@ -191,6 +197,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
*any_writable |= writable;
if (any_young)
*any_young |= young;
+ if (any_dirty)
+ *any_dirty |= dirty;
nr = pte_batch_hint(ptep, pte);
expected_pte = pte_advance_pfn(expected_pte, nr);
diff --git a/mm/madvise.c b/mm/madvise.c
index bf26cf2b7715..0777df2e3691 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -321,6 +321,39 @@ static inline bool can_do_file_pageout(struct vm_area_struct *vma)
file_permission(vma->vm_file, MAY_WRITE) == 0;
}
+static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
+ struct folio *folio, pte_t *ptep,
+ pte_t pte, bool *any_young,
+ bool *any_dirty)
+{
+ int max_nr = (end - addr) / PAGE_SIZE;
+ const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
+
+ return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
+ any_young, any_dirty);
+}
+
+static inline bool madvise_pte_split_folio(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr,
+ struct folio *folio, pte_t **pte,
+ spinlock_t **ptl)
+{
+ int err;
+
+ if (!folio_trylock(folio))
+ return false;
+
+ folio_get(folio);
+ pte_unmap_unlock(*pte, *ptl);
+ err = split_folio(folio);
+ folio_unlock(folio);
+ folio_put(folio);
+
+ *pte = pte_offset_map_lock(mm, pmd, addr, ptl);
+
+ return err == 0;
+}
+
static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
@@ -456,41 +489,29 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
* next pte in the range.
*/
if (folio_test_large(folio)) {
- const fpb_t fpb_flags = FPB_IGNORE_DIRTY |
- FPB_IGNORE_SOFT_DIRTY;
- int max_nr = (end - addr) / PAGE_SIZE;
bool any_young;
-
- nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
- fpb_flags, NULL, &any_young);
- if (any_young)
- ptent = pte_mkyoung(ptent);
+ nr = madvise_folio_pte_batch(addr, end, folio, pte,
+ ptent, &any_young, NULL);
if (nr < folio_nr_pages(folio)) {
- int err;
-
if (folio_likely_mapped_shared(folio))
continue;
if (pageout_anon_only_filter && !folio_test_anon(folio))
continue;
- if (!folio_trylock(folio))
- continue;
- folio_get(folio);
+
arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(start_pte, ptl);
- start_pte = NULL;
- err = split_folio(folio);
- folio_unlock(folio);
- folio_put(folio);
- start_pte = pte =
- pte_offset_map_lock(mm, pmd, addr, &ptl);
+ if (madvise_pte_split_folio(mm, pmd, addr,
+ folio, &start_pte, &ptl))
+ nr = 0;
if (!start_pte)
break;
+ pte = start_pte;
arch_enter_lazy_mmu_mode();
- if (!err)
- nr = 0;
continue;
}
+
+ if (any_young)
+ ptent = pte_mkyoung(ptent);
}
/*
@@ -687,47 +708,54 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
continue;
/*
- * If pmd isn't transhuge but the folio is large and
- * is owned by only this process, split it and
- * deactivate all pages.
+ * If we encounter a large folio, only split it if it is not
+ * fully mapped within the range we are operating on. Otherwise
+ * leave it as is so that it can be marked as lazyfree. If we
+ * fail to split a folio, leave it in place and advance to the
+ * next pte in the range.
*/
if (folio_test_large(folio)) {
- int err;
+ bool any_young, any_dirty;
+ nr = madvise_folio_pte_batch(addr, end, folio, pte,
+ ptent, &any_young, &any_dirty);
- if (folio_likely_mapped_shared(folio))
- break;
- if (!folio_trylock(folio))
- break;
- folio_get(folio);
- arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(start_pte, ptl);
- start_pte = NULL;
- err = split_folio(folio);
+ if (nr < folio_nr_pages(folio)) {
+ if (folio_likely_mapped_shared(folio))
+ continue;
+
+ arch_leave_lazy_mmu_mode();
+ if (madvise_pte_split_folio(mm, pmd, addr,
+ folio, &start_pte, &ptl))
+ nr = 0;
+ if (!start_pte)
+ break;
+ pte = start_pte;
+ arch_enter_lazy_mmu_mode();
+ continue;
+ }
+
+ if (any_young)
+ ptent = pte_mkyoung(ptent);
+ if (any_dirty)
+ ptent = pte_mkdirty(ptent);
+ }
+
+ if (!folio_trylock(folio))
+ continue;
+ /*
+ * If we have a large folio at this point, we know it is fully mapped
+ * so if its mapcount is the same as its number of pages, it must be
+ * exclusive.
+ */
+ if (folio_mapcount(folio) != folio_nr_pages(folio)) {
folio_unlock(folio);
- folio_put(folio);
- if (err)
- break;
- start_pte = pte =
- pte_offset_map_lock(mm, pmd, addr, &ptl);
- if (!start_pte)
- break;
- arch_enter_lazy_mmu_mode();
- pte--;
- addr -= PAGE_SIZE;
continue;
}
+ folio_unlock(folio);
if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
if (!folio_trylock(folio))
continue;
- /*
- * If folio is shared with others, we mustn't clear
- * the folio's dirty flag.
- */
- if (folio_mapcount(folio) != 1) {
- folio_unlock(folio);
- continue;
- }
if (folio_test_swapcache(folio) &&
!folio_free_swap(folio)) {
@@ -740,19 +768,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
}
if (pte_young(ptent) || pte_dirty(ptent)) {
- /*
- * Some of architecture(ex, PPC) don't update TLB
- * with set_pte_at and tlb_remove_tlb_entry so for
- * the portability, remap the pte with old|clean
- * after pte clearing.
- */
- ptent = ptep_get_and_clear_full(mm, addr, pte,
- tlb->fullmm);
-
- ptent = pte_mkold(ptent);
- ptent = pte_mkclean(ptent);
- set_pte_at(mm, addr, pte, ptent);
- tlb_remove_tlb_entry(tlb, pte, addr);
+ mkold_clean_ptes(mm, addr, pte, nr);
+ tlb_remove_tlb_entries(tlb, pte, nr, addr);
}
folio_mark_lazyfree(folio);
}
diff --git a/mm/memory.c b/mm/memory.c
index 1723c8ddf9cb..fe9d4d64c627 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -989,7 +989,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
flags |= FPB_IGNORE_SOFT_DIRTY;
nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
- &any_writable, NULL);
+ &any_writable, NULL, NULL);
folio_ref_add(folio, nr);
if (folio_test_anon(folio)) {
if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
@@ -1559,7 +1559,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
*/
if (unlikely(folio_test_large(folio) && max_nr != 1)) {
nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
- NULL, NULL);
+ NULL, NULL, NULL);
zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
addr, details, rss, force_flush,
--
2.33.1
On 11.04.24 13:11, Ryan Roberts wrote:
> On 08/04/2024 05:24, Lance Yang wrote:
>> [...]
>> +static inline void mkold_clean_ptes(struct mm_struct *mm, unsigned long addr,
>> + pte_t *ptep, unsigned int nr)
>
> Just thinking out loud, I wonder if it would be cleaner to convert mkold_ptes()
> (which I added as part of swap-out) to something like:
>
> clear_young_dirty_ptes(struct mm_struct *mm, unsigned long addr,
> pte_t *ptep, unsigned int nr,
> bool clear_young, bool clear_dirty);
>
> Then we can use the same function for both use cases and also have the ability
> to only clear dirty in future if we ever need it. The other advantage is that we
> only need to plumb a single function down the arm64 arch code. As it currently
> stands, those 2 functions would be duplicating most of their code.
Yes. Maybe better use proper __bitwise flags, the compiler should be
smart enough to optimize either way.
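
For illustration, a rough sketch of the flag-based variant (the cydp_t
type and the flag names below are made up for this example, not an
existing API):

	/* hypothetical names, for illustration only */
	typedef unsigned int __bitwise cydp_t;

	#define CYDP_CLEAR_YOUNG	((__force cydp_t)BIT(0))
	#define CYDP_CLEAR_DIRTY	((__force cydp_t)BIT(1))

	void clear_young_dirty_ptes(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, unsigned int nr, cydp_t flags);

The loop body could then stay essentially as in the proposed
mkold_clean_ptes(), only applying pte_mkold()/pte_mkclean() when the
corresponding flag is set.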
--
Cheers,
David / dhildenb
On 08/04/2024 05:24, Lance Yang wrote:
> [...]
> +#ifndef mkold_clean_ptes
> +/**
> + * mkold_clean_ptes - Mark PTEs that map consecutive pages of the same folio
> + * as old and clean.
> + * @mm: Address space the pages are mapped into.
> + * @addr: Address the first page is mapped at.
> + * @ptep: Page table pointer for the first entry.
> + * @nr: Number of entries to mark old and clean.
> + *
> + * May be overridden by the architecture; otherwise, implemented by
> + * get_and_clear/modify/set for each pte in the range.
> + *
> + * Note that PTE bits in the PTE range besides the PFN can differ. For example,
> + * some PTEs might be write-protected.
> + *
> + * Context: The caller holds the page table lock. The PTEs map consecutive
> + * pages that belong to the same folio. The PTEs are all in the same PMD.
> + */
> +static inline void mkold_clean_ptes(struct mm_struct *mm, unsigned long addr,
> + pte_t *ptep, unsigned int nr)
Just thinking out loud, I wonder if it would be cleaner to convert mkold_ptes()
(which I added as part of swap-out) to something like:
clear_young_dirty_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned int nr,
bool clear_young, bool clear_dirty);
Then we can use the same function for both use cases and also have the ability
to only clear dirty in future if we ever need it. The other advantage is that we
only need to plumb a single function down the arm64 arch code. As it currently
stands, those 2 functions would be duplicating most of their code.
Generated code would still be the same since I'd expect the callsites to be
passing in constants for clear_young and clear_dirty.
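
As a rough, untested sketch, the generic fallback could just be a
parameterized version of the mkold_clean_ptes() loop quoted below:

	/*
	 * Untested sketch: same get_and_clear/modify/set loop, but which
	 * bits get cleared is selected by the caller.
	 */
	static inline void clear_young_dirty_ptes(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep, unsigned int nr,
			bool clear_young, bool clear_dirty)
	{
		pte_t pte;

		for (;;) {
			pte = ptep_get_and_clear(mm, addr, ptep);
			if (clear_young)
				pte = pte_mkold(pte);
			if (clear_dirty)
				pte = pte_mkclean(pte);
			set_pte_at(mm, addr, ptep, pte);
			if (--nr == 0)
				break;
			ptep++;
			addr += PAGE_SIZE;
		}
	}

madvise_free_pte_range() would then pass clear_young and clear_dirty both
true, while the swap-out path would pass clear_young=true, clear_dirty=false.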
> +{
> + pte_t pte;
> +
> + for (;;) {
> + pte = ptep_get_and_clear(mm, addr, ptep);
> + set_pte_at(mm, addr, ptep, pte_mkclean(pte_mkold(pte)));
> + if (--nr == 0)
> + break;
> + ptep++;
> + addr += PAGE_SIZE;
> + }
> +}
> +#endif
> +
> static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
> pte_t *ptep)
> {
> diff --git a/mm/internal.h b/mm/internal.h
> index 57c1055d5568..792a9baf0d14 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -132,6 +132,8 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
> * first one is writable.
> * @any_young: Optional pointer to indicate whether any entry except the
> * first one is young.
> + * @any_dirty: Optional pointer to indicate whether any entry except the
> + * first one is dirty.
> *
> * Detect a PTE batch: consecutive (present) PTEs that map consecutive
> * pages of the same large folio.
> @@ -147,18 +149,20 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
> */
> static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
> - bool *any_writable, bool *any_young)
> + bool *any_writable, bool *any_young, bool *any_dirty)
> {
> unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
> const pte_t *end_ptep = start_ptep + max_nr;
> pte_t expected_pte, *ptep;
> - bool writable, young;
> + bool writable, young, dirty;
> int nr;
>
> if (any_writable)
> *any_writable = false;
> if (any_young)
> *any_young = false;
> + if (any_dirty)
> + *any_dirty = false;
>
> VM_WARN_ON_FOLIO(!pte_present(pte), folio);
> VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
> @@ -174,6 +178,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> writable = !!pte_write(pte);
> if (any_young)
> young = !!pte_young(pte);
> + if (any_dirty)
> + dirty = !!pte_dirty(pte);
> pte = __pte_batch_clear_ignored(pte, flags);
>
> if (!pte_same(pte, expected_pte))
> @@ -191,6 +197,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> *any_writable |= writable;
> if (any_young)
> *any_young |= young;
> + if (any_dirty)
> + *any_dirty |= dirty;
>
> nr = pte_batch_hint(ptep, pte);
> expected_pte = pte_advance_pfn(expected_pte, nr);
> diff --git a/mm/madvise.c b/mm/madvise.c
> index bf26cf2b7715..0777df2e3691 100644
> --- a/mm/madvise.c
> +++ b/mm/madvise.c
> @@ -321,6 +321,39 @@ static inline bool can_do_file_pageout(struct vm_area_struct *vma)
> file_permission(vma->vm_file, MAY_WRITE) == 0;
> }
>
> +static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
> + struct folio *folio, pte_t *ptep,
> + pte_t pte, bool *any_young,
> + bool *any_dirty)
> +{
> + int max_nr = (end - addr) / PAGE_SIZE;
> + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
> +
> + return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
> + any_young, any_dirty);
> +}
> +
> +static inline bool madvise_pte_split_folio(struct mm_struct *mm, pmd_t *pmd,
> + unsigned long addr,
> + struct folio *folio, pte_t **pte,
> + spinlock_t **ptl)
> +{
> + int err;
> +
> + if (!folio_trylock(folio))
> + return false;
> +
> + folio_get(folio);
> + pte_unmap_unlock(*pte, *ptl);
> + err = split_folio(folio);
> + folio_unlock(folio);
> + folio_put(folio);
> +
> + *pte = pte_offset_map_lock(mm, pmd, addr, ptl);
> +
> + return err == 0;
> +}
> +
> static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
> unsigned long addr, unsigned long end,
> struct mm_walk *walk)
> @@ -456,41 +489,29 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
> * next pte in the range.
> */
> if (folio_test_large(folio)) {
> - const fpb_t fpb_flags = FPB_IGNORE_DIRTY |
> - FPB_IGNORE_SOFT_DIRTY;
> - int max_nr = (end - addr) / PAGE_SIZE;
> bool any_young;
> -
nit: there should be a blank line between variable declarations and following
code. You have removed it here (and similar in free function). Did you run
checkpatch.pl? It would have caught these things.
> - nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
> - fpb_flags, NULL, &any_young);
> - if (any_young)
> - ptent = pte_mkyoung(ptent);
> + nr = madvise_folio_pte_batch(addr, end, folio, pte,
> + ptent, &any_young, NULL);
>
> if (nr < folio_nr_pages(folio)) {
> - int err;
> -
> if (folio_likely_mapped_shared(folio))
> continue;
> if (pageout_anon_only_filter && !folio_test_anon(folio))
> continue;
> - if (!folio_trylock(folio))
> - continue;
> - folio_get(folio);
> +
> arch_leave_lazy_mmu_mode();
> - pte_unmap_unlock(start_pte, ptl);
> - start_pte = NULL;
> - err = split_folio(folio);
> - folio_unlock(folio);
> - folio_put(folio);
> - start_pte = pte =
> - pte_offset_map_lock(mm, pmd, addr, &ptl);
> + if (madvise_pte_split_folio(mm, pmd, addr,
> + folio, &start_pte, &ptl))
> + nr = 0;
> if (!start_pte)
> break;
> + pte = start_pte;
> arch_enter_lazy_mmu_mode();
> - if (!err)
> - nr = 0;
> continue;
> }
> +
> + if (any_young)
> + ptent = pte_mkyoung(ptent);
> }
>
> /*
> @@ -687,47 +708,54 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
> continue;
>
> /*
> - * If pmd isn't transhuge but the folio is large and
> - * is owned by only this process, split it and
> - * deactivate all pages.
> + * If we encounter a large folio, only split it if it is not
> + * fully mapped within the range we are operating on. Otherwise
> + * leave it as is so that it can be marked as lazyfree. If we
> + * fail to split a folio, leave it in place and advance to the
> + * next pte in the range.
> */
> if (folio_test_large(folio)) {
> - int err;
> + bool any_young, any_dirty;
> + nr = madvise_folio_pte_batch(addr, end, folio, pte,
> + ptent, &any_young, &any_dirty);
>
> - if (folio_likely_mapped_shared(folio))
> - break;
> - if (!folio_trylock(folio))
> - break;
> - folio_get(folio);
> - arch_leave_lazy_mmu_mode();
> - pte_unmap_unlock(start_pte, ptl);
> - start_pte = NULL;
> - err = split_folio(folio);
> + if (nr < folio_nr_pages(folio)) {
> + if (folio_likely_mapped_shared(folio))
> + continue;
> +
> + arch_leave_lazy_mmu_mode();
> + if (madvise_pte_split_folio(mm, pmd, addr,
> + folio, &start_pte, &ptl))
> + nr = 0;
> + if (!start_pte)
> + break;
> + pte = start_pte;
> + arch_enter_lazy_mmu_mode();
> + continue;
> + }
> +
> + if (any_young)
> + ptent = pte_mkyoung(ptent);
> + if (any_dirty)
> + ptent = pte_mkdirty(ptent);
> + }
> +
> + if (!folio_trylock(folio))
> + continue;
This is still wrong. This should all be protected by the "if
(folio_test_swapcache(folio) || folio_test_dirty(folio))" as it was previously
so that you only call folio_trylock() if that condition is true. You are
unconditionally locking here, then unlocking, then relocking below if the
condition is met. Just put everything inside the condition and lock once.
Thanks,
Ryan
> + /*
> + * If we have a large folio at this point, we know it is fully mapped
> + * so if its mapcount is the same as its number of pages, it must be
> + * exclusive.
> + */
> + if (folio_mapcount(folio) != folio_nr_pages(folio)) {
> folio_unlock(folio);
> - folio_put(folio);
> - if (err)
> - break;
> - start_pte = pte =
> - pte_offset_map_lock(mm, pmd, addr, &ptl);
> - if (!start_pte)
> - break;
> - arch_enter_lazy_mmu_mode();
> - pte--;
> - addr -= PAGE_SIZE;
> continue;
> }
> + folio_unlock(folio);
>
> if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
> if (!folio_trylock(folio))
> continue;
> - /*
> - * If folio is shared with others, we mustn't clear
> - * the folio's dirty flag.
> - */
> - if (folio_mapcount(folio) != 1) {
> - folio_unlock(folio);
> - continue;
> - }
>
> if (folio_test_swapcache(folio) &&
> !folio_free_swap(folio)) {
> @@ -740,19 +768,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
> }
>
> if (pte_young(ptent) || pte_dirty(ptent)) {
> - /*
> - * Some of architecture(ex, PPC) don't update TLB
> - * with set_pte_at and tlb_remove_tlb_entry so for
> - * the portability, remap the pte with old|clean
> - * after pte clearing.
> - */
> - ptent = ptep_get_and_clear_full(mm, addr, pte,
> - tlb->fullmm);
> -
> - ptent = pte_mkold(ptent);
> - ptent = pte_mkclean(ptent);
> - set_pte_at(mm, addr, pte, ptent);
> - tlb_remove_tlb_entry(tlb, pte, addr);
> + mkold_clean_ptes(mm, addr, pte, nr);
> + tlb_remove_tlb_entries(tlb, pte, nr, addr);
> }
> folio_mark_lazyfree(folio);
> }
> diff --git a/mm/memory.c b/mm/memory.c
> index 1723c8ddf9cb..fe9d4d64c627 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -989,7 +989,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
> flags |= FPB_IGNORE_SOFT_DIRTY;
>
> nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
> - &any_writable, NULL);
> + &any_writable, NULL, NULL);
> folio_ref_add(folio, nr);
> if (folio_test_anon(folio)) {
> if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
> @@ -1559,7 +1559,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
> */
> if (unlikely(folio_test_large(folio) && max_nr != 1)) {
> nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
> - NULL, NULL);
> + NULL, NULL, NULL);
>
> zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
> addr, details, rss, force_flush,
On 11/04/2024 12:20, David Hildenbrand wrote:
> On 11.04.24 13:11, Ryan Roberts wrote:
>> On 08/04/2024 05:24, Lance Yang wrote:
>>> [...]
>>> +static inline void mkold_clean_ptes(struct mm_struct *mm, unsigned long addr,
>>> + pte_t *ptep, unsigned int nr)
>>
>> Just thinking out loud, I wonder if it would be cleaner to convert mkold_ptes()
>> (which I added as part of swap-out) to something like:
>>
>> clear_young_dirty_ptes(struct mm_struct *mm, unsigned long addr,
>> pte_t *ptep, unsigned int nr,
>> bool clear_young, bool clear_dirty);
>>
>> Then we can use the same function for both use cases and also have the ability
>> to only clear dirty in future if we ever need it. The other advantage is that we
>> only need to plumb a single function down the arm64 arch code. As it currently
>> stands, those 2 functions would be duplicating most of their code.
>
> Yes. Maybe better use proper __bitwise flags, the compiler should be smart
> enough to optimize either way.
Agreed. I was also thinking perhaps it makes sense to start using output bitwise
flags for folio_pte_batch() since this patch set takes us up to 3 optional bool
pointers for different things. Might be cleaner to have input flags to tell it
what we care about and output flags to highlight those things. I guess the
compiler should be able to optimize in the same way.
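
To illustrate the shape (FPB_TRACK_* and FPB_ANY_* below are made-up names,
only FPB_IGNORE_DIRTY/FPB_IGNORE_SOFT_DIRTY exist today), the
madvise_free_pte_range() call site could end up looking something like:

	fpb_t out_flags = 0;
	const fpb_t in_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY |
			       FPB_TRACK_YOUNG | FPB_TRACK_DIRTY;

	nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
			     in_flags, &out_flags);
	if (out_flags & FPB_ANY_YOUNG)
		ptent = pte_mkyoung(ptent);
	if (out_flags & FPB_ANY_DIRTY)
		ptent = pte_mkdirty(ptent);

That would replace the three optional bool pointers with a single output
parameter.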
On Thu, Apr 11, 2024 at 7:27 PM Ryan Roberts <[email protected]> wrote:
>
> On 11/04/2024 12:20, David Hildenbrand wrote:
> > On 11.04.24 13:11, Ryan Roberts wrote:
> >> On 08/04/2024 05:24, Lance Yang wrote:
> >>> [...]
> >>> +static inline void mkold_clean_ptes(struct mm_struct *mm, unsigned long addr,
> >>> + pte_t *ptep, unsigned int nr)
> >>
Thanks for the suggestions, Ryan, David!
> >> Just thinking out loud, I wonder if it would be cleaner to convert mkold_ptes()
> >> (which I added as part of swap-out) to something like:
Yeah, this is definitely cleaner than before.
> >>
> >> clear_young_dirty_ptes(struct mm_struct *mm, unsigned long addr,
> >> pte_t *ptep, unsigned int nr,
> >> bool clear_young, bool clear_dirty);
> >>
> >> Then we can use the same function for both use cases and also have the ability
> >> to only clear dirty in future if we ever need it. The other advantage is that we
> >> only need to plumb a single function down the arm64 arch code. As it currently
> >> stands, those 2 functions would be duplicating most of their code.
Agreed. It's indeed a good idea to use a single function for both use cases.
> >
> > Yes. Maybe better use proper __bitwise flags, the compiler should be smart
> > enough to optimize either way.
Nice. I'll use the __bitwise flags as the input.
>
> Agreed. I was also thinking perhaps it makes sense to start using output bitwise
> flags for folio_pte_batch() since this patch set takes us up to 3 optional bool
> pointers for different things. Might be cleaner to have input flags to tell it
> what we care about and output flags to highlight those things. I guess the
> compiler should be able to optimize in the same way.
>
Should I start using output bitwise flags for folio_pte_batch() in
this patch set?
Thanks,
Lance
On Thu, Apr 11, 2024 at 7:11 PM Ryan Roberts <[email protected]> wrote:
>
> On 08/04/2024 05:24, Lance Yang wrote:
> > [...]
> >
> > diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> > index 0f4b2faa1d71..4dd442787420 100644
> > --- a/include/linux/pgtable.h
> > +++ b/include/linux/pgtable.h
> > @@ -489,6 +489,40 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
> > }
> > #endif
> >
> > +#ifndef mkold_clean_ptes
> > +/**
> > + * mkold_clean_ptes - Mark PTEs that map consecutive pages of the same folio
> > + * as old and clean.
> > + * @mm: Address space the pages are mapped into.
> > + * @addr: Address the first page is mapped at.
> > + * @ptep: Page table pointer for the first entry.
> > + * @nr: Number of entries to mark old and clean.
> > + *
> > + * May be overridden by the architecture; otherwise, implemented by
> > + * get_and_clear/modify/set for each pte in the range.
> > + *
> > + * Note that PTE bits in the PTE range besides the PFN can differ. For example,
> > + * some PTEs might be write-protected.
> > + *
> > + * Context: The caller holds the page table lock. The PTEs map consecutive
> > + * pages that belong to the same folio. The PTEs are all in the same PMD.
> > + */
> > +static inline void mkold_clean_ptes(struct mm_struct *mm, unsigned long addr,
> > + pte_t *ptep, unsigned int nr)
>
> Just thinking out loud, I wonder if it would be cleaner to convert mkold_ptes()
> (which I added as part of swap-out) to something like:
>
> clear_young_dirty_ptes(struct mm_struct *mm, unsigned long addr,
> pte_t *ptep, unsigned int nr,
> bool clear_young, bool clear_dirty);
>
> Then we can use the same function for both use cases and also have the ability
> to only clear dirty in future if we ever need it. The other advantage is that we
> only need to plumb a single function down the arm64 arch code. As it currently
> stands, those 2 functions would be duplicating most of their code.
>
> Generated code would still be the same since I'd expect the callsites to be
> passing in constants for clear_young and clear_dirty.
>
> > +{
> > + pte_t pte;
> > +
> > + for (;;) {
> > + pte = ptep_get_and_clear(mm, addr, ptep);
> > + set_pte_at(mm, addr, ptep, pte_mkclean(pte_mkold(pte)));
> > + if (--nr == 0)
> > + break;
> > + ptep++;
> > + addr += PAGE_SIZE;
> > + }
> > +}
> > +#endif
> > +
> > static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
> > pte_t *ptep)
> > {
> > diff --git a/mm/internal.h b/mm/internal.h
> > index 57c1055d5568..792a9baf0d14 100644
> > --- a/mm/internal.h
> > +++ b/mm/internal.h
> > @@ -132,6 +132,8 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
> > * first one is writable.
> > * @any_young: Optional pointer to indicate whether any entry except the
> > * first one is young.
> > + * @any_dirty: Optional pointer to indicate whether any entry except the
> > + * first one is dirty.
> > *
> > * Detect a PTE batch: consecutive (present) PTEs that map consecutive
> > * pages of the same large folio.
> > @@ -147,18 +149,20 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
> > */
> > static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> > pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
> > - bool *any_writable, bool *any_young)
> > + bool *any_writable, bool *any_young, bool *any_dirty)
> > {
> > unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
> > const pte_t *end_ptep = start_ptep + max_nr;
> > pte_t expected_pte, *ptep;
> > - bool writable, young;
> > + bool writable, young, dirty;
> > int nr;
> >
> > if (any_writable)
> > *any_writable = false;
> > if (any_young)
> > *any_young = false;
> > + if (any_dirty)
> > + *any_dirty = false;
> >
> > VM_WARN_ON_FOLIO(!pte_present(pte), folio);
> > VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
> > @@ -174,6 +178,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> > writable = !!pte_write(pte);
> > if (any_young)
> > young = !!pte_young(pte);
> > + if (any_dirty)
> > + dirty = !!pte_dirty(pte);
> > pte = __pte_batch_clear_ignored(pte, flags);
> >
> > if (!pte_same(pte, expected_pte))
> > @@ -191,6 +197,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> > *any_writable |= writable;
> > if (any_young)
> > *any_young |= young;
> > + if (any_dirty)
> > + *any_dirty |= dirty;
> >
> > nr = pte_batch_hint(ptep, pte);
> > expected_pte = pte_advance_pfn(expected_pte, nr);
> > diff --git a/mm/madvise.c b/mm/madvise.c
> > index bf26cf2b7715..0777df2e3691 100644
> > --- a/mm/madvise.c
> > +++ b/mm/madvise.c
> > @@ -321,6 +321,39 @@ static inline bool can_do_file_pageout(struct vm_area_struct *vma)
> > file_permission(vma->vm_file, MAY_WRITE) == 0;
> > }
> >
> > +static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
> > + struct folio *folio, pte_t *ptep,
> > + pte_t pte, bool *any_young,
> > + bool *any_dirty)
> > +{
> > + int max_nr = (end - addr) / PAGE_SIZE;
> > + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
> > +
> > + return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
> > + any_young, any_dirty);
> > +}
> > +
> > +static inline bool madvise_pte_split_folio(struct mm_struct *mm, pmd_t *pmd,
> > + unsigned long addr,
> > + struct folio *folio, pte_t **pte,
> > + spinlock_t **ptl)
> > +{
> > + int err;
> > +
> > + if (!folio_trylock(folio))
> > + return false;
> > +
> > + folio_get(folio);
> > + pte_unmap_unlock(*pte, *ptl);
> > + err = split_folio(folio);
> > + folio_unlock(folio);
> > + folio_put(folio);
> > +
> > + *pte = pte_offset_map_lock(mm, pmd, addr, ptl);
> > +
> > + return err == 0;
> > +}
> > +
> > static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
> > unsigned long addr, unsigned long end,
> > struct mm_walk *walk)
> > @@ -456,41 +489,29 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
> > * next pte in the range.
> > */
> > if (folio_test_large(folio)) {
> > - const fpb_t fpb_flags = FPB_IGNORE_DIRTY |
> > - FPB_IGNORE_SOFT_DIRTY;
> > - int max_nr = (end - addr) / PAGE_SIZE;
> > bool any_young;
> > -
>
> nit: there should be a blank line between variable declarations and following
> code. You have removed it here (and similar in free function). Did you run
> checkpatch.pl? It would have caught these things.
Sorry for that. I did see this warning msg, but I didn't take it seriously :(
>
> > - nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
> > - fpb_flags, NULL, &any_young);
> > - if (any_young)
> > - ptent = pte_mkyoung(ptent);
> > + nr = madvise_folio_pte_batch(addr, end, folio, pte,
> > + ptent, &any_young, NULL);
> >
> > if (nr < folio_nr_pages(folio)) {
> > - int err;
> > -
> > if (folio_likely_mapped_shared(folio))
> > continue;
> > if (pageout_anon_only_filter && !folio_test_anon(folio))
> > continue;
> > - if (!folio_trylock(folio))
> > - continue;
> > - folio_get(folio);
> > +
> > arch_leave_lazy_mmu_mode();
> > - pte_unmap_unlock(start_pte, ptl);
> > - start_pte = NULL;
> > - err = split_folio(folio);
> > - folio_unlock(folio);
> > - folio_put(folio);
> > - start_pte = pte =
> > - pte_offset_map_lock(mm, pmd, addr, &ptl);
> > + if (madvise_pte_split_folio(mm, pmd, addr,
> > + folio, &start_pte, &ptl))
> > + nr = 0;
> > if (!start_pte)
> > break;
> > + pte = start_pte;
> > arch_enter_lazy_mmu_mode();
> > - if (!err)
> > - nr = 0;
> > continue;
> > }
> > +
> > + if (any_young)
> > + ptent = pte_mkyoung(ptent);
> > }
> >
> > /*
> > @@ -687,47 +708,54 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
> > continue;
> >
> > /*
> > - * If pmd isn't transhuge but the folio is large and
> > - * is owned by only this process, split it and
> > - * deactivate all pages.
> > + * If we encounter a large folio, only split it if it is not
> > + * fully mapped within the range we are operating on. Otherwise
> > + * leave it as is so that it can be marked as lazyfree. If we
> > + * fail to split a folio, leave it in place and advance to the
> > + * next pte in the range.
> > */
> > if (folio_test_large(folio)) {
> > - int err;
> > + bool any_young, any_dirty;
> > + nr = madvise_folio_pte_batch(addr, end, folio, pte,
> > + ptent, &any_young, &any_dirty);
> >
> > - if (folio_likely_mapped_shared(folio))
> > - break;
> > - if (!folio_trylock(folio))
> > - break;
> > - folio_get(folio);
> > - arch_leave_lazy_mmu_mode();
> > - pte_unmap_unlock(start_pte, ptl);
> > - start_pte = NULL;
> > - err = split_folio(folio);
> > + if (nr < folio_nr_pages(folio)) {
> > + if (folio_likely_mapped_shared(folio))
> > + continue;
> > +
> > + arch_leave_lazy_mmu_mode();
> > + if (madvise_pte_split_folio(mm, pmd, addr,
> > + folio, &start_pte, &ptl))
> > + nr = 0;
> > + if (!start_pte)
> > + break;
> > + pte = start_pte;
> > + arch_enter_lazy_mmu_mode();
> > + continue;
> > + }
> > +
> > + if (any_young)
> > + ptent = pte_mkyoung(ptent);
> > + if (any_dirty)
> > + ptent = pte_mkdirty(ptent);
> > + }
> > +
> > + if (!folio_trylock(folio))
> > + continue;
>
> This is still wrong. This should all be protected by the "if
> (folio_test_swapcache(folio) || folio_test_dirty(folio))" as it was previously
> so that you only call folio_trylock() if that condition is true. You are
> unconditionally locking here, then unlocking, then relocking below if the
> condition is met. Just put everything inside the condition and lock once.
I'm not sure if it's safe to call folio_mapcount() without holding the
folio lock.
As David mentioned earlier in v2[1]:
> What could work for large folios is making sure that #ptes that map the
> folio here correspond to the folio_mapcount(). And folio_mapcount()
> should be called under folio lock, to avoid racing with swapout/migration.
[1] https://lore.kernel.org/all/[email protected]/
Thanks,
Lance
>
> Thanks,
> Ryan
>
> > + /*
> > + * If we have a large folio at this point, we know it is fully mapped
> > + * so if its mapcount is the same as its number of pages, it must be
> > + * exclusive.
> > + */
> > + if (folio_mapcount(folio) != folio_nr_pages(folio)) {
> > folio_unlock(folio);
> > - folio_put(folio);
> > - if (err)
> > - break;
> > - start_pte = pte =
> > - pte_offset_map_lock(mm, pmd, addr, &ptl);
> > - if (!start_pte)
> > - break;
> > - arch_enter_lazy_mmu_mode();
> > - pte--;
> > - addr -= PAGE_SIZE;
> > continue;
> > }
> > + folio_unlock(folio);
> >
> > if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
> > if (!folio_trylock(folio))
> > continue;
> > - /*
> > - * If folio is shared with others, we mustn't clear
> > - * the folio's dirty flag.
> > - */
> > - if (folio_mapcount(folio) != 1) {
> > - folio_unlock(folio);
> > - continue;
> > - }
> >
> > if (folio_test_swapcache(folio) &&
> > !folio_free_swap(folio)) {
> > @@ -740,19 +768,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
> > }
> >
> > if (pte_young(ptent) || pte_dirty(ptent)) {
> > - /*
> > - * Some of architecture(ex, PPC) don't update TLB
> > - * with set_pte_at and tlb_remove_tlb_entry so for
> > - * the portability, remap the pte with old|clean
> > - * after pte clearing.
> > - */
> > - ptent = ptep_get_and_clear_full(mm, addr, pte,
> > - tlb->fullmm);
> > -
> > - ptent = pte_mkold(ptent);
> > - ptent = pte_mkclean(ptent);
> > - set_pte_at(mm, addr, pte, ptent);
> > - tlb_remove_tlb_entry(tlb, pte, addr);
> > + mkold_clean_ptes(mm, addr, pte, nr);
> > + tlb_remove_tlb_entries(tlb, pte, nr, addr);
> > }
> > folio_mark_lazyfree(folio);
> > }
> > diff --git a/mm/memory.c b/mm/memory.c
> > index 1723c8ddf9cb..fe9d4d64c627 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -989,7 +989,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
> > flags |= FPB_IGNORE_SOFT_DIRTY;
> >
> > nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
> > - &any_writable, NULL);
> > + &any_writable, NULL, NULL);
> > folio_ref_add(folio, nr);
> > if (folio_test_anon(folio)) {
> > if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
> > @@ -1559,7 +1559,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
> > */
> > if (unlikely(folio_test_large(folio) && max_nr != 1)) {
> > nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
> > - NULL, NULL);
> > + NULL, NULL, NULL);
> >
> > zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
> > addr, details, rss, force_flush,
>
On 11.04.24 15:51, Ryan Roberts wrote:
> On 11/04/2024 13:23, Lance Yang wrote:
>> On Thu, Apr 11, 2024 at 7:27 PM Ryan Roberts <[email protected]> wrote:
>>>
>>> On 11/04/2024 12:20, David Hildenbrand wrote:
>>>> On 11.04.24 13:11, Ryan Roberts wrote:
>>>>> On 08/04/2024 05:24, Lance Yang wrote:
>>>>>> [...]
>>>>>> +static inline void mkold_clean_ptes(struct mm_struct *mm, unsigned long addr,
>>>>>> + pte_t *ptep, unsigned int nr)
>>>>>
>>
>> Thanks for the suggestions, Ryan, David!
>>
>>>>> Just thinking out loud, I wonder if it would be cleaner to convert mkold_ptes()
>>>>> (which I added as part of swap-out) to something like:
>>
>> Yeah, this is definitely cleaner than before.
>>
>>>>>
>>>>> clear_young_dirty_ptes(struct mm_struct *mm, unsigned long addr,
>>>>> pte_t *ptep, unsigned int nr,
>>>>> bool clear_young, bool clear_dirty);
>>>>>
>>>>> Then we can use the same function for both use cases and also have the ability
>>>>> to only clear dirty in future if we ever need it. The other advantage is that we
>>>>> only need to plumb a single function down the arm64 arch code. As it currently
>>>>> stands, those 2 functions would be duplicating most of their code.
>>
>> Agreed. It's indeed a good idea to use a single function for both use cases.
>>
>>>>
>>>> Yes. Maybe better use proper __bitwise flags, the compiler should be smart
>>>> enough to optimize either way.
>>
>> Nice. I'll use the __bitwise flags as the input.
>>
>>>
>>> Agreed. I was also thinking perhaps it makes sense to start using output bitwise
>>> flags for folio_pte_batch() since this patch set takes us up to 3 optional bool
>>> pointers for different things. Might be cleaner to have input flags to tell it
>>> what we care about and output flags to highlight those things. I guess the
>>> compiler should be able to optimize in the same way.
>>>
>>
>> Should I start using output bitwise flags for folio_pte_batch() in
>> this patch set?
>
> I don't think its crucial (yet). I'd leave it as you have done it for now,
> unless David shouts.
Let's do that separately, and investigate if the compiler actually is
smart enough ... :)
--
Cheers,
David / dhildenb
[...]
>>> +
>>> + if (!folio_trylock(folio))
>>> + continue;
>>
>> This is still wrong. This should all be protected by the "if
>> (folio_test_swapcache(folio) || folio_test_dirty(folio))" as it was previously
>> so that you only call folio_trylock() if that condition is true. You are
>> unconditionally locking here, then unlocking, then relocking below if the
>> condition is met. Just put everything inside the condition and lock once.
>
> I'm not sure if it's safe to call folio_mapcount() without holding the
> folio lock.
>
> As mentioned earlier by David in the v2[1]
>> What could work for large folios is making sure that #ptes that map the
>> folio here correspond to the folio_mapcount(). And folio_mapcount()
>> should be called under folio lock, to avoid racing with swapout/migration.
>
> [1] https://lore.kernel.org/all/[email protected]/
But I'm not suggesting that you should call folio_mapcount() without the lock.
I'm proposing this:
if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
if (!folio_trylock(folio))
continue;
/*
- * If folio is shared with others, we mustn't clear
- * the folio's dirty flag.
+ * If we have a large folio at this point, we know it is
+ * fully mapped so if its mapcount is the same as its
+ * number of pages, it must be exclusive.
*/
- if (folio_mapcount(folio) != 1) {
+ if (folio_mapcount(folio) != folio_nr_pages(folio)) {
folio_unlock(folio);
continue;
}
What am I missing?
On 11/04/2024 13:23, Lance Yang wrote:
> On Thu, Apr 11, 2024 at 7:27 PM Ryan Roberts <[email protected]> wrote:
>>
>> On 11/04/2024 12:20, David Hildenbrand wrote:
>>> On 11.04.24 13:11, Ryan Roberts wrote:
>>>> On 08/04/2024 05:24, Lance Yang wrote:
>>>>> [...]
>>>>> +static inline void mkold_clean_ptes(struct mm_struct *mm, unsigned long addr,
>>>>> + pte_t *ptep, unsigned int nr)
>>>>
>
> Thanks for the suggestions, Ryan, David!
>
>>>> Just thinking out loud, I wonder if it would be cleaner to convert mkold_ptes()
>>>> (which I added as part of swap-out) to something like:
>
> Yeah, this is definitely cleaner than before.
>
>>>>
>>>> clear_young_dirty_ptes(struct mm_struct *mm, unsigned long addr,
>>>> pte_t *ptep, unsigned int nr,
>>>> bool clear_young, bool clear_dirty);
>>>>
>>>> Then we can use the same function for both use cases and also have the ability
>>>> to only clear dirty in future if we ever need it. The other advantage is that we
>>>> only need to plumb a single function down to the arm64 arch code. As it currently
>>>> stands, those 2 functions would be duplicating most of their code.
>
> Agreed. It's indeed a good idea to use a single function for both use cases.
>
>>>
>>> Yes. Maybe better use proper __bitwise flags, the compiler should be smart
>>> enough to optimize either way.
>
> Nice. I'll use the __bitwise flags as the input.
>
>>
>> Agreed. I was also thinking perhaps it makes sense to start using output bitwise
>> flags for folio_pte_batch() since this patch set takes us up to 3 optional bool
>> pointers for different things. Might be cleaner to have input flags to tell it
>> what we care about and output flags to highlight those things. I guess the
>> compiler should be able to optimize in the same way.
>>
>
> Should I start using output bitwise flags for folio_pte_batch() in
> this patch set?
I don't think it's crucial (yet). I'd leave it as you have done it for now,
unless David shouts.
>
> Thanks,
> Lance
On Thu, Apr 11, 2024 at 9:48 PM Ryan Roberts <[email protected]> wrote:
>
> [...]
>
> >>> +
> >>> + if (!folio_trylock(folio))
> >>> + continue;
> >>
> >> This is still wrong. This should all be protected by the "if
> >> (folio_test_swapcache(folio) || folio_test_dirty(folio))" as it was previously
> >> so that you only call folio_trylock() if that condition is true. You are
> >> unconditionally locking here, then unlocking, then relocking below if the
> >> condition is met. Just put everything inside the condition and lock once.
> >
> > I'm not sure if it's safe to call folio_mapcount() without holding the
> > folio lock.
> >
> > As mentioned earlier by David in the v2[1]
> >> What could work for large folios is making sure that #ptes that map the
> >> folio here correspond to the folio_mapcount(). And folio_mapcount()
> >> should be called under folio lock, to avoid racing with swapout/migration.
> >
> > [1] https://lore.kernel.org/all/[email protected]/
>
> But I'm not suggesting that you should call folio_mapcount() without the lock.
> I'm proposing this:
>
> if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
> if (!folio_trylock(folio))
> continue;
> /*
> - * If folio is shared with others, we mustn't clear
> - * the folio's dirty flag.
> + * If we have a large folio at this point, we know it is
> + * fully mapped so if its mapcount is the same as its
> + * number of pages, it must be exclusive.
> */
> - if (folio_mapcount(folio) != 1) {
> + if (folio_mapcount(folio) != folio_nr_pages(folio)) {
> folio_unlock(folio);
> continue;
> }
IIUC, if the folio is clean and not in the swapcache, we still need to
compare the number of batched PTEs against folio_mapcount().
Thanks,
Lance
>
> What am I missing?
>
On 11.04.24 16:39, Ryan Roberts wrote:
> On 11/04/2024 15:07, Lance Yang wrote:
>> On Thu, Apr 11, 2024 at 9:48 PM Ryan Roberts <[email protected]> wrote:
>>>
>>> [...]
>>>
>>>>>> +
>>>>>> + if (!folio_trylock(folio))
>>>>>> + continue;
>>>>>
>>>>> This is still wrong. This should all be protected by the "if
>>>>> (folio_test_swapcache(folio) || folio_test_dirty(folio))" as it was previously
>>>>> so that you only call folio_trylock() if that condition is true. You are
>>>>> unconditionally locking here, then unlocking, then relocking below if the
>>>>> condition is met. Just put everything inside the condition and lock once.
>>>>
>>>> I'm not sure if it's safe to call folio_mapcount() without holding the
>>>> folio lock.
>>>>
>>>> As mentioned earlier by David in the v2[1]
>>>>> What could work for large folios is making sure that #ptes that map the
>>>>> folio here correspond to the folio_mapcount(). And folio_mapcount()
>>>>> should be called under folio lock, to avoid racing with swapout/migration.
>>>>
>>>> [1] https://lore.kernel.org/all/[email protected]/
>>>
>>> But I'm not suggesting that you should call folio_mapcount() without the lock.
>>> I'm proposing this:
>>>
>>> if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
>>> if (!folio_trylock(folio))
>>> continue;
>>> /*
>>> - * If folio is shared with others, we mustn't clear
>>> - * the folio's dirty flag.
>>> + * If we have a large folio at this point, we know it is
>>> + * fully mapped so if its mapcount is the same as its
>>> + * number of pages, it must be exclusive.
>>> */
>>> - if (folio_mapcount(folio) != 1) {
>>> + if (folio_mapcount(folio) != folio_nr_pages(folio)) {
>>> folio_unlock(folio);
>>> continue;
>>> }
>>
>> IIUC, if the folio is clean and not in the swapcache, we still need to
>> compare the number of batched PTEs against folio_mapcount().
>
> Why? That's not how the old code worked. In fact the comment says that the
> reason for the exclusive check is to avoid marking a dirty *folio* as clean if
> shared; that would be bad because we could throw away data that others relied
> upon. It's perfectly safe to clear the dirty flag from the *pte* even if it is
> shared; the ptes are private to the process so that won't affect sharers.
>
> You should just follow the pattern already established by the original code.
> The only difference is that because the folio is now (potentially) large, you
> have to change the way to detect exclusivity.
+1
--
Cheers,
David / dhildenb
On 11/04/2024 15:07, Lance Yang wrote:
> On Thu, Apr 11, 2024 at 9:48 PM Ryan Roberts <[email protected]> wrote:
>>
>> [...]
>>
>>>>> +
>>>>> + if (!folio_trylock(folio))
>>>>> + continue;
>>>>
>>>> This is still wrong. This should all be protected by the "if
>>>> (folio_test_swapcache(folio) || folio_test_dirty(folio))" as it was previously
>>>> so that you only call folio_trylock() if that condition is true. You are
>>>> unconditionally locking here, then unlocking, then relocking below if the
>>>> condition is met. Just put everything inside the condition and lock once.
>>>
>>> I'm not sure if it's safe to call folio_mapcount() without holding the
>>> folio lock.
>>>
>>> As mentioned earlier by David in the v2[1]
>>>> What could work for large folios is making sure that #ptes that map the
>>>> folio here correspond to the folio_mapcount(). And folio_mapcount()
>>>> should be called under folio lock, to avoid racing with swapout/migration.
>>>
>>> [1] https://lore.kernel.org/all/[email protected]/
>>
>> But I'm not suggesting that you should call folio_mapcount() without the lock.
>> I'm proposing this:
>>
>> if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
>> if (!folio_trylock(folio))
>> continue;
>> /*
>> - * If folio is shared with others, we mustn't clear
>> - * the folio's dirty flag.
>> + * If we have a large folio at this point, we know it is
>> + * fully mapped so if its mapcount is the same as its
>> + * number of pages, it must be exclusive.
>> */
>> - if (folio_mapcount(folio) != 1) {
>> + if (folio_mapcount(folio) != folio_nr_pages(folio)) {
>> folio_unlock(folio);
>> continue;
>> }
>
> IIUC, if the folio is clean and not in the swapcache, we still need to
> compare the number of batched PTEs against folio_mapcount().
Why? That's not how the old code worked. In fact the comment says that the
reason for the exclusive check is to avoid marking a dirty *folio* as clean if
shared; that would be bad because we could throw away data that others relied
upon. It's perfectly safe to clear the dirty flag from the *pte* even if it is
shared; the ptes are private to the process so that won't affect sharers.
You should just follow the pattern already established by the original code.
The only difference is that because the folio is now (potentially) large, you
have to change the way to detect exclusivity.
>
> Thanks,
> Lance
>
>>
>> What am I missing?
>>
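Putting the two points above together, a sketch of the restructured block,
following the pattern of the original madvise_free_pte_range() code and only
changing how exclusivity is detected now that the folio may be a fully mapped
large folio (the surrounding loop and PTE-batching details are omitted):

/*
 * The folio lock and the exclusivity check only guard clearing the
 * folio's dirty flag and dropping it from the swap cache; clearing
 * the dirty/young bits in the PTEs is private to this process and
 * can be done regardless of sharers.
 */
if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
	if (!folio_trylock(folio))
		continue;
	/*
	 * The folio is fully mapped in this range, so if its mapcount
	 * equals its number of pages, no one else maps it and we may
	 * clean it.
	 */
	if (folio_mapcount(folio) != folio_nr_pages(folio)) {
		folio_unlock(folio);
		continue;
	}
	if (folio_test_swapcache(folio) && !folio_free_swap(folio)) {
		folio_unlock(folio);
		continue;
	}
	folio_clear_dirty(folio);
	folio_unlock(folio);
}

This keeps folio_mapcount() under the folio lock, as David asked for in v2, while
only taking the lock when the folio actually needs to be cleaned.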
On Thu, Apr 11, 2024 at 10:39 PM Ryan Roberts <[email protected]> wrote:
>
> On 11/04/2024 15:07, Lance Yang wrote:
> > On Thu, Apr 11, 2024 at 9:48 PM Ryan Roberts <[email protected]> wrote:
> >>
> >> [...]
> >>
> >>>>> +
> >>>>> + if (!folio_trylock(folio))
> >>>>> + continue;
> >>>>
> >>>> This is still wrong. This should all be protected by the "if
> >>>> (folio_test_swapcache(folio) || folio_test_dirty(folio))" as it was previously
> >>>> so that you only call folio_trylock() if that condition is true. You are
> >>>> unconditionally locking here, then unlocking, then relocking below if the
> >>>> condition is met. Just put everything inside the condition and lock once.
> >>>
> >>> I'm not sure if it's safe to call folio_mapcount() without holding the
> >>> folio lock.
> >>>
> >>> As mentioned earlier by David in the v2[1]
> >>>> What could work for large folios is making sure that #ptes that map the
> >>>> folio here correspond to the folio_mapcount(). And folio_mapcount()
> >>>> should be called under folio lock, to avoid racing with swapout/migration.
> >>>
> >>> [1] https://lore.kernel.org/all/[email protected]/
> >>
> >> But I'm not suggesting that you should call folio_mapcount() without the lock.
> >> I'm proposing this:
> >>
> >> if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
> >> if (!folio_trylock(folio))
> >> continue;
> >> /*
> >> - * If folio is shared with others, we mustn't clear
> >> - * the folio's dirty flag.
> >> + * If we have a large folio at this point, we know it is
> >> + * fully mapped so if its mapcount is the same as its
> >> + * number of pages, it must be exclusive.
> >> */
> >> - if (folio_mapcount(folio) != 1) {
> >> + if (folio_mapcount(folio) != folio_nr_pages(folio)) {
> >> folio_unlock(folio);
> >> continue;
> >> }
> >
> > IIUC, if the folio is clean and not in the swapcache, we still need to
> > compare the number of batched PTEs against folio_mapcount().
>
> Why? That's not how the old code worked. In fact the comment says that the
> reason for the exclusive check is to avoid marking a dirty *folio* as clean if
> shared; that would be bad because we could throw away data that others relied
> upon. It's perfectly safe to clear the dirty flag from the *pte* even if it is
> shared; the ptes are private to the process so that won't affect sharers.
>
> You should just follow the pattern already established by the original code.
> The only difference is that because the folio is now (potentially) large, you
> have to change the way to detect exclusivity.
Thanks a lot for your patience and help!
My bad for the oversight :(
I'll take another look at the original code and make adjustments following the
established pattern.
Thanks,
Lance
>
> >
> > Thanks,
> > Lance
> >
> >>
> >> What am I missing?
> >>
>