Let's make folio_mlock_step() simply a wrapper around folio_pte_batch(),
which will greatly reduce the cost of ptep_get() when scanning a range
of contptes (contiguous PTEs).
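
For reference, the only caller is mlock_pte_range(), which advances its
PTE walk by the step this helper returns. Below is a simplified sketch
of that call site (not part of this patch; loosely following
mlock_pte_range() in mm/mlock.c with details trimmed), only to show
where the batch size is consumed:

	for (pte = start_pte; addr != end; pte += step, addr += step * PAGE_SIZE) {
		step = 1;
		ptent = ptep_get(pte);
		if (!pte_present(ptent))
			continue;

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * With this patch, a single folio_pte_batch() call inside
		 * folio_mlock_step() replaces the per-PTE ptep_get() loop
		 * that used to run here for large folios.
		 */
		step = folio_mlock_step(folio, pte, addr, end);
		if (!allow_mlock_munlock(folio, vma, start, end, step))
			continue;

		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(folio);
		else
			munlock_folio(folio);
	}

Note that FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY only mean that
dirty/soft-dirty differences between PTEs don't terminate a batch, which
matches the old open-coded scan that never looked at those bits.
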
Acked-by: David Hildenbrand <[email protected]>
Reviewed-by: Baolin Wang <[email protected]>
Suggested-by: Barry Song <[email protected]>
Suggested-by: Matthew Wilcox <[email protected]>
Signed-off-by: Lance Yang <[email protected]>
---
v2 -> v3:
- Rebased to mm/mm-unstable
- https://lore.kernel.org/linux-mm/[email protected]/
v1 -> v2:
- Remove the likely() hint (per Matthew)
- Keep type declarations at the beginning of the function (per Matthew)
- Make a minimal change (per Barry)
- Pick up the Reviewed-by from Baolin - thanks!
- Pick up the Acked-by from David - thanks!
- https://lore.kernel.org/linux-mm/[email protected]/
mm/mlock.c | 19 ++++---------------
1 file changed, 4 insertions(+), 15 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 30b51cdea89d..52d6e401ad67 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -307,26 +307,15 @@ void munlock_folio(struct folio *folio)
 static inline unsigned int folio_mlock_step(struct folio *folio,
 		pte_t *pte, unsigned long addr, unsigned long end)
 {
-	unsigned int count, i, nr = folio_nr_pages(folio);
-	unsigned long pfn = folio_pfn(folio);
+	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
+	unsigned int count = (end - addr) >> PAGE_SHIFT;
 	pte_t ptent = ptep_get(pte);
 
 	if (!folio_test_large(folio))
 		return 1;
 
-	count = pfn + nr - pte_pfn(ptent);
-	count = min_t(unsigned int, count, (end - addr) >> PAGE_SHIFT);
-
-	for (i = 0; i < count; i++, pte++) {
-		pte_t entry = ptep_get(pte);
-
-		if (!pte_present(entry))
-			break;
-		if (pte_pfn(entry) - pfn >= nr)
-			break;
-	}
-
-	return i;
+	return folio_pte_batch(folio, addr, pte, ptent, count, fpb_flags, NULL,
+			       NULL, NULL);
 }
 
 static inline bool allow_mlock_munlock(struct folio *folio,
--
2.33.1