deactivate_page() has already been converted to use folios internally. This
patch series modifies the callers of deactivate_page() to use folios as well.
It also introduces vm_normal_folio() to assist with folio conversions, and
converts deactivate_page() to folio_deactivate(), which takes a folio argument.
---
v4:
Rebased onto latest mm-unstable - fixes madvise and damon conflicts
v3:
Introduce vm_normal_folio() wrapper function to return a folio
Fix madvise missing folio_mapcount()
v2:
Fix a compilation issue
Some minor rewording of comments/descriptions
Vishal Moola (Oracle) (4):
mm/memory: Add vm_normal_folio()
madvise: Convert madvise_cold_or_pageout_pte_range() to use folios
mm/damon: Convert damon_pa_mark_accessed_or_deactivate() to use folios
mm/swap: Convert deactivate_page() to folio_deactivate()
include/linux/mm.h | 2 +
include/linux/swap.h | 2 +-
mm/damon/paddr.c | 14 ++++---
mm/madvise.c | 98 ++++++++++++++++++++++----------------------
mm/memory.c | 10 +++++
mm/swap.c | 14 +++----
6 files changed, 76 insertions(+), 64 deletions(-)
--
2.38.1
Introduce a wrapper function called vm_normal_folio(). This function
calls vm_normal_page() and returns the folio of the page found, or NULL
if no page is found.

This function allows callers to get a folio from a pte, which will
eventually allow them to replace their struct page variables with
struct folio entirely.
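For illustration, a pte walker would use it like this (a minimal sketch only;
vma, addr and ptent are assumed to come from the caller's page table walk, as
in the madvise conversion later in this series):

	struct folio *folio = vm_normal_folio(vma, addr, ptent);

	if (!folio || folio_is_zone_device(folio))
		continue;
	/* from here on the caller can use folio helpers directly */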
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
---
include/linux/mm.h | 2 ++
mm/memory.c | 10 ++++++++++
2 files changed, 12 insertions(+)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ff46dcab2004..d29bfae4b71f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1968,6 +1968,8 @@ static inline bool can_do_mlock(void) { return false; }
extern int user_shm_lock(size_t, struct ucounts *);
extern void user_shm_unlock(size_t, struct ucounts *);
+struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index 37d1763c4d47..4000e9f017e0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -625,6 +625,16 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
return pfn_to_page(pfn);
}
+struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte)
+{
+ struct page *page = vm_normal_page(vma, addr, pte);
+
+ if (page)
+ return page_folio(page);
+ return NULL;
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd)
--
2.38.1
Convert madvise_cold_or_pageout_pte_range() to use folios. This change
removes a number of calls to compound_head() and saves 1729 bytes of kernel
text.
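The saving comes from page-based helpers such as PageAnon() and
ClearPageReferenced(), which each resolve the head page via compound_head()
internally, while the folio variants operate on the folio directly. A
representative before/after pair from this conversion, shown here only as a
sketch:

	/* before: each helper hides a compound_head() lookup */
	if (pageout_anon_only_filter && !PageAnon(page))
		goto huge_unlock;
	ClearPageReferenced(page);

	/* after: the folio is already the head page, no hidden lookups */
	if (pageout_anon_only_filter && !folio_test_anon(folio))
		goto huge_unlock;
	folio_clear_referenced(folio);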
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
---
mm/madvise.c | 98 ++++++++++++++++++++++++++--------------------------
1 file changed, 49 insertions(+), 49 deletions(-)
diff --git a/mm/madvise.c b/mm/madvise.c
index 7b7549a54a6d..851d977b9b03 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -345,8 +345,8 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
struct vm_area_struct *vma = walk->vma;
pte_t *orig_pte, *pte, ptent;
spinlock_t *ptl;
- struct page *page = NULL;
- LIST_HEAD(page_list);
+ struct folio *folio = NULL;
+ LIST_HEAD(folio_list);
bool pageout_anon_only_filter;
if (fatal_signal_pending(current))
@@ -375,26 +375,26 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
goto huge_unlock;
}
- page = pmd_page(orig_pmd);
+ folio = pfn_folio(pmd_pfn(orig_pmd));
- /* Do not interfere with other mappings of this page */
- if (page_mapcount(page) != 1)
+ /* Do not interfere with other mappings of this folio */
+ if (folio_mapcount(folio) != 1)
goto huge_unlock;
- if (pageout_anon_only_filter && !PageAnon(page))
+ if (pageout_anon_only_filter && !folio_test_anon(folio))
goto huge_unlock;
if (next - addr != HPAGE_PMD_SIZE) {
int err;
- get_page(page);
+ folio_get(folio);
spin_unlock(ptl);
- lock_page(page);
- err = split_huge_page(page);
- unlock_page(page);
- put_page(page);
+ folio_lock(folio);
+ err = split_folio(folio);
+ folio_unlock(folio);
+ folio_put(folio);
if (!err)
- goto regular_page;
+ goto regular_folio;
return 0;
}
@@ -406,25 +406,25 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
}
- ClearPageReferenced(page);
- test_and_clear_page_young(page);
+ folio_clear_referenced(folio);
+ folio_test_clear_young(folio);
if (pageout) {
- if (!isolate_lru_page(page)) {
- if (PageUnevictable(page))
- putback_lru_page(page);
+ if (!folio_isolate_lru(folio)) {
+ if (folio_test_unevictable(folio))
+ folio_putback_lru(folio);
else
- list_add(&page->lru, &page_list);
+ list_add(&folio->lru, &folio_list);
}
} else
- deactivate_page(page);
+ deactivate_page(&folio->page);
huge_unlock:
spin_unlock(ptl);
if (pageout)
- reclaim_pages(&page_list);
+ reclaim_pages(&folio_list);
return 0;
}
-regular_page:
+regular_folio:
if (pmd_trans_unstable(pmd))
return 0;
#endif
@@ -441,33 +441,33 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
if (!pte_present(ptent))
continue;
- page = vm_normal_page(vma, addr, ptent);
- if (!page || is_zone_device_page(page))
+ folio = vm_normal_folio(vma, addr, ptent);
+ if (!folio || folio_is_zone_device(folio))
continue;
/*
* Creating a THP page is expensive so split it only if we
* are sure it's worth. Split it if we are only owner.
*/
- if (PageTransCompound(page)) {
- if (page_mapcount(page) != 1)
+ if (folio_test_large(folio)) {
+ if (folio_mapcount(folio) != 1)
break;
- if (pageout_anon_only_filter && !PageAnon(page))
+ if (pageout_anon_only_filter && !folio_test_anon(folio))
break;
- get_page(page);
- if (!trylock_page(page)) {
- put_page(page);
+ folio_get(folio);
+ if (!folio_trylock(folio)) {
+ folio_put(folio);
break;
}
pte_unmap_unlock(orig_pte, ptl);
- if (split_huge_page(page)) {
- unlock_page(page);
- put_page(page);
+ if (split_folio(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
break;
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte--;
addr -= PAGE_SIZE;
@@ -475,16 +475,16 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
}
/*
- * Do not interfere with other mappings of this page and
- * non-LRU page.
+ * Do not interfere with other mappings of this folio and
+ * non-LRU folio.
*/
- if (!PageLRU(page) || page_mapcount(page) != 1)
+ if (!folio_test_lru(folio) || folio_mapcount(folio) != 1)
continue;
- if (pageout_anon_only_filter && !PageAnon(page))
+ if (pageout_anon_only_filter && !folio_test_anon(folio))
continue;
- VM_BUG_ON_PAGE(PageTransCompound(page), page);
+ VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
if (pte_young(ptent)) {
ptent = ptep_get_and_clear_full(mm, addr, pte,
@@ -495,28 +495,28 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
}
/*
- * We are deactivating a page for accelerating reclaiming.
- * VM couldn't reclaim the page unless we clear PG_young.
+ * We are deactivating a folio for accelerating reclaiming.
+ * VM couldn't reclaim the folio unless we clear PG_young.
* As a side effect, it makes confuse idle-page tracking
* because they will miss recent referenced history.
*/
- ClearPageReferenced(page);
- test_and_clear_page_young(page);
+ folio_clear_referenced(folio);
+ folio_test_clear_young(folio);
if (pageout) {
- if (!isolate_lru_page(page)) {
- if (PageUnevictable(page))
- putback_lru_page(page);
+ if (!folio_isolate_lru(folio)) {
+ if (folio_test_unevictable(folio))
+ folio_putback_lru(folio);
else
- list_add(&page->lru, &page_list);
+ list_add(&folio->lru, &folio_list);
}
} else
- deactivate_page(page);
+ deactivate_page(&folio->page);
}
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(orig_pte, ptl);
if (pageout)
- reclaim_pages(&page_list);
+ reclaim_pages(&folio_list);
cond_resched();
return 0;
--
2.38.1
deactivate_page() has already been converted to use folios internally. This
change converts it to take a folio argument instead of calling page_folio(),
and renames the function to folio_deactivate() to be more consistent with
other folio functions.
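Callers that already hold a folio can now pass it directly instead of going
through its first page; the pattern, taken from the hunks below, is simply:

	/* before */
	deactivate_page(&folio->page);

	/* after */
	folio_deactivate(folio);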
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: SeongJae Park <[email protected]>
---
include/linux/swap.h | 2 +-
mm/damon/paddr.c | 2 +-
mm/madvise.c | 4 ++--
mm/swap.c | 14 ++++++--------
4 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index f965786710b0..209a425739a9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -401,7 +401,7 @@ extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
-extern void deactivate_page(struct page *page);
+void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 884c8bf18b12..6334c99e5152 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -297,7 +297,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
if (mark_accessed)
folio_mark_accessed(folio);
else
- deactivate_page(&folio->page);
+ folio_deactivate(folio);
folio_put(folio);
applied += folio_nr_pages(folio);
}
diff --git a/mm/madvise.c b/mm/madvise.c
index 851d977b9b03..025be3517af1 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -416,7 +416,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
list_add(&folio->lru, &folio_list);
}
} else
- deactivate_page(&folio->page);
+ folio_deactivate(folio);
huge_unlock:
spin_unlock(ptl);
if (pageout)
@@ -510,7 +510,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
list_add(&folio->lru, &folio_list);
}
} else
- deactivate_page(&folio->page);
+ folio_deactivate(folio);
}
arch_leave_lazy_mmu_mode();
diff --git a/mm/swap.c b/mm/swap.c
index 5e5eba186930..e54e2a252e27 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -733,17 +733,15 @@ void deactivate_file_folio(struct folio *folio)
}
/*
- * deactivate_page - deactivate a page
- * @page: page to deactivate
+ * folio_deactivate - deactivate a folio
+ * @folio: folio to deactivate
*
- * deactivate_page() moves @page to the inactive list if @page was on the active
- * list and was not an unevictable page. This is done to accelerate the reclaim
- * of @page.
+ * folio_deactivate() moves @folio to the inactive list if @folio was on the
+ * active list and was not unevictable. This is done to accelerate the
+ * reclaim of @folio.
*/
-void deactivate_page(struct page *page)
+void folio_deactivate(struct folio *folio)
{
- struct folio *folio = page_folio(page);
-
if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
(folio_test_active(folio) || lru_gen_enabled())) {
struct folio_batch *fbatch;
--
2.38.1
Convert damon_pa_mark_accessed_or_deactivate() to use folios. This change
replaces two calls to compound_head() from put_page() and one call from
mark_page_accessed() with a single call to page_folio(). This is in
preparation for the conversion of deactivate_page() to folio_deactivate().
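Sketched out, the loop body in damon_pa_mark_accessed_or_deactivate() goes
from resolving the head page inside each helper to resolving it once up front
(simplified from the diff below):

	/* before: mark_page_accessed() and each put_page() call site
	 * resolve the head page internally */
	mark_page_accessed(page);
	put_page(page);

	/* after: one explicit page_folio(), then folio helpers throughout */
	folio = page_folio(page);
	folio_mark_accessed(folio);
	folio_put(folio);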
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
Reviewed-by: SeongJae Park <[email protected]>
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
---
mm/damon/paddr.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index ebd1905eed6f..884c8bf18b12 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -283,21 +283,23 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
struct page *page = damon_get_page(PHYS_PFN(addr));
+ struct folio *folio;
if (!page)
continue;
+ folio = page_folio(page);
- if (damos_pa_filter_out(s, page)) {
- put_page(page);
+ if (damos_pa_filter_out(s, &folio->page)) {
+ folio_put(folio);
continue;
}
if (mark_accessed)
- mark_page_accessed(page);
+ folio_mark_accessed(folio);
else
- deactivate_page(page);
- put_page(page);
- applied++;
+ deactivate_page(&folio->page);
+ folio_put(folio);
+ applied += folio_nr_pages(folio);
}
return applied * PAGE_SIZE;
}
--
2.38.1
On Wed, Dec 21, 2022 at 11:10 AM Vishal Moola (Oracle)
<[email protected]> wrote:
>
> deactivate_page() has already been converted to use folios internally. This
> change converts it to take a folio argument instead of calling page_folio(),
> and renames the function to folio_deactivate() to be more consistent with
> other folio functions.
There is one more in mm/vmscan.c.
Please git grep.