From: Kefeng Wang
Date: 2023-11-06 15:50:52
Subject: [PATCH 09/10] mm: convert mm_counter() to take a folio

Now that all mm_counter() callers have a folio, let's convert
mm_counter() to take a folio.

Signed-off-by: Kefeng Wang <[email protected]>
---
 arch/s390/mm/pgtable.c |  2 +-
 include/linux/mm.h     |  6 +++---
 mm/memory.c            | 10 +++++-----
 mm/rmap.c              |  8 ++++----
 mm/userfaultfd.c       |  2 +-
5 files changed, 14 insertions(+), 14 deletions(-)
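
For illustration only (not part of the diff below, and with placeholder
"mm"/"page" variables): after this change, a call site that still holds a
struct page rather than a folio would be expected to resolve the folio
first, e.g. via page_folio(), before querying the counter type. A minimal
before/after sketch:

	/* before: mm_counter() took a struct page */
	dec_mm_counter(mm, mm_counter(page));

	/* after: pass the folio; page_folio() maps page -> folio */
	dec_mm_counter(mm, mm_counter(page_folio(page)));

Note that mm_counter_file() still takes a struct page at this point, which
is why the include/linux/mm.h hunk passes &folio->page through to it.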

diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 2f946b493fff..54b184648db6 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -732,7 +732,7 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
else if (is_migration_entry(entry)) {
struct folio *folio = pfn_swap_entry_to_folio(entry);

- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(folio));
}
free_swap_and_cache(entry);
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 418d26608ece..fea78900bf84 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2591,11 +2591,11 @@ static inline int mm_counter_file(struct page *page)
return MM_FILEPAGES;
}

-static inline int mm_counter(struct page *page)
+static inline int mm_counter(struct folio *folio)
{
- if (PageAnon(page))
+ if (folio_test_anon(folio))
return MM_ANONPAGES;
- return mm_counter_file(page);
+ return mm_counter_file(&folio->page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
diff --git a/mm/memory.c b/mm/memory.c
index dd3760988e02..3ffef84dd7bb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -803,7 +803,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
} else if (is_migration_entry(entry)) {
folio = pfn_swap_entry_to_folio(entry);

- rss[mm_counter(&folio->page)]++;
+ rss[mm_counter(folio)]++;

if (!is_readable_migration_entry(entry) &&
is_cow_mapping(vm_flags)) {
@@ -835,7 +835,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* keep things as they are.
*/
folio_get(folio);
- rss[mm_counter(page)]++;
+ rss[mm_counter(folio)]++;
/* Cannot fail as these pages cannot get pinned. */
BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));

@@ -1466,7 +1466,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
if (pte_young(ptent) && likely(vma_has_recency(vma)))
folio_mark_accessed(folio);
}
- rss[mm_counter(page)]--;
+ rss[mm_counter(folio)]--;
if (!delay_rmap) {
page_remove_rmap(page, vma, false);
if (unlikely(page_mapcount(page) < 0))
@@ -1496,7 +1496,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
* see zap_install_uffd_wp_if_needed().
*/
WARN_ON_ONCE(!vma_is_anonymous(vma));
- rss[mm_counter(page)]--;
+ rss[mm_counter(folio)]--;
if (is_device_private_entry(entry))
page_remove_rmap(page, vma, false);
folio_put(folio);
@@ -1511,7 +1511,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
folio = pfn_swap_entry_to_folio(entry);
if (!should_zap_folio(details, folio))
continue;
- rss[mm_counter(&folio->page)]--;
+ rss[mm_counter(folio)]--;
} else if (pte_marker_entry_uffd_wp(entry)) {
/*
* For anon: always drop the marker; for file: only
diff --git a/mm/rmap.c b/mm/rmap.c
index 7a27a2b41802..7a563490ce08 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1678,7 +1678,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
set_huge_pte_at(mm, address, pvmw.pte, pteval,
hsz);
} else {
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(folio));
set_pte_at(mm, address, pvmw.pte, pteval);
}

@@ -1693,7 +1693,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
* migration) will not expect userfaults on already
* copied pages.
*/
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(folio));
} else if (folio_test_anon(folio)) {
swp_entry_t entry = page_swap_entry(subpage);
pte_t swp_pte;
@@ -2075,7 +2075,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
set_huge_pte_at(mm, address, pvmw.pte, pteval,
hsz);
} else {
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(folio));
set_pte_at(mm, address, pvmw.pte, pteval);
}

@@ -2090,7 +2090,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
* migration) will not expect userfaults on already
* copied pages.
*/
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(folio));
} else {
swp_entry_t entry;
pte_t swp_pte;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 96d9eae5c7cc..9a6759fa9b06 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -124,7 +124,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
* Must happen after rmap, as mm_counter() checks mapping (via
* PageAnon()), which is set by __page_set_anon_rmap().
*/
- inc_mm_counter(dst_mm, mm_counter(page));
+ inc_mm_counter(dst_mm, mm_counter(folio));

set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

--
2.27.0