2023-09-26 00:55:40

by Kefeng Wang

[permalink] [raw]
Subject: [PATCH -next rfc 0/9] mm: convert page cpupid functions to folios

The cpupid (or access time) used by NUMA balancing is stored in the
page flags or in _last_cpupid (if LAST_CPUPID_NOT_IN_PAGE_FLAGS) of
struct page. This series converts the page cpupid to a folio cpupid:
a new _last_cpupid field is added to struct folio, which lets us use
folio->_last_cpupid directly, and page_cpupid_reset_last(),
page_cpupid_xchg_last(), xchg_page_access_time(), and
page_cpupid_last() are converted to their folio equivalents.

Kefeng Wang (9):
mm_types: add _last_cpupid into folio
mm: mprotect: use a folio in change_pte_range()
mm: huge_memory: use a folio in change_huge_pmd()
mm: convert xchg_page_access_time to xchg_folio_access_time()
mm: convert page_cpupid_last() to folio_cpupid_last()
mm: make wp_page_reuse() and finish_mkwrite_fault() to take a folio
mm: convert page_cpupid_xchg_last() to folio_cpupid_xchg_last()
mm: page_alloc: use a folio in free_pages_prepare()
mm: convert page_cpupid_reset_last() to folio_cpupid_reset_last()

include/linux/mm.h | 40 ++++++++++++++++++++--------------------
include/linux/mm_types.h | 13 +++++++++----
kernel/sched/fair.c | 4 ++--
mm/huge_memory.c | 17 +++++++++--------
mm/memory.c | 39 +++++++++++++++++++++------------------
mm/migrate.c | 4 ++--
mm/mm_init.c | 1 -
mm/mmzone.c | 6 +++---
mm/mprotect.c | 16 +++++++++-------
mm/page_alloc.c | 17 +++++++++--------
10 files changed, 84 insertions(+), 73 deletions(-)

--
2.27.0


2023-09-26 00:55:51

by Kefeng Wang

[permalink] [raw]
Subject: [PATCH -next 9/9] mm: convert page_cpupid_reset_last() to folio_cpupid_reset_last()

There is no need to fill in the default cpupid value for every struct
page, since cpupid is only used for NUMA balancing, and the pages used
for NUMA balancing all come from the buddy allocator, where
page_cpupid_reset_last() is already called by free_pages_prepare() to
initialize it. So let's drop the page_cpupid_reset_last() call in
__init_single_page(), then make page_cpupid_reset_last() take a folio
and rename it to folio_cpupid_reset_last().

Signed-off-by: Kefeng Wang <[email protected]>
---
include/linux/mm.h | 10 +++++-----
mm/mm_init.c | 1 -
mm/page_alloc.c | 2 +-
3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a6f4b55bf469..ca66a05eb2ed 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1692,9 +1692,9 @@ static inline int folio_cpupid_last(struct folio *folio)
{
return folio->_last_cpupid;
}
-static inline void page_cpupid_reset_last(struct page *page)
+static inline void folio_cpupid_reset_last(struct folio *folio)
{
- page->_last_cpupid = -1 & LAST_CPUPID_MASK;
+ folio->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int folio_cpupid_last(struct folio *folio)
@@ -1704,9 +1704,9 @@ static inline int folio_cpupid_last(struct folio *folio)

extern int folio_cpupid_xchg_last(struct folio *folio, int cpupid);

-static inline void page_cpupid_reset_last(struct page *page)
+static inline void folio_cpupid_reset_last(struct folio *folio)
{
- page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
+ folio->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */

@@ -1769,7 +1769,7 @@ static inline bool cpupid_pid_unset(int cpupid)
return true;
}

-static inline void page_cpupid_reset_last(struct page *page)
+static inline void folio_cpupid_reset_last(struct folio *folio)
{
}

diff --git a/mm/mm_init.c b/mm/mm_init.c
index 06a72c223bce..74c0dc27fbf1 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -563,7 +563,6 @@ void __meminit __init_single_page(struct page *page, unsigned long pfn,
set_page_links(page, zone, nid, pfn);
init_page_count(page);
page_mapcount_reset(page);
- page_cpupid_reset_last(page);
page_kasan_tag_reset(page);

INIT_LIST_HEAD(&page->lru);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a888b9d57751..852fc78ddb34 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1126,7 +1126,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
return false;
}

- page_cpupid_reset_last(page);
+ folio_cpupid_reset_last(folio);
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
reset_page_owner(page, order);
page_table_check_free(page, order);
--
2.27.0

2023-09-26 00:56:26

by Kefeng Wang

[permalink] [raw]
Subject: [PATCH -next 3/9] mm: huge_memory: use a folio in change_huge_pmd()

Use a folio in change_huge_pmd(); this is in preparation for
converting xchg_page_access_time() to take a folio.

Signed-off-by: Kefeng Wang <[email protected]>
---
mm/huge_memory.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0f93a73115f7..c7efa214add8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1849,7 +1849,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
if (is_swap_pmd(*pmd)) {
swp_entry_t entry = pmd_to_swp_entry(*pmd);
- struct page *page = pfn_swap_entry_to_page(entry);
+ struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
pmd_t newpmd;

VM_BUG_ON(!is_pmd_migration_entry(*pmd));
@@ -1858,7 +1858,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
* A protection check is difficult so
* just be safe and disable write
*/
- if (PageAnon(page))
+ if (folio_test_anon(folio))
entry = make_readable_exclusive_migration_entry(swp_offset(entry));
else
entry = make_readable_migration_entry(swp_offset(entry));
@@ -1880,7 +1880,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
#endif

if (prot_numa) {
- struct page *page;
+ struct folio *folio;
bool toptier;
/*
* Avoid trapping faults against the zero page. The read-only
@@ -1893,8 +1893,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (pmd_protnone(*pmd))
goto unlock;

- page = pmd_page(*pmd);
- toptier = node_is_toptier(page_to_nid(page));
+ folio = page_folio(pmd_page(*pmd));
+ toptier = node_is_toptier(folio_nid(folio));
/*
* Skip scanning top tier node if normal numa
* balancing is disabled
@@ -1905,7 +1905,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,

if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
!toptier)
- xchg_page_access_time(page, jiffies_to_msecs(jiffies));
+ xchg_page_access_time(&folio->page,
+ jiffies_to_msecs(jiffies));
}
/*
* In case prot_numa, we are under mmap_read_lock(mm). It's critical
--
2.27.0

2023-09-26 01:00:46

by Kefeng Wang

[permalink] [raw]
Subject: [PATCH -next 4/9] mm: convert xchg_page_access_time to xchg_folio_access_time()

Make xchg_page_access_time() take a folio, and rename it to
xchg_folio_access_time(), since all callers now have a folio.

Signed-off-by: Kefeng Wang <[email protected]>
---
include/linux/mm.h | 7 ++++---
kernel/sched/fair.c | 2 +-
mm/huge_memory.c | 4 ++--
mm/mprotect.c | 2 +-
4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a1d0c82ac9a7..49b9fa383e7d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1711,11 +1711,12 @@ static inline void page_cpupid_reset_last(struct page *page)
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */

-static inline int xchg_page_access_time(struct page *page, int time)
+static inline int xchg_folio_access_time(struct folio *folio, int time)
{
int last_time;

- last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS);
+ last_time = page_cpupid_xchg_last(&folio->page,
+ time >> PAGE_ACCESS_TIME_BUCKETS);
return last_time << PAGE_ACCESS_TIME_BUCKETS;
}

@@ -1734,7 +1735,7 @@ static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
return page_to_nid(page); /* XXX */
}

-static inline int xchg_page_access_time(struct page *page, int time)
+static inline int xchg_folio_access_time(struct folio *folio, int time)
{
return 0;
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b507ec29e1e1..afb9dc98a8ee 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1714,7 +1714,7 @@ static int numa_hint_fault_latency(struct folio *folio)
int last_time, time;

time = jiffies_to_msecs(jiffies);
- last_time = xchg_page_access_time(&folio->page, time);
+ last_time = xchg_folio_access_time(folio, time);

return (time - last_time) & PAGE_ACCESS_TIME_MASK;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c7efa214add8..c4f4951615fd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1905,8 +1905,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,

if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
!toptier)
- xchg_page_access_time(&folio->page,
- jiffies_to_msecs(jiffies));
+ xchg_folio_access_time(folio,
+ jiffies_to_msecs(jiffies));
}
/*
* In case prot_numa, we are under mmap_read_lock(mm). It's critical
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 459daa987131..1c556651888a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -159,7 +159,7 @@ static long change_pte_range(struct mmu_gather *tlb,
continue;
if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
!toptier)
- xchg_page_access_time(&folio->page,
+ xchg_folio_access_time(folio,
jiffies_to_msecs(jiffies));
}

--
2.27.0

2023-09-26 01:00:55

by Kefeng Wang

[permalink] [raw]
Subject: [PATCH -next 5/9] mm: convert page_cpupid_last() to folio_cpupid_last()

Make page_cpupid_last() take a folio, and rename it to
folio_cpupid_last(), since all callers now have a folio.

Signed-off-by: Kefeng Wang <[email protected]>
---
include/linux/mm.h | 12 ++++++------
mm/huge_memory.c | 4 ++--
mm/memory.c | 2 +-
3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 49b9fa383e7d..aa7fdda1b56c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1689,18 +1689,18 @@ static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

-static inline int page_cpupid_last(struct page *page)
+static inline int folio_cpupid_last(struct folio *folio)
{
- return page->_last_cpupid;
+ return folio->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_cpupid_last(struct folio *folio)
{
- return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
+ return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);
@@ -1740,9 +1740,9 @@ static inline int xchg_folio_access_time(struct folio *folio, int time)
return 0;
}

-static inline int page_cpupid_last(struct page *page)
+static inline int folio_cpupid_last(struct folio *folio)
{
- return page_to_nid(page); /* XXX */
+ return folio_nid(folio); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c4f4951615fd..93981a759daf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1555,7 +1555,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
* to record page access time. So use default value.
*/
if (node_is_toptier(nid))
- last_cpupid = page_cpupid_last(&folio->page);
+ last_cpupid = folio_cpupid_last(folio);
target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
if (target_nid == NUMA_NO_NODE) {
folio_put(folio);
@@ -2508,7 +2508,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
if (page_is_idle(head))
set_page_idle(page_tail);

- page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
+ page_cpupid_xchg_last(page_tail, folio_cpupid_last(folio));

/*
* always add to the tail because some iterators expect new
diff --git a/mm/memory.c b/mm/memory.c
index 29c5618c91e5..5ab6e8d45a7d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4814,7 +4814,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
!node_is_toptier(nid))
last_cpupid = (-1 & LAST_CPUPID_MASK);
else
- last_cpupid = page_cpupid_last(&folio->page);
+ last_cpupid = folio_cpupid_last(folio);
target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags);
if (target_nid == NUMA_NO_NODE) {
folio_put(folio);
--
2.27.0