2023-01-13 22:53:43

by Sidhartha Kumar

Subject: [PATCH mm-unstable v3 0/8] continue hugetlb folio conversion

============== OVERVIEW ===========================
This series continues the conversion of core hugetlb functions to use
folios. It converts many helper functions in the hugetlb fault path, in
preparation for a follow-up series that converts the hugetlb fault code
paths themselves to operate on folios.
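
The conversion pattern is the same throughout the series; a simplified
sketch within mm/hugetlb.c (hugetlb_frob_folio() is a made-up stand-in,
not a helper added by these patches):

static struct folio *hugetlb_frob_folio(struct hstate *h,
						struct folio *folio)
{
	int nid = folio_nid(folio);		/* was page_to_nid(page) */

	list_move(&folio->lru, &h->hugepage_activelist);
	folio_clear_hugetlb_freed(folio);	/* was ClearHPageFreed(page) */
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return folio;
}

Callers that still operate on struct page cross the boundary with
page_folio(), and converted helpers whose callers still expect a page
hand one back with &folio->page.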

============== TESTING ===========================
LTP:
Ran 10 back-to-back rounds of the LTP hugetlb test suite.

Gigantic Huge Pages:
Tested allocation and freeing via hugeadm commands:
hugeadm --pool-pages-min 1GB:10
hugeadm --pool-pages-min 1GB:0

Demote:
Demoted one 1GB hugepage to 512 2MB hugepages:
echo 1 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
echo 1 > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote
cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
# 512
cat /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
# 0

Rebased on 1/13/2023 mm-unstable

v2 -> v3:
- grab folio directly from hugepage_freelists in patch 8 per
Matthew

v1 -> v2:
- remove redundant get_folio_unless_zero() and use existing
folio_try_get() in patch 1 per Matthew
- grab folio directly from hugepage_freelists in patch 3 per
Matthew
- add r-b to patches 1-2, 4-7
- change variable 'subfolio' to 'inner_folio' in patch 8 per
John Hubbard


Sidhartha Kumar (8):
mm/hugetlb: convert isolate_hugetlb to folios
mm/hugetlb: convert __update_and_free_page() to folios
mm/hugetlb: convert dequeue_hugetlb_page functions to folios
mm/hugetlb: convert alloc_surplus_huge_page() to folios
mm/hugetlb: increase use of folios in alloc_huge_page()
mm/hugetlb: convert alloc_migrate_huge_page to folios
mm/hugetlb: convert restore_reserve_on_error() to folios
mm/hugetlb: convert demote_free_huge_page to folios

include/linux/hugetlb.h | 10 +-
include/linux/hugetlb_cgroup.h | 8 +-
mm/gup.c | 2 +-
mm/hugetlb.c | 218 +++++++++++++++++----------------
mm/hugetlb_cgroup.c | 8 +-
mm/memory-failure.c | 2 +-
mm/memory_hotplug.c | 2 +-
mm/mempolicy.c | 2 +-
mm/migrate.c | 7 +-
9 files changed, 131 insertions(+), 128 deletions(-)

--
2.39.0


2023-01-13 22:54:41

by Sidhartha Kumar

Subject: [PATCH mm-unstable v3 6/8] mm/hugetlb: convert alloc_migrate_huge_page to folios

Change alloc_huge_page_nodemask() to alloc_hugetlb_folio_nodemask() and
alloc_migrate_huge_page() to alloc_migrate_hugetlb_folio(). Both functions
now return a folio rather than a page.
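
Callers that have not yet been converted keep their struct page
interface and convert only at the boundary; condensed from the
migrate.c hunk below, with an added comment for illustration:

	struct folio *hugetlb_folio;

	hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
						mtc->nmask, gfp_mask);
	/*
	 * struct page sits at offset 0 of struct folio, so a NULL folio
	 * still converts to a NULL page here.
	 */
	return &hugetlb_folio->page;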

Signed-off-by: Sidhartha Kumar <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
---
include/linux/hugetlb.h | 6 +++---
mm/hugetlb.c | 18 +++++++++---------
mm/migrate.c | 5 ++++-
3 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 482929b2d044..a853c13d8308 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -680,7 +680,7 @@ struct huge_bootmem_page {
int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
unsigned long address);
@@ -1001,8 +1001,8 @@ static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
return NULL;
}

-static inline struct page *
-alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+static inline struct folio *
+alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask)
{
return NULL;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f3e1d052b40c..c0cfb075cd58 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2275,7 +2275,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
return folio;
}

-static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
+static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nmask)
{
struct folio *folio;
@@ -2295,7 +2295,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
*/
folio_set_hugetlb_temporary(folio);

- return &folio->page;
+ return folio;
}

/*
@@ -2328,8 +2328,8 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
return folio;
}

-/* page migration callback function */
-struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+/* folio migration callback function */
+struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask)
{
spin_lock_irq(&hugetlb_lock);
@@ -2340,12 +2340,12 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
preferred_nid, nmask);
if (folio) {
spin_unlock_irq(&hugetlb_lock);
- return &folio->page;
+ return folio;
}
}
spin_unlock_irq(&hugetlb_lock);

- return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
+ return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
}

/* mempolicy aware migration callback */
@@ -2354,16 +2354,16 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
{
struct mempolicy *mpol;
nodemask_t *nodemask;
- struct page *page;
+ struct folio *folio;
gfp_t gfp_mask;
int node;

gfp_mask = htlb_alloc_mask(h);
node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
- page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
+ folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
mpol_cond_put(mpol);

- return page;
+ return &folio->page;
}

/*
diff --git a/mm/migrate.c b/mm/migrate.c
index 6932b3d5a9dd..fab706b78be1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1622,6 +1622,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
struct migration_target_control *mtc;
gfp_t gfp_mask;
unsigned int order = 0;
+ struct folio *hugetlb_folio = NULL;
struct folio *new_folio = NULL;
int nid;
int zidx;
@@ -1636,7 +1637,9 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
struct hstate *h = folio_hstate(folio);

gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
- return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
+ hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
+ mtc->nmask, gfp_mask);
+ return &hugetlb_folio->page;
}

if (folio_test_large(folio)) {
--
2.39.0

2023-01-13 23:14:14

by Sidhartha Kumar

Subject: [PATCH mm-unstable v3 8/8] mm/hugetlb: convert demote_free_huge_page to folios

Change demote_free_huge_page() to demote_free_hugetlb_folio() and change
demote_pool_huge_page() to pass in a folio.
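
For scale, with 4KB base pages (the configuration exercised by the
demote test in the cover letter): pages_per_huge_page(h) is 262144 for
a 1GB folio and pages_per_huge_page(target_hstate) is 512 for 2MB, so
the conversion loop in demote_free_hugetlb_folio() steps through the
gigantic folio 262144 / 512 = 512 times, preparing one 2MB folio per
iteration.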

Signed-off-by: Sidhartha Kumar <[email protected]>
---
mm/hugetlb.c | 35 +++++++++++++++++------------------
1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f8cd0c694fe9..46c5daacc5bd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3435,12 +3435,12 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
return 0;
}

-static int demote_free_huge_page(struct hstate *h, struct page *page)
+static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
{
- int i, nid = page_to_nid(page);
+ int i, nid = folio_nid(folio);
struct hstate *target_hstate;
- struct folio *folio = page_folio(page);
struct page *subpage;
+ struct folio *inner_folio;
int rc = 0;

target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
@@ -3448,18 +3448,18 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
remove_hugetlb_folio_for_demote(h, folio, false);
spin_unlock_irq(&hugetlb_lock);

- rc = hugetlb_vmemmap_restore(h, page);
+ rc = hugetlb_vmemmap_restore(h, &folio->page);
if (rc) {
- /* Allocation of vmemmmap failed, we can not demote page */
+ /* Allocation of vmemmmap failed, we can not demote folio */
spin_lock_irq(&hugetlb_lock);
- set_page_refcounted(page);
- add_hugetlb_folio(h, page_folio(page), false);
+ folio_ref_unfreeze(folio, 1);
+ add_hugetlb_folio(h, folio, false);
return rc;
}

/*
* Use destroy_compound_hugetlb_folio_for_demote for all huge page
- * sizes as it will not ref count pages.
+ * sizes as it will not ref count folios.
*/
destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));

@@ -3474,15 +3474,15 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
mutex_lock(&target_hstate->resize_lock);
for (i = 0; i < pages_per_huge_page(h);
i += pages_per_huge_page(target_hstate)) {
- subpage = nth_page(page, i);
- folio = page_folio(subpage);
+ subpage = folio_page(folio, i);
+ inner_folio = page_folio(subpage);
if (hstate_is_gigantic(target_hstate))
- prep_compound_gigantic_folio_for_demote(folio,
+ prep_compound_gigantic_folio_for_demote(inner_folio,
target_hstate->order);
else
prep_compound_page(subpage, target_hstate->order);
- set_page_private(subpage, 0);
- prep_new_hugetlb_folio(target_hstate, folio, nid);
+ folio_change_private(inner_folio, NULL);
+ prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
free_huge_page(subpage);
}
mutex_unlock(&target_hstate->resize_lock);
@@ -3504,7 +3504,7 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
__must_hold(&hugetlb_lock)
{
int nr_nodes, node;
- struct page *page;
+ struct folio *folio;

lockdep_assert_held(&hugetlb_lock);

@@ -3515,11 +3515,10 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
}

for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
- list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
- if (PageHWPoison(page))
+ list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
+ if (folio_test_hwpoison(folio))
continue;
-
- return demote_free_huge_page(h, page);
+ return demote_free_hugetlb_folio(h, folio);
}
}

--
2.39.0

2023-01-13 23:14:43

by Sidhartha Kumar

Subject: [PATCH mm-unstable v3 4/8] mm/hugetlb: convert alloc_surplus_huge_page() to folios

Change alloc_surplus_huge_page() to alloc_surplus_hugetlb_folio() and
update its callers.
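
One of those callers, gather_surplus_pages(), now adds folios to
surplus_list but still drains that list as struct page entries. This
stays correct because struct folio overlays struct page, so folio->lru
and page->lru name the same list_head; the two views of one list entry
look like:

	list_add(&folio->lru, &surplus_list);	/* enqueued as a folio */

	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		/* drained as pages, body unchanged by this patch */
	}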

Signed-off-by: Sidhartha Kumar <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
---
mm/hugetlb.c | 27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c206a8c1ddb6..62552172683a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2234,8 +2234,8 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
/*
* Allocates a fresh surplus page from the page allocator.
*/
-static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
- int nid, nodemask_t *nmask)
+static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
+ gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
struct folio *folio = NULL;

@@ -2272,7 +2272,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
out_unlock:
spin_unlock_irq(&hugetlb_lock);

- return &folio->page;
+ return folio;
}

static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
@@ -2305,7 +2305,7 @@ static
struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct mempolicy *mpol;
gfp_t gfp_mask = htlb_alloc_mask(h);
int nid;
@@ -2316,16 +2316,16 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
gfp_t gfp = gfp_mask | __GFP_NOWARN;

gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
- page = alloc_surplus_huge_page(h, gfp, nid, nodemask);
+ folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);

/* Fallback to all nodes if page==NULL */
nodemask = NULL;
}

- if (!page)
- page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
+ if (!folio)
+ folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
mpol_cond_put(mpol);
- return page;
+ return &folio->page;
}

/* page migration callback function */
@@ -2374,6 +2374,7 @@ static int gather_surplus_pages(struct hstate *h, long delta)
__must_hold(&hugetlb_lock)
{
LIST_HEAD(surplus_list);
+ struct folio *folio;
struct page *page, *tmp;
int ret;
long i;
@@ -2393,13 +2394,13 @@ static int gather_surplus_pages(struct hstate *h, long delta)
retry:
spin_unlock_irq(&hugetlb_lock);
for (i = 0; i < needed; i++) {
- page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
+ folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
NUMA_NO_NODE, NULL);
- if (!page) {
+ if (!folio) {
alloc_ok = false;
break;
}
- list_add(&page->lru, &surplus_list);
+ list_add(&folio->lru, &surplus_list);
cond_resched();
}
allocated += i;
@@ -3352,7 +3353,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
* First take pages out of surplus state. Then make up the
* remaining difference by allocating fresh huge pages.
*
- * We might race with alloc_surplus_huge_page() here and be unable
+ * We might race with alloc_surplus_hugetlb_folio() here and be unable
* to convert a surplus huge page to a normal huge page. That is
* not critical, though, it just means the overall size of the
* pool might be one hugepage larger than it needs to be, but
@@ -3395,7 +3396,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
* By placing pages into the surplus state independent of the
* overcommit value, we are allowing the surplus pool size to
* exceed overcommit. There are few sane options here. Since
- * alloc_surplus_huge_page() is checking the global counter,
+ * alloc_surplus_hugetlb_folio() is checking the global counter,
* though, we'll note that we're not allowed to exceed surplus
* and won't grow the pool anywhere else. Not until one of the
* sysctls are changed, or the surplus pages go out of use.
--
2.39.0

2023-01-13 23:31:34

by Sidhartha Kumar

Subject: [PATCH mm-unstable v3 5/8] mm/hugetlb: increase use of folios in alloc_huge_page()

Change hugetlb_cgroup_commit_charge{,_rsvd}(), dequeue_huge_page_vma()
and alloc_buddy_huge_page_with_mpol() to use folios so alloc_huge_page()
is cleaned up by operating on folios until its return.

Signed-off-by: Sidhartha Kumar <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
---
include/linux/hugetlb_cgroup.h | 8 ++++----
mm/hugetlb.c | 33 ++++++++++++++++-----------------
mm/hugetlb_cgroup.c | 8 ++------
3 files changed, 22 insertions(+), 27 deletions(-)

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index f706626a8063..3d82d91f49ac 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -141,10 +141,10 @@ extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page);
+ struct folio *folio);
extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page);
+ struct folio *folio);
extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
struct folio *folio);
extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
@@ -230,14 +230,14 @@ static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,

static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page)
+ struct folio *folio)
{
}

static inline void
hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page)
+ struct folio *folio)
{
}

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 62552172683a..f3e1d052b40c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1206,7 +1206,7 @@ static unsigned long available_huge_pages(struct hstate *h)
return h->free_huge_pages - h->resv_huge_pages;
}

-static struct page *dequeue_huge_page_vma(struct hstate *h,
+static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
struct vm_area_struct *vma,
unsigned long address, int avoid_reserve,
long chg)
@@ -1250,7 +1250,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
}

mpol_cond_put(mpol);
- return &folio->page;
+ return folio;

err:
return NULL;
@@ -2302,7 +2302,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
* Use the VMA's mpolicy to allocate a huge page from the buddy.
*/
static
-struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
+struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
struct folio *folio = NULL;
@@ -2325,7 +2325,7 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
if (!folio)
folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
mpol_cond_put(mpol);
- return &folio->page;
+ return folio;
}

/* page migration callback function */
@@ -2874,7 +2874,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
{
struct hugepage_subpool *spool = subpool_vma(vma);
struct hstate *h = hstate_vma(vma);
- struct page *page;
struct folio *folio;
long map_chg, map_commit;
long gbl_chg;
@@ -2938,34 +2937,34 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
* from the global free pool (global change). gbl_chg == 0 indicates
* a reservation exists for the allocation.
*/
- page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
- if (!page) {
+ folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
+ if (!folio) {
spin_unlock_irq(&hugetlb_lock);
- page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
- if (!page)
+ folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+ if (!folio)
goto out_uncharge_cgroup;
spin_lock_irq(&hugetlb_lock);
if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
- SetHPageRestoreReserve(page);
+ folio_set_hugetlb_restore_reserve(folio);
h->resv_huge_pages--;
}
- list_add(&page->lru, &h->hugepage_activelist);
- set_page_refcounted(page);
+ list_add(&folio->lru, &h->hugepage_activelist);
+ folio_ref_unfreeze(folio, 1);
/* Fall through */
}
- folio = page_folio(page);
- hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+
+ hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
/* If allocation is not consuming a reservation, also store the
* hugetlb_cgroup pointer on the page.
*/
if (deferred_reserve) {
hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
- h_cg, page);
+ h_cg, folio);
}

spin_unlock_irq(&hugetlb_lock);

- hugetlb_set_page_subpool(page, spool);
+ hugetlb_set_folio_subpool(folio, spool);

map_commit = vma_commit_reservation(h, vma, addr);
if (unlikely(map_chg > map_commit)) {
@@ -2986,7 +2985,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
pages_per_huge_page(h), folio);
}
- return page;
+ return &folio->page;

out_uncharge_cgroup:
hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index d9e4425d81ac..dedd2edb076e 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -331,19 +331,15 @@ static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page)
+ struct folio *folio)
{
- struct folio *folio = page_folio(page);
-
__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page)
+ struct folio *folio)
{
- struct folio *folio = page_folio(page);
-
__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
}

--
2.39.0

2023-01-13 23:31:46

by Sidhartha Kumar

Subject: [PATCH mm-unstable v3 3/8] mm/hugetlb: convert dequeue_hugetlb_page functions to folios

dequeue_huge_page_node_exact() is changed to
dequeue_hugetlb_folio_node_exact() and dequeue_huge_page_nodemask() is
changed to dequeue_hugetlb_folio_nodemask(). Both functions now return a
folio, so update their callers to handle it.
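
Note the refcount handling in dequeue_hugetlb_folio_node_exact(): free
hugetlb pages sit on the freelists with a refcount of zero, so
folio_ref_unfreeze(folio, 1) gives the dequeued folio its first
reference, matching what set_page_refcounted() did for the page.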

Signed-off-by: Sidhartha Kumar <[email protected]>
---
mm/hugetlb.c | 56 ++++++++++++++++++++++++++++------------------------
1 file changed, 30 insertions(+), 26 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9155bb5ee570..c206a8c1ddb6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1140,32 +1140,33 @@ static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
folio_set_hugetlb_freed(folio);
}

-static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
+static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
+ int nid)
{
- struct page *page;
+ struct folio *folio;
bool pin = !!(current->flags & PF_MEMALLOC_PIN);

lockdep_assert_held(&hugetlb_lock);
- list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
- if (pin && !is_longterm_pinnable_page(page))
+ list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
+ if (pin && !folio_is_longterm_pinnable(folio))
continue;

- if (PageHWPoison(page))
+ if (folio_test_hwpoison(folio))
continue;

- list_move(&page->lru, &h->hugepage_activelist);
- set_page_refcounted(page);
- ClearHPageFreed(page);
+ list_move(&folio->lru, &h->hugepage_activelist);
+ folio_ref_unfreeze(folio, 1);
+ folio_clear_hugetlb_freed(folio);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
- return page;
+ return folio;
}

return NULL;
}

-static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
- nodemask_t *nmask)
+static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
+ int nid, nodemask_t *nmask)
{
unsigned int cpuset_mems_cookie;
struct zonelist *zonelist;
@@ -1178,7 +1179,7 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
retry_cpuset:
cpuset_mems_cookie = read_mems_allowed_begin();
for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
- struct page *page;
+ struct folio *folio;

if (!cpuset_zone_allowed(zone, gfp_mask))
continue;
@@ -1190,9 +1191,9 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
continue;
node = zone_to_nid(zone);

- page = dequeue_huge_page_node_exact(h, node);
- if (page)
- return page;
+ folio = dequeue_hugetlb_folio_node_exact(h, node);
+ if (folio)
+ return folio;
}
if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
@@ -1210,7 +1211,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
unsigned long address, int avoid_reserve,
long chg)
{
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct mempolicy *mpol;
gfp_t gfp_mask;
nodemask_t *nodemask;
@@ -1232,22 +1233,24 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

if (mpol_is_preferred_many(mpol)) {
- page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+ folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+ nid, nodemask);

/* Fallback to all nodes if page==NULL */
nodemask = NULL;
}

- if (!page)
- page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+ if (!folio)
+ folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+ nid, nodemask);

- if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
- SetHPageRestoreReserve(page);
+ if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
+ folio_set_hugetlb_restore_reserve(folio);
h->resv_huge_pages--;
}

mpol_cond_put(mpol);
- return page;
+ return &folio->page;

err:
return NULL;
@@ -2331,12 +2334,13 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
{
spin_lock_irq(&hugetlb_lock);
if (available_huge_pages(h)) {
- struct page *page;
+ struct folio *folio;

- page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
- if (page) {
+ folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+ preferred_nid, nmask);
+ if (folio) {
spin_unlock_irq(&hugetlb_lock);
- return page;
+ return &folio->page;
}
}
spin_unlock_irq(&hugetlb_lock);
--
2.39.0

2023-01-13 23:31:52

by Sidhartha Kumar

Subject: [PATCH mm-unstable v3 2/8] mm/hugetlb: convert __update_and_free_page() to folios

Change __update_and_free_page() to __update_and_free_hugetlb_folio() by
changing its callers to pass in a folio.

Signed-off-by: Sidhartha Kumar <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
---
mm/hugetlb.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 540f33d8cc51..9155bb5ee570 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1556,10 +1556,10 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
enqueue_hugetlb_folio(h, folio);
}

-static void __update_and_free_page(struct hstate *h, struct page *page)
+static void __update_and_free_hugetlb_folio(struct hstate *h,
+ struct folio *folio)
{
int i;
- struct folio *folio = page_folio(page);
struct page *subpage;

if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
@@ -1572,7 +1572,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
if (folio_test_hugetlb_raw_hwp_unreliable(folio))
return;

- if (hugetlb_vmemmap_restore(h, page)) {
+ if (hugetlb_vmemmap_restore(h, &folio->page)) {
spin_lock_irq(&hugetlb_lock);
/*
* If we cannot allocate vmemmap pages, just refuse to free the
@@ -1608,7 +1608,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
destroy_compound_gigantic_folio(folio, huge_page_order(h));
free_gigantic_folio(folio, huge_page_order(h));
} else {
- __free_pages(page, huge_page_order(h));
+ __free_pages(&folio->page, huge_page_order(h));
}
}

@@ -1648,7 +1648,7 @@ static void free_hpage_workfn(struct work_struct *work)
*/
h = size_to_hstate(page_size(page));

- __update_and_free_page(h, page);
+ __update_and_free_hugetlb_folio(h, page_folio(page));

cond_resched();
}
@@ -1665,7 +1665,7 @@ static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
bool atomic)
{
if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
- __update_and_free_page(h, &folio->page);
+ __update_and_free_hugetlb_folio(h, folio);
return;
}

--
2.39.0