2023-10-09 15:19:50

by Usama Arif

Subject: [PATCH 0/1] mm: hugetlb_vmemmap: use folio argument for hugetlb_vmemmap_* functions

Most function calls in hugetlb.c are made with folio arguments.
This brings the hugetlb_vmemmap calls in line with them by passing a
folio instead of the head struct page. The head struct page is still
needed within these functions.
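
As an illustration (the full diff is in patch 1/1), the call sites in
hugetlb.c change roughly as follows, while the hugetlb_vmemmap_* helpers
still derive the head page internally where the vmemmap address
computation needs it:

	/* before */
	rc = hugetlb_vmemmap_restore(h, &folio->page);
	hugetlb_vmemmap_optimize(h, &folio->page);

	/* after */
	rc = hugetlb_vmemmap_restore(h, folio);
	hugetlb_vmemmap_optimize(h, folio);

	/* inside the helpers, the head page is still taken from the folio */
	struct page *head = &folio->page;
	unsigned long vmemmap_start = (unsigned long)head;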

This patch applies on top of "hugetlb: batch TLB flushes when restoring
vmemmap" to avoid merge conflicts.

Usama Arif (1):
hugetlb_vmemmap: use folio argument for hugetlb_vmemmap_* functions

mm/hugetlb.c | 10 +++++-----
mm/hugetlb_vmemmap.c | 42 ++++++++++++++++++++++--------------------
mm/hugetlb_vmemmap.h | 8 ++++----
3 files changed, 31 insertions(+), 29 deletions(-)

--
2.25.1


2023-10-09 15:19:50

by Usama Arif

Subject: [PATCH 1/1] hugetlb_vmemmap: use folio argument for hugetlb_vmemmap_* functions

Most function calls in hugetlb.c are made with folio arguments.
This brings the hugetlb_vmemmap calls in line with them by passing a
folio instead of the head struct page. The head struct page is still
needed within these functions.

The set/clear/test functions for hugepages are also changed to
folio versions.
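
For reference, the flag helper conversion is one-to-one (excerpted from
the diff below):

	/* before (head page based) */
	if (!HPageVmemmapOptimized(head))
		return 0;
	SetHPageVmemmapOptimized(head);
	ClearHPageVmemmapOptimized(head);

	/* after (folio based) */
	if (!folio_test_hugetlb_vmemmap_optimized(folio))
		return 0;
	folio_set_hugetlb_vmemmap_optimized(folio);
	folio_clear_hugetlb_vmemmap_optimized(folio);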

Signed-off-by: Usama Arif <[email protected]>
---
mm/hugetlb.c | 10 +++++-----
mm/hugetlb_vmemmap.c | 42 ++++++++++++++++++++++--------------------
mm/hugetlb_vmemmap.h | 8 ++++----
3 files changed, 31 insertions(+), 29 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b12f5fd295bb..73803d62066a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1606,7 +1606,7 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
* is no longer identified as a hugetlb page. hugetlb_vmemmap_restore
* can only be passed hugetlb pages and will BUG otherwise.
*/
- if (clear_dtor && hugetlb_vmemmap_restore(h, &folio->page)) {
+ if (clear_dtor && hugetlb_vmemmap_restore(h, folio)) {
spin_lock_irq(&hugetlb_lock);
/*
* If we cannot allocate vmemmap pages, just refuse to free the
@@ -1749,7 +1749,7 @@ static void bulk_vmemmap_restore_error(struct hstate *h,
* quit processing the list to retry the bulk operation.
*/
list_for_each_entry_safe(folio, t_folio, folio_list, lru)
- if (hugetlb_vmemmap_restore(h, &folio->page)) {
+ if (hugetlb_vmemmap_restore(h, folio)) {
list_del(&folio->lru);
spin_lock_irq(&hugetlb_lock);
add_hugetlb_folio(h, folio, true);
@@ -1907,7 +1907,7 @@ static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{
init_new_hugetlb_folio(h, folio);
- hugetlb_vmemmap_optimize(h, &folio->page);
+ hugetlb_vmemmap_optimize(h, folio);
}

static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
@@ -2312,7 +2312,7 @@ int dissolve_free_huge_page(struct page *page)
* Attempt to allocate vmemmmap here so that we can take
* appropriate action on failure.
*/
- rc = hugetlb_vmemmap_restore(h, &folio->page);
+ rc = hugetlb_vmemmap_restore(h, folio);
if (!rc) {
update_and_free_hugetlb_folio(h, folio, false);
} else {
@@ -3721,7 +3721,7 @@ static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
* passed hugetlb folios and will BUG otherwise.
*/
if (folio_test_hugetlb(folio)) {
- rc = hugetlb_vmemmap_restore(h, &folio->page);
+ rc = hugetlb_vmemmap_restore(h, folio);
if (rc) {
/* Allocation of vmemmmap failed, we can not demote folio */
spin_lock_irq(&hugetlb_lock);
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index d2999c303031..84b5ac93b9e5 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -495,14 +495,15 @@ EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);

-static int __hugetlb_vmemmap_restore(const struct hstate *h, struct page *head, unsigned long flags)
+static int __hugetlb_vmemmap_restore(const struct hstate *h, struct folio *folio, unsigned long flags)
{
int ret;
+ struct page *head = &folio->page;
unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
unsigned long vmemmap_reuse;

VM_WARN_ON_ONCE(!PageHuge(head));
- if (!HPageVmemmapOptimized(head))
+ if (!folio_test_hugetlb_vmemmap_optimized(folio))
return 0;

vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
@@ -518,7 +519,7 @@ static int __hugetlb_vmemmap_restore(const struct hstate *h, struct page *head,
*/
ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, flags);
if (!ret) {
- ClearHPageVmemmapOptimized(head);
+ folio_clear_hugetlb_vmemmap_optimized(folio);
static_branch_dec(&hugetlb_optimize_vmemmap_key);
}

@@ -530,14 +531,14 @@ static int __hugetlb_vmemmap_restore(const struct hstate *h, struct page *head,
* hugetlb_vmemmap_optimize()) vmemmap pages which
* will be reallocated and remapped.
* @h: struct hstate.
- * @head: the head page whose vmemmap pages will be restored.
+ * @folio: the folio whose vmemmap pages will be restored.
*
- * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
+ * Return: %0 if @folio's vmemmap pages have been reallocated and remapped,
* negative error code otherwise.
*/
-int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
+int hugetlb_vmemmap_restore(const struct hstate *h, struct folio *folio)
{
- return __hugetlb_vmemmap_restore(h, head, 0);
+ return __hugetlb_vmemmap_restore(h, folio, 0);
}

/**
@@ -563,7 +564,7 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,

list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
if (folio_test_hugetlb_vmemmap_optimized(folio)) {
- ret = __hugetlb_vmemmap_restore(h, &folio->page,
+ ret = __hugetlb_vmemmap_restore(h, folio,
VMEMMAP_REMAP_NO_TLB_FLUSH);
if (ret)
break;
@@ -641,11 +642,12 @@ static bool vmemmap_should_optimize(const struct hstate *h, const struct page *h
}

static int __hugetlb_vmemmap_optimize(const struct hstate *h,
- struct page *head,
+ struct folio *folio,
struct list_head *vmemmap_pages,
unsigned long flags)
{
int ret = 0;
+ struct page *head = &folio->page;
unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
unsigned long vmemmap_reuse;

@@ -665,7 +667,7 @@ static int __hugetlb_vmemmap_optimize(const struct hstate *h,
* If there is an error during optimization, we will immediately FLUSH
* the TLB and clear the flag below.
*/
- SetHPageVmemmapOptimized(head);
+ folio_set_hugetlb_vmemmap_optimized(folio);

vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
vmemmap_reuse = vmemmap_start;
@@ -681,27 +683,27 @@ static int __hugetlb_vmemmap_optimize(const struct hstate *h,
vmemmap_pages, flags);
if (ret) {
static_branch_dec(&hugetlb_optimize_vmemmap_key);
- ClearHPageVmemmapOptimized(head);
+ folio_clear_hugetlb_vmemmap_optimized(folio);
}

return ret;
}

/**
- * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
+ * hugetlb_vmemmap_optimize - optimize @folio's vmemmap pages.
* @h: struct hstate.
- * @head: the head page whose vmemmap pages will be optimized.
+ * @folio: the folio whose vmemmap pages will be optimized.
*
- * This function only tries to optimize @head's vmemmap pages and does not
+ * This function only tries to optimize @folio's vmemmap pages and does not
* guarantee that the optimization will succeed after it returns. The caller
- * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
- * have been optimized.
+ * can use folio_test_hugetlb_vmemmap_optimized(@folio) to detect if @folio's
+ * vmemmap pages have been optimized.
*/
-void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
+void hugetlb_vmemmap_optimize(const struct hstate *h, struct folio *folio)
{
LIST_HEAD(vmemmap_pages);

- __hugetlb_vmemmap_optimize(h, head, &vmemmap_pages, 0);
+ __hugetlb_vmemmap_optimize(h, folio, &vmemmap_pages, 0);
free_vmemmap_page_list(&vmemmap_pages);
}

@@ -745,7 +747,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
flush_tlb_all();

list_for_each_entry(folio, folio_list, lru) {
- int ret = __hugetlb_vmemmap_optimize(h, &folio->page,
+ int ret = __hugetlb_vmemmap_optimize(h, folio,
&vmemmap_pages,
VMEMMAP_REMAP_NO_TLB_FLUSH);

@@ -761,7 +763,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
flush_tlb_all();
free_vmemmap_page_list(&vmemmap_pages);
INIT_LIST_HEAD(&vmemmap_pages);
- __hugetlb_vmemmap_optimize(h, &folio->page,
+ __hugetlb_vmemmap_optimize(h, folio,
&vmemmap_pages,
VMEMMAP_REMAP_NO_TLB_FLUSH);
}
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index a0dcf49f46ba..6a06dccd7ffa 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -18,11 +18,11 @@
#define HUGETLB_VMEMMAP_RESERVE_PAGES (HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
-int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
+int hugetlb_vmemmap_restore(const struct hstate *h, struct folio *folio);
long hugetlb_vmemmap_restore_folios(const struct hstate *h,
struct list_head *folio_list,
struct list_head *non_hvo_folios);
-void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
+void hugetlb_vmemmap_optimize(const struct hstate *h, struct folio *folio);
void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);

static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
@@ -43,7 +43,7 @@ static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate
return size > 0 ? size : 0;
}
#else
-static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
+static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct folio *folio)
{
return 0;
}
@@ -56,7 +56,7 @@ static long hugetlb_vmemmap_restore_folios(const struct hstate *h,
return 0;
}

-static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
+static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct folio *folio)
{
}

--
2.25.1

2023-10-10 06:59:25

by Muchun Song

Subject: Re: [PATCH 1/1] hugetlb_vmemmap: use folio argument for hugetlb_vmemmap_* functions



On 2023/10/9 23:18, Usama Arif wrote:
> Most function calls in hugetlb.c are made with folio arguments.
> This brings the hugetlb_vmemmap calls in line with them by passing a
> folio instead of the head struct page. The head struct page is still
> needed within these functions.
>
> The set/clear/test functions for hugepages are also changed to
> folio versions.
>
> Signed-off-by: Usama Arif <[email protected]>
> ---
> mm/hugetlb.c | 10 +++++-----
> mm/hugetlb_vmemmap.c | 42 ++++++++++++++++++++++--------------------
> mm/hugetlb_vmemmap.h | 8 ++++----
> 3 files changed, 31 insertions(+), 29 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index b12f5fd295bb..73803d62066a 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1606,7 +1606,7 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
> * is no longer identified as a hugetlb page. hugetlb_vmemmap_restore
> * can only be passed hugetlb pages and will BUG otherwise.
> */
> - if (clear_dtor && hugetlb_vmemmap_restore(h, &folio->page)) {
> + if (clear_dtor && hugetlb_vmemmap_restore(h, folio)) {
> spin_lock_irq(&hugetlb_lock);
> /*
> * If we cannot allocate vmemmap pages, just refuse to free the
> @@ -1749,7 +1749,7 @@ static void bulk_vmemmap_restore_error(struct hstate *h,
> * quit processing the list to retry the bulk operation.
> */
> list_for_each_entry_safe(folio, t_folio, folio_list, lru)
> - if (hugetlb_vmemmap_restore(h, &folio->page)) {
> + if (hugetlb_vmemmap_restore(h, folio)) {
> list_del(&folio->lru);
> spin_lock_irq(&hugetlb_lock);
> add_hugetlb_folio(h, folio, true);
> @@ -1907,7 +1907,7 @@ static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
> static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
> {
> init_new_hugetlb_folio(h, folio);
> - hugetlb_vmemmap_optimize(h, &folio->page);
> + hugetlb_vmemmap_optimize(h, folio);
> }
>
> static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
> @@ -2312,7 +2312,7 @@ int dissolve_free_huge_page(struct page *page)
> * Attempt to allocate vmemmmap here so that we can take
> * appropriate action on failure.
> */
> - rc = hugetlb_vmemmap_restore(h, &folio->page);
> + rc = hugetlb_vmemmap_restore(h, folio);
> if (!rc) {
> update_and_free_hugetlb_folio(h, folio, false);
> } else {
> @@ -3721,7 +3721,7 @@ static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
> * passed hugetlb folios and will BUG otherwise.
> */
> if (folio_test_hugetlb(folio)) {
> - rc = hugetlb_vmemmap_restore(h, &folio->page);
> + rc = hugetlb_vmemmap_restore(h, folio);
> if (rc) {
> /* Allocation of vmemmmap failed, we can not demote folio */
> spin_lock_irq(&hugetlb_lock);
> diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> index d2999c303031..84b5ac93b9e5 100644
> --- a/mm/hugetlb_vmemmap.c
> +++ b/mm/hugetlb_vmemmap.c
> @@ -495,14 +495,15 @@ EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
> static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
> core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
>
> -static int __hugetlb_vmemmap_restore(const struct hstate *h, struct page *head, unsigned long flags)
> +static int __hugetlb_vmemmap_restore(const struct hstate *h, struct folio *folio, unsigned long flags)
> {
> int ret;
> + struct page *head = &folio->page;
> unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
> unsigned long vmemmap_reuse;
>
> VM_WARN_ON_ONCE(!PageHuge(head));
> - if (!HPageVmemmapOptimized(head))
> + if (!folio_test_hugetlb_vmemmap_optimized(folio))
> return 0;
>
> vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
> @@ -518,7 +519,7 @@ static int __hugetlb_vmemmap_restore(const struct hstate *h, struct page *head,
> */
> ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, flags);
> if (!ret) {
> - ClearHPageVmemmapOptimized(head);
> + folio_clear_hugetlb_vmemmap_optimized(folio);
> static_branch_dec(&hugetlb_optimize_vmemmap_key);
> }
>
> @@ -530,14 +531,14 @@ static int __hugetlb_vmemmap_restore(const struct hstate *h, struct page *head,
> * hugetlb_vmemmap_optimize()) vmemmap pages which
> * will be reallocated and remapped.
> * @h: struct hstate.
> - * @head: the head page whose vmemmap pages will be restored.
> + * @folio: the folio whose vmemmap pages will be restored.
> *
> - * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
> + * Return: %0 if @folio's vmemmap pages have been reallocated and remapped,
> * negative error code otherwise.
> */
> -int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
> +int hugetlb_vmemmap_restore(const struct hstate *h, struct folio *folio)

I'd like to rename this to hugetlb_vmemmap_restore_folio to be consistent
with hugetlb_vmemmap_restore_folios.
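
i.e., a sketch of the suggested naming (not part of this patch), matching
the existing hugetlb_vmemmap_restore_folios() prototype in
mm/hugetlb_vmemmap.h:

	int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio);
	long hugetlb_vmemmap_restore_folios(const struct hstate *h,
					    struct list_head *folio_list,
					    struct list_head *non_hvo_folios);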

> {
> - return __hugetlb_vmemmap_restore(h, head, 0);
> + return __hugetlb_vmemmap_restore(h, folio, 0);
> }
>
> /**
> @@ -563,7 +564,7 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
>
> list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
> if (folio_test_hugetlb_vmemmap_optimized(folio)) {
> - ret = __hugetlb_vmemmap_restore(h, &folio->page,
> + ret = __hugetlb_vmemmap_restore(h, folio,
> VMEMMAP_REMAP_NO_TLB_FLUSH);
> if (ret)
> break;
> @@ -641,11 +642,12 @@ static bool vmemmap_should_optimize(const struct hstate *h, const struct page *h
> }
>
> static int __hugetlb_vmemmap_optimize(const struct hstate *h,
> - struct page *head,
> + struct folio *folio,
> struct list_head *vmemmap_pages,
> unsigned long flags)
> {
> int ret = 0;
> + struct page *head = &folio->page;
> unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
> unsigned long vmemmap_reuse;
>
> @@ -665,7 +667,7 @@ static int __hugetlb_vmemmap_optimize(const struct hstate *h,
> * If there is an error during optimization, we will immediately FLUSH
> * the TLB and clear the flag below.
> */
> - SetHPageVmemmapOptimized(head);
> + folio_set_hugetlb_vmemmap_optimized(folio);
>
> vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
> vmemmap_reuse = vmemmap_start;
> @@ -681,27 +683,27 @@ static int __hugetlb_vmemmap_optimize(const struct hstate *h,
> vmemmap_pages, flags);
> if (ret) {
> static_branch_dec(&hugetlb_optimize_vmemmap_key);
> - ClearHPageVmemmapOptimized(head);
> + folio_clear_hugetlb_vmemmap_optimized(folio);
> }
>
> return ret;
> }
>
> /**
> - * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
> + * hugetlb_vmemmap_optimize - optimize @folio's vmemmap pages.
> * @h: struct hstate.
> - * @head: the head page whose vmemmap pages will be optimized.
> + * @folio: the folio whose vmemmap pages will be optimized.
> *
> - * This function only tries to optimize @head's vmemmap pages and does not
> + * This function only tries to optimize @folio's vmemmap pages and does not
> * guarantee that the optimization will succeed after it returns. The caller
> - * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
> - * have been optimized.
> + * can use folio_test_hugetlb_vmemmap_optimized(@folio) to detect if @folio's
> + * vmemmap pages have been optimized.
> */
> -void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
> +void hugetlb_vmemmap_optimize(const struct hstate *h, struct folio *folio)

Same here. Otherwise, LGTM. Please feel free to add the following in your
next version:

Reviewed-by: Muchun Song <[email protected]>

Thanks.

> {
> LIST_HEAD(vmemmap_pages);
>
> - __hugetlb_vmemmap_optimize(h, head, &vmemmap_pages, 0);
> + __hugetlb_vmemmap_optimize(h, folio, &vmemmap_pages, 0);
> free_vmemmap_page_list(&vmemmap_pages);
> }
>
> @@ -745,7 +747,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
> flush_tlb_all();
>
> list_for_each_entry(folio, folio_list, lru) {
> - int ret = __hugetlb_vmemmap_optimize(h, &folio->page,
> + int ret = __hugetlb_vmemmap_optimize(h, folio,
> &vmemmap_pages,
> VMEMMAP_REMAP_NO_TLB_FLUSH);
>
> @@ -761,7 +763,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
> flush_tlb_all();
> free_vmemmap_page_list(&vmemmap_pages);
> INIT_LIST_HEAD(&vmemmap_pages);
> - __hugetlb_vmemmap_optimize(h, &folio->page,
> + __hugetlb_vmemmap_optimize(h, folio,
> &vmemmap_pages,
> VMEMMAP_REMAP_NO_TLB_FLUSH);
> }
> diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
> index a0dcf49f46ba..6a06dccd7ffa 100644
> --- a/mm/hugetlb_vmemmap.h
> +++ b/mm/hugetlb_vmemmap.h
> @@ -18,11 +18,11 @@
> #define HUGETLB_VMEMMAP_RESERVE_PAGES (HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
>
> #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
> -int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
> +int hugetlb_vmemmap_restore(const struct hstate *h, struct folio *folio);
> long hugetlb_vmemmap_restore_folios(const struct hstate *h,
> struct list_head *folio_list,
> struct list_head *non_hvo_folios);
> -void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
> +void hugetlb_vmemmap_optimize(const struct hstate *h, struct folio *folio);
> void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
>
> static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
> @@ -43,7 +43,7 @@ static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate
> return size > 0 ? size : 0;
> }
> #else
> -static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
> +static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct folio *folio)
> {
> return 0;
> }
> @@ -56,7 +56,7 @@ static long hugetlb_vmemmap_restore_folios(const struct hstate *h,
> return 0;
> }
>
> -static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
> +static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct folio *folio)
> {
> }
>