2023-02-15 10:40:02

by Baolin Wang

Subject: [PATCH v3 0/4] Change the return value for page isolation functions

The page isolation functions currently do not return a boolean to
indicate success; instead they return a negative error code when they
fail to isolate a page. As a result, code like the following, used in
many places, reads like a boolean success/failure check, which can
confuse people about whether the isolation succeeded.

if (folio_isolate_lru(folio))
        continue;

Moreover, the page isolation functions only return 0 or -EBUSY, and
most callers do not care about the specific negative error code. We can
therefore convert all of the page isolation functions to return a
boolean value, which removes the confusion and makes the code clearer.
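
That is, a call site changes from checking an error code to checking a
boolean; a minimal sketch of the before/after convention, reusing the
example above:

        /* before: returns 0 on success, -EBUSY on failure */
        if (folio_isolate_lru(folio))
                continue;       /* failed to isolate */

        /* after: returns true on success, false on failure */
        if (!folio_isolate_lru(folio))
                continue;       /* failed to isolate */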

No functional changes intended in this patch series.

Changes from v2:
- Add a new boolean 'isolated' variable in callers that still require
  the negative error value.
- Keep the same return value logic in do_migrate_range(), meaning it
  still returns -EBUSY if all pages fail to be isolated.
- Collect Acked-by and Reviewed-by tags. Thanks David and SeongJae.

Changes from v1:
- Convert all isolation functions to return bool.

Baolin Wang (4):
mm: change to return bool for folio_isolate_lru()
mm: change to return bool for isolate_lru_page()
mm: hugetlb: change to return bool for isolate_hugetlb()
mm: change to return bool for isolate_movable_page()

 include/linux/hugetlb.h |  6 +++---
 include/linux/migrate.h |  6 +++---
 mm/compaction.c         |  2 +-
 mm/damon/paddr.c        |  2 +-
 mm/folio-compat.c       |  4 ++--
 mm/gup.c                |  2 +-
 mm/hugetlb.c            | 13 ++++++++-----
 mm/internal.h           |  4 ++--
 mm/khugepaged.c         |  4 ++--
 mm/madvise.c            |  4 ++--
 mm/memcontrol.c         |  4 ++--
 mm/memory-failure.c     | 10 +++++-----
 mm/memory_hotplug.c     |  8 +++++---
 mm/mempolicy.c          |  4 ++--
 mm/migrate.c            | 20 +++++++++++---------
 mm/migrate_device.c     |  2 +-
 mm/vmscan.c             | 10 +++++-----
 17 files changed, 56 insertions(+), 49 deletions(-)

--
2.27.0



2023-02-15 10:40:05

by Baolin Wang

Subject: [PATCH v3 2/4] mm: change to return bool for isolate_lru_page()

isolate_lru_page() can only return 0 or -EBUSY, and most callers do
not care about the specific negative error code, except for one caller
in add_page_for_migration(). So we can convert isolate_lru_page() to
return a boolean value, which makes the code clearer when checking its
return value.

Also convert all callers' checks of the isolation state accordingly.

No functional changes intended.

Signed-off-by: Baolin Wang <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
---
 mm/folio-compat.c   | 12 +++---------
 mm/internal.h       |  2 +-
 mm/khugepaged.c     |  2 +-
 mm/memcontrol.c     |  4 ++--
 mm/memory-failure.c |  4 ++--
 mm/memory_hotplug.c |  8 +++++---
 mm/migrate.c        |  9 ++++++---
 mm/migrate_device.c |  2 +-
 8 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 540373cf904e..cabcd1de9ecb 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -113,17 +113,11 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
 }
 EXPORT_SYMBOL(grab_cache_page_write_begin);
 
-int isolate_lru_page(struct page *page)
+bool isolate_lru_page(struct page *page)
 {
-        bool ret;
-
         if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
-                return -EBUSY;
-        ret = folio_isolate_lru((struct folio *)page);
-        if (ret)
-                return 0;
-
-        return -EBUSY;
+                return false;
+        return folio_isolate_lru((struct folio *)page);
 }
 
 void putback_lru_page(struct page *page)
diff --git a/mm/internal.h b/mm/internal.h
index 8645e8496537..fc01fd092ea5 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -187,7 +187,7 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 /*
  * in mm/vmscan.c:
  */
-int isolate_lru_page(struct page *page);
+bool isolate_lru_page(struct page *page);
 bool folio_isolate_lru(struct folio *folio);
 void putback_lru_page(struct page *page);
 void folio_putback_lru(struct folio *folio);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index cee659cfa3c1..8dbc39896811 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -659,7 +659,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                  * Isolate the page to avoid collapsing an hugepage
                  * currently in use by the VM.
                  */
-                if (isolate_lru_page(page)) {
+                if (!isolate_lru_page(page)) {
                         unlock_page(page);
                         result = SCAN_DEL_PAGE_LRU;
                         goto out;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 17335459d8dc..e8fd42be5fab 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6176,7 +6176,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
                 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
                 if (target_type == MC_TARGET_PAGE) {
                         page = target.page;
-                        if (!isolate_lru_page(page)) {
+                        if (isolate_lru_page(page)) {
                                 if (!mem_cgroup_move_account(page, true,
                                                              mc.from, mc.to)) {
                                         mc.precharge -= HPAGE_PMD_NR;
@@ -6226,7 +6226,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
                          */
                         if (PageTransCompound(page))
                                 goto put;
-                        if (!device && isolate_lru_page(page))
+                        if (!device && !isolate_lru_page(page))
                                 goto put;
                         if (!mem_cgroup_move_account(page, false,
                                                 mc.from, mc.to)) {
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index db85c2d37f70..e504362fdb23 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -846,7 +846,7 @@ static const char * const action_page_types[] = {
  */
 static int delete_from_lru_cache(struct page *p)
 {
-        if (!isolate_lru_page(p)) {
+        if (isolate_lru_page(p)) {
                 /*
                  * Clear sensible page flags, so that the buddy system won't
                  * complain when the page is unpoison-and-freed.
                  */
@@ -2513,7 +2513,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
                 bool lru = !__PageMovable(page);
 
                 if (lru)
-                        isolated = !isolate_lru_page(page);
+                        isolated = isolate_lru_page(page);
                 else
                         isolated = !isolate_movable_page(page,
                                                          ISOLATE_UNEVICTABLE);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a1e8c3e9ab08..5fc2dcf4e3ab 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1632,6 +1632,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                 struct folio *folio;
+                bool isolated;
 
                 if (!pfn_valid(pfn))
                         continue;
@@ -1667,9 +1668,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                  * We can skip free pages. And we can deal with pages on
                  * LRU and non-lru movable pages.
                  */
-                if (PageLRU(page))
-                        ret = isolate_lru_page(page);
-                else
+                if (PageLRU(page)) {
+                        isolated = isolate_lru_page(page);
+                        ret = isolated ? 0 : -EBUSY;
+                } else
                         ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
                 if (!ret) { /* Success */
                         list_add_tail(&page->lru, &source);
diff --git a/mm/migrate.c b/mm/migrate.c
index ef68a1aff35c..53010a142e7f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2132,11 +2132,14 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                 }
         } else {
                 struct page *head;
+                bool isolated;
 
                 head = compound_head(page);
-                err = isolate_lru_page(head);
-                if (err)
+                isolated = isolate_lru_page(head);
+                if (!isolated) {
+                        err = -EBUSY;
                         goto out_putpage;
+                }
 
                 err = 1;
                 list_add_tail(&head->lru, pagelist);
@@ -2541,7 +2544,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
                 return 0;
         }
 
-        if (isolate_lru_page(page))
+        if (!isolate_lru_page(page))
                 return 0;
 
         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 6c3740318a98..d30c9de60b0d 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -388,7 +388,7 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
                                 allow_drain = false;
                         }
 
-                        if (isolate_lru_page(page)) {
+                        if (!isolate_lru_page(page)) {
                                 src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
                                 restore++;
                                 continue;
--
2.27.0


2023-02-15 10:40:09

by Baolin Wang

Subject: [PATCH v3 3/4] mm: hugetlb: change to return bool for isolate_hugetlb()

isolate_hugetlb() only returns 0 or -EBUSY, and most callers do not
care about the specific negative value, so we can convert
isolate_hugetlb() to return a boolean value to make the code clearer
when checking the hugetlb isolation state. Also convert the two callers
that do consider the negative value returned by isolate_hugetlb().

No functional changes intended.

Signed-off-by: Baolin Wang <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
---
 include/linux/hugetlb.h |  6 +++---
 mm/hugetlb.c            | 13 ++++++++-----
 mm/memory-failure.c     |  2 +-
 mm/mempolicy.c          |  2 +-
 mm/migrate.c            |  7 +++----
 5 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index df6dd624ccfe..5f5e4177b2e0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -171,7 +171,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                 vm_flags_t vm_flags);
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                 long freed);
-int isolate_hugetlb(struct folio *folio, struct list_head *list);
+bool isolate_hugetlb(struct folio *folio, struct list_head *list);
 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                                 bool *migratable_cleared);
@@ -413,9 +413,9 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
         return NULL;
 }
 
-static inline int isolate_hugetlb(struct folio *folio, struct list_head *list)
+static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
 {
-        return -EBUSY;
+        return false;
 }
 
 static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3a01a9dbf445..16513cd23d5d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2925,13 +2925,16 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
                  */
                 goto free_new;
         } else if (folio_ref_count(old_folio)) {
+                bool isolated;
+
                 /*
                  * Someone has grabbed the folio, try to isolate it here.
                  * Fail with -EBUSY if not possible.
                  */
                 spin_unlock_irq(&hugetlb_lock);
-                ret = isolate_hugetlb(old_folio, list);
+                isolated = isolate_hugetlb(old_folio, list);
                 spin_lock_irq(&hugetlb_lock);
+                ret = isolated ? 0 : -EBUSY;
                 goto free_new;
         } else if (!folio_test_hugetlb_freed(old_folio)) {
                 /*
@@ -3005,7 +3008,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
         if (hstate_is_gigantic(h))
                 return -ENOMEM;
 
-        if (folio_ref_count(folio) && !isolate_hugetlb(folio, list))
+        if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
                 ret = 0;
         else if (!folio_ref_count(folio))
                 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
@@ -7251,15 +7254,15 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
  * These functions are overwritable if your architecture needs its own
  * behavior.
  */
-int isolate_hugetlb(struct folio *folio, struct list_head *list)
+bool isolate_hugetlb(struct folio *folio, struct list_head *list)
 {
-        int ret = 0;
+        bool ret = true;
 
         spin_lock_irq(&hugetlb_lock);
         if (!folio_test_hugetlb(folio) ||
             !folio_test_hugetlb_migratable(folio) ||
             !folio_try_get(folio)) {
-                ret = -EBUSY;
+                ret = false;
                 goto unlock;
         }
         folio_clear_hugetlb_migratable(folio);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e504362fdb23..8604753bc644 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2508,7 +2508,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
         bool isolated = false;
 
         if (PageHuge(page)) {
-                isolated = !isolate_hugetlb(page_folio(page), pagelist);
+                isolated = isolate_hugetlb(page_folio(page), pagelist);
         } else {
                 bool lru = !__PageMovable(page);
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2751bc3310fd..a256a241fd1d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -609,7 +609,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
         if (flags & (MPOL_MF_MOVE_ALL) ||
             (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 &&
              !hugetlb_pmd_shared(pte))) {
-                if (isolate_hugetlb(folio, qp->pagelist) &&
+                if (!isolate_hugetlb(folio, qp->pagelist) &&
                     (flags & MPOL_MF_STRICT))
                         /*
                          * Failed to isolate folio but allow migrating pages
diff --git a/mm/migrate.c b/mm/migrate.c
index 53010a142e7f..2db546a0618c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2095,6 +2095,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
         struct vm_area_struct *vma;
         struct page *page;
         int err;
+        bool isolated;
 
         mmap_read_lock(mm);
         err = -EFAULT;
@@ -2126,13 +2127,11 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
 
         if (PageHuge(page)) {
                 if (PageHead(page)) {
-                        err = isolate_hugetlb(page_folio(page), pagelist);
-                        if (!err)
-                                err = 1;
+                        isolated = isolate_hugetlb(page_folio(page), pagelist);
+                        err = isolated ? 1 : -EBUSY;
                 }
         } else {
                 struct page *head;
-                bool isolated;
 
                 head = compound_head(page);
                 isolated = isolate_lru_page(head);
--
2.27.0


2023-02-15 10:40:11

by Baolin Wang

Subject: [PATCH v3 4/4] mm: change to return bool for isolate_movable_page()

isolate_movable_page() can only return 0 or -EBUSY, and no callers
care about the negative return value, so we can convert
isolate_movable_page() to return a boolean value to make the code
clearer when checking the movable page isolation state.

No functional changes intended.

Signed-off-by: Baolin Wang <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
---
 include/linux/migrate.h |  6 +++---
 mm/compaction.c         |  2 +-
 mm/memory-failure.c     |  4 ++--
 mm/memory_hotplug.c     | 10 +++++-----
 mm/migrate.c            |  6 +++---
 5 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index c88b96b48be7..6b252f519c86 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -71,7 +71,7 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
                 unsigned long private, enum migrate_mode mode, int reason,
                 unsigned int *ret_succeeded);
 extern struct page *alloc_migration_target(struct page *page, unsigned long private);
-extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
+extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
 
 int migrate_huge_page_move_mapping(struct address_space *mapping,
                 struct folio *dst, struct folio *src);
@@ -92,8 +92,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
 static inline struct page *alloc_migration_target(struct page *page,
                 unsigned long private)
         { return NULL; }
-static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
-        { return -EBUSY; }
+static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+        { return false; }
 
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
                 struct folio *dst, struct folio *src)
diff --git a/mm/compaction.c b/mm/compaction.c
index d73578af44cc..ad7409f70519 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -976,7 +976,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                                         locked = NULL;
                                 }
 
-                                if (!isolate_movable_page(page, mode))
+                                if (isolate_movable_page(page, mode))
                                         goto isolate_success;
                         }
 
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 8604753bc644..a1ede7bdce95 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2515,8 +2515,8 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
                 if (lru)
                         isolated = isolate_lru_page(page);
                 else
-                        isolated = !isolate_movable_page(page,
-                                                         ISOLATE_UNEVICTABLE);
+                        isolated = isolate_movable_page(page,
+                                                        ISOLATE_UNEVICTABLE);
 
                 if (isolated) {
                         list_add(&page->lru, pagelist);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 5fc2dcf4e3ab..bcb0dc41c2f2 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1668,18 +1668,18 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                  * We can skip free pages. And we can deal with pages on
                  * LRU and non-lru movable pages.
                  */
-                if (PageLRU(page)) {
+                if (PageLRU(page))
                         isolated = isolate_lru_page(page);
-                        ret = isolated ? 0 : -EBUSY;
-                } else
-                        ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
-                if (!ret) { /* Success */
+                else
+                        isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
+                if (isolated) { /* Success */
                         list_add_tail(&page->lru, &source);
                         if (!__PageMovable(page))
                                 inc_node_page_state(page, NR_ISOLATED_ANON +
                                                     page_is_file_lru(page));
 
                 } else {
+                        ret = -EBUSY;
                         if (__ratelimit(&migrate_rs)) {
                                 pr_warn("failed to isolate pfn %lx\n", pfn);
                                 dump_page(page, "isolation failed");
diff --git a/mm/migrate.c b/mm/migrate.c
index 2db546a0618c..9a101c7bb8ff 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -58,7 +58,7 @@
 
 #include "internal.h"
 
-int isolate_movable_page(struct page *page, isolate_mode_t mode)
+bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 {
         struct folio *folio = folio_get_nontail_page(page);
         const struct movable_operations *mops;
@@ -119,14 +119,14 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
         folio_set_isolated(folio);
         folio_unlock(folio);
 
-        return 0;
+        return true;
 
 out_no_isolated:
         folio_unlock(folio);
 out_putfolio:
         folio_put(folio);
 out:
-        return -EBUSY;
+        return false;
 }
 
 static void putback_movable_folio(struct folio *folio)
--
2.27.0


2023-02-15 15:42:11

by Matthew Wilcox

Subject: Re: [PATCH v3 2/4] mm: change to return bool for isolate_lru_page()

On Wed, Feb 15, 2023 at 06:39:35PM +0800, Baolin Wang wrote:
> isolate_lru_page() can only return 0 or -EBUSY, and most callers do
> not care about the specific negative error code, except for one caller
> in add_page_for_migration(). So we can convert isolate_lru_page() to
> return a boolean value, which makes the code clearer when checking its
> return value.
>
> Also convert all callers' checks of the isolation state accordingly.
>
> No functional changes intended.
>
> Signed-off-by: Baolin Wang <[email protected]>
> Acked-by: David Hildenbrand <[email protected]>

Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>

2023-02-15 15:42:33

by Matthew Wilcox

Subject: Re: [PATCH v3 3/4] mm: hugetlb: change to return bool for isolate_hugetlb()

On Wed, Feb 15, 2023 at 06:39:36PM +0800, Baolin Wang wrote:
> isolate_hugetlb() only returns 0 or -EBUSY, and most callers do not
> care about the specific negative value, so we can convert
> isolate_hugetlb() to return a boolean value to make the code clearer
> when checking the hugetlb isolation state. Also convert the two callers
> that do consider the negative value returned by isolate_hugetlb().
>
> No functional changes intended.
>
> Signed-off-by: Baolin Wang <[email protected]>
> Acked-by: David Hildenbrand <[email protected]>

Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>

2023-02-15 15:44:45

by Matthew Wilcox

Subject: Re: [PATCH v3 4/4] mm: change to return bool for isolate_movable_page()

On Wed, Feb 15, 2023 at 06:39:37PM +0800, Baolin Wang wrote:
> isolate_movable_page() can only return 0 or -EBUSY, and no callers
> care about the negative return value, so we can convert
> isolate_movable_page() to return a boolean value to make the code
> clearer when checking the movable page isolation state.
>
> No functional changes intended.
>
> Signed-off-by: Baolin Wang <[email protected]>
> Acked-by: David Hildenbrand <[email protected]>

Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>

A couple of nits below, not worth respinning the patch series for:

> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index c88b96b48be7..6b252f519c86 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -71,7 +71,7 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
>                  unsigned long private, enum migrate_mode mode, int reason,
>                  unsigned int *ret_succeeded);
>  extern struct page *alloc_migration_target(struct page *page, unsigned long private);
> -extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
> +extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);

You can drop the 'extern' here.

> +++ b/mm/memory_hotplug.c
> @@ -1668,18 +1668,18 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
>                   * We can skip free pages. And we can deal with pages on
>                   * LRU and non-lru movable pages.
>                   */
> -                if (PageLRU(page)) {
> +                if (PageLRU(page))
>                          isolated = isolate_lru_page(page);
> -                        ret = isolated ? 0 : -EBUSY;
> -                } else
> -                        ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
> -                if (!ret) { /* Success */
> +                else
> +                        isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
> +                if (isolated) { /* Success */

I would have dropped the "/* Success */" here. Before, commenting
"!ret" is quite sensible, but "isolated" seems obviously success to me.


Thanks for doing all this.

2023-02-15 19:23:09

by Mike Kravetz

[permalink] [raw]
Subject: Re: [PATCH v3 3/4] mm: hugetlb: change to return bool for isolate_hugetlb()

On 02/15/23 18:39, Baolin Wang wrote:
> isolate_hugetlb() only returns 0 or -EBUSY, and most callers do not
> care about the specific negative value, so we can convert
> isolate_hugetlb() to return a boolean value to make the code clearer
> when checking the hugetlb isolation state. Also convert the two callers
> that do consider the negative value returned by isolate_hugetlb().
>
> No functional changes intended.
>
> Signed-off-by: Baolin Wang <[email protected]>
> Acked-by: David Hildenbrand <[email protected]>
> ---
>  include/linux/hugetlb.h |  6 +++---
>  mm/hugetlb.c            | 13 ++++++++-----
>  mm/memory-failure.c     |  2 +-
>  mm/mempolicy.c          |  2 +-
>  mm/migrate.c            |  7 +++----
>  5 files changed, 16 insertions(+), 14 deletions(-)

Thanks,

Reviewed-by: Mike Kravetz <[email protected]>

--
Mike Kravetz

2023-02-15 20:14:57

by Linus Torvalds

Subject: Re: [PATCH v3 0/4] Change the return value for page isolation functions

This v3 series looks like it's making things more readable, so ack as
far as I'm concerned.

But it looks like it's firmly in the "Andrew's mm tree" category, so
I'll leave it up to him to decide.

Linus

2023-02-15 20:26:09

by SeongJae Park

Subject: Re: [PATCH v3 3/4] mm: hugetlb: change to return bool for isolate_hugetlb()

On Wed, 15 Feb 2023 18:39:36 +0800 Baolin Wang <[email protected]> wrote:

> isolate_hugetlb() only returns 0 or -EBUSY, and most callers do not
> care about the specific negative value, so we can convert
> isolate_hugetlb() to return a boolean value to make the code clearer
> when checking the hugetlb isolation state. Also convert the two callers
> that do consider the negative value returned by isolate_hugetlb().
>
> No functional changes intended.
>
> Signed-off-by: Baolin Wang <[email protected]>
> Acked-by: David Hildenbrand <[email protected]>
> ---
[...]
>  include/linux/hugetlb.h |  6 +++---
>  mm/hugetlb.c            | 13 ++++++++-----
>  mm/memory-failure.c     |  2 +-
>  mm/mempolicy.c          |  2 +-
>  mm/migrate.c            |  7 +++----
>  5 files changed, 16 insertions(+), 14 deletions(-)
>
[...]
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 3a01a9dbf445..16513cd23d5d 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -2925,13 +2925,16 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
>                   */
>                  goto free_new;
>          } else if (folio_ref_count(old_folio)) {
> +                bool isolated;
> +
>                  /*
>                   * Someone has grabbed the folio, try to isolate it here.
>                   * Fail with -EBUSY if not possible.
>                   */
>                  spin_unlock_irq(&hugetlb_lock);
> -                ret = isolate_hugetlb(old_folio, list);
> +                isolated = isolate_hugetlb(old_folio, list);
>                  spin_lock_irq(&hugetlb_lock);
> +                ret = isolated ? 0 : -EBUSY;
>                  goto free_new;

Nit. I'd personally prefer to set 'ret' before entering this critical section
to keep the section short, but this is just a minor comment that wouldn't be
worth requesting a respin.
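
I mean something like this (untested, reusing the patch's 'isolated'
variable):

        spin_unlock_irq(&hugetlb_lock);
        isolated = isolate_hugetlb(old_folio, list);
        ret = isolated ? 0 : -EBUSY;
        spin_lock_irq(&hugetlb_lock);
        goto free_new;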


Thanks,
SJ

[...]

2023-02-15 20:26:59

by SeongJae Park

Subject: Re: [PATCH v3 0/4] Change the return value for page isolation functions

Hi Baolin,

On Wed, 15 Feb 2023 18:39:33 +0800 Baolin Wang <[email protected]> wrote:

> The page isolation functions currently do not return a boolean to
> indicate success; instead they return a negative error code when they
> fail to isolate a page. As a result, code like the following, used in
> many places, reads like a boolean success/failure check, which can
> confuse people about whether the isolation succeeded.
>
> if (folio_isolate_lru(folio))
>         continue;
>
> Moreover, the page isolation functions only return 0 or -EBUSY, and
> most callers do not care about the specific negative error code. We can
> therefore convert all of the page isolation functions to return a
> boolean value, which removes the confusion and makes the code clearer.
>
> No functional changes intended in this patch series.

For the series,

Reviewed-by: SeongJae Park <[email protected]>


Thanks,
SJ

[...]

2023-02-16 02:06:12

by Baolin Wang

Subject: Re: [PATCH v3 3/4] mm: hugetlb: change to return bool for isolate_hugetlb()



On 2/16/2023 4:25 AM, SeongJae Park wrote:
> On Wed, 15 Feb 2023 18:39:36 +0800 Baolin Wang <[email protected]> wrote:
>
>> isolate_hugetlb() only returns 0 or -EBUSY, and most callers do not
>> care about the specific negative value, so we can convert
>> isolate_hugetlb() to return a boolean value to make the code clearer
>> when checking the hugetlb isolation state. Also convert the two callers
>> that do consider the negative value returned by isolate_hugetlb().
>>
>> No functional changes intended.
>>
>> Signed-off-by: Baolin Wang <[email protected]>
>> Acked-by: David Hildenbrand <[email protected]>
>> ---
> [...]
>>  include/linux/hugetlb.h |  6 +++---
>>  mm/hugetlb.c            | 13 ++++++++-----
>>  mm/memory-failure.c     |  2 +-
>>  mm/mempolicy.c          |  2 +-
>>  mm/migrate.c            |  7 +++----
>>  5 files changed, 16 insertions(+), 14 deletions(-)
>>
> [...]
>> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
>> index 3a01a9dbf445..16513cd23d5d 100644
>> --- a/mm/hugetlb.c
>> +++ b/mm/hugetlb.c
>> @@ -2925,13 +2925,16 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
>>                   */
>>                  goto free_new;
>>          } else if (folio_ref_count(old_folio)) {
>> +                bool isolated;
>> +
>>                  /*
>>                   * Someone has grabbed the folio, try to isolate it here.
>>                   * Fail with -EBUSY if not possible.
>>                   */
>>                  spin_unlock_irq(&hugetlb_lock);
>> -                ret = isolate_hugetlb(old_folio, list);
>> +                isolated = isolate_hugetlb(old_folio, list);
>>                  spin_lock_irq(&hugetlb_lock);
>> +                ret = isolated ? 0 : -EBUSY;
>>                  goto free_new;
>
> Nit. I'd personally prefer to set 'ret' before entering this critical section
> to keep the section short, but this is just a minor comment that wouldn't be
> worth requesting a respin.

Yes, good catch. And I see Andrew has helped to do this (Thanks Andrew).

Thanks for reviewing.

2023-02-16 02:07:15

by Baolin Wang

Subject: Re: [PATCH v3 4/4] mm: change to return bool for isolate_movable_page()



On 2/15/2023 11:44 PM, Matthew Wilcox wrote:
> On Wed, Feb 15, 2023 at 06:39:37PM +0800, Baolin Wang wrote:
>> isolate_movable_page() can only return 0 or -EBUSY, and no callers
>> care about the negative return value, so we can convert
>> isolate_movable_page() to return a boolean value to make the code
>> clearer when checking the movable page isolation state.
>>
>> No functional changes intended.
>>
>> Signed-off-by: Baolin Wang <[email protected]>
>> Acked-by: David Hildenbrand <[email protected]>
>
> Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
>
> A couple of nits below, not worth respinning the patch series for:
>
>> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
>> index c88b96b48be7..6b252f519c86 100644
>> --- a/include/linux/migrate.h
>> +++ b/include/linux/migrate.h
>> @@ -71,7 +71,7 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
>>                  unsigned long private, enum migrate_mode mode, int reason,
>>                  unsigned int *ret_succeeded);
>>  extern struct page *alloc_migration_target(struct page *page, unsigned long private);
>> -extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
>> +extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
>
> You can drop the 'extern' here.
>
>> +++ b/mm/memory_hotplug.c
>> @@ -1668,18 +1668,18 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
>>                   * We can skip free pages. And we can deal with pages on
>>                   * LRU and non-lru movable pages.
>>                   */
>> -                if (PageLRU(page)) {
>> +                if (PageLRU(page))
>>                          isolated = isolate_lru_page(page);
>> -                        ret = isolated ? 0 : -EBUSY;
>> -                } else
>> -                        ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
>> -                if (!ret) { /* Success */
>> +                else
>> +                        isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
>> +                if (isolated) { /* Success */
>
> I would have dropped the "/* Success */" here. Before, commenting
> "!ret" is quite sensible, but "isolated" seems obviously success to me.

Right. I hope Andrew can help to drop this unnecessary comment :)

Thanks for reviewing.

2023-02-16 22:47:06

by Andrew Morton

Subject: Re: [PATCH v3 4/4] mm: change to return bool for isolate_movable_page()

On Wed, 15 Feb 2023 15:44:22 +0000 Matthew Wilcox <[email protected]> wrote:

> > extern struct page *alloc_migration_target(struct page *page, unsigned long private);
> > -extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
> > +extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
>
> You can drop the 'extern' here.

There are a bunch of them, so a separate patch would be better.


--- a/include/linux/migrate.h~a
+++ a/include/linux/migrate.h
@@ -62,16 +62,16 @@ extern const char *migrate_reason_names[
 
 #ifdef CONFIG_MIGRATION
 
-extern void putback_movable_pages(struct list_head *l);
+void putback_movable_pages(struct list_head *l);
 int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
                 struct folio *src, enum migrate_mode mode, int extra_count);
 int migrate_folio(struct address_space *mapping, struct folio *dst,
                 struct folio *src, enum migrate_mode mode);
-extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
-                unsigned long private, enum migrate_mode mode, int reason,
-                unsigned int *ret_succeeded);
-extern struct page *alloc_migration_target(struct page *page, unsigned long private);
-extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
+                unsigned long private, enum migrate_mode mode, int reason,
+                unsigned int *ret_succeeded);
+struct page *alloc_migration_target(struct page *page, unsigned long private);
+bool isolate_movable_page(struct page *page, isolate_mode_t mode);
 
 int migrate_huge_page_move_mapping(struct address_space *mapping,
                 struct folio *dst, struct folio *src);
@@ -142,8 +142,8 @@ const struct movable_operations *page_mo
 }
 
 #ifdef CONFIG_NUMA_BALANCING
-extern int migrate_misplaced_page(struct page *page,
-                struct vm_area_struct *vma, int node);
+int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
+                int node);
 #else
 static inline int migrate_misplaced_page(struct page *page,
                 struct vm_area_struct *vma, int node)
 _