2023-05-10 03:21:32

by Huang, Ying

Subject: [PATCH -V2] migrate_pages_batch: simplify retrying and failure counting of large folios

After recent changes to the retrying and failure counting in
migrate_pages_batch(), it is no longer necessary to count retries and
failures for normal, large, and THP folios separately, because the
retry and failure counts of large folios are never used directly. So,
in this patch, the retry and failure counting of large folios is
simplified by counting retries and failures of normal and large folios
together. This also reduces the number of lines of code.

Previously, migrate_pages_batch() needed to record whether the source
folio was large/THP before splitting, so is_large was used to cache the
folio_test_large() result. Now that retries and failures of large
folios are no longer counted separately (only those of THP folios are),
that variable is not needed any more. So, in this patch, is_large is
removed to simplify the code.

This is just code cleanup, no functionality changes are expected.
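
For illustration, below is a minimal user-space sketch of the resulting
counting scheme. It is not kernel code: the mig_rc result codes, the
reduced stats struct, and the flat result array are made up for this
example, and the real function walks a folio list, splits large folios,
and makes several passes, all of which is omitted here.

#include <stdbool.h>
#include <stdio.h>

struct stats {
	int nr_thp_failed;
};

enum mig_rc { MIG_OK, MIG_EAGAIN, MIG_FAIL };	/* hypothetical result codes */

/*
 * One pass over a batch: a single retry/nr_failed pair covers folios
 * of all sizes, with THP failures additionally tracked in stats.
 */
static int count_one_pass(const enum mig_rc *rc, const bool *is_thp,
			  int n, struct stats *stats)
{
	int retry = 0, thp_retry = 0, nr_failed = 0;

	for (int i = 0; i < n; i++) {
		switch (rc[i]) {
		case MIG_EAGAIN:
			retry++;		/* normal and large alike */
			thp_retry += is_thp[i];
			break;
		case MIG_FAIL:
			nr_failed++;		/* no separate nr_large_failed */
			stats->nr_thp_failed += is_thp[i];
			break;
		case MIG_OK:
			break;
		}
	}
	/* folios still retrying after the last pass count as failed */
	nr_failed += retry;
	stats->nr_thp_failed += thp_retry;
	return nr_failed;
}

int main(void)
{
	enum mig_rc rc[] = { MIG_OK, MIG_FAIL, MIG_EAGAIN, MIG_FAIL };
	bool thp[] = { false, true, false, false };
	struct stats st = { 0 };

	/* prints nr_failed=3 nr_thp_failed=1 */
	printf("nr_failed=%d nr_thp_failed=%d\n",
	       count_one_pass(rc, thp, 4, &st), st.nr_thp_failed);
	return 0;
}

Before this patch, the same counting needed the retry/large_retry and
nr_failed/nr_large_failed pairs, with an "if (is_large)" branch at
every counting site.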

Signed-off-by: "Huang, Ying" <[email protected]>
Reviewed-by: Xin Hao <[email protected]>
Reviewed-by: Zi Yan <[email protected]>
Cc: Yang Shi <[email protected]>
Cc: Baolin Wang <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Alistair Popple <[email protected]>
---
mm/migrate.c | 110 ++++++++++++++++-----------------------------------
1 file changed, 35 insertions(+), 75 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 01cac26a3127..2ac927a82bbc 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1614,13 +1614,10 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
int nr_pass)
{
int retry = 1;
- int large_retry = 1;
int thp_retry = 1;
int nr_failed = 0;
int nr_retry_pages = 0;
- int nr_large_failed = 0;
int pass = 0;
- bool is_large = false;
bool is_thp = false;
struct folio *folio, *folio2, *dst = NULL, *dst2;
int rc, rc_saved = 0, nr_pages;
@@ -1631,20 +1628,13 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
!list_empty(from) && !list_is_singular(from));

- for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
+ for (pass = 0; pass < nr_pass && retry; pass++) {
retry = 0;
- large_retry = 0;
thp_retry = 0;
nr_retry_pages = 0;

list_for_each_entry_safe(folio, folio2, from, lru) {
- /*
- * Large folio statistics is based on the source large
- * folio. Capture required information that might get
- * lost during migration.
- */
- is_large = folio_test_large(folio);
- is_thp = is_large && folio_test_pmd_mappable(folio);
+ is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
nr_pages = folio_nr_pages(folio);

cond_resched();
@@ -1660,7 +1650,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
* list is processed.
*/
if (!thp_migration_supported() && is_thp) {
- nr_large_failed++;
+ nr_failed++;
stats->nr_thp_failed++;
if (!try_split_folio(folio, split_folios)) {
stats->nr_thp_split++;
@@ -1688,38 +1678,33 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
* When memory is low, don't bother to try to migrate
* other folios, move unmapped folios, then exit.
*/
- if (is_large) {
- nr_large_failed++;
- stats->nr_thp_failed += is_thp;
- /* Large folio NUMA faulting doesn't split to retry. */
- if (!nosplit) {
- int ret = try_split_folio(folio, split_folios);
+ nr_failed++;
+ stats->nr_thp_failed += is_thp;
+ /* Large folio NUMA faulting doesn't split to retry. */
+ if (folio_test_large(folio) && !nosplit) {
+ int ret = try_split_folio(folio, split_folios);

- if (!ret) {
- stats->nr_thp_split += is_thp;
- break;
- } else if (reason == MR_LONGTERM_PIN &&
- ret == -EAGAIN) {
- /*
- * Try again to split large folio to
- * mitigate the failure of longterm pinning.
- */
- large_retry++;
- thp_retry += is_thp;
- nr_retry_pages += nr_pages;
- /* Undo duplicated failure counting. */
- nr_large_failed--;
- stats->nr_thp_failed -= is_thp;
- break;
- }
+ if (!ret) {
+ stats->nr_thp_split += is_thp;
+ break;
+ } else if (reason == MR_LONGTERM_PIN &&
+ ret == -EAGAIN) {
+ /*
+ * Try again to split large folio to
+ * mitigate the failure of longterm pinning.
+ */
+ retry++;
+ thp_retry += is_thp;
+ nr_retry_pages += nr_pages;
+ /* Undo duplicated failure counting. */
+ nr_failed--;
+ stats->nr_thp_failed -= is_thp;
+ break;
}
- } else {
- nr_failed++;
}

stats->nr_failed_pages += nr_pages + nr_retry_pages;
/* nr_failed isn't updated for not used */
- nr_large_failed += large_retry;
stats->nr_thp_failed += thp_retry;
rc_saved = rc;
if (list_empty(&unmap_folios))
@@ -1727,12 +1712,8 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
else
goto move;
case -EAGAIN:
- if (is_large) {
- large_retry++;
- thp_retry += is_thp;
- } else {
- retry++;
- }
+ retry++;
+ thp_retry += is_thp;
nr_retry_pages += nr_pages;
break;
case MIGRATEPAGE_SUCCESS:
@@ -1750,20 +1731,14 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
* removed from migration folio list and not
* retried in the next outer loop.
*/
- if (is_large) {
- nr_large_failed++;
- stats->nr_thp_failed += is_thp;
- } else {
- nr_failed++;
- }
-
+ nr_failed++;
+ stats->nr_thp_failed += is_thp;
stats->nr_failed_pages += nr_pages;
break;
}
}
}
nr_failed += retry;
- nr_large_failed += large_retry;
stats->nr_thp_failed += thp_retry;
stats->nr_failed_pages += nr_retry_pages;
move:
@@ -1771,17 +1746,15 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
try_to_unmap_flush();

retry = 1;
- for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
+ for (pass = 0; pass < nr_pass && retry; pass++) {
retry = 0;
- large_retry = 0;
thp_retry = 0;
nr_retry_pages = 0;

dst = list_first_entry(&dst_folios, struct folio, lru);
dst2 = list_next_entry(dst, lru);
list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
- is_large = folio_test_large(folio);
- is_thp = is_large && folio_test_pmd_mappable(folio);
+ is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
nr_pages = folio_nr_pages(folio);

cond_resched();
@@ -1797,12 +1770,8 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
*/
switch(rc) {
case -EAGAIN:
- if (is_large) {
- large_retry++;
- thp_retry += is_thp;
- } else {
- retry++;
- }
+ retry++;
+ thp_retry += is_thp;
nr_retry_pages += nr_pages;
break;
case MIGRATEPAGE_SUCCESS:
@@ -1810,13 +1779,8 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
stats->nr_thp_succeeded += is_thp;
break;
default:
- if (is_large) {
- nr_large_failed++;
- stats->nr_thp_failed += is_thp;
- } else {
- nr_failed++;
- }
-
+ nr_failed++;
+ stats->nr_thp_failed += is_thp;
stats->nr_failed_pages += nr_pages;
break;
}
@@ -1825,14 +1789,10 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
}
}
nr_failed += retry;
- nr_large_failed += large_retry;
stats->nr_thp_failed += thp_retry;
stats->nr_failed_pages += nr_retry_pages;

- if (rc_saved)
- rc = rc_saved;
- else
- rc = nr_failed + nr_large_failed;
+ rc = rc_saved ? : nr_failed;
out:
/* Cleanup remaining folios */
dst = list_first_entry(&dst_folios, struct folio, lru);
--
2.39.2


2023-05-10 09:42:19

by Alistair Popple

Subject: Re: [PATCH -V2] migrate_pages_batch: simplify retrying and failure counting of large folios


Looks good. Please add my Reviewed-by from v1 as well:

Reviewed-by: Alistair Popple <[email protected]>

Huang Ying <[email protected]> writes:

> [...]


2023-05-15 02:38:14

by Huang, Ying

Subject: Re: [PATCH -V2] migrate_pages_batch: simplify retrying and failure counting of large folios

Hi, Alistair,

Alistair Popple <[email protected]> writes:

> Looks good. Please add my reviewed by from v1 as well:
>
> Reviewed-by: Alistair Popple <[email protected]>

Sorry, I just found your email in my junk email folder. It appears
that there is some bug in my email system; I will be more careful in
the future and try to find a way to fix it.

Thanks to Andrew for adding your Reviewed-by to the version he merged.

Best Regards,
Huang, Ying

> Huang Ying <[email protected]> writes:
>
>> [...]