Hi all, this series contains random fixes and cleanups to free page
isolation in compaction. This is based on another compact series[1].
More details can be found in respective patches. Thanks!
[1] https://lore.kernel.org/all/[email protected]/
Kemeng Shi (5):
mm/compaction: allow blockpfn outside of pageblock for high order
buddy page
mm/compaction: set compact_cached_free_pfn correctly in
update_pageblock_skip
mm/compaction: merge end_pfn boundary check in isolate_freepages_range
mm/compaction: remove unnecessary cursor page in
isolate_freepages_block
mm/compaction: remove unnecessary "else continue" at end of loop in
isolate_freepages_block
mm/compaction.c | 30 ++++++++++++++----------------
1 file changed, 14 insertions(+), 16 deletions(-)
--
2.30.0
The cursor is currently only used to advance the page. We can simply move the
page forward directly and remove the unnecessary cursor.
Signed-off-by: Kemeng Shi <[email protected]>
---
mm/compaction.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 65791a74c5e8..cfb661f4ce23 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -589,7 +589,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
bool strict)
{
int nr_scanned = 0, total_isolated = 0;
- struct page *cursor;
+ struct page *page;
unsigned long flags = 0;
spinlock_t *locked = NULL;
unsigned long blockpfn = *start_pfn;
@@ -599,12 +599,11 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (strict)
stride = 1;
- cursor = pfn_to_page(blockpfn);
+ page = pfn_to_page(blockpfn);
/* Isolate free pages. */
- for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
+ for (; blockpfn < end_pfn; blockpfn += stride, page += stride) {
int isolated;
- struct page *page = cursor;
/*
* Periodically drop the lock (if held) regardless of its
@@ -628,7 +627,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (likely(order <= MAX_ORDER)) {
blockpfn += (1UL << order) - 1;
- cursor += (1UL << order) - 1;
+ page += (1UL << order) - 1;
nr_scanned += (1UL << order) - 1;
}
goto isolate_fail;
@@ -665,7 +664,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
}
/* Advance to the end of split page */
blockpfn += isolated - 1;
- cursor += isolated - 1;
+ page += isolated - 1;
continue;
isolate_fail:
--
2.30.0
We will set the skip flag on the pageblock at block_start_pfn, so it is more
reasonable to set compact_cached_free_pfn to the pageblock before block_start_pfn.
Signed-off-by: Kemeng Shi <[email protected]>
---
mm/compaction.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index d1d28d57e5bd..4a784872565a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1690,7 +1690,8 @@ static void isolate_freepages(struct compact_control *cc)
/* Update the skip hint if the full pageblock was scanned */
if (isolate_start_pfn >= block_end_pfn)
- update_pageblock_skip(cc, page, block_start_pfn);
+ update_pageblock_skip(cc, page, block_start_pfn -
+ pageblock_nr_pages);
/* Are enough freepages isolated? */
if (cc->nr_freepages >= cc->nr_migratepages) {
--
2.30.0
From: Kemeng Shi <[email protected]>
Merge the end_pfn boundary checks for moving forward by a single pageblock and
by multiple pageblocks, to avoid doing the boundary check twice in the
multiple-pageblock case.
Signed-off-by: Kemeng Shi <[email protected]>
---
mm/compaction.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 4a784872565a..65791a74c5e8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -740,8 +740,6 @@ isolate_freepages_range(struct compact_control *cc,
/* Protect pfn from changing by isolate_freepages_block */
unsigned long isolate_start_pfn = pfn;
- block_end_pfn = min(block_end_pfn, end_pfn);
-
/*
* pfn could pass the block_end_pfn if isolated freepage
* is more than pageblock order. In this case, we adjust
@@ -750,9 +748,10 @@ isolate_freepages_range(struct compact_control *cc,
if (pfn >= block_end_pfn) {
block_start_pfn = pageblock_start_pfn(pfn);
block_end_pfn = pageblock_end_pfn(pfn);
- block_end_pfn = min(block_end_pfn, end_pfn);
}
+ block_end_pfn = min(block_end_pfn, end_pfn);
+
if (!pageblock_pfn_to_page(block_start_pfn,
block_end_pfn, cc->zone))
break;
--
2.30.0
Removing the "else continue" code at the end of the scan loop causes no behavior
change. Just remove it to make the code cleaner.
Signed-off-by: Kemeng Shi <[email protected]>
---
mm/compaction.c | 3 ---
1 file changed, 3 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index cfb661f4ce23..d38297018077 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -670,9 +670,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
isolate_fail:
if (strict)
break;
- else
- continue;
-
}
compact_unlock_irqrestore(&locked, flags);
--
2.30.0
On 7/30/2023 1:43 AM, Kemeng Shi wrote:
> We will set skip to page block of block_start_pfn, it's more reasonable
> to set compact_cached_free_pfn to page block before the block_start_pfn.
Looks reasonable to me.
Reviewed-by: Baolin Wang <[email protected]>
> Signed-off-by: Kemeng Shi <[email protected]>
> ---
> mm/compaction.c | 3 ++-
> 1 file changed, 2 insertions(+), 1 deletion(-)
>
> diff --git a/mm/compaction.c b/mm/compaction.c
> index d1d28d57e5bd..4a784872565a 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -1690,7 +1690,8 @@ static void isolate_freepages(struct compact_control *cc)
>
> /* Update the skip hint if the full pageblock was scanned */
> if (isolate_start_pfn >= block_end_pfn)
> - update_pageblock_skip(cc, page, block_start_pfn);
> + update_pageblock_skip(cc, page, block_start_pfn -
> + pageblock_nr_pages);
>
> /* Are enough freepages isolated? */
> if (cc->nr_freepages >= cc->nr_migratepages) {
On 7/30/2023 1:43 AM, Kemeng Shi wrote:
> The cursor is only used for page forward currently. We can simply move page
> forward directly to remove unnecessary cursor.
>
> Signed-off-by: Kemeng Shi <[email protected]>
LGTM.
Reviewed-by: Baolin Wang <[email protected]>
> ---
> mm/compaction.c | 11 +++++------
> 1 file changed, 5 insertions(+), 6 deletions(-)
>
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 65791a74c5e8..cfb661f4ce23 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -589,7 +589,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
> bool strict)
> {
> int nr_scanned = 0, total_isolated = 0;
> - struct page *cursor;
> + struct page *page;
> unsigned long flags = 0;
> spinlock_t *locked = NULL;
> unsigned long blockpfn = *start_pfn;
> @@ -599,12 +599,11 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
> if (strict)
> stride = 1;
>
> - cursor = pfn_to_page(blockpfn);
> + page = pfn_to_page(blockpfn);
>
> /* Isolate free pages. */
> - for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
> + for (; blockpfn < end_pfn; blockpfn += stride, page += stride) {
> int isolated;
> - struct page *page = cursor;
>
> /*
> * Periodically drop the lock (if held) regardless of its
> @@ -628,7 +627,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
>
> if (likely(order <= MAX_ORDER)) {
> blockpfn += (1UL << order) - 1;
> - cursor += (1UL << order) - 1;
> + page += (1UL << order) - 1;
> nr_scanned += (1UL << order) - 1;
> }
> goto isolate_fail;
> @@ -665,7 +664,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
> }
> /* Advance to the end of split page */
> blockpfn += isolated - 1;
> - cursor += isolated - 1;
> + page += isolated - 1;
> continue;
>
> isolate_fail:
On 7/30/2023 1:43 AM, Kemeng Shi wrote:
> There is no behavior change to remove "else continue" code at end of scan loop.
> Just remove it to make code cleaner.
>
> Signed-off-by: Kemeng Shi <[email protected]>
Reviewed-by: Baolin Wang <[email protected]>
> ---
> mm/compaction.c | 3 ---
> 1 file changed, 3 deletions(-)
>
> diff --git a/mm/compaction.c b/mm/compaction.c
> index cfb661f4ce23..d38297018077 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -670,9 +670,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
> isolate_fail:
> if (strict)
> break;
> - else
> - continue;
> -
> }
>
> compact_unlock_irqrestore(&locked, flags);
On 7/30/2023 1:43 AM, Kemeng Shi wrote:
> From: Kemeng Shi <[email protected]>
>
> Merge the end_pfn boundary check for single page block forward and multiple
> page blocks forward to avoid do twice boundary check for multiple page
> blocks forward.
>
> Signed-off-by: Kemeng Shi <[email protected]>
LGTM.
Reviewed-by: Baolin Wang <[email protected]>
> ---
> mm/compaction.c | 5 ++---
> 1 file changed, 2 insertions(+), 3 deletions(-)
>
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 4a784872565a..65791a74c5e8 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -740,8 +740,6 @@ isolate_freepages_range(struct compact_control *cc,
> /* Protect pfn from changing by isolate_freepages_block */
> unsigned long isolate_start_pfn = pfn;
>
> - block_end_pfn = min(block_end_pfn, end_pfn);
> -
> /*
> * pfn could pass the block_end_pfn if isolated freepage
> * is more than pageblock order. In this case, we adjust
> @@ -750,9 +748,10 @@ isolate_freepages_range(struct compact_control *cc,
> if (pfn >= block_end_pfn) {
> block_start_pfn = pageblock_start_pfn(pfn);
> block_end_pfn = pageblock_end_pfn(pfn);
> - block_end_pfn = min(block_end_pfn, end_pfn);
> }
>
> + block_end_pfn = min(block_end_pfn, end_pfn);
> +
> if (!pageblock_pfn_to_page(block_start_pfn,
> block_end_pfn, cc->zone))
> break;