These compaction cleanups are split out from the huge page allocator
series[1], in response to reviewer feedback.
Against current mm-stable.
[1] https://lore.kernel.org/linux-mm/[email protected]/
include/linux/compaction.h | 96 ++----------------------------------------------------------------------------------------------
include/trace/events/mmflags.h | 4 ++--
mm/compaction.c | 62 ++++++++++++++++++++++++++++++++++----------------------------
mm/page_alloc.c | 57 ++++++++++++++++++++-------------------------------------
mm/vmscan.c | 27 ++++++++++++++-------------
5 files changed, 72 insertions(+), 174 deletions(-)
The different branches for retry are unnecessarily complicated. There
are really only three outcomes: progress (retry n times), skipped
(retry if reclaim can help), failed (retry with higher priority).
Rearrange the branches and the retry counter to make it simpler.
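Schematically, the mapping implemented below:

  COMPACT_SUCCESS  - compaction made progress, but the allocation
                     raced: retry, up to max_retries times
  COMPACT_SKIPPED  - not enough free order-0 pages to serve as
                     migration targets: retry only if reclaim can help
  anything else    - compaction failed: retry at increased priority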
v2:
- fix trace point build (Mel)
- fix max_retries logic for costly allocs (Huang)
Signed-off-by: Johannes Weiner <[email protected]>
---
mm/page_alloc.c | 53 +++++++++++++++----------------------------------
1 file changed, 16 insertions(+), 37 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5a84a0bebc37..72660e924b95 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3772,16 +3772,22 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
* Compaction managed to coalesce some page blocks, but the
* allocation failed presumably due to a race. Retry some.
*/
- if (compact_result == COMPACT_SUCCESS)
- (*compaction_retries)++;
+ if (compact_result == COMPACT_SUCCESS) {
+ /*
+ * !costly requests are much more important than
+ * __GFP_RETRY_MAYFAIL costly ones because they are de
+ * facto nofail and invoke OOM killer to move on while
+ * costly can fail and users are ready to cope with
+ * that. 1/4 retries is rather arbitrary but we would
+ * need much more detailed feedback from compaction to
+ * make a better decision.
+ */
+ if (order > PAGE_ALLOC_COSTLY_ORDER)
+ max_retries /= 4;
- /*
- * All zones were scanned completely and still no result. It
- * doesn't really make much sense to retry except when the
- * failure could be caused by insufficient priority
- */
- if (compact_result == COMPACT_COMPLETE)
- goto check_priority;
+ ret = ++(*compaction_retries) <= max_retries;
+ goto out;
+ }
/*
* Compaction was skipped due to a lack of free order-0
@@ -3793,35 +3799,8 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
}
/*
- * If compaction backed due to being deferred, due to
- * contended locks in async mode, or due to scanners meeting
- * after a partial scan, retry with increased priority.
- */
- if (compact_result == COMPACT_DEFERRED ||
- compact_result == COMPACT_CONTENDED ||
- compact_result == COMPACT_PARTIAL_SKIPPED)
- goto check_priority;
-
- /*
- * !costly requests are much more important than __GFP_RETRY_MAYFAIL
- * costly ones because they are de facto nofail and invoke OOM
- * killer to move on while costly can fail and users are ready
- * to cope with that. 1/4 retries is rather arbitrary but we
- * would need much more detailed feedback from compaction to
- * make a better decision.
- */
- if (order > PAGE_ALLOC_COSTLY_ORDER)
- max_retries /= 4;
- if (*compaction_retries <= max_retries) {
- ret = true;
- goto out;
- }
-
- /*
- * Make sure there are attempts at the highest priority if we exhausted
- * all retries or failed at the lower priorities.
+ * Compaction failed. Retry with increasing priority.
*/
-check_priority:
min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
--
2.40.0
Remove the is_via_compact_memory() checks from all paths not reachable
via /proc/sys/vm/compact_memory.
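For context, /proc/sys/vm/compact_memory is the sysctl that compacts
all zones with order == -1, which is the value is_via_compact_memory()
tests for; the paths below can never see that order:

	# trigger full compaction of all zones on all nodes
	echo 1 > /proc/sys/vm/compact_memory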
Signed-off-by: Johannes Weiner <[email protected]>
---
mm/compaction.c | 11 +----------
mm/vmscan.c | 8 +-------
2 files changed, 2 insertions(+), 17 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 8f61cfa87c49..83ecbc829c62 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2293,9 +2293,6 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
unsigned long available;
unsigned long watermark;
- if (is_via_compact_memory(order))
- return true;
-
/* Allocation can already succeed, nothing to do */
watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
if (zone_watermark_ok(zone, order, watermark,
@@ -2850,9 +2847,6 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
if (!populated_zone(zone))
continue;
- if (is_via_compact_memory(pgdat->kcompactd_max_order))
- return true;
-
/* Allocation can already succeed, check other zones */
if (zone_watermark_ok(zone, pgdat->kcompactd_max_order,
min_wmark_pages(zone),
@@ -2897,9 +2891,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
if (compaction_deferred(zone, cc.order))
continue;
- if (is_via_compact_memory(cc.order))
- goto compact;
-
/* Allocation can already succeed, nothing to do */
if (zone_watermark_ok(zone, cc.order,
min_wmark_pages(zone), zoneid, 0))
@@ -2908,7 +2899,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
if (compaction_suitable(zone, cc.order,
zoneid) != COMPACT_CONTINUE)
continue;
-compact:
+
if (kthread_should_stop())
return;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c9c0f3e081f5..c0cfa9b86b48 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6397,9 +6397,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
if (!managed_zone(zone))
continue;
- if (sc->order == -1) /* is_via_compact_memory() */
- return false;
-
/* Allocation can already succeed, nothing to do */
if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
sc->reclaim_idx, 0))
@@ -6596,9 +6593,6 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
{
unsigned long watermark;
- if (sc->order == -1) /* is_via_compact_memory() */
- goto suitable;
-
/* Allocation can already succeed, nothing to do */
if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
sc->reclaim_idx, 0))
@@ -6608,7 +6602,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
if (compaction_suitable(zone, sc->order,
sc->reclaim_idx) == COMPACT_SKIPPED)
return false;
-suitable:
+
/*
* Compaction is already possible, but it takes time to run and there
* are potentially other callers using the pages just freed. So proceed
--
2.40.0
The watermark check in compaction_zonelist_suitable(), called from
should_compact_retry(), is sandwiched between two watermark checks
already: before, there are freelist attempts as part of direct reclaim
and direct compaction; after, there is a last-minute freelist attempt
in __alloc_pages_may_oom().
The check in compaction_zonelist_suitable() isn't necessary. Kill it.
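Schematically, where this sits in the allocator slowpath:

  1. direct reclaim and direct compaction, each followed by freelist
     attempts (with watermark checks)
  2. should_compact_retry()
       -> compaction_zonelist_suitable()   <- check removed here
  3. __alloc_pages_may_oom(): last-minute freelist attempt (again with
     a watermark check)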
Signed-off-by: Johannes Weiner <[email protected]>
---
mm/compaction.c | 7 -------
1 file changed, 7 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 83ecbc829c62..40ce4e6dd17e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2291,13 +2291,6 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
ac->highest_zoneidx, ac->nodemask) {
unsigned long available;
- unsigned long watermark;
-
- /* Allocation can already succeed, nothing to do */
- watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
- if (zone_watermark_ok(zone, order, watermark,
- ac->highest_zoneidx, alloc_flags))
- continue;
/*
* Do not consider all the reclaimable memory because we do not
--
2.40.0
On 5/19/23 14:39, Johannes Weiner wrote:
> The different branches for retry are unnecessarily complicated. There
> are really only three outcomes: progress (retry n times), skipped
> (retry if reclaim can help), failed (retry with higher priority).
>
> Rearrange the branches and the retry counter to make it simpler.
>
> v2:
> - fix trace point build (Mel)
> - fix max_retries logic for costly allocs (Huang)
>
> Signed-off-by: Johannes Weiner <[email protected]>
> ---
> mm/page_alloc.c | 53 +++++++++++++++----------------------------------
> 1 file changed, 16 insertions(+), 37 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 5a84a0bebc37..72660e924b95 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -3772,16 +3772,22 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
> * Compaction managed to coalesce some page blocks, but the
> * allocation failed presumably due to a race. Retry some.
> */
> - if (compact_result == COMPACT_SUCCESS)
> - (*compaction_retries)++;
> + if (compact_result == COMPACT_SUCCESS) {
> + /*
> + * !costly requests are much more important than
> + * __GFP_RETRY_MAYFAIL costly ones because they are de
> + * facto nofail and invoke OOM killer to move on while
> + * costly can fail and users are ready to cope with
> + * that. 1/4 retries is rather arbitrary but we would
> + * need much more detailed feedback from compaction to
> + * make a better decision.
> + */
> + if (order > PAGE_ALLOC_COSTLY_ORDER)
> + max_retries /= 4;
>
> - /*
> - * All zones were scanned completely and still no result. It
> - * doesn't really make much sense to retry except when the
> - * failure could be caused by insufficient priority
> - */
> - if (compact_result == COMPACT_COMPLETE)
> - goto check_priority;
> + ret = ++(*compaction_retries) <= max_retries;
> + goto out;
I think you simplified this part too much, so now once it runs out of
retries, it will return false, while previously it would increase the priority.
> + }
>
> /*
> * Compaction was skipped due to a lack of free order-0
> @@ -3793,35 +3799,8 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
> }
>
> /*
> - * If compaction backed due to being deferred, due to
> - * contended locks in async mode, or due to scanners meeting
> - * after a partial scan, retry with increased priority.
> - */
> - if (compact_result == COMPACT_DEFERRED ||
> - compact_result == COMPACT_CONTENDED ||
> - compact_result == COMPACT_PARTIAL_SKIPPED)
> - goto check_priority;
> -
> - /*
> - * !costly requests are much more important than __GFP_RETRY_MAYFAIL
> - * costly ones because they are de facto nofail and invoke OOM
> - * killer to move on while costly can fail and users are ready
> - * to cope with that. 1/4 retries is rather arbitrary but we
> - * would need much more detailed feedback from compaction to
> - * make a better decision.
> - */
> - if (order > PAGE_ALLOC_COSTLY_ORDER)
> - max_retries /= 4;
> - if (*compaction_retries <= max_retries) {
> - ret = true;
> - goto out;
> - }
> -
> - /*
> - * Make sure there are attempts at the highest priority if we exhausted
> - * all retries or failed at the lower priorities.
> + * Compaction failed. Retry with increasing priority.
> */
> -check_priority:
> min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
> MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
>
On Mon, May 29, 2023 at 03:03:52PM +0200, Vlastimil Babka wrote:
> On 5/19/23 14:39, Johannes Weiner wrote:
> > The different branches for retry are unnecessarily complicated. There
> > are really only three outcomes: progress (retry n times), skipped
> > (retry if reclaim can help), failed (retry with higher priority).
> >
> > Rearrange the branches and the retry counter to make it simpler.
> >
> > v2:
> > - fix trace point build (Mel)
> > - fix max_retries logic for costly allocs (Huang)
> >
> > Signed-off-by: Johannes Weiner <[email protected]>
> > ---
> > mm/page_alloc.c | 53 +++++++++++++++----------------------------------
> > 1 file changed, 16 insertions(+), 37 deletions(-)
> >
> > diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> > index 5a84a0bebc37..72660e924b95 100644
> > --- a/mm/page_alloc.c
> > +++ b/mm/page_alloc.c
> > @@ -3772,16 +3772,22 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
> > * Compaction managed to coalesce some page blocks, but the
> > * allocation failed presumably due to a race. Retry some.
> > */
> > - if (compact_result == COMPACT_SUCCESS)
> > - (*compaction_retries)++;
> > + if (compact_result == COMPACT_SUCCESS) {
> > + /*
> > + * !costly requests are much more important than
> > + * __GFP_RETRY_MAYFAIL costly ones because they are de
> > + * facto nofail and invoke OOM killer to move on while
> > + * costly can fail and users are ready to cope with
> > + * that. 1/4 retries is rather arbitrary but we would
> > + * need much more detailed feedback from compaction to
> > + * make a better decision.
> > + */
> > + if (order > PAGE_ALLOC_COSTLY_ORDER)
> > + max_retries /= 4;
> >
> > - /*
> > - * All zones were scanned completely and still no result. It
> > - * doesn't really make much sense to retry except when the
> > - * failure could be caused by insufficient priority
> > - */
> > - if (compact_result == COMPACT_COMPLETE)
> > - goto check_priority;
> > + ret = ++(*compaction_retries) <= max_retries;
> > + goto out;
>
> I think you simplified this part too much, so now once it runs out of
> retries, it will return false, while previously it would increase the priority.
Oops, I'll send a delta fix to Andrew tomorrow. Thanks!
On 5/19/23 14:39, Johannes Weiner wrote:
> Remove the is_via_compact_memory() checks from all paths not reachable
> via /proc/sys/vm/compact_memory.
>
> Signed-off-by: Johannes Weiner <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
On 5/19/23 14:39, Johannes Weiner wrote:
> The watermark check in compaction_zonelist_suitable(), called from
> should_compact_retry(), is sandwiched between two watermark checks
> already: before, there are freelist attempts as part of direct reclaim
> and direct compaction; after, there is a last-minute freelist attempt
> in __alloc_pages_may_oom().
>
> The check in compaction_zonelist_suitable() isn't necessary. Kill it.
>
> Signed-off-by: Johannes Weiner <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
On Mon, May 29, 2023 at 12:38:07PM -0400, Johannes Weiner wrote:
> On Mon, May 29, 2023 at 03:03:52PM +0200, Vlastimil Babka wrote:
> > I think you simplified this part too much, so now once it runs out of
> > retries, it will return false, while previously it would increase the priority.
Here is the delta fix. If this looks good to everybody, can you please
fold this into the patch you have in tree? Thanks!
---
From 4b9429f9ef04fcb7bb5ffae0db8ea113b26d097b Mon Sep 17 00:00:00 2001
From: Johannes Weiner <[email protected]>
Date: Fri, 2 Jun 2023 16:02:37 +0200
Subject: [PATCH] mm: compaction: simplify should_compact_retry() fix
Vlastimil points out an unintended change. Previously when hitting
max_retries we'd bump the priority level and restart the loop. Now we
bail out and fail instead. Restore the original behavior.
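With this applied, the decision flow in should_compact_retry() is
roughly the following (a simplified sketch; the bailout checks and the
trace point at the end are omitted):

	if (compact_result == COMPACT_SKIPPED) {
		/* No order-0 migration targets: defer to reclaim */
		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
		goto out;
	}

	if (compact_result == COMPACT_SUCCESS) {
		/* Progress, but the allocation raced: retry a few times */
		if (order > PAGE_ALLOC_COSTLY_ORDER)
			max_retries /= 4;
		if (++(*compaction_retries) <= max_retries) {
			ret = true;
			goto out;
		}
		/* Out of retries: fall through to priority escalation */
	}

	/* Compaction failed. Retry with increasing priority. */
	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
	if (*compact_priority > min_priority) {
		(*compact_priority)--;
		*compaction_retries = 0;
		ret = true;
	}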
Signed-off-by: Johannes Weiner <[email protected]>
---
mm/page_alloc.c | 24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 72660e924b95..e7d7db36582b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3768,6 +3768,15 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
if (fatal_signal_pending(current))
return false;
+ /*
+ * Compaction was skipped due to a lack of free order-0
+ * migration targets. Continue if reclaim can help.
+ */
+ if (compact_result == COMPACT_SKIPPED) {
+ ret = compaction_zonelist_suitable(ac, order, alloc_flags);
+ goto out;
+ }
+
/*
* Compaction managed to coalesce some page blocks, but the
* allocation failed presumably due to a race. Retry some.
@@ -3785,17 +3794,10 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
if (order > PAGE_ALLOC_COSTLY_ORDER)
max_retries /= 4;
- ret = ++(*compaction_retries) <= max_retries;
- goto out;
- }
-
- /*
- * Compaction was skipped due to a lack of free order-0
- * migration targets. Continue if reclaim can help.
- */
- if (compact_result == COMPACT_SKIPPED) {
- ret = compaction_zonelist_suitable(ac, order, alloc_flags);
- goto out;
+ if (++(*compaction_retries) <= max_retries) {
+ ret = true;
+ goto out;
+ }
}
/*
--
2.40.1
Since compaction_suitable() only returns COMPACT_CONTINUE or
COMPACT_SKIPPED now, a bool return value simplifies the callsites.
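A typical callsite conversion, taken from the mm/vmscan.c hunk below:

	/* before */
	if (compaction_suitable(zone, sc->order,
				sc->reclaim_idx) == COMPACT_CONTINUE)
		return false;

	/* after */
	if (compaction_suitable(zone, sc->order, sc->reclaim_idx))
		return false;

The enum is kept internal to compaction_suitable() only to feed the
mm_compaction_suitable trace point.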
Suggested-by: Vlastimil Babka <[email protected]>
Signed-off-by: Johannes Weiner <[email protected]>
---
include/linux/compaction.h | 6 ++--
mm/compaction.c | 64 ++++++++++++++++++--------------------
mm/vmscan.c | 6 ++--
3 files changed, 36 insertions(+), 40 deletions(-)
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 9f7cf3e1bf89..57b16e69c19a 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -89,7 +89,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
const struct alloc_context *ac, enum compact_priority prio,
struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
-extern enum compact_result compaction_suitable(struct zone *zone, int order,
+extern bool compaction_suitable(struct zone *zone, int order,
int highest_zoneidx);
extern void compaction_defer_reset(struct zone *zone, int order,
@@ -107,10 +107,10 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
-static inline enum compact_result compaction_suitable(struct zone *zone, int order,
+static inline bool compaction_suitable(struct zone *zone, int order,
int highest_zoneidx)
{
- return COMPACT_SKIPPED;
+ return false;
}
static inline void kcompactd_run(int nid)
diff --git a/mm/compaction.c b/mm/compaction.c
index fdee5f1ac5a1..d354d8af157c 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2205,9 +2205,9 @@ static enum compact_result compact_finished(struct compact_control *cc)
return ret;
}
-static enum compact_result __compaction_suitable(struct zone *zone, int order,
- int highest_zoneidx,
- unsigned long wmark_target)
+static bool __compaction_suitable(struct zone *zone, int order,
+ int highest_zoneidx,
+ unsigned long wmark_target)
{
unsigned long watermark;
/*
@@ -2227,27 +2227,20 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
low_wmark_pages(zone) : min_wmark_pages(zone);
watermark += compact_gap(order);
- if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
- ALLOC_CMA, wmark_target))
- return COMPACT_SKIPPED;
-
- return COMPACT_CONTINUE;
+ return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
+ ALLOC_CMA, wmark_target);
}
/*
* compaction_suitable: Is this suitable to run compaction on this zone now?
- * Returns
- * COMPACT_SKIPPED - If there are too few free pages for compaction
- * COMPACT_CONTINUE - If compaction should run now
*/
-enum compact_result compaction_suitable(struct zone *zone, int order,
- int highest_zoneidx)
+bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx)
{
- enum compact_result ret;
- int fragindex;
+ enum compact_result compact_result;
+ bool suitable;
- ret = __compaction_suitable(zone, order, highest_zoneidx,
- zone_page_state(zone, NR_FREE_PAGES));
+ suitable = __compaction_suitable(zone, order, highest_zoneidx,
+ zone_page_state(zone, NR_FREE_PAGES));
/*
* fragmentation index determines if allocation failures are due to
* low memory or external fragmentation
@@ -2264,17 +2257,24 @@ enum compact_result compaction_suitable(struct zone *zone, int order,
* excessive compaction for costly orders, but it should not be at the
* expense of system stability.
*/
- if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
- fragindex = fragmentation_index(zone, order);
- if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
- ret = COMPACT_NOT_SUITABLE_ZONE;
+ if (suitable) {
+ compact_result = COMPACT_CONTINUE;
+ if (order > PAGE_ALLOC_COSTLY_ORDER) {
+ int fragindex = fragmentation_index(zone, order);
+
+ if (fragindex >= 0 &&
+ fragindex <= sysctl_extfrag_threshold) {
+ suitable = false;
+ compact_result = COMPACT_NOT_SUITABLE_ZONE;
+ }
+ }
+ } else {
+ compact_result = COMPACT_SKIPPED;
}
- trace_mm_compaction_suitable(zone, order, ret);
- if (ret == COMPACT_NOT_SUITABLE_ZONE)
- ret = COMPACT_SKIPPED;
+ trace_mm_compaction_suitable(zone, order, compact_result);
- return ret;
+ return suitable;
}
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
@@ -2300,7 +2300,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
available = zone_reclaimable_pages(zone) / order;
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
if (__compaction_suitable(zone, order, ac->highest_zoneidx,
- available) == COMPACT_CONTINUE)
+ available))
return true;
}
@@ -2341,11 +2341,10 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
cc->highest_zoneidx, cc->alloc_flags))
return COMPACT_SUCCESS;
- ret = compaction_suitable(cc->zone, cc->order,
- cc->highest_zoneidx);
/* Compaction is likely to fail */
- if (ret == COMPACT_SKIPPED)
- return ret;
+ if (!compaction_suitable(cc->zone, cc->order,
+ cc->highest_zoneidx))
+ return COMPACT_SKIPPED;
}
/*
@@ -2846,7 +2845,7 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
continue;
if (compaction_suitable(zone, pgdat->kcompactd_max_order,
- highest_zoneidx) == COMPACT_CONTINUE)
+ highest_zoneidx))
return true;
}
@@ -2888,8 +2887,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
min_wmark_pages(zone), zoneid, 0))
continue;
- if (compaction_suitable(zone, cc.order,
- zoneid) != COMPACT_CONTINUE)
+ if (!compaction_suitable(zone, cc.order, zoneid))
continue;
if (kthread_should_stop())
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c0cfa9b86b48..e9a8ca124982 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6402,8 +6402,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
sc->reclaim_idx, 0))
return false;
- if (compaction_suitable(zone, sc->order,
- sc->reclaim_idx) == COMPACT_CONTINUE)
+ if (compaction_suitable(zone, sc->order, sc->reclaim_idx))
return false;
}
@@ -6599,8 +6598,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
return true;
/* Compaction cannot yet proceed. Do reclaim. */
- if (compaction_suitable(zone, sc->order,
- sc->reclaim_idx) == COMPACT_SKIPPED)
+ if (!compaction_suitable(zone, sc->order, sc->reclaim_idx))
return false;
/*
--
2.40.1
On 6/2/23 16:47, Johannes Weiner wrote:
> On Mon, May 29, 2023 at 12:38:07PM -0400, Johannes Weiner wrote:
>> On Mon, May 29, 2023 at 03:03:52PM +0200, Vlastimil Babka wrote:
>> > I think you simplified this part too much, so now once it runs out of
>> > retries, it will return false, while previously it would increase the priority.
>
> Here is the delta fix. If this looks good to everybody, can you please
> fold this into the patch you have in tree? Thanks!
>
> ---
> From 4b9429f9ef04fcb7bb5ffae0db8ea113b26d097b Mon Sep 17 00:00:00 2001
> From: Johannes Weiner <[email protected]>
> Date: Fri, 2 Jun 2023 16:02:37 +0200
> Subject: [PATCH] mm: compaction: simplify should_compact_retry() fix
>
> Vlastimil points out an unintended change. Previously when hitting
> max_retries we'd bump the priority level and restart the loop. Now we
> bail out and fail instead. Restore the original behavior.
>
> Signed-off-by: Johannes Weiner <[email protected]>
For the 2/5 +fix
Acked-by: Vlastimil Babka <[email protected]>
> ---
> mm/page_alloc.c | 24 +++++++++++++-----------
> 1 file changed, 13 insertions(+), 11 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 72660e924b95..e7d7db36582b 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -3768,6 +3768,15 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
> if (fatal_signal_pending(current))
> return false;
>
> + /*
> + * Compaction was skipped due to a lack of free order-0
> + * migration targets. Continue if reclaim can help.
> + */
> + if (compact_result == COMPACT_SKIPPED) {
> + ret = compaction_zonelist_suitable(ac, order, alloc_flags);
> + goto out;
> + }
> +
> /*
> * Compaction managed to coalesce some page blocks, but the
> * allocation failed presumably due to a race. Retry some.
> @@ -3785,17 +3794,10 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
> if (order > PAGE_ALLOC_COSTLY_ORDER)
> max_retries /= 4;
>
> - ret = ++(*compaction_retries) <= max_retries;
> - goto out;
> - }
> -
> - /*
> - * Compaction was skipped due to a lack of free order-0
> - * migration targets. Continue if reclaim can help.
> - */
> - if (compact_result == COMPACT_SKIPPED) {
> - ret = compaction_zonelist_suitable(ac, order, alloc_flags);
> - goto out;
> + if (++(*compaction_retries) <= max_retries) {
> + ret = true;
> + goto out;
> + }
> }
>
> /*
On 6/2/23 17:12, Johannes Weiner wrote:
> Since compaction_suitable() only returns COMPACT_CONTINUE or
> COMPACT_SKIPPED now, a bool return value simplifies the callsites.
>
> Suggested-by: Vlastimil Babka <[email protected]>
> Signed-off-by: Johannes Weiner <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Thanks!