From: Davidlohr Bueso
Date: 2022-04-16 05:58:05

Subject: [PATCH 3/6] mm: make __node_reclaim() more flexible

Currently __node_reclaim() is tailored to the allocator paths. With
proactive per-node reclaim it requires a bit more flexibility:

- Deal in terms of nr_pages instead of order; the same change applies
  to the respective tracing.
- Make the caller pass an already armed scan control.
- Return the number of reclaimed pages rather than a boolean; the
  caller can trivially do the target check explicitly instead (see
  the sketch below).

The current node_reclaim() interface remains the same.
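For illustration only, a minimal sketch of what a proactive reclaim
caller could look like with the new interface. proactive_reclaim_node()
and its reclaim policy are hypothetical and not part of this patch;
only __node_reclaim() and the scan_control fields mirror what
node_reclaim() does below:

static unsigned long proactive_reclaim_node(struct pglist_data *pgdat,
					    unsigned long nr_pages)
{
	/* Armed by the caller, as __node_reclaim() now expects. */
	struct scan_control sc = {
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = current_gfp_context(GFP_KERNEL),
		.priority = NODE_RECLAIM_PRIORITY,
		.may_writepage = 1,
		.may_unmap = 1,
		.may_swap = 1,
		.reclaim_idx = MAX_NR_ZONES - 1,
	};

	/*
	 * Deals in nr_pages, not order, and returns the number of
	 * reclaimed pages; the caller decides whether that is enough.
	 */
	return __node_reclaim(pgdat, GFP_KERNEL, nr_pages, &sc);
}

With the tracing change, the begin event then reports the request
directly, e.g. "nid=0 nr_pages=32 gfp_flags=GFP_KERNEL".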

Signed-off-by: Davidlohr Bueso <[email protected]>
---
 include/trace/events/vmscan.h | 12 ++++-----
 mm/vmscan.c                   | 47 +++++++++++++++++++----------------
 2 files changed, 31 insertions(+), 28 deletions(-)

diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index de136dbd623a..ab6ce8d8770b 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -439,25 +439,25 @@ TRACE_EVENT(mm_vmscan_lru_shrink_active,
 
 TRACE_EVENT(mm_vmscan_node_reclaim_begin,
 
-	TP_PROTO(int nid, int order, gfp_t gfp_flags),
+	TP_PROTO(int nid, unsigned long nr_pages, gfp_t gfp_flags),
 
-	TP_ARGS(nid, order, gfp_flags),
+	TP_ARGS(nid, nr_pages, gfp_flags),
 
 	TP_STRUCT__entry(
 		__field(int, nid)
-		__field(int, order)
+		__field(unsigned long, nr_pages)
 		__field(gfp_t, gfp_flags)
 	),
 
 	TP_fast_assign(
 		__entry->nid = nid;
-		__entry->order = order;
+		__entry->nr_pages = nr_pages;
 		__entry->gfp_flags = gfp_flags;
 	),
 
-	TP_printk("nid=%d order=%d gfp_flags=%s",
+	TP_printk("nid=%d nr_pages=%lu gfp_flags=%s",
 		__entry->nid,
-		__entry->order,
+		__entry->nr_pages,
 		show_gfp_flags(__entry->gfp_flags))
 );

diff --git a/mm/vmscan.c b/mm/vmscan.c
index cb583fcbf5bf..1735c302831c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4668,36 +4668,28 @@ static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
 
 /*
  * Try to free up some pages from this node through reclaim.
+ * Returns the number of reclaimed pages.
 */
-static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
+static unsigned long __node_reclaim(struct pglist_data *pgdat,
+				    gfp_t gfp_mask, unsigned long nr_pages,
+				    struct scan_control *sc)
 {
 	/* Minimum pages needed in order to stay on node */
-	const unsigned long nr_pages = 1 << order;
 	struct task_struct *p = current;
 	unsigned int noreclaim_flag;
-	struct scan_control sc = {
-		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
-		.gfp_mask = current_gfp_context(gfp_mask),
-		.order = order,
-		.priority = NODE_RECLAIM_PRIORITY,
-		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
-		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
-		.may_swap = 1,
-		.reclaim_idx = gfp_zone(gfp_mask),
-	};
 	unsigned long pflags;
 
-	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
-					   sc.gfp_mask);
+	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, nr_pages,
+					   sc->gfp_mask);
 
 	cond_resched();
 	psi_memstall_enter(&pflags);
-	fs_reclaim_acquire(sc.gfp_mask);
+	fs_reclaim_acquire(sc->gfp_mask);
 	/*
 	 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
 	 */
 	noreclaim_flag = memalloc_noreclaim_save();
-	set_task_reclaim_state(p, &sc.reclaim_state);
+	set_task_reclaim_state(p, &sc->reclaim_state);
 
 	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
 		/*
@@ -4705,23 +4697,34 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 		 * priorities until we have enough memory freed.
 		 */
 		do {
-			shrink_node(pgdat, &sc);
-		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
+			shrink_node(pgdat, sc);
+		} while (sc->nr_reclaimed < nr_pages && --sc->priority >= 0);
 	}
 
 	set_task_reclaim_state(p, NULL);
 	memalloc_noreclaim_restore(noreclaim_flag);
-	fs_reclaim_release(sc.gfp_mask);
+	fs_reclaim_release(sc->gfp_mask);
 	psi_memstall_leave(&pflags);
 
-	trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
+	trace_mm_vmscan_node_reclaim_end(sc->nr_reclaimed);
 
-	return sc.nr_reclaimed >= nr_pages;
+	return sc->nr_reclaimed;
 }
 
 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
 {
 	int ret;
+	const unsigned long nr_pages = 1 << order;
+	struct scan_control sc = {
+		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
+		.gfp_mask = current_gfp_context(gfp_mask),
+		.order = order,
+		.priority = NODE_RECLAIM_PRIORITY,
+		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
+		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
+		.may_swap = 1,
+		.reclaim_idx = gfp_zone(gfp_mask),
+	};
 
 	/*
 	 * Node reclaim reclaims unmapped file backed pages and
@@ -4756,7 +4759,7 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
 	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
 		return NODE_RECLAIM_NOSCAN;
 
-	ret = __node_reclaim(pgdat, gfp_mask, order);
+	ret = __node_reclaim(pgdat, gfp_mask, nr_pages, &sc) >= nr_pages;
 	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
 
 	if (!ret)
--
2.26.2