2023-05-22 11:33:49

by T.J. Alumbaugh

Subject: [PATCH mm-unstable 1/4] mm: multi-gen LRU: use macro for bitmap

Use the DECLARE_BITMAP macro when possible.

Signed-off-by: T.J. Alumbaugh <[email protected]>
---
mm/vmscan.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4637f6462e9c..cf18873a36b9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4144,7 +4144,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
unsigned long next;
unsigned long addr;
struct vm_area_struct *vma;
- unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)];
+ DECLARE_BITMAP(bitmap, MIN_LRU_BATCH);
unsigned long first = -1;
struct lru_gen_mm_walk *walk = args->private;

--
2.40.1.698.g37aff9b760-goog
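
For reference, DECLARE_BITMAP expands to exactly the array it replaces, so
the change is purely cosmetic. A simplified sketch of the relevant
definitions (condensed from include/linux/types.h and include/linux/bitops.h;
not the verbatim source):

    #define BITS_TO_LONGS(nr) \
            DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
    #define DECLARE_BITMAP(name, bits) \
            unsigned long name[BITS_TO_LONGS(bits)]

    /* hence the two declarations below are equivalent: */
    DECLARE_BITMAP(bitmap, MIN_LRU_BATCH);
    unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)];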



2023-05-22 11:34:00

by T.J. Alumbaugh

Subject: [PATCH mm-unstable 3/4] mm: multi-gen LRU: add helpers in page table walks

Add helpers to the page table walking code:
- Clarifies intent via the names "should_walk_mmu" and "should_clear_pmd_young"
- Avoids repeating the same logic in two places

Signed-off-by: T.J. Alumbaugh <[email protected]>
---
mm/vmscan.c | 20 +++++++++++++++-----
1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index e088db138f5f..ad0f589d32e6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3234,6 +3234,16 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
#define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap])
#endif

+static bool should_walk_mmu(void)
+{
+ return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK);
+}
+
+static bool should_clear_pmd_young(void)
+{
+ return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG);
+}
+
/******************************************************************************
* shorthand helpers
******************************************************************************/
@@ -4098,7 +4108,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area
goto next;

if (!pmd_trans_huge(pmd[i])) {
- if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG))
+ if (should_clear_pmd_young())
pmdp_test_and_clear_young(vma, addr, pmd + i);
goto next;
}
@@ -4191,7 +4201,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
#endif
walk->mm_stats[MM_NONLEAF_TOTAL]++;

- if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG)) {
+ if (should_clear_pmd_young()) {
if (!pmd_young(val))
continue;

@@ -4493,7 +4503,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
* handful of PTEs. Spreading the work out over a period of time usually
* is less efficient, but it avoids bursty page faults.
*/
- if (!arch_has_hw_pte_young() || !get_cap(LRU_GEN_MM_WALK)) {
+ if (!should_walk_mmu()) {
success = iterate_mm_list_nowalk(lruvec, max_seq);
goto done;
}
@@ -5730,10 +5740,10 @@ static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, c
if (get_cap(LRU_GEN_CORE))
caps |= BIT(LRU_GEN_CORE);

- if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))
+ if (should_walk_mmu())
caps |= BIT(LRU_GEN_MM_WALK);

- if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG))
+ if (should_clear_pmd_young())
caps |= BIT(LRU_GEN_NONLEAF_YOUNG);

return sysfs_emit(buf, "0x%04x\n", caps);
--
2.40.1.698.g37aff9b760-goog
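
A side note on cost: get_cap() is built on static branches (see the
DEFINE_STATIC_KEY_ARRAY_FALSE() context above), so the new helpers still
compile down to the arch check plus a patched jump and stay cheap on the
walk paths. For the enabled_show() hunk, the capabilities end up as bits in
the value reported through /sys/kernel/mm/lru_gen/enabled; a sketch of the
resulting mask, assuming the usual cap ordering with LRU_GEN_CORE first:

    caps |= BIT(LRU_GEN_CORE);           /* 0x0001 */
    caps |= BIT(LRU_GEN_MM_WALK);        /* 0x0002, gated by should_walk_mmu() */
    caps |= BIT(LRU_GEN_NONLEAF_YOUNG);  /* 0x0004, gated by should_clear_pmd_young() */
    /* with all three set, sysfs_emit() prints "0x0007" */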


2023-05-22 11:34:25

by T.J. Alumbaugh

Subject: [PATCH mm-unstable 2/4] mm: multi-gen LRU: cleanup lru_gen_soft_reclaim()

lru_gen_soft_reclaim() now derives the lruvec from the memcg and node ID
itself, which keeps the interface cleaner on the caller side.

Signed-off-by: T.J. Alumbaugh <[email protected]>
---
include/linux/mmzone.h | 4 ++--
mm/memcontrol.c | 2 +-
mm/vmscan.c | 4 +++-
3 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3a68326c9989..5a7ada0413da 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -534,7 +534,7 @@ void lru_gen_exit_memcg(struct mem_cgroup *memcg);
void lru_gen_online_memcg(struct mem_cgroup *memcg);
void lru_gen_offline_memcg(struct mem_cgroup *memcg);
void lru_gen_release_memcg(struct mem_cgroup *memcg);
-void lru_gen_soft_reclaim(struct lruvec *lruvec);
+void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);

#else /* !CONFIG_MEMCG */

@@ -585,7 +585,7 @@ static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
{
}

-static inline void lru_gen_soft_reclaim(struct lruvec *lruvec)
+static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
{
}

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d31fb1e2cb33..738ba18f3a0c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -485,7 +485,7 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)

if (lru_gen_enabled()) {
if (soft_limit_excess(memcg))
- lru_gen_soft_reclaim(&memcg->nodeinfo[nid]->lruvec);
+ lru_gen_soft_reclaim(memcg, nid);
return;
}

diff --git a/mm/vmscan.c b/mm/vmscan.c
index cf18873a36b9..e088db138f5f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4846,8 +4846,10 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
}
}

-void lru_gen_soft_reclaim(struct lruvec *lruvec)
+void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
{
+ struct lruvec *lruvec = get_lruvec(memcg, nid);
+
/* see the comment on MEMCG_NR_GENS */
if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD)
lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
--
2.40.1.698.g37aff9b760-goog
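
For context, get_lruvec() already exists in mm/vmscan.c and performs the
lookup the caller used to open-code. A simplified sketch, omitting the pgdat
backpointer fixup and debug checks of the real function:

    static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
    {
            struct pglist_data *pgdat = NODE_DATA(nid);

    #ifdef CONFIG_MEMCG
            if (memcg)
                    return &memcg->nodeinfo[nid]->lruvec;
    #endif
            /* memcg disabled: fall back to the node-wide lruvec */
            return &pgdat->__lruvec;
    }

This is why mem_cgroup_update_tree() can drop the direct
&memcg->nodeinfo[nid]->lruvec dereference and pass (memcg, nid) instead.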


2023-05-22 11:49:55

by T.J. Alumbaugh

Subject: [PATCH mm-unstable 4/4] mm: multi-gen LRU: cleanup lru_gen_test_recent()

Avoid passing memcg* and pglist_data* to lru_gen_test_recent(), since
its callers only use the lruvec anyway.

Signed-off-by: T.J. Alumbaugh <[email protected]>
---
mm/workingset.c | 46 ++++++++++++++++------------------------------
1 file changed, 16 insertions(+), 30 deletions(-)

diff --git a/mm/workingset.c b/mm/workingset.c
index 90ae785d4c9c..5796e927e6d7 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -257,59 +257,46 @@ static void *lru_gen_eviction(struct folio *folio)

/*
* Tests if the shadow entry is for a folio that was recently evicted.
- * Fills in @memcgid, @pglist_data, @token, @workingset with the values
- * unpacked from shadow.
+ * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
*/
-static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid,
- struct pglist_data **pgdat, unsigned long *token, bool *workingset)
+static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
+ unsigned long *token, bool *workingset)
{
- struct mem_cgroup *eviction_memcg;
- struct lruvec *lruvec;
- struct lru_gen_folio *lrugen;
+ int memcg_id;
unsigned long min_seq;
+ struct mem_cgroup *memcg;
+ struct pglist_data *pgdat;

- unpack_shadow(shadow, memcgid, pgdat, token, workingset);
- eviction_memcg = mem_cgroup_from_id(*memcgid);
+ unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);

- lruvec = mem_cgroup_lruvec(eviction_memcg, *pgdat);
- lrugen = &lruvec->lrugen;
+ memcg = mem_cgroup_from_id(memcg_id);
+ *lruvec = mem_cgroup_lruvec(memcg, pgdat);

- min_seq = READ_ONCE(lrugen->min_seq[file]);
+ min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]);
return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH));
}

static void lru_gen_refault(struct folio *folio, void *shadow)
{
int hist, tier, refs;
- int memcg_id;
bool workingset;
unsigned long token;
- unsigned long min_seq;
struct lruvec *lruvec;
struct lru_gen_folio *lrugen;
- struct mem_cgroup *memcg;
- struct pglist_data *pgdat;
int type = folio_is_file_lru(folio);
int delta = folio_nr_pages(folio);

rcu_read_lock();

- if (!lru_gen_test_recent(shadow, type, &memcg_id, &pgdat, &token,
- &workingset))
+ if (!lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset))
goto unlock;

- memcg = folio_memcg_rcu(folio);
- if (memcg_id != mem_cgroup_id(memcg))
+ if (lruvec != folio_lruvec(folio))
goto unlock;

- if (pgdat != folio_pgdat(folio))
- goto unlock;
-
- lruvec = mem_cgroup_lruvec(memcg, pgdat);
lrugen = &lruvec->lrugen;
- min_seq = READ_ONCE(lrugen->min_seq[type]);

- hist = lru_hist_from_seq(min_seq);
+ hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type]));
/* see the comment in folio_lru_refs() */
refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset;
tier = lru_tier_from_refs(refs);
@@ -339,8 +326,8 @@ static void *lru_gen_eviction(struct folio *folio)
return NULL;
}

-static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid,
- struct pglist_data **pgdat, unsigned long *token, bool *workingset)
+static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
+ unsigned long *token, bool *workingset)
{
return false;
}
@@ -435,8 +422,7 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset)
unsigned long eviction;

if (lru_gen_enabled())
- return lru_gen_test_recent(shadow, file, &memcgid, &pgdat, &eviction,
- workingset);
+ return lru_gen_test_recent(shadow, file, &eviction_lruvec, &eviction, workingset);

unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
eviction <<= bucket_order;
--
2.40.1.698.g37aff9b760-goog
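
For background on what unpack_shadow() recovers here: a shadow entry packs
the eviction token, memcg ID, node ID and workingset bit into a single
xarray value. A sketch of the packing side (simplified from mm/workingset.c;
field widths are config-dependent, so treat this as illustrative):

    static void *pack_shadow(int memcgid, pg_data_t *pgdat,
                             unsigned long eviction, bool workingset)
    {
            eviction &= EVICTION_MASK;
            eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
            eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
            eviction = (eviction << WORKINGSET_SHIFT) | workingset;

            return xa_mk_value(eviction);
    }

Since the memcg ID and node ID are only ever used to look up the lruvec,
having lru_gen_test_recent() return the lruvec directly lets
lru_gen_refault() collapse its two separate memcg/pgdat comparisons into a
single folio_lruvec() check.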


2023-05-22 11:57:58

by David Hildenbrand

Subject: Re: [PATCH mm-unstable 1/4] mm: multi-gen LRU: use macro for bitmap

On 22.05.23 13:20, T.J. Alumbaugh wrote:
> Use the DECLARE_BITMAP macro when possible.

Reviewed-by: David Hildenbrand <[email protected]>

--
Thanks,

David / dhildenb


2023-06-01 08:08:49

by Yuanchu Xie

Subject: Re: [PATCH mm-unstable 4/4] mm: multi-gen LRU: cleanup lru_gen_test_recent()

On Mon, May 22, 2023 at 7:21 PM T.J. Alumbaugh <[email protected]> wrote:
>
> Avoid passing memcg* and pglist_data* to lru_gen_test_recent(), since
> its callers only use the lruvec anyway.
>
Reviewed-by: Yuanchu Xie <[email protected]>

2023-06-01 08:13:04

by Yuanchu Xie

Subject: Re: [PATCH mm-unstable 3/4] mm: multi-gen LRU: add helpers in page table walks

On Mon, May 22, 2023 at 7:21 PM T.J. Alumbaugh <[email protected]> wrote:
>
> Add helpers to the page table walking code:
> - Clarifies intent via the names "should_walk_mmu" and "should_clear_pmd_young"
I wonder if these should be called "can_walk_mmu" and
"can_clear_pmd_young", but that's rather minor.

> - Avoids repeating the same logic in two places
>
Other than that,
Reviewed-by: Yuanchu Xie <[email protected]>

2023-06-01 08:15:23

by Yuanchu Xie

Subject: Re: [PATCH mm-unstable 2/4] mm: multi-gen LRU: cleanup lru_gen_soft_reclaim()

Reviewed-by: Yuanchu Xie <[email protected]>

On Mon, May 22, 2023 at 7:21 PM T.J. Alumbaugh <[email protected]> wrote:
>
> lru_gen_soft_reclaim() now derives the lruvec from the memcg and node ID
> itself, which keeps the interface cleaner on the caller side.

2023-06-01 08:15:55

by Yuanchu Xie

Subject: Re: [PATCH mm-unstable 1/4] mm: multi-gen LRU: use macro for bitmap

Reviewed-by: Yuanchu Xie <[email protected]>

On Mon, May 22, 2023 at 7:21 PM T.J. Alumbaugh <[email protected]> wrote:
>
> Use the DECLARE_BITMAP macro when possible.