From: Yu Zhao
Date: 2022-06-14 07:49:15

Subject: [PATCH v12 12/14] mm: multi-gen LRU: debugfs interface

Add /sys/kernel/debug/lru_gen for working set estimation and proactive
reclaim. These techniques are commonly used to optimize job scheduling
(bin packing) in data centers [1][2].

Compared with the page table-based approach and the PFN-based
approach, this lruvec-based approach has the following advantages:
1. It offers better choices because it is aware of memcgs, NUMA nodes,
shared mappings and unmapped page cache.
2. It is more scalable because it is O(nr_hot_pages), whereas the
PFN-based approach is O(nr_total_pages).

Add /sys/kernel/debug/lru_gen_full for debugging.
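
Both the aging ('+') and the eviction ('-') are driven by writing commands
to /sys/kernel/debug/lru_gen. As an illustration, here is a minimal
userspace sketch; the command grammar comes from the parser in
lru_gen_seq_write() below, but the generation numbers, swappiness and page
count are made up for the example:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/lru_gen", O_WRONLY);

	if (fd < 0)
		return 1;

	/* + memcg_id node_id max_gen_nr [can_swap [force_scan]] */
	write(fd, "+ 0 0 5 1 1\n", 12);

	/* - memcg_id node_id min_gen_nr [swappiness [nr_to_reclaim]] */
	write(fd, "- 0 0 3 100 64\n", 15);

	close(fd);
	return 0;
}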

[1] https://dl.acm.org/doi/10.1145/3297858.3304053
[2] https://dl.acm.org/doi/10.1145/3503222.3507731

Signed-off-by: Yu Zhao <[email protected]>
Acked-by: Brian Geffon <[email protected]>
Acked-by: Jan Alexander Steffens (heftig) <[email protected]>
Acked-by: Oleksandr Natalenko <[email protected]>
Acked-by: Steven Barrett <[email protected]>
Acked-by: Suleiman Souhlal <[email protected]>
Tested-by: Daniel Byrne <[email protected]>
Tested-by: Donald Carr <[email protected]>
Tested-by: Holger Hoffstätte <[email protected]>
Tested-by: Konstantin Kharlamov <[email protected]>
Tested-by: Shuang Zhai <[email protected]>
Tested-by: Sofia Trinh <[email protected]>
Tested-by: Vaibhav Jain <[email protected]>
---
 include/linux/nodemask.h |   1 +
 mm/vmscan.c              | 412 ++++++++++++++++++++++++++++++++++++++-
 2 files changed, 403 insertions(+), 10 deletions(-)

diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 0f233b76c9ce..292ec0ce0d63 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -485,6 +485,7 @@ static inline int num_node_state(enum node_states state)
#define first_online_node 0
#define first_memory_node 0
#define next_online_node(nid) (MAX_NUMNODES)
+#define next_memory_node(nid) (MAX_NUMNODES)
#define nr_node_ids 1U
#define nr_online_nodes 1U

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 075525a9526a..558fdb857c3e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -53,6 +53,7 @@
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include <linux/ctype.h>
+#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -4135,12 +4136,40 @@ static void clear_mm_walk(void)
kfree(walk);
}

-static void inc_min_seq(struct lruvec *lruvec, int type)
+static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
{
+ int zone;
+ int remaining = MAX_LRU_BATCH;
struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);

+ if (type == LRU_GEN_ANON && !can_swap)
+ goto done;
+
+ /* prevent cold/hot inversion if force_scan is true */
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+ struct list_head *head = &lrugen->lists[old_gen][type][zone];
+
+ while (!list_empty(head)) {
+ struct folio *folio = lru_to_folio(head);
+
+ VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
+
+ new_gen = folio_inc_gen(lruvec, folio, false);
+ list_move_tail(&folio->lru, &lrugen->lists[new_gen][type][zone]);
+
+ if (!--remaining)
+ return false;
+ }
+ }
+done:
reset_ctrl_pos(lruvec, type, true);
WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
+
+ return true;
}

static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
@@ -4186,7 +4215,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
return success;
}

-static void inc_max_seq(struct lruvec *lruvec, bool can_swap)
+static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
{
int prev, next;
int type, zone;
@@ -4200,9 +4229,13 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap)
if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
continue;

- VM_WARN_ON_ONCE(type == LRU_GEN_FILE || can_swap);
+ VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap));

- inc_min_seq(lruvec, type);
+ while (!inc_min_seq(lruvec, type, can_swap)) {
+ spin_unlock_irq(&lruvec->lru_lock);
+ cond_resched();
+ spin_lock_irq(&lruvec->lru_lock);
+ }
}

/*
@@ -4239,7 +4272,7 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap)
}

static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
- struct scan_control *sc, bool can_swap)
+ struct scan_control *sc, bool can_swap, bool force_scan)
{
bool success;
struct lru_gen_mm_walk *walk;
@@ -4260,7 +4293,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
* handful of PTEs. Spreading the work out over a period of time usually
* is less efficient, but it avoids bursty page faults.
*/
- if (!(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) {
+ if (!force_scan && !(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) {
success = iterate_mm_list_nowalk(lruvec, max_seq);
goto done;
}
@@ -4274,7 +4307,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
walk->lruvec = lruvec;
walk->max_seq = max_seq;
walk->can_swap = can_swap;
- walk->force_scan = false;
+ walk->force_scan = force_scan;

do {
success = iterate_mm_list(lruvec, walk, &mm);
@@ -4294,7 +4327,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,

VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));

- inc_max_seq(lruvec, can_swap);
+ inc_max_seq(lruvec, can_swap, force_scan);
/* either this sees any waiters or they will see updated max_seq */
if (wq_has_sleeper(&lruvec->mm_state.wait))
wake_up_all(&lruvec->mm_state.wait);
@@ -4393,7 +4426,7 @@ static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned
}

if (nr_to_scan && need_aging)
- try_to_inc_max_seq(lruvec, max_seq, sc, swappiness);
+ try_to_inc_max_seq(lruvec, max_seq, sc, swappiness, false);

return true;
}
@@ -4955,7 +4988,7 @@ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *
if (current_is_kswapd())
return 0;

- if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap))
+ if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false))
return nr_to_scan;

return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
@@ -5244,6 +5277,362 @@ static struct attribute_group lru_gen_attr_group = {
.attrs = lru_gen_attrs,
};

+/******************************************************************************
+ * debugfs interface
+ ******************************************************************************/
+
+static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
+{
+ struct mem_cgroup *memcg;
+ loff_t nr_to_skip = *pos;
+
+ m->private = kvmalloc(PATH_MAX, GFP_KERNEL);
+ if (!m->private)
+ return ERR_PTR(-ENOMEM);
+
+ memcg = mem_cgroup_iter(NULL, NULL, NULL);
+ do {
+ int nid;
+
+ for_each_node_state(nid, N_MEMORY) {
+ if (!nr_to_skip--)
+ return get_lruvec(memcg, nid);
+ }
+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+
+ return NULL;
+}
+
+static void lru_gen_seq_stop(struct seq_file *m, void *v)
+{
+ if (!IS_ERR_OR_NULL(v))
+ mem_cgroup_iter_break(NULL, lruvec_memcg(v));
+
+ kvfree(m->private);
+ m->private = NULL;
+}
+
+static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ int nid = lruvec_pgdat(v)->node_id;
+ struct mem_cgroup *memcg = lruvec_memcg(v);
+
+ ++*pos;
+
+ nid = next_memory_node(nid);
+ if (nid == MAX_NUMNODES) {
+ memcg = mem_cgroup_iter(NULL, memcg, NULL);
+ if (!memcg)
+ return NULL;
+
+ nid = first_memory_node;
+ }
+
+ return get_lruvec(memcg, nid);
+}
+
+static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
+ unsigned long max_seq, unsigned long *min_seq,
+ unsigned long seq)
+{
+ int i;
+ int type, tier;
+ int hist = lru_hist_from_seq(seq);
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ for (tier = 0; tier < MAX_NR_TIERS; tier++) {
+ seq_printf(m, " %10d", tier);
+ for (type = 0; type < ANON_AND_FILE; type++) {
+ const char *s = " ";
+ unsigned long n[3] = {};
+
+ if (seq == max_seq) {
+ s = "RT ";
+ n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
+ n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
+ } else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
+ s = "rep";
+ n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
+ n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
+ if (tier)
+ n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]);
+ }
+
+ for (i = 0; i < 3; i++)
+ seq_printf(m, " %10lu%c", n[i], s[i]);
+ }
+ seq_putc(m, '\n');
+ }
+
+ seq_puts(m, " ");
+ for (i = 0; i < NR_MM_STATS; i++) {
+ const char *s = " ";
+ unsigned long n = 0;
+
+ if (seq == max_seq && NR_HIST_GENS == 1) {
+ s = "TOYDFA";
+ n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
+ } else if (seq != max_seq && NR_HIST_GENS > 1) {
+ s = "toydfa";
+ n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
+ }
+
+ seq_printf(m, " %10lu%c", n, s[i]);
+ }
+ seq_putc(m, '\n');
+}
+
+static int lru_gen_seq_show(struct seq_file *m, void *v)
+{
+ unsigned long seq;
+ bool full = !debugfs_real_fops(m->file)->write;
+ struct lruvec *lruvec = v;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ int nid = lruvec_pgdat(lruvec)->node_id;
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ DEFINE_MAX_SEQ(lruvec);
+ DEFINE_MIN_SEQ(lruvec);
+
+ if (nid == first_memory_node) {
+ const char *path = memcg ? m->private : "";
+
+#ifdef CONFIG_MEMCG
+ if (memcg)
+ cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
+#endif
+ seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path);
+ }
+
+ seq_printf(m, " node %5d\n", nid);
+
+ if (!full)
+ seq = min_seq[LRU_GEN_ANON];
+ else if (max_seq >= MAX_NR_GENS)
+ seq = max_seq - MAX_NR_GENS + 1;
+ else
+ seq = 0;
+
+ for (; seq <= max_seq; seq++) {
+ int type, zone;
+ int gen = lru_gen_from_seq(seq);
+ unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
+
+ seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth));
+
+ for (type = 0; type < ANON_AND_FILE; type++) {
+ unsigned long size = 0;
+ char mark = full && seq < min_seq[type] ? 'x' : ' ';
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++)
+ size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+
+ seq_printf(m, " %10lu%c", size, mark);
+ }
+
+ seq_putc(m, '\n');
+
+ if (full)
+ lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
+ }
+
+ return 0;
+}
+
+static const struct seq_operations lru_gen_seq_ops = {
+ .start = lru_gen_seq_start,
+ .stop = lru_gen_seq_stop,
+ .next = lru_gen_seq_next,
+ .show = lru_gen_seq_show,
+};
+
+static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
+ bool can_swap, bool force_scan)
+{
+ DEFINE_MAX_SEQ(lruvec);
+ DEFINE_MIN_SEQ(lruvec);
+
+ if (seq < max_seq)
+ return 0;
+
+ if (seq > max_seq)
+ return -EINVAL;
+
+ if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq)
+ return -ERANGE;
+
+ try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, force_scan);
+
+ return 0;
+}
+
+static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
+ int swappiness, unsigned long nr_to_reclaim)
+{
+ DEFINE_MAX_SEQ(lruvec);
+
+ if (seq + MIN_NR_GENS > max_seq)
+ return -EINVAL;
+
+ sc->nr_reclaimed = 0;
+
+ while (!signal_pending(current)) {
+ DEFINE_MIN_SEQ(lruvec);
+
+ if (seq < min_seq[!swappiness])
+ return 0;
+
+ if (sc->nr_reclaimed >= nr_to_reclaim)
+ return 0;
+
+ if (!evict_folios(lruvec, sc, swappiness, NULL))
+ return 0;
+
+ cond_resched();
+ }
+
+ return -EINTR;
+}
+
+static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
+ struct scan_control *sc, int swappiness, unsigned long opt)
+{
+ struct lruvec *lruvec;
+ int err = -EINVAL;
+ struct mem_cgroup *memcg = NULL;
+
+ if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
+ return -EINVAL;
+
+ if (!mem_cgroup_disabled()) {
+ rcu_read_lock();
+ memcg = mem_cgroup_from_id(memcg_id);
+#ifdef CONFIG_MEMCG
+ if (memcg && !css_tryget(&memcg->css))
+ memcg = NULL;
+#endif
+ rcu_read_unlock();
+
+ if (!memcg)
+ return -EINVAL;
+ }
+
+ if (memcg_id != mem_cgroup_id(memcg))
+ goto done;
+
+ lruvec = get_lruvec(memcg, nid);
+
+ if (swappiness < 0)
+ swappiness = get_swappiness(lruvec, sc);
+ else if (swappiness > 200)
+ goto done;
+
+ switch (cmd) {
+ case '+':
+ err = run_aging(lruvec, seq, sc, swappiness, opt);
+ break;
+ case '-':
+ err = run_eviction(lruvec, seq, sc, swappiness, opt);
+ break;
+ }
+done:
+ mem_cgroup_put(memcg);
+
+ return err;
+}
+
+static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
+ size_t len, loff_t *pos)
+{
+ void *buf;
+ char *cur, *next;
+ unsigned int flags;
+ struct blk_plug plug;
+ int err = -EINVAL;
+ struct scan_control sc = {
+ .may_writepage = true,
+ .may_unmap = true,
+ .may_swap = true,
+ .reclaim_idx = MAX_NR_ZONES - 1,
+ .gfp_mask = GFP_KERNEL,
+ };
+
+ buf = kvmalloc(len + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, src, len)) {
+ kvfree(buf);
+ return -EFAULT;
+ }
+
+ if (!set_mm_walk(NULL)) {
+ kvfree(buf);
+ return -ENOMEM;
+ }
+
+ set_task_reclaim_state(current, &sc.reclaim_state);
+ flags = memalloc_noreclaim_save();
+ blk_start_plug(&plug);
+
+ next = buf;
+ next[len] = '\0';
+
+ while ((cur = strsep(&next, ",;\n"))) {
+ int n;
+ int end;
+ char cmd;
+ unsigned int memcg_id;
+ unsigned int nid;
+ unsigned long seq;
+ unsigned int swappiness = -1;
+ unsigned long opt = -1;
+
+ cur = skip_spaces(cur);
+ if (!*cur)
+ continue;
+
+ n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
+ &seq, &end, &swappiness, &end, &opt, &end);
+ if (n < 4 || cur[end]) {
+ err = -EINVAL;
+ break;
+ }
+
+ err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
+ if (err)
+ break;
+ }
+
+ blk_finish_plug(&plug);
+ memalloc_noreclaim_restore(flags);
+ set_task_reclaim_state(current, NULL);
+
+ clear_mm_walk();
+ kvfree(buf);
+
+ return err ? : len;
+}
+
+static int lru_gen_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &lru_gen_seq_ops);
+}
+
+static const struct file_operations lru_gen_rw_fops = {
+ .open = lru_gen_seq_open,
+ .read = seq_read,
+ .write = lru_gen_seq_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct file_operations lru_gen_ro_fops = {
+ .open = lru_gen_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
/******************************************************************************
* initialization
******************************************************************************/
@@ -5301,6 +5690,9 @@ static int __init init_lru_gen(void)
if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
pr_err("lru_gen: failed to create sysfs group\n");

+ debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
+ debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
+
return 0;
};
late_initcall(init_lru_gen);
--
2.36.1.476.g0c4daa206d-goog


From: Qi Zheng
Date: 2022-06-22 09:39:55

Subject: Re: [PATCH v12 12/14] mm: multi-gen LRU: debugfs interface



On 2022/6/14 15:16, Yu Zhao wrote:
> Add /sys/kernel/debug/lru_gen for working set estimation and proactive
> reclaim. These techniques are commonly used to optimize job scheduling
> (bin packing) in data centers [1][2].
>
> Compared with the page table-based approach and the PFN-based
> approach, this lruvec-based approach has the following advantages:
> 1. It offers better choices because it is aware of memcgs, NUMA nodes,
> shared mappings and unmapped page cache.
> 2. It is more scalable because it is O(nr_hot_pages), whereas the
> PFN-based approach is O(nr_total_pages).
>
> Add /sys/kernel/debug/lru_gen_full for debugging.
>
> [1] https://dl.acm.org/doi/10.1145/3297858.3304053
> [2] https://dl.acm.org/doi/10.1145/3503222.3507731
>
> Signed-off-by: Yu Zhao <[email protected]>
> Acked-by: Brian Geffon <[email protected]>
> Acked-by: Jan Alexander Steffens (heftig) <[email protected]>
> Acked-by: Oleksandr Natalenko <[email protected]>
> Acked-by: Steven Barrett <[email protected]>
> Acked-by: Suleiman Souhlal <[email protected]>
> Tested-by: Daniel Byrne <[email protected]>
> Tested-by: Donald Carr <[email protected]>
> Tested-by: Holger Hoffstätte <[email protected]>
> Tested-by: Konstantin Kharlamov <[email protected]>
> Tested-by: Shuang Zhai <[email protected]>
> Tested-by: Sofia Trinh <[email protected]>
> Tested-by: Vaibhav Jain <[email protected]>
> ---
> include/linux/nodemask.h | 1 +
> mm/vmscan.c | 412 ++++++++++++++++++++++++++++++++++++++-
> 2 files changed, 403 insertions(+), 10 deletions(-)
>

Hi Yu,

> +static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
> + size_t len, loff_t *pos)
> +{
> + void *buf;
> + char *cur, *next;
> + unsigned int flags;
> + struct blk_plug plug;
> + int err = -EINVAL;
> + struct scan_control sc = {
> + .may_writepage = true,
> + .may_unmap = true,
> + .may_swap = true,
> + .reclaim_idx = MAX_NR_ZONES - 1,
> + .gfp_mask = GFP_KERNEL,
> + };
> +
> + buf = kvmalloc(len + 1, GFP_KERNEL);
> + if (!buf)
> + return -ENOMEM;
> +
> + if (copy_from_user(buf, src, len)) {
> + kvfree(buf);
> + return -EFAULT;
> + }
> +
> + if (!set_mm_walk(NULL)) {

current->reclaim_state is dereferenced in set_mm_walk(), so calling
set_mm_walk() before set_task_reclaim_state(current, &sc.reclaim_state)
will cause a panic:

[ 1861.154916] BUG: kernel NULL pointer dereference, address:
0000000000000008
[ 1861.155720] #PF: supervisor read access in kernel mode
[ 1861.156263] #PF: error_code(0x0000) - not-present page
[ 1861.156805] PGD 0 P4D 0
[ 1861.157107] Oops: 0000 [#1] PREEMPT SMP PTI
[ 1861.157560] CPU: 5 PID: 1017 Comm: bash Not tainted 5.19.0-rc2+ #244
[ 1861.158227] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
BIOS rel-1.14.0-0-g14
[ 1861.159419] RIP: 0010:set_mm_walk+0x15/0x60
[ 1861.159878] Code: e8 30 5f 01 00 48 c7 43 70 00 00 00 00 5b c3 31 f6
eb e2 66 90 0f 1f f
[ 1861.161806] RSP: 0018:ffffc90006dd3d58 EFLAGS: 00010246
[ 1861.162356] RAX: 0000000000000000 RBX: 00005582747a70b0 RCX:
0000000000000000
[ 1861.163109] RDX: ffff88810a198000 RSI: 00005582747a70c1 RDI:
0000000000000000
[ 1861.163855] RBP: ffff888104f4e400 R08: 0000000000000000 R09:
ffff888100042400
[ 1861.164597] R10: 0000000000000000 R11: 0000000000000000 R12:
ffff888685896fc0
[ 1861.165334] R13: 00005582747a70b0 R14: ffff888103ef2e40 R15:
0000000000000011
[ 1861.166083] FS: 00007f843df57740(0000) GS:ffff888666b40000(0000)
knlGS:0000000000000000
[ 1861.166921] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 1861.167527] CR2: 0000000000000008 CR3: 0000000684e7e000 CR4:
00000000000006e0
[ 1861.168272] DR0: 0000000000000000 DR1: 0000000000000000 DR2:
0000000000000000
[ 1861.169020] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7:
0000000000000400
[ 1861.169867] Call Trace:
[ 1861.170159] <TASK>
[ 1861.170396] lru_gen_seq_write+0xbf/0x600
[ 1861.170837] ? _raw_spin_unlock+0x15/0x30
[ 1861.171272] ? wp_page_reuse+0x5f/0x70
[ 1861.171678] ? do_wp_page+0xda/0x3e0
[ 1861.172063] ? __handle_mm_fault+0x92f/0xeb0
[ 1861.172529] full_proxy_write+0x4d/0x70
[ 1861.172941] vfs_write+0xb8/0x2a0
[ 1861.173302] ksys_write+0x59/0xd0
[ 1861.173667] do_syscall_64+0x34/0x80
[ 1861.174055] entry_SYSCALL_64_after_hwframe+0x46/0xb0

> + kvfree(buf);
> + return -ENOMEM;
> + }
> +
> + set_task_reclaim_state(current, &sc.reclaim_state);
> + flags = memalloc_noreclaim_save();
> + blk_start_plug(&plug);
> +
> + next = buf;
> + next[len] = '\0';
> +
> + while ((cur = strsep(&next, ",;\n"))) {
> + int n;
> + int end;
> + char cmd;
> + unsigned int memcg_id;
> + unsigned int nid;
> + unsigned long seq;
> + unsigned int swappiness = -1;
> + unsigned long opt = -1;
> +
> + cur = skip_spaces(cur);
> + if (!*cur)
> + continue;
> +
> + n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
> + &seq, &end, &swappiness, &end, &opt, &end);
> + if (n < 4 || cur[end]) {
> + err = -EINVAL;
> + break;
> + }
> +
> + err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
> + if (err)
> + break;
> + }
> +
> + blk_finish_plug(&plug);
> + memalloc_noreclaim_restore(flags);
> + set_task_reclaim_state(current, NULL);
> +
> + clear_mm_walk();

Ditto, we can't call clear_mm_walk() after
set_task_reclaim_state(current, NULL).

Maybe it can be modified as follows:

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2422edc786eb..552e6ae5243e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -5569,12 +5569,12 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
return -EFAULT;
}

+ set_task_reclaim_state(current, &sc.reclaim_state);
if (!set_mm_walk(NULL)) {
kvfree(buf);
return -ENOMEM;
}

- set_task_reclaim_state(current, &sc.reclaim_state);
flags = memalloc_noreclaim_save();
blk_start_plug(&plug);

@@ -5609,9 +5609,9 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,

blk_finish_plug(&plug);
memalloc_noreclaim_restore(flags);
+ clear_mm_walk();
set_task_reclaim_state(current, NULL);

- clear_mm_walk();
kvfree(buf);

return err ? : len;

Thanks,
Qi

> + kvfree(buf);
> +
> + return err ? : len;
> +}
> +


--
Thanks,
Qi

From: Yu Zhao
Date: 2022-06-22 19:28:18

Subject: Re: [PATCH v12 12/14] mm: multi-gen LRU: debugfs interface

On Wed, Jun 22, 2022 at 3:16 AM Qi Zheng <[email protected]> wrote:

> > +static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
> > + size_t len, loff_t *pos)
> > +{
> > + void *buf;
> > + char *cur, *next;
> > + unsigned int flags;
> > + struct blk_plug plug;
> > + int err = -EINVAL;
> > + struct scan_control sc = {
> > + .may_writepage = true,
> > + .may_unmap = true,
> > + .may_swap = true,
> > + .reclaim_idx = MAX_NR_ZONES - 1,
> > + .gfp_mask = GFP_KERNEL,
> > + };
> > +
> > + buf = kvmalloc(len + 1, GFP_KERNEL);
> > + if (!buf)
> > + return -ENOMEM;
> > +
> > + if (copy_from_user(buf, src, len)) {
> > + kvfree(buf);
> > + return -EFAULT;
> > + }
> > +
> > + if (!set_mm_walk(NULL)) {
>
> current->reclaim_state is dereferenced in set_mm_walk(), so calling
> set_mm_walk() before set_task_reclaim_state(current, &sc.reclaim_state)
> will cause a panic:
>
> [ 1861.154916] BUG: kernel NULL pointer dereference, address:
> 0000000000000008

Thanks.

Apparently I shot myself in the foot with one of the nits between v11 and v12.

> > + kvfree(buf);
> > + return -ENOMEM;
> > + }
> > +
> > + set_task_reclaim_state(current, &sc.reclaim_state);
> > + flags = memalloc_noreclaim_save();
> > + blk_start_plug(&plug);
> > +
> > + next = buf;
> > + next[len] = '\0';
> > +
> > + while ((cur = strsep(&next, ",;\n"))) {
> > + int n;
> > + int end;
> > + char cmd;
> > + unsigned int memcg_id;
> > + unsigned int nid;
> > + unsigned long seq;
> > + unsigned int swappiness = -1;
> > + unsigned long opt = -1;
> > +
> > + cur = skip_spaces(cur);
> > + if (!*cur)
> > + continue;
> > +
> > + n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
> > + &seq, &end, &swappiness, &end, &opt, &end);
> > + if (n < 4 || cur[end]) {
> > + err = -EINVAL;
> > + break;
> > + }
> > +
> > + err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
> > + if (err)
> > + break;
> > + }
> > +
> > + blk_finish_plug(&plug);
> > + memalloc_noreclaim_restore(flags);
> > + set_task_reclaim_state(current, NULL);
> > +
> > + clear_mm_walk();
>
> Ditto, we can't call clear_mm_walk() after
> set_task_reclaim_state(current, NULL).
>
> Maybe it can be modified as follows:
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 2422edc786eb..552e6ae5243e 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -5569,12 +5569,12 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
> return -EFAULT;
> }
>
> + set_task_reclaim_state(current, &sc.reclaim_state);
> if (!set_mm_walk(NULL)) {
> kvfree(buf);
> return -ENOMEM;
> }
>
> - set_task_reclaim_state(current, &sc.reclaim_state);

We need a `goto` because otherwise we leave a dangling
`current->reclaim_state`. (I swear I had one.)
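
For reference, a minimal sketch of the reordered paths with such a goto
(the label name and the elided command loop are illustrative; note that
clear_mm_walk() also dereferences current->reclaim_state, so the unwind
below keeps it before set_task_reclaim_state(current, NULL)):

	/* set the reclaim state first; set_mm_walk() dereferences it */
	set_task_reclaim_state(current, &sc.reclaim_state);
	flags = memalloc_noreclaim_save();
	blk_start_plug(&plug);

	if (!set_mm_walk(NULL)) {
		err = -ENOMEM;
		goto done;	/* unwind instead of returning early */
	}

	/* ... parse and run commands as before ... */
done:
	/* safe even if set_mm_walk() failed: mm_walk is then NULL */
	clear_mm_walk();
	blk_finish_plug(&plug);
	memalloc_noreclaim_restore(flags);
	set_task_reclaim_state(current, NULL);

	kvfree(buf);

	return err ? : len;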

From: Qi Zheng
Date: 2022-06-23 02:56:25

Subject: Re: [PATCH v12 12/14] mm: multi-gen LRU: debugfs interface



On 2022/6/23 03:13, Yu Zhao wrote:
> On Wed, Jun 22, 2022 at 3:16 AM Qi Zheng <[email protected]> wrote:
>
>>> +static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
>>> + size_t len, loff_t *pos)
>>> +{
>>> + void *buf;
>>> + char *cur, *next;
>>> + unsigned int flags;
>>> + struct blk_plug plug;
>>> + int err = -EINVAL;
>>> + struct scan_control sc = {
>>> + .may_writepage = true,
>>> + .may_unmap = true,
>>> + .may_swap = true,
>>> + .reclaim_idx = MAX_NR_ZONES - 1,
>>> + .gfp_mask = GFP_KERNEL,
>>> + };
>>> +
>>> + buf = kvmalloc(len + 1, GFP_KERNEL);
>>> + if (!buf)
>>> + return -ENOMEM;
>>> +
>>> + if (copy_from_user(buf, src, len)) {
>>> + kvfree(buf);
>>> + return -EFAULT;
>>> + }
>>> +
>>> + if (!set_mm_walk(NULL)) {
>>
>> current->reclaim_state is dereferenced in set_mm_walk(), so calling
>> set_mm_walk() before set_task_reclaim_state(current, &sc.reclaim_state)
>> will cause a panic:
>>
>> [ 1861.154916] BUG: kernel NULL pointer dereference, address:
>> 0000000000000008
>
> Thanks.
>
> Apparently I shot myself in the foot with one of the nits between v11 and v12.
>
>>> + kvfree(buf);
>>> + return -ENOMEM;
>>> + }
>>> +
>>> + set_task_reclaim_state(current, &sc.reclaim_state);
>>> + flags = memalloc_noreclaim_save();
>>> + blk_start_plug(&plug);
>>> +
>>> + next = buf;
>>> + next[len] = '\0';
>>> +
>>> + while ((cur = strsep(&next, ",;\n"))) {
>>> + int n;
>>> + int end;
>>> + char cmd;
>>> + unsigned int memcg_id;
>>> + unsigned int nid;
>>> + unsigned long seq;
>>> + unsigned int swappiness = -1;
>>> + unsigned long opt = -1;
>>> +
>>> + cur = skip_spaces(cur);
>>> + if (!*cur)
>>> + continue;
>>> +
>>> + n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
>>> + &seq, &end, &swappiness, &end, &opt, &end);
>>> + if (n < 4 || cur[end]) {
>>> + err = -EINVAL;
>>> + break;
>>> + }
>>> +
>>> + err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
>>> + if (err)
>>> + break;
>>> + }
>>> +
>>> + blk_finish_plug(&plug);
>>> + memalloc_noreclaim_restore(flags);
>>> + set_task_reclaim_state(current, NULL);
>>> +
>>> + clear_mm_walk();
>>
>> Ditto, we can't call clear_mm_walk() after
>> set_task_reclaim_state(current, NULL).
>>
>> Maybe it can be modified as follows:
>>
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index 2422edc786eb..552e6ae5243e 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -5569,12 +5569,12 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
>> return -EFAULT;
>> }
>>
>> + set_task_reclaim_state(current, &sc.reclaim_state);
>> if (!set_mm_walk(NULL)) {
>> kvfree(buf);
>> return -ENOMEM;
>> }
>>
>> - set_task_reclaim_state(current, &sc.reclaim_state);
>
> We need a `goto` because otherwise we leave a dangling

Yes, right. With this:

Reviewed-by: Qi Zheng <[email protected]>

> `current->reclaim_state`. (I swear I had one.)

(I believe :))

--
Thanks,
Qi