2022-05-13 11:23:58

by Gang Li

Subject: [PATCH 0/5 v1] mm, oom: Introduce per numa node oom for CONSTRAINT_MEMORY_POLICY

TLDR:
If a mempolicy is in effect (oc->constraint == CONSTRAINT_MEMORY_POLICY), out_of_memory() will
select a victim on the specific node to kill, so that the kernel can avoid accidental kills on NUMA systems.

Problem:
Before this patch series, oom only kills the process with the highest memory usage,
by selecting the process with the highest oom_badness on the entire system.

This works fine on UMA systems, but may cause accidental kills on NUMA systems.

As shown below, if process c.out is bound to Node1 and keeps allocating pages from Node1,
a.out will be killed first. But killing a.out does not free any memory on Node1, so c.out
is killed next.

A lot of our AMD machines have 8 NUMA nodes. On these systems, there is a greater chance
of triggering this problem.

OOM before patches:
```
Per-node process memory usage (in MBs)
PID          Node 0      Node 1          Total
-----------  ----------  --------------  ----------
3095 a.out   3073.34     0.11            3073.45  (Killed first: maximum memory consumption)
3199 b.out   501.35      1500.00         2001.35
3805 c.out   1.52        (grow) 2248.00  2249.52  (Killed next: Node1 is full)
-----------  ----------  --------------  ----------
Total        3576.21     3748.11         7324.31
```

Solution:
We store per node rss in mm_rss_stat for each process.

If a page allocation with a mempolicy in effect (oc->constraint == CONSTRAINT_MEMORY_POLICY)
triggers oom, we calculate oom_badness using the rss counter of the corresponding node, and then
select the process with the highest oom_badness on that node to kill.
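
Roughly, the intended scoring looks like this (an illustrative sketch only, not the exact patch
code; the function name is made up, patch 5 derives the target node from oc->zonelist and
oc->nodemask, and MM_NO_TYPE is the "no per-type counter" index added in patch 1):

```
/* Sketch: score a task by its rss on the target node when the OOM is
 * mempolicy-constrained, otherwise by its system-wide rss. */
static long sketch_badness(struct task_struct *p, struct oom_control *oc, int nid)
{
	long points;

	if (oc->constraint == CONSTRAINT_MEMORY_POLICY)
		points = get_mm_counter(p->mm, MM_NO_TYPE, nid); /* rss on target node only */
	else
		points = get_mm_rss(p->mm);                      /* system-wide rss */

	points += get_mm_counter(p->mm, MM_SWAPENTS, NUMA_NO_NODE) +
		  mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	return points;
}
```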

OOM after patches:
```
Per-node process memory usage (in MBs)
PID          Node 0      Node 1          Total
-----------  ----------  --------------  ----------
3095 a.out   3073.34     0.11            3073.45
3199 b.out   501.35      1500.00         2001.35
3805 c.out   1.52        (grow) 2248.00  2249.52  (killed)
-----------  ----------  --------------  ----------
Total        3576.21     3748.11         7324.31
```

Gang Li (5):
mm: add a new parameter `node` to `get/add/inc/dec_mm_counter`
mm: add numa_count field for rss_stat
mm: add numa fields for tracepoint rss_stat
mm: enable per numa node rss_stat count
mm, oom: enable per numa node oom for CONSTRAINT_MEMORY_POLICY

arch/s390/mm/pgtable.c | 4 +-
fs/exec.c | 2 +-
fs/proc/base.c | 6 +-
fs/proc/task_mmu.c | 14 ++--
include/linux/mm.h | 59 ++++++++++++-----
include/linux/mm_types_task.h | 16 +++++
include/linux/oom.h | 2 +-
include/trace/events/kmem.h | 27 ++++++--
kernel/events/uprobes.c | 6 +-
kernel/fork.c | 70 +++++++++++++++++++-
mm/huge_memory.c | 13 ++--
mm/khugepaged.c | 4 +-
mm/ksm.c | 2 +-
mm/madvise.c | 2 +-
mm/memory.c | 116 ++++++++++++++++++++++++----------
mm/migrate.c | 2 +
mm/migrate_device.c | 2 +-
mm/oom_kill.c | 59 ++++++++++++-----
mm/rmap.c | 16 ++---
mm/swapfile.c | 4 +-
mm/userfaultfd.c | 2 +-
21 files changed, 317 insertions(+), 111 deletions(-)

--
2.20.1



2022-05-14 01:01:37

by Gang Li

Subject: [PATCH 4/5 v1] mm: enable per numa node rss_stat count

Now that all the infrastructure is ready, modify `get/add/inc/dec_mm_counter`,
`sync_mm_rss`, `add_mm_counter_fast` and `add_mm_rss_vec` to enable the per
numa node rss_stat count.

Signed-off-by: Gang Li <[email protected]>
---
include/linux/mm.h | 42 +++++++++++++++++++++++++++++++++++-------
mm/memory.c | 20 ++++++++++++++++++--
2 files changed, 53 insertions(+), 9 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index cde5529285d6..f0f21065b81b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1994,8 +1994,18 @@ static inline bool get_user_page_fast_only(unsigned long addr,
*/
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member, int node)
{
- long val = atomic_long_read(&mm->rss_stat.count[member]);
+ long val;

+ WARN_ON(node == NUMA_NO_NODE && member == MM_NO_TYPE);
+
+ if (node == NUMA_NO_NODE)
+ val = atomic_long_read(&mm->rss_stat.count[member]);
+ else
+#ifdef CONFIG_NUMA
+ val = atomic_long_read(&mm->rss_stat.numa_count[node]);
+#else
+ val = 0;
+#endif
#ifdef SPLIT_RSS_COUNTING
/*
* counter is updated in asynchronous manner and may go to minus.
@@ -2012,23 +2022,41 @@ void mm_trace_rss_stat(struct mm_struct *mm, int member, long member_count, int

static inline void add_mm_counter(struct mm_struct *mm, int member, long value, int node)
{
- long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+ long member_count = 0, numa_count = 0;

- mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, value);
+ if (member != MM_NO_TYPE)
+ member_count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+#ifdef CONFIG_NUMA
+ if (node != NUMA_NO_NODE)
+ numa_count = atomic_long_add_return(value, &mm->rss_stat.numa_count[node]);
+#endif
+ mm_trace_rss_stat(mm, member, member_count, node, numa_count, value);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member, int node)
{
- long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+ long member_count = 0, numa_count = 0;

- mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, 1);
+ if (member != MM_NO_TYPE)
+ member_count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+#ifdef CONFIG_NUMA
+ if (node != NUMA_NO_NODE)
+ numa_count = atomic_long_inc_return(&mm->rss_stat.numa_count[node]);
+#endif
+ mm_trace_rss_stat(mm, member, member_count, node, numa_count, 1);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member, int node)
{
- long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+ long member_count = 0, numa_count = 0;

- mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, -1);
+ if (member != MM_NO_TYPE)
+ member_count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+#ifdef CONFIG_NUMA
+ if (node != NUMA_NO_NODE)
+ numa_count = atomic_long_dec_return(&mm->rss_stat.numa_count[node]);
+#endif
+ mm_trace_rss_stat(mm, member, member_count, node, numa_count, -1);
}

/* Optimized variant when page is already known not to be PageAnon */
diff --git a/mm/memory.c b/mm/memory.c
index 2d3040a190f6..f7b67da772b2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -188,6 +188,14 @@ void sync_mm_rss(struct mm_struct *mm)
current->rss_stat.count[i] = 0;
}
}
+#ifdef CONFIG_NUMA
+ for_each_node(i) {
+ if (current->rss_stat.numa_count[i]) {
+ add_mm_counter(mm, MM_NO_TYPE, current->rss_stat.numa_count[i], i);
+ current->rss_stat.numa_count[i] = 0;
+ }
+ }
+#endif
current->rss_stat.events = 0;
}

@@ -195,9 +203,12 @@ static void add_mm_counter_fast(struct mm_struct *mm, int member, int val, int n
{
struct task_struct *task = current;

- if (likely(task->mm == mm))
+ if (likely(task->mm == mm)) {
task->rss_stat.count[member] += val;
- else
+#ifdef CONFIG_NUMA
+ task->rss_stat.numa_count[node] += val;
+#endif
+ } else
add_mm_counter(mm, member, val, node);
}
#define inc_mm_counter_fast(mm, member, node) add_mm_counter_fast(mm, member, 1, node)
@@ -508,6 +519,11 @@ static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss, int *numa_rss)
for (i = 0; i < NR_MM_COUNTERS; i++)
if (rss[i])
add_mm_counter(mm, i, rss[i], NUMA_NO_NODE);
+#ifdef CONFIG_NUMA
+ for_each_node(i)
+ if (numa_rss[i] != 0)
+ add_mm_counter(mm, MM_NO_TYPE, numa_rss[i], i);
+#endif
}

/*
--
2.20.1


2022-05-14 01:23:30

by Gang Li

Subject: [PATCH 1/5 v1] mm: add a new parameter `node` to `get/add/inc/dec_mm_counter`

Add a new parameter `node` to the mm counter helpers for counting
per-process, per-node rss.

Since pages can be migrated between nodes, `remove_migration_pte`
also needs to call `add_mm_counter`.

Note that `MM_SWAPENTS` entries do not reside on any node, so when
add_mm_counter is used to modify rss_stat.count[MM_SWAPENTS], its
`node` argument should be `NUMA_NO_NODE`.

There is no need to modify `resident_page_types`, because
`MM_NO_TYPE` is not used in `check_mm`.

Signed-off-by: Gang Li <[email protected]>
---
arch/s390/mm/pgtable.c | 4 +-
fs/exec.c | 2 +-
fs/proc/task_mmu.c | 14 +++---
include/linux/mm.h | 14 +++---
include/linux/mm_types_task.h | 10 ++++
kernel/events/uprobes.c | 6 +--
mm/huge_memory.c | 13 ++---
mm/khugepaged.c | 4 +-
mm/ksm.c | 2 +-
mm/madvise.c | 2 +-
mm/memory.c | 91 +++++++++++++++++++++++------------
mm/migrate.c | 2 +
mm/migrate_device.c | 2 +-
mm/oom_kill.c | 16 +++---
mm/rmap.c | 16 +++---
mm/swapfile.c | 4 +-
mm/userfaultfd.c | 2 +-
17 files changed, 124 insertions(+), 80 deletions(-)

diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 697df02362af..d44198c5929f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -703,11 +703,11 @@ void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
if (!non_swap_entry(entry))
- dec_mm_counter(mm, MM_SWAPENTS);
+ dec_mm_counter(mm, MM_SWAPENTS, NUMA_NO_NODE);
else if (is_migration_entry(entry)) {
struct page *page = pfn_swap_entry_to_page(entry);

- dec_mm_counter(mm, mm_counter(page));
+ dec_mm_counter(mm, mm_counter(page), page_to_nid(page));
}
free_swap_and_cache(entry);
}
diff --git a/fs/exec.c b/fs/exec.c
index e3e55d5e0be1..6c82393b1720 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -192,7 +192,7 @@ static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
return;

bprm->vma_pages = pages;
- add_mm_counter(mm, MM_ANONPAGES, diff);
+ add_mm_counter(mm, MM_ANONPAGES, diff, NUMA_NO_NODE);
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f46060eb91b5..5cf65327fa6d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -33,9 +33,9 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
unsigned long text, lib, swap, anon, file, shmem;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

- anon = get_mm_counter(mm, MM_ANONPAGES);
- file = get_mm_counter(mm, MM_FILEPAGES);
- shmem = get_mm_counter(mm, MM_SHMEMPAGES);
+ anon = get_mm_counter(mm, MM_ANONPAGES, NUMA_NO_NODE);
+ file = get_mm_counter(mm, MM_FILEPAGES, NUMA_NO_NODE);
+ shmem = get_mm_counter(mm, MM_SHMEMPAGES, NUMA_NO_NODE);

/*
* Note: to minimize their overhead, mm maintains hiwater_vm and
@@ -56,7 +56,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
text = min(text, mm->exec_vm << PAGE_SHIFT);
lib = (mm->exec_vm << PAGE_SHIFT) - text;

- swap = get_mm_counter(mm, MM_SWAPENTS);
+ swap = get_mm_counter(mm, MM_SWAPENTS, NUMA_NO_NODE);
SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
@@ -89,12 +89,12 @@ unsigned long task_statm(struct mm_struct *mm,
unsigned long *shared, unsigned long *text,
unsigned long *data, unsigned long *resident)
{
- *shared = get_mm_counter(mm, MM_FILEPAGES) +
- get_mm_counter(mm, MM_SHMEMPAGES);
+ *shared = get_mm_counter(mm, MM_FILEPAGES, NUMA_NO_NODE) +
+ get_mm_counter(mm, MM_SHMEMPAGES, NUMA_NO_NODE);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
*data = mm->data_vm + mm->stack_vm;
- *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
+ *resident = *shared + get_mm_counter(mm, MM_ANONPAGES, NUMA_NO_NODE);
return mm->total_vm;
}

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9f44254af8ce..1b6c2e912ec8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1992,7 +1992,7 @@ static inline bool get_user_page_fast_only(unsigned long addr,
/*
* per-process(per-mm_struct) statistics.
*/
-static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
+static inline unsigned long get_mm_counter(struct mm_struct *mm, int member, int node)
{
long val = atomic_long_read(&mm->rss_stat.count[member]);

@@ -2009,21 +2009,21 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)

void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);

-static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value, int node)
{
long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);

mm_trace_rss_stat(mm, member, count);
}

-static inline void inc_mm_counter(struct mm_struct *mm, int member)
+static inline void inc_mm_counter(struct mm_struct *mm, int member, int node)
{
long count = atomic_long_inc_return(&mm->rss_stat.count[member]);

mm_trace_rss_stat(mm, member, count);
}

-static inline void dec_mm_counter(struct mm_struct *mm, int member)
+static inline void dec_mm_counter(struct mm_struct *mm, int member, int node)
{
long count = atomic_long_dec_return(&mm->rss_stat.count[member]);

@@ -2047,9 +2047,9 @@ static inline int mm_counter(struct page *page)

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
- return get_mm_counter(mm, MM_FILEPAGES) +
- get_mm_counter(mm, MM_ANONPAGES) +
- get_mm_counter(mm, MM_SHMEMPAGES);
+ return get_mm_counter(mm, MM_FILEPAGES, NUMA_NO_NODE) +
+ get_mm_counter(mm, MM_ANONPAGES, NUMA_NO_NODE) +
+ get_mm_counter(mm, MM_SHMEMPAGES, NUMA_NO_NODE);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index c1bc6731125c..3e7da8c7ab95 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -48,6 +48,16 @@ enum {
NR_MM_COUNTERS
};

+/*
+ * This macro should only be used in committing local values, like sync_mm_rss,
+ * add_mm_rss_vec. It means don't count per-mm-type, only count per-node in
+ * mm_stat.
+ *
+ * `MM_NO_TYPE` must equals to `NR_MM_COUNTERS`, since we will use it in
+ * `TRACE_MM_PAGES`.
+ */
+#define MM_NO_TYPE NR_MM_COUNTERS
+
#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information, */
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 6418083901d4..f8cd234084fe 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -184,11 +184,11 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
lru_cache_add_inactive_or_unevictable(new_page, vma);
} else
/* no new page, just dec_mm_counter for old_page */
- dec_mm_counter(mm, MM_ANONPAGES);
+ dec_mm_counter(mm, MM_ANONPAGES, page_to_nid(old_page));

if (!PageAnon(old_page)) {
- dec_mm_counter(mm, mm_counter_file(old_page));
- inc_mm_counter(mm, MM_ANONPAGES);
+ dec_mm_counter(mm, mm_counter_file(old_page), page_to_nid(old_page));
+ inc_mm_counter(mm, MM_ANONPAGES, page_to_nid(new_page));
}

flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c468fee595ff..b2c0fd668d01 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -652,7 +652,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
- add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR, page_to_nid(page));
mm_inc_nr_ptes(vma->vm_mm);
spin_unlock(vmf->ptl);
count_vm_event(THP_FAULT_ALLOC);
@@ -1064,7 +1064,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd = pmd_swp_mkuffd_wp(pmd);
set_pmd_at(src_mm, addr, src_pmd, pmd);
}
- add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR, page_to_nid(pmd_page(*dst_pmd)));
mm_inc_nr_ptes(dst_mm);
pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
if (!userfaultfd_wp(dst_vma))
@@ -1114,7 +1114,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,

get_page(src_page);
page_dup_rmap(src_page, true);
- add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR, page_to_nid(src_page));
out_zero_page:
mm_inc_nr_ptes(dst_mm);
pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
@@ -1597,11 +1597,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,

if (PageAnon(page)) {
zap_deposited_table(tlb->mm, pmd);
- add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+ add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR, page_to_nid(page));
} else {
if (arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
- add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
+ add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR,
+ page_to_nid(page));
}

spin_unlock(ptl);
@@ -1981,7 +1982,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
page_remove_rmap(page, vma, true);
put_page(page);
}
- add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
+ add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR, page_to_nid(page));
return;
}

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a4e5eaf3eb01..3ceaae2c24c0 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -742,7 +742,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,

if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
clear_user_highpage(page, address);
- add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1, page_to_nid(page));
if (is_zero_pfn(pte_pfn(pteval))) {
/*
* ptl mostly unnecessary.
@@ -1510,7 +1510,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
/* step 3: set proper refcount and mm_counters. */
if (count) {
page_ref_sub(hpage, count);
- add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
+ add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count, page_to_nid(hpage));
}

/* step 4: collapse pmd */
diff --git a/mm/ksm.c b/mm/ksm.c
index 063a48eeb5ee..1185fa086a31 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1161,7 +1161,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
* will get wrong values in /proc, and a BUG message in dmesg
* when tearing down the mm.
*/
- dec_mm_counter(mm, MM_ANONPAGES);
+ dec_mm_counter(mm, MM_ANONPAGES, page_to_nid(page));
}

flush_cache_page(vma, addr, pte_pfn(*ptep));
diff --git a/mm/madvise.c b/mm/madvise.c
index 1873616a37d2..819a1cf47d7d 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -704,7 +704,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
if (current->mm == mm)
sync_mm_rss(mm);

- add_mm_counter(mm, MM_SWAPENTS, nr_swap);
+ add_mm_counter(mm, MM_SWAPENTS, nr_swap, NUMA_NO_NODE);
}
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(orig_pte, ptl);
diff --git a/mm/memory.c b/mm/memory.c
index 76e3af9639d9..adb07fb0b483 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -158,6 +158,8 @@ EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

+static DEFINE_PER_CPU(int, percpu_numa_rss[MAX_NUMNODES]);
+
/*
* CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
*/
@@ -181,24 +183,24 @@ void sync_mm_rss(struct mm_struct *mm)

for (i = 0; i < NR_MM_COUNTERS; i++) {
if (current->rss_stat.count[i]) {
- add_mm_counter(mm, i, current->rss_stat.count[i]);
+ add_mm_counter(mm, i, current->rss_stat.count[i], NUMA_NO_NODE);
current->rss_stat.count[i] = 0;
}
}
current->rss_stat.events = 0;
}

-static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
+static void add_mm_counter_fast(struct mm_struct *mm, int member, int val, int node)
{
struct task_struct *task = current;

if (likely(task->mm == mm))
task->rss_stat.count[member] += val;
else
- add_mm_counter(mm, member, val);
+ add_mm_counter(mm, member, val, node);
}
-#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
-#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
+#define inc_mm_counter_fast(mm, member, node) add_mm_counter_fast(mm, member, 1, node)
+#define dec_mm_counter_fast(mm, member, node) add_mm_counter_fast(mm, member, -1, node)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH (64)
@@ -211,8 +213,8 @@ static void check_sync_rss_stat(struct task_struct *task)
}
#else /* SPLIT_RSS_COUNTING */

-#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
-#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
+#define inc_mm_counter_fast(mm, member, node) inc_mm_counter(mm, member, node)
+#define dec_mm_counter_fast(mm, member, node) dec_mm_counter(mm, member, node)

static void check_sync_rss_stat(struct task_struct *task)
{
@@ -490,12 +492,13 @@ int __pte_alloc_kernel(pmd_t *pmd)
return 0;
}

-static inline void init_rss_vec(int *rss)
+static inline void init_rss_vec(int *rss, int *numa_rss)
{
memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
+ memset(numa_rss, 0, sizeof(int) * num_possible_nodes());
}

-static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
+static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss, int *numa_rss)
{
int i;

@@ -503,7 +506,7 @@ static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
sync_mm_rss(mm);
for (i = 0; i < NR_MM_COUNTERS; i++)
if (rss[i])
- add_mm_counter(mm, i, rss[i]);
+ add_mm_counter(mm, i, rss[i], NUMA_NO_NODE);
}

/*
@@ -771,7 +774,8 @@ try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
- struct vm_area_struct *src_vma, unsigned long addr, int *rss)
+ struct vm_area_struct *src_vma, unsigned long addr, int *rss,
+ int *numa_rss)
{
unsigned long vm_flags = dst_vma->vm_flags;
pte_t pte = *src_pte;
@@ -791,10 +795,12 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
spin_unlock(&mmlist_lock);
}
rss[MM_SWAPENTS]++;
+ numa_rss[page_to_nid(pte_page(*dst_pte))]++;
} else if (is_migration_entry(entry)) {
page = pfn_swap_entry_to_page(entry);

rss[mm_counter(page)]++;
+ numa_rss[page_to_nid(page)]++;

if (is_writable_migration_entry(entry) &&
is_cow_mapping(vm_flags)) {
@@ -825,6 +831,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
*/
get_page(page);
rss[mm_counter(page)]++;
+ numa_rss[page_to_nid(page)]++;
page_dup_rmap(page, false);

/*
@@ -884,7 +891,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
- struct page **prealloc, pte_t pte, struct page *page)
+ struct page **prealloc, pte_t pte, struct page *page, int *numa_rss)
{
struct page *new_page;

@@ -918,6 +925,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
page_add_new_anon_rmap(new_page, dst_vma, addr, false);
lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
rss[mm_counter(new_page)]++;
+ rss[page_to_nid(new_page)]++;

/* All done, just insert the new page copy in the child */
pte = mk_pte(new_page, dst_vma->vm_page_prot);
@@ -936,7 +944,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
static inline int
copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
- struct page **prealloc)
+ struct page **prealloc, int *numa_rss)
{
struct mm_struct *src_mm = src_vma->vm_mm;
unsigned long vm_flags = src_vma->vm_flags;
@@ -948,13 +956,14 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
int retval;

retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
- addr, rss, prealloc, pte, page);
+ addr, rss, prealloc, pte, page, numa_rss);
if (retval <= 0)
return retval;

get_page(page);
page_dup_rmap(page, false);
rss[mm_counter(page)]++;
+ numa_rss[page_to_nid(page)]++;
}

/*
@@ -1012,12 +1021,16 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
spinlock_t *src_ptl, *dst_ptl;
int progress, ret = 0;
int rss[NR_MM_COUNTERS];
+ int *numa_rss;
swp_entry_t entry = (swp_entry_t){0};
struct page *prealloc = NULL;
+ numa_rss = kcalloc(num_possible_nodes(), sizeof(int), GFP_KERNEL);
+ if (unlikely(!numa_rss))
+ numa_rss = (int *)get_cpu_ptr(&percpu_numa_rss);

again:
progress = 0;
- init_rss_vec(rss);
+ init_rss_vec(rss, numa_rss);

dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
if (!dst_pte) {
@@ -1050,7 +1063,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
ret = copy_nonpresent_pte(dst_mm, src_mm,
dst_pte, src_pte,
dst_vma, src_vma,
- addr, rss);
+ addr, rss, numa_rss);
if (ret == -EIO) {
entry = pte_to_swp_entry(*src_pte);
break;
@@ -1069,7 +1082,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
}
/* copy_present_pte() will clear `*prealloc' if consumed */
ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
- addr, rss, &prealloc);
+ addr, rss, &prealloc, numa_rss);
/*
* If we need a pre-allocated page for this pte, drop the
* locks, allocate, and try again.
@@ -1092,7 +1105,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
arch_leave_lazy_mmu_mode();
spin_unlock(src_ptl);
pte_unmap(orig_src_pte);
- add_mm_rss_vec(dst_mm, rss);
+ add_mm_rss_vec(dst_mm, rss, numa_rss);
pte_unmap_unlock(orig_dst_pte, dst_ptl);
cond_resched();

@@ -1121,6 +1134,10 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
out:
if (unlikely(prealloc))
put_page(prealloc);
+ if (unlikely(numa_rss == (int *)raw_cpu_ptr(&percpu_numa_rss)))
+ put_cpu_ptr(numa_rss);
+ else
+ kfree(numa_rss);
return ret;
}

@@ -1344,14 +1361,18 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
struct mm_struct *mm = tlb->mm;
int force_flush = 0;
int rss[NR_MM_COUNTERS];
+ int *numa_rss;
spinlock_t *ptl;
pte_t *start_pte;
pte_t *pte;
swp_entry_t entry;
+ numa_rss = kcalloc(num_possible_nodes(), sizeof(int), GFP_KERNEL);
+ if (unlikely(!numa_rss))
+ numa_rss = (int *)get_cpu_ptr(&percpu_numa_rss);

tlb_change_page_size(tlb, PAGE_SIZE);
again:
- init_rss_vec(rss);
+ init_rss_vec(rss, numa_rss);
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte = start_pte;
flush_tlb_batched_pending(mm);
@@ -1386,6 +1407,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
mark_page_accessed(page);
}
rss[mm_counter(page)]--;
+ numa_rss[page_to_nid(page)]--;
page_remove_rmap(page, vma, false);
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
@@ -1404,6 +1426,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
if (unlikely(!should_zap_page(details, page)))
continue;
rss[mm_counter(page)]--;
+ numa_rss[page_to_nid(page)]--;
if (is_device_private_entry(entry))
page_remove_rmap(page, vma, false);
put_page(page);
@@ -1419,6 +1442,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
if (!should_zap_page(details, page))
continue;
rss[mm_counter(page)]--;
+ numa_rss[page_to_nid(page)]--;
} else if (is_hwpoison_entry(entry)) {
if (!should_zap_cows(details))
continue;
@@ -1429,7 +1453,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
} while (pte++, addr += PAGE_SIZE, addr != end);

- add_mm_rss_vec(mm, rss);
+ add_mm_rss_vec(mm, rss, numa_rss);
arch_leave_lazy_mmu_mode();

/* Do the actual TLB flush before dropping ptl */
@@ -1453,6 +1477,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
goto again;
}

+ if (unlikely(numa_rss == (int *)raw_cpu_ptr(&percpu_numa_rss)))
+ put_cpu_ptr(numa_rss);
+ else
+ kfree(numa_rss);
return addr;
}

@@ -1767,7 +1795,7 @@ static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
return -EBUSY;
/* Ok, finally just insert the thing.. */
get_page(page);
- inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
+ inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page), page_to_nid(page));
page_add_file_rmap(page, vma, false);
set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
return 0;
@@ -3053,11 +3081,14 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm,
- mm_counter_file(old_page));
- inc_mm_counter_fast(mm, MM_ANONPAGES);
+ mm_counter_file(old_page), page_to_nid(old_page));
+ inc_mm_counter_fast(mm, MM_ANONPAGES, page_to_nid(new_page));
+ } else {
+ dec_mm_counter_fast(mm, MM_ANONPAGES, page_to_nid(old_page));
+ inc_mm_counter_fast(mm, MM_ANONPAGES, page_to_nid(new_page));
}
} else {
- inc_mm_counter_fast(mm, MM_ANONPAGES);
+ inc_mm_counter_fast(mm, MM_ANONPAGES, page_to_nid(new_page));
}
flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
@@ -3685,8 +3716,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (should_try_to_free_swap(page, vma, vmf->flags))
try_to_free_swap(page);

- inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
- dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
+ inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES, page_to_nid(page));
+ dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS, NUMA_NO_NODE);
pte = mk_pte(page, vma->vm_page_prot);

/*
@@ -3861,7 +3892,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
return handle_userfault(vmf, VM_UFFD_MISSING);
}

- inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
+ inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES, page_to_nid(page));
page_add_new_anon_rmap(page, vma, vmf->address, false);
lru_cache_add_inactive_or_unevictable(page, vma);
setpte:
@@ -4002,7 +4033,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
if (write)
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);

- add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
+ add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR, page_to_nid(page));
page_add_file_rmap(page, vma, true);

/*
@@ -4048,11 +4079,11 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
/* copy-on-write page */
if (write && !(vma->vm_flags & VM_SHARED)) {
- inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
+ inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES, page_to_nid(page));
page_add_new_anon_rmap(page, vma, addr, false);
lru_cache_add_inactive_or_unevictable(page, vma);
} else {
- inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
+ inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page), page_to_nid(page));
page_add_file_rmap(page, vma, false);
}
set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
diff --git a/mm/migrate.c b/mm/migrate.c
index 6c31ee1e1c9b..8554c7a64928 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -253,6 +253,8 @@ static bool remove_migration_pte(struct folio *folio,

/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, pvmw.address, pvmw.pte);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, -compound_nr(old), page_to_nid(old));
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, compound_nr(&folio->page), page_to_nid(&folio->page));
}

return true;
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 70c7dc05bbfc..eedd053febd8 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -609,7 +609,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
if (userfaultfd_missing(vma))
goto unlock_abort;

- inc_mm_counter(mm, MM_ANONPAGES);
+ inc_mm_counter(mm, MM_ANONPAGES, page_to_nid(page));
page_add_new_anon_rmap(page, vma, addr, false);
if (!is_zone_device_page(page))
lru_cache_add_inactive_or_unevictable(page, vma);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 49d7df39b02d..757f5665ae94 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -227,7 +227,7 @@ long oom_badness(struct task_struct *p, unsigned long totalpages)
* The baseline for the badness score is the proportion of RAM that each
* task's rss, pagetable and swap space use.
*/
- points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
+ points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS, NUMA_NO_NODE) +
mm_pgtables_bytes(p->mm) / PAGE_SIZE;
task_unlock(p);

@@ -403,7 +403,7 @@ static int dump_task(struct task_struct *p, void *arg)
task->pid, from_kuid(&init_user_ns, task_uid(task)),
task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
mm_pgtables_bytes(task->mm),
- get_mm_counter(task->mm, MM_SWAPENTS),
+ get_mm_counter(task->mm, MM_SWAPENTS, NUMA_NO_NODE),
task->signal->oom_score_adj, task->comm);
task_unlock(task);

@@ -593,9 +593,9 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)

pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
task_pid_nr(tsk), tsk->comm,
- K(get_mm_counter(mm, MM_ANONPAGES)),
- K(get_mm_counter(mm, MM_FILEPAGES)),
- K(get_mm_counter(mm, MM_SHMEMPAGES)));
+ K(get_mm_counter(mm, MM_ANONPAGES, NUMA_NO_NODE)),
+ K(get_mm_counter(mm, MM_FILEPAGES, NUMA_NO_NODE)),
+ K(get_mm_counter(mm, MM_SHMEMPAGES, NUMA_NO_NODE)));
out_finish:
trace_finish_task_reaping(tsk->pid);
out_unlock:
@@ -917,9 +917,9 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
mark_oom_victim(victim);
pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
- K(get_mm_counter(mm, MM_ANONPAGES)),
- K(get_mm_counter(mm, MM_FILEPAGES)),
- K(get_mm_counter(mm, MM_SHMEMPAGES)),
+ K(get_mm_counter(mm, MM_ANONPAGES, NUMA_NO_NODE)),
+ K(get_mm_counter(mm, MM_FILEPAGES, NUMA_NO_NODE)),
+ K(get_mm_counter(mm, MM_SHMEMPAGES, NUMA_NO_NODE)),
from_kuid(&init_user_ns, task_uid(victim)),
mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
task_unlock(victim);
diff --git a/mm/rmap.c b/mm/rmap.c
index fedb82371efe..1566689476fc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1549,7 +1549,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
pvmw.pte, pteval,
vma_mmu_pagesize(vma));
} else {
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(&folio->page), page_to_nid(&folio->page));
set_pte_at(mm, address, pvmw.pte, pteval);
}

@@ -1564,7 +1564,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
* migration) will not expect userfaults on already
* copied pages.
*/
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(&folio->page), page_to_nid(&folio->page));
/* We have to invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm, address,
address + PAGE_SIZE);
@@ -1615,7 +1615,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
/* Invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm,
address, address + PAGE_SIZE);
- dec_mm_counter(mm, MM_ANONPAGES);
+ dec_mm_counter(mm, MM_ANONPAGES, page_to_nid(&folio->page));
goto discard;
}

@@ -1648,8 +1648,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
list_add(&mm->mmlist, &init_mm.mmlist);
spin_unlock(&mmlist_lock);
}
- dec_mm_counter(mm, MM_ANONPAGES);
- inc_mm_counter(mm, MM_SWAPENTS);
+ dec_mm_counter(mm, MM_ANONPAGES, page_to_nid(&folio->page));
+ inc_mm_counter(mm, MM_SWAPENTS, NUMA_NO_NODE);
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
@@ -1671,7 +1671,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
*
* See Documentation/vm/mmu_notifier.rst
*/
- dec_mm_counter(mm, mm_counter_file(&folio->page));
+ dec_mm_counter(mm, mm_counter_file(&folio->page), page_to_nid(&folio->page));
}
discard:
/*
@@ -1896,7 +1896,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
pvmw.pte, pteval,
vma_mmu_pagesize(vma));
} else {
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(&folio->page), page_to_nid(&folio->page));
set_pte_at(mm, address, pvmw.pte, pteval);
}

@@ -1911,7 +1911,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
* migration) will not expect userfaults on already
* copied pages.
*/
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(&folio->page), page_to_nid(&folio->page));
/* We have to invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm, address,
address + PAGE_SIZE);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 63c61f8b2611..098bdb58109a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1796,8 +1796,8 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
goto out;
}

- dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
- inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
+ dec_mm_counter(vma->vm_mm, MM_SWAPENTS, NUMA_NO_NODE);
+ inc_mm_counter(vma->vm_mm, MM_ANONPAGES, page_to_nid(page));
get_page(page);
if (page == swapcache) {
page_add_anon_rmap(page, vma, addr, false);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index e9bb6db002aa..0355285a3d6f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -112,7 +112,7 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
* Must happen after rmap, as mm_counter() checks mapping (via
* PageAnon()), which is set by __page_set_anon_rmap().
*/
- inc_mm_counter(dst_mm, mm_counter(page));
+ inc_mm_counter(dst_mm, mm_counter(page), page_to_nid(page));

set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

--
2.20.1


2022-05-14 01:54:51

by Gang Li

Subject: [PATCH 2/5 v1] mm: add numa_count field for rss_stat

This patch adds a new field `numa_count` to mm_rss_stat and
task_rss_stat.

`numa_count` is an array of size `sizeof(long) * num_possible_nodes()`.
To reduce memory consumption, it only contains the per-node sum of rss
needed by `oom_badness`, instead of recording the different kinds of rss
separately.

Signed-off-by: Gang Li <[email protected]>
---
include/linux/mm_types_task.h | 6 +++
kernel/fork.c | 70 +++++++++++++++++++++++++++++++++--
2 files changed, 73 insertions(+), 3 deletions(-)

diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 3e7da8c7ab95..c1ac2a33b697 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -64,11 +64,17 @@ enum {
struct task_rss_stat {
int events; /* for synchronization threshold */
int count[NR_MM_COUNTERS];
+#ifdef CONFIG_NUMA
+ int *numa_count;
+#endif
};
#endif /* USE_SPLIT_PTE_PTLOCKS */

struct mm_rss_stat {
atomic_long_t count[NR_MM_COUNTERS];
+#ifdef CONFIG_NUMA
+ atomic_long_t *numa_count;
+#endif
};

struct page_frag {
diff --git a/kernel/fork.c b/kernel/fork.c
index 9796897560ab..e549e0b30e2b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -141,6 +141,10 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */

+#if (defined SPLIT_RSS_COUNTING) && (defined CONFIG_NUMA)
+#define SPLIT_RSS_NUMA_COUNTING
+#endif
+
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
@@ -765,6 +769,16 @@ static void check_mm(struct mm_struct *mm)
mm, resident_page_types[i], x);
}

+#ifdef CONFIG_NUMA
+ for (i = 0; i < num_possible_nodes(); i++) {
+ long x = atomic_long_read(&mm->rss_stat.numa_count[i]);
+
+ if (unlikely(x))
+ pr_alert("BUG: Bad rss-counter state mm:%p node:%d val:%ld\n",
+ mm, i, x);
+ }
+#endif
+
if (mm_pgtables_bytes(mm))
pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
mm_pgtables_bytes(mm));
@@ -777,6 +791,29 @@ static void check_mm(struct mm_struct *mm)
#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))

+#ifdef CONFIG_NUMA
+static inline void mm_free_rss_stat(struct mm_struct *mm)
+{
+ kfree(mm->rss_stat.numa_count);
+}
+
+static inline int mm_init_rss_stat(struct mm_struct *mm)
+{
+ memset(&mm->rss_stat.count, 0, sizeof(mm->rss_stat.count));
+ mm->rss_stat.numa_count = kcalloc(num_possible_nodes(), sizeof(atomic_long_t), GFP_KERNEL);
+ if (unlikely(!mm->rss_stat.numa_count))
+ return -ENOMEM;
+ return 0;
+}
+#else
+static inline void mm_free_rss_stat(struct mm_struct *mm) {}
+static inline int mm_init_rss_stat(struct mm_struct *mm)
+{
+ memset(&mm->rss_stat.count, 0, sizeof(mm->rss_stat.count));
+ return 0;
+}
+#endif
+
/*
* Called when the last reference to the mm
* is dropped: either by a lazy thread or by
@@ -791,6 +828,7 @@ void __mmdrop(struct mm_struct *mm)
destroy_context(mm);
mmu_notifier_subscriptions_destroy(mm);
check_mm(mm);
+ mm_free_rss_stat(mm);
put_user_ns(mm->user_ns);
free_mm(mm);
}
@@ -831,12 +869,22 @@ static inline void put_signal_struct(struct signal_struct *sig)
free_signal_struct(sig);
}

+#ifdef SPLIT_RSS_NUMA_COUNTING
+void rss_stat_free(struct task_struct *p)
+{
+ kfree(p->rss_stat.numa_count);
+}
+#else
+void rss_stat_free(struct task_struct *p) {}
+#endif
+
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
WARN_ON(refcount_read(&tsk->usage));
WARN_ON(tsk == current);

+ rss_stat_free(tsk);
io_uring_free(tsk);
cgroup_free(tsk);
task_numa_free(tsk, true);
@@ -963,6 +1011,7 @@ void set_task_stack_end_magic(struct task_struct *tsk)
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
struct task_struct *tsk;
+ int *numa_count __maybe_unused;
int err;

if (node == NUMA_NO_NODE)
@@ -984,9 +1033,16 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
#endif
account_kernel_stack(tsk, 1);

+#ifdef SPLIT_RSS_NUMA_COUNTING
+ numa_count = kcalloc(num_possible_nodes(), sizeof(int), GFP_KERNEL);
+ if (!numa_count)
+ goto free_stack;
+ tsk->rss_stat.numa_count = numa_count;
+#endif
+
err = scs_prepare(tsk, node);
if (err)
- goto free_stack;
+ goto free_rss_stat;

#ifdef CONFIG_SECCOMP
/*
@@ -1047,6 +1103,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
#endif
return tsk;

+free_rss_stat:
+#ifdef SPLIT_RSS_NUMA_COUNTING
+ kfree(numa_count);
+#endif
free_stack:
exit_task_stack_account(tsk);
free_thread_stack(tsk);
@@ -1117,7 +1177,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm->map_count = 0;
mm->locked_vm = 0;
atomic64_set(&mm->pinned_vm, 0);
- memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
spin_lock_init(&mm->arg_lock);
mm_init_cpumask(mm);
@@ -1144,6 +1203,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
if (mm_alloc_pgd(mm))
goto fail_nopgd;

+ if (mm_init_rss_stat(mm))
+ goto fail_nocontext;
+
if (init_new_context(p, mm))
goto fail_nocontext;

@@ -2139,7 +2201,9 @@ static __latent_entropy struct task_struct *copy_process(
p->io_uring = NULL;
#endif

-#if defined(SPLIT_RSS_COUNTING)
+#ifdef SPLIT_RSS_NUMA_COUNTING
+ memset(&p->rss_stat, 0, sizeof(p->rss_stat) - sizeof(p->rss_stat.numa_count));
+#else
memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

--
2.20.1


2022-05-14 02:49:54

by Gang Li

Subject: [PATCH 3/5 v1] mm: add numa fields for tracepoint rss_stat

Since we added `numa_count` to mm->rss_stat, the rss_stat tracepoint
should also be modified. Now the output looks like this:

```
sleep-660 [002] 918.524333: rss_stat: mm_id=1539334265 curr=0 type=MM_NO_TYPE type_size=0B node=2 node_size=32768B diff_size=-8192B
sleep-660 [002] 918.524333: rss_stat: mm_id=1539334265 curr=0 type=MM_FILEPAGES type_size=4096B node=-1 node_size=0B diff_size=-4096B
sleep-660 [002] 918.524333: rss_stat: mm_id=1539334265 curr=0 type=MM_NO_TYPE type_size=0B node=1 node_size=0B diff_size=-4096B
```

Signed-off-by: Gang Li <[email protected]>
---
include/linux/mm.h | 9 +++++----
include/trace/events/kmem.h | 27 ++++++++++++++++++++-------
mm/memory.c | 5 +++--
3 files changed, 28 insertions(+), 13 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1b6c2e912ec8..cde5529285d6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2007,27 +2007,28 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member, int
return (unsigned long)val;
}

-void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);
+void mm_trace_rss_stat(struct mm_struct *mm, int member, long member_count, int node,
+ long numa_count, long diff_count);

static inline void add_mm_counter(struct mm_struct *mm, int member, long value, int node)
{
long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);

- mm_trace_rss_stat(mm, member, count);
+ mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, value);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member, int node)
{
long count = atomic_long_inc_return(&mm->rss_stat.count[member]);

- mm_trace_rss_stat(mm, member, count);
+ mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, 1);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member, int node)
{
long count = atomic_long_dec_return(&mm->rss_stat.count[member]);

- mm_trace_rss_stat(mm, member, count);
+ mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, -1);
}

/* Optimized variant when page is already known not to be PageAnon */
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index ddc8c944f417..2f4707d94624 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -347,7 +347,8 @@ static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
EM(MM_FILEPAGES) \
EM(MM_ANONPAGES) \
EM(MM_SWAPENTS) \
- EMe(MM_SHMEMPAGES)
+ EM(MM_SHMEMPAGES) \
+ EMe(MM_NO_TYPE)

#undef EM
#undef EMe
@@ -367,29 +368,41 @@ TRACE_EVENT(rss_stat,

TP_PROTO(struct mm_struct *mm,
int member,
- long count),
+ long member_count,
+ int node,
+ long node_count,
+ long diff_count),

- TP_ARGS(mm, member, count),
+ TP_ARGS(mm, member, member_count, node, node_count, diff_count),

TP_STRUCT__entry(
__field(unsigned int, mm_id)
__field(unsigned int, curr)
__field(int, member)
- __field(long, size)
+ __field(long, member_size)
+ __field(int, node)
+ __field(long, node_size)
+ __field(long, diff_size)
),

TP_fast_assign(
__entry->mm_id = mm_ptr_to_hash(mm);
__entry->curr = !!(current->mm == mm);
__entry->member = member;
- __entry->size = (count << PAGE_SHIFT);
+ __entry->member_size = (member_count << PAGE_SHIFT);
+ __entry->node = node;
+ __entry->node_size = (node_count << PAGE_SHIFT);
+ __entry->diff_size = (diff_count << PAGE_SHIFT);
),

- TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
+ TP_printk("mm_id=%u curr=%d type=%s type_size=%ldB node=%d node_size=%ldB diff_size=%ldB",
__entry->mm_id,
__entry->curr,
__print_symbolic(__entry->member, TRACE_MM_PAGES),
- __entry->size)
+ __entry->member_size,
+ __entry->node,
+ __entry->node_size,
+ __entry->diff_size)
);
#endif /* _TRACE_KMEM_H */

diff --git a/mm/memory.c b/mm/memory.c
index adb07fb0b483..2d3040a190f6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -170,9 +170,10 @@ static int __init init_zero_pfn(void)
}
early_initcall(init_zero_pfn);

-void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
+void mm_trace_rss_stat(struct mm_struct *mm, int member, long member_count, int node,
+ long numa_count, long diff_count)
{
- trace_rss_stat(mm, member, count);
+ trace_rss_stat(mm, member, member_count, node, numa_count, diff_count);
}

#if defined(SPLIT_RSS_COUNTING)
--
2.20.1


2022-05-14 03:51:20

by Gang Li

Subject: [PATCH 5/5 v1] mm, oom: enable per numa node oom for CONSTRAINT_MEMORY_POLICY

The page allocator will only allocate pages on the nodes indicated by
`nodemask`. But oom still selects its victim by total rss usage, and
killing that victim may reclaim nothing on the nodes indicated by
`nodemask`.

This patch lets oom calculate rss only on the given node when
oc->constraint equals CONSTRAINT_MEMORY_POLICY.

If `nodemask` is assigned, the process with the highest memory
consumption on the specific node will be killed. The oom_kill dmesg
then looks like this:

```
[ 1471.436027] Tasks state (memory values in pages):
[ 1471.438518] [ pid ] uid tgid total_vm rss (01)nrss pgtables_bytes swapents oom_score_adj name
[ 1471.554703] [ 1011] 0 1011 220005 8589 1872 823296 0 0 node
[ 1471.707912] [ 12399] 0 12399 1311306 1311056 262170 10534912 0 0 a.out
[ 1471.712429] [ 13135] 0 13135 787018 674666 674300 5439488 0 0 a.out
[ 1471.721506] [ 13295] 0 13295 597 188 0 24576 0 0 sh
[ 1471.734600] oom-kill:constraint=CONSTRAINT_MEMORY_POLICY,nodemask=1,cpuset=/,mems_allowed=0-2,global_oom,task_memcg=/user.slice/user-0.slice/session-3.scope,task=a.out,pid=13135,uid=0
[ 1471.742583] Out of memory: Killed process 13135 (a.out) total-vm:3148072kB, anon-rss:2697304kB, file-rss:1360kB, shmem-rss:0kB, UID:0 pgtables:5312kB oom_score_adj:0
[ 1471.849615] oom_reaper: reaped process 13135 (a.out), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB
```

Signed-off-by: Gang Li <[email protected]>
---
fs/proc/base.c | 6 +++++-
include/linux/oom.h | 2 +-
mm/oom_kill.c | 45 +++++++++++++++++++++++++++++++++++++--------
3 files changed, 43 insertions(+), 10 deletions(-)

diff --git a/fs/proc/base.c b/fs/proc/base.c
index c1031843cc6a..caf0f51284d0 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -552,8 +552,12 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
unsigned long totalpages = totalram_pages() + total_swap_pages;
unsigned long points = 0;
long badness;
+ struct oom_control oc = {
+ .totalpages = totalpages,
+ .gfp_mask = 0,
+ };

- badness = oom_badness(task, totalpages);
+ badness = oom_badness(task, &oc);
/*
* Special case OOM_SCORE_ADJ_MIN for all others scale the
* badness value into [0, 2000] range which we have been
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 2db9a1432511..0cb6a60be776 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -109,7 +109,7 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
bool __oom_reap_task_mm(struct mm_struct *mm);

long oom_badness(struct task_struct *p,
- unsigned long totalpages);
+ struct oom_control *oc);

extern bool out_of_memory(struct oom_control *oc);

diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 757f5665ae94..75a80b5a63bf 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -198,7 +198,7 @@ static bool should_dump_unreclaim_slab(void)
* predictable as possible. The goal is to return the highest value for the
* task consuming the most memory to avoid subsequent oom failures.
*/
-long oom_badness(struct task_struct *p, unsigned long totalpages)
+long oom_badness(struct task_struct *p, struct oom_control *oc)
{
long points;
long adj;
@@ -227,12 +227,22 @@ long oom_badness(struct task_struct *p, unsigned long totalpages)
* The baseline for the badness score is the proportion of RAM that each
* task's rss, pagetable and swap space use.
*/
- points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS, NUMA_NO_NODE) +
- mm_pgtables_bytes(p->mm) / PAGE_SIZE;
+ if (unlikely(oc->constraint == CONSTRAINT_MEMORY_POLICY)) {
+ struct zoneref *zoneref = first_zones_zonelist(oc->zonelist, gfp_zone(oc->gfp_mask),
+ oc->nodemask);
+ int nid_to_find_victim = zone_to_nid(zoneref->zone);
+
+ points = get_mm_counter(p->mm, -1, nid_to_find_victim) +
+ get_mm_counter(p->mm, MM_SWAPENTS, NUMA_NO_NODE) +
+ mm_pgtables_bytes(p->mm) / PAGE_SIZE;
+ } else {
+ points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS, NUMA_NO_NODE) +
+ mm_pgtables_bytes(p->mm) / PAGE_SIZE;
+ }
task_unlock(p);

/* Normalize to oom_score_adj units */
- adj *= totalpages / 1000;
+ adj *= oc->totalpages / 1000;
points += adj;

return points;
@@ -338,7 +348,7 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
goto select;
}

- points = oom_badness(task, oc->totalpages);
+ points = oom_badness(task, oc);
if (points == LONG_MIN || points < oc->chosen_points)
goto next;

@@ -382,6 +392,7 @@ static int dump_task(struct task_struct *p, void *arg)
{
struct oom_control *oc = arg;
struct task_struct *task;
+ unsigned long node_mm_rss;

if (oom_unkillable_task(p))
return 0;
@@ -399,9 +410,18 @@ static int dump_task(struct task_struct *p, void *arg)
return 0;
}

- pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
+ if (unlikely(oc->constraint == CONSTRAINT_MEMORY_POLICY)) {
+ struct zoneref *zoneref = first_zones_zonelist(oc->zonelist, gfp_zone(oc->gfp_mask),
+ oc->nodemask);
+ int nid_to_find_victim = zone_to_nid(zoneref->zone);
+
+ node_mm_rss = get_mm_counter(p->mm, -1, nid_to_find_victim);
+ } else {
+ node_mm_rss = 0;
+ }
+ pr_info("[%7d] %5d %5d %8lu %8lu %8lu %8ld %8lu %5hd %s\n",
task->pid, from_kuid(&init_user_ns, task_uid(task)),
- task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
+ task->tgid, task->mm->total_vm, get_mm_rss(task->mm), node_mm_rss,
mm_pgtables_bytes(task->mm),
get_mm_counter(task->mm, MM_SWAPENTS, NUMA_NO_NODE),
task->signal->oom_score_adj, task->comm);
@@ -422,8 +442,17 @@ static int dump_task(struct task_struct *p, void *arg)
*/
static void dump_tasks(struct oom_control *oc)
{
+ int nid_to_find_victim;
+
+ if (oc->nodemask) {
+ struct zoneref *zoneref = first_zones_zonelist(oc->zonelist, gfp_zone(oc->gfp_mask),
+ oc->nodemask);
+ nid_to_find_victim = zone_to_nid(zoneref->zone);
+ } else {
+ nid_to_find_victim = -1;
+ }
pr_info("Tasks state (memory values in pages):\n");
- pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n");
+ pr_info("[ pid ] uid tgid total_vm rss (%02d)nrss pgtables_bytes swapents oom_score_adj name\n", nid_to_find_victim);

if (is_memcg_oom(oc))
mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
--
2.20.1


2022-05-17 03:13:06

by Michal Hocko

Subject: Re: [PATCH 0/5 v1] mm, oom: Introduce per numa node oom for CONSTRAINT_MEMORY_POLICY

On Thu 12-05-22 12:46:29, Gang Li wrote:
> TLDR:
> If a mempolicy is in effect (oc->constraint == CONSTRAINT_MEMORY_POLICY), out_of_memory() will
> select a victim on the specific node to kill, so that the kernel can avoid accidental kills on NUMA systems.
>
> Problem:
> Before this patch series, oom only kills the process with the highest memory usage,
> by selecting the process with the highest oom_badness on the entire system.
>
> This works fine on UMA systems, but may cause accidental kills on NUMA systems.
>
> As shown below, if process c.out is bound to Node1 and keeps allocating pages from Node1,
> a.out will be killed first. But killing a.out does not free any memory on Node1, so c.out
> is killed next.
>
> A lot of our AMD machines have 8 NUMA nodes. On these systems, there is a greater chance
> of triggering this problem.

Sorry, I have only now found this email thread. The limitation of the
NUMA constrained oom is well known and long standing. Basically the
whole thing is a best effort as we are lacking per numa node memory
stats. I can see that you are trying to fill up that gap but this is
not really free. Have you measured the runtime overhead? Accounting is
done in very performance-sensitive paths and it would be rather
unfortunate to make everybody pay the overhead while binding to a
specific node or sets of nodes is not the most common usecase.

Also have you tried to have a look at cpusets? Those should be easier to
make a proper selection as it should be possible to iterate over tasks
belonging to a specific cpuset much more easily - essentially something
similar to memcg oom killer. We do not do that right now and by a very
brief look at the CONSTRAINT_CPUSET it seems that this code is not
really doing much these days. Maybe that would be a more appropriate way
to deal with more precise node aware oom killing?

[...]
> 21 files changed, 317 insertions(+), 111 deletions(-)

The code footprint is not free either. And more importantly, does this
even work much more reliably? I can see quite some NUMA_NO_NODE
accounting (e.g. copy_pte_range!). Is this somehow fixable?

Also how do those numbers add up. Let's say you increase the counter as
NUMA_NO_NODE but later on during the clean up you decrease based on the
page node?

Last but not least I am really not following MM_NO_TYPE concept. I can
only see add_mm_counter users without any decrements. What is going on
there?
--
Michal Hocko
SUSE Labs

2022-05-17 07:04:09

by kernel test robot

Subject: [mm] c9dc81ef10: BUG:Bad_rss-counter_state_mm:#node:#val



Greetings,

FYI, we noticed the following commit (built with gcc-11):

commit: c9dc81ef10c33656280058c29dfaa1d549d1daee ("[PATCH 4/5 v1] mm: enable per numa node rss_stat count")
url: https://github.com/intel-lab-lkp/linux/commits/Gang-Li/mm-oom-Introduce-per-numa-node-oom-for-CONSTRAINT_MEMORY_POLICY/20220512-124948
base: https://git.kernel.org/cgit/linux/kernel/git/s390/linux.git features
patch link: https://lore.kernel.org/lkml/[email protected]

in testcase: will-it-scale
version: will-it-scale-x86_64-a34a85c-1_20220502
with following parameters:

nr_task: 100%
mode: process
test: page_fault1
cpufreq_governor: performance
ucode: 0x42e

test-description: Will It Scale takes a testcase and runs it from 1 through to n parallel copies to see if the testcase will scale. It builds both a process and threads based test in order to see any differences between the two.
test-url: https://github.com/antonblanchard/will-it-scale


on test machine: 48 threads 2 sockets Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz with 112G memory

caused below changes (please refer to attached dmesg/kmsg for entire log/backtrace):



If you fix the issue, kindly add following tag
Reported-by: kernel test robot <[email protected]>



[ 160.153480][ T3850] BUG: Bad rss-counter state mm:0000000041b404d2 node:0 val:512
[ 160.163467][ T3850] BUG: Bad rss-counter state mm:0000000041b404d2 node:1 val:-512



To reproduce:

git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
sudo bin/lkp install job.yaml # job file is attached in this email
bin/lkp split-job --compatible job.yaml # generate the yaml file for lkp run
sudo bin/lkp run generated-yaml-file

# if come across any failure that blocks the test,
# please remove ~/.lkp and /lkp dir to run from a clean state.



--
0-DAY CI Kernel Test Service
https://01.org/lkp



Attachments:
(No filename) (1.95 kB)
config-5.18.0-rc4-00043-gc9dc81ef10c3 (165.11 kB)
job-script (8.06 kB)
dmesg.xz (32.16 kB)
will-it-scale (205.00 B)
job.yaml (5.50 kB)
reproduce (361.00 B)

2022-06-15 10:20:19

by Gang Li

Subject: Re: Re: [PATCH 0/5 v1] mm, oom: Introduce per numa node oom for CONSTRAINT_MEMORY_POLICY

Hi, I've done some benchmarking in the last few days.

On 2022/5/17 00:44, Michal Hocko wrote:
> Sorry, I have only now found this email thread. The limitation of the
> NUMA constrained oom is well known and long standing. Basically the
> whole thing is a best effort as we are lacking per numa node memory
> stats. I can see that you are trying to fill up that gap but this is
> not really free. Have you measured the runtime overhead? Accounting is
> done in very performance-sensitive paths and it would be rather
> unfortunate to make everybody pay the overhead while binding to a
> specific node or sets of nodes is not the most common usecase.

## CPU consumption

According to the results of UnixBench, there is less than one percent
performance loss in most cases.

On a 40c512g machine.

40 parallel copies of tests:
+----------+----------+-----+----------+---------+---------+---------+
| numastat | FileCopy | ... | Pipe | Fork | syscall | total |
+----------+----------+-----+----------+---------+---------+---------+
| off | 2920.24 | ... | 35926.58 | 6980.14 | 2617.18 | 8484.52 |
| on | 2919.15 | ... | 36066.07 | 6835.01 | 2724.82 | 8461.24 |
| overhead | 0.04% | ... | -0.39% | 2.12% | -3.95% | 0.28% |
+----------+----------+-----+----------+---------+---------+---------+

1 parallel copy of tests:
+----------+----------+-----+---------+--------+---------+---------+
| numastat | FileCopy | ... | Pipe | Fork | syscall | total |
+----------+----------+-----+---------+--------+---------+---------+
| off | 1515.37 | ... | 1473.97 | 546.88 | 1152.37 | 1671.2 |
| on | 1508.09 | ... | 1473.75 | 532.61 | 1148.83 | 1662.72 |
| overhead | 0.48% | ... | 0.01% | 2.68% | 0.31% | 0.51% |
+----------+----------+-----+---------+--------+---------+---------+

## MEM consumption

per task_struct:
sizeof(int) * num_possible_nodes() + sizeof(int*)
typically 4 * 2 + 8 bytes

per mm_struct:
sizeof(atomic_long_t) * num_possible_nodes() + sizeof(atomic_long_t*)
typically 8 * 2 + 8 bytes

zap_pte_range:
sizeof(int) * num_possible_nodes() + sizeof(int*)
typically 4 * 2 + 8 bytes
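
For reference, these sizes correspond to the pointer-plus-array layout used by the series
(field names as in patch 2; the figures in the comment assume two possible nodes):

```
/* Per-node rss storage added by this series (see patch 2). With two
 * possible nodes: task side is 2 * sizeof(int) + sizeof(int *),
 * mm side is 2 * sizeof(atomic_long_t) + sizeof(atomic_long_t *). */
struct task_rss_stat {
	int events;
	int count[NR_MM_COUNTERS];
#ifdef CONFIG_NUMA
	int *numa_count;           /* kcalloc(num_possible_nodes(), sizeof(int)) */
#endif
};

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
#ifdef CONFIG_NUMA
	atomic_long_t *numa_count; /* kcalloc(num_possible_nodes(), sizeof(atomic_long_t)) */
#endif
};
```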

> Also have you tried to have a look at cpusets? Those should be easier to
> make a proper selection as it should be possible to iterate over tasks
> belonging to a specific cpuset much more easily - essentially something
> similar to memcg oom killer. We do not do that right now and by a very
> brief look at the CONSTRAINT_CPUSET it seems that this code is not
> really doing much these days. Maybe that would be a more appropriate way
> to deal with more precise node aware oom killing?

Looks like both CONSTRAINT_MEMORY_POLICY and CONSTRAINT_CPUSET can
be used to deal with node-aware oom killing.

I think we can calculate badness in this way:
If constraint=CONSTRAINT_MEMORY_POLICY, get badness by `nodemask`.
If constraint=CONSTRAINT_CPUSET, get badness by `mems_allowed`.

example code:
```
long oom_badness(struct task_struct *p, struct oom_control *oc)
{
	long points = 0;
	int nid;

	...

	if (unlikely(oc->constraint == CONSTRAINT_MEMORY_POLICY)) {
		/* sum per-node rss over the nodes allowed by the mempolicy */
		for_each_node_mask(nid, *oc->nodemask)
			points += get_mm_counter(p->mm, -1, nid);
	} else if (unlikely(oc->constraint == CONSTRAINT_CPUSET)) {
		/* sum per-node rss over the cpuset's mems_allowed */
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			points += get_mm_counter(p->mm, -1, nid);
	} else {
		points = get_mm_rss(p->mm);
	}
	points += get_mm_counter(p->mm, MM_SWAPENTS, NUMA_NO_NODE) +
		  mm_pgtables_bytes(p->mm) / PAGE_SIZE;

	...
}
```

>
> [...]
>> 21 files changed, 317 insertions(+), 111 deletions(-)
>
> The code footprint is not free either. And more importantly, does this
> even work much more reliably? I can see quite some NUMA_NO_NODE
> accounting (e.g. copy_pte_range!). Is this somehow fixable?

> Also how do those numbers add up. Let's say you increase the counter as
> NUMA_NO_NODE but later on during the clean up you decrease based on the
> page node?
> Last but not least I am really not following MM_NO_TYPE concept. I can
> only see add_mm_counter users without any decrements. What is going on
> there?

There are two usage scenarios of NUMA_NO_NODE in this patch series.

1. A placeholder when swapping pages in and out of the swapfile.
```
// mem to swapfile
dec_mm_counter(vma->vm_mm, MM_ANONPAGES, page_to_nid(page));
inc_mm_counter(vma->vm_mm, MM_SWAPENTS, NUMA_NO_NODE);

// swapfile to mem
inc_mm_counter(vma->vm_mm, MM_ANONPAGES, page_to_nid(page));
dec_mm_counter(vma->vm_mm, MM_SWAPENTS, NUMA_NO_NODE);
```

In *_mm_counter(vma->vm_mm, MM_SWAPENTS, NUMA_NO_NODE),
NUMA_NO_NODE is a placeholder. It means this page no longer resides on
any node.

2. A placeholder in `add_mm_rss_vec` and `sync_mm_rss` for per-process mm
counter synchronization with SPLIT_RSS_COUNTING enabled.


MM_NO_TYPE is also a placeholder in `*_mm_counter`, `add_mm_rss_vec` and
`sync_mm_rss`.
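
One possible cleanup (an illustrative sketch only, not part of the posted series; the helper
name is hypothetical) would be a dedicated helper that updates only the per-node array, so
callers would not need to pass MM_NO_TYPE or NUMA_NO_NODE as placeholders:

```
/* Hypothetical helper: update only mm->rss_stat.numa_count[node], so the
 * generic mm counter helpers never see placeholder arguments. */
static inline void add_mm_numa_counter(struct mm_struct *mm, int node, long value)
{
#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		atomic_long_add(value, &mm->rss_stat.numa_count[node]);
#endif
}
```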

These placeholders are very strange. Maybe I should introduce a helper
function for mm->rss_stat.numa_count counting instead of using
placeholder.