Greeting,
FYI, we noticed a -53.8% regression of stress-ng.memhotplug.ops_per_sec due to commit:
commit: 9eeb73028cfb54eb06efe87c50cc014d3f1ff43e ("[patch 174/212] mm/migrate: update node demotion order on hotplug events")
url: https://github.com/0day-ci/linux/commits/Andrew-Morton/ia64-fix-typo-in-a-comment/20210903-065028
in testcase: stress-ng
on test machine: 96 threads 2 sockets Intel(R) Xeon(R) Gold 6252 CPU @ 2.10GHz with 192G memory
with following parameters:
nr_threads: 10%
disk: 1HDD
testtime: 60s
fs: ext4
class: os
test: memhotplug
cpufreq_governor: performance
ucode: 0x5003006
If you fix the issue, kindly add the following tag
Reported-by: kernel test robot <[email protected]>
Details are as below:
-------------------------------------------------------------------------------------------------->
To reproduce:
git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp split-job --compatible job.yaml # generate the yaml file for lkp run
bin/lkp run generated-yaml-file
=========================================================================================
class/compiler/cpufreq_governor/disk/fs/kconfig/nr_threads/rootfs/tbox_group/test/testcase/testtime/ucode:
os/gcc-9/performance/1HDD/ext4/x86_64-rhel-8.3/10%/debian-10.4-x86_64-20200603.cgz/lkp-csl-2sp5/memhotplug/stress-ng/60s/0x5003006
commit:
603f5ce25c ("mm/numa: automatically generate node migration order")
9eeb73028c ("mm/migrate: update node demotion order on hotplug events")
603f5ce25c452baf 9eeb73028cfb54eb06efe87c50c
---------------- ---------------------------
%stddev %change %stddev
\ | \
2638 -53.7% 1221 stress-ng.memhotplug.ops
43.89 -53.8% 20.28 stress-ng.memhotplug.ops_per_sec
40637 ± 2% -61.5% 15654 ± 4% stress-ng.time.involuntary_context_switches
91.17 -50.6% 45.00 stress-ng.time.percent_of_cpu_this_job_got
57.11 -50.3% 28.40 stress-ng.time.system_time
1075917 -36.4% 684753 cpuidle..usage
0.92 -0.5 0.43 ± 2% mpstat.cpu.all.sys%
7183 ± 3% +17.9% 8467 ± 8% numa-vmstat.node1.nr_kernel_stack
13042 ± 10% -34.2% 8577 ± 11% softirqs.CPU0.SCHED
17047 +5.8% 18036 proc-vmstat.nr_kernel_stack
1.218e+09 ± 2% -54.3% 5.564e+08 ± 3% proc-vmstat.pgfree
766.67 ± 5% +12.2% 860.50 ± 3% slabinfo.kmalloc-cg-32.active_objs
7462 ± 7% +13.3% 8458 ± 4% slabinfo.kmalloc-cg-8.active_objs
25595 ± 2% -40.0% 15361 vmstat.system.cs
7186 -22.4% 5577 vmstat.system.in
1422 ± 8% -34.0% 939.50 ± 43% numa-meminfo.node0.Active(anon)
7183 ± 3% +17.9% 8467 ± 8% numa-meminfo.node1.KernelStack
835236 ± 12% +23.0% 1027326 ± 11% numa-meminfo.node1.MemUsed
41.67 -48.8% 21.33 ± 3% turbostat.Avg_MHz
1.42 ± 2% -0.6 0.83 ± 2% turbostat.Busy%
2937 -12.3% 2574 turbostat.Bzy_MHz
51129 ± 15% -34.7% 33365 ± 10% turbostat.C1
0.10 ± 4% -0.0 0.07 ± 14% turbostat.C1%
161038 -53.8% 74435 turbostat.C1E
0.82 -0.4 0.39 turbostat.C1E%
837765 -33.9% 553856 turbostat.C6
15.14 -28.8% 10.78 ± 3% turbostat.CPU%c1
455267 ± 2% -23.9% 346620 ± 2% turbostat.IRQ
112.21 -7.7% 103.57 turbostat.PkgWatt
84.04 -4.3% 80.40 turbostat.RAMWatt
33885 ± 6% -24.2% 25673 interrupts.CAL:Function_call_interrupts
6313 ± 8% -75.6% 1541 ± 19% interrupts.CPU0.CAL:Function_call_interrupts
30982 ± 16% -55.4% 13832 ± 5% interrupts.CPU0.LOC:Local_timer_interrupts
270.83 ± 18% -79.5% 55.50 ± 21% interrupts.CPU0.RES:Rescheduling_interrupts
2401 ± 25% -76.3% 568.50 ± 21% interrupts.CPU1.CAL:Function_call_interrupts
21139 ± 23% -56.0% 9299 ± 27% interrupts.CPU1.LOC:Local_timer_interrupts
108.33 ± 18% -81.1% 20.50 ± 38% interrupts.CPU1.RES:Rescheduling_interrupts
863.50 ± 24% -59.7% 348.17 ± 34% interrupts.CPU2.CAL:Function_call_interrupts
98.00 ± 35% -70.9% 28.50 ± 86% interrupts.CPU2.RES:Rescheduling_interrupts
1504 ± 29% +168.4% 4037 ± 50% interrupts.CPU22.LOC:Local_timer_interrupts
12932 ± 21% -69.8% 3904 ± 41% interrupts.CPU5.LOC:Local_timer_interrupts
77.17 ± 65% -85.1% 11.50 ± 49% interrupts.CPU5.RES:Rescheduling_interrupts
6555 ± 33% -45.9% 3549 ± 58% interrupts.CPU54.LOC:Local_timer_interrupts
87.33 ± 31% -74.6% 22.17 ± 60% interrupts.CPU58.RES:Rescheduling_interrupts
76.17 ± 24% -80.7% 14.67 ± 39% interrupts.CPU59.RES:Rescheduling_interrupts
76.17 ± 24% -72.4% 21.00 ± 62% interrupts.CPU63.RES:Rescheduling_interrupts
6038 ± 34% -67.1% 1984 ± 40% interrupts.CPU66.LOC:Local_timer_interrupts
7942 ± 37% -55.8% 3513 ± 70% interrupts.CPU7.LOC:Local_timer_interrupts
428264 -22.2% 333008 interrupts.LOC:Local_timer_interrupts
3123 ± 8% -53.8% 1444 ± 10% interrupts.RES:Rescheduling_interrupts
stress-ng.memhotplug.ops
3000 +--------------------------------------------------------------------+
| .+ .+ +. +. |
2500 |.++.++.++.++ +.++.++.++.++.++.++ +.+ ++.++.+ ++.+ |
| |
| |
2000 |-+ |
| |
1500 |-+ |
| OO OO OO OO O OO OO OO OO OO OO OO O OO OO O OO O O O OO OO O|
1000 |-+ O O O O |
| |
| |
500 |-+ |
| O O |
0 +--------------------------------------------------------------------+
stress-ng.memhotplug.ops_per_sec
50 +----------------------------------------------------------------------+
45 |-+ .+ .+ +. +. |
|.++.++.++.++ +.+.++.++.++.++.++.+ +.+ ++.++.+ ++.+ |
40 |-+ |
35 |-+ |
| |
30 |-+ |
25 |-+ |
20 |-OO OO OO OO O O OO OO OO OO OO O OO O OO OO O O O O OO OO OO |
| O O OO O |
15 |-+ |
10 |-+ |
| |
5 |-+ O O |
0 +----------------------------------------------------------------------+
[*] bisect-good sample
[O] bisect-bad sample
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
---
0DAY/LKP+ Test Infrastructure                           Open Source Technology Center
https://lists.01.org/hyperkitty/list/[email protected]                 Intel Corporation
Thanks,
Oliver Sang
kernel test robot <[email protected]> writes:
> Greeting,
>
> FYI, we noticed a -53.8% regression of stress-ng.memhotplug.ops_per_sec due to commit:
>
>
> commit: 9eeb73028cfb54eb06efe87c50cc014d3f1ff43e ("[patch 174/212] mm/migrate: update node demotion order on hotplug events")
> url: https://github.com/0day-ci/linux/commits/Andrew-Morton/ia64-fix-typo-in-a-comment/20210903-065028
>
>
> in testcase: stress-ng
> on test machine: 96 threads 2 sockets Intel(R) Xeon(R) Gold 6252 CPU @ 2.10GHz with 192G memory
> with following parameters:
>
> nr_threads: 10%
> disk: 1HDD
> testtime: 60s
> fs: ext4
> class: os
> test: memhotplug
> cpufreq_governor: performance
> ucode: 0x5003006
>
Because we added some operations to the CPU online/offline path, it's
expected that the performance of CPU online/offline will decrease.  In
most cases, the performance of CPU hotplug isn't a big problem.  But
then I remembered that CPU hotplug performance may influence
suspend/resume performance :-(
It appears easy and reasonable to enclose the added operations inside
#ifdef CONFIG_NUMA.  Would that be sufficient to restore suspend/resume
performance?
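For illustration, a rough, untested sketch of that idea, reusing the
helpers already in mm/migrate.c (how the !CONFIG_NUMA stub would be
wired up is only an assumption here, not a posted patch):

#ifdef CONFIG_NUMA
/* For callers that do not hold get_online_mems() already. */
static void set_migration_target_nodes(void)
{
        get_online_mems();
        __set_migration_target_nodes();
        put_online_mems();
}
#else
/* Without NUMA there is no demotion order to maintain. */
static inline void set_migration_target_nodes(void)
{
}
#endif /* CONFIG_NUMA */

With a guard like this, !CONFIG_NUMA kernels would compile the
demotion-order update out entirely and pay nothing extra on hotplug or
during suspend/resume.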
Best Regards,
Huang, Ying
Hi, Oliver,
kernel test robot <[email protected]> writes:
> Greeting,
>
> FYI, we noticed a -53.8% regression of stress-ng.memhotplug.ops_per_sec due to commit:
>
>
> commit: 9eeb73028cfb54eb06efe87c50cc014d3f1ff43e ("[patch 174/212] mm/migrate: update node demotion order on hotplug events")
> url: https://github.com/0day-ci/linux/commits/Andrew-Morton/ia64-fix-typo-in-a-comment/20210903-065028
>
>
> in testcase: stress-ng
> on test machine: 96 threads 2 sockets Intel(R) Xeon(R) Gold 6252 CPU @ 2.10GHz with 192G memory
> with following parameters:
>
> nr_threads: 10%
> disk: 1HDD
> testtime: 60s
> fs: ext4
> class: os
> test: memhotplug
> cpufreq_governor: performance
> ucode: 0x5003006
>
Can you help to test whether the following patch can recover the
regression?
Best Regards,
Huang, Ying
----------------------------8<--------------------------------------
From 5d3e18a9f083954584932a20233ef489d9398342 Mon Sep 17 00:00:00 2001
From: Huang Ying <[email protected]>
Date: Thu, 16 Sep 2021 16:51:44 +0800
Subject: [PATCH] mm/migrate: recover hotplug performance regression
0-Day kernel test robot reported a -53.8% performance regression for
the stress-ng memhotplug test case.  This patch recovers the
regression by avoiding updating the demotion order when it is not
necessary.
Refer: https://lore.kernel.org/lkml/20210905135932.GE15026@xsang-OptiPlex-9020/
Fixes: 884a6e5d1f93 ("mm/migrate: update node demotion order on hotplug events")
Signed-off-by: "Huang, Ying" <[email protected]>
Suggested-by: Dave Hansen <[email protected]>
Reported-by: kernel test robot <[email protected]>
Cc: Yang Shi <[email protected]>
Cc: Zi Yan <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Wei Xu <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Greg Thelen <[email protected]>
Cc: Keith Busch <[email protected]>
---
mm/migrate.c | 26 ++++++++++++++++++++++----
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 77d107a4577f..20d803707497 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1145,6 +1145,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
static int node_demotion[MAX_NUMNODES] __read_mostly =
{[0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE};
+static bool node_demotion_disabled __read_mostly;
+
/**
* next_demotion_node() - Get the next node in the demotion path
* @node: The starting node to lookup the next node
@@ -1158,6 +1160,8 @@ int next_demotion_node(int node)
{
int target;
+ if (node_demotion_disabled)
+ return NUMA_NO_NODE;
/*
* node_demotion[] is updated without excluding this
* function from running. RCU doesn't provide any
@@ -3198,13 +3202,26 @@ static void __set_migration_target_nodes(void)
goto again;
}
+static int nr_node_has_cpu;
+static int nr_node_has_mem;
+
+static void check_set_migration_target_nodes(void)
+{
+ if (num_node_state(N_MEMORY) != nr_node_has_mem ||
+ num_node_state(N_CPU) != nr_node_has_cpu) {
+ __set_migration_target_nodes();
+ nr_node_has_mem = num_node_state(N_MEMORY);
+ nr_node_has_cpu = num_node_state(N_CPU);
+ }
+}
+
/*
* For callers that do not hold get_online_mems() already.
*/
static void set_migration_target_nodes(void)
{
get_online_mems();
- __set_migration_target_nodes();
+ check_set_migration_target_nodes();
put_online_mems();
}
@@ -3249,7 +3266,7 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
* will leave migration disabled until the offline
* completes and the MEM_OFFLINE case below runs.
*/
- disable_all_migrate_targets();
+ node_demotion_disabled = true;
break;
case MEM_OFFLINE:
case MEM_ONLINE:
@@ -3257,14 +3274,15 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
* Recalculate the target nodes once the node
* reaches its final state (online or offline).
*/
- __set_migration_target_nodes();
+ check_set_migration_target_nodes();
+ node_demotion_disabled = false;
break;
case MEM_CANCEL_OFFLINE:
/*
* MEM_GOING_OFFLINE disabled all the migration
* targets. Reenable them.
*/
- __set_migration_target_nodes();
+ node_demotion_disabled = false;
break;
case MEM_GOING_ONLINE:
case MEM_CANCEL_ONLINE:
--
2.30.2
Hi All,
we confirmed this regression was fixed by
commit: 295be91f7ef0027fca2f2e4788e99731aa931834 ("mm/migrate: optimize hotplug-time demotion order updates")
https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git master
the report link is:
https://lists.01.org/hyperkitty/list/[email protected]/thread/4KKRZ7CSIPKMD3Q43GZTGQXA6WBJM2XO/
On Fri, Sep 17, 2021 at 11:14:59AM +0800, Huang, Ying wrote:
> Hi, Oliver,
>
> kernel test robot <[email protected]> writes:
>
> > Greeting,
> >
> > FYI, we noticed a -53.8% regression of stress-ng.memhotplug.ops_per_sec due to commit:
> >
> >
> > commit: 9eeb73028cfb54eb06efe87c50cc014d3f1ff43e ("[patch 174/212] mm/migrate: update node demotion order on hotplug events")
> > url: https://github.com/0day-ci/linux/commits/Andrew-Morton/ia64-fix-typo-in-a-comment/20210903-065028
> >
> >
> > in testcase: stress-ng
> > on test machine: 96 threads 2 sockets Intel(R) Xeon(R) Gold 6252 CPU @ 2.10GHz with 192G memory
> > with following parameters:
> >
> > nr_threads: 10%
> > disk: 1HDD
> > testtime: 60s
> > fs: ext4
> > class: os
> > test: memhotplug
> > cpufreq_governor: performance
> > ucode: 0x5003006
> >
>
> Can you help to test whether the following patch can recover the
> regression?
>
> Best Regards,
> Huang, Ying
>
> ----------------------------8<--------------------------------------
> From 5d3e18a9f083954584932a20233ef489d9398342 Mon Sep 17 00:00:00 2001
> From: Huang Ying <[email protected]>
> Date: Thu, 16 Sep 2021 16:51:44 +0800
> Subject: [PATCH] mm/migrate: recover hotplug performance regression
>
> 0-Day kernel test robot reported a -53.8% performance regression for
> the stress-ng memhotplug test case.  This patch recovers the
> regression by avoiding updating the demotion order when it is not
> necessary.
>
> Refer: https://lore.kernel.org/lkml/20210905135932.GE15026@xsang-OptiPlex-9020/
> Fixes: 884a6e5d1f93 ("mm/migrate: update node demotion order on hotplug events")
> Signed-off-by: "Huang, Ying" <[email protected]>
> Suggested-by: Dave Hansen <[email protected]>
> Reported-by: kernel test robot <[email protected]>
> Cc: Yang Shi <[email protected]>
> Cc: Zi Yan <[email protected]>
> Cc: Michal Hocko <[email protected]>
> Cc: Wei Xu <[email protected]>
> Cc: Oscar Salvador <[email protected]>
> Cc: David Rientjes <[email protected]>
> Cc: Dan Williams <[email protected]>
> Cc: David Hildenbrand <[email protected]>
> Cc: Greg Thelen <[email protected]>
> Cc: Keith Busch <[email protected]>
> ---
> mm/migrate.c | 26 ++++++++++++++++++++++----
> 1 file changed, 22 insertions(+), 4 deletions(-)
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 77d107a4577f..20d803707497 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1145,6 +1145,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
> static int node_demotion[MAX_NUMNODES] __read_mostly =
> {[0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE};
>
> +static bool node_demotion_disabled __read_mostly;
> +
> /**
> * next_demotion_node() - Get the next node in the demotion path
> * @node: The starting node to lookup the next node
> @@ -1158,6 +1160,8 @@ int next_demotion_node(int node)
> {
> int target;
>
> + if (node_demotion_disabled)
> + return NUMA_NO_NODE;
> /*
> * node_demotion[] is updated without excluding this
> * function from running. RCU doesn't provide any
> @@ -3198,13 +3202,26 @@ static void __set_migration_target_nodes(void)
> goto again;
> }
>
> +static int nr_node_has_cpu;
> +static int nr_node_has_mem;
> +
> +static void check_set_migration_target_nodes(void)
> +{
> + if (num_node_state(N_MEMORY) != nr_node_has_mem ||
> + num_node_state(N_CPU) != nr_node_has_cpu) {
> + __set_migration_target_nodes();
> + nr_node_has_mem = num_node_state(N_MEMORY);
> + nr_node_has_cpu = num_node_state(N_CPU);
> + }
> +}
> +
> /*
> * For callers that do not hold get_online_mems() already.
> */
> static void set_migration_target_nodes(void)
> {
> get_online_mems();
> - __set_migration_target_nodes();
> + check_set_migration_target_nodes();
> put_online_mems();
> }
>
> @@ -3249,7 +3266,7 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
> * will leave migration disabled until the offline
> * completes and the MEM_OFFLINE case below runs.
> */
> - disable_all_migrate_targets();
> + node_demotion_disabled = true;
> break;
> case MEM_OFFLINE:
> case MEM_ONLINE:
> @@ -3257,14 +3274,15 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
> * Recalculate the target nodes once the node
> * reaches its final state (online or offline).
> */
> - __set_migration_target_nodes();
> + check_set_migration_target_nodes();
> + node_demotion_disabled = false;
> break;
> case MEM_CANCEL_OFFLINE:
> /*
> * MEM_GOING_OFFLINE disabled all the migration
> * targets. Reenable them.
> */
> - __set_migration_target_nodes();
> + node_demotion_disabled = false;
> break;
> case MEM_GOING_ONLINE:
> case MEM_CANCEL_ONLINE:
> --
> 2.30.2
>