Greetings,
FYI, we noticed a -23.7% regression of vm-scalability.median due to commit:
commit: 8bb3c61bafa8c1cd222ada602bb94ff23119e738 ("vfs: Convert ramfs, shmem, tmpfs, devtmpfs, rootfs to use the new mount API")
https://kernel.googlesource.com/pub/scm/linux/kernel/git/viro/vfs.git work.mount
in testcase: vm-scalability
on test machine: 88 threads Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz with 128G memory
with the following parameters:
runtime: 300s
size: 16G
test: shm-pread-rand
cpufreq_governor: performance
ucode: 0xb000036
test-description: The motivation behind this suite is to exercise functions and regions of the mm/ subsystem of the Linux kernel that are of interest to us.
test-url: https://git.kernel.org/cgit/linux/kernel/git/wfg/vm-scalability.git/
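For context, the cited commit moves these filesystems from the legacy mount_*() entry points to the fs_context-based mount API. Below is a minimal sketch of that conversion pattern; the foo_* names are hypothetical and illustrative only, not code from the commit:

#include <linux/fs.h>
#include <linux/fs_context.h>

/* hypothetical filesystem "foo"; the real commit converts ramfs,
 * shmem, tmpfs, devtmpfs and rootfs along these lines */
static int foo_fill_super(struct super_block *sb, struct fs_context *fc)
{
        /* set up sb->s_op, allocate the root inode, etc. */
        return 0;
}

static int foo_get_tree(struct fs_context *fc)
{
        /* replaces the old mount_nodev()-style entry point */
        return get_tree_nodev(fc, foo_fill_super);
}

static const struct fs_context_operations foo_context_ops = {
        .get_tree = foo_get_tree,
};

/* replaces file_system_type.mount in the legacy API */
static int foo_init_fs_context(struct fs_context *fc)
{
        fc->ops = &foo_context_ops;
        return 0;
}

static struct file_system_type foo_fs_type = {
        .name            = "foo",
        .init_fs_context = foo_init_fs_context,
        .kill_sb         = kill_litter_super,
};

One behavioral difference under the new API: mount options move from a single monolithic string to per-key ->parse_param() callbacks, which changes how the superblock setup path is exercised.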
In addition to that, the commit also has a significant impact on the following tests:
+------------------+-----------------------------------------------------------------------+
| testcase: change | vm-scalability: vm-scalability.median -21.8% regression |
| test machine | 88 threads Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz with 128G memory |
| test parameters | cpufreq_governor=performance |
| | runtime=300s |
| | size=16G |
| | test=shm-xread-rand |
| | ucode=0xb000036 |
+------------------+-----------------------------------------------------------------------+
If you fix the issue, kindly add the following tag
Reported-by: kernel test robot <[email protected]>
Details are as follows:
-------------------------------------------------------------------------------------------------->
To reproduce:
git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml
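For reference, a hypothetical job.yaml sketch built from the parameters above (the attached job file is authoritative, and its exact key layout may differ):

suite: vm-scalability
testcase: vm-scalability
runtime: 300s
size: 16G
test: shm-pread-rand
cpufreq_governor: performance
ucode: 0xb000036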
=========================================================================================
compiler/cpufreq_governor/kconfig/rootfs/runtime/size/tbox_group/test/testcase/ucode:
gcc-7/performance/x86_64-rhel-7.6/debian-x86_64-2019-05-14.cgz/300s/16G/lkp-bdw-ep4/shm-pread-rand/vm-scalability/0xb000036
commit:
63228b974a ("make shmem_fill_super() static")
8bb3c61baf ("vfs: Convert ramfs, shmem, tmpfs, devtmpfs, rootfs to use the new mount API")
63228b974a6e1e39 8bb3c61bafa8c1cd222ada602bb
---------------- ---------------------------
fail:runs %reproduction fail:runs
| | |
0:4 -1% 0:4 perf-profile.children.cycles-pp.error_entry
%stddev %change %stddev
\ | \
45.57 -59.3% 18.56 vm-scalability.free_time
54243 -23.7% 41391 ± 3% vm-scalability.median
4760988 -23.3% 3650267 ± 2% vm-scalability.throughput
356.61 -9.0% 324.46 vm-scalability.time.elapsed_time
356.61 -9.0% 324.46 vm-scalability.time.elapsed_time.max
114308 ± 3% -6.6% 106792 vm-scalability.time.involuntary_context_switches
65954824 -50.0% 32978012 vm-scalability.time.maximum_resident_set_size
1.362e+08 -49.9% 68168167 vm-scalability.time.minor_page_faults
8533 +1.3% 8642 vm-scalability.time.percent_of_cpu_this_job_got
6021 -56.2% 2635 vm-scalability.time.system_time
24410 +4.1% 25408 vm-scalability.time.user_time
1.431e+09 -23.3% 1.097e+09 ± 2% vm-scalability.workload
37.62 +6.5% 40.07 ± 7% boot-time.boot
33180 ±146% -89.5% 3493 ± 14% cpuidle.C1.usage
8.428e+08 ± 40% -65.6% 2.899e+08 ± 50% cpuidle.C6.time
996104 ± 31% -57.2% 425958 ± 45% cpuidle.C6.usage
4.29 ± 27% -2.4 1.92 ± 4% mpstat.cpu.all.idle%
18.96 -9.7 9.24 mpstat.cpu.all.sys%
76.74 +12.1 88.83 mpstat.cpu.all.usr%
10748 ± 8% +9.9% 11815 ± 14% sched_debug.cfs_rq:/.runnable_weight.avg
208.36 ± 4% +14.5% 238.52 ± 3% sched_debug.cfs_rq:/.util_est_enqueued.stddev
61.83 ± 6% +19.2% 73.68 ± 12% sched_debug.cpu.sched_goidle.avg
10203622 -46.5% 5456851 ± 5% numa-numastat.node0.local_node
10207916 -46.5% 5463997 ± 5% numa-numastat.node0.numa_hit
9917963 -50.2% 4941906 ± 5% numa-numastat.node1.local_node
9930875 -50.1% 4951982 ± 5% numa-numastat.node1.numa_hit
267261 -46.4% 143303 slabinfo.radix_tree_node.active_objs
4837 -46.7% 2577 slabinfo.radix_tree_node.active_slabs
270907 -46.7% 144336 slabinfo.radix_tree_node.num_objs
4837 -46.7% 2577 slabinfo.radix_tree_node.num_slabs
76.00 +15.8% 88.00 vmstat.cpu.us
63526778 -47.7% 33255415 vmstat.memory.cache
56890579 +62.4% 92373408 vmstat.memory.free
1070 -3.0% 1038 vmstat.system.cs
2674 +2.4% 2738 turbostat.Avg_MHz
32037 ±150% -93.3% 2139 ± 25% turbostat.C1
991833 ± 31% -57.4% 422249 ± 45% turbostat.C6
2.62 ± 41% -1.6 0.99 ± 50% turbostat.C6%
1.62 ± 62% -66.7% 0.54 ± 65% turbostat.CPU%c6
66.25 ± 3% +5.3% 69.75 turbostat.PkgTmp
29.80 +5.1% 31.30 turbostat.RAMWatt
5404973 -72.3% 1495267 meminfo.Active
5404747 -72.3% 1495039 meminfo.Active(anon)
63415329 -47.6% 33229890 meminfo.Cached
63554859 -48.6% 32651783 meminfo.Committed_AS
57234837 -45.9% 30959238 meminfo.Inactive
57233523 -45.9% 30957913 meminfo.Inactive(anon)
216029 -33.4% 143846 meminfo.KReclaimable
57179054 -46.0% 30902610 meminfo.Mapped
56257735 +63.0% 91685305 meminfo.MemAvailable
56767938 +62.5% 92231592 meminfo.MemFree
75139272 -47.2% 39675618 meminfo.Memused
1039 ± 64% -58.7% 429.50 ±165% meminfo.Mlocked
10730605 -48.5% 5531358 meminfo.PageTables
216029 -33.4% 143846 meminfo.SReclaimable
62385638 -48.4% 32200708 meminfo.Shmem
350797 -20.7% 278299 meminfo.Slab
226631 -42.8% 129724 meminfo.max_used_kB
2747395 ± 2% -71.7% 777377 ± 6% numa-meminfo.node0.Active
2747257 ± 2% -71.7% 777239 ± 6% numa-meminfo.node0.Active(anon)
31748347 -47.3% 16719086 ± 2% numa-meminfo.node0.FilePages
28618699 -45.6% 15557625 ± 2% numa-meminfo.node0.Inactive
28618165 -45.6% 15557164 ± 2% numa-meminfo.node0.Inactive(anon)
28570648 -45.7% 15509571 ± 2% numa-meminfo.node0.Mapped
28083214 ± 3% +62.5% 45634775 ± 2% numa-meminfo.node0.MemFree
37802576 ± 2% -46.4% 20251015 ± 4% numa-meminfo.node0.MemUsed
5439763 ± 6% -45.6% 2957207 ± 16% numa-meminfo.node0.PageTables
31232535 -48.2% 16193393 ± 2% numa-meminfo.node0.Shmem
2718587 -73.2% 729480 ± 6% numa-meminfo.node1.Active
2718497 -73.2% 729390 ± 6% numa-meminfo.node1.Active(anon)
31668773 -48.0% 16468291 ± 2% numa-meminfo.node1.FilePages
28555887 -46.3% 15346454 ± 2% numa-meminfo.node1.Inactive
28555107 -46.3% 15345591 ± 2% numa-meminfo.node1.Inactive(anon)
28548207 -46.3% 15337939 ± 2% numa-meminfo.node1.Mapped
28682299 ± 2% +62.6% 46649099 ± 2% numa-meminfo.node1.MemFree
37339119 ± 2% -48.1% 19372319 ± 5% numa-meminfo.node1.MemUsed
5292403 ± 5% -51.5% 2564683 ± 18% numa-meminfo.node1.PageTables
31153849 -48.8% 15963746 ± 2% numa-meminfo.node1.Shmem
1350072 -72.4% 373214 proc-vmstat.nr_active_anon
1401204 +63.3% 2287872 proc-vmstat.nr_dirty_background_threshold
2805837 +63.3% 4581345 proc-vmstat.nr_dirty_threshold
15857347 -47.7% 8300115 proc-vmstat.nr_file_pages
14186977 +62.6% 23066682 proc-vmstat.nr_free_pages
14312758 -46.0% 7732409 proc-vmstat.nr_inactive_anon
14299279 -46.0% 7718742 proc-vmstat.nr_mapped
259.50 ± 65% -58.5% 107.75 ±165% proc-vmstat.nr_mlock
2684388 -48.5% 1381692 proc-vmstat.nr_page_table_pages
15599663 -48.4% 8042556 proc-vmstat.nr_shmem
54043 -33.5% 35937 proc-vmstat.nr_slab_reclaimable
1350072 -72.4% 373214 proc-vmstat.nr_zone_active_anon
14312758 -46.0% 7732409 proc-vmstat.nr_zone_inactive_anon
193.50 ± 62% +1207.8% 2530 ±113% proc-vmstat.numa_hint_faults_local
20165318 -48.2% 10440741 proc-vmstat.numa_hit
20148104 -48.3% 10423512 proc-vmstat.numa_local
16499590 -50.0% 8255098 proc-vmstat.pgactivate
20258153 -48.1% 10514291 proc-vmstat.pgalloc_normal
1.371e+08 -49.7% 69006001 proc-vmstat.pgfault
20112038 -48.8% 10289827 ± 2% proc-vmstat.pgfree
686373 ± 2% -71.7% 194202 ± 6% numa-vmstat.node0.nr_active_anon
7938445 -47.3% 4181535 ± 2% numa-vmstat.node0.nr_file_pages
7018951 ± 3% +62.5% 11407016 ± 2% numa-vmstat.node0.nr_free_pages
7156363 -45.6% 3891170 ± 2% numa-vmstat.node0.nr_inactive_anon
7144564 -45.7% 3879384 ± 2% numa-vmstat.node0.nr_mapped
147.25 ± 68% -57.7% 62.25 ±166% numa-vmstat.node0.nr_mlock
1360414 ± 6% -45.7% 739239 ± 16% numa-vmstat.node0.nr_page_table_pages
7809492 -48.1% 4050112 ± 2% numa-vmstat.node0.nr_shmem
686373 ± 2% -71.7% 194202 ± 6% numa-vmstat.node0.nr_zone_active_anon
7156363 -45.6% 3891171 ± 2% numa-vmstat.node0.nr_zone_inactive_anon
10281673 -44.9% 5666708 ± 5% numa-vmstat.node0.numa_hit
10277368 -44.9% 5659426 ± 5% numa-vmstat.node0.numa_local
679201 -73.2% 182247 ± 6% numa-vmstat.node1.nr_active_anon
7918638 -48.0% 4118797 ± 2% numa-vmstat.node1.nr_file_pages
7168402 ± 2% +62.7% 11660690 ± 2% numa-vmstat.node1.nr_free_pages
7140634 -46.2% 3838218 ± 2% numa-vmstat.node1.nr_inactive_anon
7138960 -46.3% 3836299 ± 2% numa-vmstat.node1.nr_mapped
112.00 ± 60% -59.8% 45.00 ±165% numa-vmstat.node1.nr_mlock
1323543 ± 6% -51.6% 640919 ± 18% numa-vmstat.node1.nr_page_table_pages
7789907 -48.7% 3992660 ± 2% numa-vmstat.node1.nr_shmem
679202 -73.2% 182246 ± 6% numa-vmstat.node1.nr_zone_active_anon
7140633 -46.2% 3838218 ± 2% numa-vmstat.node1.nr_zone_inactive_anon
9901667 -47.9% 5155800 ± 5% numa-vmstat.node1.numa_hit
9739325 -48.7% 4996127 ± 5% numa-vmstat.node1.numa_local
164592 ± 3% -9.9% 148339 ± 6% softirqs.CPU0.TIMER
164109 ± 2% -15.4% 138916 ± 7% softirqs.CPU10.TIMER
161316 ± 2% -14.7% 137665 ± 10% softirqs.CPU11.TIMER
161790 ± 3% -13.7% 139674 ± 9% softirqs.CPU13.TIMER
167454 ± 2% -10.0% 150695 ± 6% softirqs.CPU14.TIMER
36730 ± 10% -11.1% 32650 ± 3% softirqs.CPU17.RCU
161800 ± 5% -20.2% 129065 ± 9% softirqs.CPU17.TIMER
159221 ± 5% -13.8% 137234 ± 7% softirqs.CPU18.TIMER
37187 ± 4% -6.5% 34765 ± 2% softirqs.CPU19.RCU
160907 ± 4% -15.7% 135603 ± 8% softirqs.CPU19.TIMER
165935 ± 4% -11.2% 147397 ± 7% softirqs.CPU20.TIMER
155034 ± 3% -10.5% 138768 ± 6% softirqs.CPU22.TIMER
151672 ± 3% -12.5% 132674 ± 8% softirqs.CPU27.TIMER
154953 ± 6% -11.0% 137969 ± 8% softirqs.CPU29.TIMER
164268 ± 3% -14.3% 140762 ± 7% softirqs.CPU3.TIMER
160670 ± 5% -16.6% 134041 ± 9% softirqs.CPU31.TIMER
155203 ± 4% -12.4% 136016 ± 6% softirqs.CPU32.TIMER
155364 ± 5% -14.8% 132311 ± 5% softirqs.CPU33.TIMER
159227 ± 4% -12.7% 139061 ± 11% softirqs.CPU34.TIMER
150917 ± 4% -11.8% 133176 ± 7% softirqs.CPU35.TIMER
7508 ± 23% -23.6% 5734 ± 3% softirqs.CPU36.SCHED
156798 ± 8% -12.9% 136643 softirqs.CPU39.TIMER
155204 ± 7% -10.9% 138356 ± 2% softirqs.CPU40.TIMER
7919 ± 22% -29.9% 5554 ± 2% softirqs.CPU43.SCHED
161460 ± 5% -10.8% 143999 ± 6% softirqs.CPU44.TIMER
161143 ± 4% -13.6% 139241 ± 5% softirqs.CPU47.TIMER
162003 ± 12% -15.6% 136703 ± 4% softirqs.CPU48.TIMER
158763 ± 5% -12.8% 138486 ± 2% softirqs.CPU49.TIMER
158713 ± 5% -10.9% 141433 ± 6% softirqs.CPU5.TIMER
162862 ± 4% -11.9% 143450 ± 4% softirqs.CPU50.TIMER
8632 ± 23% -33.0% 5780 ± 5% softirqs.CPU52.SCHED
163977 ± 4% -11.2% 145624 ± 8% softirqs.CPU52.TIMER
158723 ± 3% -14.2% 136159 ± 7% softirqs.CPU54.TIMER
160583 ± 2% -16.4% 134203 ± 8% softirqs.CPU55.TIMER
160141 ± 3% -14.6% 136793 ± 8% softirqs.CPU57.TIMER
164000 ± 2% -9.5% 148475 ± 5% softirqs.CPU58.TIMER
158146 ± 5% -18.6% 128705 ± 9% softirqs.CPU61.TIMER
155401 ± 5% -11.0% 138276 ± 4% softirqs.CPU66.TIMER
155533 ± 3% -11.2% 138081 ± 6% softirqs.CPU67.TIMER
151725 ± 4% -13.4% 131410 ± 5% softirqs.CPU68.TIMER
149529 -11.3% 132690 ± 6% softirqs.CPU70.TIMER
34610 ± 4% -9.6% 31273 ± 3% softirqs.CPU75.RCU
158786 ± 4% -18.8% 128867 ± 6% softirqs.CPU75.TIMER
151385 ± 4% -15.1% 128505 ± 4% softirqs.CPU77.TIMER
149154 ± 3% -13.0% 129780 ± 4% softirqs.CPU79.TIMER
153403 ± 5% -12.9% 133602 ± 5% softirqs.CPU83.TIMER
152849 ± 6% -11.2% 135697 ± 4% softirqs.CPU84.TIMER
612122 ± 8% -17.9% 502274 ± 2% softirqs.SCHED
13993264 ± 3% -11.5% 12384879 ± 5% softirqs.TIMER
27.18 +3.9% 28.24 perf-stat.i.MPKI
7.958e+09 -6.3% 7.459e+09 ± 3% perf-stat.i.branch-instructions
0.24 ± 26% -0.1 0.12 ± 5% perf-stat.i.branch-miss-rate%
12550714 ± 2% -39.9% 7546804 perf-stat.i.branch-misses
61.09 +5.7 66.82 perf-stat.i.cache-miss-rate%
5.434e+08 +9.3% 5.937e+08 ± 3% perf-stat.i.cache-misses
8.066e+08 +5.6% 8.52e+08 ± 3% perf-stat.i.cache-references
1046 ± 2% -3.7% 1007 perf-stat.i.context-switches
7.30 +6.2% 7.75 ± 3% perf-stat.i.cpi
2.348e+11 +2.3% 2.404e+11 perf-stat.i.cpu-cycles
49.30 ± 5% +15.9% 57.14 ± 4% perf-stat.i.cpu-migrations
1645 ± 64% -64.1% 590.65 perf-stat.i.cycles-between-cache-misses
5.16 +0.8 5.94 perf-stat.i.dTLB-load-miss-rate%
4.801e+08 +14.5% 5.495e+08 ± 3% perf-stat.i.dTLB-load-misses
9.535e+09 -4.4% 9.111e+09 ± 3% perf-stat.i.dTLB-loads
2.812e+09 +5.4% 2.964e+09 ± 3% perf-stat.i.dTLB-stores
89.44 +2.9 92.36 perf-stat.i.iTLB-load-miss-rate%
1043185 ± 3% -30.3% 726967 perf-stat.i.iTLB-load-misses
3.402e+10 -5.9% 3.201e+10 ± 3% perf-stat.i.instructions
242803 ± 27% -39.5% 146788 ± 3% perf-stat.i.instructions-per-iTLB-miss
0.15 -9.1% 0.14 ± 2% perf-stat.i.ipc
378192 -44.1% 211320 perf-stat.i.minor-faults
1.515e+08 ± 7% +54.7% 2.344e+08 ± 22% perf-stat.i.node-load-misses
2306246 -45.6% 1255650 ± 2% perf-stat.i.node-store-misses
1509272 ± 2% -42.0% 874825 ± 3% perf-stat.i.node-stores
378199 -44.1% 211328 perf-stat.i.page-faults
23.60 +12.5% 26.55 perf-stat.overall.MPKI
0.16 ± 2% -0.1 0.10 ± 3% perf-stat.overall.branch-miss-rate%
67.33 +2.3 69.66 perf-stat.overall.cache-miss-rate%
6.89 +9.0% 7.51 ± 3% perf-stat.overall.cpi
433.61 -6.4% 405.91 ± 3% perf-stat.overall.cycles-between-cache-misses
4.77 +0.9 5.67 perf-stat.overall.dTLB-load-miss-rate%
32563 ± 4% +34.8% 43901 ± 3% perf-stat.overall.instructions-per-iTLB-miss
0.15 -8.1% 0.13 ± 3% perf-stat.overall.ipc
8588 +10.3% 9470 perf-stat.overall.path-length
7.954e+09 -6.4% 7.444e+09 ± 3% perf-stat.ps.branch-instructions
12645894 ± 3% -39.9% 7603958 perf-stat.ps.branch-misses
5.401e+08 +9.4% 5.909e+08 ± 3% perf-stat.ps.cache-misses
8.022e+08 +5.7% 8.482e+08 ± 3% perf-stat.ps.cache-references
1044 ± 2% -3.7% 1006 perf-stat.ps.context-switches
2.342e+11 +2.3% 2.396e+11 perf-stat.ps.cpu-cycles
49.11 ± 5% +16.0% 56.98 ± 4% perf-stat.ps.cpu-migrations
4.769e+08 +14.7% 5.468e+08 ± 3% perf-stat.ps.dTLB-load-misses
9.524e+09 -4.6% 9.09e+09 ± 3% perf-stat.ps.dTLB-loads
2.799e+09 +5.5% 2.953e+09 ± 3% perf-stat.ps.dTLB-stores
1045456 ± 3% -30.4% 727863 perf-stat.ps.iTLB-load-misses
3.4e+10 -6.0% 3.195e+10 ± 2% perf-stat.ps.instructions
379246 -44.1% 212102 perf-stat.ps.minor-faults
1.506e+08 ± 7% +54.9% 2.333e+08 ± 22% perf-stat.ps.node-load-misses
2327702 -45.6% 1266384 ± 2% perf-stat.ps.node-store-misses
1523195 ± 2% -42.1% 882438 ± 3% perf-stat.ps.node-stores
379246 -44.1% 212103 perf-stat.ps.page-faults
1.229e+13 -15.5% 1.039e+13 ± 3% perf-stat.total.instructions
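A note on perf-stat.overall.path-length above: it appears to be instructions per unit of work, i.e. perf-stat.total.instructions divided by vm-scalability.workload. Checking against this report:

  1.229e+13 / 1.431e+09 ~= 8588   (parent commit 63228b974a)
  1.039e+13 / 1.097e+09 ~= 9470   (8bb3c61baf, +10.3%)

which matches the path-length row: each unit of work costs roughly 10% more instructions after the commit.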
24.07 ± 6% -3.6 20.48 ± 3% perf-profile.calltrace.cycles-pp.swapgs_restore_regs_and_return_to_usermode
19.40 ± 6% -2.7 16.74 ± 5% perf-profile.calltrace.cycles-pp.prepare_exit_to_usermode.swapgs_restore_regs_and_return_to_usermode
2.27 ± 21% -1.4 0.84 ± 62% perf-profile.calltrace.cycles-pp.ret_from_fork
2.27 ± 21% -1.4 0.84 ± 62% perf-profile.calltrace.cycles-pp.kthread.ret_from_fork
1.92 ± 30% -1.3 0.65 ± 62% perf-profile.calltrace.cycles-pp.process_one_work.worker_thread.kthread.ret_from_fork
1.93 ± 29% -1.3 0.67 ± 64% perf-profile.calltrace.cycles-pp.worker_thread.kthread.ret_from_fork
1.08 ± 14% -0.5 0.56 ± 62% perf-profile.calltrace.cycles-pp.mga_dirty_update.soft_cursor.bit_cursor.fb_flashcursor.process_one_work
1.08 ± 14% -0.5 0.57 ± 62% perf-profile.calltrace.cycles-pp.fb_flashcursor.process_one_work.worker_thread.kthread.ret_from_fork
1.08 ± 14% -0.5 0.57 ± 62% perf-profile.calltrace.cycles-pp.bit_cursor.fb_flashcursor.process_one_work.worker_thread.kthread
1.08 ± 14% -0.5 0.57 ± 62% perf-profile.calltrace.cycles-pp.soft_cursor.bit_cursor.fb_flashcursor.process_one_work.worker_thread
1.00 ± 11% -0.5 0.54 ± 64% perf-profile.calltrace.cycles-pp.memcpy_toio.mga_dirty_update.soft_cursor.bit_cursor.fb_flashcursor
0.71 ± 15% -0.4 0.26 ±100% perf-profile.calltrace.cycles-pp.__GI___libc_write.__libc_start_main
0.69 ± 14% -0.4 0.26 ±100% perf-profile.calltrace.cycles-pp.do_syscall_64.entry_SYSCALL_64_after_hwframe.__GI___libc_write.__libc_start_main
0.69 ± 14% -0.4 0.26 ±100% perf-profile.calltrace.cycles-pp.entry_SYSCALL_64_after_hwframe.__GI___libc_write.__libc_start_main
0.68 ± 15% -0.4 0.26 ±100% perf-profile.calltrace.cycles-pp.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe.__GI___libc_write.__libc_start_main
0.68 ± 15% -0.4 0.26 ±100% perf-profile.calltrace.cycles-pp.vfs_write.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe.__GI___libc_write
0.68 ± 15% -0.4 0.26 ±100% perf-profile.calltrace.cycles-pp.new_sync_write.vfs_write.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe
0.68 ± 15% -0.4 0.26 ±100% perf-profile.calltrace.cycles-pp.generic_file_write_iter.new_sync_write.vfs_write.ksys_write.do_syscall_64
0.67 ± 16% -0.4 0.26 ±100% perf-profile.calltrace.cycles-pp.__generic_file_write_iter.generic_file_write_iter.new_sync_write.vfs_write.ksys_write
0.67 ± 15% -0.4 0.26 ±100% perf-profile.calltrace.cycles-pp.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.new_sync_write.vfs_write
2.78 ± 7% -0.3 2.49 ± 5% perf-profile.calltrace.cycles-pp.update_curr.task_tick_fair.scheduler_tick.update_process_times.tick_sched_handle
0.95 ± 10% +0.3 1.26 ± 11% perf-profile.calltrace.cycles-pp.note_gp_changes.rcu_core.__softirqentry_text_start.irq_exit.smp_apic_timer_interrupt
0.15 ±173% +0.5 0.68 ± 13% perf-profile.calltrace.cycles-pp.rb_next.timerqueue_del.__remove_hrtimer.__hrtimer_run_queues.hrtimer_interrupt
0.94 ± 33% +0.7 1.64 ± 7% perf-profile.calltrace.cycles-pp.timerqueue_del.__remove_hrtimer.__hrtimer_run_queues.hrtimer_interrupt.smp_apic_timer_interrupt
1.88 ± 9% +0.8 2.66 ± 13% perf-profile.calltrace.cycles-pp.ktime_get_update_offsets_now.hrtimer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
1.34 ± 26% +0.8 2.12 ± 9% perf-profile.calltrace.cycles-pp.__remove_hrtimer.__hrtimer_run_queues.hrtimer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
0.81 ± 38% +1.3 2.12 ± 25% perf-profile.calltrace.cycles-pp.io_serial_in.wait_for_xmitr.serial8250_console_putchar.uart_console_write.serial8250_console_write
0.60 ±104% +1.8 2.44 ± 25% perf-profile.calltrace.cycles-pp.irq_work_interrupt
0.60 ±104% +1.8 2.44 ± 25% perf-profile.calltrace.cycles-pp.smp_irq_work_interrupt.irq_work_interrupt
0.58 ±105% +1.9 2.44 ± 25% perf-profile.calltrace.cycles-pp.irq_work_run_list.irq_work_run.smp_irq_work_interrupt.irq_work_interrupt
0.58 ±105% +1.9 2.44 ± 25% perf-profile.calltrace.cycles-pp.irq_work_run.smp_irq_work_interrupt.irq_work_interrupt
0.78 ±100% +2.0 2.76 ± 24% perf-profile.calltrace.cycles-pp.printk.irq_work_run_list.irq_work_run.smp_irq_work_interrupt.irq_work_interrupt
0.78 ±100% +2.1 2.89 ± 22% perf-profile.calltrace.cycles-pp.vprintk_emit.printk.irq_work_run_list.irq_work_run.smp_irq_work_interrupt
4.43 ± 27% +2.2 6.61 ± 13% perf-profile.calltrace.cycles-pp.interrupt_entry
24.11 ± 6% -3.6 20.52 ± 3% perf-profile.children.cycles-pp.swapgs_restore_regs_and_return_to_usermode
19.46 ± 6% -2.6 16.82 ± 5% perf-profile.children.cycles-pp.prepare_exit_to_usermode
2.32 ± 21% -1.3 0.98 ± 36% perf-profile.children.cycles-pp.ret_from_fork
2.27 ± 21% -1.3 0.96 ± 37% perf-profile.children.cycles-pp.kthread
1.92 ± 30% -1.2 0.71 ± 45% perf-profile.children.cycles-pp.process_one_work
1.93 ± 29% -1.2 0.73 ± 47% perf-profile.children.cycles-pp.worker_thread
5.72 ± 11% -0.9 4.81 ± 4% perf-profile.children.cycles-pp.entry_SYSCALL_64_after_hwframe
5.71 ± 12% -0.9 4.80 ± 4% perf-profile.children.cycles-pp.do_syscall_64
1.43 ± 21% -0.6 0.86 ± 13% perf-profile.children.cycles-pp.page_fault
1.32 ± 22% -0.5 0.79 ± 14% perf-profile.children.cycles-pp.do_page_fault
1.30 ± 22% -0.5 0.78 ± 15% perf-profile.children.cycles-pp.__do_page_fault
1.08 ± 14% -0.5 0.61 ± 46% perf-profile.children.cycles-pp.memcpy_toio
1.08 ± 14% -0.5 0.61 ± 47% perf-profile.children.cycles-pp.mga_dirty_update
1.08 ± 14% -0.5 0.61 ± 46% perf-profile.children.cycles-pp.fb_flashcursor
1.08 ± 14% -0.5 0.61 ± 46% perf-profile.children.cycles-pp.bit_cursor
1.08 ± 14% -0.5 0.61 ± 46% perf-profile.children.cycles-pp.soft_cursor
1.19 ± 26% -0.4 0.76 ± 12% perf-profile.children.cycles-pp.handle_mm_fault
3.50 ± 8% -0.4 3.11 ± 5% perf-profile.children.cycles-pp.native_irq_return_iret
0.89 ± 10% -0.3 0.60 ± 11% perf-profile.children.cycles-pp.ksys_write
0.88 ± 10% -0.3 0.60 ± 11% perf-profile.children.cycles-pp.vfs_write
0.87 ± 9% -0.3 0.59 ± 12% perf-profile.children.cycles-pp.new_sync_write
0.71 ± 14% -0.2 0.47 ± 14% perf-profile.children.cycles-pp.__GI___libc_write
0.68 ± 15% -0.2 0.46 ± 21% perf-profile.children.cycles-pp.generic_file_write_iter
0.62 ± 22% -0.2 0.42 ± 18% perf-profile.children.cycles-pp.__libc_fork
0.41 ± 25% -0.2 0.23 ± 25% perf-profile.children.cycles-pp.filemap_map_pages
0.36 ± 33% -0.2 0.20 ± 39% perf-profile.children.cycles-pp.select_task_rq_fair
0.28 ± 61% -0.1 0.14 ± 28% perf-profile.children.cycles-pp.alloc_pages_vma
0.24 ± 42% -0.1 0.10 ± 27% perf-profile.children.cycles-pp.clear_page_erms
0.24 ± 42% -0.1 0.11 ± 24% perf-profile.children.cycles-pp.prep_new_page
0.26 ± 31% -0.1 0.15 ± 22% perf-profile.children.cycles-pp.wp_page_copy
0.27 ± 27% -0.1 0.17 ± 24% perf-profile.children.cycles-pp.do_wp_page
0.20 ± 19% -0.1 0.11 ± 37% perf-profile.children.cycles-pp.select_idle_sibling
0.14 ± 16% -0.1 0.05 ±103% perf-profile.children.cycles-pp.__do_sys_wait4
0.15 ± 32% -0.1 0.07 ± 22% perf-profile.children.cycles-pp.available_idle_cpu
0.20 ± 25% -0.1 0.12 ± 39% perf-profile.children.cycles-pp.iov_iter_fault_in_readable
0.17 ± 14% -0.1 0.10 ± 31% perf-profile.children.cycles-pp.wait4
0.23 ± 8% -0.1 0.17 ± 15% perf-profile.children.cycles-pp.dup_mm
0.18 ± 34% -0.1 0.11 ± 22% perf-profile.children.cycles-pp.walk_component
0.11 ± 25% -0.1 0.04 ±106% perf-profile.children.cycles-pp.alloc_set_pte
0.18 ± 18% -0.1 0.11 ± 36% perf-profile.children.cycles-pp.__wake_up_common_lock
0.18 ± 18% -0.1 0.11 ± 36% perf-profile.children.cycles-pp.__wake_up_common
0.10 ± 30% -0.0 0.05 ± 62% perf-profile.children.cycles-pp.find_vma
0.13 ± 12% -0.0 0.10 ± 22% perf-profile.children.cycles-pp.free_pgtables
0.08 ± 29% +0.0 0.12 ± 16% perf-profile.children.cycles-pp.tlb_finish_mmu
0.08 ± 29% +0.0 0.12 ± 16% perf-profile.children.cycles-pp.tlb_flush_mmu
0.07 ± 25% +0.1 0.12 ± 26% perf-profile.children.cycles-pp.__note_gp_changes
0.07 ± 77% +0.1 0.18 ± 32% perf-profile.children.cycles-pp.intel_pmu_disable_all
0.05 ± 70% +0.1 0.18 ± 58% perf-profile.children.cycles-pp.change_prot_numa
0.39 ± 25% +0.1 0.52 ± 14% perf-profile.children.cycles-pp.rb_erase
0.06 ± 77% +0.1 0.19 ± 48% perf-profile.children.cycles-pp.change_p4d_range
0.09 ± 20% +0.1 0.23 ± 47% perf-profile.children.cycles-pp.change_protection
0.57 ± 10% +0.2 0.72 ± 12% perf-profile.children.cycles-pp.__hrtimer_next_event_base
0.38 ± 10% +0.2 0.61 ± 15% perf-profile.children.cycles-pp.rcu_irq_enter
0.95 ± 10% +0.3 1.27 ± 12% perf-profile.children.cycles-pp.note_gp_changes
0.42 ± 29% +0.4 0.79 ± 14% perf-profile.children.cycles-pp.rb_next
0.97 ± 32% +0.7 1.65 ± 7% perf-profile.children.cycles-pp.timerqueue_del
1.34 ± 26% +0.8 2.12 ± 9% perf-profile.children.cycles-pp.__remove_hrtimer
1.89 ± 9% +0.8 2.67 ± 13% perf-profile.children.cycles-pp.ktime_get_update_offsets_now
1.19 ± 50% +1.3 2.53 ± 6% perf-profile.children.cycles-pp.io_serial_in
0.82 ± 94% +2.0 2.81 ± 24% perf-profile.children.cycles-pp.irq_work_interrupt
0.82 ± 94% +2.0 2.81 ± 24% perf-profile.children.cycles-pp.smp_irq_work_interrupt
0.79 ±100% +2.0 2.81 ± 24% perf-profile.children.cycles-pp.irq_work_run
0.78 ±100% +2.0 2.81 ± 24% perf-profile.children.cycles-pp.printk
0.78 ±100% +2.0 2.81 ± 24% perf-profile.children.cycles-pp.vprintk_emit
1.43 ± 92% +2.5 3.95 ± 19% perf-profile.children.cycles-pp.irq_work_run_list
6.58 ± 21% +2.7 9.33 ± 10% perf-profile.children.cycles-pp.interrupt_entry
17.55 ± 8% -2.3 15.23 ± 5% perf-profile.self.cycles-pp.prepare_exit_to_usermode
4.65 ± 9% -1.0 3.70 ± 6% perf-profile.self.cycles-pp.swapgs_restore_regs_and_return_to_usermode
1.08 ± 14% -0.5 0.61 ± 46% perf-profile.self.cycles-pp.memcpy_toio
3.48 ± 8% -0.4 3.09 ± 5% perf-profile.self.cycles-pp.native_irq_return_iret
0.84 ± 17% -0.4 0.46 ± 24% perf-profile.self.cycles-pp.tick_sched_timer
0.24 ± 45% -0.1 0.10 ± 27% perf-profile.self.cycles-pp.clear_page_erms
0.15 ± 32% -0.1 0.07 ± 22% perf-profile.self.cycles-pp.available_idle_cpu
0.10 ± 15% -0.0 0.05 ± 60% perf-profile.self.cycles-pp.iov_iter_fault_in_readable
0.03 ±102% +0.1 0.11 ± 31% perf-profile.self.cycles-pp.intel_pmu_disable_all
0.04 ±115% +0.1 0.15 ± 52% perf-profile.self.cycles-pp.change_p4d_range
0.38 ± 22% +0.1 0.51 ± 15% perf-profile.self.cycles-pp.rb_erase
0.55 ± 10% +0.2 0.71 ± 13% perf-profile.self.cycles-pp.__hrtimer_next_event_base
0.19 ± 53% +0.2 0.36 ± 8% perf-profile.self.cycles-pp.timerqueue_del
0.36 ± 14% +0.2 0.61 ± 15% perf-profile.self.cycles-pp.rcu_irq_enter
0.41 ± 33% +0.4 0.79 ± 14% perf-profile.self.cycles-pp.rb_next
0.98 ± 33% +0.5 1.45 ± 10% perf-profile.self.cycles-pp.smp_apic_timer_interrupt
1.19 ± 26% +0.7 1.90 ± 18% perf-profile.self.cycles-pp.ktime_get_update_offsets_now
1.19 ± 50% +1.1 2.29 ± 16% perf-profile.self.cycles-pp.io_serial_in
6.58 ± 21% +2.7 9.30 ± 10% perf-profile.self.cycles-pp.interrupt_entry
298.00 ± 32% -39.8% 179.50 ± 9% interrupts.37:PCI-MSI.1572868-edge.eth0-TxRx-4
290.75 ± 19% -31.2% 200.00 ± 22% interrupts.42:PCI-MSI.1572873-edge.eth0-TxRx-9
283.75 ± 41% -31.9% 193.25 ± 14% interrupts.46:PCI-MSI.1572877-edge.eth0-TxRx-13
177.50 -10.4% 159.00 interrupts.49:PCI-MSI.1572880-edge.eth0-TxRx-16
180.00 ± 2% -11.7% 159.00 interrupts.50:PCI-MSI.1572881-edge.eth0-TxRx-17
177.00 -9.7% 159.75 interrupts.51:PCI-MSI.1572882-edge.eth0-TxRx-18
177.00 -10.2% 159.00 interrupts.52:PCI-MSI.1572883-edge.eth0-TxRx-19
178.00 ± 2% -10.7% 159.00 interrupts.53:PCI-MSI.1572884-edge.eth0-TxRx-20
179.50 ± 2% -8.9% 163.50 ± 4% interrupts.54:PCI-MSI.1572885-edge.eth0-TxRx-21
177.00 -9.9% 159.50 interrupts.55:PCI-MSI.1572886-edge.eth0-TxRx-22
177.00 -10.2% 159.00 interrupts.56:PCI-MSI.1572887-edge.eth0-TxRx-23
177.00 -8.6% 161.75 ± 2% interrupts.57:PCI-MSI.1572888-edge.eth0-TxRx-24
177.00 -8.8% 161.50 ± 2% interrupts.58:PCI-MSI.1572889-edge.eth0-TxRx-25
177.00 -8.1% 162.75 ± 3% interrupts.59:PCI-MSI.1572890-edge.eth0-TxRx-26
177.00 -10.2% 159.00 interrupts.60:PCI-MSI.1572891-edge.eth0-TxRx-27
177.00 -10.2% 159.00 interrupts.61:PCI-MSI.1572892-edge.eth0-TxRx-28
177.00 -10.0% 159.25 interrupts.62:PCI-MSI.1572893-edge.eth0-TxRx-29
177.00 -10.2% 159.00 interrupts.63:PCI-MSI.1572894-edge.eth0-TxRx-30
177.00 -10.0% 159.25 interrupts.64:PCI-MSI.1572895-edge.eth0-TxRx-31
177.00 -10.0% 159.25 interrupts.65:PCI-MSI.1572896-edge.eth0-TxRx-32
177.00 -10.2% 159.00 interrupts.66:PCI-MSI.1572897-edge.eth0-TxRx-33
177.00 -10.2% 159.00 interrupts.67:PCI-MSI.1572898-edge.eth0-TxRx-34
177.00 -10.2% 159.00 interrupts.68:PCI-MSI.1572899-edge.eth0-TxRx-35
177.00 -10.2% 159.00 interrupts.69:PCI-MSI.1572900-edge.eth0-TxRx-36
182.50 ± 4% -12.9% 159.00 interrupts.70:PCI-MSI.1572901-edge.eth0-TxRx-37
177.00 -9.6% 160.00 interrupts.71:PCI-MSI.1572902-edge.eth0-TxRx-38
177.00 -10.2% 159.00 interrupts.72:PCI-MSI.1572903-edge.eth0-TxRx-39
177.00 -7.6% 163.50 ± 4% interrupts.73:PCI-MSI.1572904-edge.eth0-TxRx-40
177.00 -10.2% 159.00 interrupts.74:PCI-MSI.1572905-edge.eth0-TxRx-41
179.75 ± 3% -11.5% 159.00 interrupts.75:PCI-MSI.1572906-edge.eth0-TxRx-42
177.00 -10.2% 159.00 interrupts.76:PCI-MSI.1572907-edge.eth0-TxRx-43
178.25 -7.9% 164.25 ± 4% interrupts.77:PCI-MSI.1572908-edge.eth0-TxRx-44
177.00 -9.3% 160.50 interrupts.78:PCI-MSI.1572909-edge.eth0-TxRx-45
177.00 -10.2% 159.00 interrupts.79:PCI-MSI.1572910-edge.eth0-TxRx-46
177.00 -9.5% 160.25 interrupts.80:PCI-MSI.1572911-edge.eth0-TxRx-47
177.00 -10.2% 159.00 interrupts.81:PCI-MSI.1572912-edge.eth0-TxRx-48
177.00 -10.2% 159.00 interrupts.83:PCI-MSI.1572914-edge.eth0-TxRx-50
177.00 -9.7% 159.75 interrupts.84:PCI-MSI.1572915-edge.eth0-TxRx-51
188.75 ± 5% -13.0% 164.25 ± 5% interrupts.85:PCI-MSI.1572916-edge.eth0-TxRx-52
225.75 ± 38% -29.6% 159.00 interrupts.86:PCI-MSI.1572917-edge.eth0-TxRx-53
181.75 ± 5% -12.4% 159.25 interrupts.87:PCI-MSI.1572918-edge.eth0-TxRx-54
179.75 ± 2% -9.7% 162.25 ± 3% interrupts.88:PCI-MSI.1572919-edge.eth0-TxRx-55
181.00 ± 4% -12.2% 159.00 interrupts.89:PCI-MSI.1572920-edge.eth0-TxRx-56
177.00 -10.2% 159.00 interrupts.90:PCI-MSI.1572921-edge.eth0-TxRx-57
177.00 -10.2% 159.00 interrupts.91:PCI-MSI.1572922-edge.eth0-TxRx-58
177.00 -7.8% 163.25 ± 4% interrupts.92:PCI-MSI.1572923-edge.eth0-TxRx-59
177.00 -10.2% 159.00 interrupts.93:PCI-MSI.1572924-edge.eth0-TxRx-60
177.25 -8.5% 162.25 ± 2% interrupts.94:PCI-MSI.1572925-edge.eth0-TxRx-61
177.00 -9.7% 159.75 interrupts.95:PCI-MSI.1572926-edge.eth0-TxRx-62
448.75 ± 2% -11.1% 398.75 interrupts.9:IO-APIC.9-fasteoi.acpi
358186 -9.5% 324307 interrupts.CAL:Function_call_interrupts
448.75 ± 2% -11.1% 398.75 interrupts.CPU1.9:IO-APIC.9-fasteoi.acpi
4090 ± 4% -11.0% 3641 ± 2% interrupts.CPU1.CAL:Function_call_interrupts
4094 ± 3% -10.4% 3670 ± 4% interrupts.CPU10.CAL:Function_call_interrupts
4086 ± 3% -20.1% 3267 ± 26% interrupts.CPU11.CAL:Function_call_interrupts
4118 ± 2% -9.4% 3729 interrupts.CPU12.CAL:Function_call_interrupts
283.75 ± 41% -31.9% 193.25 ± 14% interrupts.CPU13.46:PCI-MSI.1572877-edge.eth0-TxRx-13
4136 ± 3% -10.6% 3699 interrupts.CPU13.CAL:Function_call_interrupts
4080 ± 3% -8.5% 3732 interrupts.CPU14.CAL:Function_call_interrupts
4075 ± 3% -8.5% 3727 interrupts.CPU15.CAL:Function_call_interrupts
177.50 -10.4% 159.00 interrupts.CPU16.49:PCI-MSI.1572880-edge.eth0-TxRx-16
4096 ± 3% -9.1% 3722 interrupts.CPU16.CAL:Function_call_interrupts
659.50 ± 5% +26.9% 836.75 ± 12% interrupts.CPU16.RES:Rescheduling_interrupts
180.00 ± 2% -11.7% 159.00 interrupts.CPU17.50:PCI-MSI.1572881-edge.eth0-TxRx-17
4059 ± 3% -8.8% 3700 interrupts.CPU17.CAL:Function_call_interrupts
177.00 -9.7% 159.75 interrupts.CPU18.51:PCI-MSI.1572882-edge.eth0-TxRx-18
4057 ± 2% -8.8% 3698 interrupts.CPU18.CAL:Function_call_interrupts
177.00 -10.2% 159.00 interrupts.CPU19.52:PCI-MSI.1572883-edge.eth0-TxRx-19
4082 ± 2% -26.5% 3000 ± 41% interrupts.CPU19.CAL:Function_call_interrupts
4101 ± 3% -9.8% 3701 ± 2% interrupts.CPU2.CAL:Function_call_interrupts
178.00 ± 2% -10.7% 159.00 interrupts.CPU20.53:PCI-MSI.1572884-edge.eth0-TxRx-20
4060 ± 3% -9.0% 3693 interrupts.CPU20.CAL:Function_call_interrupts
555.75 ± 41% +111.9% 1177 ± 58% interrupts.CPU20.RES:Rescheduling_interrupts
179.50 ± 2% -8.9% 163.50 ± 4% interrupts.CPU21.54:PCI-MSI.1572885-edge.eth0-TxRx-21
4053 ± 3% -9.3% 3676 interrupts.CPU21.CAL:Function_call_interrupts
1004 ± 23% -50.7% 495.25 ± 21% interrupts.CPU21.RES:Rescheduling_interrupts
177.00 -9.9% 159.50 interrupts.CPU22.55:PCI-MSI.1572886-edge.eth0-TxRx-22
4033 ± 3% -9.0% 3671 ± 4% interrupts.CPU22.CAL:Function_call_interrupts
177.00 -10.2% 159.00 interrupts.CPU23.56:PCI-MSI.1572887-edge.eth0-TxRx-23
4023 ± 3% -8.0% 3700 interrupts.CPU23.CAL:Function_call_interrupts
596.25 ± 76% -58.0% 250.25 ± 15% interrupts.CPU23.RES:Rescheduling_interrupts
177.00 -8.6% 161.75 ± 2% interrupts.CPU24.57:PCI-MSI.1572888-edge.eth0-TxRx-24
4004 ± 3% -7.6% 3698 interrupts.CPU24.CAL:Function_call_interrupts
177.00 -8.8% 161.50 ± 2% interrupts.CPU25.58:PCI-MSI.1572889-edge.eth0-TxRx-25
4034 ± 3% -9.4% 3656 ± 2% interrupts.CPU25.CAL:Function_call_interrupts
177.00 -8.1% 162.75 ± 3% interrupts.CPU26.59:PCI-MSI.1572890-edge.eth0-TxRx-26
4027 ± 3% -8.1% 3700 interrupts.CPU26.CAL:Function_call_interrupts
177.00 -10.2% 159.00 interrupts.CPU27.60:PCI-MSI.1572891-edge.eth0-TxRx-27
4072 ± 2% -9.2% 3696 interrupts.CPU27.CAL:Function_call_interrupts
177.00 -10.2% 159.00 interrupts.CPU28.61:PCI-MSI.1572892-edge.eth0-TxRx-28
177.00 -10.0% 159.25 interrupts.CPU29.62:PCI-MSI.1572893-edge.eth0-TxRx-29
4068 ± 3% -10.8% 3627 ± 2% interrupts.CPU29.CAL:Function_call_interrupts
4077 ± 3% -19.2% 3293 ± 19% interrupts.CPU3.CAL:Function_call_interrupts
177.00 -10.2% 159.00 interrupts.CPU30.63:PCI-MSI.1572894-edge.eth0-TxRx-30
4085 ± 3% -10.9% 3641 ± 2% interrupts.CPU30.CAL:Function_call_interrupts
177.00 -10.0% 159.25 interrupts.CPU31.64:PCI-MSI.1572895-edge.eth0-TxRx-31
4072 ± 3% -30.4% 2833 ± 50% interrupts.CPU31.CAL:Function_call_interrupts
416.25 ± 42% -67.5% 135.25 ± 50% interrupts.CPU31.RES:Rescheduling_interrupts
177.00 -10.0% 159.25 interrupts.CPU32.65:PCI-MSI.1572896-edge.eth0-TxRx-32
4099 ± 2% -11.2% 3639 ± 2% interrupts.CPU32.CAL:Function_call_interrupts
177.00 -10.2% 159.00 interrupts.CPU33.66:PCI-MSI.1572897-edge.eth0-TxRx-33
4092 ± 2% -11.2% 3635 ± 2% interrupts.CPU33.CAL:Function_call_interrupts
177.00 -10.2% 159.00 interrupts.CPU34.67:PCI-MSI.1572898-edge.eth0-TxRx-34
320.75 ± 30% -34.7% 209.50 ± 30% interrupts.CPU34.RES:Rescheduling_interrupts
177.00 -10.2% 159.00 interrupts.CPU35.68:PCI-MSI.1572899-edge.eth0-TxRx-35
4081 ± 2% -10.6% 3648 interrupts.CPU35.CAL:Function_call_interrupts
177.00 -10.2% 159.00 interrupts.CPU36.69:PCI-MSI.1572900-edge.eth0-TxRx-36
182.50 ± 4% -12.9% 159.00 interrupts.CPU37.70:PCI-MSI.1572901-edge.eth0-TxRx-37
4061 ± 2% -10.5% 3633 ± 3% interrupts.CPU37.CAL:Function_call_interrupts
177.00 -9.6% 160.00 interrupts.CPU38.71:PCI-MSI.1572902-edge.eth0-TxRx-38
4053 ± 2% -10.4% 3631 ± 3% interrupts.CPU38.CAL:Function_call_interrupts
177.00 -10.2% 159.00 interrupts.CPU39.72:PCI-MSI.1572903-edge.eth0-TxRx-39
4037 ± 2% -10.1% 3628 ± 3% interrupts.CPU39.CAL:Function_call_interrupts
337.25 ± 45% -47.1% 178.50 ± 44% interrupts.CPU39.RES:Rescheduling_interrupts
298.00 ± 32% -39.8% 179.50 ± 9% interrupts.CPU4.37:PCI-MSI.1572868-edge.eth0-TxRx-4
4069 ± 3% -9.6% 3680 ± 2% interrupts.CPU4.CAL:Function_call_interrupts
718679 -9.5% 650161 interrupts.CPU4.LOC:Local_timer_interrupts
177.00 -7.6% 163.50 ± 4% interrupts.CPU40.73:PCI-MSI.1572904-edge.eth0-TxRx-40
4039 ± 2% -9.7% 3646 ± 2% interrupts.CPU40.CAL:Function_call_interrupts
274.00 ± 31% -43.0% 156.25 ± 42% interrupts.CPU40.RES:Rescheduling_interrupts
177.00 -10.2% 159.00 interrupts.CPU41.74:PCI-MSI.1572905-edge.eth0-TxRx-41
4058 ± 3% -10.3% 3640 ± 2% interrupts.CPU41.CAL:Function_call_interrupts
179.75 ± 3% -11.5% 159.00 interrupts.CPU42.75:PCI-MSI.1572906-edge.eth0-TxRx-42
4073 ± 3% -10.8% 3634 ± 2% interrupts.CPU42.CAL:Function_call_interrupts
177.00 -10.2% 159.00 interrupts.CPU43.76:PCI-MSI.1572907-edge.eth0-TxRx-43
178.25 -7.9% 164.25 ± 4% interrupts.CPU44.77:PCI-MSI.1572908-edge.eth0-TxRx-44
177.00 -9.3% 160.50 interrupts.CPU45.78:PCI-MSI.1572909-edge.eth0-TxRx-45
7928 -37.7% 4939 ± 34% interrupts.CPU45.NMI:Non-maskable_interrupts
7928 -37.7% 4939 ± 34% interrupts.CPU45.PMI:Performance_monitoring_interrupts
177.00 -10.2% 159.00 interrupts.CPU46.79:PCI-MSI.1572910-edge.eth0-TxRx-46
177.00 -9.5% 160.25 interrupts.CPU47.80:PCI-MSI.1572911-edge.eth0-TxRx-47
7916 -37.5% 4947 ± 34% interrupts.CPU47.NMI:Non-maskable_interrupts
7916 -37.5% 4947 ± 34% interrupts.CPU47.PMI:Performance_monitoring_interrupts
177.00 -10.2% 159.00 interrupts.CPU48.81:PCI-MSI.1572912-edge.eth0-TxRx-48
718707 -9.5% 650597 interrupts.CPU48.LOC:Local_timer_interrupts
7952 -37.8% 4948 ± 34% interrupts.CPU49.NMI:Non-maskable_interrupts
7952 -37.8% 4948 ± 34% interrupts.CPU49.PMI:Performance_monitoring_interrupts
4107 ± 4% -12.3% 3601 ± 3% interrupts.CPU5.CAL:Function_call_interrupts
177.00 -10.2% 159.00 interrupts.CPU50.83:PCI-MSI.1572914-edge.eth0-TxRx-50
177.00 -9.7% 159.75 interrupts.CPU51.84:PCI-MSI.1572915-edge.eth0-TxRx-51
718389 -9.4% 651067 interrupts.CPU51.LOC:Local_timer_interrupts
188.75 ± 5% -13.0% 164.25 ± 5% interrupts.CPU52.85:PCI-MSI.1572916-edge.eth0-TxRx-52
225.75 ± 38% -29.6% 159.00 interrupts.CPU53.86:PCI-MSI.1572917-edge.eth0-TxRx-53
181.75 ± 5% -12.4% 159.25 interrupts.CPU54.87:PCI-MSI.1572918-edge.eth0-TxRx-54
4099 ± 4% -8.9% 3734 ± 4% interrupts.CPU54.CAL:Function_call_interrupts
179.75 ± 2% -9.7% 162.25 ± 3% interrupts.CPU55.88:PCI-MSI.1572919-edge.eth0-TxRx-55
4201 -9.7% 3793 ± 4% interrupts.CPU55.CAL:Function_call_interrupts
181.00 ± 4% -12.2% 159.00 interrupts.CPU56.89:PCI-MSI.1572920-edge.eth0-TxRx-56
4188 -11.6% 3703 ± 6% interrupts.CPU56.CAL:Function_call_interrupts
718672 -9.4% 650886 interrupts.CPU56.LOC:Local_timer_interrupts
177.00 -10.2% 159.00 interrupts.CPU57.90:PCI-MSI.1572921-edge.eth0-TxRx-57
4186 ± 2% -31.1% 2885 ± 50% interrupts.CPU57.CAL:Function_call_interrupts
71.75 ± 42% +120.2% 158.00 ± 32% interrupts.CPU57.RES:Rescheduling_interrupts
177.00 -10.2% 159.00 interrupts.CPU58.91:PCI-MSI.1572922-edge.eth0-TxRx-58
125.75 ± 62% +74.0% 218.75 ± 53% interrupts.CPU58.RES:Rescheduling_interrupts
177.00 -7.8% 163.25 ± 4% interrupts.CPU59.92:PCI-MSI.1572923-edge.eth0-TxRx-59
4169 ± 2% -9.1% 3788 ± 4% interrupts.CPU59.CAL:Function_call_interrupts
4072 ± 3% -11.4% 3610 ± 3% interrupts.CPU6.CAL:Function_call_interrupts
177.00 -10.2% 159.00 interrupts.CPU60.93:PCI-MSI.1572924-edge.eth0-TxRx-60
177.25 -8.5% 162.25 ± 2% interrupts.CPU61.94:PCI-MSI.1572925-edge.eth0-TxRx-61
188.50 ± 71% -65.1% 65.75 ± 42% interrupts.CPU61.RES:Rescheduling_interrupts
177.00 -9.7% 159.75 interrupts.CPU62.95:PCI-MSI.1572926-edge.eth0-TxRx-62
4067 ± 2% -6.0% 3823 ± 3% interrupts.CPU62.CAL:Function_call_interrupts
4183 ± 3% -8.7% 3819 ± 3% interrupts.CPU63.CAL:Function_call_interrupts
4265 -11.9% 3756 ± 5% interrupts.CPU64.CAL:Function_call_interrupts
4275 -11.4% 3786 ± 4% interrupts.CPU65.CAL:Function_call_interrupts
4216 ± 2% -9.7% 3809 ± 3% interrupts.CPU66.CAL:Function_call_interrupts
4259 -9.9% 3837 ± 3% interrupts.CPU67.CAL:Function_call_interrupts
4239 -10.2% 3807 ± 2% interrupts.CPU69.CAL:Function_call_interrupts
4124 ± 3% -13.7% 3558 ± 7% interrupts.CPU7.CAL:Function_call_interrupts
4229 -8.8% 3857 ± 3% interrupts.CPU70.CAL:Function_call_interrupts
4237 -9.8% 3823 ± 3% interrupts.CPU73.CAL:Function_call_interrupts
4230 -9.8% 3816 ± 3% interrupts.CPU74.CAL:Function_call_interrupts
4218 -10.1% 3791 ± 3% interrupts.CPU75.CAL:Function_call_interrupts
4213 -10.5% 3770 ± 4% interrupts.CPU76.CAL:Function_call_interrupts
719037 -9.5% 651067 interrupts.CPU76.LOC:Local_timer_interrupts
4235 -10.4% 3795 ± 3% interrupts.CPU77.CAL:Function_call_interrupts
4103 ± 4% -6.7% 3829 ± 3% interrupts.CPU78.CAL:Function_call_interrupts
4139 ± 5% -8.1% 3803 ± 3% interrupts.CPU79.CAL:Function_call_interrupts
4118 ± 3% -11.7% 3638 ± 3% interrupts.CPU8.CAL:Function_call_interrupts
4247 ± 2% -11.1% 3774 ± 2% interrupts.CPU85.CAL:Function_call_interrupts
4243 ± 2% -11.7% 3746 ± 2% interrupts.CPU86.CAL:Function_call_interrupts
4152 ± 4% -11.1% 3691 ± 2% interrupts.CPU87.CAL:Function_call_interrupts
393.50 ± 86% -73.6% 103.75 ± 65% interrupts.CPU87.RES:Rescheduling_interrupts
290.75 ± 19% -31.2% 200.00 ± 22% interrupts.CPU9.42:PCI-MSI.1572873-edge.eth0-TxRx-9
4148 ± 3% -11.3% 3678 interrupts.CPU9.CAL:Function_call_interrupts
vm-scalability.time.user_time
25600 +-+-----------------------------------------------------------------+
| O O O O O O O
25400 O-+O O O O O O O O O O O O O O O O |
| O |
25200 +-+ |
| |
25000 +-+ |
| |
24800 +-+ |
| |
24600 +-+ |
| .+.. .+..+.. .+..+..+. .+.. |
24400 +-++. + +. +. +..+..+..+.+..+..+ |
| |
24200 +-+-----------------------------------------------------------------+
vm-scalability.time.system_time
6500 +-+------------------------------------------------------------------+
| +.. |
6000 +-+ : +..+..+..+..+ |
5500 +-+ : |
|..+..+..+..+.+.. .+..+..+..+..+..+ |
5000 +-+ +. |
| |
4500 +-+ |
| |
4000 +-+ |
3500 +-+ |
| |
3000 +-+ |
O O O O O O O O |
2500 +-+O-----O--O----O--O--------O--O--O-O--O--O--O--O-----O----O--O-----O
vm-scalability.time.percent_of_cpu_this_job_got
8660 +-+--------------------------O---------------------------------------+
O O O O O O O O O O O O O O O O O O O O |
8640 +-+ O O O O
8620 +-+ |
| |
8600 +-+ |
| |
8580 +-+ |
| |
8560 +-+ |
8540 +-+ .+.. |
|..+..+..+.. +. +.. .+..+ |
8520 +-+ +.+..+.. .+..+.. .+.. + +. |
| +. +. + |
8500 +-+------------------------------------------------------------------+
vm-scalability.time.elapsed_time
360 +-+-------------------------------------------------------------------+
| +..+..+..+..+..+ |
355 +-+ + |
350 +-+ + |
|..+..+..+..+..+..+.+..+..+..+..+..+ |
345 +-+ |
| |
340 +-+ |
| |
335 +-+ |
330 +-+ |
| |
325 O-+ O O O O O O O O O O O O O O O O
| O O O O O O O O |
320 +-+-------------------------------------------------------------------+
vm-scalability.time.elapsed_time.max
360 +-+-------------------------------------------------------------------+
| +..+..+..+..+..+ |
355 +-+ + |
350 +-+ + |
|..+..+..+..+..+..+.+..+..+..+..+..+ |
345 +-+ |
| |
340 +-+ |
| |
335 +-+ |
330 +-+ |
| |
325 O-+ O O O O O O O O O O O O O O O O
| O O O O O O O O |
320 +-+-------------------------------------------------------------------+
vm-scalability.time.maximum_resident_set_size
7e+07 +-+---------------------------------------------------------------+
|..+..+.+..+..+..+.+..+..+..+.+..+..+..+.+..+..+..+ |
6.5e+07 +-+ |
6e+07 +-+ |
| |
5.5e+07 +-+ |
| |
5e+07 +-+ |
| |
4.5e+07 +-+ |
4e+07 +-+ |
| |
3.5e+07 +-+ |
O O O O O O O O O O O O O O O O O O O O O O O O O
3e+07 +-+---------------------------------------------------------------+
vm-scalability.time.minor_page_faults
1.4e+08 +-+---------------------------------------------------------------+
|..+..+.+..+..+..+.+..+..+..+.+..+..+..+.+..+..+..+ |
1.3e+08 +-+ |
1.2e+08 +-+ |
| |
1.1e+08 +-+ |
| |
1e+08 +-+ |
| |
9e+07 +-+ |
8e+07 +-+ |
| |
7e+07 +-+ |
O O O O O O O O O O O O O O O O O O O O O O O O O
6e+07 +-+---------------------------------------------------------------+
vm-scalability.throughput
5e+06 +-+---------------------------------------------------------------+
| |
4.8e+06 +-++..+.+..+..+..+. .+..+.+..+..+..+.+..+..+..+ |
| +..+. |
4.6e+06 +-+ |
4.4e+06 +-+ |
| |
4.2e+06 +-+ |
| |
4e+06 +-+ |
3.8e+06 +-+ |
O O O O O O O O O O O O O O O O O O O |
3.6e+06 +-+ O O O O |
| O O
3.4e+06 +-+---------------------------------------------------------------+
vm-scalability.free_time
50 +-+--------------------------------------------------------------------+
| +.. .+ |
45 +-+ : +..+..+..+. |
| : |
40 +-+ : |
|..+..+..+..+..+..+..+..+..+..+..+..+ |
35 +-+ |
| |
30 +-+ |
| |
25 +-+ |
| |
20 O-+ O O O O O O O O O |
| O O O O O O O O O O O O O O O
15 +-+--------------------------------------------------------------------+
vm-scalability.median
56000 +-+-----------------------------------------------------------------+
| .+..+..+.+..+..+.. .+.+..+..+..+..+..+.+..+..+ |
54000 +-+ +..+. |
52000 +-+ |
| |
50000 +-+ |
48000 +-+ |
| |
46000 +-+ |
44000 +-+ |
O O O O O O O O O O O O O O O O O O O |
42000 +-+ O O O |
40000 +-+ O |
| O O
38000 +-+-----------------------------------------------------------------+
vm-scalability.workload
1.45e+09 +-+--------------------------------------------------------------+
|. +..+..+. .+ +. + +..+ |
1.4e+09 +-+ +. |
1.35e+09 +-+ |
| |
1.3e+09 +-+ |
1.25e+09 +-+ |
| |
1.2e+09 +-+ |
1.15e+09 +-+ |
O O O O O O O O O O O O O O O O O O O |
1.1e+09 +-+ O O O O |
1.05e+09 +-+ O O
| |
1e+09 +-+--------------------------------------------------------------+
[*] bisect-good sample
[O] bisect-bad sample
***************************************************************************************************
lkp-bdw-ep4: 88 threads Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz with 128G memory
=========================================================================================
compiler/cpufreq_governor/kconfig/rootfs/runtime/size/tbox_group/test/testcase/ucode:
gcc-7/performance/x86_64-rhel-7.6/debian-x86_64-2019-05-14.cgz/300s/16G/lkp-bdw-ep4/shm-xread-rand/vm-scalability/0xb000036
commit:
63228b974a ("make shmem_fill_super() static")
8bb3c61baf ("vfs: Convert ramfs, shmem, tmpfs, devtmpfs, rootfs to use the new mount API")
63228b974a6e1e39 8bb3c61bafa8c1cd222ada602bb
---------------- ---------------------------
%stddev %change %stddev
\ | \
54356 -21.8% 42499 vm-scalability.median
4770855 -21.8% 3728764 vm-scalability.throughput
349.31 -7.0% 324.86 vm-scalability.time.elapsed_time
349.31 -7.0% 324.86 vm-scalability.time.elapsed_time.max
65954846 -50.0% 32978141 vm-scalability.time.maximum_resident_set_size
1.362e+08 -50.0% 68103210 vm-scalability.time.minor_page_faults
8530 +1.3% 8643 vm-scalability.time.percent_of_cpu_this_job_got
5405 -51.0% 2646 vm-scalability.time.system_time
24394 +4.3% 25433 vm-scalability.time.user_time
1.432e+09 -21.7% 1.121e+09 vm-scalability.workload
7.168e+08 ± 34% -75.7% 1.742e+08 ± 93% cpuidle.C6.time
907209 ± 47% -78.1% 198824 ± 83% cpuidle.C6.usage
3.70 ± 21% -1.6 2.10 ± 8% mpstat.cpu.all.idle%
17.49 -8.2 9.25 mpstat.cpu.all.sys%
78.81 +9.8 88.65 mpstat.cpu.all.usr%
77.75 +12.9% 87.75 vmstat.cpu.us
63901378 -48.0% 33213370 vmstat.memory.cache
56436353 +63.8% 92422539 vmstat.memory.free
10350080 ± 2% -45.0% 5688023 ± 2% numa-numastat.node0.local_node
10354411 ± 2% -45.0% 5690870 ± 2% numa-numastat.node0.numa_hit
9750290 ± 2% -51.8% 4699438 ± 2% numa-numastat.node1.local_node
9763173 ± 2% -51.7% 4713809 ± 2% numa-numastat.node1.numa_hit
2689 +1.7% 2734 turbostat.Avg_MHz
902180 ± 47% -78.4% 194763 ± 85% turbostat.C6
2.28 ± 34% -1.7 0.58 ± 96% turbostat.C6%
1.35 ± 46% -77.7% 0.30 ±121% turbostat.CPU%c6
30.21 +4.6% 31.60 turbostat.RAMWatt
-72806 +87.6% -136565 sched_debug.cfs_rq:/.spread0.min
1.92 ± 7% +19.6% 2.29 ± 12% sched_debug.cpu.nr_running.max
0.18 ± 9% +22.4% 0.22 ± 10% sched_debug.cpu.nr_running.stddev
121.46 ± 13% -16.7% 101.12 ± 2% sched_debug.cpu.ttwu_count.min
3932 ± 11% -29.4% 2775 ± 9% sched_debug.cpu.ttwu_local.max
615.08 ± 5% -15.7% 518.29 ± 11% sched_debug.cpu.ttwu_local.stddev
20816 ± 5% -7.9% 19164 ± 3% slabinfo.filp.active_objs
20932 ± 5% -8.2% 19211 ± 3% slabinfo.filp.num_objs
269203 -46.8% 143215 slabinfo.radix_tree_node.active_objs
4873 -47.2% 2575 slabinfo.radix_tree_node.active_slabs
272913 -47.2% 144211 slabinfo.radix_tree_node.num_objs
4873 -47.2% 2575 slabinfo.radix_tree_node.num_slabs
1405 ± 5% -12.5% 1229 ± 6% slabinfo.task_group.active_objs
1405 ± 5% -12.5% 1229 ± 6% slabinfo.task_group.num_objs
4915431 -69.3% 1509547 meminfo.Active
4915200 -69.3% 1509312 meminfo.Active(anon)
63839608 -48.0% 33183635 meminfo.Cached
63979230 -49.0% 32623586 meminfo.Committed_AS
6921742 ± 6% +7.8% 7461414 ± 5% meminfo.DirectMap2M
58148546 -46.9% 30898463 meminfo.Inactive
58147241 -46.9% 30897147 meminfo.Inactive(anon)
217284 -34.0% 143504 meminfo.KReclaimable
58092433 -46.9% 30842010 meminfo.Mapped
55756866 +64.5% 91744663 meminfo.MemAvailable
56266443 +64.0% 92291121 meminfo.MemFree
75640767 -47.6% 39616089 meminfo.Memused
964.00 ± 80% -54.8% 435.75 ±162% meminfo.Mlocked
10802723 -48.9% 5519172 meminfo.PageTables
217284 -34.0% 143504 meminfo.SReclaimable
62810185 -48.8% 32154716 meminfo.Shmem
353627 -21.6% 277251 meminfo.Slab
233089 -44.5% 129423 meminfo.max_used_kB
1225495 -69.2% 377122 proc-vmstat.nr_active_anon
1388615 +64.8% 2287986 proc-vmstat.nr_dirty_background_threshold
2780630 +64.8% 4581571 proc-vmstat.nr_dirty_threshold
15965493 -48.0% 8300092 proc-vmstat.nr_file_pages
14060913 +64.1% 23067814 proc-vmstat.nr_free_pages
14545439 -46.9% 7728430 proc-vmstat.nr_inactive_anon
14531845 -46.9% 7714767 proc-vmstat.nr_mapped
240.00 ± 80% -54.7% 108.75 ±163% proc-vmstat.nr_mlock
2701369 -48.9% 1380808 proc-vmstat.nr_page_table_pages
15707875 -48.8% 8042599 proc-vmstat.nr_shmem
54332 -34.0% 35869 proc-vmstat.nr_slab_reclaimable
34085 -1.9% 33436 proc-vmstat.nr_slab_unreclaimable
1225495 -69.2% 377122 proc-vmstat.nr_zone_active_anon
14545439 -46.9% 7728430 proc-vmstat.nr_zone_inactive_anon
20143497 -48.2% 10429002 proc-vmstat.numa_hit
20126277 -48.3% 10411774 proc-vmstat.numa_local
943.75 ±107% +498.7% 5650 ± 93% proc-vmstat.numa_pages_migrated
16499608 -50.0% 8255793 proc-vmstat.pgactivate
20232662 -48.1% 10504886 proc-vmstat.pgalloc_normal
1.371e+08 -49.7% 68940704 proc-vmstat.pgfault
20002795 -48.8% 10235632 ± 3% proc-vmstat.pgfree
943.75 ±107% +498.7% 5650 ± 93% proc-vmstat.pgmigrate_success
155505 ± 3% -9.4% 140961 ± 2% softirqs.CPU1.TIMER
157106 ± 3% -12.6% 137264 ± 6% softirqs.CPU11.TIMER
153765 ± 2% -15.4% 130055 ± 8% softirqs.CPU17.TIMER
162850 ± 9% -14.9% 138618 ± 6% softirqs.CPU19.TIMER
172284 ± 7% -9.0% 156755 ± 4% softirqs.CPU2.TIMER
159844 ± 3% -9.2% 145111 ± 6% softirqs.CPU20.TIMER
36807 ± 3% +15.1% 42364 ± 6% softirqs.CPU22.RCU
162686 ± 2% -9.4% 147337 ± 5% softirqs.CPU3.TIMER
163397 ± 7% -11.9% 143969 ± 6% softirqs.CPU4.TIMER
154428 ± 2% -11.2% 137081 ± 2% softirqs.CPU45.TIMER
163755 ± 10% -13.2% 142160 ± 6% softirqs.CPU48.TIMER
153570 -13.0% 133532 ± 6% softirqs.CPU52.TIMER
154036 ± 2% -12.4% 134900 ± 4% softirqs.CPU53.TIMER
154008 ± 4% -11.7% 136016 ± 6% softirqs.CPU55.TIMER
34006 ± 6% +8.0% 36725 ± 3% softirqs.CPU57.RCU
158723 ± 3% -9.2% 144056 ± 4% softirqs.CPU62.TIMER
162701 ± 4% -7.7% 150237 ± 5% softirqs.CPU64.TIMER
35775 ± 2% +11.7% 39947 ± 4% softirqs.CPU66.RCU
33878 ± 4% +10.9% 37584 ± 5% softirqs.CPU70.RCU
150699 ± 4% -6.8% 140491 ± 6% softirqs.CPU72.TIMER
154115 ± 3% -13.0% 134119 ± 7% softirqs.CPU8.TIMER
153577 ± 3% -12.2% 134832 ± 5% softirqs.CPU9.TIMER
582521 ± 5% -15.8% 490718 ± 3% softirqs.SCHED
2510138 -68.8% 782461 ± 7% numa-meminfo.node0.Active
2509964 -68.8% 782225 ± 7% numa-meminfo.node0.Active(anon)
32261712 -46.4% 17291935 ± 2% numa-meminfo.node0.FilePages
29358600 -45.2% 16101644 ± 2% numa-meminfo.node0.Inactive
29357617 -45.2% 16100328 ± 2% numa-meminfo.node0.Inactive(anon)
29327818 -45.3% 16037347 ± 2% numa-meminfo.node0.Mapped
27396097 ± 2% +63.0% 44648994 numa-meminfo.node0.MemFree
38489693 -44.8% 21236797 ± 3% numa-meminfo.node0.MemUsed
550.50 ± 82% -54.1% 252.75 ±161% numa-meminfo.node0.Mlocked
5650587 ± 7% -40.3% 3372589 ± 8% numa-meminfo.node0.PageTables
31739245 -47.2% 16772234 ± 2% numa-meminfo.node0.Shmem
2446002 -69.8% 738096 ± 7% numa-meminfo.node1.Active
2445944 -69.8% 738096 ± 7% numa-meminfo.node1.Active(anon)
31580012 -49.7% 15892140 ± 2% numa-meminfo.node1.FilePages
28750348 -48.6% 14785203 ± 2% numa-meminfo.node1.Inactive
28750027 -48.6% 14785203 ± 2% numa-meminfo.node1.Inactive(anon)
81497 ± 39% -57.4% 34677 ± 26% numa-meminfo.node1.KReclaimable
6512 ± 13% -16.3% 5453 ± 2% numa-meminfo.node1.KernelStack
28724993 -48.5% 14793096 ± 2% numa-meminfo.node1.Mapped
28876430 +65.0% 47644386 numa-meminfo.node1.MemFree
37144988 -50.5% 18377032 ± 3% numa-meminfo.node1.MemUsed
5144842 ± 7% -58.3% 2144958 ± 13% numa-meminfo.node1.PageTables
81497 ± 39% -57.4% 34677 ± 26% numa-meminfo.node1.SReclaimable
62066 ± 8% -14.3% 53170 ± 5% numa-meminfo.node1.SUnreclaim
31072005 -50.5% 15381867 ± 2% numa-meminfo.node1.Shmem
143564 ± 23% -38.8% 87849 ± 8% numa-meminfo.node1.Slab
627134 -68.8% 195945 ± 7% numa-vmstat.node0.nr_active_anon
8061764 -46.4% 4321607 ± 2% numa-vmstat.node0.nr_file_pages
6852625 ± 2% +62.9% 11163615 numa-vmstat.node0.nr_free_pages
7336090 -45.2% 4023329 ± 2% numa-vmstat.node0.nr_inactive_anon
7328743 -45.3% 4007665 ± 2% numa-vmstat.node0.nr_mapped
137.25 ± 82% -54.5% 62.50 ±163% numa-vmstat.node0.nr_mlock
1412807 ± 7% -40.3% 843137 ± 8% numa-vmstat.node0.nr_page_table_pages
7931148 -47.1% 4191682 ± 2% numa-vmstat.node0.nr_shmem
627133 -68.8% 195945 ± 7% numa-vmstat.node0.nr_zone_active_anon
7336090 -45.2% 4023330 ± 2% numa-vmstat.node0.nr_zone_inactive_anon
10364106 -43.1% 5896146 ± 2% numa-vmstat.node0.numa_hit
10359380 -43.1% 5893189 ± 2% numa-vmstat.node0.numa_local
611156 -69.8% 184871 ± 7% numa-vmstat.node1.nr_active_anon
7891437 -49.7% 3971770 ± 2% numa-vmstat.node1.nr_file_pages
7222570 +64.9% 11912451 numa-vmstat.node1.nr_free_pages
7184265 -48.6% 3694684 ± 2% numa-vmstat.node1.nr_inactive_anon
6512 ± 13% -16.2% 5455 ± 2% numa-vmstat.node1.nr_kernel_stack
7178065 -48.5% 3696721 ± 2% numa-vmstat.node1.nr_mapped
103.00 ± 76% -55.8% 45.50 ±164% numa-vmstat.node1.nr_mlock
1286193 ± 7% -58.3% 536154 ± 13% numa-vmstat.node1.nr_page_table_pages
7764436 -50.5% 3844201 ± 2% numa-vmstat.node1.nr_shmem
20366 ± 39% -57.4% 8668 ± 26% numa-vmstat.node1.nr_slab_reclaimable
15516 ± 8% -14.3% 13292 ± 5% numa-vmstat.node1.nr_slab_unreclaimable
611156 -69.8% 184870 ± 7% numa-vmstat.node1.nr_zone_active_anon
7184265 -48.6% 3694684 ± 2% numa-vmstat.node1.nr_zone_inactive_anon
9814840 -50.0% 4908634 ± 2% numa-vmstat.node1.numa_hit
9652600 -50.8% 4744956 ± 3% numa-vmstat.node1.numa_local
303.25 ± 41% -42.6% 174.00 ± 6% interrupts.41:PCI-MSI.1572872-edge.eth0-TxRx-8
178.00 ± 3% -10.4% 159.50 interrupts.95:PCI-MSI.1572926-edge.eth0-TxRx-62
350658 -7.3% 325134 interrupts.CAL:Function_call_interrupts
4086 ± 2% -10.6% 3651 interrupts.CPU1.CAL:Function_call_interrupts
4151 ± 2% -13.9% 3572 ± 3% interrupts.CPU2.CAL:Function_call_interrupts
151.25 ± 41% +277.2% 570.50 ± 36% interrupts.CPU24.RES:Rescheduling_interrupts
4071 ± 2% -11.0% 3625 interrupts.CPU3.CAL:Function_call_interrupts
356.00 ± 22% -52.4% 169.50 ± 35% interrupts.CPU30.RES:Rescheduling_interrupts
4066 ± 2% -11.0% 3619 interrupts.CPU4.CAL:Function_call_interrupts
443.75 ± 9% -56.2% 194.25 ± 18% interrupts.CPU41.RES:Rescheduling_interrupts
132.25 ± 36% +213.8% 415.00 ± 93% interrupts.CPU48.RES:Rescheduling_interrupts
4067 ± 2% -11.4% 3603 interrupts.CPU5.CAL:Function_call_interrupts
4169 -11.4% 3692 ± 3% interrupts.CPU58.CAL:Function_call_interrupts
4162 -13.4% 3603 ± 3% interrupts.CPU59.CAL:Function_call_interrupts
4048 -10.6% 3620 interrupts.CPU6.CAL:Function_call_interrupts
1193 ± 25% -60.5% 471.25 ± 21% interrupts.CPU6.RES:Rescheduling_interrupts
4154 -9.4% 3763 interrupts.CPU60.CAL:Function_call_interrupts
4147 ± 2% -9.4% 3759 interrupts.CPU61.CAL:Function_call_interrupts
178.00 ± 3% -10.4% 159.50 interrupts.CPU62.95:PCI-MSI.1572926-edge.eth0-TxRx-62
74.50 ± 32% +146.3% 183.50 ± 43% interrupts.CPU66.RES:Rescheduling_interrupts
4038 ± 2% -10.4% 3616 interrupts.CPU7.CAL:Function_call_interrupts
4106 ± 3% -8.2% 3770 interrupts.CPU70.CAL:Function_call_interrupts
4102 ± 3% -8.1% 3767 interrupts.CPU71.CAL:Function_call_interrupts
4118 ± 2% -8.6% 3765 interrupts.CPU72.CAL:Function_call_interrupts
4113 ± 2% -8.6% 3761 interrupts.CPU73.CAL:Function_call_interrupts
4048 ± 4% -7.2% 3755 interrupts.CPU74.CAL:Function_call_interrupts
4101 -8.6% 3750 interrupts.CPU75.CAL:Function_call_interrupts
4087 ± 2% -8.8% 3726 interrupts.CPU76.CAL:Function_call_interrupts
303.25 ± 41% -42.6% 174.00 ± 6% interrupts.CPU8.41:PCI-MSI.1572872-edge.eth0-TxRx-8
4121 ± 2% -8.1% 3788 ± 2% interrupts.CPU81.CAL:Function_call_interrupts
81.50 ± 53% -78.8% 17.25 ± 52% interrupts.CPU81.RES:Rescheduling_interrupts
4215 -10.2% 3783 interrupts.CPU82.CAL:Function_call_interrupts
4198 -9.9% 3782 interrupts.CPU83.CAL:Function_call_interrupts
4130 ± 2% -8.6% 3776 ± 2% interrupts.CPU84.CAL:Function_call_interrupts
4118 ± 4% -9.3% 3736 ± 2% interrupts.CPU85.CAL:Function_call_interrupts
4197 -10.3% 3765 interrupts.CPU86.CAL:Function_call_interrupts
4105 ± 2% -9.0% 3736 ± 2% interrupts.CPU87.CAL:Function_call_interrupts
7.925e+09 -3.5% 7.647e+09 perf-stat.i.branch-instructions
0.20 ± 37% -0.1 0.11 ± 9% perf-stat.i.branch-miss-rate%
10814780 ± 2% -31.4% 7415640 perf-stat.i.branch-misses
61.93 +5.0 66.91 perf-stat.i.cache-miss-rate%
5.56e+08 +9.9% 6.111e+08 perf-stat.i.cache-misses
8.267e+08 +5.8% 8.747e+08 perf-stat.i.cache-references
7.31 +2.7% 7.51 perf-stat.i.cpi
2.363e+11 +1.6% 2.402e+11 perf-stat.i.cpu-cycles
49.67 ± 7% +17.8% 58.50 ± 3% perf-stat.i.cpu-migrations
1035 ± 48% -44.0% 579.74 perf-stat.i.cycles-between-cache-misses
5.29 +0.6 5.91 perf-stat.i.dTLB-load-miss-rate%
4.939e+08 +14.1% 5.636e+08 perf-stat.i.dTLB-load-misses
9.551e+09 -2.2% 9.346e+09 perf-stat.i.dTLB-loads
2.889e+09 +5.3% 3.041e+09 perf-stat.i.dTLB-stores
88.66 +2.8 91.44 perf-stat.i.iTLB-load-miss-rate%
1048381 ± 2% -31.4% 719167 ± 3% perf-stat.i.iTLB-load-misses
48064 ± 21% -32.9% 32249 ± 12% perf-stat.i.iTLB-loads
3.396e+10 -3.3% 3.282e+10 perf-stat.i.instructions
221052 ± 20% -28.8% 157319 ± 8% perf-stat.i.instructions-per-iTLB-miss
0.15 -6.2% 0.14 perf-stat.i.ipc
388532 -45.8% 210677 perf-stat.i.minor-faults
3.8e+08 ± 6% +9.4% 4.157e+08 ± 2% perf-stat.i.node-loads
33.35 ± 14% -8.7 24.65 ± 3% perf-stat.i.node-store-miss-rate%
2262085 -44.7% 1251208 perf-stat.i.node-store-misses
1533212 ± 2% -43.6% 864523 ± 3% perf-stat.i.node-stores
388536 -45.8% 210682 perf-stat.i.page-faults
24.25 +9.7% 26.59 perf-stat.overall.MPKI
0.14 ± 2% -0.0 0.10 perf-stat.overall.branch-miss-rate%
67.22 +2.6 69.85 perf-stat.overall.cache-miss-rate%
6.94 +5.2% 7.31 perf-stat.overall.cpi
426.12 -7.6% 393.54 perf-stat.overall.cycles-between-cache-misses
4.90 +0.8 5.67 perf-stat.overall.dTLB-load-miss-rate%
32322 ± 3% +40.9% 45552 ± 3% perf-stat.overall.instructions-per-iTLB-miss
0.14 -5.0% 0.14 perf-stat.overall.ipc
8330 +14.3% 9525 perf-stat.overall.path-length
7.918e+09 -3.6% 7.632e+09 perf-stat.ps.branch-instructions
10882606 ± 2% -31.4% 7464205 perf-stat.ps.branch-misses
5.529e+08 +10.0% 6.083e+08 perf-stat.ps.cache-misses
8.226e+08 +5.9% 8.71e+08 perf-stat.ps.cache-references
2.356e+11 +1.6% 2.394e+11 perf-stat.ps.cpu-cycles
49.51 ± 7% +17.8% 58.31 ± 3% perf-stat.ps.cpu-migrations
4.909e+08 +14.3% 5.609e+08 perf-stat.ps.dTLB-load-misses
9.537e+09 -2.2% 9.324e+09 perf-stat.ps.dTLB-loads
2.876e+09 +5.3% 3.029e+09 perf-stat.ps.dTLB-stores
1050462 ± 2% -31.5% 720005 ± 3% perf-stat.ps.iTLB-load-misses
48065 ± 21% -32.8% 32284 ± 12% perf-stat.ps.iTLB-loads
3.392e+10 -3.4% 3.275e+10 perf-stat.ps.instructions
389703 -45.7% 211444 perf-stat.ps.minor-faults
3.777e+08 ± 6% +9.5% 4.137e+08 ± 2% perf-stat.ps.node-loads
2279161 -44.6% 1261817 perf-stat.ps.node-store-misses
1545271 ± 2% -43.6% 871478 ± 3% perf-stat.ps.node-stores
389703 -45.7% 211444 perf-stat.ps.page-faults
1.193e+13 -10.5% 1.067e+13 perf-stat.total.instructions
24.30 ± 9% -5.1 19.16 ± 5% perf-profile.calltrace.cycles-pp.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.smp_apic_timer_interrupt
24.08 ± 9% -5.1 18.95 ± 5% perf-profile.calltrace.cycles-pp.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt
26.38 ± 7% -4.7 21.71 ± 7% perf-profile.calltrace.cycles-pp.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
33.92 ± 6% -3.7 30.24 ± 6% perf-profile.calltrace.cycles-pp.__hrtimer_run_queues.hrtimer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
6.80 ± 9% -1.2 5.60 ± 12% perf-profile.calltrace.cycles-pp.irq_exit.smp_apic_timer_interrupt.apic_timer_interrupt
1.33 ± 35% -1.0 0.36 ±100% perf-profile.calltrace.cycles-pp.worker_thread.kthread.ret_from_fork
1.60 ± 24% -0.9 0.66 ± 59% perf-profile.calltrace.cycles-pp.ret_from_fork
1.60 ± 24% -0.9 0.66 ± 59% perf-profile.calltrace.cycles-pp.kthread.ret_from_fork
1.31 ± 37% -0.9 0.36 ±100% perf-profile.calltrace.cycles-pp.process_one_work.worker_thread.kthread.ret_from_fork
5.61 ± 8% -0.9 4.68 ± 13% perf-profile.calltrace.cycles-pp.__softirqentry_text_start.irq_exit.smp_apic_timer_interrupt.apic_timer_interrupt
2.87 ± 14% -0.9 1.99 ± 8% perf-profile.calltrace.cycles-pp.update_curr.task_tick_fair.scheduler_tick.update_process_times.tick_sched_handle
0.84 ± 20% -0.5 0.29 ±100% perf-profile.calltrace.cycles-pp.fb_flashcursor.process_one_work.worker_thread.kthread.ret_from_fork
0.84 ± 20% -0.5 0.29 ±100% perf-profile.calltrace.cycles-pp.bit_cursor.fb_flashcursor.process_one_work.worker_thread.kthread
0.84 ± 20% -0.5 0.29 ±100% perf-profile.calltrace.cycles-pp.soft_cursor.bit_cursor.fb_flashcursor.process_one_work.worker_thread
0.83 ± 20% -0.5 0.29 ±100% perf-profile.calltrace.cycles-pp.mga_dirty_update.soft_cursor.bit_cursor.fb_flashcursor.process_one_work
1.74 ± 15% -0.5 1.25 ± 4% perf-profile.calltrace.cycles-pp.__update_load_avg_cfs_rq.update_load_avg.task_tick_fair.scheduler_tick.update_process_times
0.77 ± 23% -0.5 0.29 ±100% perf-profile.calltrace.cycles-pp.memcpy_toio.mga_dirty_update.soft_cursor.bit_cursor.fb_flashcursor
0.76 ± 11% -0.4 0.33 ±100% perf-profile.calltrace.cycles-pp.trigger_load_balance.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues
1.50 ± 12% +0.5 1.99 ± 14% perf-profile.calltrace.cycles-pp.__remove_hrtimer.__hrtimer_run_queues.hrtimer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
4.65 ± 12% +2.0 6.65 ± 13% perf-profile.calltrace.cycles-pp.interrupt_entry
24.16 ± 9% -5.2 19.00 ± 5% perf-profile.children.cycles-pp.update_process_times
24.33 ± 9% -5.1 19.18 ± 5% perf-profile.children.cycles-pp.tick_sched_handle
26.44 ± 7% -4.7 21.75 ± 7% perf-profile.children.cycles-pp.tick_sched_timer
34.01 ± 6% -3.7 30.31 ± 6% perf-profile.children.cycles-pp.__hrtimer_run_queues
6.91 ± 8% -1.2 5.69 ± 13% perf-profile.children.cycles-pp.irq_exit
5.69 ± 8% -1.0 4.71 ± 13% perf-profile.children.cycles-pp.__softirqentry_text_start
1.29 ± 18% -0.9 0.35 ±124% perf-profile.children.cycles-pp.wake_up_klogd_work_func
3.02 ± 12% -0.9 2.11 ± 7% perf-profile.children.cycles-pp.update_curr
1.68 ± 22% -0.9 0.82 ± 27% perf-profile.children.cycles-pp.ret_from_fork
1.60 ± 24% -0.8 0.76 ± 28% perf-profile.children.cycles-pp.kthread
1.33 ± 35% -0.8 0.53 ± 39% perf-profile.children.cycles-pp.worker_thread
1.31 ± 37% -0.8 0.53 ± 40% perf-profile.children.cycles-pp.process_one_work
1.77 ± 15% -0.5 1.27 ± 5% perf-profile.children.cycles-pp.__update_load_avg_cfs_rq
0.84 ± 20% -0.4 0.42 ± 43% perf-profile.children.cycles-pp.fb_flashcursor
0.83 ± 20% -0.4 0.42 ± 43% perf-profile.children.cycles-pp.mga_dirty_update
0.84 ± 20% -0.4 0.42 ± 41% perf-profile.children.cycles-pp.bit_cursor
0.84 ± 20% -0.4 0.42 ± 41% perf-profile.children.cycles-pp.soft_cursor
0.83 ± 21% -0.4 0.42 ± 43% perf-profile.children.cycles-pp.memcpy_toio
0.76 ± 11% -0.2 0.52 ± 31% perf-profile.children.cycles-pp.trigger_load_balance
0.34 ± 22% -0.2 0.10 ± 96% perf-profile.children.cycles-pp.fbcon_redraw
0.67 ± 16% -0.2 0.44 ± 12% perf-profile.children.cycles-pp._raw_spin_lock
0.34 ± 22% -0.2 0.12 ± 72% perf-profile.children.cycles-pp.con_scroll
0.34 ± 22% -0.2 0.12 ± 72% perf-profile.children.cycles-pp.fbcon_scroll
0.34 ± 22% -0.2 0.12 ± 72% perf-profile.children.cycles-pp.lf
0.41 ± 18% -0.2 0.26 ± 32% perf-profile.children.cycles-pp.__calc_delta
0.35 ± 19% -0.1 0.22 ± 29% perf-profile.children.cycles-pp.rebalance_domains
0.31 ± 15% -0.1 0.20 ± 28% perf-profile.children.cycles-pp.do_sys_open
0.22 ± 20% -0.1 0.12 ± 17% perf-profile.children.cycles-pp.run_rebalance_domains
0.12 ± 15% -0.1 0.05 ± 60% perf-profile.children.cycles-pp.lookup_fast
0.28 ± 17% -0.1 0.22 ± 19% perf-profile.children.cycles-pp.tick_sched_do_timer
0.14 ± 23% -0.1 0.08 ± 27% perf-profile.children.cycles-pp.__get_user_pages
0.14 ± 23% -0.1 0.08 ± 27% perf-profile.children.cycles-pp.get_user_pages_remote
0.03 ±102% +0.1 0.09 ± 30% perf-profile.children.cycles-pp.alloc_set_pte
0.10 ± 12% +0.1 0.17 ± 28% perf-profile.children.cycles-pp.proc_reg_read
0.21 ± 35% +0.2 0.37 ± 41% perf-profile.children.cycles-pp.perf_event_task_tick
0.40 ± 29% +0.2 0.63 ± 18% perf-profile.children.cycles-pp.rb_erase
1.52 ± 12% +0.5 2.00 ± 14% perf-profile.children.cycles-pp.__remove_hrtimer
2.18 ± 15% -0.7 1.50 ± 4% perf-profile.self.cycles-pp.update_curr
1.59 ± 14% -0.5 1.09 ± 6% perf-profile.self.cycles-pp.__update_load_avg_cfs_rq
1.08 ± 15% -0.5 0.59 ± 44% perf-profile.self.cycles-pp.delay_tsc
0.83 ± 20% -0.4 0.42 ± 43% perf-profile.self.cycles-pp.memcpy_toio
1.69 ± 12% -0.3 1.35 ± 8% perf-profile.self.cycles-pp.task_tick_fair
0.76 ± 11% -0.3 0.49 ± 30% perf-profile.self.cycles-pp.trigger_load_balance
0.32 ± 25% -0.2 0.11 ± 76% perf-profile.self.cycles-pp.sys_imageblit
0.52 ± 11% -0.2 0.37 ± 29% perf-profile.self.cycles-pp.idle_cpu
0.40 ± 22% -0.1 0.25 ± 32% perf-profile.self.cycles-pp.__calc_delta
0.12 ± 26% -0.1 0.04 ±101% perf-profile.self.cycles-pp.update_process_times
0.01 ±173% +0.1 0.10 ± 31% perf-profile.self.cycles-pp.get_page_from_freelist
0.21 ± 35% +0.2 0.37 ± 41% perf-profile.self.cycles-pp.perf_event_task_tick
0.40 ± 29% +0.2 0.63 ± 18% perf-profile.self.cycles-pp.rb_erase
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Rong Chen
On Mon, 9 Sep 2019, Al Viro wrote:
> On Sun, Sep 08, 2019 at 10:46:01PM +0100, Al Viro wrote:
> > On Tue, Sep 03, 2019 at 04:41:22PM +0800, kernel test robot wrote:
> > > Greeting,
> > >
> > > FYI, we noticed a -23.7% regression of vm-scalability.median due to commit:
> > >
> > >
> > > commit: 8bb3c61bafa8c1cd222ada602bb94ff23119e738 ("vfs: Convert ramfs, shmem, tmpfs, devtmpfs, rootfs to use the new mount API")
> > > https://kernel.googlesource.com/pub/scm/linux/kernel/git/viro/vfs.git work.mount
> > >
> > > in testcase: vm-scalability
> > > on test machine: 88 threads Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz with 128G memory
> > > with following parameters:
> > >
> > > runtime: 300s
> > > size: 16G
> > > test: shm-pread-rand
> > > cpufreq_governor: performance
> > > ucode: 0xb000036
> >
> > That thing loses size=... option. Both size= and nr_blocks= affect the
> > same thing (->max_blocks), but the parser keeps track of the options
> > it has seen and applying the parsed data to superblock checks only
> > whether nr_blocks= had been there. IOW, size= gets parsed, but the
> > result goes nowhere.
> >
> > I'm not sure whether it's better to fix the patch up or redo it from
> > scratch - it needs to be carved up anyway and it's highly non-transparent,
> > so I'm probably going to replace the damn thing entirely with something
> > that would be easier to follow.
>
> ... and this
> + { Opt_huge, "deny", SHMEM_HUGE_DENY },
> + { Opt_huge, "force", SHMEM_HUGE_FORCE },
> had been wrong - huge=deny and huge=force should not be accepted _and_
> fs_parameter_enum is not suitable for negative constants right now
> anyway.
Sorry you've been spending time rediscovering these, Al: I sent David
the tmpfs fixes (Cc'ing you and Andrew and lists) a couple of weeks
ago - but had no idea until your mail that the "loss of size" was
behind this vm-scalability regression report.
Ah, not for the first time, I missed saying "[PATCH]" in the subject:
sorry, that may have rendered it invisible to many eyes.
Here's what Andrew has been carrying in the mmotm tree since I sent it:
I'm sure we'd both be happy for you to take it into your tree. I had
expected it to percolate through from mmotm to linux-next by now,
but apparently not.
From: Hugh Dickins <[email protected]>
Subject: tmpfs: fixups to use of the new mount API
Several fixups to shmem_parse_param() and tmpfs use of the new mount API:
mm/shmem.c manages filesystem named "tmpfs": revert "shmem" to "tmpfs"
in its mount error messages.
/sys/kernel/mm/transparent_hugepage/shmem_enabled has valid options
"deny" and "force", but they are not valid as tmpfs "huge" options.
The "size" param is an alternative to "nr_blocks", and needs to be
recognized as changing max_blocks. And where there's ambiguity, it's
better to mention "size" than "nr_blocks" in messages, since "size" is
the variant shown in /proc/mounts.
shmem_apply_options() left ctx->mpol as the new mpol, so then it was
freed in shmem_free_fc(), and the filesystem went on to use-after-free.
Make shmem_parse_param() issue "tmpfs: Bad value for '%s'" messages just
as fs_parse() would, instead of using a different wording. Where config
disables "mpol" or "huge", say "tmpfs: Unsupported parameter '%s'".
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 144df3b288c41 ("vfs: Convert ramfs, shmem, tmpfs, devtmpfs, rootfs to use the new mount API")
Signed-off-by: Hugh Dickins <[email protected]>
Cc: David Howells <[email protected]>
Cc: Al Viro <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---
mm/shmem.c | 80 ++++++++++++++++++++++++++-------------------------
1 file changed, 42 insertions(+), 38 deletions(-)
--- a/mm/shmem.c~tmpfs-fixups-to-use-of-the-new-mount-api
+++ a/mm/shmem.c
@@ -3432,13 +3432,11 @@ static const struct fs_parameter_enum sh
{ Opt_huge, "always", SHMEM_HUGE_ALWAYS },
{ Opt_huge, "within_size", SHMEM_HUGE_WITHIN_SIZE },
{ Opt_huge, "advise", SHMEM_HUGE_ADVISE },
- { Opt_huge, "deny", SHMEM_HUGE_DENY },
- { Opt_huge, "force", SHMEM_HUGE_FORCE },
{}
};
const struct fs_parameter_description shmem_fs_parameters = {
- .name = "shmem",
+ .name = "tmpfs",
.specs = shmem_param_specs,
.enums = shmem_param_enums,
};
@@ -3448,9 +3446,9 @@ static void shmem_apply_options(struct s
unsigned long inodes_in_use)
{
struct shmem_fs_context *ctx = fc->fs_private;
- struct mempolicy *old = NULL;
- if (test_bit(Opt_nr_blocks, &ctx->changes))
+ if (test_bit(Opt_nr_blocks, &ctx->changes) ||
+ test_bit(Opt_size, &ctx->changes))
sbinfo->max_blocks = ctx->max_blocks;
if (test_bit(Opt_nr_inodes, &ctx->changes)) {
sbinfo->max_inodes = ctx->max_inodes;
@@ -3459,8 +3457,11 @@ static void shmem_apply_options(struct s
if (test_bit(Opt_huge, &ctx->changes))
sbinfo->huge = ctx->huge;
if (test_bit(Opt_mpol, &ctx->changes)) {
- old = sbinfo->mpol;
- sbinfo->mpol = ctx->mpol;
+ /*
+ * Update sbinfo->mpol now while stat_lock is held.
+ * Leave shmem_free_fc() to free the old mpol if any.
+ */
+ swap(sbinfo->mpol, ctx->mpol);
}
if (fc->purpose != FS_CONTEXT_FOR_RECONFIGURE) {
@@ -3471,8 +3472,6 @@ static void shmem_apply_options(struct s
if (test_bit(Opt_mode, &ctx->changes))
sbinfo->mode = ctx->mode;
}
-
- mpol_put(old);
}
static int shmem_parse_param(struct fs_context *fc, struct fs_parameter *param)
@@ -3498,7 +3497,7 @@ static int shmem_parse_param(struct fs_c
rest++;
}
if (*rest)
- return invalf(fc, "shmem: Invalid size");
+ goto bad_value;
ctx->max_blocks = DIV_ROUND_UP(size, PAGE_SIZE);
break;
@@ -3506,55 +3505,59 @@ static int shmem_parse_param(struct fs_c
rest = param->string;
ctx->max_blocks = memparse(param->string, &rest);
if (*rest)
- return invalf(fc, "shmem: Invalid nr_blocks");
+ goto bad_value;
break;
+
case Opt_nr_inodes:
rest = param->string;
ctx->max_inodes = memparse(param->string, &rest);
if (*rest)
- return invalf(fc, "shmem: Invalid nr_inodes");
+ goto bad_value;
break;
+
case Opt_mode:
ctx->mode = result.uint_32 & 07777;
break;
+
case Opt_uid:
ctx->uid = make_kuid(current_user_ns(), result.uint_32);
if (!uid_valid(ctx->uid))
- return invalf(fc, "shmem: Invalid uid");
+ goto bad_value;
break;
case Opt_gid:
ctx->gid = make_kgid(current_user_ns(), result.uint_32);
if (!gid_valid(ctx->gid))
- return invalf(fc, "shmem: Invalid gid");
+ goto bad_value;
break;
case Opt_huge:
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
- if (!has_transparent_hugepage() &&
- result.uint_32 != SHMEM_HUGE_NEVER)
- return invalf(fc, "shmem: Huge pages disabled");
-
ctx->huge = result.uint_32;
+ if (ctx->huge != SHMEM_HUGE_NEVER &&
+ !(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+ has_transparent_hugepage()))
+ goto unsupported_parameter;
break;
-#else
- return invalf(fc, "shmem: huge= option disabled");
-#endif
-
- case Opt_mpol: {
-#ifdef CONFIG_NUMA
- struct mempolicy *mpol;
- if (mpol_parse_str(param->string, &mpol))
- return invalf(fc, "shmem: Invalid mpol=");
- mpol_put(ctx->mpol);
- ctx->mpol = mpol;
-#endif
- break;
- }
+
+ case Opt_mpol:
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ struct mempolicy *mpol;
+ if (mpol_parse_str(param->string, &mpol))
+ goto bad_value;
+ mpol_put(ctx->mpol);
+ ctx->mpol = mpol;
+ break;
+ }
+ goto unsupported_parameter;
}
__set_bit(opt, &ctx->changes);
return 0;
+
+unsupported_parameter:
+ return invalf(fc, "tmpfs: Unsupported parameter '%s'", param->key);
+bad_value:
+ return invalf(fc, "tmpfs: Bad value for '%s'", param->key);
}
/*
@@ -3572,14 +3575,15 @@ static int shmem_reconfigure(struct fs_c
unsigned long inodes_in_use;
spin_lock(&sbinfo->stat_lock);
- if (test_bit(Opt_nr_blocks, &ctx->changes)) {
+ if (test_bit(Opt_nr_blocks, &ctx->changes) ||
+ test_bit(Opt_size, &ctx->changes)) {
if (ctx->max_blocks && !sbinfo->max_blocks) {
spin_unlock(&sbinfo->stat_lock);
- return invalf(fc, "shmem: Can't retroactively limit nr_blocks");
+ return invalf(fc, "tmpfs: Cannot retroactively limit size");
}
if (percpu_counter_compare(&sbinfo->used_blocks, ctx->max_blocks) > 0) {
spin_unlock(&sbinfo->stat_lock);
- return invalf(fc, "shmem: Too few blocks for current use");
+ return invalf(fc, "tmpfs: Too small a size for current use");
}
}
@@ -3587,11 +3591,11 @@ static int shmem_reconfigure(struct fs_c
if (test_bit(Opt_nr_inodes, &ctx->changes)) {
if (ctx->max_inodes && !sbinfo->max_inodes) {
spin_unlock(&sbinfo->stat_lock);
- return invalf(fc, "shmem: Can't retroactively limit nr_inodes");
+ return invalf(fc, "tmpfs: Cannot retroactively limit inodes");
}
if (ctx->max_inodes < inodes_in_use) {
spin_unlock(&sbinfo->stat_lock);
- return invalf(fc, "shmem: Too few inodes for current use");
+ return invalf(fc, "tmpfs: Too few inodes for current use");
}
}
_
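With that applied, the lost "size=" is easy to verify from userspace,
e.g. (nothing assumed here beyond stock mount and df):

	mount -t tmpfs -o size=16G tmpfs /mnt
	df -h /mnt	# should report a 16G filesystem, not the half-of-RAM default

since tmpfs falls back to half of RAM when no size is given.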
On Sun, Sep 08, 2019 at 08:10:17PM -0700, Hugh Dickins wrote:
Hmm... FWIW, I'd ended up redoing most of the thing, with
hopefully sane carve-up. Differences:
* we really care only about three things having
been set - ->max_blocks, ->max_inodes and ->huge. This
__set_bit() hack is cute, but asking for trouble (and getting
it). Explicit ctx->seen & SHMEM_SEEN_BLOCKS, etc. is
cleaner.
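Roughly, for illustration (the flag values are invented here; the
SHMEM_SEEN_* names and the ctx->seen/ctx->blocks fields are as in the
diffs further down):

	#define SHMEM_SEEN_BLOCKS	1
	#define SHMEM_SEEN_INODES	2
	#define SHMEM_SEEN_HUGE		4

	case Opt_size:
		size = memparse(param->string, &rest);
		if (*rest)
			goto bad_value;
		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
		ctx->seen |= SHMEM_SEEN_BLOCKS;	/* same flag as nr_blocks= */
		break;

so the apply/reconfigure side only tests ctx->seen & SHMEM_SEEN_BLOCKS,
and never cares which spelling set it.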
*
> const struct fs_parameter_description shmem_fs_parameters = {
> - .name = "shmem",
> + .name = "tmpfs",
> .specs = shmem_param_specs,
> .enums = shmem_param_enums,
> };
Missed that one, will fold.
*
> @@ -3448,9 +3446,9 @@ static void shmem_apply_options(struct s
The whole "apply" thing is useless - in remount we
need to copy max_inode/max_blocks/huge/mpol under the lock after
checks, and we can do that manually just fine. Other options
(uid/gid/mode) get ignored. There's very little overlap
with the fill_super case, really.
> - old = sbinfo->mpol;
> - sbinfo->mpol = ctx->mpol;
> + /*
> + * Update sbinfo->mpol now while stat_lock is held.
> + * Leave shmem_free_fc() to free the old mpol if any.
> + */
> + swap(sbinfo->mpol, ctx->mpol);
Umm... Missed that use-after-free due to destructor, TBH (in
remount, that is). Fixed (in a slightly different way).
> }
> if (*rest)
> - return invalf(fc, "shmem: Invalid size");
> + goto bad_value;
> ctx->max_blocks = DIV_ROUND_UP(size, PAGE_SIZE);
> break;
FWIW, I had those with s/shmem/tmpfs/, no problem with merging like
that. Will fold.
[snip]
> case Opt_huge:
> -#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
> - if (!has_transparent_hugepage() &&
> - result.uint_32 != SHMEM_HUGE_NEVER)
> - return invalf(fc, "shmem: Huge pages disabled");
> -
> ctx->huge = result.uint_32;
> + if (ctx->huge != SHMEM_HUGE_NEVER &&
> + !(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
> + has_transparent_hugepage()))
> + goto unsupported_parameter;
> break;
> -#else
> - return invalf(fc, "shmem: huge= option disabled");
> -#endif
> -
> - case Opt_mpol: {
> -#ifdef CONFIG_NUMA
> - struct mempolicy *mpol;
> - if (mpol_parse_str(param->string, &mpol))
> - return invalf(fc, "shmem: Invalid mpol=");
> - mpol_put(ctx->mpol);
> - ctx->mpol = mpol;
> -#endif
> - break;
> - }
OK...
> + case Opt_mpol:
> + if (IS_ENABLED(CONFIG_NUMA)) {
> + struct mempolicy *mpol;
> + if (mpol_parse_str(param->string, &mpol))
> + goto bad_value;
> + mpol_put(ctx->mpol);
> + ctx->mpol = mpol;
> + break;
> + }
> + goto unsupported_parameter;
Slightly different here - I'd done that bit as
	mpol_put(ctx->mpol);
	ctx->mpol = NULL;
	if (mpol_parse_str(param->string, &ctx->mpol))
		goto bad_value;		/* was: return invalf(...) */
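(The point of clearing ctx->mpol before reparsing: if mpol_parse_str()
fails, ctx->mpol is left NULL rather than stale, so the fs_context
destructor - which, per the description above, puts whatever is still
in ctx->mpol - has nothing dangling to free.)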
> +unsupported_parameter:
> + return invalf(fc, "tmpfs: Unsupported parameter '%s'", param->key);
> +bad_value:
> + return invalf(fc, "tmpfs: Bad value for '%s'", param->key);
> - return invalf(fc, "shmem: Can't retroactively limit nr_blocks");
> + return invalf(fc, "tmpfs: Cannot retroactively limit size");
> }
> if (percpu_counter_compare(&sbinfo->used_blocks, ctx->max_blocks) > 0) {
> spin_unlock(&sbinfo->stat_lock);
> - return invalf(fc, "shmem: Too few blocks for current use");
> + return invalf(fc, "tmpfs: Too small a size for current use");
> }
> }
>
> @@ -3587,11 +3591,11 @@ static int shmem_reconfigure(struct fs_c
> if (test_bit(Opt_nr_inodes, &ctx->changes)) {
> if (ctx->max_inodes && !sbinfo->max_inodes) {
> spin_unlock(&sbinfo->stat_lock);
> - return invalf(fc, "shmem: Can't retroactively limit nr_inodes");
> + return invalf(fc, "tmpfs: Cannot retroactively limit inodes");
> }
> if (ctx->max_inodes < inodes_in_use) {
> spin_unlock(&sbinfo->stat_lock);
> - return invalf(fc, "shmem: Too few inodes for current use");
> + return invalf(fc, "tmpfs: Too few inodes for current use");
> }
> }
s/Can't/Cannot/ and s/few blocks/small a size/? No problem, except that I'd done
err = "Too few inodes for current use";
goto out;
...
out:
return invalf(fc, "tmpfs: %s", err);
Anyway, see vfs.git#uncertain.shmem for what I've got with those folded in.
Do you see any problems with that one? That's the last 5 commits in there...
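(To poke at it without guessing: assuming the usual git.kernel.org path
for that tree,

	git fetch https://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs.git uncertain.shmem
	git log --oneline -5 FETCH_HEAD

lists those last 5 commits.)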
On Mon, 9 Sep 2019, Al Viro wrote:
>
> Anyway, see vfs.git#uncertain.shmem for what I've got with those folded in.
> Do you see any problems with that one? That's the last 5 commits in there...
It's mostly fine, I've no problem with going your way instead of what
we had in mmotm; but I have seen some problems with it, and had been
intending to send you a fixup patch tonight (shmem_reconfigure() missing
unlock on error is the main problem, but there are other fixes needed).
But I'm growing tired. I've a feeling my "swap" of the mpols, instead
of immediate mpol_put(), was necessary to protect against a race with
shmem_get_sbmpol(), but I'm not clear-headed enough to trust myself on
that now. And I've a mystery to solve, that shmem_reconfigure() gets
stuck into showing the wrong error message.
Tomorrow....
Oh, and my first attempt to build and boot that series over 5.3-rc5
wouldn't boot. Luckily there was a tell-tale "i915" in the stacktrace,
which reminded me of the drivers/gpu/drm/i915/gem/i915_gemfs.c fix
we discussed earlier in the cycle. That is of course in linux-next
by now, but I wonder if your branch ought to contain a duplicate of
that fix, so that people with i915 doing bisections on 5.4-rc do not
fall into an unbootable hole between vfs and gpu merges.
Hugh
On Mon, 9 Sep 2019, Hugh Dickins wrote:
> On Mon, 9 Sep 2019, Al Viro wrote:
> >
> > Anyway, see vfs.git#uncertain.shmem for what I've got with those folded in.
> > Do you see any problems with that one? That's the last 5 commits in there...
>
> It's mostly fine, I've no problem with going your way instead of what
> we had in mmotm; but I have seen some problems with it, and had been
> intending to send you a fixup patch tonight (shmem_reconfigure() missing
> unlock on error is the main problem, but there are other fixes needed).
>
> But I'm growing tired. I've a feeling my "swap" of the mpols, instead
> of immediate mpol_put(), was necessary to protect against a race with
> shmem_get_sbmpol(), but I'm not clear-headed enough to trust myself on
> that now. And I've a mystery to solve, that shmem_reconfigure() gets
> stuck into showing the wrong error message.
On my "swap" for the mpol_put(): no, the race against shmem_get_sbmpol()
is safe enough without that, and what you have matches what was always
done before. I rather like my "swap", which the previous double-free had
led me to, but it's fine if you prefer the ordinary way. I was probably
coming down from some over-exposure to iput() under spinlock, but there
are no such complications here.
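(For reference, shmem_get_sbmpol() takes its reference under that same
stat_lock - roughly this, from memory of mm/shmem.c at the time:

	static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
	{
		struct mempolicy *mpol = NULL;

		if (sbinfo->mpol) {
			spin_lock(&sbinfo->stat_lock);	/* prevent replacement of mpol */
			mpol = sbinfo->mpol;
			mpol_get(mpol);			/* ref taken before the lock is dropped */
			spin_unlock(&sbinfo->stat_lock);
		}
		return mpol;
	}

so a concurrent reader either gets the old mpol with a reference already
held, or the new one - with either the swap or the plain assignment, as
long as the replacement happens under stat_lock.)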
>
> Tomorrow....
>
> Oh, and my first attempt to build and boot that series over 5.3-rc5
> wouldn't boot. Luckily there was a tell-tale "i915" in the stacktrace,
> which reminded me of the drivers/gpu/drm/i915/gem/i915_gemfs.c fix
> we discussed earlier in the cycle. That is of course in linux-next
> by now, but I wonder if your branch ought to contain a duplicate of
> that fix, so that people with i915 doing bisections on 5.4-rc do not
> fall into an unbootable hole between vfs and gpu merges.
Below are the fixups I arrived at last night (I've not rechecked your
tree today, to see if you made any changes since). But they're not
enough: I now understand why shmem_reconfigure() got stuck showing
the wrong error message, but I'll have to leave it to you to decide
what to do about it, because I don't know whether it's just a mistake,
or whether different filesystem types have different needs there.
My /etc/fstab has a line in it for one of my test mounts:
tmpfs /tlo tmpfs size=4G 0 0
and that "size=4G" is what causes the problem: because each time
shmem_parse_options(fc, data) is called for a remount, data (that is,
options) points to a string starting with "size=4G,", followed by
what's actually been asked for in the remount options.
So if I try
mount -o remount,size=0 /tlo
that succeeds, setting the filesystem size to 0 meaning unlimited.
So if then as a test I try
mount -o remount,size=1M /tlo
that correctly fails with "Cannot retroactively limit size".
But then when I try
mount -o remount,nr_inodes=0 /tlo
I again get "Cannot retroactively limit size",
when it should have succeeded (again, 0 here meaning unlimited).
That's because the options in shmem_parse_options() are
"size=4G,nr_inodes=0", which indeed looks like an attempt to
retroactively limit size; but the user never asked "size=4G" there.
I think this problem, and some of what's fixed below, predate your
rework, and would equally affect the version in mmotm: I just didn't
discover these issues when I was testing that before.
Hugh
--- aviro/mm/shmem.c 2019-09-09 14:10:34.379832855 -0700
+++ hughd/mm/shmem.c 2019-09-09 23:29:28.467037895 -0700
@@ -3456,7 +3456,7 @@ static int shmem_parse_one(struct fs_con
ctx->huge = result.uint_32;
if (ctx->huge != SHMEM_HUGE_NEVER &&
!(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
- has_transparent_hugepage()))
+ has_transparent_hugepage()))
goto unsupported_parameter;
ctx->seen |= SHMEM_SEEN_HUGE;
break;
@@ -3532,26 +3532,26 @@ static int shmem_reconfigure(struct fs_c
spin_lock(&sbinfo->stat_lock);
inodes = sbinfo->max_inodes - sbinfo->free_inodes;
- if (ctx->seen & SHMEM_SEEN_BLOCKS) {
+ if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
+ if (!sbinfo->max_blocks) {
+ err = "Cannot retroactively limit size";
+ goto out;
+ }
if (percpu_counter_compare(&sbinfo->used_blocks,
ctx->blocks) > 0) {
err = "Too small a size for current use";
goto out;
}
- if (ctx->blocks && !sbinfo->max_blocks) {
- err = "Cannot retroactively limit nr_blocks";
+ }
+ if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
+ if (!sbinfo->max_inodes) {
+ err = "Cannot retroactively limit inodes";
goto out;
}
- }
- if (ctx->seen & SHMEM_SEEN_INODES) {
if (ctx->inodes < inodes) {
err = "Too few inodes for current use";
goto out;
}
- if (ctx->inodes && !sbinfo->max_inodes) {
- err = "Cannot retroactively limit nr_inodes";
- goto out;
- }
}
if (ctx->seen & SHMEM_SEEN_HUGE)
@@ -3574,6 +3574,7 @@ static int shmem_reconfigure(struct fs_c
spin_unlock(&sbinfo->stat_lock);
return 0;
out:
+ spin_unlock(&sbinfo->stat_lock);
return invalf(fc, "tmpfs: %s", err);
}