FYI, we noticed the following changes on

https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
commit 60b99b486b568c13cbb7caa83cf8a12af7665f1e ("f2fs: introduce a periodic checkpoint flow")
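
For context, the patch makes f2fs force a checkpoint once a fixed interval
has elapsed, rather than relying on the usual triggers alone. A minimal
userspace sketch of the timing logic, assuming a 60s default interval (the
constant and names below are illustrative, not the actual kernel code):

/*
 * Sketch of a periodic checkpoint trigger. In the real patch this sits
 * in the f2fs checkpoint path; here it is reduced to the timing check.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define DEF_CP_INTERVAL 60          /* assumed: seconds between forced checkpoints */

static time_t last_cp_time;         /* when the last checkpoint completed */

/* Polled from the background path: request a checkpoint once the
 * interval has elapsed, even if no other condition asks for one. */
static bool need_periodic_checkpoint(void)
{
        return time(NULL) - last_cp_time >= DEF_CP_INTERVAL;
}

int main(void)
{
        last_cp_time = time(NULL) - DEF_CP_INTERVAL;   /* pretend one is due */
        if (need_periodic_checkpoint()) {
                puts("checkpoint due: flush dirty metadata");
                last_cp_time = time(NULL);
        }
        return 0;
}

A forced flush of this kind would be consistent with the numbers below:
fileio.requests_per_sec improves slightly, while fileio.request_latency_max_ms
grows almost 4x as writers occasionally stall behind the checkpoint.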
=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/period/nr_threads/disk/fs/size/filenum/rwmode/iomode:
lkp-ws02/fileio/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/600s/100%/1HDD/f2fs/64G/1024f/seqrewr/sync
commit:
5c2674347466d5c2d5169214e95f4ad6dc09e9b6
60b99b486b568c13cbb7caa83cf8a12af7665f1e
5c2674347466d5c2 60b99b486b568c13cbb7caa83c
---------------- --------------------------
         %stddev     %change         %stddev
             \          |                \
      1111 ±  8%    +279.7%       4219 ± 23%  fileio.request_latency_max_ms
      5656 ±  0%      +1.9%       5764 ±  0%  fileio.requests_per_sec
 1.088e+08 ±  0%      +1.9%  1.108e+08 ±  0%  fileio.time.file_system_outputs
     46225 ±  0%      +4.9%      48473 ±  0%  fileio.time.involuntary_context_switches
      3759 ±  0%      +2.6%       3858 ±  1%  fileio.time.maximum_resident_set_size
     47.00 ± 92%    +163.8%     124.00 ±  8%  numa-vmstat.node0.workingset_refault
     37306 ±  2%     -10.6%      33365 ±  1%  softirqs.BLOCK
    328.00 ±  0%     +55.5%     510.00 ±  1%  time.file_system_inputs
     79132 ± 17%     +30.7%     103393 ±  2%  numa-meminfo.node0.Active
    122897 ± 10%     -20.4%      97780 ±  2%  numa-meminfo.node1.Active
      1.45 ±  1%      +3.3%       1.50 ±  1%  turbostat.%Busy
      0.40 ± 13%     +76.1%       0.70 ± 19%  turbostat.Pkg%pc3
      0.12 ± 15%    +638.8%       0.91 ± 16%  turbostat.Pkg%pc6
     83662 ±  0%      +1.9%      85235 ±  0%  vmstat.io.bo
    626.00 ±  5%     +23.0%     770.25 ±  8%  vmstat.memory.buff
      9980 ±  1%      +2.6%      10241 ±  1%  vmstat.system.cs
 2.012e+08 ± 11%     +25.0%  2.514e+08 ±  8%  cpuidle.C1E-NHM.time
     52329 ± 11%     -23.6%      39994 ± 12%  cpuidle.C3-NHM.usage
      5227 ± 29%    +413.0%      26816 ±114%  cpuidle.POLL.time
    336.25 ± 33%     +82.5%     613.50 ±  6%  cpuidle.POLL.usage
      2648 ±  5%     -13.8%       2283 ±  2%  proc-vmstat.kswapd_high_wmark_hit_quickly
      3872 ±  1%     +12.6%       4361 ±  3%  proc-vmstat.kswapd_low_wmark_hit_quickly
      1072 ± 29%     +30.1%       1395 ± 16%  proc-vmstat.pgpgin
      9856 ± 11%    +350.3%      44384 ±  3%  proc-vmstat.slabs_scanned
      3364 ±  2%     -20.9%       2660 ±  2%  slabinfo.f2fs_extent_tree.active_objs
      3364 ±  2%     -20.9%       2660 ±  2%  slabinfo.f2fs_extent_tree.num_objs
     19645 ±  0%      +9.8%      21579 ±  4%  slabinfo.jbd2_revoke_record_s.num_objs
      3165 ±  4%      -8.7%       2891 ±  8%  slabinfo.proc_inode_cache.num_objs
      0.00 ± -1%      +Inf%    1774752 ±  7%  latency_stats.avg.call_rwsem_down_read_failed.f2fs_convert_inline_inode.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write.SyS_pwrite64.entry_SYSCALL_64_fastpath
      0.00 ± -1%      +Inf%    1804979 ± 68%  latency_stats.avg.get_request.blk_queue_bio.generic_make_request.submit_bio.f2fs_submit_page_bio.[f2fs].get_meta_page.[f2fs].get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].f2fs_get_block.[f2fs]
      0.00 ± -1%      +Inf%    3191014 ±  6%  latency_stats.max.call_rwsem_down_read_failed.f2fs_convert_inline_inode.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write.SyS_pwrite64.entry_SYSCALL_64_fastpath
   1043277 ±  6%    +294.7%    4118293 ± 23%  latency_stats.max.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write.SyS_pwrite64.entry_SYSCALL_64_fastpath
      0.00 ± -1%      +Inf%    1804979 ± 68%  latency_stats.max.get_request.blk_queue_bio.generic_make_request.submit_bio.f2fs_submit_page_bio.[f2fs].get_meta_page.[f2fs].get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].f2fs_get_block.[f2fs]
     16549 ± 50%    +248.8%      57724 ±145%  latency_stats.max.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_run_open_task.[nfsv4]._nfs4_open_and_get_state.[nfsv4].nfs4_do_open.[nfsv4].nfs4_atomic_open.[nfsv4].nfs4_file_open.[nfsv4].do_dentry_open.vfs_open.path_openat.do_filp_open.do_sys_open
      0.00 ± -1%      +Inf%   16400627 ±  7%  latency_stats.sum.call_rwsem_down_read_failed.f2fs_convert_inline_inode.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write.SyS_pwrite64.entry_SYSCALL_64_fastpath
      0.00 ± -1%      +Inf%    1804979 ± 68%  latency_stats.sum.get_request.blk_queue_bio.generic_make_request.submit_bio.f2fs_submit_page_bio.[f2fs].get_meta_page.[f2fs].get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].f2fs_get_block.[f2fs]
   -102455 ±-26%     +43.5%    -147067 ±-18%  sched_debug.cfs_rq[14]:/.spread0
      4.25 ± 42%    +152.9%      10.75 ± 30%  sched_debug.cfs_rq[15]:/.util_avg
     49880 ± 10%     -75.7%      12114 ± 81%  sched_debug.cfs_rq[4]:/.spread0
     47475 ± 29%     -64.6%      16798 ± 70%  sched_debug.cfs_rq[5]:/.spread0
      4.75 ±  9%    +157.9%      12.25 ± 65%  sched_debug.cfs_rq[6]:/.load_avg
      5.00 ± 14%    +165.0%      13.25 ± 58%  sched_debug.cfs_rq[6]:/.tg_load_avg_contrib
    -86.50 ±-72%    +177.5%    -240.00 ± -8%  sched_debug.cpu#1.nr_uninterruptible
     -3.00 ±-212%    -525.0%      12.75 ±115%  sched_debug.cpu#11.nr_uninterruptible
      2.75 ±211%   +1154.5%      34.50 ±  9%  sched_debug.cpu#15.nr_uninterruptible
      4.50 ±163%    +583.3%      30.75 ± 25%  sched_debug.cpu#18.nr_uninterruptible
     10.25 ±136%    +234.1%      34.25 ± 43%  sched_debug.cpu#19.nr_uninterruptible
      4.75 ±399%    +636.8%      35.00 ± 46%  sched_debug.cpu#2.nr_uninterruptible
      6.75 ±174%    +259.3%      24.25 ± 39%  sched_debug.cpu#22.nr_uninterruptible
    921417 ±  7%     -10.1%     828517 ±  7%  sched_debug.cpu#5.avg_idle
     14.00 ± 25%     +98.2%      27.75 ± 29%  sched_debug.cpu#7.nr_uninterruptible
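
The latency_stats entries above only appear with this commit: waits on the
rwsem taken via f2fs_convert_inline_inode go from zero to maxima in the
seconds range (latency_stats counts microseconds), consistent with writers
blocking while a checkpoint holds the lock for write. With CONFIG_LATENCYTOP=y
(and latencytop enabled through /proc/sys/kernel/latencytop), the same
counters can be read back from /proc/latency_stats; a small filter like the
sketch below picks out the f2fs entries (the helper is just a convenience
for this report, not part of lkp-tests):

/* Print /proc/latency_stats lines that mention f2fs. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[4096];
        FILE *f = fopen("/proc/latency_stats", "r");

        if (!f) {
                perror("/proc/latency_stats");
                return 1;
        }
        /* Each entry: hit count, total and maximum delay (microseconds),
         * followed by the call chain that slept. */
        while (fgets(line, sizeof(line), f))
                if (strstr(line, "f2fs"))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}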
lkp-ws02: Westmere-EP
Memory: 16G
fileio.requests_per_sec
5860 ++-------------------------------------------------------------------+
5840 ++ O O |
O O O O O O O O O O O O O O O O O |
5820 ++ O O |
5800 ++ |
5780 ++ |
5760 ++ O O O |
| O
5740 ++ |
5720 ++ |
5700 ++ |
5680 ++ |
| .*..*.. .*. .*..*.. .*
5660 *+ *..*.*..*..*..*.. .*..*. *..*. *..*..*..*.*..*..*. |
5640 ++------------------------*------------------------------------------+
fileio.time.file_system_outputs
1.125e+08 ++--------------------------------------------------------------+
O O O O O O O |
1.12e+08 ++ O O O O O O O O O O O O O O |
1.115e+08 ++ |
| |
1.11e+08 ++ O |
| O O O
1.105e+08 ++ |
| |
1.1e+08 ++ |
1.095e+08 ++ |
| |
1.09e+08 ++.*.*.. .*.. .*..*. |
*. *..*.*..*..*.*.. .*. *.*. *..*..*.*..*..*.*..*
1.085e+08 ++----------------------*--*------------------------------------+
time.file_system_inputs
520 O+----O--------O--O-------------------O--O----------------------O-----+
500 ++ O O O O O O O O O O O O O O O
| O O O |
480 ++ |
460 ++ |
| |
440 ++ |
420 ++ |
400 ++ |
| |
380 ++ |
360 ++ |
| |
340 *+.*..*..*..*..*..*.*..*..*..*..*..*..*..*..*..*..*..*.*..*..*..*..*..*
320 ++--------------------------------------------------------------------+
vmstat.io.bo
86500 ++------------------------------------------------------------------+
O O O O O O O O O O O O O O O O O O |
86000 ++ O O O |
| |
85500 ++ |
| O O O O
85000 ++ |
| |
84500 ++ |
| |
84000 ++ |
*..*.. .*..*..*.. .*.. .*..*.*..*..*
83500 ++ *..*.*..*.. .*.. .*.*..*. * *..*..*. |
| *. *. |
83000 ++------------------------------------------------------------------+
proc-vmstat.slabs_scanned
50000 ++------------------------------------------------------------------+
| O O O O O O O O O
45000 O+ O O O O O O O O O O O O O O |
40000 ++ O |
| |
35000 ++ |
30000 ++ |
| .*..*.. .*..*..*.. .*..*. .* |
25000 *+ * *. *..*. : |
20000 ++ : |
| : |
15000 ++ : |
10000 ++ : .* .*.. .*..|
| *..*. + .*. .*..*.*. *
5000 ++-------------------------------------------*--------*-------------+
[*] bisect-good sample
[O] bisect-bad sample
To reproduce:

        git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
        cd lkp-tests
        bin/lkp install job.yaml  # job file is attached in this email
        bin/lkp run job.yaml
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Ying Huang