Hi Jens,
this series contains a bunch of different BDI cleanups. The biggest item
is to isolate block drivers from the BDI in preparation for changing the
lifetime of the block device BDI in a follow-up series.
Changes since v2:
- fix a rw_page return value check
- fix up various changelogs
Changes since v1:
- rebased to the for-5.9/block-merge branch
- explicitly set the readahead to 0 for ubifs, vboxsf and mtd
- split the zram block_device operations
- let rw_page users fall back to bios in swap_readpage
Diffstat:
block/blk-core.c | 2
block/blk-integrity.c | 4
block/blk-mq-debugfs.c | 1
block/blk-settings.c | 5
block/blk-sysfs.c | 282 ++++++++++--------------------------------
block/genhd.c | 13 +
drivers/block/aoe/aoeblk.c | 2
drivers/block/brd.c | 1
drivers/block/drbd/drbd_nl.c | 18 --
drivers/block/drbd/drbd_req.c | 4
drivers/block/rbd.c | 2
drivers/block/zram/zram_drv.c | 19 +-
drivers/md/bcache/super.c | 4
drivers/md/dm-table.c | 9 -
drivers/md/raid0.c | 16 --
drivers/md/raid10.c | 46 ++----
drivers/md/raid5.c | 31 +---
drivers/mmc/core/queue.c | 3
drivers/mtd/mtdcore.c | 1
drivers/nvdimm/btt.c | 2
drivers/nvdimm/pmem.c | 1
drivers/nvme/host/core.c | 3
drivers/nvme/host/multipath.c | 10 -
drivers/scsi/iscsi_tcp.c | 4
fs/9p/vfs_file.c | 2
fs/9p/vfs_super.c | 4
fs/afs/super.c | 1
fs/btrfs/disk-io.c | 2
fs/fs-writeback.c | 7 -
fs/fuse/inode.c | 4
fs/namei.c | 4
fs/nfs/super.c | 9 -
fs/super.c | 2
fs/ubifs/super.c | 1
fs/vboxsf/super.c | 1
include/linux/backing-dev.h | 78 +----------
include/linux/blkdev.h | 3
include/linux/drbd.h | 1
include/linux/fs.h | 2
mm/backing-dev.c | 12 -
mm/filemap.c | 4
mm/memcontrol.c | 2
mm/memory-failure.c | 2
mm/migrate.c | 2
mm/mmap.c | 2
mm/page-writeback.c | 18 +-
mm/page_io.c | 18 +-
mm/swapfile.c | 4
48 files changed, 204 insertions(+), 464 deletions(-)
Drivers shouldn't really mess with the readahead size, as that is a VM
concept. Instead, set it based on the optimal I/O size by lifting the
algorithm from the md driver when registering the disk. Also set
bdi->io_pages there, applying the same scheme based on max_sectors.
Signed-off-by: Christoph Hellwig <[email protected]>
---
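
A purely illustrative example of the resulting values, assuming 4k pages
and made-up device limits:

        /* optimal I/O size reported as 1 MiB */
        ra_pages = max(2 * SZ_1M / PAGE_SIZE, VM_READAHEAD_PAGES)
                 = max(512, 32) = 512           /* 2 MiB of readahead */

        /* max_sectors of 1280 512-byte sectors */
        io_pages = 1280 >> (PAGE_SHIFT - 9) = 160       /* 640 KiB */
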
block/blk-settings.c | 5 ++---
block/blk-sysfs.c | 1 -
block/genhd.c | 13 +++++++++++--
drivers/block/aoe/aoeblk.c | 2 --
drivers/block/drbd/drbd_nl.c | 12 +-----------
drivers/md/bcache/super.c | 4 ----
drivers/md/dm-table.c | 3 ---
drivers/md/raid0.c | 16 ----------------
drivers/md/raid10.c | 24 +-----------------------
drivers/md/raid5.c | 13 +------------
10 files changed, 16 insertions(+), 77 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 76a7e03bcd6cac..01049e9b998f1d 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -452,6 +452,8 @@ EXPORT_SYMBOL(blk_limits_io_opt);
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
blk_limits_io_opt(&q->limits, opt);
+ q->backing_dev_info->ra_pages =
+ max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
@@ -628,9 +630,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
top, bottom);
}
-
- t->backing_dev_info->io_pages =
- t->limits.max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(disk_stack_limits);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 7dda709f3ccb6f..ce418d9128a0b2 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -245,7 +245,6 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
spin_lock_irq(&q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1;
- q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
spin_unlock_irq(&q->queue_lock);
return ret;
diff --git a/block/genhd.c b/block/genhd.c
index 8b1e9f48957cb5..097d4e4bc0b8a2 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -775,6 +775,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups,
bool register_queue)
{
+ struct request_queue *q = disk->queue;
dev_t devt;
int retval;
@@ -785,7 +786,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
* registration.
*/
if (register_queue)
- elevator_init_mq(disk->queue);
+ elevator_init_mq(q);
/* minors == 0 indicates to use ext devt from part0 and should
* be accompanied with EXT_DEVT flag. Make sure all
@@ -815,10 +816,18 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->flags |= GENHD_FL_NO_PART_SCAN;
} else {
- struct backing_dev_info *bdi = disk->queue->backing_dev_info;
+ struct backing_dev_info *bdi = q->backing_dev_info;
struct device *dev = disk_to_dev(disk);
int ret;
+ /*
+ * For read-ahead of large files to be effective, we need to
+ * readahead at least twice the optimal I/O size.
+ */
+ bdi->ra_pages = max(queue_io_opt(q) * 2 / PAGE_SIZE,
+ VM_READAHEAD_PAGES);
+ bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
+
/* Register BDI before referencing it from bdev */
dev->devt = devt;
ret = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 5ca7216e9e01f3..89b33b402b4e52 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -347,7 +347,6 @@ aoeblk_gdalloc(void *vp)
mempool_t *mp;
struct request_queue *q;
struct blk_mq_tag_set *set;
- enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
ulong flags;
int late = 0;
int err;
@@ -407,7 +406,6 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
- q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 650372ee2c7822..212bf711fb6b41 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1360,18 +1360,8 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
decide_on_write_same_support(device, q, b, o, disable_write_same);
- if (b) {
+ if (b)
blk_stack_limits(&q->limits, &b->limits, 0);
-
- if (q->backing_dev_info->ra_pages !=
- b->backing_dev_info->ra_pages) {
- drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
- q->backing_dev_info->ra_pages,
- b->backing_dev_info->ra_pages);
- q->backing_dev_info->ra_pages =
- b->backing_dev_info->ra_pages;
- }
- }
fixup_discard_if_not_supported(q);
fixup_write_zeroes(device, q);
}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 9e45faa054b6f4..9d3f0711be030f 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1367,10 +1367,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
if (ret)
return ret;
- dc->disk.disk->queue->backing_dev_info->ra_pages =
- max(dc->disk.disk->queue->backing_dev_info->ra_pages,
- q->backing_dev_info->ra_pages);
-
atomic_set(&dc->io_errors, 0);
dc->io_disable = false;
dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index aac4c31cfc8498..324a42ed2f8894 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1924,9 +1924,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
q->nr_zones = blkdev_nr_zones(t->md->disk);
}
#endif
-
- /* Allow reads to exceed readahead limits */
- q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f54a449f97aa79..aa2d7279176880 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -410,22 +410,6 @@ static int raid0_run(struct mddev *mddev)
mdname(mddev),
(unsigned long long)mddev->array_sectors);
- if (mddev->queue) {
- /* calculate the max read-ahead size.
- * For read-ahead of large files to be effective, we need to
- * readahead at least twice a whole stripe. i.e. number of devices
- * multiplied by chunk size times 2.
- * If an individual device has an ra_pages greater than the
- * chunk size, then we will not drive that device as hard as it
- * wants. We consider this a configuration error: a larger
- * chunksize should be used in that case.
- */
- int stripe = mddev->raid_disks *
- (mddev->chunk_sectors << 9) / PAGE_SIZE;
- if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
- mddev->queue->backing_dev_info->ra_pages = 2* stripe;
- }
-
dump_zones(mddev);
ret = md_integrity_register(mddev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9f88ff9bdee437..23d15acbf457d4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3865,19 +3865,6 @@ static int raid10_run(struct mddev *mddev)
mddev->resync_max_sectors = size;
set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
- if (mddev->queue) {
- int stripe = conf->geo.raid_disks *
- ((mddev->chunk_sectors << 9) / PAGE_SIZE);
-
- /* Calculate max read-ahead size.
- * We need to readahead at least twice a whole stripe....
- * maybe...
- */
- stripe /= conf->geo.near_copies;
- if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
- }
-
if (md_integrity_register(mddev))
goto out_free_conf;
@@ -4715,17 +4702,8 @@ static void end_reshape(struct r10conf *conf)
conf->reshape_safe = MaxSector;
spin_unlock_irq(&conf->device_lock);
- /* read-ahead size must cover two whole stripes, which is
- * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
- */
- if (conf->mddev->queue) {
- int stripe = conf->geo.raid_disks *
- ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
- stripe /= conf->geo.near_copies;
- if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+ if (conf->mddev->queue)
raid10_set_io_opt(conf);
- }
conf->fullsync = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 68e41ce3ca75cc..415ce3cc155698 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7413,8 +7413,6 @@ static int raid5_run(struct mddev *mddev)
int data_disks = conf->previous_raid_disks - conf->max_degraded;
int stripe = data_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
- if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
@@ -8002,17 +8000,8 @@ static void end_reshape(struct r5conf *conf)
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
- /* read-ahead size must cover two whole stripes, which is
- * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
- */
- if (conf->mddev->queue) {
- int data_disks = conf->raid_disks - conf->max_degraded;
- int stripe = data_disks * ((conf->chunk_sectors << 9)
- / PAGE_SIZE);
- if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+ if (conf->mddev->queue)
raid5_set_io_opt(conf);
- }
}
}
--
2.27.0
Just checking SB_I_CGROUPWB for cgroup writeback support is enough.
Either the file system allocates its own bdi (e.g. btrfs), in which case
it is known to support cgroup writeback, or the bdi comes from the block
layer, which always supports cgroup writeback.
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
---
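
For reference, a sketch of how a file system with its own bdi opts into
cgroup writeback purely via the superblock flag now (illustrative flow
only, "foo" names are made up and not from any particular file system):

        static int foo_fill_super(struct super_block *sb, struct fs_context *fc)
        {
                int err;

                err = super_setup_bdi(sb);      /* private bdi, as e.g. btrfs has */
                if (err)
                        return err;
                /* cgroup writeback support is keyed off this superblock flag */
                sb->s_iflags |= SB_I_CGROUPWB;
                return 0;
        }
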
block/blk-core.c | 1 -
fs/btrfs/disk-io.c | 1 -
include/linux/backing-dev.h | 8 +++-----
3 files changed, 3 insertions(+), 7 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index ea1665de7a2079..68db7e745b49dd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -538,7 +538,6 @@ struct request_queue *blk_alloc_queue(int node_id)
if (!q->stats)
goto fail_stats;
- q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->node = node_id;
timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f92c45fe019c48..4b5a8640329e4c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3032,7 +3032,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
goto fail_sb_buffer;
}
- sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0b06b2d26c9aa3..52583b6f2ea05d 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -123,7 +123,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
* BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
* BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
*
- * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
* BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
* inefficient.
*/
@@ -233,9 +232,9 @@ int inode_congested(struct inode *inode, int cong_bits);
* inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
* @inode: inode of interest
*
- * cgroup writeback requires support from both the bdi and filesystem.
- * Also, both memcg and iocg have to be on the default hierarchy. Test
- * whether all conditions are met.
+ * Cgroup writeback requires support from the filesystem. Also, both memcg and
+ * iocg have to be on the default hierarchy. Test whether all conditions are
+ * met.
*
* Note that the test result may change dynamically on the same inode
* depending on how memcg and iocg are configured.
@@ -247,7 +246,6 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
cgroup_subsys_on_dfl(io_cgrp_subsys) &&
bdi_cap_account_dirty(bdi) &&
- (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
--
2.27.0
BDI_CAP_SYNCHRONOUS_IO is only checked in the swap code, and used to
decide if ->rw_page can be used on a block device. Just check for the
method directly instead. The only complication is that zram needs a
second set of block_device_operations, as it can switch between modes
that actually support ->rw_page and those that don't.
Signed-off-by: Christoph Hellwig <[email protected]>
---
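
Illustrative only (driver and function names are made up): with this
change a driver signals that it is fast enough for synchronous swap I/O
simply by providing ->rw_page, and the swap code keys off the method
pointer:

        static const struct block_device_operations foo_fops = {
                .owner          = THIS_MODULE,
                .submit_bio     = foo_submit_bio,
                .rw_page        = foo_rw_page,  /* presence implies SWP_SYNCHRONOUS_IO */
        };
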
drivers/block/brd.c | 1 -
drivers/block/zram/zram_drv.c | 19 +++++++++++++------
drivers/nvdimm/btt.c | 2 --
drivers/nvdimm/pmem.c | 1 -
include/linux/backing-dev.h | 9 ---------
mm/swapfile.c | 2 +-
6 files changed, 14 insertions(+), 20 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 2723a70eb85593..cc49a921339f77 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -403,7 +403,6 @@ static struct brd_device *brd_alloc(int i)
disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "ram%d", i);
set_capacity(disk, rd_size * 2);
- brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
/* Tell the block layer that this is not a rotational device */
blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9100ac36670afc..d73ddf018fa65f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,6 +52,9 @@ static unsigned int num_devices = 1;
*/
static size_t huge_class_size;
+static const struct block_device_operations zram_devops;
+static const struct block_device_operations zram_wb_devops;
+
static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
u32 index, int offset, struct bio *bio);
@@ -408,8 +411,7 @@ static void reset_bdev(struct zram *zram)
zram->backing_dev = NULL;
zram->old_block_size = 0;
zram->bdev = NULL;
- zram->disk->queue->backing_dev_info->capabilities |=
- BDI_CAP_SYNCHRONOUS_IO;
+ zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
zram->bitmap = NULL;
}
@@ -528,8 +530,7 @@ static ssize_t backing_dev_store(struct device *dev,
* freely but in fact, IO is going on so finally could cause
* use-after-free when the IO is really done.
*/
- zram->disk->queue->backing_dev_info->capabilities &=
- ~BDI_CAP_SYNCHRONOUS_IO;
+ zram->disk->fops = &zram_wb_devops;
up_write(&zram->init_lock);
pr_info("setup backing device %s\n", file_name);
@@ -1819,6 +1820,13 @@ static const struct block_device_operations zram_devops = {
.owner = THIS_MODULE
};
+static const struct block_device_operations zram_wb_devops = {
+ .open = zram_open,
+ .submit_bio = zram_submit_bio,
+ .swap_slot_free_notify = zram_slot_free_notify,
+ .owner = THIS_MODULE
+};
+
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
@@ -1946,8 +1954,7 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
- zram->disk->queue->backing_dev_info->capabilities |=
- (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
+ zram->disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 412d21d8f64351..b4184dc9b41eb4 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1540,8 +1540,6 @@ static int btt_blk_init(struct btt *btt)
btt->btt_disk->private_data = btt;
btt->btt_disk->queue = btt->btt_queue;
btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
- btt->btt_disk->queue->backing_dev_info->capabilities |=
- BDI_CAP_SYNCHRONOUS_IO;
blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 94790e6e0e4ce1..436b83fb24ad61 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -478,7 +478,6 @@ static int pmem_attach_disk(struct device *dev,
disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
disk->private_data = pmem;
- disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
/ 512);
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 52583b6f2ea05d..860ea33571bce5 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -122,9 +122,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
* BDI_CAP_NO_WRITEBACK: Don't write pages back
* BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
* BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
- *
- * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
- * inefficient.
*/
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002
@@ -132,7 +129,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
#define BDI_CAP_STABLE_WRITES 0x00000008
#define BDI_CAP_STRICTLIMIT 0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
-#define BDI_CAP_SYNCHRONOUS_IO 0x00000040
#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
@@ -174,11 +170,6 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
long congestion_wait(int sync, long timeout);
long wait_iff_congested(int sync, long timeout);
-static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
-{
- return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
-}
-
static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
return bdi->capabilities & BDI_CAP_STABLE_WRITES;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6c26916e95fd4a..18eac97b10e502 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3230,7 +3230,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
p->flags |= SWP_STABLE_WRITES;
- if (bdi_cap_synchronous_io(inode_to_bdi(inode)))
+ if (p->bdev && p->bdev->bd_disk->fops->rw_page)
p->flags |= SWP_SYNCHRONOUS_IO;
if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
--
2.27.0
Replace BDI_CAP_NO_ACCT_WB with a positive BDI_CAP_WRITEBACK_ACCT to
make the checks more obvious. Also remove the pointless
bdi_cap_account_writeback wrapper that just obfuscates the check.
Signed-off-by: Christoph Hellwig <[email protected]>
---
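
To illustrate the polarity change, the writeback-accounting check goes
from a double negative to a straightforward positive test (condensed
from the diff below):

        /* before */ !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB | BDI_CAP_NO_WRITEBACK))
        /* after  */   bdi->capabilities & BDI_CAP_WRITEBACK_ACCT
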
fs/fuse/inode.c | 3 ++-
include/linux/backing-dev.h | 13 +++----------
mm/backing-dev.c | 1 +
mm/page-writeback.c | 4 ++--
4 files changed, 8 insertions(+), 13 deletions(-)
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 17b00670fb539e..581329203d6860 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1050,7 +1050,8 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
return err;
/* fuse does it's own writeback accounting */
- sb->s_bdi->capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
+ sb->s_bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
+ sb->s_bdi->capabilities |= BDI_CAP_STRICTLIMIT;
/*
* For a single fuse filesystem use max 1% of dirty +
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5da4ea3dd0cc5c..b217344a2c63be 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -120,17 +120,17 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
*
* BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
* BDI_CAP_NO_WRITEBACK: Don't write pages back
- * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
+ * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages
* BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
*/
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002
-#define BDI_CAP_NO_ACCT_WB 0x00000004
+#define BDI_CAP_WRITEBACK_ACCT 0x00000004
#define BDI_CAP_STRICTLIMIT 0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
- (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
+ (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY)
extern struct backing_dev_info noop_backing_dev_info;
@@ -179,13 +179,6 @@ static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}
-static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
-{
- /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
- return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
- BDI_CAP_NO_WRITEBACK));
-}
-
static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 41ec322090fca6..5f5958e1d39060 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -744,6 +744,7 @@ struct backing_dev_info *bdi_alloc(int node_id)
kfree(bdi);
return NULL;
}
+ bdi->capabilities = BDI_CAP_WRITEBACK_ACCT;
bdi->ra_pages = VM_READAHEAD_PAGES;
return bdi;
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 27a10536adad30..44c4a588f48df5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2740,7 +2740,7 @@ int test_clear_page_writeback(struct page *page)
if (ret) {
__xa_clear_mark(&mapping->i_pages, page_index(page),
PAGECACHE_TAG_WRITEBACK);
- if (bdi_cap_account_writeback(bdi)) {
+ if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
struct bdi_writeback *wb = inode_to_wb(inode);
dec_wb_stat(wb, WB_WRITEBACK);
@@ -2793,7 +2793,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
PAGECACHE_TAG_WRITEBACK);
xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
- if (bdi_cap_account_writeback(bdi))
+ if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT)
inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
/*
--
2.27.0
BDI_CAP_STABLE_WRITES is one of the few bits of information in the
backing_dev_info shared between the block drivers and the writeback code.
To help untangle the dependency, replace it with a queue flag and a
superblock flag derived from it. This also helps with the case of e.g.
a file system requiring stable writes due to its own checksumming, but
not forcing it on other users of the block device like the swap code.
One downside is that we can't support the stable_pages_required bdi
attribute in sysfs anymore. It is replaced with a queue attribute, which
can also be made writable for easier testing.
Signed-off-by: Christoph Hellwig <[email protected]>
---
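
For illustration, the two halves of the new scheme (both lines are
sketches of the pattern used in the diff below, not additional code):

        /* block driver whose data must not change while in flight,
         * e.g. because it checksums or computes parity over it */
        blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);

        /* file system that needs stable pages for its own checksumming,
         * independent of the underlying block device */
        sb->s_iflags |= SB_I_STABLE_WRITES;
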
block/blk-integrity.c | 4 ++--
block/blk-mq-debugfs.c | 1 +
block/blk-sysfs.c | 2 ++
drivers/block/rbd.c | 2 +-
drivers/block/zram/zram_drv.c | 2 +-
drivers/md/dm-table.c | 6 +++---
drivers/md/raid5.c | 8 ++++----
drivers/mmc/core/queue.c | 3 +--
drivers/nvme/host/core.c | 3 +--
drivers/nvme/host/multipath.c | 10 +++-------
drivers/scsi/iscsi_tcp.c | 4 ++--
fs/super.c | 2 ++
include/linux/backing-dev.h | 6 ------
include/linux/blkdev.h | 3 +++
include/linux/fs.h | 1 +
mm/backing-dev.c | 6 ++----
mm/page-writeback.c | 2 +-
mm/swapfile.c | 2 +-
18 files changed, 31 insertions(+), 36 deletions(-)
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index c03705cbb9c9f2..2b36a8f9b81390 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -408,7 +408,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
bi->tuple_size = template->tuple_size;
bi->tag_size = template->tag_size;
- disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
if (disk->queue->ksm) {
@@ -428,7 +428,7 @@ EXPORT_SYMBOL(blk_integrity_register);
*/
void blk_integrity_unregister(struct gendisk *disk)
{
- disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
}
EXPORT_SYMBOL(blk_integrity_unregister);
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 3f09bcb8a6fd7e..5a7d870eff2f89 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -116,6 +116,7 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(SAME_FORCE),
QUEUE_FLAG_NAME(DEAD),
QUEUE_FLAG_NAME(INIT_DONE),
+ QUEUE_FLAG_NAME(STABLE_WRITES),
QUEUE_FLAG_NAME(POLL),
QUEUE_FLAG_NAME(WC),
QUEUE_FLAG_NAME(FUA),
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 9bb4e42fb73265..4a3799ed33f775 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -291,6 +291,7 @@ static struct queue_sysfs_entry queue_##_name##_entry = { \
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
+QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
@@ -645,6 +646,7 @@ static struct attribute *queue_attrs[] = {
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
+ &queue_stable_writes_entry.attr,
&queue_random_entry.attr,
&queue_poll_entry.attr,
&queue_wc_entry.attr,
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4f61e920946144..4a8515acccb3bf 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5022,7 +5022,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
}
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
- q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
/*
* disk_release() expects a queue ref from add_disk() and will
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d73ddf018fa65f..e6ed9c9f500a42 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1954,7 +1954,7 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
- zram->disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 324a42ed2f8894..e1adec51cb5b41 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1815,7 +1815,7 @@ static int device_requires_stable_pages(struct dm_target *ti,
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+ return q && blk_queue_stable_writes(q);
}
/*
@@ -1900,9 +1900,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
* because they do their own checksumming.
*/
if (dm_table_requires_stable_pages(t))
- q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
else
- q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
/*
* Determine whether or not this queue's I/O timings contribute
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 415ce3cc155698..eb1d9e0a6ef92f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6536,14 +6536,14 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
if (!conf)
err = -ENODEV;
else if (new != conf->skip_copy) {
+ struct request_queue *q = mddev->queue;
+
mddev_suspend(mddev);
conf->skip_copy = new;
if (new)
- mddev->queue->backing_dev_info->capabilities |=
- BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
else
- mddev->queue->backing_dev_info->capabilities &=
- ~BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
mddev_resume(mddev);
}
mddev_unlock(mddev);
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 4b1eb89b401d98..e789107c32a18d 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -472,8 +472,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
}
if (mmc_host_is_spi(host) && host->use_spi_crc)
- mq->queue->backing_dev_info->capabilities |=
- BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
mq->queue->queuedata = mq;
blk_queue_rq_timeout(mq->queue, 60 * HZ);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index aa2b66edba5e01..d0970da9edee4a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3836,8 +3836,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
goto out_free_ns;
if (ctrl->opts && ctrl->opts->data_digest)
- ns->queue->backing_dev_info->capabilities
- |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 74bad4e3d37785..c3688e4b59f383 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -669,13 +669,9 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
nvme_mpath_set_live(ns);
}
- if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
- struct gendisk *disk = ns->head->disk;
-
- if (disk)
- disk->queue->backing_dev_info->capabilities |=
- BDI_CAP_STABLE_WRITES;
- }
+ if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
+ ns->head->disk->queue);
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b5dd1caae5e92d..a622f334c933f5 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -962,8 +962,8 @@ static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
struct iscsi_conn *conn = session->leadconn;
if (conn->datadgst_en)
- sdev->request_queue->backing_dev_info->capabilities
- |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
+ sdev->request_queue);
blk_queue_dma_alignment(sdev->request_queue, 0);
return 0;
}
diff --git a/fs/super.c b/fs/super.c
index 904459b3511995..a51c2083cd6b18 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1256,6 +1256,8 @@ static int set_bdev_super(struct super_block *s, void *data)
s->s_dev = s->s_bdev->bd_dev;
s->s_bdi = bdi_get(s->s_bdev->bd_bdi);
+ if (blk_queue_stable_writes(s->s_bdev->bd_disk->queue))
+ s->s_iflags |= SB_I_STABLE_WRITES;
return 0;
}
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 860ea33571bce5..5da4ea3dd0cc5c 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -126,7 +126,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002
#define BDI_CAP_NO_ACCT_WB 0x00000004
-#define BDI_CAP_STABLE_WRITES 0x00000008
#define BDI_CAP_STRICTLIMIT 0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
@@ -170,11 +169,6 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
long congestion_wait(int sync, long timeout);
long wait_iff_congested(int sync, long timeout);
-static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
-{
- return bdi->capabilities & BDI_CAP_STABLE_WRITES;
-}
-
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bbdd3cf620381a..c313a1df01e6b1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -603,6 +603,7 @@ struct request_queue {
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
+#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
#define QUEUE_FLAG_WC 17 /* Write back caching */
#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
@@ -631,6 +632,8 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_stable_writes(q) \
+ test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 236543605dd118..d383c21f3c0971 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1366,6 +1366,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
+#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */
/* sb->s_iflags to limit user namespace mounts */
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 3e441e0ff1bc88..41ec322090fca6 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -204,10 +204,8 @@ static ssize_t stable_pages_required_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
- struct backing_dev_info *bdi = dev_get_drvdata(dev);
-
- return snprintf(page, PAGE_SIZE-1, "%d\n",
- bdi_cap_stable_pages_required(bdi) ? 1 : 0);
+ pr_info_once("the stable_pages_required attribute has been deprecated\n");
+ return snprintf(page, PAGE_SIZE-1, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 28b3e7a6756577..27a10536adad30 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2851,7 +2851,7 @@ EXPORT_SYMBOL_GPL(wait_on_page_writeback);
*/
void wait_for_stable_page(struct page *page)
{
- if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
+ if (page->mapping->host->i_sb->s_iflags & SB_I_STABLE_WRITES)
wait_on_page_writeback(page);
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 18eac97b10e502..e2a589faad28cb 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3227,7 +3227,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
goto bad_swap_unlock_inode;
}
- if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
+ if (p->bdev && blk_queue_stable_writes(p->bdev->bd_disk->queue))
p->flags |= SWP_STABLE_WRITES;
if (p->bdev && p->bdev->bd_disk->fops->rw_page)
--
2.27.0
Replace the two negative flags that are always used together with a
single positive flag that indicates the writeback capability instead
of two related non-capabilities. Also remove the pointless wrappers
that just check the flag.
Signed-off-by: Christoph Hellwig <[email protected]>
---
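
Condensed from the diff below, the writeback check itself flips from a
negated flag to a positive one:

        /* before */ !(bdi->capabilities & BDI_CAP_NO_WRITEBACK)
        /* after  */   bdi->capabilities & BDI_CAP_WRITEBACK
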
fs/9p/vfs_file.c | 2 +-
fs/fs-writeback.c | 7 +++---
include/linux/backing-dev.h | 48 ++++++++-----------------------------
mm/backing-dev.c | 6 ++---
mm/filemap.c | 4 ++--
mm/memcontrol.c | 2 +-
mm/memory-failure.c | 2 +-
mm/migrate.c | 2 +-
mm/mmap.c | 2 +-
mm/page-writeback.c | 12 +++++-----
10 files changed, 29 insertions(+), 58 deletions(-)
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 92cd1d80218d70..5479d894a10696 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -625,7 +625,7 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
inode = file_inode(vma->vm_file);
- if (!mapping_cap_writeback_dirty(inode->i_mapping))
+ if (!mapping_can_writeback(inode->i_mapping))
wbc.nr_to_write = 0;
might_sleep();
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index a605c3dddabc76..e62e48fecff4f9 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2318,7 +2318,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
wb = locked_inode_to_wb_and_lock_list(inode);
- WARN(bdi_cap_writeback_dirty(wb->bdi) &&
+ WARN((wb->bdi->capabilities & BDI_CAP_WRITEBACK) &&
!test_bit(WB_registered, &wb->state),
"bdi-%s not registered\n", bdi_dev_name(wb->bdi));
@@ -2343,7 +2343,8 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* to make sure background write-back happens
* later.
*/
- if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi)
+ if (wakeup_bdi &&
+ (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
wb_wakeup_delayed(wb);
return;
}
@@ -2578,7 +2579,7 @@ int write_inode_now(struct inode *inode, int sync)
.range_end = LLONG_MAX,
};
- if (!mapping_cap_writeback_dirty(inode->i_mapping))
+ if (!mapping_can_writeback(inode->i_mapping))
wbc.nr_to_write = 0;
might_sleep();
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index b217344a2c63be..44df4fcef65c1e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -110,27 +110,14 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
/*
* Flags in backing_dev_info::capability
*
- * The first three flags control whether dirty pages will contribute to the
- * VM's accounting and whether writepages() should be called for dirty pages
- * (something that would not, for example, be appropriate for ramfs)
- *
- * WARNING: these flags are closely related and should not normally be
- * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
- * three flags into a single convenience macro.
- *
- * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
- * BDI_CAP_NO_WRITEBACK: Don't write pages back
- * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages
- * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
+ * BDI_CAP_WRITEBACK: Supports dirty page writeback, and dirty pages
+ * should contribute to accounting
+ * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages
+ * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold
*/
-#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
-#define BDI_CAP_NO_WRITEBACK 0x00000002
-#define BDI_CAP_WRITEBACK_ACCT 0x00000004
-#define BDI_CAP_STRICTLIMIT 0x00000010
-#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
-
-#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
- (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY)
+#define BDI_CAP_WRITEBACK (1 << 0)
+#define BDI_CAP_WRITEBACK_ACCT (1 << 1)
+#define BDI_CAP_STRICTLIMIT (1 << 2)
extern struct backing_dev_info noop_backing_dev_info;
@@ -169,24 +156,9 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
long congestion_wait(int sync, long timeout);
long wait_iff_congested(int sync, long timeout);
-static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
-{
- return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
-}
-
-static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
-{
- return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
-}
-
-static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
-{
- return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
-}
-
-static inline bool mapping_cap_account_dirty(struct address_space *mapping)
+static inline bool mapping_can_writeback(struct address_space *mapping)
{
- return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
+ return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}
static inline int bdi_sched_wait(void *word)
@@ -223,7 +195,7 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
cgroup_subsys_on_dfl(io_cgrp_subsys) &&
- bdi_cap_account_dirty(bdi) &&
+ (bdi->capabilities & BDI_CAP_WRITEBACK) &&
(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 5f5958e1d39060..01bd0a4f16096a 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -14,9 +14,7 @@
#include <linux/device.h>
#include <trace/events/writeback.h>
-struct backing_dev_info noop_backing_dev_info = {
- .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
+struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);
static struct class *bdi_class;
@@ -744,7 +742,7 @@ struct backing_dev_info *bdi_alloc(int node_id)
kfree(bdi);
return NULL;
}
- bdi->capabilities = BDI_CAP_WRITEBACK_ACCT;
+ bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
bdi->ra_pages = VM_READAHEAD_PAGES;
return bdi;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 385759c4ce4be6..89ebadcfecf8e5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -413,7 +413,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
.range_end = end,
};
- if (!mapping_cap_writeback_dirty(mapping) ||
+ if (!mapping_can_writeback(mapping) ||
!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
return 0;
@@ -1634,7 +1634,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
no_page:
if (!page && (fgp_flags & FGP_CREAT)) {
int err;
- if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
+ if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
gfp_mask |= __GFP_WRITE;
if (fgp_flags & FGP_NOFS)
gfp_mask &= ~__GFP_FS;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 19622328e4b5ac..cb2f1840481f57 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5419,7 +5419,7 @@ static int mem_cgroup_move_account(struct page *page,
if (PageDirty(page)) {
struct address_space *mapping = page_mapping(page);
- if (mapping_cap_account_dirty(mapping)) {
+ if (mapping_can_writeback(mapping)) {
__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
-nr_pages);
__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 47b8ccb1fb9b85..012e0d315b1f90 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1006,7 +1006,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
*/
mapping = page_mapping(hpage);
if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
- mapping_cap_writeback_dirty(mapping)) {
+ mapping_can_writeback(mapping)) {
if (page_mkclean(hpage)) {
SetPageDirty(hpage);
} else {
diff --git a/mm/migrate.c b/mm/migrate.c
index 40cd7016ae6fc6..7e2cafa06428f3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -503,7 +503,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
__dec_lruvec_state(old_lruvec, NR_SHMEM);
__inc_lruvec_state(new_lruvec, NR_SHMEM);
}
- if (dirty && mapping_cap_account_dirty(mapping)) {
+ if (dirty && mapping_can_writeback(mapping)) {
__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
diff --git a/mm/mmap.c b/mm/mmap.c
index 59a4682ebf3fae..3efb7ae6447fd9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1665,7 +1665,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
/* Can the mapping track the dirty pages? */
return vma->vm_file && vma->vm_file->f_mapping &&
- mapping_cap_account_dirty(vma->vm_file->f_mapping);
+ mapping_can_writeback(vma->vm_file->f_mapping);
}
/*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 44c4a588f48df5..ad288f1b3052fe 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1882,7 +1882,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
int ratelimit;
int *p;
- if (!bdi_cap_account_dirty(bdi))
+ if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
return;
if (inode_cgwb_enabled(inode))
@@ -2425,7 +2425,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
trace_writeback_dirty_page(page, mapping);
- if (mapping_cap_account_dirty(mapping)) {
+ if (mapping_can_writeback(mapping)) {
struct bdi_writeback *wb;
inode_attach_wb(inode, page);
@@ -2452,7 +2452,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
void account_page_cleaned(struct page *page, struct address_space *mapping,
struct bdi_writeback *wb)
{
- if (mapping_cap_account_dirty(mapping)) {
+ if (mapping_can_writeback(mapping)) {
dec_lruvec_page_state(page, NR_FILE_DIRTY);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
dec_wb_stat(wb, WB_RECLAIMABLE);
@@ -2515,7 +2515,7 @@ void account_page_redirty(struct page *page)
{
struct address_space *mapping = page->mapping;
- if (mapping && mapping_cap_account_dirty(mapping)) {
+ if (mapping && mapping_can_writeback(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
struct wb_lock_cookie cookie = {};
@@ -2627,7 +2627,7 @@ void __cancel_dirty_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
- if (mapping_cap_account_dirty(mapping)) {
+ if (mapping_can_writeback(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
struct wb_lock_cookie cookie = {};
@@ -2667,7 +2667,7 @@ int clear_page_dirty_for_io(struct page *page)
VM_BUG_ON_PAGE(!PageLocked(page), page);
- if (mapping && mapping_cap_account_dirty(mapping)) {
+ if (mapping && mapping_can_writeback(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
struct wb_lock_cookie cookie = {};
--
2.27.0
The raid5 and raid10 drivers currently update the read-ahead size, but
not the optimal I/O size, on reshape. To prepare for deriving the
read-ahead size from the optimal I/O size, make sure the latter is
updated on reshape as well.
Signed-off-by: Christoph Hellwig <[email protected]>
---
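
As a purely numerical example of what raid10_set_io_opt() computes
(values invented for illustration): with 512 KiB chunks (chunk_sectors =
1024), geo.raid_disks = 4 and geo.near_copies = 2:

        io_opt = (chunk_sectors << 9) * (raid_disks / near_copies)
               = 524288 * 2
               = 1 MiB, i.e. one full stripe of data
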
drivers/md/raid10.c | 22 ++++++++++++++--------
drivers/md/raid5.c | 10 ++++++++--
2 files changed, 22 insertions(+), 10 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b1d0c9d4ef7757..9f88ff9bdee437 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3695,10 +3695,20 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return ERR_PTR(err);
}
+static void raid10_set_io_opt(struct r10conf *conf)
+{
+ int raid_disks = conf->geo.raid_disks;
+
+ if (!(conf->geo.raid_disks % conf->geo.near_copies))
+ raid_disks /= conf->geo.near_copies;
+ blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
+ raid_disks);
+}
+
static int raid10_run(struct mddev *mddev)
{
struct r10conf *conf;
- int i, disk_idx, chunk_size;
+ int i, disk_idx;
struct raid10_info *disk;
struct md_rdev *rdev;
sector_t size;
@@ -3734,18 +3744,13 @@ static int raid10_run(struct mddev *mddev)
mddev->thread = conf->thread;
conf->thread = NULL;
- chunk_size = mddev->chunk_sectors << 9;
if (mddev->queue) {
blk_queue_max_discard_sectors(mddev->queue,
mddev->chunk_sectors);
blk_queue_max_write_same_sectors(mddev->queue, 0);
blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
- blk_queue_io_min(mddev->queue, chunk_size);
- if (conf->geo.raid_disks % conf->geo.near_copies)
- blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
- else
- blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->geo.raid_disks / conf->geo.near_copies));
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ raid10_set_io_opt(conf);
}
rdev_for_each(rdev, mddev) {
@@ -4719,6 +4724,7 @@ static void end_reshape(struct r10conf *conf)
stripe /= conf->geo.near_copies;
if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+ raid10_set_io_opt(conf);
}
conf->fullsync = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d7780b1dd0c528..68e41ce3ca75cc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7123,6 +7123,12 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
return 0;
}
+static void raid5_set_io_opt(struct r5conf *conf)
+{
+ blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
+ (conf->raid_disks - conf->max_degraded));
+}
+
static int raid5_run(struct mddev *mddev)
{
struct r5conf *conf;
@@ -7412,8 +7418,7 @@ static int raid5_run(struct mddev *mddev)
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
- blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->raid_disks - conf->max_degraded));
+ raid5_set_io_opt(conf);
mddev->queue->limits.raid_partial_stripes_expensive = 1;
/*
* We can only discard a whole stripe. It doesn't make sense to
@@ -8006,6 +8011,7 @@ static void end_reshape(struct r5conf *conf)
/ PAGE_SIZE);
if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+ raid5_set_io_opt(conf);
}
}
}
--
2.27.0
The last user of SB_I_MULTIROOT disappeared with commit f2aedb713c28
("NFS: Add fs_context support").
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
---
fs/namei.c | 4 ++--
include/linux/fs.h | 1 -
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/fs/namei.c b/fs/namei.c
index 72d4219c93acb7..e9ff0d54a110a7 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -568,8 +568,8 @@ static bool path_connected(struct vfsmount *mnt, struct dentry *dentry)
{
struct super_block *sb = mnt->mnt_sb;
- /* Bind mounts and multi-root filesystems can have disconnected paths */
- if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root))
+ /* Bind mounts can have disconnected paths */
+ if (mnt->mnt_root == sb->s_root)
return true;
return is_subdir(dentry, mnt->mnt_root);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 41cd993ec0f686..236543605dd118 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1366,7 +1366,6 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
-#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */
/* sb->s_iflags to limit user namespace mounts */
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
--
2.27.0
Generate the queue_sysfs_entry directly in QUEUE_SYSFS_BIT_FNS, given
that the macro already has all the required information, and rename the
generated show and store methods to match the other ones in the file.
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
---
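
For reference, a hand-expanded sketch of the show helper and the entry
that e.g. QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0) now generates (the
store helper is analogous):

        static ssize_t queue_iostats_show(struct request_queue *q, char *page)
        {
                return queue_var_show(test_bit(QUEUE_FLAG_IO_STAT,
                                               &q->queue_flags), page);
        }

        static struct queue_sysfs_entry queue_iostats_entry = {
                .attr   = { .name = "iostats", .mode = 0644 },
                .show   = queue_iostats_show,
                .store  = queue_iostats_store,
        };
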
block/blk-sysfs.c | 31 +++++++++----------------------
1 file changed, 9 insertions(+), 22 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index ce418d9128a0b2..cfbb039da8751f 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -257,16 +257,16 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
return queue_var_show(max_hw_sectors_kb, (page));
}
-#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
+#define QUEUE_SYSFS_BIT_FNS(_name, flag, neg) \
static ssize_t \
-queue_show_##name(struct request_queue *q, char *page) \
+queue_##_name##_show(struct request_queue *q, char *page) \
{ \
int bit; \
bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
return queue_var_show(neg ? !bit : bit, page); \
} \
static ssize_t \
-queue_store_##name(struct request_queue *q, const char *page, size_t count) \
+queue_##_name##_store(struct request_queue *q, const char *page, size_t count) \
{ \
unsigned long val; \
ssize_t ret; \
@@ -281,7 +281,12 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
else \
blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
return ret; \
-}
+} \
+static struct queue_sysfs_entry queue_##_name##_entry = { \
+ .attr = { .name = __stringify(_name), .mode = 0644 }, \
+ .show = queue_##_name##_show, \
+ .store = queue_##_name##_store, \
+};
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
@@ -661,12 +666,6 @@ static struct queue_sysfs_entry queue_zone_append_max_entry = {
.show = queue_zone_append_max_show,
};
-static struct queue_sysfs_entry queue_nonrot_entry = {
- .attr = {.name = "rotational", .mode = 0644 },
- .show = queue_show_nonrot,
- .store = queue_store_nonrot,
-};
-
static struct queue_sysfs_entry queue_zoned_entry = {
.attr = {.name = "zoned", .mode = 0444 },
.show = queue_zoned_show,
@@ -699,18 +698,6 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = {
.store = queue_rq_affinity_store,
};
-static struct queue_sysfs_entry queue_iostats_entry = {
- .attr = {.name = "iostats", .mode = 0644 },
- .show = queue_show_iostats,
- .store = queue_store_iostats,
-};
-
-static struct queue_sysfs_entry queue_random_entry = {
- .attr = {.name = "add_random", .mode = 0644 },
- .show = queue_show_random,
- .store = queue_store_random,
-};
-
static struct queue_sysfs_entry queue_poll_entry = {
.attr = {.name = "io_poll", .mode = 0644 },
.show = queue_poll_show,
--
2.27.0
Set up a readahead size by default, as very few users have a good
reason to change it.
Signed-off-by: Christoph Hellwig <[email protected]>
Acked-by: David Sterba <[email protected]> [btrfs]
Acked-by: Richard Weinberger <[email protected]> [ubifs, mtd]
---
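
Both halves of the split are visible in the diff below: bdi_alloc() now
provides the VM_READAHEAD_PAGES default for every new bdi, and a file
system that implements its own readahead simply zeroes it, e.g.:

        sb->s_bdi->ra_pages = 0;        /* fs does its own readahead */
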
block/blk-core.c | 1 -
drivers/mtd/mtdcore.c | 1 +
fs/9p/vfs_super.c | 4 ++--
fs/afs/super.c | 1 -
fs/btrfs/disk-io.c | 1 -
fs/fuse/inode.c | 1 -
fs/nfs/super.c | 9 +--------
fs/ubifs/super.c | 1 +
fs/vboxsf/super.c | 1 +
mm/backing-dev.c | 1 +
10 files changed, 7 insertions(+), 14 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 93104c7470e8ac..ea1665de7a2079 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -538,7 +538,6 @@ struct request_queue *blk_alloc_queue(int node_id)
if (!q->stats)
goto fail_stats;
- q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->node = node_id;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 7d930569a7dfb7..01b3fe888d885b 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -2196,6 +2196,7 @@ static struct backing_dev_info * __init mtd_bdi_init(char *name)
bdi = bdi_alloc(NUMA_NO_NODE);
if (!bdi)
return ERR_PTR(-ENOMEM);
+ bdi->ra_pages = 0;
/*
* We put '-0' suffix to the name to get the same name format as we
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 74df32be4c6a52..a338eb979cadf9 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -80,8 +80,8 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
if (ret)
return ret;
- if (v9ses->cache)
- sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
+ if (!v9ses->cache)
+ sb->s_bdi->ra_pages = 0;
sb->s_flags |= SB_ACTIVE | SB_DIRSYNC;
if (!v9ses->cache)
diff --git a/fs/afs/super.c b/fs/afs/super.c
index b552357b1d1379..3a40ee752c1e3f 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -456,7 +456,6 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
ret = super_setup_bdi(sb);
if (ret)
return ret;
- sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
/* allocate the root inode and dentry */
if (as->dyn_root) {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index ad157b55d7f5f0..f92c45fe019c48 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3033,7 +3033,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
}
sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
- sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index bba747520e9b08..17b00670fb539e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1049,7 +1049,6 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
if (err)
return err;
- sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
/* fuse does it's own writeback accounting */
sb->s_bdi->capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 7a70287f21a2c1..f943e37853fa25 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1200,13 +1200,6 @@ static void nfs_get_cache_cookie(struct super_block *sb,
}
#endif
-static void nfs_set_readahead(struct backing_dev_info *bdi,
- unsigned long iomax_pages)
-{
- bdi->ra_pages = VM_READAHEAD_PAGES;
- bdi->io_pages = iomax_pages;
-}
-
int nfs_get_tree_common(struct fs_context *fc)
{
struct nfs_fs_context *ctx = nfs_fc2context(fc);
@@ -1251,7 +1244,7 @@ int nfs_get_tree_common(struct fs_context *fc)
MINOR(server->s_dev));
if (error)
goto error_splat_super;
- nfs_set_readahead(s->s_bdi, server->rpages);
+ s->s_bdi->io_pages = server->rpages;
server->super = s;
}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 7fc2f3f07c16ed..ee7692e7a35371 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2159,6 +2159,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
c->vi.vol_id);
if (err)
goto out_close;
+ sb->s_bdi->ra_pages = 0; /* ubifs does its own readahead */
sb->s_fs_info = c;
sb->s_magic = UBIFS_SUPER_MAGIC;
diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
index 8fe03b4a0d2b03..6574ae5a97c2c8 100644
--- a/fs/vboxsf/super.c
+++ b/fs/vboxsf/super.c
@@ -167,6 +167,7 @@ static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
err = super_setup_bdi_name(sb, "vboxsf-%d", sbi->bdi_id);
if (err)
goto fail_free;
+ sb->s_bdi->ra_pages = 0;
/* Turn source into a shfl_string and map the folder */
size = strlen(fc->source) + 1;
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8e8b00627bb2d8..3e441e0ff1bc88 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -746,6 +746,7 @@ struct backing_dev_info *bdi_alloc(int node_id)
kfree(bdi);
return NULL;
}
+ bdi->ra_pages = VM_READAHEAD_PAGES;
return bdi;
}
EXPORT_SYMBOL(bdi_alloc);
--
2.27.0
This case isn't ever used.
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
---
drivers/block/drbd/drbd_req.c | 4 ----
include/linux/drbd.h | 1 -
2 files changed, 5 deletions(-)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 674be09b2da94a..4d944f2eb56efa 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -901,13 +901,9 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
enum drbd_read_balancing rbm)
{
- struct backing_dev_info *bdi;
int stripe_shift;
switch (rbm) {
- case RB_CONGESTED_REMOTE:
- bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
- return bdi_read_congested(bdi);
case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) >
atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 5755537b51b114..6a8286132751df 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -94,7 +94,6 @@ enum drbd_read_balancing {
RB_PREFER_REMOTE,
RB_ROUND_ROBIN,
RB_LEAST_PENDING,
- RB_CONGESTED_REMOTE,
RB_32K_STRIPING,
RB_64K_STRIPING,
RB_128K_STRIPING,
--
2.27.0
Add two helper macros to avoid boilerplate code for the queue sysfs
entries.
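For reference, QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb") from the patch
below expands to the same attribute structure that used to be written
out by hand:

static struct queue_sysfs_entry queue_ra_entry = {
	.attr	= { .name = "read_ahead_kb", .mode = 0644 },
	.show	= queue_ra_show,
	.store	= queue_ra_store,
};

QUEUE_RO_ENTRY works the same way, just without the .store method and
with mode 0444.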
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
---
block/blk-sysfs.c | 248 +++++++++++-----------------------------------
1 file changed, 58 insertions(+), 190 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index cfbb039da8751f..9bb4e42fb73265 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -551,201 +551,69 @@ static ssize_t queue_dax_show(struct request_queue *q, char *page)
return queue_var_show(blk_queue_dax(q), page);
}
-static struct queue_sysfs_entry queue_requests_entry = {
- .attr = {.name = "nr_requests", .mode = 0644 },
- .show = queue_requests_show,
- .store = queue_requests_store,
-};
-
-static struct queue_sysfs_entry queue_ra_entry = {
- .attr = {.name = "read_ahead_kb", .mode = 0644 },
- .show = queue_ra_show,
- .store = queue_ra_store,
-};
-
-static struct queue_sysfs_entry queue_max_sectors_entry = {
- .attr = {.name = "max_sectors_kb", .mode = 0644 },
- .show = queue_max_sectors_show,
- .store = queue_max_sectors_store,
-};
+#define QUEUE_RO_ENTRY(_prefix, _name) \
+static struct queue_sysfs_entry _prefix##_entry = { \
+ .attr = { .name = _name, .mode = 0444 }, \
+ .show = _prefix##_show, \
+};
+
+#define QUEUE_RW_ENTRY(_prefix, _name) \
+static struct queue_sysfs_entry _prefix##_entry = { \
+ .attr = { .name = _name, .mode = 0644 }, \
+ .show = _prefix##_show, \
+ .store = _prefix##_store, \
+};
+
+QUEUE_RW_ENTRY(queue_requests, "nr_requests");
+QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
+QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
+QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
+QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
+QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
+QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
+QUEUE_RW_ENTRY(elv_iosched, "scheduler");
+
+QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
+QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
+QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
+QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
+QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");
+
+QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
+QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
+QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
+QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
+QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
+
+QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
+QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
+QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
+
+QUEUE_RO_ENTRY(queue_zoned, "zoned");
+QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
+QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
+QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
+
+QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
+QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
+QUEUE_RW_ENTRY(queue_poll, "io_poll");
+QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
+QUEUE_RW_ENTRY(queue_wc, "write_cache");
+QUEUE_RO_ENTRY(queue_fua, "fua");
+QUEUE_RO_ENTRY(queue_dax, "dax");
+QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
+QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
-static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
- .attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
- .show = queue_max_hw_sectors_show,
-};
-
-static struct queue_sysfs_entry queue_max_segments_entry = {
- .attr = {.name = "max_segments", .mode = 0444 },
- .show = queue_max_segments_show,
-};
-
-static struct queue_sysfs_entry queue_max_discard_segments_entry = {
- .attr = {.name = "max_discard_segments", .mode = 0444 },
- .show = queue_max_discard_segments_show,
-};
-
-static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
- .attr = {.name = "max_integrity_segments", .mode = 0444 },
- .show = queue_max_integrity_segments_show,
-};
-
-static struct queue_sysfs_entry queue_max_segment_size_entry = {
- .attr = {.name = "max_segment_size", .mode = 0444 },
- .show = queue_max_segment_size_show,
-};
-
-static struct queue_sysfs_entry queue_iosched_entry = {
- .attr = {.name = "scheduler", .mode = 0644 },
- .show = elv_iosched_show,
- .store = elv_iosched_store,
-};
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+QUEUE_RW_ENTRY(blk_throtl_sample, "throttle_sample_time");
+#endif
+/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
.attr = {.name = "hw_sector_size", .mode = 0444 },
.show = queue_logical_block_size_show,
};
-static struct queue_sysfs_entry queue_logical_block_size_entry = {
- .attr = {.name = "logical_block_size", .mode = 0444 },
- .show = queue_logical_block_size_show,
-};
-
-static struct queue_sysfs_entry queue_physical_block_size_entry = {
- .attr = {.name = "physical_block_size", .mode = 0444 },
- .show = queue_physical_block_size_show,
-};
-
-static struct queue_sysfs_entry queue_chunk_sectors_entry = {
- .attr = {.name = "chunk_sectors", .mode = 0444 },
- .show = queue_chunk_sectors_show,
-};
-
-static struct queue_sysfs_entry queue_io_min_entry = {
- .attr = {.name = "minimum_io_size", .mode = 0444 },
- .show = queue_io_min_show,
-};
-
-static struct queue_sysfs_entry queue_io_opt_entry = {
- .attr = {.name = "optimal_io_size", .mode = 0444 },
- .show = queue_io_opt_show,
-};
-
-static struct queue_sysfs_entry queue_discard_granularity_entry = {
- .attr = {.name = "discard_granularity", .mode = 0444 },
- .show = queue_discard_granularity_show,
-};
-
-static struct queue_sysfs_entry queue_discard_max_hw_entry = {
- .attr = {.name = "discard_max_hw_bytes", .mode = 0444 },
- .show = queue_discard_max_hw_show,
-};
-
-static struct queue_sysfs_entry queue_discard_max_entry = {
- .attr = {.name = "discard_max_bytes", .mode = 0644 },
- .show = queue_discard_max_show,
- .store = queue_discard_max_store,
-};
-
-static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
- .attr = {.name = "discard_zeroes_data", .mode = 0444 },
- .show = queue_discard_zeroes_data_show,
-};
-
-static struct queue_sysfs_entry queue_write_same_max_entry = {
- .attr = {.name = "write_same_max_bytes", .mode = 0444 },
- .show = queue_write_same_max_show,
-};
-
-static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
- .attr = {.name = "write_zeroes_max_bytes", .mode = 0444 },
- .show = queue_write_zeroes_max_show,
-};
-
-static struct queue_sysfs_entry queue_zone_append_max_entry = {
- .attr = {.name = "zone_append_max_bytes", .mode = 0444 },
- .show = queue_zone_append_max_show,
-};
-
-static struct queue_sysfs_entry queue_zoned_entry = {
- .attr = {.name = "zoned", .mode = 0444 },
- .show = queue_zoned_show,
-};
-
-static struct queue_sysfs_entry queue_nr_zones_entry = {
- .attr = {.name = "nr_zones", .mode = 0444 },
- .show = queue_nr_zones_show,
-};
-
-static struct queue_sysfs_entry queue_max_open_zones_entry = {
- .attr = {.name = "max_open_zones", .mode = 0444 },
- .show = queue_max_open_zones_show,
-};
-
-static struct queue_sysfs_entry queue_max_active_zones_entry = {
- .attr = {.name = "max_active_zones", .mode = 0444 },
- .show = queue_max_active_zones_show,
-};
-
-static struct queue_sysfs_entry queue_nomerges_entry = {
- .attr = {.name = "nomerges", .mode = 0644 },
- .show = queue_nomerges_show,
- .store = queue_nomerges_store,
-};
-
-static struct queue_sysfs_entry queue_rq_affinity_entry = {
- .attr = {.name = "rq_affinity", .mode = 0644 },
- .show = queue_rq_affinity_show,
- .store = queue_rq_affinity_store,
-};
-
-static struct queue_sysfs_entry queue_poll_entry = {
- .attr = {.name = "io_poll", .mode = 0644 },
- .show = queue_poll_show,
- .store = queue_poll_store,
-};
-
-static struct queue_sysfs_entry queue_poll_delay_entry = {
- .attr = {.name = "io_poll_delay", .mode = 0644 },
- .show = queue_poll_delay_show,
- .store = queue_poll_delay_store,
-};
-
-static struct queue_sysfs_entry queue_wc_entry = {
- .attr = {.name = "write_cache", .mode = 0644 },
- .show = queue_wc_show,
- .store = queue_wc_store,
-};
-
-static struct queue_sysfs_entry queue_fua_entry = {
- .attr = {.name = "fua", .mode = 0444 },
- .show = queue_fua_show,
-};
-
-static struct queue_sysfs_entry queue_dax_entry = {
- .attr = {.name = "dax", .mode = 0444 },
- .show = queue_dax_show,
-};
-
-static struct queue_sysfs_entry queue_io_timeout_entry = {
- .attr = {.name = "io_timeout", .mode = 0644 },
- .show = queue_io_timeout_show,
- .store = queue_io_timeout_store,
-};
-
-static struct queue_sysfs_entry queue_wb_lat_entry = {
- .attr = {.name = "wbt_lat_usec", .mode = 0644 },
- .show = queue_wb_lat_show,
- .store = queue_wb_lat_store,
-};
-
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-static struct queue_sysfs_entry throtl_sample_time_entry = {
- .attr = {.name = "throttle_sample_time", .mode = 0644 },
- .show = blk_throtl_sample_time_show,
- .store = blk_throtl_sample_time_store,
-};
-#endif
-
static struct attribute *queue_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -755,7 +623,7 @@ static struct attribute *queue_attrs[] = {
&queue_max_discard_segments_entry.attr,
&queue_max_integrity_segments_entry.attr,
&queue_max_segment_size_entry.attr,
- &queue_iosched_entry.attr,
+ &elv_iosched_entry.attr,
&queue_hw_sector_size_entry.attr,
&queue_logical_block_size_entry.attr,
&queue_physical_block_size_entry.attr,
@@ -786,7 +654,7 @@ static struct attribute *queue_attrs[] = {
&queue_poll_delay_entry.attr,
&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- &throtl_sample_time_entry.attr,
+ &blk_throtl_sample_time_entry.attr,
#endif
NULL,
};
--
2.27.0
There is no point in trying to call bdev_read_page if SWP_SYNCHRONOUS_IO
is not set, as the device won't support it.
Signed-off-by: Christoph Hellwig <[email protected]>
---
mm/page_io.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/mm/page_io.c b/mm/page_io.c
index ccda7679008851..7eef3c84766abc 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -403,15 +403,17 @@ int swap_readpage(struct page *page, bool synchronous)
goto out;
}
- ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
- if (!ret) {
- if (trylock_page(page)) {
- swap_slot_free_notify(page);
- unlock_page(page);
- }
+ if (sis->flags & SWP_SYNCHRONOUS_IO) {
+ ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
+ if (!ret) {
+ if (trylock_page(page)) {
+ swap_slot_free_notify(page);
+ unlock_page(page);
+ }
- count_vm_event(PSWPIN);
- goto out;
+ count_vm_event(PSWPIN);
+ goto out;
+ }
}
ret = 0;
--
2.27.0
Ever since the switch to blk-mq, a lower device not used for VM
writeback will not be marked congested, so the check will never
trigger.
Signed-off-by: Christoph Hellwig <[email protected]>
---
drivers/block/drbd/drbd_nl.c | 6 ------
1 file changed, 6 deletions(-)
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index d0d9a549b58388..650372ee2c7822 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -3370,7 +3370,6 @@ static void device_to_statistics(struct device_statistics *s,
if (get_ldev(device)) {
struct drbd_md *md = &device->ldev->md;
u64 *history_uuids = (u64 *)s->history_uuids;
- struct request_queue *q;
int n;
spin_lock_irq(&md->uuid_lock);
@@ -3384,11 +3383,6 @@ static void device_to_statistics(struct device_statistics *s,
spin_unlock_irq(&md->uuid_lock);
s->dev_disk_flags = md->flags;
- q = bdev_get_queue(device->ldev->backing_bdev);
- s->dev_lower_blocked =
- bdi_congested(q->backing_dev_info,
- (1 << WB_async_congested) |
- (1 << WB_sync_congested));
put_ldev(device);
}
s->dev_size = drbd_get_capacity(device->this_bdev);
--
2.27.0
On Sun, Jul 26, 2020 at 05:03:19PM +0200, Christoph Hellwig wrote:
> Hi Jens,
>
> this series contains a bunch of different BDI cleanups. The biggest item
> is to isolate block drivers from the BDI in preparation of changing the
> lifetime of the block device BDI in a follow up series.
Ooops, sorry for reposting this - it is exactly the same v3 I already
posted.
On Sun, Jul 26, 2020 at 05:03:29PM +0200, Christoph Hellwig wrote:
> BDI_CAP_SYNCHRONOUS_IO is only checked in the swap code, and used to
> decided if ->rw_page can be used on a block device. Just check up for
> the method instead. The only complication is that zram needs a second
> set of block_device_operations as it can switch between modes that
> actually support ->rw_page and those who don't.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
> drivers/block/brd.c | 1 -
> drivers/block/zram/zram_drv.c | 19 +++++++++++++------
> drivers/nvdimm/btt.c | 2 --
> drivers/nvdimm/pmem.c | 1 -
> include/linux/backing-dev.h | 9 ---------
> mm/swapfile.c | 2 +-
> 6 files changed, 14 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/block/brd.c b/drivers/block/brd.c
> index 2723a70eb85593..cc49a921339f77 100644
> --- a/drivers/block/brd.c
> +++ b/drivers/block/brd.c
> @@ -403,7 +403,6 @@ static struct brd_device *brd_alloc(int i)
> disk->flags = GENHD_FL_EXT_DEVT;
> sprintf(disk->disk_name, "ram%d", i);
> set_capacity(disk, rd_size * 2);
> - brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
>
> /* Tell the block layer that this is not a rotational device */
> blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 9100ac36670afc..d73ddf018fa65f 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -52,6 +52,9 @@ static unsigned int num_devices = 1;
> */
> static size_t huge_class_size;
>
> +static const struct block_device_operations zram_devops;
> +static const struct block_device_operations zram_wb_devops;
> +
> static void zram_free_page(struct zram *zram, size_t index);
> static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
> u32 index, int offset, struct bio *bio);
> @@ -408,8 +411,7 @@ static void reset_bdev(struct zram *zram)
> zram->backing_dev = NULL;
> zram->old_block_size = 0;
> zram->bdev = NULL;
> - zram->disk->queue->backing_dev_info->capabilities |=
> - BDI_CAP_SYNCHRONOUS_IO;
> + zram->disk->fops = &zram_devops;
> kvfree(zram->bitmap);
> zram->bitmap = NULL;
> }
> @@ -528,8 +530,7 @@ static ssize_t backing_dev_store(struct device *dev,
> * freely but in fact, IO is going on so finally could cause
> * use-after-free when the IO is really done.
> */
> - zram->disk->queue->backing_dev_info->capabilities &=
> - ~BDI_CAP_SYNCHRONOUS_IO;
> + zram->disk->fops = &zram_wb_devops;
> up_write(&zram->init_lock);
For zram, regardless of BDI_CAP_SYNCHRONOUS_IO, it has used rw_page
every time on the read/write path. This patch together with the next one
will make zram use a bio instead of rw_page when it is declared
!BDI_CAP_SYNCHRONOUS_IO, which introduces a performance regression.
In the swap code, BDI_CAP_SYNCHRONOUS_IO is used to avoid the swap cache
when the page is private. bdev_read_page (i.e. a ->rw_page method) does
not by itself imply synchronous operation, which is why this patch
breaks the old behavior.
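(For context, the swap-in fast path being referred to looks roughly like
this; a simplified sketch of the do_swap_page() check, not an exact
quote:)

	/*
	 * Simplified: only when the swap device is flagged synchronous
	 * and the swap entry has a single user does the fault path skip
	 * the swap cache and read the page in directly.
	 */
	if ((si->flags & SWP_SYNCHRONOUS_IO) && __swap_count(entry) == 1) {
		/* allocate a page and read it synchronously, no swap cache */
	} else {
		/* normal swap cache lookup / readahead path */
	}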
>
> pr_info("setup backing device %s\n", file_name);
> @@ -1819,6 +1820,13 @@ static const struct block_device_operations zram_devops = {
> .owner = THIS_MODULE
> };
>
> +static const struct block_device_operations zram_wb_devops = {
> + .open = zram_open,
> + .submit_bio = zram_submit_bio,
> + .swap_slot_free_notify = zram_slot_free_notify,
> + .owner = THIS_MODULE
> +};
> +
> static DEVICE_ATTR_WO(compact);
> static DEVICE_ATTR_RW(disksize);
> static DEVICE_ATTR_RO(initstate);
> @@ -1946,8 +1954,7 @@ static int zram_add(void)
> if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
> blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
>
> - zram->disk->queue->backing_dev_info->capabilities |=
> - (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
> + zram->disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
> device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
>
> strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
> diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
> index 412d21d8f64351..b4184dc9b41eb4 100644
> --- a/drivers/nvdimm/btt.c
> +++ b/drivers/nvdimm/btt.c
> @@ -1540,8 +1540,6 @@ static int btt_blk_init(struct btt *btt)
> btt->btt_disk->private_data = btt;
> btt->btt_disk->queue = btt->btt_queue;
> btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
> - btt->btt_disk->queue->backing_dev_info->capabilities |=
> - BDI_CAP_SYNCHRONOUS_IO;
>
> blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
> blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
> diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
> index 94790e6e0e4ce1..436b83fb24ad61 100644
> --- a/drivers/nvdimm/pmem.c
> +++ b/drivers/nvdimm/pmem.c
> @@ -478,7 +478,6 @@ static int pmem_attach_disk(struct device *dev,
> disk->queue = q;
> disk->flags = GENHD_FL_EXT_DEVT;
> disk->private_data = pmem;
> - disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
> nvdimm_namespace_disk_name(ndns, disk->disk_name);
> set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
> / 512);
> diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
> index 52583b6f2ea05d..860ea33571bce5 100644
> --- a/include/linux/backing-dev.h
> +++ b/include/linux/backing-dev.h
> @@ -122,9 +122,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
> * BDI_CAP_NO_WRITEBACK: Don't write pages back
> * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
> * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
> - *
> - * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
> - * inefficient.
> */
> #define BDI_CAP_NO_ACCT_DIRTY 0x00000001
> #define BDI_CAP_NO_WRITEBACK 0x00000002
> @@ -132,7 +129,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
> #define BDI_CAP_STABLE_WRITES 0x00000008
> #define BDI_CAP_STRICTLIMIT 0x00000010
> #define BDI_CAP_CGROUP_WRITEBACK 0x00000020
> -#define BDI_CAP_SYNCHRONOUS_IO 0x00000040
>
> #define BDI_CAP_NO_ACCT_AND_WRITEBACK \
> (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
> @@ -174,11 +170,6 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
> long congestion_wait(int sync, long timeout);
> long wait_iff_congested(int sync, long timeout);
>
> -static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
> -{
> - return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
> -}
> -
> static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
> {
> return bdi->capabilities & BDI_CAP_STABLE_WRITES;
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 6c26916e95fd4a..18eac97b10e502 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -3230,7 +3230,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
> if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
> p->flags |= SWP_STABLE_WRITES;
>
> - if (bdi_cap_synchronous_io(inode_to_bdi(inode)))
> + if (p->bdev && p->bdev->bd_disk->fops->rw_page)
> p->flags |= SWP_SYNCHRONOUS_IO;
>
> if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
> --
> 2.27.0
>
On Sun, Jul 26, 2020 at 12:06:39PM -0700, Minchan Kim wrote:
> > @@ -528,8 +530,7 @@ static ssize_t backing_dev_store(struct device *dev,
> > * freely but in fact, IO is going on so finally could cause
> > * use-after-free when the IO is really done.
> > */
> > - zram->disk->queue->backing_dev_info->capabilities &=
> > - ~BDI_CAP_SYNCHRONOUS_IO;
> > + zram->disk->fops = &zram_wb_devops;
> > up_write(&zram->init_lock);
>
> For zram, regardless of BDI_CAP_SYNCHRONOUS_IO, it has used rw_page
> every time on the read/write path. This patch together with the next one
> will make zram use a bio instead of rw_page when it is declared
> !BDI_CAP_SYNCHRONOUS_IO, which introduces a performance regression.
It really should not matter, as the overhead of setting up a bio
is minimal. It is also only used in the legacy mpage buffered I/O
code outside of the swap code, which has so many performance issues on
its own that even if this made a difference it wouldn't matter.
If you want magic treatment for your zram swap code you really need
to integrate it with the swap code instead of burdening the block layer
with all this mess.
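(For reference, "setting up a bio" here amounts to roughly the following
in the fallback path; a simplified sketch, not the exact swap_readpage()
code:)

	struct bio *bio = bio_alloc(GFP_KERNEL, 1);

	bio_set_dev(bio, sis->bdev);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_opf = REQ_OP_READ;
	bio->bi_end_io = end_swap_bio_read;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	count_vm_event(PSWPIN);
	submit_bio(bio);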
Looks good,
Reviewed-by: Johannes Thumshirn <[email protected]>
Looks good,
Reviewed-by: Johannes Thumshirn <[email protected]>
Looks good,
Reviewed-by: Johannes Thumshirn <[email protected]>
Looks good,
Reviewed-by: Johannes Thumshirn <[email protected]>
Looks good,
Reviewed-by: Johannes Thumshirn <[email protected]>
Looks good,
Reviewed-by: Johannes Thumshirn <[email protected]>
Looks good,
Reviewed-by: Johannes Thumshirn <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
Looks good,
Reviewed-by: Johannes Thumshirn <[email protected]>
Greetings,
FYI, we noticed a -13.6% regression of fsmark.files_per_sec due to commit:
commit: 54529aac984de8d3928810c85b575efac0f9d616 ("[PATCH 07/14] block: make QUEUE_SYSFS_BIT_FNS a little more useful")
url: https://github.com/0day-ci/linux/commits/Christoph-Hellwig/fs-remove-the-unused-SB_I_MULTIROOT-flag/20200727-000342
base: https://git.kernel.org/cgit/linux/kernel/git/axboe/linux-block.git for-next
in testcase: fsmark
on test machine: 192 threads Cooper Lake with 128G memory
with following parameters:
iterations: 1x
nr_threads: 1t
disk: 1BRD_32G
fs: btrfs
filesize: 4K
test_size: 4G
sync_method: fsyncBeforeClose
nr_files_per_directory: 1fpd
cpufreq_governor: performance
ucode: 0x86000017
test-description: fsmark is a file system benchmark for testing synchronous write workloads, for example mail server workloads.
test-url: https://sourceforge.net/projects/fsmark/
In addition to that, the commit also has a significant impact on the following tests:
+------------------+-------------------------------------------------------------+
| testcase: change | fio-basic: fio.write_iops 993.0% improvement |
| test machine | 192 threads Intel(R) Xeon(R) CPU @ 2.20GHz with 192G memory |
| test parameters | bs=4k |
| | cpufreq_governor=performance |
| | disk=1SSD |
| | fs=btrfs |
| | ioengine=sync |
| | nr_task=8 |
| | runtime=300s |
| | rw=randwrite |
| | test_size=512g |
| | ucode=0x4002f01 |
+------------------+-------------------------------------------------------------+
If you fix the issue, kindly add the following tag
Reported-by: kernel test robot <[email protected]>
Details are as below:
-------------------------------------------------------------------------------------------------->
To reproduce:
git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml
=========================================================================================
compiler/cpufreq_governor/disk/filesize/fs/iterations/kconfig/nr_files_per_directory/nr_threads/rootfs/sync_method/tbox_group/test_size/testcase/ucode:
gcc-9/performance/1BRD_32G/4K/btrfs/1x/x86_64-rhel-8.3/1fpd/1t/debian-10.4-x86_64-20200603.cgz/fsyncBeforeClose/lkp-cpx-4s1/4G/fsmark/0x86000017
commit:
f8d6c28b2a ("block: lift setting the readahead size into the block layer")
54529aac98 ("block: make QUEUE_SYSFS_BIT_FNS a little more useful")
f8d6c28b2a8e1156 54529aac984de8d3928810c85b5
---------------- ---------------------------
fail:runs %reproduction fail:runs
| | |
0:4 -1% 0:4 perf-profile.children.cycles-pp.error_entry
%stddev %change %stddev
\ | \
17751926 +8.9% 19337948 fsmark.app_overhead
6346 -13.6% 5483 fsmark.files_per_sec
165.15 +14.7% 189.42 fsmark.time.elapsed_time
165.15 +14.7% 189.42 fsmark.time.elapsed_time.max
24782 ± 2% +16.3% 28832 fsmark.time.involuntary_context_switches
151.13 +15.2% 174.14 fsmark.time.system_time
1147770 +11.2% 1276761 fsmark.time.voluntary_context_switches
26.68 -1.8% 26.20 boot-time.dhcp
1094 ± 37% -48.7% 561.25 ± 50% numa-meminfo.node2.PageTables
47861180 -9.2% 43467411 meminfo.MemFree
17734610 +24.8% 22128380 ± 3% meminfo.Memused
500290 +66.6% 833499 vmstat.io.bo
47896231 -9.2% 43485212 vmstat.memory.free
48955 +21.0% 59253 vmstat.system.cs
100261 +18.1% 118396 slabinfo.radix_tree_node.active_objs
1798 +18.0% 2122 slabinfo.radix_tree_node.active_slabs
100759 +18.0% 118876 slabinfo.radix_tree_node.num_objs
1798 +18.0% 2122 slabinfo.radix_tree_node.num_slabs
38615161 +89.9% 73314982 ± 2% cpuidle.C1.time
3109557 +58.2% 4920768 cpuidle.C1.usage
3.027e+10 +14.3% 3.459e+10 cpuidle.C1E.time
64384612 +14.0% 73424957 cpuidle.C1E.usage
420763 ± 7% +23.8% 521103 ± 11% cpuidle.POLL.time
1001776 +4.1% 1042639 proc-vmstat.nr_active_file
12993 +1.9% 13240 proc-vmstat.nr_dirty
1343494 -8.2% 1233140 proc-vmstat.nr_dirty_background_threshold
2690273 -8.2% 2469297 proc-vmstat.nr_dirty_threshold
11965705 -9.2% 10868385 proc-vmstat.nr_free_pages
1001776 +4.1% 1042640 proc-vmstat.nr_zone_active_file
13032 +2.0% 13297 proc-vmstat.nr_zone_write_pending
1940119 ± 2% +72.5% 3346705 ± 9% proc-vmstat.numa_foreign
4131750 +10.3% 4559084 ± 6% proc-vmstat.numa_hit
4038575 +10.6% 4465938 ± 6% proc-vmstat.numa_local
1940119 ± 2% +72.5% 3346705 ± 9% proc-vmstat.numa_miss
2033294 ± 2% +69.2% 3439851 ± 9% proc-vmstat.numa_other
950436 +6.2% 1009835 ± 6% proc-vmstat.pgactivate
6711546 ± 2% +28.4% 8614751 ± 2% proc-vmstat.pgalloc_normal
665913 +14.7% 764043 proc-vmstat.pgfault
83920002 +91.2% 1.604e+08 proc-vmstat.pgpgout
9.16 +7.3% 9.82 perf-stat.i.MPKI
1.25e+09 -6.7% 1.167e+09 perf-stat.i.branch-instructions
11477332 -5.2% 10880556 ± 2% perf-stat.i.branch-misses
49712 +21.0% 60163 perf-stat.i.context-switches
2.95 +7.3% 3.17 ± 2% perf-stat.i.cpi
1.615e+09 -4.9% 1.536e+09 perf-stat.i.dTLB-loads
8.427e+08 -5.4% 7.973e+08 perf-stat.i.dTLB-stores
6.372e+09 -6.9% 5.93e+09 perf-stat.i.instructions
1516 -6.2% 1422 perf-stat.i.instructions-per-iTLB-miss
0.34 -6.9% 0.32 ± 2% perf-stat.i.ipc
19.67 -5.5% 18.59 perf-stat.i.metric.M/sec
63.85 +4.9 68.72 ± 3% perf-stat.i.node-load-miss-rate%
930657 ± 5% -14.6% 795230 ± 12% perf-stat.i.node-loads
472374 ± 2% +31.6% 621845 ± 9% perf-stat.i.node-store-misses
972412 ± 3% +9.0% 1060090 ± 6% perf-stat.i.node-stores
8.97 +7.5% 9.64 perf-stat.overall.MPKI
2.90 +7.4% 3.12 ± 2% perf-stat.overall.cpi
1515 -6.3% 1419 perf-stat.overall.instructions-per-iTLB-miss
0.34 -6.8% 0.32 ± 2% perf-stat.overall.ipc
1.243e+09 -6.6% 1.16e+09 perf-stat.ps.branch-instructions
11419074 -5.2% 10822847 ± 2% perf-stat.ps.branch-misses
49384 +21.1% 59799 perf-stat.ps.context-switches
1.606e+09 -4.8% 1.528e+09 perf-stat.ps.dTLB-loads
8.377e+08 -5.3% 7.931e+08 perf-stat.ps.dTLB-stores
6.334e+09 -6.9% 5.898e+09 perf-stat.ps.instructions
924849 ± 5% -14.5% 790976 ± 12% perf-stat.ps.node-loads
469241 ± 2% +31.8% 618470 ± 9% perf-stat.ps.node-store-misses
966794 ± 3% +9.1% 1054708 ± 6% perf-stat.ps.node-stores
1.05e+12 +6.9% 1.123e+12 perf-stat.total.instructions
11038 ± 22% +25.7% 13880 ± 23% sched_debug.cfs_rq:/.load.avg
230.71 ± 20% -42.7% 132.22 ± 10% sched_debug.cfs_rq:/.load_avg.avg
21506 ± 20% -51.5% 10427 ± 6% sched_debug.cfs_rq:/.load_avg.max
2086 ± 20% -49.5% 1052 ± 8% sched_debug.cfs_rq:/.load_avg.stddev
93248 ± 14% +39.6% 130209 sched_debug.cpu.clock.avg
93261 ± 14% +39.6% 130221 sched_debug.cpu.clock.max
93235 ± 14% +39.6% 130198 sched_debug.cpu.clock.min
92126 ± 14% +39.2% 128251 sched_debug.cpu.clock_task.avg
92856 ± 14% +39.5% 129536 sched_debug.cpu.clock_task.max
83568 ± 15% +42.4% 119014 sched_debug.cpu.clock_task.min
797.56 ± 7% +27.5% 1016 ± 4% sched_debug.cpu.clock_task.stddev
5007 ± 8% +23.4% 6180 sched_debug.cpu.curr->pid.max
562.64 ± 2% +7.9% 607.19 ± 4% sched_debug.cpu.curr->pid.stddev
13542 ± 25% +122.7% 30158 sched_debug.cpu.nr_switches.avg
525933 ± 27% +49.2% 784862 ± 9% sched_debug.cpu.nr_switches.max
848.29 ± 32% +40.3% 1190 ± 15% sched_debug.cpu.nr_switches.min
48875 ± 24% +81.5% 88697 ± 5% sched_debug.cpu.nr_switches.stddev
11890 ± 28% +139.7% 28500 sched_debug.cpu.sched_count.avg
519219 ± 28% +50.2% 779702 ± 10% sched_debug.cpu.sched_count.max
222.04 ± 23% +69.1% 375.56 ± 6% sched_debug.cpu.sched_count.min
48367 ± 24% +82.7% 88372 ± 6% sched_debug.cpu.sched_count.stddev
5870 ± 28% +140.5% 14121 sched_debug.cpu.sched_goidle.avg
253647 ± 28% +50.3% 381213 ± 10% sched_debug.cpu.sched_goidle.max
83.21 ± 22% +70.5% 141.88 ± 7% sched_debug.cpu.sched_goidle.min
23845 ± 24% +83.4% 43739 ± 6% sched_debug.cpu.sched_goidle.stddev
5911 ± 28% +140.1% 14194 sched_debug.cpu.ttwu_count.avg
373905 ± 29% +112.3% 793765 ± 10% sched_debug.cpu.ttwu_count.max
84.08 ± 25% +74.2% 146.50 ± 7% sched_debug.cpu.ttwu_count.min
29382 ± 28% +123.8% 65763 ± 7% sched_debug.cpu.ttwu_count.stddev
204.86 ± 25% +68.6% 345.39 sched_debug.cpu.ttwu_local.avg
6318 ± 28% +44.3% 9115 ± 5% sched_debug.cpu.ttwu_local.max
76.42 ± 24% +67.7% 128.19 ± 4% sched_debug.cpu.ttwu_local.min
451.44 ± 28% +46.2% 660.22 ± 4% sched_debug.cpu.ttwu_local.stddev
93236 ± 14% +39.6% 130198 sched_debug.cpu_clk
92489 ± 14% +40.0% 129450 sched_debug.ktime
93635 ± 14% +39.5% 130601 sched_debug.sched_clk
332710 +14.6% 381386 interrupts.CPU0.LOC:Local_timer_interrupts
332760 +14.6% 381375 interrupts.CPU1.LOC:Local_timer_interrupts
332744 +14.6% 381431 interrupts.CPU10.LOC:Local_timer_interrupts
332675 +14.6% 381345 interrupts.CPU100.LOC:Local_timer_interrupts
332711 +14.6% 381337 interrupts.CPU101.LOC:Local_timer_interrupts
332687 +14.6% 381338 interrupts.CPU102.LOC:Local_timer_interrupts
332686 +14.6% 381306 interrupts.CPU103.LOC:Local_timer_interrupts
332673 +14.6% 381359 interrupts.CPU104.LOC:Local_timer_interrupts
332692 +14.6% 381284 interrupts.CPU105.LOC:Local_timer_interrupts
332672 +14.6% 381322 interrupts.CPU106.LOC:Local_timer_interrupts
332662 +14.6% 381373 interrupts.CPU107.LOC:Local_timer_interrupts
332692 +14.6% 381320 interrupts.CPU108.LOC:Local_timer_interrupts
332660 +14.6% 381331 interrupts.CPU109.LOC:Local_timer_interrupts
332696 +14.6% 381321 interrupts.CPU11.LOC:Local_timer_interrupts
95.00 ±109% -72.4% 26.25 ±135% interrupts.CPU11.RES:Rescheduling_interrupts
332674 +14.6% 381351 interrupts.CPU110.LOC:Local_timer_interrupts
332712 +14.6% 381322 interrupts.CPU111.LOC:Local_timer_interrupts
332671 +14.6% 381355 interrupts.CPU112.LOC:Local_timer_interrupts
332660 +14.6% 381389 interrupts.CPU113.LOC:Local_timer_interrupts
332658 +14.6% 381317 interrupts.CPU114.LOC:Local_timer_interrupts
332659 +14.6% 381322 interrupts.CPU115.LOC:Local_timer_interrupts
332659 +14.6% 381304 interrupts.CPU116.LOC:Local_timer_interrupts
332636 +14.6% 381350 interrupts.CPU117.LOC:Local_timer_interrupts
332644 +14.6% 381353 interrupts.CPU118.LOC:Local_timer_interrupts
332658 +14.6% 381306 interrupts.CPU119.LOC:Local_timer_interrupts
332608 +14.6% 381178 interrupts.CPU12.LOC:Local_timer_interrupts
332661 +14.6% 381292 interrupts.CPU120.LOC:Local_timer_interrupts
332697 +14.6% 381360 interrupts.CPU121.LOC:Local_timer_interrupts
332680 +14.6% 381351 interrupts.CPU122.LOC:Local_timer_interrupts
332663 +14.6% 381299 interrupts.CPU123.LOC:Local_timer_interrupts
332668 +14.6% 381367 interrupts.CPU124.LOC:Local_timer_interrupts
332675 +14.6% 381272 interrupts.CPU125.LOC:Local_timer_interrupts
332663 +14.6% 381267 interrupts.CPU126.LOC:Local_timer_interrupts
332671 +14.6% 381311 interrupts.CPU127.LOC:Local_timer_interrupts
332734 +14.6% 381284 interrupts.CPU128.LOC:Local_timer_interrupts
332665 +14.6% 381279 interrupts.CPU129.LOC:Local_timer_interrupts
332720 +14.6% 381445 interrupts.CPU13.LOC:Local_timer_interrupts
332666 +14.6% 381276 interrupts.CPU130.LOC:Local_timer_interrupts
332642 +14.6% 381277 interrupts.CPU131.LOC:Local_timer_interrupts
332671 +14.6% 381297 interrupts.CPU132.LOC:Local_timer_interrupts
332664 +14.6% 381276 interrupts.CPU133.LOC:Local_timer_interrupts
332691 +14.6% 381296 interrupts.CPU134.LOC:Local_timer_interrupts
332689 +14.6% 381270 interrupts.CPU135.LOC:Local_timer_interrupts
332573 +14.6% 381248 interrupts.CPU136.LOC:Local_timer_interrupts
9.50 ± 49% +381.6% 45.75 ±103% interrupts.CPU136.RES:Rescheduling_interrupts
332643 +14.6% 381263 interrupts.CPU137.LOC:Local_timer_interrupts
332654 +14.6% 381233 interrupts.CPU138.LOC:Local_timer_interrupts
332657 +14.6% 381274 interrupts.CPU139.LOC:Local_timer_interrupts
332653 +14.7% 381411 interrupts.CPU14.LOC:Local_timer_interrupts
332673 +14.6% 381277 interrupts.CPU140.LOC:Local_timer_interrupts
332655 +14.6% 381276 interrupts.CPU141.LOC:Local_timer_interrupts
332548 +14.7% 381277 interrupts.CPU142.LOC:Local_timer_interrupts
332658 +14.6% 381277 interrupts.CPU143.LOC:Local_timer_interrupts
332701 +14.6% 381360 interrupts.CPU144.LOC:Local_timer_interrupts
332698 +14.6% 381314 interrupts.CPU145.LOC:Local_timer_interrupts
332696 +14.6% 381314 interrupts.CPU146.LOC:Local_timer_interrupts
332711 +14.6% 381362 interrupts.CPU147.LOC:Local_timer_interrupts
332700 +14.6% 381328 interrupts.CPU148.LOC:Local_timer_interrupts
332700 +14.6% 381312 interrupts.CPU149.LOC:Local_timer_interrupts
332727 +14.6% 381299 interrupts.CPU15.LOC:Local_timer_interrupts
332703 +14.6% 381352 interrupts.CPU150.LOC:Local_timer_interrupts
332696 +14.6% 381295 interrupts.CPU151.LOC:Local_timer_interrupts
332800 +14.6% 381300 interrupts.CPU152.LOC:Local_timer_interrupts
332710 +14.6% 381291 interrupts.CPU153.LOC:Local_timer_interrupts
332727 +14.6% 381302 interrupts.CPU154.LOC:Local_timer_interrupts
332711 +14.6% 381291 interrupts.CPU155.LOC:Local_timer_interrupts
332761 +14.6% 381291 interrupts.CPU156.LOC:Local_timer_interrupts
332686 +14.6% 381282 interrupts.CPU157.LOC:Local_timer_interrupts
332681 +14.6% 381299 interrupts.CPU158.LOC:Local_timer_interrupts
332713 +14.6% 381340 interrupts.CPU159.LOC:Local_timer_interrupts
332695 +14.6% 381344 interrupts.CPU16.LOC:Local_timer_interrupts
332720 +14.6% 381292 interrupts.CPU160.LOC:Local_timer_interrupts
332689 +14.6% 381290 interrupts.CPU161.LOC:Local_timer_interrupts
332711 +14.6% 381275 interrupts.CPU162.LOC:Local_timer_interrupts
332691 +14.6% 381276 interrupts.CPU163.LOC:Local_timer_interrupts
332685 +14.6% 381288 interrupts.CPU164.LOC:Local_timer_interrupts
332794 +14.6% 381316 interrupts.CPU165.LOC:Local_timer_interrupts
332691 +14.6% 381274 interrupts.CPU166.LOC:Local_timer_interrupts
332686 +14.6% 381280 interrupts.CPU167.LOC:Local_timer_interrupts
332709 +14.6% 381291 interrupts.CPU168.LOC:Local_timer_interrupts
332683 +14.6% 381304 interrupts.CPU169.LOC:Local_timer_interrupts
332700 +14.6% 381292 interrupts.CPU17.LOC:Local_timer_interrupts
332729 +14.6% 381304 interrupts.CPU170.LOC:Local_timer_interrupts
332745 +14.6% 381315 interrupts.CPU171.LOC:Local_timer_interrupts
332728 +14.6% 381314 interrupts.CPU172.LOC:Local_timer_interrupts
332664 +14.6% 381293 interrupts.CPU173.LOC:Local_timer_interrupts
332721 +14.6% 381283 interrupts.CPU174.LOC:Local_timer_interrupts
332708 +14.6% 381281 interrupts.CPU175.LOC:Local_timer_interrupts
332691 +14.6% 381285 interrupts.CPU176.LOC:Local_timer_interrupts
332675 +14.6% 381292 interrupts.CPU177.LOC:Local_timer_interrupts
332664 +14.6% 381296 interrupts.CPU178.LOC:Local_timer_interrupts
332660 +14.6% 381287 interrupts.CPU179.LOC:Local_timer_interrupts
332652 +14.6% 381318 interrupts.CPU18.LOC:Local_timer_interrupts
332664 +14.6% 381282 interrupts.CPU180.LOC:Local_timer_interrupts
332566 +14.7% 381290 interrupts.CPU181.LOC:Local_timer_interrupts
7.00 ± 17% +607.1% 49.50 ±105% interrupts.CPU181.RES:Rescheduling_interrupts
332645 +14.6% 381288 interrupts.CPU182.LOC:Local_timer_interrupts
332671 +14.6% 381283 interrupts.CPU183.LOC:Local_timer_interrupts
10.25 ± 25% +509.8% 62.50 ±119% interrupts.CPU183.RES:Rescheduling_interrupts
332705 +14.6% 381294 interrupts.CPU184.LOC:Local_timer_interrupts
332699 +14.6% 381334 interrupts.CPU185.LOC:Local_timer_interrupts
332645 +14.6% 381330 interrupts.CPU186.LOC:Local_timer_interrupts
332644 +14.6% 381306 interrupts.CPU187.LOC:Local_timer_interrupts
332655 +14.6% 381268 interrupts.CPU188.LOC:Local_timer_interrupts
332645 +14.6% 381357 interrupts.CPU189.LOC:Local_timer_interrupts
13.25 ± 20% +207.5% 40.75 ± 87% interrupts.CPU189.RES:Rescheduling_interrupts
332697 +14.6% 381311 interrupts.CPU19.LOC:Local_timer_interrupts
332661 +14.6% 381300 interrupts.CPU190.LOC:Local_timer_interrupts
332826 +14.6% 381443 interrupts.CPU191.LOC:Local_timer_interrupts
332687 +14.6% 381405 interrupts.CPU2.LOC:Local_timer_interrupts
332683 +14.6% 381333 interrupts.CPU20.LOC:Local_timer_interrupts
332671 +14.6% 381325 interrupts.CPU21.LOC:Local_timer_interrupts
332703 +14.6% 381238 interrupts.CPU22.LOC:Local_timer_interrupts
332696 +14.6% 381347 interrupts.CPU23.LOC:Local_timer_interrupts
332653 +14.6% 381286 interrupts.CPU24.LOC:Local_timer_interrupts
332709 +14.6% 381354 interrupts.CPU25.LOC:Local_timer_interrupts
332675 +14.6% 381329 interrupts.CPU26.LOC:Local_timer_interrupts
332742 +14.6% 381338 interrupts.CPU27.LOC:Local_timer_interrupts
332699 +14.6% 381378 interrupts.CPU28.LOC:Local_timer_interrupts
332735 +14.6% 381336 interrupts.CPU29.LOC:Local_timer_interrupts
332595 +14.7% 381338 interrupts.CPU3.LOC:Local_timer_interrupts
332700 +14.6% 381348 interrupts.CPU30.LOC:Local_timer_interrupts
8.25 ± 85% +430.3% 43.75 ± 94% interrupts.CPU30.RES:Rescheduling_interrupts
332708 +14.6% 381250 interrupts.CPU31.LOC:Local_timer_interrupts
332722 +14.6% 381214 interrupts.CPU32.LOC:Local_timer_interrupts
332682 +14.6% 381300 interrupts.CPU33.LOC:Local_timer_interrupts
332680 +14.6% 381288 interrupts.CPU34.LOC:Local_timer_interrupts
332677 +14.6% 381283 interrupts.CPU35.LOC:Local_timer_interrupts
332731 +14.6% 381287 interrupts.CPU36.LOC:Local_timer_interrupts
332680 +14.6% 381286 interrupts.CPU37.LOC:Local_timer_interrupts
332698 +14.6% 381287 interrupts.CPU38.LOC:Local_timer_interrupts
332690 +14.6% 381287 interrupts.CPU39.LOC:Local_timer_interrupts
332719 +14.6% 381410 interrupts.CPU4.LOC:Local_timer_interrupts
332662 +14.6% 381283 interrupts.CPU40.LOC:Local_timer_interrupts
332695 +14.6% 381286 interrupts.CPU41.LOC:Local_timer_interrupts
332688 +14.6% 381286 interrupts.CPU42.LOC:Local_timer_interrupts
332673 +14.6% 381287 interrupts.CPU43.LOC:Local_timer_interrupts
332705 +14.6% 381305 interrupts.CPU44.LOC:Local_timer_interrupts
332706 +14.6% 381285 interrupts.CPU45.LOC:Local_timer_interrupts
332692 +14.6% 381295 interrupts.CPU46.LOC:Local_timer_interrupts
332697 +14.6% 381368 interrupts.CPU47.LOC:Local_timer_interrupts
332712 +14.6% 381297 interrupts.CPU48.LOC:Local_timer_interrupts
332706 +14.6% 381312 interrupts.CPU49.LOC:Local_timer_interrupts
332709 +14.6% 381387 interrupts.CPU5.LOC:Local_timer_interrupts
332738 +14.6% 381286 interrupts.CPU50.LOC:Local_timer_interrupts
332723 +14.6% 381308 interrupts.CPU51.LOC:Local_timer_interrupts
332698 +14.6% 381319 interrupts.CPU52.LOC:Local_timer_interrupts
332708 +14.6% 381311 interrupts.CPU53.LOC:Local_timer_interrupts
332702 +14.6% 381325 interrupts.CPU54.LOC:Local_timer_interrupts
332699 +14.6% 381305 interrupts.CPU55.LOC:Local_timer_interrupts
332722 +14.6% 381223 interrupts.CPU56.LOC:Local_timer_interrupts
332716 +14.6% 381336 interrupts.CPU57.LOC:Local_timer_interrupts
332711 +14.6% 381322 interrupts.CPU58.LOC:Local_timer_interrupts
332740 +14.6% 381308 interrupts.CPU59.LOC:Local_timer_interrupts
332702 +14.6% 381330 interrupts.CPU6.LOC:Local_timer_interrupts
332720 +14.6% 381313 interrupts.CPU60.LOC:Local_timer_interrupts
332703 +14.6% 381309 interrupts.CPU61.LOC:Local_timer_interrupts
332709 +14.6% 381312 interrupts.CPU62.LOC:Local_timer_interrupts
332705 +14.6% 381319 interrupts.CPU63.LOC:Local_timer_interrupts
332744 +14.6% 381299 interrupts.CPU64.LOC:Local_timer_interrupts
332748 +14.6% 381300 interrupts.CPU65.LOC:Local_timer_interrupts
332751 +14.6% 381297 interrupts.CPU66.LOC:Local_timer_interrupts
332722 +14.6% 381307 interrupts.CPU67.LOC:Local_timer_interrupts
332708 +14.6% 381295 interrupts.CPU68.LOC:Local_timer_interrupts
332721 +14.6% 381322 interrupts.CPU69.LOC:Local_timer_interrupts
332638 +14.6% 381198 interrupts.CPU7.LOC:Local_timer_interrupts
332707 +14.6% 381360 interrupts.CPU70.LOC:Local_timer_interrupts
332703 +14.6% 381334 interrupts.CPU71.LOC:Local_timer_interrupts
332682 +14.6% 381285 interrupts.CPU72.LOC:Local_timer_interrupts
332701 +14.6% 381330 interrupts.CPU73.LOC:Local_timer_interrupts
332671 +14.6% 381332 interrupts.CPU74.LOC:Local_timer_interrupts
332757 +14.6% 381305 interrupts.CPU75.LOC:Local_timer_interrupts
332733 +14.6% 381306 interrupts.CPU76.LOC:Local_timer_interrupts
332665 +14.6% 381317 interrupts.CPU77.LOC:Local_timer_interrupts
332672 +14.6% 381343 interrupts.CPU78.LOC:Local_timer_interrupts
332664 +14.6% 381310 interrupts.CPU79.LOC:Local_timer_interrupts
332715 +14.6% 381404 interrupts.CPU8.LOC:Local_timer_interrupts
332628 +14.6% 381316 interrupts.CPU80.LOC:Local_timer_interrupts
332571 +14.7% 381315 interrupts.CPU81.LOC:Local_timer_interrupts
332668 +14.6% 381303 interrupts.CPU82.LOC:Local_timer_interrupts
6.25 ± 36% +492.0% 37.00 ±118% interrupts.CPU82.RES:Rescheduling_interrupts
332676 +14.6% 381345 interrupts.CPU83.LOC:Local_timer_interrupts
332676 +14.6% 381302 interrupts.CPU84.LOC:Local_timer_interrupts
7.00 ± 17% +675.0% 54.25 ±127% interrupts.CPU84.RES:Rescheduling_interrupts
332689 +14.6% 381314 interrupts.CPU85.LOC:Local_timer_interrupts
332681 +14.6% 381213 interrupts.CPU86.LOC:Local_timer_interrupts
9.00 ± 23% +377.8% 43.00 ±107% interrupts.CPU86.RES:Rescheduling_interrupts
332673 +14.6% 381348 interrupts.CPU87.LOC:Local_timer_interrupts
332669 +14.6% 381382 interrupts.CPU88.LOC:Local_timer_interrupts
332676 +14.6% 381330 interrupts.CPU89.LOC:Local_timer_interrupts
332676 +14.6% 381320 interrupts.CPU9.LOC:Local_timer_interrupts
332677 +14.6% 381337 interrupts.CPU90.LOC:Local_timer_interrupts
7.00 ± 17% +760.7% 60.25 ±143% interrupts.CPU90.RES:Rescheduling_interrupts
332716 +14.6% 381324 interrupts.CPU91.LOC:Local_timer_interrupts
332678 +14.6% 381318 interrupts.CPU92.LOC:Local_timer_interrupts
332679 +14.6% 381313 interrupts.CPU93.LOC:Local_timer_interrupts
332680 +14.6% 381307 interrupts.CPU94.LOC:Local_timer_interrupts
332739 +14.6% 381288 interrupts.CPU95.LOC:Local_timer_interrupts
7.00 ± 22% +967.9% 74.75 ±138% interrupts.CPU95.RES:Rescheduling_interrupts
332660 +14.6% 381292 interrupts.CPU96.LOC:Local_timer_interrupts
332715 +14.6% 381374 interrupts.CPU97.LOC:Local_timer_interrupts
332670 +14.6% 381336 interrupts.CPU98.LOC:Local_timer_interrupts
332669 +14.6% 381401 interrupts.CPU99.LOC:Local_timer_interrupts
63876342 +14.6% 73212105 interrupts.LOC:Local_timer_interrupts
15573 ± 4% +20.6% 18773 ± 3% interrupts.RES:Rescheduling_interrupts
17.28 ± 3% -3.9 13.36 ± 16% perf-profile.calltrace.cycles-pp.entry_SYSCALL_64_after_hwframe
17.26 ± 3% -3.9 13.34 ± 16% perf-profile.calltrace.cycles-pp.do_syscall_64.entry_SYSCALL_64_after_hwframe
13.39 ± 4% -2.9 10.50 ± 16% perf-profile.calltrace.cycles-pp.do_fsync.__x64_sys_fsync.do_syscall_64.entry_SYSCALL_64_after_hwframe
13.39 ± 4% -2.9 10.50 ± 16% perf-profile.calltrace.cycles-pp.__x64_sys_fsync.do_syscall_64.entry_SYSCALL_64_after_hwframe
13.38 ± 4% -2.9 10.50 ± 16% perf-profile.calltrace.cycles-pp.btrfs_sync_file.do_fsync.__x64_sys_fsync.do_syscall_64.entry_SYSCALL_64_after_hwframe
4.64 ± 2% -1.3 3.35 ± 18% perf-profile.calltrace.cycles-pp.btrfs_log_dentry_safe.btrfs_sync_file.do_fsync.__x64_sys_fsync.do_syscall_64
4.63 ± 2% -1.3 3.34 ± 18% perf-profile.calltrace.cycles-pp.btrfs_log_inode_parent.btrfs_log_dentry_safe.btrfs_sync_file.do_fsync.__x64_sys_fsync
4.56 ± 2% -1.3 3.30 ± 18% perf-profile.calltrace.cycles-pp.btrfs_log_inode.btrfs_log_inode_parent.btrfs_log_dentry_safe.btrfs_sync_file.do_fsync
2.16 ± 5% -1.2 0.99 ± 19% perf-profile.calltrace.cycles-pp.btree_csum_one_bio.btree_submit_bio_hook.submit_one_bio.btree_write_cache_pages.do_writepages
4.36 ± 4% -1.1 3.27 ± 18% perf-profile.calltrace.cycles-pp.submit_one_bio.btree_write_cache_pages.do_writepages.__filemap_fdatawrite_range.btrfs_write_marked_extents
4.35 ± 4% -1.1 3.27 ± 18% perf-profile.calltrace.cycles-pp.btree_submit_bio_hook.submit_one_bio.btree_write_cache_pages.do_writepages.__filemap_fdatawrite_range
1.12 ± 7% -0.9 0.26 ±100% perf-profile.calltrace.cycles-pp.btrfs_check_node.btree_csum_one_bio.btree_submit_bio_hook.submit_one_bio.btree_write_cache_pages
2.02 -0.6 1.45 ± 18% perf-profile.calltrace.cycles-pp.btrfs_truncate_inode_items.btrfs_log_inode.btrfs_log_inode_parent.btrfs_log_dentry_safe.btrfs_sync_file
1.99 -0.6 1.44 ± 17% perf-profile.calltrace.cycles-pp.btrfs_search_slot.btrfs_truncate_inode_items.btrfs_log_inode.btrfs_log_inode_parent.btrfs_log_dentry_safe
1.89 ± 2% -0.5 1.38 ± 17% perf-profile.calltrace.cycles-pp.btrfs_cow_block.btrfs_search_slot.btrfs_truncate_inode_items.btrfs_log_inode.btrfs_log_inode_parent
0.95 ± 5% -0.5 0.45 ± 57% perf-profile.calltrace.cycles-pp.check_leaf.btree_csum_one_bio.btree_submit_bio_hook.submit_one_bio.btree_write_cache_pages
1.88 ± 2% -0.5 1.38 ± 17% perf-profile.calltrace.cycles-pp.__btrfs_cow_block.btrfs_cow_block.btrfs_search_slot.btrfs_truncate_inode_items.btrfs_log_inode
0.74 ± 8% -0.4 0.32 ±100% perf-profile.calltrace.cycles-pp.btrfs_search_slot.btrfs_update_root.btrfs_sync_log.btrfs_sync_file.do_fsync
0.71 ± 7% -0.4 0.31 ±100% perf-profile.calltrace.cycles-pp.btrfs_cow_block.btrfs_search_slot.btrfs_update_root.btrfs_sync_log.btrfs_sync_file
0.90 ± 2% -0.4 0.50 ± 59% perf-profile.calltrace.cycles-pp.copy_extent_buffer_full.__btrfs_cow_block.btrfs_cow_block.btrfs_search_slot.btrfs_truncate_inode_items
0.71 ± 7% -0.4 0.31 ±100% perf-profile.calltrace.cycles-pp.__btrfs_cow_block.btrfs_cow_block.btrfs_search_slot.btrfs_update_root.btrfs_sync_log
0.90 ± 2% -0.4 0.50 ± 59% perf-profile.calltrace.cycles-pp.copy_page.copy_extent_buffer_full.__btrfs_cow_block.btrfs_cow_block.btrfs_search_slot
1.68 ± 2% -0.4 1.28 ± 23% perf-profile.calltrace.cycles-pp.do_mkdirat.do_syscall_64.entry_SYSCALL_64_after_hwframe
0.66 ± 5% -0.4 0.27 ±100% perf-profile.calltrace.cycles-pp.new_sync_write.vfs_write.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe
1.22 ± 7% -0.4 0.85 ± 14% perf-profile.calltrace.cycles-pp.do_sys_openat2.do_sys_open.do_syscall_64.entry_SYSCALL_64_after_hwframe
1.22 ± 7% -0.4 0.85 ± 14% perf-profile.calltrace.cycles-pp.do_sys_open.do_syscall_64.entry_SYSCALL_64_after_hwframe
1.18 ± 8% -0.4 0.82 ± 15% perf-profile.calltrace.cycles-pp.do_filp_open.do_sys_openat2.do_sys_open.do_syscall_64.entry_SYSCALL_64_after_hwframe
1.18 ± 8% -0.4 0.82 ± 15% perf-profile.calltrace.cycles-pp.path_openat.do_filp_open.do_sys_openat2.do_sys_open.do_syscall_64
0.62 ± 7% -0.4 0.27 ±100% perf-profile.calltrace.cycles-pp.btrfs_add_link.btrfs_mkdir.vfs_mkdir.do_mkdirat.do_syscall_64
0.60 ± 7% -0.3 0.26 ±100% perf-profile.calltrace.cycles-pp.btrfs_insert_dir_item.btrfs_add_link.btrfs_mkdir.vfs_mkdir.do_mkdirat
1.01 ± 7% -0.3 0.68 ± 13% perf-profile.calltrace.cycles-pp.start_ordered_ops.btrfs_sync_file.do_fsync.__x64_sys_fsync.do_syscall_64
1.00 ± 7% -0.3 0.68 ± 13% perf-profile.calltrace.cycles-pp.btrfs_fdatawrite_range.start_ordered_ops.btrfs_sync_file.do_fsync.__x64_sys_fsync
0.99 ± 7% -0.3 0.67 ± 13% perf-profile.calltrace.cycles-pp.do_writepages.__filemap_fdatawrite_range.btrfs_fdatawrite_range.start_ordered_ops.btrfs_sync_file
1.00 ± 7% -0.3 0.68 ± 13% perf-profile.calltrace.cycles-pp.__filemap_fdatawrite_range.btrfs_fdatawrite_range.start_ordered_ops.btrfs_sync_file.do_fsync
0.99 ± 7% -0.3 0.67 ± 13% perf-profile.calltrace.cycles-pp.extent_writepages.do_writepages.__filemap_fdatawrite_range.btrfs_fdatawrite_range.start_ordered_ops
0.79 ± 8% -0.3 0.47 ± 57% perf-profile.calltrace.cycles-pp.btrfs_create.path_openat.do_filp_open.do_sys_openat2.do_sys_open
1.32 -0.3 1.01 ± 24% perf-profile.calltrace.cycles-pp.vfs_mkdir.do_mkdirat.do_syscall_64.entry_SYSCALL_64_after_hwframe
0.75 ± 8% -0.3 0.45 ± 59% perf-profile.calltrace.cycles-pp.btrfs_update_root.btrfs_sync_log.btrfs_sync_file.do_fsync.__x64_sys_fsync
1.30 -0.3 1.00 ± 23% perf-profile.calltrace.cycles-pp.btrfs_mkdir.vfs_mkdir.do_mkdirat.do_syscall_64.entry_SYSCALL_64_after_hwframe
1.30 ± 3% -0.3 1.01 ± 21% perf-profile.calltrace.cycles-pp.copy_items.btrfs_log_inode.btrfs_log_inode_parent.btrfs_log_dentry_safe.btrfs_sync_file
0.83 ± 4% -0.3 0.54 ± 58% perf-profile.calltrace.cycles-pp.log_csums.copy_items.btrfs_log_inode.btrfs_log_inode_parent.btrfs_log_dentry_safe
0.68 ± 5% -0.3 0.41 ± 58% perf-profile.calltrace.cycles-pp.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe
0.68 ± 5% -0.3 0.41 ± 58% perf-profile.calltrace.cycles-pp.vfs_write.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe
0.84 ± 6% +0.1 0.94 ± 2% perf-profile.calltrace.cycles-pp.perf_mux_hrtimer_handler.__hrtimer_run_queues.hrtimer_interrupt.__sysvec_apic_timer_interrupt.asm_call_on_stack
0.00 +0.7 0.66 ± 15% perf-profile.calltrace.cycles-pp.btrfs_map_bio.btree_submit_bio_hook.submit_one_bio.submit_extent_page.write_one_eb
0.68 ± 8% +1.0 1.73 ± 17% perf-profile.calltrace.cycles-pp.write_one_eb.btree_write_cache_pages.do_writepages.__filemap_fdatawrite_range.btrfs_write_marked_extents
0.00 +1.3 1.26 ± 14% perf-profile.calltrace.cycles-pp.submit_one_bio.submit_extent_page.write_one_eb.btree_write_cache_pages.do_writepages
0.00 +1.3 1.26 ± 14% perf-profile.calltrace.cycles-pp.btree_submit_bio_hook.submit_one_bio.submit_extent_page.write_one_eb.btree_write_cache_pages
0.00 +1.4 1.37 ± 16% perf-profile.calltrace.cycles-pp.submit_extent_page.write_one_eb.btree_write_cache_pages.do_writepages.__filemap_fdatawrite_range
80.22 +3.8 83.98 ± 2% perf-profile.calltrace.cycles-pp.secondary_startup_64
17.50 ± 3% -3.9 13.56 ± 16% perf-profile.children.cycles-pp.entry_SYSCALL_64_after_hwframe
17.48 ± 3% -3.9 13.55 ± 16% perf-profile.children.cycles-pp.do_syscall_64
13.39 ± 4% -2.9 10.50 ± 16% perf-profile.children.cycles-pp.do_fsync
13.39 ± 4% -2.9 10.50 ± 16% perf-profile.children.cycles-pp.__x64_sys_fsync
13.38 ± 4% -2.9 10.50 ± 16% perf-profile.children.cycles-pp.btrfs_sync_file
4.64 ± 2% -1.3 3.35 ± 18% perf-profile.children.cycles-pp.btrfs_log_dentry_safe
4.63 ± 2% -1.3 3.34 ± 18% perf-profile.children.cycles-pp.btrfs_log_inode_parent
4.57 ± 2% -1.3 3.30 ± 18% perf-profile.children.cycles-pp.btrfs_log_inode
5.26 ± 2% -1.2 4.01 ± 17% perf-profile.children.cycles-pp.btrfs_search_slot
3.26 -0.8 2.43 ± 18% perf-profile.children.cycles-pp.btrfs_cow_block
3.25 -0.8 2.42 ± 18% perf-profile.children.cycles-pp.__btrfs_cow_block
1.22 ± 8% -0.8 0.40 ± 21% perf-profile.children.cycles-pp.btrfs_commit_transaction
2.33 ± 6% -0.7 1.67 ± 17% perf-profile.children.cycles-pp.btree_csum_one_bio
0.87 ± 8% -0.6 0.25 ± 22% perf-profile.children.cycles-pp.btrfs_finish_extent_commit
0.86 ± 8% -0.6 0.24 ± 23% perf-profile.children.cycles-pp.unpin_extent_range
0.86 ± 8% -0.6 0.24 ± 23% perf-profile.children.cycles-pp.__btrfs_add_free_space
0.85 ± 8% -0.6 0.24 ± 23% perf-profile.children.cycles-pp.steal_from_bitmap
2.02 -0.6 1.45 ± 18% perf-profile.children.cycles-pp.btrfs_truncate_inode_items
1.24 ± 5% -0.6 0.68 ± 2% perf-profile.children.cycles-pp._find_next_bit
1.56 -0.5 1.07 ± 17% perf-profile.children.cycles-pp.copy_page
1.56 -0.5 1.07 ± 17% perf-profile.children.cycles-pp.copy_extent_buffer_full
1.68 ± 2% -0.4 1.28 ± 23% perf-profile.children.cycles-pp.do_mkdirat
1.23 ± 8% -0.4 0.86 ± 14% perf-profile.children.cycles-pp.do_sys_openat2
1.23 ± 8% -0.4 0.86 ± 14% perf-profile.children.cycles-pp.do_sys_open
1.19 ± 8% -0.4 0.83 ± 15% perf-profile.children.cycles-pp.path_openat
1.19 ± 8% -0.4 0.84 ± 15% perf-profile.children.cycles-pp.do_filp_open
1.14 ± 5% -0.4 0.79 ± 13% perf-profile.children.cycles-pp.check_leaf
1.01 ± 7% -0.3 0.68 ± 13% perf-profile.children.cycles-pp.start_ordered_ops
1.01 ± 7% -0.3 0.68 ± 13% perf-profile.children.cycles-pp.btrfs_fdatawrite_range
0.99 ± 6% -0.3 0.67 ± 13% perf-profile.children.cycles-pp.extent_writepages
1.39 ± 4% -0.3 1.07 ± 13% perf-profile.children.cycles-pp.btrfs_insert_empty_items
1.32 -0.3 1.01 ± 24% perf-profile.children.cycles-pp.vfs_mkdir
1.31 -0.3 1.00 ± 23% perf-profile.children.cycles-pp.btrfs_mkdir
1.14 ± 7% -0.3 0.84 ± 20% perf-profile.children.cycles-pp.btrfs_check_node
1.40 ± 3% -0.3 1.11 ± 8% perf-profile.children.cycles-pp._raw_spin_lock
1.30 ± 3% -0.3 1.02 ± 21% perf-profile.children.cycles-pp.copy_items
1.04 ± 7% -0.3 0.77 ± 19% perf-profile.children.cycles-pp.read_extent_buffer
0.67 ± 5% -0.2 0.43 ± 23% perf-profile.children.cycles-pp.btrfs_new_inode
0.82 ± 7% -0.2 0.59 ± 20% perf-profile.children.cycles-pp.btrfs_add_link
0.80 ± 8% -0.2 0.58 ± 15% perf-profile.children.cycles-pp.btrfs_create
0.77 ± 7% -0.2 0.55 ± 22% perf-profile.children.cycles-pp.btrfs_insert_dir_item
0.59 ± 9% -0.2 0.41 ± 8% perf-profile.children.cycles-pp.btrfs_submit_bio_hook
0.75 ± 8% -0.2 0.57 ± 14% perf-profile.children.cycles-pp.btrfs_update_root
0.65 ± 4% -0.2 0.47 ± 18% perf-profile.children.cycles-pp.btrfs_file_write_iter
0.49 ± 5% -0.2 0.31 ± 23% perf-profile.children.cycles-pp.btrfs_commit_inode_delayed_items
0.67 ± 5% -0.2 0.48 ± 18% perf-profile.children.cycles-pp.new_sync_write
0.69 ± 5% -0.2 0.51 ± 18% perf-profile.children.cycles-pp.ksys_write
0.69 ± 4% -0.2 0.51 ± 17% perf-profile.children.cycles-pp.vfs_write
0.83 ± 4% -0.2 0.66 ± 20% perf-profile.children.cycles-pp.log_csums
0.62 ± 7% -0.2 0.46 ± 21% perf-profile.children.cycles-pp.insert_with_overflow
0.55 ± 7% -0.2 0.39 ± 19% perf-profile.children.cycles-pp.btrfs_buffered_write
0.47 ± 9% -0.2 0.32 ± 15% perf-profile.children.cycles-pp.kmem_cache_alloc
0.54 ± 3% -0.1 0.40 ± 12% perf-profile.children.cycles-pp.btrfs_get_32
0.73 ± 8% -0.1 0.58 ± 15% perf-profile.children.cycles-pp.read_block_for_search
0.39 ± 3% -0.1 0.26 ± 20% perf-profile.children.cycles-pp.extent_write_cache_pages
0.59 ± 5% -0.1 0.47 ± 14% perf-profile.children.cycles-pp.setup_items_for_insert
0.46 ± 3% -0.1 0.33 ± 9% perf-profile.children.cycles-pp.alloc_extent_buffer
0.34 ± 4% -0.1 0.22 ± 21% perf-profile.children.cycles-pp.__extent_writepage
0.47 ± 6% -0.1 0.35 ± 23% perf-profile.children.cycles-pp.__btrfs_btree_balance_dirty
0.23 ± 8% -0.1 0.12 ± 21% perf-profile.children.cycles-pp.btrfs_write_and_wait_transaction
0.46 ± 5% -0.1 0.34 ± 13% perf-profile.children.cycles-pp.generic_bin_search
0.37 ± 11% -0.1 0.26 ± 21% perf-profile.children.cycles-pp.btrfs_get_64
0.45 ± 7% -0.1 0.34 ± 22% perf-profile.children.cycles-pp.__percpu_counter_sum
0.46 ± 6% -0.1 0.35 ± 22% perf-profile.children.cycles-pp.__percpu_counter_compare
0.56 ± 8% -0.1 0.46 ± 10% perf-profile.children.cycles-pp.find_extent_buffer
0.33 ± 4% -0.1 0.23 ± 16% perf-profile.children.cycles-pp.split_leaf
0.34 ± 10% -0.1 0.24 ± 7% perf-profile.children.cycles-pp.pagecache_get_page
0.29 ± 11% -0.1 0.20 ± 13% perf-profile.children.cycles-pp.__set_extent_bit
0.27 ± 7% -0.1 0.17 ± 23% perf-profile.children.cycles-pp.writepage_delalloc
0.28 ± 14% -0.1 0.19 ± 24% perf-profile.children.cycles-pp.btrfs_reserve_extent
0.17 ± 17% -0.1 0.09 ± 18% perf-profile.children.cycles-pp.new_inode
0.25 ± 10% -0.1 0.16 ± 23% perf-profile.children.cycles-pp.set_extent_bit
0.17 ± 19% -0.1 0.08 ± 24% perf-profile.children.cycles-pp.new_inode_pseudo
0.16 ± 18% -0.1 0.08 ± 21% perf-profile.children.cycles-pp.alloc_inode
0.28 ± 9% -0.1 0.20 ± 28% perf-profile.children.cycles-pp.btrfs_check_ref_name_override
0.20 ± 9% -0.1 0.12 ± 17% perf-profile.children.cycles-pp.btrfs_wait_ordered_range
0.37 ± 2% -0.1 0.29 ± 11% perf-profile.children.cycles-pp.btrfs_mark_buffer_dirty
0.21 ± 9% -0.1 0.14 ± 16% perf-profile.children.cycles-pp.btrfs_get_token_32
0.23 ± 15% -0.1 0.16 ± 25% perf-profile.children.cycles-pp.find_free_extent
0.23 ± 8% -0.1 0.15 ± 24% perf-profile.children.cycles-pp.btrfs_run_delalloc_range
0.19 ± 21% -0.1 0.12 ± 27% perf-profile.children.cycles-pp.__slab_alloc
0.14 ± 21% -0.1 0.06 ± 20% perf-profile.children.cycles-pp.btrfs_alloc_inode
0.21 ± 4% -0.1 0.14 ± 28% perf-profile.children.cycles-pp.__test_set_page_writeback
0.18 ± 22% -0.1 0.11 ± 24% perf-profile.children.cycles-pp.___slab_alloc
0.17 ± 24% -0.1 0.10 ± 33% perf-profile.children.cycles-pp.allocate_slab
0.17 ± 24% -0.1 0.10 ± 33% perf-profile.children.cycles-pp.new_slab
0.22 ± 5% -0.1 0.15 ± 24% perf-profile.children.cycles-pp.cow_file_range
0.27 ± 12% -0.1 0.20 ± 13% perf-profile.children.cycles-pp.__alloc_pages_nodemask
0.19 ± 9% -0.1 0.12 ± 20% perf-profile.children.cycles-pp.check_extent_data_item
0.16 ± 10% -0.1 0.10 ± 18% perf-profile.children.cycles-pp.btrfs_start_ordered_extent
0.09 ± 19% -0.1 0.03 ±102% perf-profile.children.cycles-pp.add_to_page_cache_lru
0.25 ± 7% -0.1 0.19 ± 20% perf-profile.children.cycles-pp.btrfs_lookup
0.25 ± 7% -0.1 0.19 ± 20% perf-profile.children.cycles-pp.btrfs_lookup_dentry
0.17 ± 7% -0.1 0.11 ± 21% perf-profile.children.cycles-pp.cpumask_next
0.16 ± 4% -0.1 0.10 ± 12% perf-profile.children.cycles-pp.memzero_extent_buffer
0.24 ± 11% -0.1 0.18 ± 14% perf-profile.children.cycles-pp.get_page_from_freelist
0.26 ± 5% -0.1 0.21 ± 5% perf-profile.children.cycles-pp.end_bio_extent_writepage
0.33 ± 3% -0.1 0.27 ± 12% perf-profile.children.cycles-pp.set_extent_buffer_dirty
0.17 ± 12% -0.1 0.11 ± 23% perf-profile.children.cycles-pp.clear_page_dirty_for_io
0.10 ± 12% -0.1 0.05 ± 62% perf-profile.children.cycles-pp.___might_sleep
0.11 ± 17% -0.1 0.06 ± 63% perf-profile.children.cycles-pp.alloc_extent_state
0.08 ± 10% -0.1 0.03 ±100% perf-profile.children.cycles-pp.__x64_sys_gettimeofday
0.28 ± 4% -0.1 0.23 ± 14% perf-profile.children.cycles-pp.__set_page_dirty_nobuffers
0.18 ± 15% -0.1 0.13 ± 14% perf-profile.children.cycles-pp.btrfs_search_forward
0.14 ± 9% -0.1 0.09 ± 12% perf-profile.children.cycles-pp.push_leaf_right
0.13 ± 10% -0.0 0.08 ± 10% perf-profile.children.cycles-pp.__clear_extent_bit
0.10 ± 12% -0.0 0.05 ± 59% perf-profile.children.cycles-pp.fill_inode_item
0.09 ± 8% -0.0 0.05 ± 62% perf-profile.children.cycles-pp.crc32c_pcl_intel_update
0.11 ± 10% -0.0 0.06 ± 58% perf-profile.children.cycles-pp.__mod_lruvec_state
0.09 ± 13% -0.0 0.04 ± 58% perf-profile.children.cycles-pp.check_inode_key
0.07 ± 10% -0.0 0.03 ±100% perf-profile.children.cycles-pp.inode_tree_add
0.07 ± 6% -0.0 0.03 ±100% perf-profile.children.cycles-pp.__extent_writepage_io
0.15 ± 7% -0.0 0.11 ± 15% perf-profile.children.cycles-pp.btrfs_comp_cpu_keys
0.23 ± 6% -0.0 0.19 ± 7% perf-profile.children.cycles-pp.end_extent_writepage
0.23 ± 6% -0.0 0.19 ± 7% perf-profile.children.cycles-pp.btrfs_writepage_endio_finish_ordered
0.17 ± 6% -0.0 0.13 ± 15% perf-profile.children.cycles-pp.btrfs_set_token_32
0.07 ± 6% -0.0 0.03 ±100% perf-profile.children.cycles-pp.read
0.08 ± 21% -0.0 0.04 ± 63% perf-profile.children.cycles-pp.copy_user_enhanced_fast_string
0.59 ± 3% -0.0 0.55 ± 2% perf-profile.children.cycles-pp._raw_spin_lock_irqsave
0.07 ± 7% -0.0 0.03 ±100% perf-profile.children.cycles-pp.kernel_fpu_begin
0.08 ± 15% -0.0 0.04 ± 58% perf-profile.children.cycles-pp.clear_extent_bit
0.11 ± 10% -0.0 0.07 ± 17% perf-profile.children.cycles-pp.__push_leaf_right
0.09 ± 19% -0.0 0.05 ± 60% perf-profile.children.cycles-pp.btrfs_free_path
0.10 ± 12% -0.0 0.07 ± 6% perf-profile.children.cycles-pp.btrfs_read_node_slot
0.09 ± 38% -0.0 0.05 ± 8% perf-profile.children.cycles-pp.drop_objectid_items
0.09 ± 9% -0.0 0.06 ± 14% perf-profile.children.cycles-pp.read_tree_block
0.09 ± 14% -0.0 0.06 ± 17% perf-profile.children.cycles-pp.btrfs_log_all_xattrs
0.10 ± 5% -0.0 0.07 ± 13% perf-profile.children.cycles-pp.write_extent_buffer
0.10 ± 21% -0.0 0.07 ± 26% perf-profile.children.cycles-pp.prepare_pages
0.07 ± 19% -0.0 0.04 ± 57% perf-profile.children.cycles-pp.__xa_set_mark
0.09 ± 7% -0.0 0.07 ± 16% perf-profile.children.cycles-pp.__vfs_getxattr
0.12 ± 7% -0.0 0.10 ± 11% perf-profile.children.cycles-pp.clear_page_erms
0.05 ± 9% +0.0 0.08 ± 8% perf-profile.children.cycles-pp.btrfs_block_rsv_release
0.11 ± 7% +0.0 0.14 ± 7% perf-profile.children.cycles-pp.nr_iowait_cpu
0.08 ± 21% +0.0 0.11 ± 4% perf-profile.children.cycles-pp.rcu_eqs_exit
0.29 ± 7% +0.0 0.32 ± 5% perf-profile.children.cycles-pp.schedule
0.15 ± 20% +0.1 0.21 ± 19% perf-profile.children.cycles-pp.start_transaction
0.13 ± 17% +0.1 0.20 ± 10% perf-profile.children.cycles-pp.schedule_idle
0.43 ± 6% +0.1 0.52 ± 4% perf-profile.children.cycles-pp.__sched_text_start
0.86 ± 6% +0.1 0.96 ± 2% perf-profile.children.cycles-pp.perf_mux_hrtimer_handler
0.23 ± 5% +0.1 0.34 ± 15% perf-profile.children.cycles-pp.__queue_work
0.07 ± 10% +0.1 0.18 ± 21% perf-profile.children.cycles-pp.btrfs_use_block_rsv
0.23 ± 4% +0.2 0.39 ± 7% perf-profile.children.cycles-pp.queue_work_on
0.31 ± 5% +0.2 0.47 ± 21% perf-profile.children.cycles-pp.brd_lookup_page
0.12 ± 33% +0.2 0.31 ± 20% perf-profile.children.cycles-pp.btrfs_reserve_metadata_bytes
0.00 +0.2 0.23 ± 13% perf-profile.children.cycles-pp.__btrfs_run_delayed_items
0.13 ± 31% +0.3 0.45 ± 17% perf-profile.children.cycles-pp.flush_space
0.13 ± 31% +0.3 0.48 ± 17% perf-profile.children.cycles-pp.btrfs_async_reclaim_metadata_space
0.69 ± 8% +1.0 1.73 ± 17% perf-profile.children.cycles-pp.write_one_eb
0.19 ± 14% +1.2 1.39 ± 16% perf-profile.children.cycles-pp.submit_extent_page
80.22 +3.8 83.98 ± 2% perf-profile.children.cycles-pp.secondary_startup_64
80.22 +3.8 83.98 ± 2% perf-profile.children.cycles-pp.cpu_startup_entry
80.22 +3.8 83.98 ± 2% perf-profile.children.cycles-pp.do_idle
1.23 ± 5% -0.6 0.67 ± 3% perf-profile.self.cycles-pp._find_next_bit
1.53 ± 2% -0.5 1.04 ± 20% perf-profile.self.cycles-pp.copy_page
1.03 ± 6% -0.3 0.76 ± 19% perf-profile.self.cycles-pp.read_extent_buffer
1.29 ± 3% -0.2 1.08 ± 8% perf-profile.self.cycles-pp._raw_spin_lock
0.45 ± 3% -0.1 0.30 ± 17% perf-profile.self.cycles-pp.btrfs_get_32
0.29 ± 8% -0.1 0.19 ± 19% perf-profile.self.cycles-pp.__percpu_counter_sum
0.29 ± 14% -0.1 0.19 ± 17% perf-profile.self.cycles-pp.btrfs_get_64
0.35 ± 9% -0.1 0.27 ± 15% perf-profile.self.cycles-pp.generic_bin_search
0.23 ± 4% -0.1 0.17 ± 26% perf-profile.self.cycles-pp.__btrfs_cow_block
0.18 ± 11% -0.1 0.12 ± 15% perf-profile.self.cycles-pp.btrfs_get_token_32
0.16 ± 2% -0.1 0.10 ± 12% perf-profile.self.cycles-pp.memzero_extent_buffer
0.10 ± 8% -0.1 0.04 ± 59% perf-profile.self.cycles-pp.___might_sleep
0.17 ± 9% -0.1 0.12 ± 24% perf-profile.self.cycles-pp.check_leaf
0.17 ± 12% -0.0 0.12 ± 17% perf-profile.self.cycles-pp.kmem_cache_alloc
0.08 ± 21% -0.0 0.04 ± 60% perf-profile.self.cycles-pp.copy_user_enhanced_fast_string
0.15 ± 7% -0.0 0.11 ± 14% perf-profile.self.cycles-pp.btrfs_comp_cpu_keys
0.11 ± 11% -0.0 0.08 ± 10% perf-profile.self.cycles-pp.hrtimer_interrupt
0.09 ± 7% -0.0 0.07 ± 13% perf-profile.self.cycles-pp.write_extent_buffer
0.06 ± 6% +0.0 0.08 ± 5% perf-profile.self.cycles-pp.__sched_text_start
0.11 ± 7% +0.0 0.14 ± 8% perf-profile.self.cycles-pp.nr_iowait_cpu
0.00 +0.1 0.07 ± 11% perf-profile.self.cycles-pp.btrfs_end_bio
0.17 ± 4% +0.1 0.27 ± 20% perf-profile.self.cycles-pp.brd_lookup_page
23336 ± 4% +12.3% 26214 ± 2% softirqs.CPU10.SCHED
37060 ± 7% +20.4% 44626 ± 3% softirqs.CPU100.RCU
72376 ± 4% +14.2% 82628 ± 6% softirqs.CPU100.TIMER
38158 ± 6% +15.8% 44173 ± 4% softirqs.CPU101.RCU
23419 +12.2% 26282 ± 3% softirqs.CPU101.SCHED
72977 ± 3% +13.2% 82640 ± 6% softirqs.CPU101.TIMER
36452 ± 6% +19.1% 43426 ± 5% softirqs.CPU102.RCU
22891 +14.4% 26190 ± 2% softirqs.CPU102.SCHED
71951 ± 3% +14.9% 82691 ± 5% softirqs.CPU102.TIMER
36585 ± 7% +18.9% 43489 ± 4% softirqs.CPU103.RCU
22931 +13.7% 26076 ± 3% softirqs.CPU103.SCHED
71487 ± 3% +14.9% 82163 ± 6% softirqs.CPU103.TIMER
37494 ± 4% +16.7% 43759 ± 4% softirqs.CPU104.RCU
72362 ± 4% +12.6% 81512 ± 5% softirqs.CPU104.TIMER
36511 ± 7% +20.3% 43910 ± 3% softirqs.CPU105.RCU
72600 ± 4% +13.9% 82663 ± 6% softirqs.CPU105.TIMER
36685 ± 6% +20.4% 44154 ± 4% softirqs.CPU106.RCU
22849 ± 2% +14.1% 26063 ± 2% softirqs.CPU106.SCHED
71776 ± 3% +14.4% 82085 ± 6% softirqs.CPU106.TIMER
37203 ± 6% +18.7% 44163 ± 3% softirqs.CPU107.RCU
20913 ± 18% +24.9% 26121 ± 3% softirqs.CPU107.SCHED
71859 ± 4% +15.2% 82784 ± 6% softirqs.CPU107.TIMER
36749 ± 5% +18.5% 43545 ± 6% softirqs.CPU108.RCU
22983 +13.3% 26051 ± 3% softirqs.CPU108.SCHED
71500 ± 3% +16.2% 83050 ± 7% softirqs.CPU108.TIMER
37174 ± 3% +18.0% 43869 ± 3% softirqs.CPU109.RCU
71426 ± 4% +15.6% 82585 ± 6% softirqs.CPU109.TIMER
38693 ± 8% +18.2% 45720 softirqs.CPU11.RCU
22879 +11.8% 25589 softirqs.CPU11.SCHED
36400 ± 8% +21.7% 44307 ± 3% softirqs.CPU110.RCU
23017 +14.3% 26311 ± 3% softirqs.CPU110.SCHED
71941 ± 4% +15.1% 82776 ± 6% softirqs.CPU110.TIMER
36644 ± 7% +18.6% 43477 ± 4% softirqs.CPU111.RCU
22935 +13.3% 25995 ± 2% softirqs.CPU111.SCHED
71704 ± 4% +14.8% 82352 ± 5% softirqs.CPU111.TIMER
34791 ± 7% +19.1% 41428 ± 3% softirqs.CPU112.RCU
22931 +13.7% 26072 ± 3% softirqs.CPU112.SCHED
71843 ± 4% +14.2% 82016 ± 6% softirqs.CPU112.TIMER
35167 ± 6% +17.6% 41349 ± 3% softirqs.CPU113.RCU
23073 ± 2% +12.9% 26057 ± 3% softirqs.CPU113.SCHED
36049 ± 5% +19.2% 42957 softirqs.CPU114.RCU
20819 ± 18% +26.7% 26374 ± 2% softirqs.CPU114.SCHED
71628 ± 4% +15.8% 82917 ± 6% softirqs.CPU114.TIMER
34911 ± 7% +21.0% 42248 ± 2% softirqs.CPU115.RCU
70742 ± 5% +15.8% 81922 ± 5% softirqs.CPU115.TIMER
35469 ± 6% +18.9% 42188 softirqs.CPU116.RCU
23098 +12.9% 26082 ± 2% softirqs.CPU116.SCHED
36436 ± 3% +13.3% 41297 ± 3% softirqs.CPU117.RCU
20572 ± 18% +29.1% 26559 softirqs.CPU117.SCHED
72220 ± 3% +24.7% 90085 ± 8% softirqs.CPU117.TIMER
35180 ± 6% +18.7% 41774 ± 2% softirqs.CPU118.RCU
22950 ± 2% +13.7% 26085 ± 3% softirqs.CPU118.SCHED
72136 ± 4% +13.7% 82032 ± 6% softirqs.CPU118.TIMER
34925 ± 6% +21.6% 42479 ± 5% softirqs.CPU119.RCU
22776 ± 2% +13.7% 25902 ± 2% softirqs.CPU119.SCHED
71796 ± 4% +14.2% 82002 ± 6% softirqs.CPU119.TIMER
38918 ± 9% +17.0% 45550 softirqs.CPU12.RCU
22765 +13.3% 25791 ± 3% softirqs.CPU12.SCHED
36632 ± 4% +15.0% 42126 ± 2% softirqs.CPU121.RCU
23637 ± 3% +10.9% 26208 ± 2% softirqs.CPU121.SCHED
36278 ± 4% +17.1% 42476 ± 3% softirqs.CPU122.RCU
23705 +9.9% 26063 ± 2% softirqs.CPU122.SCHED
36158 ± 4% +14.5% 41387 ± 3% softirqs.CPU123.RCU
23593 ± 2% +10.5% 26059 ± 2% softirqs.CPU123.SCHED
36018 ± 4% +14.9% 41372 ± 3% softirqs.CPU124.RCU
21282 ± 20% +22.4% 26043 ± 2% softirqs.CPU124.SCHED
35677 ± 4% +16.5% 41581 ± 3% softirqs.CPU125.RCU
21802 ± 16% +19.9% 26146 ± 2% softirqs.CPU125.SCHED
35555 ± 5% +17.9% 41903 ± 3% softirqs.CPU126.RCU
23628 +10.1% 26006 ± 2% softirqs.CPU126.SCHED
35465 ± 4% +15.4% 40937 ± 5% softirqs.CPU127.RCU
23621 +10.8% 26167 ± 2% softirqs.CPU127.SCHED
37576 ± 4% +15.8% 43504 ± 4% softirqs.CPU128.RCU
23632 +10.7% 26157 ± 2% softirqs.CPU128.SCHED
37710 ± 3% +14.6% 43203 ± 3% softirqs.CPU129.RCU
23662 +10.6% 26178 ± 2% softirqs.CPU129.SCHED
39596 ± 7% +16.8% 46237 softirqs.CPU13.RCU
22709 +12.1% 25461 ± 2% softirqs.CPU13.SCHED
37673 ± 5% +14.4% 43105 ± 4% softirqs.CPU130.RCU
23669 +9.0% 25800 ± 3% softirqs.CPU130.SCHED
37396 ± 5% +15.8% 43323 ± 4% softirqs.CPU131.RCU
23551 ± 2% +10.5% 26027 ± 2% softirqs.CPU131.SCHED
37645 ± 3% +13.6% 42781 ± 4% softirqs.CPU132.RCU
23679 +9.6% 25963 ± 2% softirqs.CPU132.SCHED
36737 ± 4% +16.3% 42734 ± 4% softirqs.CPU133.RCU
23518 ± 2% +11.0% 26110 ± 2% softirqs.CPU133.SCHED
36985 ± 4% +18.8% 43939 ± 2% softirqs.CPU134.RCU
23471 ± 2% +11.7% 26218 ± 2% softirqs.CPU134.SCHED
37888 ± 2% +15.1% 43606 ± 4% softirqs.CPU135.RCU
23588 +10.3% 26025 ± 2% softirqs.CPU135.SCHED
37660 ± 5% +15.2% 43382 ± 4% softirqs.CPU136.RCU
23541 ± 2% +10.6% 26031 ± 2% softirqs.CPU136.SCHED
37788 ± 5% +16.7% 44099 ± 4% softirqs.CPU137.RCU
23576 ± 2% +10.6% 26085 ± 2% softirqs.CPU137.SCHED
37342 ± 5% +15.9% 43284 ± 4% softirqs.CPU138.RCU
23612 ± 2% +10.5% 26099 ± 2% softirqs.CPU138.SCHED
37334 ± 4% +15.6% 43160 softirqs.CPU139.RCU
23611 ± 2% +10.9% 26196 ± 2% softirqs.CPU139.SCHED
39043 ± 10% +16.0% 45285 softirqs.CPU14.RCU
22623 +12.7% 25501 softirqs.CPU14.SCHED
37910 ± 3% +15.6% 43818 ± 3% softirqs.CPU140.RCU
23741 +9.9% 26096 ± 2% softirqs.CPU140.SCHED
37385 ± 5% +15.9% 43314 ± 2% softirqs.CPU141.RCU
23644 +10.2% 26055 ± 2% softirqs.CPU141.SCHED
37687 ± 4% +14.7% 43214 ± 2% softirqs.CPU142.RCU
23584 +10.9% 26146 ± 2% softirqs.CPU142.SCHED
37464 ± 4% +15.4% 43242 ± 3% softirqs.CPU143.RCU
23566 ± 2% +10.3% 25996 ± 2% softirqs.CPU143.SCHED
40618 ± 5% +11.8% 45399 ± 3% softirqs.CPU144.RCU
38922 +14.9% 44714 ± 3% softirqs.CPU145.RCU
23349 ± 2% +12.8% 26328 ± 3% softirqs.CPU145.SCHED
37845 ± 4% +18.6% 44899 ± 3% softirqs.CPU146.RCU
37671 ± 3% +15.5% 43498 ± 6% softirqs.CPU147.RCU
22851 ± 5% +14.4% 26151 ± 3% softirqs.CPU147.SCHED
37970 ± 2% +15.0% 43682 ± 5% softirqs.CPU148.RCU
23319 ± 2% +12.3% 26190 ± 3% softirqs.CPU148.SCHED
37415 ± 5% +17.2% 43849 ± 6% softirqs.CPU149.RCU
20611 ± 18% +26.8% 26135 ± 3% softirqs.CPU149.SCHED
37987 ± 9% +19.2% 45267 softirqs.CPU15.RCU
22407 +13.9% 25527 softirqs.CPU15.SCHED
37807 ± 4% +14.6% 43309 ± 6% softirqs.CPU150.RCU
23166 ± 3% +12.2% 26004 ± 3% softirqs.CPU150.SCHED
39077 ± 7% +14.0% 44559 ± 6% softirqs.CPU151.RCU
23259 ± 3% +12.5% 26171 ± 3% softirqs.CPU151.SCHED
38494 ± 2% +15.8% 44559 ± 5% softirqs.CPU152.RCU
23514 ± 3% +11.7% 26263 ± 2% softirqs.CPU152.SCHED
38100 ± 2% +16.3% 44296 ± 4% softirqs.CPU153.RCU
23340 ± 3% +12.4% 26244 ± 2% softirqs.CPU153.SCHED
37440 ± 5% +17.5% 43999 ± 6% softirqs.CPU154.RCU
19448 ± 30% +34.6% 26179 ± 2% softirqs.CPU154.SCHED
38227 ± 3% +15.3% 44062 ± 5% softirqs.CPU155.RCU
23320 ± 3% +12.4% 26201 ± 3% softirqs.CPU155.SCHED
37553 ± 5% +18.3% 44424 ± 5% softirqs.CPU156.RCU
23197 ± 2% +13.1% 26244 ± 2% softirqs.CPU156.SCHED
37353 ± 5% +20.4% 44987 ± 4% softirqs.CPU157.RCU
23166 ± 2% +13.8% 26353 softirqs.CPU157.SCHED
37654 ± 4% +17.4% 44190 ± 6% softirqs.CPU158.RCU
23158 ± 3% +13.3% 26250 ± 2% softirqs.CPU158.SCHED
38038 ± 2% +16.8% 44436 ± 4% softirqs.CPU159.RCU
23278 ± 2% +12.9% 26277 ± 2% softirqs.CPU159.SCHED
37825 ± 5% +17.2% 44334 ± 4% softirqs.CPU16.RCU
22657 ± 2% +12.3% 25436 softirqs.CPU16.SCHED
37508 ± 6% +16.2% 43574 ± 7% softirqs.CPU160.RCU
23213 ± 3% +12.7% 26153 ± 2% softirqs.CPU160.SCHED
38423 ± 2% +13.1% 43467 ± 7% softirqs.CPU161.RCU
23571 ± 3% +11.0% 26162 ± 2% softirqs.CPU161.SCHED
37438 ± 4% +15.3% 43184 ± 8% softirqs.CPU162.RCU
23250 ± 3% +11.8% 25986 ± 3% softirqs.CPU162.SCHED
37704 ± 4% +15.9% 43713 ± 5% softirqs.CPU163.RCU
23230 ± 2% +12.8% 26202 ± 2% softirqs.CPU163.SCHED
37535 ± 5% +16.0% 43549 ± 6% softirqs.CPU164.RCU
37824 ± 5% +16.0% 43879 ± 6% softirqs.CPU165.RCU
23221 ± 2% +13.1% 26261 ± 2% softirqs.CPU165.SCHED
37490 ± 5% +15.9% 43458 ± 7% softirqs.CPU166.RCU
23155 ± 3% +13.3% 26223 ± 2% softirqs.CPU166.SCHED
37588 ± 6% +15.1% 43267 ± 7% softirqs.CPU167.RCU
23221 ± 3% +12.7% 26170 ± 2% softirqs.CPU167.SCHED
35517 ± 2% +22.4% 43476 ± 5% softirqs.CPU168.RCU
23310 ± 2% +16.1% 27051 softirqs.CPU168.SCHED
76093 ± 8% +13.8% 86580 softirqs.CPU168.TIMER
35691 +18.6% 42316 ± 3% softirqs.CPU169.RCU
23288 ± 2% +15.2% 26818 softirqs.CPU169.SCHED
76751 ± 8% +13.0% 86714 ± 2% softirqs.CPU169.TIMER
37125 ± 7% +19.9% 44524 ± 4% softirqs.CPU17.RCU
22629 +12.0% 25345 softirqs.CPU17.SCHED
35606 ± 2% +17.0% 41656 ± 3% softirqs.CPU170.RCU
23461 ± 2% +13.3% 26582 softirqs.CPU170.SCHED
35976 +16.4% 41875 ± 3% softirqs.CPU171.RCU
23419 ± 2% +14.1% 26718 softirqs.CPU171.SCHED
35525 ± 2% +17.7% 41829 ± 3% softirqs.CPU172.RCU
23331 ± 2% +13.8% 26555 softirqs.CPU172.SCHED
35040 ± 2% +17.2% 41052 ± 5% softirqs.CPU173.RCU
23809 ± 2% +11.0% 26433 ± 2% softirqs.CPU173.SCHED
35464 ± 2% +16.0% 41153 ± 4% softirqs.CPU174.RCU
23458 ± 3% +12.6% 26412 softirqs.CPU174.SCHED
35984 ± 3% +13.7% 40924 ± 5% softirqs.CPU175.RCU
23623 ± 2% +11.5% 26343 ± 2% softirqs.CPU175.SCHED
23485 ± 2% +12.5% 26417 ± 2% softirqs.CPU176.SCHED
37325 +13.8% 42467 ± 6% softirqs.CPU177.RCU
23422 ± 2% +13.0% 26459 ± 2% softirqs.CPU177.SCHED
37252 +15.8% 43134 ± 5% softirqs.CPU178.RCU
23362 ± 2% +13.4% 26497 softirqs.CPU178.SCHED
23398 ± 2% +13.0% 26438 ± 2% softirqs.CPU179.SCHED
77017 ± 9% +11.9% 86217 ± 2% softirqs.CPU179.TIMER
38417 ± 6% +16.0% 44570 ± 4% softirqs.CPU18.RCU
22927 ± 3% +11.1% 25465 softirqs.CPU18.SCHED
37203 +14.4% 42565 ± 6% softirqs.CPU180.RCU
23461 ± 2% +13.1% 26536 softirqs.CPU180.SCHED
36948 +16.0% 42862 ± 4% softirqs.CPU181.RCU
20978 ± 20% +26.3% 26496 softirqs.CPU181.SCHED
76051 ± 8% +15.5% 87850 ± 2% softirqs.CPU181.TIMER
37023 +15.8% 42889 ± 4% softirqs.CPU182.RCU
23354 ± 2% +13.8% 26573 softirqs.CPU182.SCHED
37052 +15.3% 42710 ± 5% softirqs.CPU183.RCU
23340 ± 2% +13.2% 26425 ± 2% softirqs.CPU183.SCHED
37500 ± 2% +15.3% 43245 ± 4% softirqs.CPU184.RCU
23303 ± 2% +13.9% 26547 softirqs.CPU184.SCHED
37722 ± 2% +14.2% 43089 ± 5% softirqs.CPU185.RCU
23411 ± 2% +12.8% 26415 ± 2% softirqs.CPU185.SCHED
37432 ± 3% +13.4% 42436 ± 5% softirqs.CPU186.RCU
23261 ± 2% +13.8% 26466 softirqs.CPU186.SCHED
37440 ± 2% +16.2% 43509 ± 7% softirqs.CPU187.RCU
23350 ± 2% +13.5% 26513 softirqs.CPU187.SCHED
77139 ± 8% +12.4% 86739 softirqs.CPU187.TIMER
37036 +15.5% 42762 ± 5% softirqs.CPU188.RCU
23398 ± 2% +13.2% 26478 ± 2% softirqs.CPU188.SCHED
37092 +15.5% 42852 ± 6% softirqs.CPU189.RCU
23406 ± 2% +13.5% 26562 ± 2% softirqs.CPU189.SCHED
76788 ± 9% +13.0% 86780 ± 2% softirqs.CPU189.TIMER
37797 ± 6% +20.8% 45658 ± 4% softirqs.CPU19.RCU
37235 +17.3% 43659 ± 8% softirqs.CPU190.RCU
23416 ± 2% +13.1% 26484 ± 2% softirqs.CPU190.SCHED
76806 ± 9% +12.9% 86678 softirqs.CPU190.TIMER
37436 +15.0% 43044 ± 6% softirqs.CPU191.RCU
23377 ± 2% +12.9% 26404 ± 2% softirqs.CPU191.SCHED
76873 ± 8% +13.1% 86951 softirqs.CPU191.TIMER
24154 ± 4% +11.6% 26960 ± 5% softirqs.CPU2.SCHED
37715 ± 4% +18.5% 44682 ± 4% softirqs.CPU20.RCU
37334 ± 7% +20.6% 45015 ± 3% softirqs.CPU21.RCU
22774 +12.6% 25636 ± 2% softirqs.CPU21.SCHED
37209 ± 7% +18.6% 44117 ± 3% softirqs.CPU22.RCU
22842 +11.1% 25385 softirqs.CPU22.SCHED
36989 ± 8% +20.4% 44542 ± 3% softirqs.CPU23.RCU
22408 +13.4% 25414 ± 2% softirqs.CPU23.SCHED
38552 ± 7% +16.4% 44873 ± 4% softirqs.CPU27.RCU
38113 ± 5% +16.2% 44294 ± 4% softirqs.CPU28.RCU
36818 ± 4% +21.1% 44594 ± 3% softirqs.CPU29.RCU
21560 ± 21% +21.5% 26203 ± 4% softirqs.CPU3.SCHED
37247 ± 4% +18.8% 44250 ± 4% softirqs.CPU30.RCU
37095 ± 4% +17.3% 43501 ± 8% softirqs.CPU31.RCU
23029 ± 2% +12.5% 25914 ± 2% softirqs.CPU31.SCHED
41431 ± 3% +14.9% 47594 ± 2% softirqs.CPU32.RCU
39993 +19.4% 47763 softirqs.CPU33.RCU
40773 ± 2% +15.9% 47242 softirqs.CPU34.RCU
40747 ± 2% +16.5% 47480 softirqs.CPU35.RCU
41402 +15.3% 47742 softirqs.CPU36.RCU
41868 ± 3% +14.4% 47901 softirqs.CPU37.RCU
40723 +17.7% 47935 softirqs.CPU38.RCU
41219 ± 2% +15.3% 47544 softirqs.CPU39.RCU
41121 ± 4% +10.4% 45417 softirqs.CPU4.RCU
23445 ± 4% +9.5% 25678 softirqs.CPU4.SCHED
40413 +17.1% 47316 softirqs.CPU40.RCU
40777 ± 2% +17.0% 47714 softirqs.CPU41.RCU
41548 ± 2% +15.2% 47866 softirqs.CPU42.RCU
41292 +16.0% 47893 softirqs.CPU43.RCU
41525 ± 3% +13.3% 47055 softirqs.CPU44.RCU
40733 ± 2% +15.7% 47132 softirqs.CPU45.RCU
40454 +17.5% 47527 softirqs.CPU46.RCU
40582 +15.7% 46961 softirqs.CPU47.RCU
74808 ± 6% +7.6% 80484 ± 4% softirqs.CPU48.TIMER
39358 ± 8% +17.4% 46210 softirqs.CPU5.RCU
22888 ± 2% +11.7% 25577 ± 2% softirqs.CPU5.SCHED
42157 ± 4% +19.9% 50540 ± 11% softirqs.CPU50.RCU
42221 ± 4% +15.0% 48568 ± 4% softirqs.CPU51.RCU
41570 ± 2% +16.5% 48416 ± 2% softirqs.CPU52.RCU
23013 ± 2% +11.5% 25653 ± 2% softirqs.CPU52.SCHED
41498 +16.0% 48155 ± 2% softirqs.CPU53.RCU
22925 ± 2% +11.5% 25566 softirqs.CPU53.SCHED
40689 ± 2% +20.3% 48965 ± 5% softirqs.CPU54.RCU
22868 ± 2% +12.8% 25793 ± 2% softirqs.CPU54.SCHED
40553 +18.6% 48079 ± 2% softirqs.CPU55.RCU
22865 ± 2% +12.1% 25628 ± 2% softirqs.CPU55.SCHED
40568 +19.2% 48339 ± 3% softirqs.CPU56.RCU
22890 ± 2% +12.3% 25697 softirqs.CPU56.SCHED
41044 +16.8% 47944 ± 2% softirqs.CPU57.RCU
22986 ± 2% +11.2% 25565 softirqs.CPU57.SCHED
40776 +19.7% 48815 softirqs.CPU58.RCU
22916 ± 2% +11.5% 25559 softirqs.CPU58.SCHED
41186 +17.6% 48451 ± 2% softirqs.CPU59.RCU
22939 ± 2% +11.5% 25583 softirqs.CPU59.SCHED
38904 ± 8% +19.0% 46315 softirqs.CPU6.RCU
22878 ± 2% +11.7% 25564 ± 3% softirqs.CPU6.SCHED
39689 +19.8% 47555 ± 2% softirqs.CPU60.RCU
22936 ± 2% +11.5% 25585 softirqs.CPU60.SCHED
40190 +18.9% 47783 ± 2% softirqs.CPU61.RCU
40331 +18.5% 47778 ± 2% softirqs.CPU62.RCU
22847 ± 2% +11.9% 25561 softirqs.CPU62.SCHED
40616 +18.2% 48015 ± 2% softirqs.CPU63.RCU
22895 ± 2% +12.1% 25668 softirqs.CPU63.SCHED
39812 ± 2% +15.3% 45906 ± 3% softirqs.CPU64.RCU
22938 ± 2% +11.6% 25605 softirqs.CPU64.SCHED
40162 ± 3% +15.6% 46409 ± 4% softirqs.CPU65.RCU
22949 ± 2% +11.3% 25536 softirqs.CPU65.SCHED
39068 ± 5% +17.0% 45717 ± 4% softirqs.CPU66.RCU
22966 ± 2% +12.0% 25732 ± 2% softirqs.CPU66.SCHED
39653 ± 3% +17.2% 46480 ± 3% softirqs.CPU67.RCU
22857 ± 2% +12.6% 25739 ± 2% softirqs.CPU67.SCHED
39752 ± 2% +15.2% 45788 ± 3% softirqs.CPU68.RCU
22960 ± 2% +11.4% 25582 softirqs.CPU68.SCHED
40083 +14.3% 45820 ± 3% softirqs.CPU69.RCU
23027 ± 2% +11.1% 25587 softirqs.CPU69.SCHED
39790 ± 12% +19.6% 47584 ± 2% softirqs.CPU7.RCU
39400 ± 4% +18.4% 46646 ± 3% softirqs.CPU70.RCU
22875 ± 2% +12.5% 25729 softirqs.CPU70.SCHED
40221 ± 3% +15.2% 46339 ± 3% softirqs.CPU71.RCU
23196 ± 2% +10.3% 25579 softirqs.CPU71.SCHED
38088 ± 2% +104.1% 77745 ± 74% softirqs.CPU72.RCU
22775 ± 2% +43.9% 32770 ± 39% softirqs.CPU72.SCHED
37394 ± 2% +33.9% 50070 ± 21% softirqs.CPU73.RCU
22540 ± 2% +20.3% 27124 ± 10% softirqs.CPU73.SCHED
37147 ± 2% +20.5% 44764 ± 3% softirqs.CPU74.RCU
22567 ± 3% +14.8% 25902 ± 3% softirqs.CPU74.SCHED
37254 ± 3% +18.5% 44164 ± 2% softirqs.CPU75.RCU
22518 ± 2% +14.7% 25838 ± 3% softirqs.CPU75.SCHED
37196 ± 2% +16.9% 43495 ± 2% softirqs.CPU76.RCU
22474 ± 2% +13.9% 25605 softirqs.CPU76.SCHED
37634 ± 3% +15.1% 43319 ± 3% softirqs.CPU77.RCU
20355 ± 19% +25.2% 25491 softirqs.CPU77.SCHED
37561 ± 2% +15.6% 43428 ± 3% softirqs.CPU78.RCU
37383 ± 3% +17.5% 43937 softirqs.CPU79.RCU
22520 ± 2% +13.6% 25577 softirqs.CPU79.SCHED
38851 ± 8% +17.7% 45730 softirqs.CPU8.RCU
22688 +12.5% 25532 softirqs.CPU8.SCHED
38049 +16.3% 44269 ± 3% softirqs.CPU80.RCU
22511 ± 2% +14.0% 25651 softirqs.CPU80.SCHED
38445 +16.3% 44698 ± 2% softirqs.CPU81.RCU
22534 ± 2% +13.7% 25626 softirqs.CPU81.SCHED
38367 +18.0% 45261 softirqs.CPU82.RCU
22543 ± 2% +13.6% 25608 softirqs.CPU82.SCHED
38628 +16.9% 45158 ± 2% softirqs.CPU83.RCU
22497 ± 2% +13.2% 25462 softirqs.CPU83.SCHED
38136 +18.2% 45067 softirqs.CPU84.RCU
22189 ± 3% +15.8% 25692 softirqs.CPU84.SCHED
38459 +16.2% 44671 ± 3% softirqs.CPU85.RCU
22578 ± 2% +13.3% 25572 softirqs.CPU85.SCHED
38472 ± 2% +15.5% 44432 ± 3% softirqs.CPU86.RCU
22668 ± 2% +12.5% 25512 softirqs.CPU86.SCHED
38427 +15.8% 44517 ± 2% softirqs.CPU87.RCU
22511 ± 2% +13.0% 25431 softirqs.CPU87.SCHED
38795 ± 2% +15.9% 44976 softirqs.CPU88.RCU
22446 ± 2% +14.2% 25629 softirqs.CPU88.SCHED
39034 +17.1% 45723 softirqs.CPU89.RCU
22422 ± 2% +14.0% 25569 softirqs.CPU89.SCHED
39295 ± 5% +16.6% 45827 ± 2% softirqs.CPU9.RCU
22890 ± 2% +11.4% 25508 softirqs.CPU9.SCHED
38748 +17.1% 45365 softirqs.CPU90.RCU
22384 ± 2% +15.4% 25822 softirqs.CPU90.SCHED
38370 +16.7% 44793 softirqs.CPU91.RCU
22412 ± 2% +13.7% 25493 softirqs.CPU91.SCHED
38256 +16.0% 44386 ± 2% softirqs.CPU92.RCU
22484 ± 2% +13.5% 25510 softirqs.CPU92.SCHED
38592 ± 2% +15.2% 44446 ± 3% softirqs.CPU93.RCU
22485 ± 2% +12.9% 25388 softirqs.CPU93.SCHED
38636 +15.8% 44729 ± 3% softirqs.CPU94.RCU
22631 ± 2% +12.4% 25427 softirqs.CPU94.SCHED
39104 +14.7% 44866 ± 5% softirqs.CPU95.RCU
39894 ± 3% +13.4% 45224 ± 5% softirqs.CPU96.RCU
39210 ± 2% +15.8% 45419 ± 5% softirqs.CPU97.RCU
23501 ± 3% +12.9% 26539 ± 3% softirqs.CPU97.SCHED
70819 ± 4% +15.5% 81767 ± 5% softirqs.CPU97.TIMER
37452 ± 3% +17.9% 44143 ± 2% softirqs.CPU98.RCU
23119 ± 2% +13.6% 26253 ± 3% softirqs.CPU98.SCHED
71306 ± 2% +15.7% 82467 ± 6% softirqs.CPU98.TIMER
37239 ± 4% +17.6% 43794 ± 2% softirqs.CPU99.RCU
23219 ± 2% +12.8% 26190 ± 2% softirqs.CPU99.SCHED
72423 ± 3% +13.9% 82474 ± 6% softirqs.CPU99.TIMER
7528698 +16.2% 8745025 softirqs.RCU
4441833 +11.8% 4964631 softirqs.SCHED
14417750 ± 4% +9.3% 15759607 softirqs.TIMER
fsmark.time.system_time
180 +---------------------------------------------------------------------+
| O O O O O O |
175 |-+ O O O O O O O O O O O O |
| |
170 |-+ O O O |
| |
165 |-+ |
| |
160 |-+ |
| |
155 |-+ + +.. |
| +..+.+. + + : +.+.+..+.+. .+.+.+.. .+. .+.+. .+.|
150 |++ +..+ +..+. : +. + +. +..+.+.+. |
| + |
145 +---------------------------------------------------------------------+
fsmark.time.elapsed_time
200 +---------------------------------------------------------------------+
| O |
195 |-+ O |
190 |-O O O O O O O O |
| O O O O O O O O O |
185 |-+ O O |
| O |
180 |-+ |
| |
175 |-+ |
170 |-+ |
| +.. + +.. |
165 |:+ +.+.+.. + + .+ : +.+.+..+.+. .+.+.+..+.+. .+.+. .+. .+..+.|
|: + +. + : +. +. +. + |
160 +---------------------------------------------------------------------+
fsmark.time.elapsed_time.max
200 +---------------------------------------------------------------------+
| O |
195 |-+ O |
190 |-O O O O O O O O |
| O O O O O O O O O |
185 |-+ O O |
| O |
180 |-+ |
| |
175 |-+ |
170 |-+ |
| +.. + +.. |
165 |:+ +.+.+.. + + .+ : +.+.+..+.+. .+.+.+..+.+. .+.+. .+. .+..+.|
|: + +. + : +. +. +. + |
160 +---------------------------------------------------------------------+
fsmark.time.voluntary_context_switches
1.3e+06 +----------------------------------------------------------------+
| O O O O O |
1.28e+06 |-+ O O O O O O O O O O O O |
1.26e+06 |-+ O O O O O |
| |
1.24e+06 |-+ |
| |
1.22e+06 |-+ |
| |
1.2e+06 |-+ |
1.18e+06 |-+ |
| |
1.16e+06 |-+ +. |
|.+.+..+. .+.+. .+..+.+.+.+.+.+..+. .+. + +..+.+.+.+.+.+.. .+.+.|
1.14e+06 +----------------------------------------------------------------+
fsmark.files_per_sec
6600 +--------------------------------------------------------------------+
|: |
6400 |:+ .+ +. .+. .+. .+..+. .+.+.+.+.. |
| +..+.+.+. + + +. +.+..+.+.+..+ +.+..+.+ +.+. +.|
6200 |-+ + |
| |
6000 |-+ |
| |
5800 |-+ |
| |
5600 |-+ O |
| O O O O O O O O O O O O O O O |
5400 |-O O O O O |
| O |
5200 +--------------------------------------------------------------------+
[*] bisect-good sample
[O] bisect-bad sample
***************************************************************************************************
lkp-csl-2ap1: 192 threads Intel(R) Xeon(R) CPU @ 2.20GHz with 192G memory
=========================================================================================
bs/compiler/cpufreq_governor/disk/fs/ioengine/kconfig/nr_task/rootfs/runtime/rw/tbox_group/test_size/testcase/ucode:
4k/gcc-9/performance/1SSD/btrfs/sync/x86_64-rhel-8.3/8/debian-10.4-x86_64-20200603.cgz/300s/randwrite/lkp-csl-2ap1/512g/fio-basic/0x4002f01
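For reference, a minimal fio invocation approximating the parameter string above — block size, I/O engine, job count, runtime, pattern and test size are taken directly from that line; the btrfs mount point and the per-job split of the 512g test size are assumptions, and lkp's generated job file may differ in detail:

    # sketch only: 8 sync-engine randwrite jobs on the btrfs-formatted 1SSD,
    # 4k blocks, 300s runtime; /mnt/btrfs and size-per-job are assumed
    fio --name=fio-basic --directory=/mnt/btrfs \
        --rw=randwrite --bs=4k --ioengine=sync \
        --numjobs=8 --size=64g --runtime=300 --time_based \
        --group_reporting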
commit:
f8d6c28b2a ("block: lift setting the readahead size into the block layer")
54529aac98 ("block: make QUEUE_SYSFS_BIT_FNS a little more useful")
f8d6c28b2a8e1156       54529aac984de8d3928810c85b5
----------------       ---------------------------
       fail:runs  %reproduction  fail:runs
           |            |            |
              :4       25%          1:4   kmsg.ACPI_Error
              :4        8%          0:4   perf-profile.children.cycles-pp.error_entry
              :4        5%          0:4   perf-profile.self.cycles-pp.error_entry
         %stddev     %change      %stddev
             \          |             \
0.01 ± 58% +10.6 10.63 ± 9% fio.latency_100us%
51.65 ± 6% -51.1 0.58 ± 4% fio.latency_10ms%
0.02 ± 87% +0.8 0.86 ± 7% fio.latency_10us%
32.23 ± 11% -32.1 0.16 ± 7% fio.latency_20ms%
0.00 ±173% +3.1 3.14 ± 56% fio.latency_20us%
0.00 ±173% +2.8 2.83 ± 3% fio.latency_250us%
0.02 ± 82% +0.2 0.23 ± 7% fio.latency_2ms%
12.31 ± 7% -12.2 0.12 ± 12% fio.latency_50ms%
0.02 ± 28% +78.8 78.79 fio.latency_50us%
23.52 ± 3% +1179.5% 300.87 fio.time.elapsed_time
23.52 ± 3% +1179.5% 300.87 fio.time.elapsed_time.max
8.00 -100.0% 0.00 fio.time.exit_status
132284 +14274.3% 19014850 ± 2% fio.time.file_system_outputs
142.50 ± 5% -82.1% 25.50 ± 4% fio.time.percent_of_cpu_this_job_got
0.68 ± 4% +6643.4% 45.86 ± 2% fio.time.system_time
19211 +11893.5% 2304104 ± 4% fio.time.voluntary_context_switches
16460 +14337.0% 2376400 ± 2% fio.workload
2.83 ± 3% +993.7% 30.93 ± 2% fio.write_bw_MBps
20840448 ± 2% -99.7% 61056 fio.write_clat_90%_us
23068672 ± 2% -99.1% 196608 fio.write_clat_95%_us
26411008 -75.1% 6586368 ± 6% fio.write_clat_99%_us
11056088 ± 3% -90.9% 1010419 ± 2% fio.write_clat_mean_us
5848402 ± 3% +119.9% 12862343 ± 3% fio.write_clat_stddev
724.45 ± 3% +993.0% 7918 ± 2% fio.write_iops
1.88 ± 2% +43.6% 2.70 ± 8% iostat.cpu.system
107585 ± 15% +348.6% 482655 ± 3% vmstat.io.bo
1649211 +424.5% 8650204 vmstat.memory.cache
14676 ± 8% +554.5% 96054 ± 2% vmstat.system.cs
8012640 ± 10% +4267.8% 3.5e+08 ± 11% cpuidle.C1.time
64455 ± 8% +22724.8% 14711844 ± 10% cpuidle.C1.usage
171217 ± 6% +9942.7% 17194801 ±153% cpuidle.POLL.time
42308 ± 6% +5631.7% 2424981 ±136% cpuidle.POLL.usage
0.01 ± 22% -0.0 0.00 ±173% mpstat.cpu.all.iowait%
1.49 +0.3 1.77 ± 12% mpstat.cpu.all.irq%
0.21 -0.0 0.17 ± 2% mpstat.cpu.all.soft%
0.21 ± 8% +0.6 0.77 mpstat.cpu.all.sys%
0.85 ± 5% -0.8 0.08 ± 7% mpstat.cpu.all.usr%
167858 ± 67% +1154.4% 2105580 ± 67% numa-numastat.node0.local_node
183324 ± 55% +1054.5% 2116435 ± 67% numa-numastat.node0.numa_hit
30888 ± 39% +2842.5% 908884 ±107% numa-numastat.node1.local_node
57286 ± 16% +1527.4% 932306 ±103% numa-numastat.node1.numa_hit
19238 ± 78% +4941.8% 969971 ±112% numa-numastat.node2.local_node
42546 ± 18% +2245.6% 997984 ±109% numa-numastat.node2.numa_hit
9.50 ±110% +52163.2% 4965 ± 58% numa-vmstat.node0.nr_dirty
11440 ± 92% +6398.6% 743442 ± 87% numa-vmstat.node0.nr_inactive_file
30062 ± 14% +237.3% 101404 ± 59% numa-vmstat.node0.nr_slab_unreclaimable
11440 ± 92% +6398.6% 743442 ± 87% numa-vmstat.node0.nr_zone_inactive_file
11.50 ±120% +46078.3% 5310 ± 59% numa-vmstat.node0.nr_zone_write_pending
454661 ± 14% +106.9% 940779 ± 57% numa-vmstat.node1.numa_hit
60255 +581.0% 410314 ±127% numa-vmstat.node2.nr_file_pages
48974 ± 96% -96.0% 1947 ± 33% numa-vmstat.node3.nr_mapped
55.75 ± 97% +35017.5% 19578 ± 58% numa-meminfo.node0.Dirty
45683 ± 92% +6408.1% 2973075 ± 87% numa-meminfo.node0.Inactive(file)
120285 ± 14% +237.2% 405548 ± 59% numa-meminfo.node0.SUnreclaim
167194 ± 18% +352.7% 756837 ± 68% numa-meminfo.node0.Slab
88.50 ±116% +946.6% 926.25 ± 75% numa-meminfo.node0.Writeback
241015 +581.0% 1641234 ±127% numa-meminfo.node2.FilePages
4900 ± 94% +27922.1% 1373152 ±150% numa-meminfo.node2.Inactive
126.75 ±138% +42075.7% 53457 ± 73% numa-meminfo.node3.AnonHugePages
195556 ± 96% -96.0% 7800 ± 33% numa-meminfo.node3.Mapped
391075 ± 2% +242.7% 1340070 meminfo.Active
383506 ± 2% -16.0% 322083 meminfo.Active(anon)
7568 ± 8% +13350.8% 1017986 meminfo.Active(file)
84823 ± 7% +150.7% 212623 meminfo.AnonHugePages
382110 ± 2% -17.5% 315194 meminfo.AnonPages
1533425 +418.0% 7943860 meminfo.Cached
127.25 ± 85% +21697.4% 27737 ± 3% meminfo.Dirty
541161 +996.8% 5935426 ± 2% meminfo.Inactive
151550 +3539.4% 5515485 ± 2% meminfo.Inactive(file)
131567 +424.9% 690598 meminfo.KReclaimable
4029450 +177.4% 11177188 meminfo.Memused
7554 ± 3% -38.9% 4617 meminfo.PageTables
131567 +424.9% 690598 meminfo.SReclaimable
347533 +148.4% 863208 meminfo.SUnreclaim
479100 +224.3% 1553806 meminfo.Slab
233.75 ± 72% +544.3% 1506 ± 3% meminfo.Writeback
178754 ± 3% -70.9% 52052 ± 2% meminfo.max_used_kB
95883 ± 2% -16.0% 80513 proc-vmstat.nr_active_anon
1891 ± 8% +13355.1% 254502 proc-vmstat.nr_active_file
95539 ± 2% -17.5% 78789 proc-vmstat.nr_anon_pages
40.75 ± 8% +153.4% 103.25 proc-vmstat.nr_anon_transparent_hugepages
662355 ± 13% +2858.1% 19592962 ± 3% proc-vmstat.nr_dirtied
32.00 ± 85% +21068.0% 6773 ± 4% proc-vmstat.nr_dirty
383376 +418.2% 1986627 proc-vmstat.nr_file_pages
48424268 -3.7% 46636528 proc-vmstat.nr_free_pages
97409 +7.4% 104645 proc-vmstat.nr_inactive_anon
37888 +3541.9% 1379854 ± 2% proc-vmstat.nr_inactive_file
102723 +6.8% 109689 proc-vmstat.nr_mapped
1889 ± 3% -38.8% 1156 proc-vmstat.nr_page_table_pages
97886 +8.8% 106528 proc-vmstat.nr_shmem
32895 +425.0% 172714 proc-vmstat.nr_slab_reclaimable
86888 +148.4% 215854 proc-vmstat.nr_slab_unreclaimable
56.75 ± 71% +625.1% 411.50 ± 2% proc-vmstat.nr_writeback
662685 ± 13% +2847.8% 19534416 ± 3% proc-vmstat.nr_written
95883 ± 2% -16.0% 80513 proc-vmstat.nr_zone_active_anon
1891 ± 8% +13355.1% 254502 proc-vmstat.nr_zone_active_file
97409 +7.4% 104645 proc-vmstat.nr_zone_inactive_anon
37888 +3541.9% 1379854 ± 2% proc-vmstat.nr_zone_inactive_file
24.75 ± 88% +29648.5% 7362 ± 3% proc-vmstat.nr_zone_write_pending
504181 ± 3% +749.5% 4283048 ± 2% proc-vmstat.numa_hit
411017 ± 3% +919.3% 4189581 ± 2% proc-vmstat.numa_local
114824 ± 17% +86.5% 214154 ± 3% proc-vmstat.pgactivate
496476 ± 3% +870.7% 4819481 proc-vmstat.pgalloc_normal
322549 +320.6% 1356514 proc-vmstat.pgfault
404143 ± 4% +361.1% 1863546 proc-vmstat.pgfree
2727761 ± 13% +5274.4% 1.466e+08 ± 3% proc-vmstat.pgpgout
9.336e+08 ± 3% +83.8% 1.716e+09 ± 5% perf-stat.i.branch-instructions
9432178 +80.2% 16996652 ± 15% perf-stat.i.cache-misses
39024949 ± 2% +123.4% 87179267 ± 45% perf-stat.i.cache-references
14747 ± 7% +555.9% 96725 ± 2% perf-stat.i.context-switches
4.26 ± 2% -53.8% 1.97 ± 12% perf-stat.i.cpi
266.31 -21.3% 209.72 perf-stat.i.cpu-migrations
1928 ± 3% -46.7% 1027 ± 4% perf-stat.i.cycles-between-cache-misses
0.01 ± 13% +0.1 0.09 ± 66% perf-stat.i.dTLB-load-miss-rate%
134104 ± 14% +1375.2% 1978299 ± 61% perf-stat.i.dTLB-load-misses
1.115e+09 ± 5% +99.2% 2.222e+09 ± 4% perf-stat.i.dTLB-loads
5.321e+08 ± 5% +109.0% 1.112e+09 ± 4% perf-stat.i.dTLB-stores
4.63e+09 ± 3% +89.1% 8.758e+09 ± 5% perf-stat.i.instructions
998.21 ± 4% +71.7% 1714 ± 19% perf-stat.i.instructions-per-iTLB-miss
0.26 ± 3% +97.5% 0.52 ± 14% perf-stat.i.ipc
1.53 ± 7% -32.6% 1.03 ± 5% perf-stat.i.metric.K/sec
13.68 ± 4% +95.9% 26.81 ± 4% perf-stat.i.metric.M/sec
8800 -54.0% 4048 perf-stat.i.minor-faults
90.77 -39.9 50.90 ± 45% perf-stat.i.node-load-miss-rate%
153794 ± 19% +1166.2% 1947374 ± 54% perf-stat.i.node-loads
80.08 ± 3% -40.1 39.98 ± 56% perf-stat.i.node-store-miss-rate%
62705 ± 13% +474.6% 360324 ± 39% perf-stat.i.node-stores
8800 -54.0% 4048 perf-stat.i.page-faults
2.22 ± 3% -1.1 1.08 ± 51% perf-stat.overall.branch-miss-rate%
4.29 ± 3% -53.8% 1.98 ± 11% perf-stat.overall.cpi
2106 ± 3% -51.2% 1027 ± 4% perf-stat.overall.cycles-between-cache-misses
0.01 ± 11% +0.1 0.09 ± 65% perf-stat.overall.dTLB-load-miss-rate%
999.21 ± 4% +71.5% 1713 ± 19% perf-stat.overall.instructions-per-iTLB-miss
0.23 ± 3% +119.7% 0.51 ± 13% perf-stat.overall.ipc
90.45 -39.7 50.76 ± 45% perf-stat.overall.node-load-miss-rate%
75.99 ± 2% -35.9 40.08 ± 55% perf-stat.overall.node-store-miss-rate%
6517909 -83.0% 1107486 ± 3% perf-stat.overall.path-length
8.955e+08 ± 3% +91.0% 1.71e+09 ± 5% perf-stat.ps.branch-instructions
9042845 +87.3% 16939980 ± 15% perf-stat.ps.cache-misses
37409555 ± 2% +132.3% 86888536 ± 45% perf-stat.ps.cache-references
14129 ± 7% +582.3% 96408 ± 2% perf-stat.ps.context-switches
183945 +4.0% 191354 perf-stat.ps.cpu-clock
255.33 -18.1% 209.10 perf-stat.ps.cpu-migrations
128676 ± 14% +1432.2% 1971598 ± 61% perf-stat.ps.dTLB-load-misses
1.07e+09 ± 4% +107.1% 2.215e+09 ± 4% perf-stat.ps.dTLB-loads
5.101e+08 ± 5% +117.3% 1.109e+09 ± 4% perf-stat.ps.dTLB-stores
4.441e+09 ± 3% +96.5% 8.729e+09 ± 5% perf-stat.ps.instructions
8474 -52.3% 4038 perf-stat.ps.minor-faults
147537 ± 18% +1215.4% 1940671 ± 54% perf-stat.ps.node-loads
60290 ± 13% +495.7% 359127 ± 39% perf-stat.ps.node-stores
8474 -52.3% 4038 perf-stat.ps.page-faults
183945 +4.0% 191354 perf-stat.ps.task-clock
1.073e+11 +2354.7% 2.634e+12 ± 5% perf-stat.total.instructions
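As a worked check (assuming lkp's path-length metric is total instructions divided by fio.workload): 1.073e+11 / 16460 ≈ 6.52e6 before and 2.634e+12 / 2376400 ≈ 1.11e6 after, consistent with the perf-stat.overall.path-length values reported above.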
0.70 ± 34% +2.8e+05% 1981 ± 15% sched_debug.cfs_rq:/.exec_clock.avg
28.37 ± 37% +36569.2% 10403 ± 14% sched_debug.cfs_rq:/.exec_clock.max
3.22 ± 25% +76913.6% 2476 ± 12% sched_debug.cfs_rq:/.exec_clock.stddev
1670 ± 28% +1670.3% 29569 ± 18% sched_debug.cfs_rq:/.load.avg
49868 ± 22% +1631.4% 863438 sched_debug.cfs_rq:/.load.max
6915 ± 10% +1859.9% 135538 ± 8% sched_debug.cfs_rq:/.load.stddev
59795 ± 3% +42.9% 85423 ± 2% sched_debug.cfs_rq:/.min_vruntime.avg
82225 ± 4% +25.6% 103292 sched_debug.cfs_rq:/.min_vruntime.max
33527 ± 27% +76.6% 59221 ± 19% sched_debug.cfs_rq:/.min_vruntime.min
141.33 ± 30% -66.2% 47.79 ± 6% sched_debug.cfs_rq:/.runnable_avg.avg
1120 ± 14% -38.5% 689.36 ± 4% sched_debug.cfs_rq:/.runnable_avg.max
235.83 ± 14% -50.2% 117.37 ± 3% sched_debug.cfs_rq:/.runnable_avg.stddev
140.81 ± 30% -66.1% 47.70 ± 6% sched_debug.cfs_rq:/.util_avg.avg
1119 ± 14% -38.4% 689.27 ± 4% sched_debug.cfs_rq:/.util_avg.max
235.51 ± 14% -50.2% 117.31 ± 3% sched_debug.cfs_rq:/.util_avg.stddev
829.50 -48.8% 424.72 ± 32% sched_debug.cfs_rq:/.util_est_enqueued.max
86.99 ± 14% -45.6% 47.32 ± 30% sched_debug.cfs_rq:/.util_est_enqueued.stddev
2530164 ± 48% -52.4% 1204917 ± 8% sched_debug.cpu.avg_idle.max
2063 ± 6% +2333.0% 50205 ± 92% sched_debug.cpu.avg_idle.min
328400 ± 29% -32.6% 221320 ± 11% sched_debug.cpu.avg_idle.stddev
43166 +312.4% 178032 ± 8% sched_debug.cpu.clock.avg
43190 +312.2% 178045 ± 8% sched_debug.cpu.clock.max
43140 +312.7% 178019 ± 8% sched_debug.cpu.clock.min
14.81 ± 21% -48.4% 7.64 ± 7% sched_debug.cpu.clock.stddev
43018 +307.5% 175286 ± 8% sched_debug.cpu.clock_task.avg
43162 +309.7% 176824 ± 8% sched_debug.cpu.clock_task.max
35239 +374.5% 167206 ± 7% sched_debug.cpu.clock_task.min
733.52 ± 3% +53.5% 1126 ± 20% sched_debug.cpu.clock_task.stddev
3600 +120.5% 7939 ± 6% sched_debug.cpu.curr->pid.max
1860093 ± 49% -60.7% 731047 ± 15% sched_debug.cpu.max_idle_balance_cost.max
176803 ± 87% -85.0% 26544 ± 55% sched_debug.cpu.max_idle_balance_cost.stddev
0.00 ± 15% -49.0% 0.00 ± 3% sched_debug.cpu.next_balance.stddev
0.04 ± 25% -51.7% 0.02 ± 16% sched_debug.cpu.nr_running.avg
0.20 ± 13% -31.0% 0.14 ± 6% sched_debug.cpu.nr_running.stddev
1655 +4610.3% 77993 ± 6% sched_debug.cpu.nr_switches.avg
11537 ± 15% +5972.4% 700592 ± 9% sched_debug.cpu.nr_switches.max
512.25 ± 25% +205.8% 1566 ± 7% sched_debug.cpu.nr_switches.min
1163 ± 6% +11814.6% 138586 ± 7% sched_debug.cpu.nr_switches.stddev
0.01 ±122% +693.3% 0.04 ± 4% sched_debug.cpu.nr_uninterruptible.avg
-56.00 -47.4% -29.45 sched_debug.cpu.nr_uninterruptible.min
23.40 ± 71% +3.3e+05% 76414 ± 6% sched_debug.cpu.sched_count.avg
3461 ± 72% +20056.5% 697668 ± 9% sched_debug.cpu.sched_count.max
262.10 ± 73% +52693.5% 138370 ± 7% sched_debug.cpu.sched_count.stddev
11.70 ± 71% +3.3e+05% 38143 ± 6% sched_debug.cpu.sched_goidle.avg
1731 ± 72% +20028.3% 348570 ± 9% sched_debug.cpu.sched_goidle.max
131.13 ± 73% +52628.4% 69142 ± 7% sched_debug.cpu.sched_goidle.stddev
17.01 ± 85% +2.2e+05% 38124 ± 6% sched_debug.cpu.ttwu_count.avg
1939 ± 74% +17590.0% 343054 ± 9% sched_debug.cpu.ttwu_count.max
162.46 ± 78% +42822.7% 69733 ± 6% sched_debug.cpu.ttwu_count.stddev
0.13 ± 59% +2.5e+06% 3222 ± 11% sched_debug.cpu.ttwu_local.avg
9.50 ± 82% +4.1e+05% 39215 ± 16% sched_debug.cpu.ttwu_local.max
0.83 ± 68% +8.1e+05% 6746 ± 7% sched_debug.cpu.ttwu_local.stddev
43145 +312.6% 178019 ± 8% sched_debug.cpu_clk
42272 +319.1% 177145 ± 8% sched_debug.ktime
43503 +310.0% 178377 ± 8% sched_debug.sched_clk
20452 ± 4% +6489.8% 1347781 ± 2% slabinfo.Acpi-State.active_objs
400.75 ± 4% +6501.8% 26456 ± 2% slabinfo.Acpi-State.active_slabs
20462 ± 4% +6494.0% 1349317 ± 2% slabinfo.Acpi-State.num_objs
400.75 ± 4% +6501.8% 26456 ± 2% slabinfo.Acpi-State.num_slabs
542.50 ± 7% +91.9% 1041 ± 22% slabinfo.biovec-max.active_objs
566.75 ± 8% +91.3% 1084 ± 23% slabinfo.biovec-max.num_objs
1627 ± 4% +300.8% 6520 ± 26% slabinfo.blkdev_ioc.active_objs
42.00 ± 5% +323.2% 177.75 ± 26% slabinfo.blkdev_ioc.active_slabs
1648 ± 4% +322.0% 6956 ± 26% slabinfo.blkdev_ioc.num_objs
42.00 ± 5% +323.2% 177.75 ± 26% slabinfo.blkdev_ioc.num_slabs
1198 ± 7% +1193.1% 15501 ± 27% slabinfo.btrfs_ordered_extent.active_objs
30.50 ± 7% +1208.2% 399.00 ± 27% slabinfo.btrfs_ordered_extent.active_slabs
1198 ± 7% +1200.0% 15584 ± 27% slabinfo.btrfs_ordered_extent.num_objs
30.50 ± 7% +1208.2% 399.00 ± 27% slabinfo.btrfs_ordered_extent.num_slabs
2770 +44.8% 4012 ± 19% slabinfo.buffer_head.active_objs
2926 +38.1% 4041 ± 19% slabinfo.buffer_head.num_objs
2121 ± 2% +66.3% 3527 ± 14% slabinfo.dmaengine-unmap-16.active_objs
2121 ± 2% +66.3% 3527 ± 14% slabinfo.dmaengine-unmap-16.num_objs
916.75 ± 6% +12.6% 1032 ± 3% slabinfo.file_lock_cache.active_objs
916.75 ± 6% +12.6% 1032 ± 3% slabinfo.file_lock_cache.num_objs
6444 ± 4% +191.6% 18791 ± 23% slabinfo.fsnotify_mark_connector.active_objs
50.00 ± 4% +195.5% 147.75 ± 23% slabinfo.fsnotify_mark_connector.active_slabs
6444 ± 4% +194.4% 18975 ± 22% slabinfo.fsnotify_mark_connector.num_objs
50.00 ± 4% +195.5% 147.75 ± 23% slabinfo.fsnotify_mark_connector.num_slabs
2831 ± 6% +198.2% 8444 ± 26% slabinfo.khugepaged_mm_slot.active_objs
81.00 ± 5% +193.8% 238.00 ± 26% slabinfo.khugepaged_mm_slot.active_slabs
2933 ± 5% +192.8% 8590 ± 26% slabinfo.khugepaged_mm_slot.num_objs
81.00 ± 5% +193.8% 238.00 ± 26% slabinfo.khugepaged_mm_slot.num_slabs
13293 ± 2% +38.2% 18373 ± 5% slabinfo.kmalloc-1k.active_objs
417.50 ± 2% +39.4% 582.00 ± 5% slabinfo.kmalloc-1k.active_slabs
13381 ± 2% +39.3% 18646 ± 5% slabinfo.kmalloc-1k.num_objs
417.50 ± 2% +39.4% 582.00 ± 5% slabinfo.kmalloc-1k.num_slabs
5546 ± 4% +5.1% 5829 ± 2% slabinfo.kmalloc-rcl-64.active_objs
5546 ± 4% +5.1% 5829 ± 2% slabinfo.kmalloc-rcl-64.num_objs
2413 ± 4% +99.5% 4813 ± 13% slabinfo.mnt_cache.active_objs
2413 ± 4% +99.5% 4814 ± 13% slabinfo.mnt_cache.num_objs
63.00 +207.1% 193.50 ± 16% slabinfo.nfs_read_data.active_objs
63.00 +207.1% 193.50 ± 16% slabinfo.nfs_read_data.num_objs
7194 ± 19% +477.8% 41568 ± 15% slabinfo.numa_policy.active_objs
115.75 ± 19% +494.6% 688.25 ± 15% slabinfo.numa_policy.active_slabs
7210 ± 19% +492.3% 42712 ± 15% slabinfo.numa_policy.num_objs
115.75 ± 19% +494.6% 688.25 ± 15% slabinfo.numa_policy.num_slabs
27102 ± 2% +9805.3% 2684621 ± 2% slabinfo.pid_namespace.active_objs
486.25 ± 2% +9760.2% 47945 ± 2% slabinfo.pid_namespace.active_slabs
27261 ± 2% +9749.0% 2684951 ± 2% slabinfo.pid_namespace.num_objs
486.25 ± 2% +9760.2% 47945 ± 2% slabinfo.pid_namespace.num_slabs
44107 +2207.4% 1017721 slabinfo.radix_tree_node.active_objs
787.25 +2209.0% 18177 slabinfo.radix_tree_node.active_slabs
44119 +2207.3% 1017966 slabinfo.radix_tree_node.num_objs
787.25 +2209.0% 18177 slabinfo.radix_tree_node.num_slabs
688.75 ± 9% +46.1% 1006 ± 8% slabinfo.skbuff_fclone_cache.active_objs
688.75 ± 9% +46.1% 1006 ± 8% slabinfo.skbuff_fclone_cache.num_objs
18421 +43.2% 26371 slabinfo.vmap_area.active_objs
288.00 +43.3% 412.75 slabinfo.vmap_area.active_slabs
18480 +43.1% 26451 slabinfo.vmap_area.num_objs
288.00 +43.3% 412.75 slabinfo.vmap_area.num_slabs
75.50 ± 51% +1125.2% 925.00 ± 84% interrupts.31:PCI-MSI.524289-edge.eth0-TxRx-0
104.25 ± 27% +6685.4% 7073 ±151% interrupts.33:PCI-MSI.524291-edge.eth0-TxRx-2
76.00 ± 51% +3560.9% 2782 ±131% interrupts.34:PCI-MSI.524292-edge.eth0-TxRx-3
49.00 ± 4% +1132.7% 604.00 interrupts.9:IO-APIC.9-fasteoi.acpi
179254 ± 2% +133.6% 418691 ± 13% interrupts.CAL:Function_call_interrupts
1147 ± 23% +202.5% 3469 ± 61% interrupts.CPU0.CAL:Function_call_interrupts
49381 ± 3% +987.1% 536845 ± 21% interrupts.CPU0.LOC:Local_timer_interrupts
1.50 ±173% +18650.0% 281.25 ± 40% interrupts.CPU0.NMI:Non-maskable_interrupts
1.50 ±173% +18650.0% 281.25 ± 40% interrupts.CPU0.PMI:Performance_monitoring_interrupts
16.50 ± 64% +1804.5% 314.25 ± 89% interrupts.CPU0.RES:Rescheduling_interrupts
49.00 ± 4% +1132.7% 604.00 interrupts.CPU1.9:IO-APIC.9-fasteoi.acpi
1280 ± 55% +435.2% 6854 ± 55% interrupts.CPU1.CAL:Function_call_interrupts
49635 ± 3% +981.4% 536765 ± 21% interrupts.CPU1.LOC:Local_timer_interrupts
1.00 ±100% +45525.0% 456.25 ± 65% interrupts.CPU1.NMI:Non-maskable_interrupts
1.00 ±100% +45525.0% 456.25 ± 65% interrupts.CPU1.PMI:Performance_monitoring_interrupts
18.00 ±110% +5736.1% 1050 ± 91% interrupts.CPU1.RES:Rescheduling_interrupts
75.50 ± 51% +1125.2% 925.00 ± 84% interrupts.CPU10.31:PCI-MSI.524289-edge.eth0-TxRx-0
49206 ± 3% +992.3% 537492 ± 21% interrupts.CPU10.LOC:Local_timer_interrupts
858.75 +514.8% 5279 ± 80% interrupts.CPU100.CAL:Function_call_interrupts
49478 ± 3% +983.3% 535977 ± 22% interrupts.CPU100.LOC:Local_timer_interrupts
8.00 ±137% +58053.1% 4652 ± 98% interrupts.CPU100.RES:Rescheduling_interrupts
859.25 +274.4% 3217 ± 42% interrupts.CPU101.CAL:Function_call_interrupts
49288 ± 3% +988.3% 536385 ± 21% interrupts.CPU101.LOC:Local_timer_interrupts
9.50 ±144% +26613.2% 2537 ± 74% interrupts.CPU101.RES:Rescheduling_interrupts
857.75 +343.3% 3802 ± 78% interrupts.CPU102.CAL:Function_call_interrupts
49294 ± 3% +986.2% 535457 ± 22% interrupts.CPU102.LOC:Local_timer_interrupts
9.25 ± 74% +38713.5% 3590 ±103% interrupts.CPU102.RES:Rescheduling_interrupts
875.75 +410.6% 4471 ± 80% interrupts.CPU103.CAL:Function_call_interrupts
49184 ± 3% +989.9% 536056 ± 22% interrupts.CPU103.LOC:Local_timer_interrupts
55.75 ±116% +7516.1% 4246 ±101% interrupts.CPU103.RES:Rescheduling_interrupts
871.75 ± 4% +541.6% 5592 ± 79% interrupts.CPU104.CAL:Function_call_interrupts
49253 ± 3% +987.7% 535737 ± 22% interrupts.CPU104.LOC:Local_timer_interrupts
17.75 ±144% +32245.1% 5741 ± 95% interrupts.CPU104.RES:Rescheduling_interrupts
49276 ± 3% +988.2% 536221 ± 22% interrupts.CPU105.LOC:Local_timer_interrupts
879.75 ± 4% +267.6% 3234 ± 73% interrupts.CPU106.CAL:Function_call_interrupts
49261 ± 3% +989.7% 536797 ± 21% interrupts.CPU106.LOC:Local_timer_interrupts
26.75 ± 99% +10216.8% 2759 ±108% interrupts.CPU106.RES:Rescheduling_interrupts
899.00 ± 6% +335.4% 3914 ± 68% interrupts.CPU107.CAL:Function_call_interrupts
49265 ± 3% +988.4% 536200 ± 21% interrupts.CPU107.LOC:Local_timer_interrupts
871.25 ± 7% +350.8% 3927 ± 55% interrupts.CPU108.CAL:Function_call_interrupts
49259 ± 3% +987.2% 535540 ± 22% interrupts.CPU108.LOC:Local_timer_interrupts
34.75 ±163% +11149.6% 3909 ± 81% interrupts.CPU108.RES:Rescheduling_interrupts
49268 ± 3% +988.4% 536234 ± 22% interrupts.CPU109.LOC:Local_timer_interrupts
0.25 ±173% +2.5e+05% 614.00 ± 38% interrupts.CPU109.NMI:Non-maskable_interrupts
0.25 ±173% +2.5e+05% 614.00 ± 38% interrupts.CPU109.PMI:Performance_monitoring_interrupts
85.75 ±164% +2859.5% 2537 ± 98% interrupts.CPU109.RES:Rescheduling_interrupts
1232 ± 50% +101.6% 2483 ± 59% interrupts.CPU11.CAL:Function_call_interrupts
49302 ± 3% +990.5% 537634 ± 21% interrupts.CPU11.LOC:Local_timer_interrupts
0.75 ±110% +56766.7% 426.50 ± 58% interrupts.CPU11.NMI:Non-maskable_interrupts
0.75 ±110% +56766.7% 426.50 ± 58% interrupts.CPU11.PMI:Performance_monitoring_interrupts
7.00 ±109% +9578.6% 677.50 ± 99% interrupts.CPU11.RES:Rescheduling_interrupts
878.75 ± 5% +314.1% 3638 ± 71% interrupts.CPU110.CAL:Function_call_interrupts
49271 ± 3% +988.4% 536248 ± 21% interrupts.CPU110.LOC:Local_timer_interrupts
28.50 ±105% +12543.9% 3603 ±108% interrupts.CPU110.RES:Rescheduling_interrupts
899.75 ± 4% +255.2% 3196 ± 46% interrupts.CPU111.CAL:Function_call_interrupts
49271 ± 3% +986.9% 535521 ± 22% interrupts.CPU111.LOC:Local_timer_interrupts
66.50 ±151% +3644.0% 2489 ± 82% interrupts.CPU111.RES:Rescheduling_interrupts
902.00 ± 4% +225.4% 2935 ± 40% interrupts.CPU112.CAL:Function_call_interrupts
49268 ± 3% +987.5% 535818 ± 22% interrupts.CPU112.LOC:Local_timer_interrupts
120.75 ±155% +1744.1% 2226 ± 79% interrupts.CPU112.RES:Rescheduling_interrupts
909.00 ± 11% +227.9% 2980 ± 72% interrupts.CPU113.CAL:Function_call_interrupts
49270 ± 3% +988.6% 536338 ± 21% interrupts.CPU113.LOC:Local_timer_interrupts
143.50 ±171% +1817.2% 2751 ±103% interrupts.CPU113.RES:Rescheduling_interrupts
867.50 ± 4% +159.9% 2254 ± 55% interrupts.CPU114.CAL:Function_call_interrupts
49289 ± 3% +987.4% 535968 ± 22% interrupts.CPU114.LOC:Local_timer_interrupts
2.00 ±106% +86737.5% 1736 ±107% interrupts.CPU114.RES:Rescheduling_interrupts
891.00 ± 6% +197.1% 2647 ± 57% interrupts.CPU115.CAL:Function_call_interrupts
49260 ± 3% +988.6% 536235 ± 21% interrupts.CPU115.LOC:Local_timer_interrupts
45.50 ±138% +3636.8% 1700 ± 92% interrupts.CPU115.RES:Rescheduling_interrupts
871.50 ± 4% +230.5% 2880 ± 78% interrupts.CPU116.CAL:Function_call_interrupts
49265 ± 3% +989.0% 536484 ± 21% interrupts.CPU116.LOC:Local_timer_interrupts
1.50 ± 74% +1.7e+05% 2600 ±116% interrupts.CPU116.RES:Rescheduling_interrupts
869.25 ± 4% +337.2% 3800 ± 74% interrupts.CPU117.CAL:Function_call_interrupts
49274 ± 3% +986.5% 535378 ± 22% interrupts.CPU117.LOC:Local_timer_interrupts
0.25 ±173% +2.2e+05% 561.75 ± 83% interrupts.CPU117.NMI:Non-maskable_interrupts
0.25 ±173% +2.2e+05% 561.75 ± 83% interrupts.CPU117.PMI:Performance_monitoring_interrupts
896.50 ± 7% +375.8% 4265 ± 79% interrupts.CPU118.CAL:Function_call_interrupts
49275 ± 3% +988.4% 536312 ± 21% interrupts.CPU118.LOC:Local_timer_interrupts
17.00 ±163% +17627.9% 3013 ±122% interrupts.CPU118.RES:Rescheduling_interrupts
873.00 ± 6% +292.3% 3424 ± 99% interrupts.CPU119.CAL:Function_call_interrupts
49264 ± 3% +988.6% 536311 ± 21% interrupts.CPU119.LOC:Local_timer_interrupts
1.50 ±173% +23416.7% 352.75 ± 65% interrupts.CPU119.NMI:Non-maskable_interrupts
1.50 ±173% +23416.7% 352.75 ± 65% interrupts.CPU119.PMI:Performance_monitoring_interrupts
2.00 ± 61% +1.5e+05% 3040 ±134% interrupts.CPU119.RES:Rescheduling_interrupts
104.25 ± 27% +6685.4% 7073 ±151% interrupts.CPU12.33:PCI-MSI.524291-edge.eth0-TxRx-2
49217 ± 3% +990.1% 536526 ± 21% interrupts.CPU12.LOC:Local_timer_interrupts
49259 ± 3% +1092.8% 587552 ± 4% interrupts.CPU120.LOC:Local_timer_interrupts
0.75 ± 57% +1.1e+05% 812.00 ±171% interrupts.CPU120.RES:Rescheduling_interrupts
49258 ± 3% +1099.9% 591035 ± 3% interrupts.CPU121.LOC:Local_timer_interrupts
0.25 ±173% +2.4e+05% 604.25 ±171% interrupts.CPU121.RES:Rescheduling_interrupts
49250 ± 3% +1100.0% 590986 ± 3% interrupts.CPU122.LOC:Local_timer_interrupts
0.50 ±173% +34600.0% 173.50 ± 34% interrupts.CPU122.NMI:Non-maskable_interrupts
0.50 ±173% +34600.0% 173.50 ± 34% interrupts.CPU122.PMI:Performance_monitoring_interrupts
0.75 ±173% +1e+05% 762.50 ±170% interrupts.CPU122.RES:Rescheduling_interrupts
49270 ± 3% +1097.6% 590043 ± 4% interrupts.CPU123.LOC:Local_timer_interrupts
1.00 ± 70% +75425.0% 755.25 ±170% interrupts.CPU123.RES:Rescheduling_interrupts
49146 ± 2% +1101.2% 590359 ± 4% interrupts.CPU124.LOC:Local_timer_interrupts
0.25 ±173% +54900.0% 137.50 ± 42% interrupts.CPU124.NMI:Non-maskable_interrupts
0.25 ±173% +54900.0% 137.50 ± 42% interrupts.CPU124.PMI:Performance_monitoring_interrupts
853.50 +62.1% 1383 ± 58% interrupts.CPU125.CAL:Function_call_interrupts
49248 ± 3% +1095.0% 588506 ± 4% interrupts.CPU125.LOC:Local_timer_interrupts
1.75 ± 47% +23000.0% 404.25 ±170% interrupts.CPU125.RES:Rescheduling_interrupts
839.00 ± 3% +138.3% 1999 ± 73% interrupts.CPU126.CAL:Function_call_interrupts
49235 ± 3% +1095.9% 588790 ± 4% interrupts.CPU126.LOC:Local_timer_interrupts
1.75 ± 62% +44085.7% 773.25 ±171% interrupts.CPU126.RES:Rescheduling_interrupts
847.75 +129.3% 1943 ± 59% interrupts.CPU127.CAL:Function_call_interrupts
49233 ± 3% +1092.1% 586903 ± 5% interrupts.CPU127.LOC:Local_timer_interrupts
1.50 ± 74% +21533.3% 324.50 ±165% interrupts.CPU127.RES:Rescheduling_interrupts
49238 ± 3% +1089.3% 585574 ± 5% interrupts.CPU128.LOC:Local_timer_interrupts
49155 ± 3% +1096.4% 588084 ± 4% interrupts.CPU129.LOC:Local_timer_interrupts
76.00 ± 51% +3560.9% 2782 ±131% interrupts.CPU13.34:PCI-MSI.524292-edge.eth0-TxRx-3
49321 ± 3% +991.3% 538223 ± 22% interrupts.CPU13.LOC:Local_timer_interrupts
17.75 ±101% +4026.8% 732.50 ± 83% interrupts.CPU13.RES:Rescheduling_interrupts
849.75 ± 2% +63.2% 1386 ± 59% interrupts.CPU130.CAL:Function_call_interrupts
49248 ± 3% +1094.3% 588155 ± 4% interrupts.CPU130.LOC:Local_timer_interrupts
49233 ± 3% +1098.6% 590125 ± 4% interrupts.CPU131.LOC:Local_timer_interrupts
1.75 ± 74% +65700.0% 1151 ±171% interrupts.CPU131.RES:Rescheduling_interrupts
49232 ± 3% +1094.0% 587826 ± 4% interrupts.CPU132.LOC:Local_timer_interrupts
842.75 ± 2% +172.7% 2298 ±104% interrupts.CPU133.CAL:Function_call_interrupts
49233 ± 3% +1100.3% 590953 ± 3% interrupts.CPU133.LOC:Local_timer_interrupts
1.50 ± 57% +74066.7% 1112 ±171% interrupts.CPU133.RES:Rescheduling_interrupts
862.75 +109.0% 1803 ± 85% interrupts.CPU134.CAL:Function_call_interrupts
49237 ± 3% +1099.6% 590629 ± 3% interrupts.CPU134.LOC:Local_timer_interrupts
850.00 ± 2% +205.9% 2599 ± 68% interrupts.CPU135.CAL:Function_call_interrupts
49233 ± 3% +1096.3% 588961 ± 4% interrupts.CPU135.LOC:Local_timer_interrupts
1.50 ± 74% +63083.3% 947.75 ±169% interrupts.CPU135.RES:Rescheduling_interrupts
852.50 +94.8% 1660 ± 76% interrupts.CPU136.CAL:Function_call_interrupts
49223 ± 3% +1097.2% 589295 ± 4% interrupts.CPU136.LOC:Local_timer_interrupts
1.25 ± 87% +48480.0% 607.25 ±169% interrupts.CPU136.RES:Rescheduling_interrupts
49237 ± 3% +1093.0% 587396 ± 4% interrupts.CPU137.LOC:Local_timer_interrupts
1.75 ± 24% +10814.3% 191.00 ±166% interrupts.CPU137.RES:Rescheduling_interrupts
848.25 ± 2% +116.7% 1838 ± 55% interrupts.CPU138.CAL:Function_call_interrupts
49234 ± 3% +1092.7% 587201 ± 5% interrupts.CPU138.LOC:Local_timer_interrupts
1.50 ± 74% +48583.3% 730.25 ±105% interrupts.CPU138.RES:Rescheduling_interrupts
847.75 ± 2% +54.2% 1307 ± 52% interrupts.CPU139.CAL:Function_call_interrupts
49225 ± 3% +1091.9% 586707 ± 5% interrupts.CPU139.LOC:Local_timer_interrupts
3.75 ± 22% +8800.0% 333.75 ±168% interrupts.CPU139.RES:Rescheduling_interrupts
1226 ± 37% +167.1% 3275 ± 57% interrupts.CPU14.CAL:Function_call_interrupts
49381 ± 3% +986.7% 536610 ± 21% interrupts.CPU14.LOC:Local_timer_interrupts
0.50 ±100% +94550.0% 473.25 ± 60% interrupts.CPU14.NMI:Non-maskable_interrupts
0.50 ±100% +94550.0% 473.25 ± 60% interrupts.CPU14.PMI:Performance_monitoring_interrupts
8.75 ± 79% +9505.7% 840.50 ± 82% interrupts.CPU14.RES:Rescheduling_interrupts
49230 ± 3% +1096.2% 588919 ± 4% interrupts.CPU140.LOC:Local_timer_interrupts
49249 ± 3% +1098.5% 590271 ± 4% interrupts.CPU141.LOC:Local_timer_interrupts
1.50 ±137% +38050.0% 572.25 ±170% interrupts.CPU141.RES:Rescheduling_interrupts
49235 ± 3% +1093.6% 587678 ± 4% interrupts.CPU142.LOC:Local_timer_interrupts
3.00 ± 33% +21441.7% 646.25 ±170% interrupts.CPU142.RES:Rescheduling_interrupts
49232 ± 3% +1095.9% 588791 ± 4% interrupts.CPU143.LOC:Local_timer_interrupts
0.75 ±110% +1.3e+05% 939.50 ±171% interrupts.CPU143.RES:Rescheduling_interrupts
858.00 ± 3% +69.4% 1453 ± 65% interrupts.CPU144.CAL:Function_call_interrupts
49259 ± 3% +975.2% 529624 ± 24% interrupts.CPU144.LOC:Local_timer_interrupts
0.25 ±173% +1.5e+05% 382.00 ±118% interrupts.CPU144.NMI:Non-maskable_interrupts
0.25 ±173% +1.5e+05% 382.00 ±118% interrupts.CPU144.PMI:Performance_monitoring_interrupts
2.50 ± 91% +29930.0% 750.75 ±170% interrupts.CPU144.RES:Rescheduling_interrupts
853.25 ± 4% +73.1% 1476 ± 66% interrupts.CPU145.CAL:Function_call_interrupts
49240 ± 3% +976.4% 530009 ± 24% interrupts.CPU145.LOC:Local_timer_interrupts
1.00 ± 70% +65025.0% 651.25 ±170% interrupts.CPU145.RES:Rescheduling_interrupts
49234 ± 3% +976.3% 529907 ± 24% interrupts.CPU146.LOC:Local_timer_interrupts
1.75 ± 74% +57600.0% 1009 ±171% interrupts.CPU146.RES:Rescheduling_interrupts
49231 ± 3% +976.5% 529979 ± 24% interrupts.CPU147.LOC:Local_timer_interrupts
1.75 ± 84% +23085.7% 405.75 ±161% interrupts.CPU147.RES:Rescheduling_interrupts
849.75 ± 4% +97.5% 1678 ± 79% interrupts.CPU148.CAL:Function_call_interrupts
49231 ± 3% +977.0% 530203 ± 24% interrupts.CPU148.LOC:Local_timer_interrupts
2.00 ± 61% +45387.5% 909.75 ±167% interrupts.CPU148.RES:Rescheduling_interrupts
861.00 ± 2% +72.4% 1484 ± 65% interrupts.CPU149.CAL:Function_call_interrupts
49230 ± 3% +976.8% 530110 ± 24% interrupts.CPU149.LOC:Local_timer_interrupts
1.75 ± 47% +33414.3% 586.50 ±168% interrupts.CPU149.RES:Rescheduling_interrupts
49385 ± 3% +984.8% 535734 ± 22% interrupts.CPU15.LOC:Local_timer_interrupts
0.25 ±173% +1.6e+05% 392.00 ± 91% interrupts.CPU15.NMI:Non-maskable_interrupts
0.25 ±173% +1.6e+05% 392.00 ± 91% interrupts.CPU15.PMI:Performance_monitoring_interrupts
49233 ± 3% +976.8% 530160 ± 24% interrupts.CPU150.LOC:Local_timer_interrupts
1.75 ± 47% +44571.4% 781.75 ±167% interrupts.CPU150.RES:Rescheduling_interrupts
49234 ± 3% +977.4% 530438 ± 24% interrupts.CPU151.LOC:Local_timer_interrupts
2.00 ± 79% +36600.0% 734.00 ±166% interrupts.CPU151.RES:Rescheduling_interrupts
859.75 ± 3% +139.2% 2056 ± 70% interrupts.CPU152.CAL:Function_call_interrupts
49244 ± 3% +977.2% 530471 ± 24% interrupts.CPU152.LOC:Local_timer_interrupts
1.75 ± 74% +58214.3% 1020 ±169% interrupts.CPU152.RES:Rescheduling_interrupts
49242 ± 3% +976.2% 529944 ± 24% interrupts.CPU153.LOC:Local_timer_interrupts
49231 ± 3% +976.3% 529853 ± 24% interrupts.CPU154.LOC:Local_timer_interrupts
861.50 ± 3% +99.8% 1721 ± 81% interrupts.CPU155.CAL:Function_call_interrupts
49232 ± 3% +976.2% 529830 ± 24% interrupts.CPU155.LOC:Local_timer_interrupts
2.00 ± 61% +41700.0% 836.00 ±170% interrupts.CPU155.RES:Rescheduling_interrupts
846.50 ± 3% +136.5% 2002 ± 95% interrupts.CPU156.CAL:Function_call_interrupts
49230 ± 3% +977.4% 530387 ± 24% interrupts.CPU156.LOC:Local_timer_interrupts
2.25 ±110% +57455.6% 1295 ±172% interrupts.CPU156.RES:Rescheduling_interrupts
49229 ± 3% +977.1% 530266 ± 24% interrupts.CPU157.LOC:Local_timer_interrupts
1.50 ±137% +97883.3% 1469 ±171% interrupts.CPU157.RES:Rescheduling_interrupts
49230 ± 3% +976.5% 529955 ± 24% interrupts.CPU158.LOC:Local_timer_interrupts
1.50 ±100% +1e+05% 1546 ±171% interrupts.CPU158.RES:Rescheduling_interrupts
49242 ± 3% +976.6% 530162 ± 24% interrupts.CPU159.LOC:Local_timer_interrupts
1.25 ± 66% +1.2e+05% 1515 ±172% interrupts.CPU159.RES:Rescheduling_interrupts
834.75 ± 2% +240.9% 2845 ± 54% interrupts.CPU16.CAL:Function_call_interrupts
49318 ± 3% +985.0% 535107 ± 22% interrupts.CPU16.LOC:Local_timer_interrupts
1.50 ± 57% +1.7e+05% 2587 ± 86% interrupts.CPU16.RES:Rescheduling_interrupts
854.00 ± 3% +69.0% 1443 ± 64% interrupts.CPU160.CAL:Function_call_interrupts
49229 ± 3% +977.1% 530250 ± 24% interrupts.CPU160.LOC:Local_timer_interrupts
0.75 ± 57% +76633.3% 575.50 ±171% interrupts.CPU160.RES:Rescheduling_interrupts
865.00 ± 4% +191.4% 2520 ±110% interrupts.CPU161.CAL:Function_call_interrupts
49229 ± 3% +977.0% 530180 ± 24% interrupts.CPU161.LOC:Local_timer_interrupts
1.75 ± 62% +95642.9% 1675 ±172% interrupts.CPU161.RES:Rescheduling_interrupts
49231 ± 3% +976.4% 529906 ± 24% interrupts.CPU162.LOC:Local_timer_interrupts
49737 ± 4% +965.5% 529926 ± 24% interrupts.CPU163.LOC:Local_timer_interrupts
2.00 ± 35% +55175.0% 1105 ±172% interrupts.CPU163.RES:Rescheduling_interrupts
49232 ± 3% +976.8% 530140 ± 24% interrupts.CPU164.LOC:Local_timer_interrupts
49231 ± 3% +976.8% 530124 ± 24% interrupts.CPU165.LOC:Local_timer_interrupts
0.25 ±173% +8.5e+05% 2134 ±172% interrupts.CPU165.RES:Rescheduling_interrupts
867.25 ± 3% +138.7% 2070 ± 61% interrupts.CPU166.CAL:Function_call_interrupts
49229 ± 3% +977.3% 530338 ± 24% interrupts.CPU166.LOC:Local_timer_interrupts
0.50 ±173% +29000.0% 145.50 ± 26% interrupts.CPU166.NMI:Non-maskable_interrupts
0.50 ±173% +29000.0% 145.50 ± 26% interrupts.CPU166.PMI:Performance_monitoring_interrupts
1.75 ± 47% +63400.0% 1111 ±126% interrupts.CPU166.RES:Rescheduling_interrupts
846.25 ± 4% +15.2% 974.75 ± 13% interrupts.CPU167.CAL:Function_call_interrupts
49230 ± 3% +976.9% 530156 ± 24% interrupts.CPU167.LOC:Local_timer_interrupts
1.25 ± 66% +7940.0% 100.50 ±150% interrupts.CPU167.RES:Rescheduling_interrupts
49280 ± 3% +977.0% 530766 ± 23% interrupts.CPU168.LOC:Local_timer_interrupts
49295 ± 3% +977.3% 531046 ± 23% interrupts.CPU169.LOC:Local_timer_interrupts
838.75 +383.2% 4053 ± 68% interrupts.CPU17.CAL:Function_call_interrupts
49310 ± 3% +986.6% 535789 ± 22% interrupts.CPU17.LOC:Local_timer_interrupts
1.75 ± 47% +1.9e+05% 3398 ±104% interrupts.CPU17.RES:Rescheduling_interrupts
49318 ± 3% +985.5% 535352 ± 22% interrupts.CPU170.LOC:Local_timer_interrupts
49299 ± 3% +977.7% 531310 ± 23% interrupts.CPU171.LOC:Local_timer_interrupts
49316 ± 3% +976.6% 530933 ± 23% interrupts.CPU172.LOC:Local_timer_interrupts
49258 ± 3% +977.8% 530893 ± 23% interrupts.CPU173.LOC:Local_timer_interrupts
49244 ± 3% +977.8% 530761 ± 23% interrupts.CPU174.LOC:Local_timer_interrupts
49249 ± 3% +983.5% 533635 ± 22% interrupts.CPU175.LOC:Local_timer_interrupts
1.75 ±173% +10557.1% 186.50 ± 20% interrupts.CPU175.NMI:Non-maskable_interrupts
1.75 ±173% +10557.1% 186.50 ± 20% interrupts.CPU175.PMI:Performance_monitoring_interrupts
49251 ± 3% +978.1% 530997 ± 23% interrupts.CPU176.LOC:Local_timer_interrupts
49280 ± 3% +977.6% 531024 ± 23% interrupts.CPU177.LOC:Local_timer_interrupts
49257 ± 3% +978.0% 530974 ± 23% interrupts.CPU178.LOC:Local_timer_interrupts
49262 ± 3% +977.7% 530914 ± 23% interrupts.CPU179.LOC:Local_timer_interrupts
817.75 ± 2% +338.9% 3589 ±104% interrupts.CPU18.CAL:Function_call_interrupts
49319 ± 3% +986.4% 535796 ± 22% interrupts.CPU18.LOC:Local_timer_interrupts
49258 ± 3% +977.9% 530945 ± 23% interrupts.CPU180.LOC:Local_timer_interrupts
49265 ± 3% +977.4% 530813 ± 23% interrupts.CPU181.LOC:Local_timer_interrupts
0.75 ±173% +24666.7% 185.75 ± 21% interrupts.CPU181.NMI:Non-maskable_interrupts
1.00 ±173% +18475.0% 185.75 ± 21% interrupts.CPU181.PMI:Performance_monitoring_interrupts
49255 ± 3% +976.8% 530355 ± 24% interrupts.CPU182.LOC:Local_timer_interrupts
0.25 ±173% +41200.0% 103.25 ±163% interrupts.CPU182.RES:Rescheduling_interrupts
49261 ± 3% +977.8% 530924 ± 23% interrupts.CPU183.LOC:Local_timer_interrupts
49261 ± 3% +976.9% 530468 ± 24% interrupts.CPU184.LOC:Local_timer_interrupts
49256 ± 3% +977.2% 530580 ± 24% interrupts.CPU185.LOC:Local_timer_interrupts
49239 ± 3% +976.7% 530145 ± 24% interrupts.CPU186.LOC:Local_timer_interrupts
49253 ± 3% +976.7% 530291 ± 24% interrupts.CPU187.LOC:Local_timer_interrupts
0.75 ±173% +17800.0% 134.25 ± 36% interrupts.CPU187.NMI:Non-maskable_interrupts
0.75 ±173% +17800.0% 134.25 ± 36% interrupts.CPU187.PMI:Performance_monitoring_interrupts
201.25 ± 21% -49.7% 101.25 ± 53% interrupts.CPU187.TLB:TLB_shootdowns
49258 ± 3% +975.9% 529965 ± 24% interrupts.CPU188.LOC:Local_timer_interrupts
49243 ± 3% +976.4% 530057 ± 24% interrupts.CPU189.LOC:Local_timer_interrupts
845.00 ± 2% +373.0% 3997 ± 77% interrupts.CPU19.CAL:Function_call_interrupts
49293 ± 3% +986.8% 535726 ± 22% interrupts.CPU19.LOC:Local_timer_interrupts
0.50 ±173% +62850.0% 314.75 ± 56% interrupts.CPU19.NMI:Non-maskable_interrupts
0.50 ±173% +62850.0% 314.75 ± 56% interrupts.CPU19.PMI:Performance_monitoring_interrupts
49253 ± 3% +977.3% 530619 ± 23% interrupts.CPU190.LOC:Local_timer_interrupts
4.25 ± 73% +1882.4% 84.25 ±150% interrupts.CPU190.RES:Rescheduling_interrupts
49292 ± 3% +976.9% 530802 ± 24% interrupts.CPU191.LOC:Local_timer_interrupts
1.00 +10875.0% 109.75 ± 10% interrupts.CPU191.NMI:Non-maskable_interrupts
1.00 +10875.0% 109.75 ± 10% interrupts.CPU191.PMI:Performance_monitoring_interrupts
49407 ± 3% +986.3% 536729 ± 21% interrupts.CPU2.LOC:Local_timer_interrupts
15.75 ± 83% +2901.6% 472.75 ±128% interrupts.CPU2.RES:Rescheduling_interrupts
880.50 ± 12% +327.7% 3766 ± 46% interrupts.CPU20.CAL:Function_call_interrupts
49280 ± 3% +989.2% 536784 ± 21% interrupts.CPU20.LOC:Local_timer_interrupts
2.50 ± 44% +1.3e+05% 3186 ± 74% interrupts.CPU20.RES:Rescheduling_interrupts
840.25 ± 3% +449.4% 4616 ± 52% interrupts.CPU21.CAL:Function_call_interrupts
49301 ± 3% +988.3% 536547 ± 21% interrupts.CPU21.LOC:Local_timer_interrupts
0.25 ±173% +3.1e+05% 771.25 ± 94% interrupts.CPU21.NMI:Non-maskable_interrupts
0.25 ±173% +3.1e+05% 771.25 ± 94% interrupts.CPU21.PMI:Performance_monitoring_interrupts
3.50 ±111% +96621.4% 3385 ±102% interrupts.CPU21.RES:Rescheduling_interrupts
49349 ± 3% +984.7% 535289 ± 22% interrupts.CPU22.LOC:Local_timer_interrupts
1.75 ± 74% +1.9e+05% 3255 ± 87% interrupts.CPU22.RES:Rescheduling_interrupts
830.75 ± 2% +289.2% 3233 ± 60% interrupts.CPU23.CAL:Function_call_interrupts
49327 ± 3% +987.0% 536170 ± 21% interrupts.CPU23.LOC:Local_timer_interrupts
0.25 ±173% +1.7e+05% 435.75 ± 95% interrupts.CPU23.NMI:Non-maskable_interrupts
0.25 ±173% +1.7e+05% 435.75 ± 95% interrupts.CPU23.PMI:Performance_monitoring_interrupts
3.25 ± 70% +94638.5% 3079 ± 90% interrupts.CPU23.RES:Rescheduling_interrupts
49319 ± 3% +1101.4% 592500 ± 3% interrupts.CPU24.LOC:Local_timer_interrupts
2.25 ± 48% +15066.7% 341.25 ±167% interrupts.CPU24.RES:Rescheduling_interrupts
49277 ± 3% +1103.6% 593107 ± 3% interrupts.CPU25.LOC:Local_timer_interrupts
0.50 ±100% +25600.0% 128.50 ±157% interrupts.CPU25.RES:Rescheduling_interrupts
810.75 ± 2% +769.0% 7045 ±147% interrupts.CPU26.CAL:Function_call_interrupts
49295 ± 3% +1102.1% 592559 ± 3% interrupts.CPU26.LOC:Local_timer_interrupts
0.00 +1.9e+104% 189.00 ± 33% interrupts.CPU26.NMI:Non-maskable_interrupts
0.00 +1.9e+104% 189.00 ± 33% interrupts.CPU26.PMI:Performance_monitoring_interrupts
0.75 ±110% +20766.7% 156.50 ±162% interrupts.CPU26.RES:Rescheduling_interrupts
876.25 ± 10% +1298.6% 12255 ±143% interrupts.CPU27.CAL:Function_call_interrupts
49253 ± 3% +1102.4% 592217 ± 3% interrupts.CPU27.LOC:Local_timer_interrupts
0.00 +1.7e+104% 172.50 ± 37% interrupts.CPU27.NMI:Non-maskable_interrupts
0.00 +1.7e+104% 172.50 ± 37% interrupts.CPU27.PMI:Performance_monitoring_interrupts
2.25 ± 72% +9722.2% 221.00 ±160% interrupts.CPU27.RES:Rescheduling_interrupts
49252 ± 3% +1100.1% 591086 ± 3% interrupts.CPU28.LOC:Local_timer_interrupts
0.50 ±100% +35800.0% 179.50 ±165% interrupts.CPU28.RES:Rescheduling_interrupts
49288 ± 3% +1099.7% 591292 ± 3% interrupts.CPU29.LOC:Local_timer_interrupts
1.25 ±103% +9940.0% 125.50 ±161% interrupts.CPU29.RES:Rescheduling_interrupts
49318 ± 3% +990.2% 537664 ± 22% interrupts.CPU3.LOC:Local_timer_interrupts
0.25 ±173% +2.5e+05% 628.25 ± 66% interrupts.CPU3.NMI:Non-maskable_interrupts
0.25 ±173% +2.5e+05% 628.25 ± 66% interrupts.CPU3.PMI:Performance_monitoring_interrupts
13.75 ± 78% +5141.8% 720.75 ± 93% interrupts.CPU3.RES:Rescheduling_interrupts
49291 ± 3% +1101.3% 592123 ± 3% interrupts.CPU30.LOC:Local_timer_interrupts
0.25 ±173% +81200.0% 203.25 ±165% interrupts.CPU30.RES:Rescheduling_interrupts
49290 ± 3% +1102.1% 592509 ± 3% interrupts.CPU31.LOC:Local_timer_interrupts
0.25 ±173% +70300.0% 176.00 ± 19% interrupts.CPU31.NMI:Non-maskable_interrupts
0.25 ±173% +70300.0% 176.00 ± 19% interrupts.CPU31.PMI:Performance_monitoring_interrupts
0.25 ±173% +96300.0% 241.00 ±167% interrupts.CPU31.RES:Rescheduling_interrupts
49275 ± 3% +1100.9% 591757 ± 3% interrupts.CPU32.LOC:Local_timer_interrupts
829.00 +82.7% 1514 ± 66% interrupts.CPU33.CAL:Function_call_interrupts
49261 ± 3% +1096.7% 589490 ± 4% interrupts.CPU33.LOC:Local_timer_interrupts
0.25 ±173% +70500.0% 176.50 ± 20% interrupts.CPU33.NMI:Non-maskable_interrupts
0.25 ±173% +70500.0% 176.50 ± 20% interrupts.CPU33.PMI:Performance_monitoring_interrupts
1.00 ± 70% +11100.0% 112.00 ±144% interrupts.CPU33.RES:Rescheduling_interrupts
49275 ± 3% +1096.4% 589503 ± 4% interrupts.CPU34.LOC:Local_timer_interrupts
3.00 ±122% +2333.3% 73.00 ±147% interrupts.CPU34.RES:Rescheduling_interrupts
816.00 ± 2% +120.3% 1798 ± 87% interrupts.CPU35.CAL:Function_call_interrupts
49277 ± 3% +1099.3% 590974 ± 3% interrupts.CPU35.LOC:Local_timer_interrupts
1.25 ± 87% +12140.0% 153.00 ±155% interrupts.CPU35.RES:Rescheduling_interrupts
50.00 ± 90% +548.5% 324.25 ±115% interrupts.CPU35.TLB:TLB_shootdowns
49271 ± 3% +1094.2% 588423 ± 4% interrupts.CPU36.LOC:Local_timer_interrupts
0.75 ±110% +28466.7% 214.25 ±159% interrupts.CPU36.RES:Rescheduling_interrupts
49275 ± 3% +1100.6% 591617 ± 3% interrupts.CPU37.LOC:Local_timer_interrupts
1.50 ±100% +8150.0% 123.75 ±162% interrupts.CPU37.RES:Rescheduling_interrupts
830.00 ± 2% +121.1% 1835 ± 87% interrupts.CPU38.CAL:Function_call_interrupts
49271 ± 3% +1100.3% 591415 ± 3% interrupts.CPU38.LOC:Local_timer_interrupts
0.75 ±173% +25933.3% 195.25 ±165% interrupts.CPU38.RES:Rescheduling_interrupts
49257 ± 3% +1099.4% 590788 ± 3% interrupts.CPU39.LOC:Local_timer_interrupts
1.50 ±100% +17783.3% 268.25 ±167% interrupts.CPU39.RES:Rescheduling_interrupts
49326 ± 3% +989.1% 537216 ± 21% interrupts.CPU4.LOC:Local_timer_interrupts
9.75 ± 74% +11100.0% 1092 ± 98% interrupts.CPU4.RES:Rescheduling_interrupts
49270 ± 3% +1103.2% 592804 ± 3% interrupts.CPU40.LOC:Local_timer_interrupts
2.00 ±117% +86125.0% 1724 ±172% interrupts.CPU40.RES:Rescheduling_interrupts
833.75 ± 2% +108.8% 1741 ± 83% interrupts.CPU41.CAL:Function_call_interrupts
49275 ± 3% +1097.8% 590204 ± 4% interrupts.CPU41.LOC:Local_timer_interrupts
49276 ± 3% +1099.1% 590869 ± 3% interrupts.CPU42.LOC:Local_timer_interrupts
49273 ± 3% +1101.7% 592123 ± 3% interrupts.CPU43.LOC:Local_timer_interrupts
0.25 ±173% +2e+05% 491.50 ±171% interrupts.CPU43.RES:Rescheduling_interrupts
49301 ± 3% +1094.9% 589108 ± 4% interrupts.CPU44.LOC:Local_timer_interrupts
49291 ± 3% +1097.1% 590062 ± 4% interrupts.CPU45.LOC:Local_timer_interrupts
0.75 ± 57% +1.1e+05% 852.75 ±172% interrupts.CPU45.RES:Rescheduling_interrupts
831.25 +132.2% 1930 ± 92% interrupts.CPU46.CAL:Function_call_interrupts
49293 ± 3% +1095.6% 589326 ± 4% interrupts.CPU46.LOC:Local_timer_interrupts
49281 ± 3% +1098.3% 590561 ± 3% interrupts.CPU47.LOC:Local_timer_interrupts
1.50 ±110% +33666.7% 506.50 ±170% interrupts.CPU47.RES:Rescheduling_interrupts
824.75 ± 3% +67.5% 1381 ± 60% interrupts.CPU48.CAL:Function_call_interrupts
49218 ± 3% +987.0% 534997 ± 24% interrupts.CPU48.LOC:Local_timer_interrupts
3.00 ± 62% +10675.0% 323.25 ±162% interrupts.CPU48.RES:Rescheduling_interrupts
49268 ± 3% +976.0% 530106 ± 24% interrupts.CPU49.LOC:Local_timer_interrupts
1.25 ±103% +9880.0% 124.75 ±150% interrupts.CPU49.RES:Rescheduling_interrupts
49370 ± 3% +986.9% 536578 ± 21% interrupts.CPU5.LOC:Local_timer_interrupts
0.25 ±173% +1.2e+05% 301.25 ± 28% interrupts.CPU5.NMI:Non-maskable_interrupts
0.25 ±173% +1.2e+05% 301.25 ± 28% interrupts.CPU5.PMI:Performance_monitoring_interrupts
12.25 ±114% +5344.9% 667.00 ± 92% interrupts.CPU5.RES:Rescheduling_interrupts
49249 ± 3% +976.2% 530004 ± 24% interrupts.CPU50.LOC:Local_timer_interrupts
1.50 ± 74% +57133.3% 858.50 ±170% interrupts.CPU50.RES:Rescheduling_interrupts
816.00 +121.5% 1807 ± 89% interrupts.CPU51.CAL:Function_call_interrupts
49248 ± 3% +976.5% 530166 ± 24% interrupts.CPU51.LOC:Local_timer_interrupts
1.25 ± 34% +30040.0% 376.75 ±167% interrupts.CPU51.RES:Rescheduling_interrupts
820.25 ± 2% +82.7% 1498 ± 70% interrupts.CPU52.CAL:Function_call_interrupts
49250 ± 3% +976.4% 530138 ± 24% interrupts.CPU52.LOC:Local_timer_interrupts
0.25 ±173% +54600.0% 136.75 ± 29% interrupts.CPU52.NMI:Non-maskable_interrupts
0.25 ±173% +54600.0% 136.75 ± 29% interrupts.CPU52.PMI:Performance_monitoring_interrupts
1.50 ± 57% +12666.7% 191.50 ±160% interrupts.CPU52.RES:Rescheduling_interrupts
49249 ± 3% +976.6% 530214 ± 24% interrupts.CPU53.LOC:Local_timer_interrupts
1.00 ± 70% +44050.0% 441.50 ±167% interrupts.CPU53.RES:Rescheduling_interrupts
815.25 ± 2% +88.4% 1536 ± 74% interrupts.CPU54.CAL:Function_call_interrupts
49268 ± 3% +976.4% 530302 ± 24% interrupts.CPU54.LOC:Local_timer_interrupts
1.75 ± 47% +11771.4% 207.75 ±163% interrupts.CPU54.RES:Rescheduling_interrupts
821.25 +155.8% 2100 ±100% interrupts.CPU55.CAL:Function_call_interrupts
49247 ± 3% +977.2% 530483 ± 24% interrupts.CPU55.LOC:Local_timer_interrupts
1.75 ± 47% +21528.6% 378.50 ±167% interrupts.CPU55.RES:Rescheduling_interrupts
49230 ± 3% +977.6% 530504 ± 24% interrupts.CPU56.LOC:Local_timer_interrupts
1.25 ± 66% +29400.0% 368.75 ±168% interrupts.CPU56.RES:Rescheduling_interrupts
819.50 +180.4% 2298 ±106% interrupts.CPU57.CAL:Function_call_interrupts
49244 ± 3% +976.7% 530203 ± 24% interrupts.CPU57.LOC:Local_timer_interrupts
0.25 ±173% +94600.0% 236.75 ± 70% interrupts.CPU57.NMI:Non-maskable_interrupts
0.25 ±173% +94600.0% 236.75 ± 70% interrupts.CPU57.PMI:Performance_monitoring_interrupts
1.50 ± 74% +32066.7% 482.50 ±169% interrupts.CPU57.RES:Rescheduling_interrupts
49308 ± 3% +975.0% 530051 ± 24% interrupts.CPU58.LOC:Local_timer_interrupts
0.25 ±173% +1e+05% 256.50 ± 79% interrupts.CPU58.NMI:Non-maskable_interrupts
0.25 ±173% +1e+05% 256.50 ± 79% interrupts.CPU58.PMI:Performance_monitoring_interrupts
3.50 ± 84% +12007.1% 423.75 ±167% interrupts.CPU58.RES:Rescheduling_interrupts
817.50 +103.4% 1663 ± 79% interrupts.CPU59.CAL:Function_call_interrupts
49266 ± 3% +975.9% 530043 ± 24% interrupts.CPU59.LOC:Local_timer_interrupts
2.00 ± 35% +14050.0% 283.00 ±158% interrupts.CPU59.RES:Rescheduling_interrupts
49309 ± 3% +988.0% 536501 ± 21% interrupts.CPU6.LOC:Local_timer_interrupts
8.25 ± 95% +9421.2% 785.50 ± 95% interrupts.CPU6.RES:Rescheduling_interrupts
826.75 ± 4% +137.0% 1959 ± 94% interrupts.CPU60.CAL:Function_call_interrupts
49247 ± 3% +977.2% 530470 ± 24% interrupts.CPU60.LOC:Local_timer_interrupts
2.25 ±148% +19777.8% 447.25 ±159% interrupts.CPU60.RES:Rescheduling_interrupts
49247 ± 3% +976.9% 530345 ± 24% interrupts.CPU61.LOC:Local_timer_interrupts
1.50 ± 33% +10083.3% 152.75 ±163% interrupts.CPU61.RES:Rescheduling_interrupts
824.50 ± 3% +107.1% 1707 ± 82% interrupts.CPU62.CAL:Function_call_interrupts
49247 ± 3% +976.6% 530191 ± 24% interrupts.CPU62.LOC:Local_timer_interrupts
0.25 ±173% +77000.0% 192.75 ± 49% interrupts.CPU62.NMI:Non-maskable_interrupts
0.25 ±173% +77000.0% 192.75 ± 49% interrupts.CPU62.PMI:Performance_monitoring_interrupts
1.50 ±100% +17050.0% 257.25 ±162% interrupts.CPU62.RES:Rescheduling_interrupts
819.50 ± 2% +121.9% 1818 ± 78% interrupts.CPU63.CAL:Function_call_interrupts
49245 ± 3% +976.9% 530307 ± 24% interrupts.CPU63.LOC:Local_timer_interrupts
0.25 ±173% +1.3e+05% 335.25 ±158% interrupts.CPU63.RES:Rescheduling_interrupts
811.25 +100.0% 1622 ± 78% interrupts.CPU64.CAL:Function_call_interrupts
49168 ± 3% +979.1% 530566 ± 24% interrupts.CPU64.LOC:Local_timer_interrupts
1.50 ±110% +47833.3% 719.00 ±168% interrupts.CPU64.RES:Rescheduling_interrupts
846.50 ± 3% +56.6% 1326 ± 54% interrupts.CPU65.CAL:Function_call_interrupts
49257 ± 3% +976.8% 530382 ± 24% interrupts.CPU65.LOC:Local_timer_interrupts
811.75 +96.9% 1598 ± 43% interrupts.CPU66.CAL:Function_call_interrupts
49255 ± 3% +976.3% 530149 ± 24% interrupts.CPU66.LOC:Local_timer_interrupts
820.75 ± 3% +77.7% 1458 ± 37% interrupts.CPU67.CAL:Function_call_interrupts
49265 ± 3% +976.1% 530131 ± 24% interrupts.CPU67.LOC:Local_timer_interrupts
3.00 ±122% +10550.0% 319.50 ±165% interrupts.CPU67.RES:Rescheduling_interrupts
805.25 +63.9% 1319 ± 57% interrupts.CPU68.CAL:Function_call_interrupts
49248 ± 3% +976.8% 530330 ± 24% interrupts.CPU68.LOC:Local_timer_interrupts
1.75 ± 84% +24000.0% 421.75 ±159% interrupts.CPU68.RES:Rescheduling_interrupts
830.00 ± 3% +49.3% 1239 ± 47% interrupts.CPU69.CAL:Function_call_interrupts
49270 ± 3% +976.2% 530235 ± 24% interrupts.CPU69.LOC:Local_timer_interrupts
1.00 ± 70% +32800.0% 329.00 ±166% interrupts.CPU69.RES:Rescheduling_interrupts
49299 ± 3% +998.3% 541453 ± 22% interrupts.CPU7.LOC:Local_timer_interrupts
11.75 ± 87% +4563.8% 548.00 ± 95% interrupts.CPU7.RES:Rescheduling_interrupts
836.50 +79.2% 1499 ± 70% interrupts.CPU70.CAL:Function_call_interrupts
49154 ± 3% +979.2% 530495 ± 24% interrupts.CPU70.LOC:Local_timer_interrupts
1.25 ±131% +48500.0% 607.50 ±169% interrupts.CPU70.RES:Rescheduling_interrupts
815.75 ± 2% +51.0% 1232 ± 47% interrupts.CPU71.CAL:Function_call_interrupts
49318 ± 3% +975.3% 530324 ± 24% interrupts.CPU71.LOC:Local_timer_interrupts
1.75 ± 84% +18185.7% 320.00 ±166% interrupts.CPU71.RES:Rescheduling_interrupts
49174 ± 3% +979.5% 530831 ± 23% interrupts.CPU72.LOC:Local_timer_interrupts
49328 ± 3% +976.8% 531165 ± 23% interrupts.CPU73.LOC:Local_timer_interrupts
49291 ± 3% +977.2% 530967 ± 23% interrupts.CPU74.LOC:Local_timer_interrupts
49322 ± 3% +975.3% 530368 ± 24% interrupts.CPU75.LOC:Local_timer_interrupts
144.50 ± 26% -58.0% 60.75 ± 63% interrupts.CPU75.TLB:TLB_shootdowns
49584 ± 2% +969.9% 530478 ± 24% interrupts.CPU76.LOC:Local_timer_interrupts
49412 ± 2% +974.6% 530972 ± 23% interrupts.CPU77.LOC:Local_timer_interrupts
49307 ± 3% +977.0% 531020 ± 23% interrupts.CPU78.LOC:Local_timer_interrupts
49278 ± 3% +975.8% 530161 ± 24% interrupts.CPU79.LOC:Local_timer_interrupts
49364 ± 3% +987.4% 536767 ± 21% interrupts.CPU8.LOC:Local_timer_interrupts
11.00 ±133% +5561.4% 622.75 ± 88% interrupts.CPU8.RES:Rescheduling_interrupts
49281 ± 3% +977.3% 530889 ± 23% interrupts.CPU80.LOC:Local_timer_interrupts
49273 ± 3% +977.7% 531031 ± 23% interrupts.CPU81.LOC:Local_timer_interrupts
157.50 ± 16% -57.9% 66.25 ± 74% interrupts.CPU81.TLB:TLB_shootdowns
49277 ± 3% +977.1% 530767 ± 23% interrupts.CPU82.LOC:Local_timer_interrupts
49260 ± 3% +977.9% 530978 ± 23% interrupts.CPU83.LOC:Local_timer_interrupts
140.50 ± 26% -42.9% 80.25 ± 40% interrupts.CPU83.TLB:TLB_shootdowns
49277 ± 3% +977.5% 530986 ± 23% interrupts.CPU84.LOC:Local_timer_interrupts
49276 ± 3% +977.1% 530739 ± 23% interrupts.CPU85.LOC:Local_timer_interrupts
49275 ± 3% +976.3% 530357 ± 24% interrupts.CPU86.LOC:Local_timer_interrupts
49271 ± 3% +976.2% 530266 ± 24% interrupts.CPU87.LOC:Local_timer_interrupts
49286 ± 3% +976.2% 530422 ± 24% interrupts.CPU88.LOC:Local_timer_interrupts
49277 ± 3% +975.9% 530159 ± 24% interrupts.CPU89.LOC:Local_timer_interrupts
49337 ± 3% +989.1% 537342 ± 21% interrupts.CPU9.LOC:Local_timer_interrupts
20.75 ± 99% +3696.4% 787.75 ± 85% interrupts.CPU9.RES:Rescheduling_interrupts
49269 ± 3% +976.4% 530355 ± 24% interrupts.CPU90.LOC:Local_timer_interrupts
49279 ± 3% +977.8% 531115 ± 23% interrupts.CPU91.LOC:Local_timer_interrupts
0.25 ±173% +74400.0% 186.25 ± 18% interrupts.CPU91.NMI:Non-maskable_interrupts
0.25 ±173% +74400.0% 186.25 ± 18% interrupts.CPU91.PMI:Performance_monitoring_interrupts
148.00 ± 28% -76.9% 34.25 ± 23% interrupts.CPU91.TLB:TLB_shootdowns
49272 ± 3% +977.3% 530826 ± 23% interrupts.CPU92.LOC:Local_timer_interrupts
49273 ± 3% +976.4% 530359 ± 24% interrupts.CPU93.LOC:Local_timer_interrupts
49289 ± 3% +976.2% 530443 ± 24% interrupts.CPU94.LOC:Local_timer_interrupts
49305 ± 3% +976.6% 530812 ± 23% interrupts.CPU95.LOC:Local_timer_interrupts
49303 ± 3% +986.8% 535801 ± 22% interrupts.CPU96.LOC:Local_timer_interrupts
0.25 ±173% +1.1e+05% 275.50 ± 54% interrupts.CPU96.NMI:Non-maskable_interrupts
0.25 ±173% +1.1e+05% 275.50 ± 54% interrupts.CPU96.PMI:Performance_monitoring_interrupts
15.25 ±150% +15845.9% 2431 ±106% interrupts.CPU96.RES:Rescheduling_interrupts
895.25 ± 5% +310.1% 3671 ± 77% interrupts.CPU97.CAL:Function_call_interrupts
49280 ± 3% +987.5% 535931 ± 22% interrupts.CPU97.LOC:Local_timer_interrupts
0.75 ±110% +67700.0% 508.50 ± 81% interrupts.CPU97.NMI:Non-maskable_interrupts
0.75 ±110% +67700.0% 508.50 ± 81% interrupts.CPU97.PMI:Performance_monitoring_interrupts
98.25 ±166% +3216.8% 3258 ± 99% interrupts.CPU97.RES:Rescheduling_interrupts
872.75 ± 4% +323.6% 3697 ± 74% interrupts.CPU98.CAL:Function_call_interrupts
49223 ± 3% +988.5% 535800 ± 22% interrupts.CPU98.LOC:Local_timer_interrupts
0.25 ±173% +2e+05% 495.75 ± 80% interrupts.CPU98.NMI:Non-maskable_interrupts
0.25 ±173% +2e+05% 495.75 ± 80% interrupts.CPU98.PMI:Performance_monitoring_interrupts
7.25 ±126% +50834.5% 3692 ± 97% interrupts.CPU98.RES:Rescheduling_interrupts
859.50 +385.8% 4175 ± 65% interrupts.CPU99.CAL:Function_call_interrupts
49463 ± 3% +991.5% 539879 ± 22% interrupts.CPU99.LOC:Local_timer_interrupts
0.25 ±173% +2.3e+05% 580.50 ± 62% interrupts.CPU99.NMI:Non-maskable_interrupts
0.25 ±173% +2.3e+05% 580.50 ± 62% interrupts.CPU99.PMI:Performance_monitoring_interrupts
9460816 ± 3% +1009.9% 1.05e+08 ± 18% interrupts.LOC:Local_timer_interrupts
0.00 +1.9e+104% 192.00 interrupts.MCP:Machine_check_polls
16.50 ± 15% +3.1e+05% 50541 ± 17% interrupts.NMI:Non-maskable_interrupts
16.75 ± 16% +3e+05% 50541 ± 17% interrupts.PMI:Performance_monitoring_interrupts
2453 ± 10% +7214.2% 179472 ± 23% interrupts.RES:Rescheduling_interrupts
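For reference, each comparison row above reads as "base mean ± relative stddev, relative change, patched mean ± relative stddev"; e.g. the interrupts.LOC total grows from ~9.46e6 to ~1.05e+08, reported as +1009.9%. A minimal stand-alone sketch of that arithmetic, with invented per-run samples purely for illustration (this is not lkp code):

/*
 * Illustrative only: how the "mean ± stddev%  +change%  mean ± stddev%"
 * columns in the lkp comparison above can be derived from raw per-run
 * samples.  The sample arrays are invented for the example.
 */
#include <math.h>
#include <stdio.h>

static double mean(const double *v, int n)
{
	double s = 0;
	int i;

	for (i = 0; i < n; i++)
		s += v[i];
	return s / n;
}

/* relative standard deviation in percent, as printed after the "±" */
static double stddev_pct(const double *v, int n)
{
	double m = mean(v, n), s = 0;
	int i;

	for (i = 0; i < n; i++)
		s += (v[i] - m) * (v[i] - m);
	return 100.0 * sqrt(s / n) / m;
}

int main(void)
{
	/* four runs each of base and patched kernels (made-up numbers) */
	double base[]    = { 9.3e6, 9.5e6, 9.4e6, 9.6e6 };
	double patched[] = { 1.00e8, 1.10e8, 1.05e8, 1.05e8 };
	double mb = mean(base, 4), mp = mean(patched, 4);

	printf("%.0f ± %.0f%%  %+.1f%%  %.2e ± %.0f%%\n",
	       mb, stddev_pct(base, 4),
	       100.0 * (mp - mb) / mb,	/* relative change column */
	       mp, stddev_pct(patched, 4));
	return 0;
}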
28.86 ± 54% -28.9 0.00 perf-profile.calltrace.cycles-pp.entry_SYSCALL_64_after_hwframe
28.86 ± 54% -28.9 0.00 perf-profile.calltrace.cycles-pp.do_syscall_64.entry_SYSCALL_64_after_hwframe
50.00 ± 23% -15.4 34.64 ± 13% perf-profile.calltrace.cycles-pp.intel_idle.cpuidle_enter_state.cpuidle_enter.do_idle.cpu_startup_entry
11.82 ± 31% -11.8 0.00 perf-profile.calltrace.cycles-pp.__libc_start_main
11.82 ± 31% -11.8 0.00 perf-profile.calltrace.cycles-pp.main.__libc_start_main
11.82 ± 31% -11.8 0.00 perf-profile.calltrace.cycles-pp.run_builtin.main.__libc_start_main
11.82 ± 31% -11.8 0.00 perf-profile.calltrace.cycles-pp.cmd_record.run_builtin.main.__libc_start_main
11.82 ± 31% -11.8 0.00 perf-profile.calltrace.cycles-pp.perf_mmap__read_head.perf_mmap__push.record__mmap_read_evlist.cmd_record.run_builtin
11.59 ±128% -11.6 0.00 perf-profile.calltrace.cycles-pp.ksys_read.do_syscall_64.entry_SYSCALL_64_after_hwframe
11.59 ±128% -11.6 0.00 perf-profile.calltrace.cycles-pp.proc_reg_read.vfs_read.ksys_read.do_syscall_64.entry_SYSCALL_64_after_hwframe
11.59 ±128% -11.6 0.00 perf-profile.calltrace.cycles-pp.seq_read.proc_reg_read.vfs_read.ksys_read.do_syscall_64
11.59 ±128% -11.6 0.00 perf-profile.calltrace.cycles-pp.vfs_read.ksys_read.do_syscall_64.entry_SYSCALL_64_after_hwframe
10.00 ±100% -10.0 0.00 perf-profile.calltrace.cycles-pp.__prepare_exit_to_usermode.do_syscall_64.entry_SYSCALL_64_after_hwframe
10.00 ±100% -10.0 0.00 perf-profile.calltrace.cycles-pp.do_signal.__prepare_exit_to_usermode.do_syscall_64.entry_SYSCALL_64_after_hwframe
10.00 ±100% -10.0 0.00 perf-profile.calltrace.cycles-pp.get_signal.do_signal.__prepare_exit_to_usermode.do_syscall_64.entry_SYSCALL_64_after_hwframe
10.00 ±100% -10.0 0.00 perf-profile.calltrace.cycles-pp.do_group_exit.get_signal.do_signal.__prepare_exit_to_usermode.do_syscall_64
10.00 ±100% -10.0 0.00 perf-profile.calltrace.cycles-pp.do_exit.do_group_exit.get_signal.do_signal.__prepare_exit_to_usermode
9.54 ± 4% -9.5 0.00 perf-profile.calltrace.cycles-pp.record__mmap_read_evlist.cmd_record.run_builtin.main.__libc_start_main
9.54 ± 4% -9.5 0.00 perf-profile.calltrace.cycles-pp.perf_mmap__push.record__mmap_read_evlist.cmd_record.run_builtin.main
9.54 ± 67% -9.5 0.00 perf-profile.calltrace.cycles-pp.__handle_mm_fault.handle_mm_fault.do_user_addr_fault.exc_page_fault.asm_exc_page_fault
9.54 ±100% -9.5 0.00 perf-profile.calltrace.cycles-pp.step_into.walk_component.link_path_walk.path_openat.do_filp_open
7.50 ±110% -7.5 0.00 perf-profile.calltrace.cycles-pp.mmput.do_exit.do_group_exit.get_signal.do_signal
7.50 ±110% -7.5 0.00 perf-profile.calltrace.cycles-pp.exit_mmap.mmput.do_exit.do_group_exit.get_signal
7.27 ± 57% -7.3 0.00 perf-profile.calltrace.cycles-pp.asm_exc_page_fault.perf_mmap__read_head.perf_mmap__push.record__mmap_read_evlist.cmd_record
7.27 ± 57% -7.3 0.00 perf-profile.calltrace.cycles-pp.exc_page_fault.asm_exc_page_fault.perf_mmap__read_head.perf_mmap__push.record__mmap_read_evlist
7.27 ± 57% -7.3 0.00 perf-profile.calltrace.cycles-pp.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.perf_mmap__read_head.perf_mmap__push
7.27 ± 57% -7.3 0.00 perf-profile.calltrace.cycles-pp.handle_mm_fault.do_user_addr_fault.exc_page_fault.asm_exc_page_fault.perf_mmap__read_head
7.27 ±113% -7.3 0.00 perf-profile.calltrace.cycles-pp.do_filp_open.do_sys_openat2.do_sys_open.do_syscall_64.entry_SYSCALL_64_after_hwframe
7.27 ±113% -7.3 0.00 perf-profile.calltrace.cycles-pp.path_openat.do_filp_open.do_sys_openat2.do_sys_open.do_syscall_64
7.27 ±113% -7.3 0.00 perf-profile.calltrace.cycles-pp.link_path_walk.path_openat.do_filp_open.do_sys_openat2.do_sys_open
7.27 ±113% -7.3 0.00 perf-profile.calltrace.cycles-pp.walk_component.link_path_walk.path_openat.do_filp_open.do_sys_openat2
5.00 ±100% -5.0 0.00 perf-profile.calltrace.cycles-pp.tlb_finish_mmu.exit_mmap.mmput.do_exit.do_group_exit
5.00 ±100% -5.0 0.00 perf-profile.calltrace.cycles-pp.tlb_flush_mmu.tlb_finish_mmu.exit_mmap.mmput.do_exit
5.00 ±100% -5.0 0.00 perf-profile.calltrace.cycles-pp.release_pages.tlb_flush_mmu.tlb_finish_mmu.exit_mmap.mmput
4.77 ±100% -4.8 0.00 perf-profile.calltrace.cycles-pp.do_fault.__handle_mm_fault.handle_mm_fault.do_user_addr_fault.exc_page_fault
4.77 ±100% -4.8 0.00 perf-profile.calltrace.cycles-pp.__do_fault.do_fault.__handle_mm_fault.handle_mm_fault.do_user_addr_fault
0.00 +0.6 0.59 ± 12% perf-profile.calltrace.cycles-pp.btrfs_reserve_metadata_bytes.btrfs_delalloc_reserve_metadata.btrfs_buffered_write.btrfs_file_write_iter.new_sync_write
0.00 +0.6 0.61 ± 12% perf-profile.calltrace.cycles-pp.btrfs_delalloc_reserve_metadata.btrfs_buffered_write.btrfs_file_write_iter.new_sync_write.vfs_write
0.00 +0.6 0.65 ± 12% perf-profile.calltrace.cycles-pp.ktime_get.tick_irq_enter.irq_enter_rcu.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt
0.00 +0.7 0.67 ± 20% perf-profile.calltrace.cycles-pp.setup_leaf_for_split.btrfs_duplicate_item.btrfs_mark_extent_written.btrfs_finish_ordered_io.btrfs_work_helper
0.00 +0.7 0.68 ± 18% perf-profile.calltrace.cycles-pp.read_extent_buffer.check_leaf.btree_csum_one_bio.btree_submit_bio_hook.submit_one_bio
0.00 +0.7 0.68 ± 8% perf-profile.calltrace.cycles-pp.__next_timer_interrupt.get_next_timer_interrupt.tick_nohz_next_event.tick_nohz_get_sleep_length.menu_select
0.00 +0.7 0.71 ± 14% perf-profile.calltrace.cycles-pp.perf_mux_hrtimer_handler.__hrtimer_run_queues.hrtimer_interrupt.__sysvec_apic_timer_interrupt.asm_call_on_stack
0.00 +0.7 0.74 ± 26% perf-profile.calltrace.cycles-pp.btrfs_cow_block.btrfs_search_slot.btrfs_lookup_csum.btrfs_csum_file_blocks.add_pending_csums
0.00 +0.7 0.74 ± 26% perf-profile.calltrace.cycles-pp.__btrfs_cow_block.btrfs_cow_block.btrfs_search_slot.btrfs_lookup_csum.btrfs_csum_file_blocks
0.00 +0.8 0.83 ± 44% perf-profile.calltrace.cycles-pp.scheduler_tick.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues
0.00 +0.9 0.88 ± 27% perf-profile.calltrace.cycles-pp.btrfs_insert_empty_items.btrfs_csum_file_blocks.add_pending_csums.btrfs_finish_ordered_io.btrfs_work_helper
0.00 +0.9 0.89 ± 21% perf-profile.calltrace.cycles-pp.tick_irq_enter.irq_enter_rcu.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt.cpuidle_enter_state
0.00 +1.0 1.04 ± 10% perf-profile.calltrace.cycles-pp.get_next_timer_interrupt.tick_nohz_next_event.tick_nohz_get_sleep_length.menu_select.do_idle
0.00 +1.1 1.06 ± 25% perf-profile.calltrace.cycles-pp.btrfs_search_slot.btrfs_lookup_file_extent.__btrfs_drop_extents.insert_reserved_file_extent.btrfs_finish_ordered_io
0.00 +1.1 1.06 ± 25% perf-profile.calltrace.cycles-pp.btrfs_lookup_file_extent.__btrfs_drop_extents.insert_reserved_file_extent.btrfs_finish_ordered_io.btrfs_work_helper
0.00 +1.1 1.06 ± 34% perf-profile.calltrace.cycles-pp.btrfs_cow_block.btrfs_search_slot.btrfs_mark_extent_written.btrfs_finish_ordered_io.btrfs_work_helper
0.00 +1.1 1.06 ± 34% perf-profile.calltrace.cycles-pp.__btrfs_cow_block.btrfs_cow_block.btrfs_search_slot.btrfs_mark_extent_written.btrfs_finish_ordered_io
0.00 +1.1 1.08 ± 21% perf-profile.calltrace.cycles-pp.btrfs_duplicate_item.btrfs_mark_extent_written.btrfs_finish_ordered_io.btrfs_work_helper.process_one_work
0.00 +1.1 1.08 ± 23% perf-profile.calltrace.cycles-pp.btrfs_search_slot.btrfs_lookup_csum.btrfs_csum_file_blocks.add_pending_csums.btrfs_finish_ordered_io
0.00 +1.1 1.09 ± 23% perf-profile.calltrace.cycles-pp.btrfs_lookup_csum.btrfs_csum_file_blocks.add_pending_csums.btrfs_finish_ordered_io.btrfs_work_helper
0.00 +1.1 1.13 ± 7% perf-profile.calltrace.cycles-pp.rebalance_domains.__softirqentry_text_start.asm_call_on_stack.do_softirq_own_stack.irq_exit_rcu
0.00 +1.2 1.20 ± 21% perf-profile.calltrace.cycles-pp.run_delalloc_nocow.btrfs_run_delalloc_range.writepage_delalloc.__extent_writepage.extent_write_cache_pages
0.00 +1.2 1.22 ± 18% perf-profile.calltrace.cycles-pp.irq_enter_rcu.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt.cpuidle_enter_state.cpuidle_enter
0.00 +1.3 1.34 ± 20% perf-profile.calltrace.cycles-pp.btrfs_get_64.check_extent_data_item.check_leaf.btree_csum_one_bio.btree_submit_bio_hook
0.00 +1.3 1.34 ± 24% perf-profile.calltrace.cycles-pp.__btrfs_drop_extents.insert_reserved_file_extent.btrfs_finish_ordered_io.btrfs_work_helper.process_one_work
0.00 +1.4 1.39 ± 34% perf-profile.calltrace.cycles-pp.btrfs_run_delayed_refs_for_head.__btrfs_run_delayed_refs.btrfs_run_delayed_refs.flush_space.btrfs_async_reclaim_metadata_space
0.00 +1.4 1.39 ± 24% perf-profile.calltrace.cycles-pp.insert_reserved_file_extent.btrfs_finish_ordered_io.btrfs_work_helper.process_one_work.worker_thread
0.00 +1.4 1.40 ± 30% perf-profile.calltrace.cycles-pp.clockevents_program_event.hrtimer_interrupt.__sysvec_apic_timer_interrupt.asm_call_on_stack.sysvec_apic_timer_interrupt
0.00 +1.7 1.70 ± 27% perf-profile.calltrace.cycles-pp.btrfs_search_slot.btrfs_mark_extent_written.btrfs_finish_ordered_io.btrfs_work_helper.process_one_work
0.00 +1.7 1.72 ± 23% perf-profile.calltrace.cycles-pp.btrfs_buffered_write.btrfs_file_write_iter.new_sync_write.vfs_write.ksys_write
0.00 +1.7 1.72 ± 21% perf-profile.calltrace.cycles-pp.btrfs_run_delalloc_range.writepage_delalloc.__extent_writepage.extent_write_cache_pages.extent_writepages
0.00 +1.7 1.73 ± 23% perf-profile.calltrace.cycles-pp.new_sync_write.vfs_write.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe
0.00 +1.7 1.73 ± 23% perf-profile.calltrace.cycles-pp.btrfs_file_write_iter.new_sync_write.vfs_write.ksys_write.do_syscall_64
0.00 +1.7 1.74 ± 33% perf-profile.calltrace.cycles-pp.btrfs_run_delayed_refs.flush_space.btrfs_async_reclaim_metadata_space.process_one_work.worker_thread
0.00 +1.7 1.74 ± 33% perf-profile.calltrace.cycles-pp.__btrfs_run_delayed_refs.btrfs_run_delayed_refs.flush_space.btrfs_async_reclaim_metadata_space.process_one_work
0.00 +1.8 1.75 ± 23% perf-profile.calltrace.cycles-pp.vfs_write.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_write
0.00 +1.8 1.75 ± 23% perf-profile.calltrace.cycles-pp.ksys_write.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_write
0.00 +1.8 1.76 ± 23% perf-profile.calltrace.cycles-pp.do_syscall_64.entry_SYSCALL_64_after_hwframe.__libc_write
0.00 +1.8 1.77 ± 23% perf-profile.calltrace.cycles-pp.entry_SYSCALL_64_after_hwframe.__libc_write
0.00 +1.8 1.79 ± 23% perf-profile.calltrace.cycles-pp.__libc_write
0.00 +1.8 1.84 ± 20% perf-profile.calltrace.cycles-pp.btrfs_get_32.check_leaf.btree_csum_one_bio.btree_submit_bio_hook.submit_one_bio
0.00 +1.9 1.87 ± 21% perf-profile.calltrace.cycles-pp.writepage_delalloc.__extent_writepage.extent_write_cache_pages.extent_writepages.do_writepages
0.00 +1.9 1.90 ± 56% perf-profile.calltrace.cycles-pp.update_process_times.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt
0.00 +2.0 2.01 ± 58% perf-profile.calltrace.cycles-pp.tick_sched_handle.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.__sysvec_apic_timer_interrupt
0.00 +2.0 2.03 ± 24% perf-profile.calltrace.cycles-pp.add_pending_csums.btrfs_finish_ordered_io.btrfs_work_helper.process_one_work.worker_thread
0.00 +2.0 2.03 ± 24% perf-profile.calltrace.cycles-pp.btrfs_csum_file_blocks.add_pending_csums.btrfs_finish_ordered_io.btrfs_work_helper.process_one_work
0.00 +2.1 2.10 ± 21% perf-profile.calltrace.cycles-pp.__extent_writepage.extent_write_cache_pages.extent_writepages.do_writepages.__writeback_single_inode
0.00 +2.1 2.11 ± 13% perf-profile.calltrace.cycles-pp.tick_nohz_next_event.tick_nohz_get_sleep_length.menu_select.do_idle.cpu_startup_entry
0.00 +2.2 2.19 ± 21% perf-profile.calltrace.cycles-pp.extent_writepages.do_writepages.__writeback_single_inode.writeback_sb_inodes.wb_writeback
0.00 +2.2 2.19 ± 21% perf-profile.calltrace.cycles-pp.extent_write_cache_pages.extent_writepages.do_writepages.__writeback_single_inode.writeback_sb_inodes
0.00 +2.3 2.35 ± 9% perf-profile.calltrace.cycles-pp.__softirqentry_text_start.asm_call_on_stack.do_softirq_own_stack.irq_exit_rcu.sysvec_apic_timer_interrupt
0.00 +2.4 2.35 ± 9% perf-profile.calltrace.cycles-pp.asm_call_on_stack.do_softirq_own_stack.irq_exit_rcu.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt
0.00 +2.4 2.38 ± 10% perf-profile.calltrace.cycles-pp.do_softirq_own_stack.irq_exit_rcu.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt.cpuidle_enter_state
0.00 +2.4 2.40 ± 20% perf-profile.calltrace.cycles-pp.check_extent_data_item.check_leaf.btree_csum_one_bio.btree_submit_bio_hook.submit_one_bio
0.00 +2.4 2.45 ± 46% perf-profile.calltrace.cycles-pp.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.__sysvec_apic_timer_interrupt.asm_call_on_stack
0.00 +2.5 2.46 ± 8% perf-profile.calltrace.cycles-pp.tick_nohz_get_sleep_length.menu_select.do_idle.cpu_startup_entry.start_secondary
0.00 +2.5 2.48 ± 32% perf-profile.calltrace.cycles-pp.flush_space.btrfs_async_reclaim_metadata_space.process_one_work.worker_thread.kthread
0.00 +2.5 2.48 ± 33% perf-profile.calltrace.cycles-pp.btrfs_async_reclaim_metadata_space.process_one_work.worker_thread.kthread.ret_from_fork
0.00 +2.8 2.79 ± 10% perf-profile.calltrace.cycles-pp.irq_exit_rcu.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt.cpuidle_enter_state.cpuidle_enter
0.00 +2.9 2.94 ± 25% perf-profile.calltrace.cycles-pp.btrfs_mark_extent_written.btrfs_finish_ordered_io.btrfs_work_helper.process_one_work.worker_thread
0.00 +3.8 3.80 ± 42% perf-profile.calltrace.cycles-pp.__hrtimer_run_queues.hrtimer_interrupt.__sysvec_apic_timer_interrupt.asm_call_on_stack.sysvec_apic_timer_interrupt
0.00 +6.1 6.10 ± 20% perf-profile.calltrace.cycles-pp.hrtimer_interrupt.__sysvec_apic_timer_interrupt.asm_call_on_stack.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt
0.00 +6.2 6.25 ± 21% perf-profile.calltrace.cycles-pp.__sysvec_apic_timer_interrupt.asm_call_on_stack.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt.cpuidle_enter_state
0.00 +6.3 6.30 ± 22% perf-profile.calltrace.cycles-pp.asm_call_on_stack.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt.cpuidle_enter_state.cpuidle_enter
0.00 +6.6 6.60 ± 18% perf-profile.calltrace.cycles-pp.check_leaf.btree_csum_one_bio.btree_submit_bio_hook.submit_one_bio.submit_extent_page
0.00 +6.7 6.75 ± 19% perf-profile.calltrace.cycles-pp.btree_csum_one_bio.btree_submit_bio_hook.submit_one_bio.submit_extent_page.write_one_eb
0.00 +6.8 6.82 ± 19% perf-profile.calltrace.cycles-pp.submit_extent_page.write_one_eb.btree_write_cache_pages.do_writepages.__writeback_single_inode
0.00 +6.8 6.84 ± 24% perf-profile.calltrace.cycles-pp.btrfs_finish_ordered_io.btrfs_work_helper.process_one_work.worker_thread.kthread
0.00 +6.9 6.93 ± 19% perf-profile.calltrace.cycles-pp.btree_submit_bio_hook.submit_one_bio.submit_extent_page.write_one_eb.btree_write_cache_pages
0.00 +6.9 6.93 ± 19% perf-profile.calltrace.cycles-pp.submit_one_bio.submit_extent_page.write_one_eb.btree_write_cache_pages.do_writepages
0.00 +7.1 7.06 ± 24% perf-profile.calltrace.cycles-pp.btrfs_work_helper.process_one_work.worker_thread.kthread.ret_from_fork
0.00 +7.1 7.10 ± 19% perf-profile.calltrace.cycles-pp.write_one_eb.btree_write_cache_pages.do_writepages.__writeback_single_inode.writeback_sb_inodes
0.00 +7.2 7.21 ± 19% perf-profile.calltrace.cycles-pp.btree_write_cache_pages.do_writepages.__writeback_single_inode.writeback_sb_inodes.wb_writeback
0.00 +9.4 9.41 ± 20% perf-profile.calltrace.cycles-pp.do_writepages.__writeback_single_inode.writeback_sb_inodes.wb_writeback.wb_workfn
0.00 +9.4 9.41 ± 20% perf-profile.calltrace.cycles-pp.writeback_sb_inodes.wb_writeback.wb_workfn.process_one_work.worker_thread
0.00 +9.4 9.41 ± 20% perf-profile.calltrace.cycles-pp.__writeback_single_inode.writeback_sb_inodes.wb_writeback.wb_workfn.process_one_work
0.00 +9.4 9.45 ± 20% perf-profile.calltrace.cycles-pp.wb_writeback.wb_workfn.process_one_work.worker_thread.kthread
0.00 +9.5 9.45 ± 20% perf-profile.calltrace.cycles-pp.wb_workfn.process_one_work.worker_thread.kthread.ret_from_fork
0.00 +11.2 11.24 ± 15% perf-profile.calltrace.cycles-pp.sysvec_apic_timer_interrupt.asm_sysvec_apic_timer_interrupt.cpuidle_enter_state.cpuidle_enter.do_idle
0.00 +15.9 15.94 ± 43% perf-profile.calltrace.cycles-pp.menu_select.do_idle.cpu_startup_entry.start_secondary.secondary_startup_64
0.00 +19.1 19.14 ± 22% perf-profile.calltrace.cycles-pp.process_one_work.worker_thread.kthread.ret_from_fork
0.00 +19.3 19.29 ± 22% perf-profile.calltrace.cycles-pp.worker_thread.kthread.ret_from_fork
0.00 +19.4 19.36 ± 22% perf-profile.calltrace.cycles-pp.ret_from_fork
0.00 +19.4 19.36 ± 22% perf-profile.calltrace.cycles-pp.kthread.ret_from_fork
0.00 +20.9 20.93 ± 18% perf-profile.calltrace.cycles-pp.asm_sysvec_apic_timer_interrupt.cpuidle_enter_state.cpuidle_enter.do_idle.cpu_startup_entry
50.00 ± 23% +27.1 77.09 ± 6% perf-profile.calltrace.cycles-pp.do_idle.cpu_startup_entry.start_secondary.secondary_startup_64
50.00 ± 23% +27.1 77.12 ± 6% perf-profile.calltrace.cycles-pp.cpu_startup_entry.start_secondary.secondary_startup_64
50.00 ± 23% +27.1 77.12 ± 6% perf-profile.calltrace.cycles-pp.start_secondary.secondary_startup_64
50.00 ± 23% +27.6 77.61 ± 6% perf-profile.calltrace.cycles-pp.secondary_startup_64
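For reference, the middle column in the perf-profile rows is an absolute difference in percentage points rather than a relative change (intel_idle above drops from 50.00% to 34.64% of cycles and is shown as -15.4), and the perf-profile.children rows below count a function together with its callees, so a function can show a larger share there than in any single call chain. A tiny sketch of that delta arithmetic, reusing the intel_idle numbers above (again illustrative only, not lkp code):

/*
 * Illustrative only: the perf-profile delta column is the base-to-patched
 * difference in percentage points, not a relative change.
 */
#include <stdio.h>

int main(void)
{
	double base_pct = 50.00;	/* intel_idle share of cycles, base */
	double patched_pct = 34.64;	/* intel_idle share of cycles, patched */

	/* prints "50.00  -15.4  34.64", matching the report's layout */
	printf("%.2f  %+.1f  %.2f\n",
	       base_pct, patched_pct - base_pct, patched_pct);
	return 0;
}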
35.91 ± 50% -33.8 2.08 ± 23% perf-profile.children.cycles-pp.do_syscall_64
35.91 ± 50% -33.8 2.09 ± 23% perf-profile.children.cycles-pp.entry_SYSCALL_64_after_hwframe
50.00 ± 23% -15.2 34.85 ± 13% perf-profile.children.cycles-pp.intel_idle
14.09 ± 95% -14.1 0.01 ±173% perf-profile.children.cycles-pp.seq_read
11.82 ± 31% -11.8 0.00 perf-profile.children.cycles-pp.cmd_record
11.82 ± 31% -11.8 0.02 ±173% perf-profile.children.cycles-pp.__libc_start_main
11.82 ± 31% -11.8 0.02 ±173% perf-profile.children.cycles-pp.main
11.82 ± 31% -11.8 0.02 ±173% perf-profile.children.cycles-pp.run_builtin
11.59 ±128% -11.6 0.00 perf-profile.children.cycles-pp.proc_reg_read
10.00 ±100% -10.0 0.00 perf-profile.children.cycles-pp.__prepare_exit_to_usermode
10.00 ±100% -10.0 0.00 perf-profile.children.cycles-pp.do_signal
10.00 ±100% -10.0 0.00 perf-profile.children.cycles-pp.get_signal
10.00 ±100% -10.0 0.00 perf-profile.children.cycles-pp.do_group_exit
10.00 ±100% -10.0 0.00 perf-profile.children.cycles-pp.do_exit
9.54 ± 4% -9.5 0.00 perf-profile.children.cycles-pp.record__mmap_read_evlist
9.54 ± 4% -9.5 0.00 perf-profile.children.cycles-pp.perf_mmap__push
9.54 ± 4% -9.5 0.00 perf-profile.children.cycles-pp.perf_mmap__read_head
7.50 ±110% -7.5 0.01 ±173% perf-profile.children.cycles-pp.mmput
7.50 ±110% -7.5 0.01 ±173% perf-profile.children.cycles-pp.exit_mmap
7.27 ±113% -7.3 0.00 perf-profile.children.cycles-pp.dput
7.27 ±113% -7.3 0.00 perf-profile.children.cycles-pp.do_sys_open
7.27 ±113% -7.3 0.00 perf-profile.children.cycles-pp.do_sys_openat2
7.27 ±113% -7.3 0.00 perf-profile.children.cycles-pp.do_filp_open
7.27 ±113% -7.3 0.00 perf-profile.children.cycles-pp.path_openat
7.27 ±113% -7.3 0.00 perf-profile.children.cycles-pp.link_path_walk
7.27 ±113% -7.3 0.00 perf-profile.children.cycles-pp.walk_component
7.27 ±113% -7.3 0.00 perf-profile.children.cycles-pp.step_into
5.00 ±100% -5.0 0.00 perf-profile.children.cycles-pp.tlb_finish_mmu
5.00 ±100% -5.0 0.00 perf-profile.children.cycles-pp.tlb_flush_mmu
5.00 ±100% -5.0 0.00 perf-profile.children.cycles-pp.release_pages
4.77 ±100% -4.8 0.00 perf-profile.children.cycles-pp.seq_printf
4.77 ±100% -4.8 0.00 perf-profile.children.cycles-pp.seq_vprintf
4.77 ±100% -4.8 0.00 perf-profile.children.cycles-pp.vsnprintf
4.77 ±100% -4.8 0.00 perf-profile.children.cycles-pp.__do_fault
4.77 ±100% -4.8 0.02 ±173% perf-profile.children.cycles-pp.do_fault
0.00 +0.1 0.05 ± 8% perf-profile.children.cycles-pp.find_ref_head
0.00 +0.1 0.06 ± 14% perf-profile.children.cycles-pp.memzero_extent_buffer
0.00 +0.1 0.06 ± 16% perf-profile.children.cycles-pp.btrfs_release_extent_buffer_pages
0.00 +0.1 0.06 ± 13% perf-profile.children.cycles-pp.btrfs_get_block_group
0.00 +0.1 0.07 ± 26% perf-profile.children.cycles-pp.__update_load_avg_cfs_rq
0.00 +0.1 0.07 ± 26% perf-profile.children.cycles-pp.__switch_to_asm
0.00 +0.1 0.07 ± 26% perf-profile.children.cycles-pp.check_committed_ref
0.00 +0.1 0.07 ± 31% perf-profile.children.cycles-pp.pagevec_lookup_range_tag
0.00 +0.1 0.07 ± 31% perf-profile.children.cycles-pp.find_get_pages_range_tag
0.00 +0.1 0.07 ± 17% perf-profile.children.cycles-pp.btrfs_free_path
0.00 +0.1 0.07 ± 22% perf-profile.children.cycles-pp.btrfs_bio_fits_in_stripe
0.00 +0.1 0.07 ± 30% perf-profile.children.cycles-pp.btrfs_check_node
0.00 +0.1 0.07 ± 38% perf-profile.children.cycles-pp.__slab_free
0.00 +0.1 0.08 ± 14% perf-profile.children.cycles-pp.set_next_entity
0.00 +0.1 0.08 ± 21% perf-profile.children.cycles-pp.__switch_to
0.00 +0.1 0.08 ± 32% perf-profile.children.cycles-pp.mark_page_accessed
0.00 +0.1 0.08 ± 40% perf-profile.children.cycles-pp.xas_find_marked
0.00 +0.1 0.08 ± 26% perf-profile.children.cycles-pp.__module_address
0.00 +0.1 0.08 ± 8% perf-profile.children.cycles-pp.dequeue_entity
0.00 +0.1 0.08 ± 19% perf-profile.children.cycles-pp.btrfs_get_chunk_map
0.00 +0.1 0.08 ± 31% perf-profile.children.cycles-pp.kernel_text_address
0.00 +0.1 0.08 ± 27% perf-profile.children.cycles-pp.block_group_cache_tree_search
0.00 +0.1 0.09 ± 28% perf-profile.children.cycles-pp.__blk_mq_try_issue_directly
0.00 +0.1 0.09 ± 14% perf-profile.children.cycles-pp.check_extent_item
0.00 +0.1 0.09 ± 30% perf-profile.children.cycles-pp.___slab_alloc
0.00 +0.1 0.09 ± 29% perf-profile.children.cycles-pp.free_extent_buffer
0.00 +0.1 0.09 ± 23% perf-profile.children.cycles-pp.mark_extent_buffer_accessed
0.00 +0.1 0.09 ± 36% perf-profile.children.cycles-pp.btrfs_inc_extent_ref
0.00 +0.1 0.09 ± 17% perf-profile.children.cycles-pp.select_task_rq_fair
0.00 +0.1 0.09 ± 27% perf-profile.children.cycles-pp.find_next_bit
0.00 +0.1 0.09 ± 27% perf-profile.children.cycles-pp.__kernel_text_address
0.00 +0.1 0.09 ± 29% perf-profile.children.cycles-pp.crc_128
0.00 +0.1 0.10 ± 18% perf-profile.children.cycles-pp.note_gp_changes
0.00 +0.1 0.10 ± 5% perf-profile.children.cycles-pp.update_load_avg
0.00 +0.1 0.10 ± 30% perf-profile.children.cycles-pp.__slab_alloc
0.00 +0.1 0.10 ± 11% perf-profile.children.cycles-pp.btrfs_super_csum_size
0.00 +0.1 0.10 ± 26% perf-profile.children.cycles-pp.push_leaf_left
0.00 +0.1 0.10 ± 39% perf-profile.children.cycles-pp.cpumask_next_and
0.00 +0.1 0.10 ± 29% perf-profile.children.cycles-pp.rcu_nmi_exit
0.00 +0.1 0.10 ± 15% perf-profile.children.cycles-pp.dequeue_task_fair
0.00 +0.1 0.10 ± 32% perf-profile.children.cycles-pp.unwind_get_return_address
0.00 +0.1 0.10 ± 24% perf-profile.children.cycles-pp.btrfs_get_io_geometry
0.00 +0.1 0.10 ± 27% perf-profile.children.cycles-pp.idle_cpu
0.00 +0.1 0.10 ± 39% perf-profile.children.cycles-pp.calc_global_load_tick
0.00 +0.1 0.10 ± 44% perf-profile.children.cycles-pp.memcpy_erms
0.00 +0.1 0.10 ± 35% perf-profile.children.cycles-pp.btrfs_add_delayed_data_ref
0.00 +0.1 0.11 ± 10% perf-profile.children.cycles-pp.call_cpuidle
0.00 +0.1 0.11 ± 35% perf-profile.children.cycles-pp.__percpu_counter_sum
0.00 +0.1 0.11 ± 35% perf-profile.children.cycles-pp.__percpu_counter_compare
0.00 +0.1 0.11 ± 23% perf-profile.children.cycles-pp.rcu_eqs_enter
0.00 +0.1 0.11 ± 28% perf-profile.children.cycles-pp.insert_state
0.00 +0.1 0.11 ± 39% perf-profile.children.cycles-pp.__btrfs_btree_balance_dirty
0.00 +0.1 0.11 ± 20% perf-profile.children.cycles-pp.prepare_pages
0.00 +0.1 0.11 ± 17% perf-profile.children.cycles-pp.rcu_dynticks_eqs_exit
0.00 +0.1 0.11 ± 27% perf-profile.children.cycles-pp.memcpy_extent_buffer
0.00 +0.1 0.12 ± 42% perf-profile.children.cycles-pp.rb_erase
0.00 +0.1 0.12 ± 38% perf-profile.children.cycles-pp.idtentry_exit_cond_rcu
0.00 +0.1 0.12 ± 13% perf-profile.children.cycles-pp.rcu_dynticks_eqs_enter
0.00 +0.1 0.12 ± 20% perf-profile.children.cycles-pp.account_process_tick
0.00 +0.1 0.12 ± 16% perf-profile.children.cycles-pp.btrfs_cross_ref_exist
0.00 +0.1 0.12 ± 15% perf-profile.children.cycles-pp.__test_set_page_writeback
0.00 +0.1 0.12 ± 18% perf-profile.children.cycles-pp.rcu_eqs_exit
0.00 +0.1 0.12 ± 21% perf-profile.children.cycles-pp.___might_sleep
0.00 +0.1 0.12 ± 6% perf-profile.children.cycles-pp.end_extent_writepage
0.00 +0.1 0.12 ± 6% perf-profile.children.cycles-pp.btrfs_writepage_endio_finish_ordered
0.00 +0.1 0.12 ± 29% perf-profile.children.cycles-pp.btrfs_try_tree_write_lock
0.00 +0.1 0.13 ± 27% perf-profile.children.cycles-pp.__clear_extent_bit
0.00 +0.1 0.13 ± 17% perf-profile.children.cycles-pp.end_bio_extent_buffer_writepage
0.00 +0.1 0.13 ± 14% perf-profile.children.cycles-pp.clear_page_dirty_for_io
0.00 +0.1 0.13 ± 31% perf-profile.children.cycles-pp.unpin_extent_range
0.00 +0.1 0.13 ± 29% perf-profile.children.cycles-pp.release_extent_buffer
0.00 +0.1 0.13 ± 21% perf-profile.children.cycles-pp.btrfs_wq_submit_bio
0.00 +0.1 0.13 ± 38% perf-profile.children.cycles-pp.btrfs_find_delalloc_range
0.00 +0.1 0.13 ± 94% perf-profile.children.cycles-pp.timerqueue_add
0.00 +0.1 0.14 ± 25% perf-profile.children.cycles-pp.run_one_async_done
0.00 +0.1 0.14 ± 19% perf-profile.children.cycles-pp.btrfs_submit_bio_hook
0.00 +0.1 0.14 ± 38% perf-profile.children.cycles-pp.unpin_extent_cache
0.00 +0.1 0.14 ± 13% perf-profile.children.cycles-pp.pick_next_task_fair
0.00 +0.1 0.14 ± 91% perf-profile.children.cycles-pp.update_ts_time_stats
0.00 +0.1 0.14 ± 72% perf-profile.children.cycles-pp.rb_next
0.00 +0.1 0.14 ± 35% perf-profile.children.cycles-pp.btrfs_finish_extent_commit
0.00 +0.1 0.14 ± 52% perf-profile.children.cycles-pp.rcu_nmi_enter
0.00 +0.1 0.14 ± 32% perf-profile.children.cycles-pp.tsc_verify_tsc_adjust
0.00 +0.1 0.15 ± 65% perf-profile.children.cycles-pp.nr_iowait_cpu
0.00 +0.1 0.15 ± 31% perf-profile.children.cycles-pp.arch_cpu_idle_enter
0.00 +0.1 0.15 ± 51% perf-profile.children.cycles-pp.rb_insert_color
0.00 +0.1 0.15 ± 34% perf-profile.children.cycles-pp.find_lock_delalloc_range
0.00 +0.2 0.15 ± 86% perf-profile.children.cycles-pp.enqueue_hrtimer
0.00 +0.2 0.16 ± 62% perf-profile.children.cycles-pp.cpuidle_governor_latency_req
0.00 +0.2 0.16 ± 19% perf-profile.children.cycles-pp.btrfs_comp_cpu_keys
0.00 +0.2 0.16 ± 2% perf-profile.children.cycles-pp.hrtimer_get_next_event
0.00 +0.2 0.16 ± 35% perf-profile.children.cycles-pp.btrfs_update_block_group
0.00 +0.2 0.17 ± 25% perf-profile.children.cycles-pp.queued_write_lock_slowpath
0.00 +0.2 0.17 ± 19% perf-profile.children.cycles-pp.finish_wait
0.00 +0.2 0.17 ± 21% perf-profile.children.cycles-pp.test_clear_page_writeback
0.00 +0.2 0.17 ± 36% perf-profile.children.cycles-pp.kmem_cache_free
0.00 +0.2 0.17 ± 26% perf-profile.children.cycles-pp.down_read
0.00 +0.2 0.17 ± 27% perf-profile.children.cycles-pp.__orc_find
0.00 +0.2 0.17 ± 27% perf-profile.children.cycles-pp.__radix_tree_lookup
0.00 +0.2 0.17 ± 59% perf-profile.children.cycles-pp.idtentry_enter_cond_rcu
0.00 +0.2 0.17 ± 21% perf-profile.children.cycles-pp.end_page_writeback
0.00 +0.2 0.17 ± 14% perf-profile.children.cycles-pp.rcu_idle_exit
0.00 +0.2 0.17 ± 22% perf-profile.children.cycles-pp._raw_spin_lock_irq
0.00 +0.2 0.17 ± 45% perf-profile.children.cycles-pp.update_irq_load_avg
0.00 +0.2 0.18 ± 16% perf-profile.children.cycles-pp.run_local_timers
0.00 +0.2 0.18 ± 40% perf-profile.children.cycles-pp.find_get_entry
0.00 +0.2 0.18 ± 21% perf-profile.children.cycles-pp.xas_load
0.00 +0.2 0.18 ± 28% perf-profile.children.cycles-pp.memmove_extent_buffer
0.00 +0.2 0.18 ± 16% perf-profile.children.cycles-pp.end_bio_extent_writepage
0.00 +0.2 0.18 ± 32% perf-profile.children.cycles-pp.__set_page_dirty_nobuffers
0.00 +0.2 0.18 ± 72% perf-profile.children.cycles-pp.timerqueue_del
0.00 +0.2 0.18 ± 18% perf-profile.children.cycles-pp.btrfs_free_tree_block
0.00 +0.2 0.19 ± 21% perf-profile.children.cycles-pp.blk_mq_submit_bio
0.00 +0.2 0.20 ± 27% perf-profile.children.cycles-pp.btrfs_release_path
0.00 +0.2 0.20 ± 32% perf-profile.children.cycles-pp.set_extent_buffer_dirty
0.00 +0.2 0.20 ± 38% perf-profile.children.cycles-pp.run_timer_softirq
0.00 +0.2 0.20 ± 25% perf-profile.children.cycles-pp.orc_find
0.00 +0.2 0.20 ± 21% perf-profile.children.cycles-pp.btrfs_unlock_up_safe
0.00 +0.2 0.20 ± 29% perf-profile.children.cycles-pp.arch_scale_freq_tick
0.00 +0.2 0.20 ± 38% perf-profile.children.cycles-pp.__hrtimer_next_event_base
0.00 +0.2 0.21 ± 25% perf-profile.children.cycles-pp.__push_leaf_right
0.00 +0.2 0.21 ± 44% perf-profile.children.cycles-pp.lock_and_cleanup_extent_if_need
0.00 +0.2 0.21 ± 55% perf-profile.children.cycles-pp.io_serial_in
0.00 +0.2 0.21 ± 19% perf-profile.children.cycles-pp.__queue_work
0.00 +0.2 0.21 ± 48% perf-profile.children.cycles-pp.btrfs_get_extent
0.00 +0.2 0.22 ± 25% perf-profile.children.cycles-pp.memmove
0.00 +0.2 0.22 ± 17% perf-profile.children.cycles-pp.queue_work_on
0.00 +0.2 0.22 ± 25% perf-profile.children.cycles-pp.submit_bio_noacct
0.00 +0.2 0.22 ± 22% perf-profile.children.cycles-pp.btrfs_delayed_refs_rsv_release
0.00 +0.2 0.22 ± 15% perf-profile.children.cycles-pp.__extent_writepage_io
0.00 +0.2 0.22 ± 36% perf-profile.children.cycles-pp.run_rebalance_domains
0.00 +0.2 0.23 ± 25% perf-profile.children.cycles-pp.submit_bio
0.00 +0.2 0.23 ± 33% perf-profile.children.cycles-pp.btrfs_mark_buffer_dirty
0.00 +0.2 0.23 ± 16% perf-profile.children.cycles-pp.schedule_idle
0.00 +0.2 0.24 ± 17% perf-profile.children.cycles-pp.add_delayed_ref_head
0.00 +0.2 0.24 ± 21% perf-profile.children.cycles-pp.csum_exist_in_range
0.00 +0.2 0.24 ± 21% perf-profile.children.cycles-pp.btrfs_lookup_csums_range
0.00 +0.2 0.24 ± 79% perf-profile.children.cycles-pp.__remove_hrtimer
0.00 +0.2 0.24 ± 57% perf-profile.children.cycles-pp.update_blocked_averages
0.00 +0.2 0.25 ± 47% perf-profile.children.cycles-pp.serial8250_console_putchar
0.00 +0.2 0.25 ± 5% perf-profile.children.cycles-pp.__intel_pmu_enable_all
0.00 +0.2 0.25 ± 48% perf-profile.children.cycles-pp.uart_console_write
0.00 +0.3 0.25 ± 14% perf-profile.children.cycles-pp.kmem_cache_alloc
0.00 +0.3 0.25 ± 45% perf-profile.children.cycles-pp.wait_for_xmitr
0.00 +0.3 0.26 ± 45% perf-profile.children.cycles-pp.serial8250_console_write
0.00 +0.3 0.26 ± 32% perf-profile.children.cycles-pp.set_extent_bit
0.00 +0.3 0.27 ± 14% perf-profile.children.cycles-pp.btrfs_try_granting_tickets
0.00 +0.3 0.27 ± 33% perf-profile.children.cycles-pp.create_io_em
0.00 +0.3 0.27 ± 33% perf-profile.children.cycles-pp.pagecache_get_page
0.00 +0.3 0.27 ± 44% perf-profile.children.cycles-pp.asm_sysvec_irq_work
0.00 +0.3 0.27 ± 44% perf-profile.children.cycles-pp.sysvec_irq_work
0.00 +0.3 0.27 ± 44% perf-profile.children.cycles-pp.__sysvec_irq_work
0.00 +0.3 0.27 ± 44% perf-profile.children.cycles-pp.irq_work_run
0.00 +0.3 0.27 ± 44% perf-profile.children.cycles-pp.irq_work_single
0.00 +0.3 0.27 ± 44% perf-profile.children.cycles-pp.printk
0.00 +0.3 0.27 ± 44% perf-profile.children.cycles-pp.vprintk_emit
0.00 +0.3 0.27 ± 44% perf-profile.children.cycles-pp.console_unlock
0.00 +0.3 0.27 ± 36% perf-profile.children.cycles-pp.btrfs_drop_extent_cache
0.00 +0.3 0.28 ± 13% perf-profile.children.cycles-pp.rcu_core
0.00 +0.3 0.28 ± 45% perf-profile.children.cycles-pp.btrfs_dirty_pages
0.00 +0.3 0.29 ± 48% perf-profile.children.cycles-pp.irq_work_run_list
0.00 +0.3 0.29 ± 8% perf-profile.children.cycles-pp.btrfs_inode_rsv_release
0.00 +0.3 0.29 ± 15% perf-profile.children.cycles-pp.hrtimer_next_event_without
0.00 +0.3 0.29 ± 38% perf-profile.children.cycles-pp.alloc_extent_buffer
0.00 +0.3 0.30 ± 40% perf-profile.children.cycles-pp.lookup_inline_extent_backref
0.00 +0.3 0.31 ± 13% perf-profile.children.cycles-pp.btrfs_add_delayed_tree_ref
0.00 +0.3 0.32 ± 23% perf-profile.children.cycles-pp.btrfs_map_bio
0.00 +0.3 0.33 ± 32% perf-profile.children.cycles-pp.lock_extent_bits
0.00 +0.3 0.33 ± 35% perf-profile.children.cycles-pp.generic_bin_search
0.00 +0.3 0.34 ± 26% perf-profile.children.cycles-pp.schedule
0.00 +0.4 0.36 ± 16% perf-profile.children.cycles-pp.btrfs_end_bio
0.00 +0.4 0.37 ± 27% perf-profile.children.cycles-pp.find_extent_buffer
0.00 +0.4 0.38 ± 14% perf-profile.children.cycles-pp.blk_update_request
0.00 +0.4 0.39 ± 34% perf-profile.children.cycles-pp.btrfs_del_items
0.00 +0.4 0.39 ± 13% perf-profile.children.cycles-pp.__lock_text_start
0.00 +0.4 0.39 ± 15% perf-profile.children.cycles-pp.blk_mq_end_request
0.00 +0.4 0.40 ± 44% perf-profile.children.cycles-pp.update_rq_clock
0.00 +0.4 0.42 ± 19% perf-profile.children.cycles-pp.update_sd_lb_stats
0.00 +0.4 0.44 ± 21% perf-profile.children.cycles-pp.find_busiest_group
0.00 +0.4 0.44 ± 16% perf-profile.children.cycles-pp.native_sched_clock
0.00 +0.4 0.45 ± 13% perf-profile.children.cycles-pp.unlock_up
0.00 +0.5 0.45 ± 23% perf-profile.children.cycles-pp.btrfs_get_8
0.00 +0.5 0.45 ± 13% perf-profile.children.cycles-pp.nvme_irq
0.00 +0.5 0.46 ± 21% perf-profile.children.cycles-pp.unwind_next_frame
0.00 +0.5 0.46 ± 13% perf-profile.children.cycles-pp.__handle_irq_event_percpu
0.00 +0.5 0.46 ± 8% perf-profile.children.cycles-pp.read_tsc
0.00 +0.5 0.46 ± 15% perf-profile.children.cycles-pp.sched_clock
0.00 +0.5 0.47 ± 13% perf-profile.children.cycles-pp.handle_irq_event_percpu
0.00 +0.5 0.47 ± 83% perf-profile.children.cycles-pp.__filemap_fdatawrite_range
0.00 +0.5 0.48 ± 16% perf-profile.children.cycles-pp.start_kernel
0.00 +0.5 0.48 ± 82% perf-profile.children.cycles-pp.btrfs_write_marked_extents
0.00 +0.5 0.49 ± 13% perf-profile.children.cycles-pp.handle_irq_event
0.00 +0.5 0.49 ± 30% perf-profile.children.cycles-pp.read_block_for_search
0.00 +0.5 0.49 ± 10% perf-profile.children.cycles-pp.lapic_next_deadline
0.00 +0.5 0.49 ± 80% perf-profile.children.cycles-pp.btrfs_write_and_wait_transaction
0.00 +0.5 0.50 ± 14% perf-profile.children.cycles-pp.handle_edge_irq
0.00 +0.5 0.51 ± 11% perf-profile.children.cycles-pp.ktime_get_update_offsets_now
0.00 +0.5 0.51 ± 14% perf-profile.children.cycles-pp.btrfs_block_rsv_release
0.00 +0.5 0.52 ± 41% perf-profile.children.cycles-pp.__lookup_extent_mapping
0.00 +0.5 0.52 ± 39% perf-profile.children.cycles-pp._raw_spin_trylock
0.00 +0.5 0.52 ± 38% perf-profile.children.cycles-pp.__etree_search
0.00 +0.5 0.53 ± 12% perf-profile.children.cycles-pp.common_interrupt
0.00 +0.5 0.53 ± 20% perf-profile.children.cycles-pp.cow_file_range
0.00 +0.5 0.53 ± 13% perf-profile.children.cycles-pp.asm_common_interrupt
0.00 +0.5 0.55 ± 23% perf-profile.children.cycles-pp.btrfs_set_token_32
0.00 +0.6 0.55 ± 20% perf-profile.children.cycles-pp.__sched_text_start
0.00 +0.6 0.56 ± 98% perf-profile.children.cycles-pp.rcu_sched_clock_irq
0.00 +0.6 0.58 ± 28% perf-profile.children.cycles-pp.sched_clock_cpu
0.00 +0.6 0.59 ± 31% perf-profile.children.cycles-pp.__set_extent_bit
0.00 +0.6 0.59 ± 12% perf-profile.children.cycles-pp.btrfs_reserve_metadata_bytes
0.00 +0.6 0.60 ± 34% perf-profile.children.cycles-pp.btrfs_get_token_32
0.00 +0.6 0.60 ± 25% perf-profile.children.cycles-pp.load_balance
0.00 +0.6 0.61 ± 12% perf-profile.children.cycles-pp.btrfs_delalloc_reserve_metadata
0.00 +0.6 0.63 ± 19% perf-profile.children.cycles-pp.arch_stack_walk
0.00 +0.6 0.64 ± 22% perf-profile.children.cycles-pp.find_free_extent
0.00 +0.6 0.64 ± 20% perf-profile.children.cycles-pp.btrfs_read_lock_root_node
0.00 +0.7 0.66 ± 49% perf-profile.children.cycles-pp.timekeeping_max_deferment
0.00 +0.7 0.67 ± 40% perf-profile.children.cycles-pp.tick_nohz_irq_exit
0.00 +0.7 0.67 ± 21% perf-profile.children.cycles-pp.btrfs_reserve_extent
0.00 +0.7 0.68 ± 20% perf-profile.children.cycles-pp.btrfs_tree_read_lock
0.00 +0.7 0.68 ± 18% perf-profile.children.cycles-pp.stack_trace_save_tsk
0.00 +0.7 0.70 ± 30% perf-profile.children.cycles-pp.push_leaf_right
0.00 +0.7 0.72 ± 72% perf-profile.children.cycles-pp.btrfs_commit_transaction
0.00 +0.7 0.74 ± 17% perf-profile.children.cycles-pp.perf_mux_hrtimer_handler
0.00 +0.7 0.75 ± 9% perf-profile.children.cycles-pp.__next_timer_interrupt
0.00 +0.8 0.77 ± 22% perf-profile.children.cycles-pp.btrfs_lock_root_node
0.00 +0.8 0.78 ± 17% perf-profile.children.cycles-pp.__account_scheduler_latency
0.00 +0.8 0.81 ± 36% perf-profile.children.cycles-pp.__btrfs_free_extent
0.00 +0.8 0.81 ± 21% perf-profile.children.cycles-pp.btrfs_tree_lock
0.00 +0.8 0.81 ± 29% perf-profile.children.cycles-pp.split_leaf
0.00 +0.8 0.85 ± 24% perf-profile.children.cycles-pp.btrfs_alloc_tree_block
0.00 +0.9 0.85 ± 10% perf-profile.children.cycles-pp.irqtime_account_irq
0.00 +0.9 0.85 ± 24% perf-profile.children.cycles-pp.alloc_tree_block_no_bg_flush
0.00 +0.9 0.86 ± 42% perf-profile.children.cycles-pp.scheduler_tick
0.00 +0.9 0.87 ± 18% perf-profile.children.cycles-pp.read_extent_buffer
0.00 +0.9 0.89 ± 20% perf-profile.children.cycles-pp.setup_leaf_for_split
0.00 +0.9 0.90 ± 18% perf-profile.children.cycles-pp.enqueue_entity
0.00 +0.9 0.90 ± 22% perf-profile.children.cycles-pp.tick_irq_enter
0.00 +0.9 0.92 ± 18% perf-profile.children.cycles-pp.enqueue_task_fair
0.00 +0.9 0.93 ± 18% perf-profile.children.cycles-pp.ttwu_do_activate
0.00 +1.0 0.99 ± 21% perf-profile.children.cycles-pp.prepare_to_wait_event
0.00 +1.0 1.02 ± 17% perf-profile.children.cycles-pp.autoremove_wake_function
0.00 +1.0 1.04 ± 16% perf-profile.children.cycles-pp.__wake_up_common
0.00 +1.1 1.05 ± 10% perf-profile.children.cycles-pp.get_next_timer_interrupt
0.00 +1.1 1.09 ± 23% perf-profile.children.cycles-pp.btrfs_lookup_csum
0.00 +1.1 1.09 ± 16% perf-profile.children.cycles-pp.__wake_up_common_lock
0.00 +1.1 1.12 ± 44% perf-profile.children.cycles-pp.copy_extent_buffer_full
0.00 +1.1 1.12 ± 44% perf-profile.children.cycles-pp.copy_page
0.00 +1.2 1.16 ± 25% perf-profile.children.cycles-pp.setup_items_for_insert
0.00 +1.2 1.17 ± 5% perf-profile.children.cycles-pp.rebalance_domains
0.00 +1.2 1.22 ± 17% perf-profile.children.cycles-pp.try_to_wake_up
0.00 +1.2 1.22 ± 21% perf-profile.children.cycles-pp.run_delalloc_nocow
0.00 +1.3 1.25 ± 19% perf-profile.children.cycles-pp.irq_enter_rcu
0.00 +1.3 1.31 ± 23% perf-profile.children.cycles-pp.check_setget_bounds
0.00 +1.3 1.34 ± 26% perf-profile.children.cycles-pp.btrfs_insert_empty_items
0.00 +1.3 1.34 ± 24% perf-profile.children.cycles-pp.__btrfs_drop_extents
0.00 +1.3 1.35 ± 21% perf-profile.children.cycles-pp.btrfs_duplicate_item
0.00 +1.4 1.39 ± 24% perf-profile.children.cycles-pp.insert_reserved_file_extent
0.00 +1.4 1.43 ± 32% perf-profile.children.cycles-pp.btrfs_run_delayed_refs_for_head
0.00 +1.4 1.45 ± 28% perf-profile.children.cycles-pp.clockevents_program_event
0.00 +1.5 1.49 ± 24% perf-profile.children.cycles-pp.btrfs_lookup_file_extent
0.00 +1.6 1.56 ± 21% perf-profile.children.cycles-pp.btrfs_get_64
0.00 +1.7 1.72 ± 23% perf-profile.children.cycles-pp.btrfs_buffered_write
0.00 +1.7 1.73 ± 23% perf-profile.children.cycles-pp.btrfs_file_write_iter
0.00 +1.8 1.75 ± 21% perf-profile.children.cycles-pp.btrfs_run_delalloc_range
0.00 +1.8 1.76 ± 23% perf-profile.children.cycles-pp.new_sync_write
0.00 +1.8 1.78 ± 23% perf-profile.children.cycles-pp.ksys_write
0.00 +1.8 1.78 ± 23% perf-profile.children.cycles-pp.vfs_write
0.00 +1.8 1.79 ± 23% perf-profile.children.cycles-pp.__libc_write
0.00 +1.8 1.80 ± 31% perf-profile.children.cycles-pp.btrfs_run_delayed_refs
0.00 +1.8 1.80 ± 31% perf-profile.children.cycles-pp.__btrfs_run_delayed_refs
0.00 +1.9 1.91 ± 21% perf-profile.children.cycles-pp.writepage_delalloc
0.00 +1.9 1.95 ± 55% perf-profile.children.cycles-pp.update_process_times
0.00 +2.0 1.97 ± 16% perf-profile.children.cycles-pp.native_queued_spin_lock_slowpath
0.00 +2.0 2.03 ± 24% perf-profile.children.cycles-pp.add_pending_csums
0.00 +2.0 2.03 ± 24% perf-profile.children.cycles-pp.btrfs_csum_file_blocks
0.00 +2.0 2.04 ± 57% perf-profile.children.cycles-pp.tick_sched_handle
0.00 +2.1 2.14 ± 21% perf-profile.children.cycles-pp.__extent_writepage
0.00 +2.1 2.15 ± 12% perf-profile.children.cycles-pp.tick_nohz_next_event
0.00 +2.2 2.23 ± 21% perf-profile.children.cycles-pp.extent_writepages
0.00 +2.2 2.23 ± 21% perf-profile.children.cycles-pp.extent_write_cache_pages
0.00 +2.5 2.46 ± 11% perf-profile.children.cycles-pp.__softirqentry_text_start
0.00 +2.5 2.48 ± 32% perf-profile.children.cycles-pp.flush_space
0.00 +2.5 2.48 ± 13% perf-profile.children.cycles-pp.do_softirq_own_stack
0.00 +2.5 2.48 ± 33% perf-profile.children.cycles-pp.btrfs_async_reclaim_metadata_space
0.00 +2.5 2.49 ± 8% perf-profile.children.cycles-pp.tick_nohz_get_sleep_length
0.00 +2.5 2.50 ± 45% perf-profile.children.cycles-pp.tick_sched_timer
0.00 +2.5 2.54 ± 31% perf-profile.children.cycles-pp.__btrfs_cow_block
0.00 +2.5 2.55 ± 32% perf-profile.children.cycles-pp.btrfs_cow_block
0.00 +2.6 2.60 ± 20% perf-profile.children.cycles-pp.check_extent_data_item
0.00 +2.6 2.63 ± 28% perf-profile.children.cycles-pp.ktime_get
0.00 +2.6 2.65 ± 19% perf-profile.children.cycles-pp.btrfs_get_32
0.00 +2.9 2.92 ± 12% perf-profile.children.cycles-pp.irq_exit_rcu
0.00 +2.9 2.94 ± 25% perf-profile.children.cycles-pp.btrfs_mark_extent_written
0.00 +3.9 3.89 ± 43% perf-profile.children.cycles-pp.__hrtimer_run_queues
0.00 +5.7 5.72 ± 24% perf-profile.children.cycles-pp.btrfs_search_slot
0.00 +6.3 6.26 ± 21% perf-profile.children.cycles-pp.hrtimer_interrupt
0.00 +6.4 6.41 ± 22% perf-profile.children.cycles-pp.__sysvec_apic_timer_interrupt
0.00 +6.8 6.84 ± 24% perf-profile.children.cycles-pp.btrfs_finish_ordered_io
0.00 +6.9 6.90 ± 19% perf-profile.children.cycles-pp.check_leaf
0.00 +7.0 7.00 ± 19% perf-profile.children.cycles-pp.btree_csum_one_bio
0.00 +7.1 7.06 ± 24% perf-profile.children.cycles-pp.btrfs_work_helper
0.00 +7.2 7.19 ± 19% perf-profile.children.cycles-pp.btree_submit_bio_hook
0.00 +7.3 7.29 ± 19% perf-profile.children.cycles-pp.submit_extent_page
0.00 +7.3 7.33 ± 19% perf-profile.children.cycles-pp.submit_one_bio
0.00 +7.4 7.41 ± 19% perf-profile.children.cycles-pp.write_one_eb
0.00 +7.7 7.68 ± 19% perf-profile.children.cycles-pp.btree_write_cache_pages
0.00 +9.4 9.45 ± 20% perf-profile.children.cycles-pp.__writeback_single_inode
0.00 +9.4 9.45 ± 20% perf-profile.children.cycles-pp.wb_writeback
0.00 +9.4 9.45 ± 20% perf-profile.children.cycles-pp.writeback_sb_inodes
0.00 +9.5 9.45 ± 20% perf-profile.children.cycles-pp.wb_workfn
0.00 +9.6 9.63 ± 20% perf-profile.children.cycles-pp.asm_call_on_stack
0.00 +9.9 9.92 ± 19% perf-profile.children.cycles-pp.do_writepages
0.00 +11.6 11.55 ± 16% perf-profile.children.cycles-pp.sysvec_apic_timer_interrupt
0.00 +16.1 16.05 ± 43% perf-profile.children.cycles-pp.menu_select
0.00 +16.6 16.56 ± 8% perf-profile.children.cycles-pp.asm_sysvec_apic_timer_interrupt
0.00 +19.1 19.14 ± 22% perf-profile.children.cycles-pp.process_one_work
0.00 +19.3 19.29 ± 22% perf-profile.children.cycles-pp.worker_thread
0.00 +19.4 19.36 ± 22% perf-profile.children.cycles-pp.kthread
0.00 +19.4 19.36 ± 22% perf-profile.children.cycles-pp.ret_from_fork
50.00 ± 23% +27.1 77.12 ± 6% perf-profile.children.cycles-pp.start_secondary
50.00 ± 23% +27.6 77.60 ± 6% perf-profile.children.cycles-pp.do_idle
50.00 ± 23% +27.6 77.61 ± 6% perf-profile.children.cycles-pp.secondary_startup_64
50.00 ± 23% +27.6 77.61 ± 6% perf-profile.children.cycles-pp.cpu_startup_entry
50.00 ± 23% -15.2 34.80 ± 13% perf-profile.self.cycles-pp.intel_idle
4.77 ±100% -4.8 0.00 perf-profile.self.cycles-pp.__handle_mm_fault
0.00 +0.1 0.05 perf-profile.self.cycles-pp.find_ref_head
0.00 +0.1 0.06 ± 14% perf-profile.self.cycles-pp.memzero_extent_buffer
0.00 +0.1 0.06 ± 28% perf-profile.self.cycles-pp.clockevents_program_event
0.00 +0.1 0.06 ± 11% perf-profile.self.cycles-pp.rcu_idle_exit
0.00 +0.1 0.07 ± 26% perf-profile.self.cycles-pp.__switch_to_asm
0.00 +0.1 0.07 ± 33% perf-profile.self.cycles-pp.mark_page_accessed
0.00 +0.1 0.07 ± 31% perf-profile.self.cycles-pp.__btrfs_cow_block
0.00 +0.1 0.07 ± 35% perf-profile.self.cycles-pp.__sysvec_apic_timer_interrupt
0.00 +0.1 0.07 ± 38% perf-profile.self.cycles-pp.__slab_free
0.00 +0.1 0.08 ± 21% perf-profile.self.cycles-pp.__switch_to
0.00 +0.1 0.08 ± 23% perf-profile.self.cycles-pp.__module_address
0.00 +0.1 0.08 ± 32% perf-profile.self.cycles-pp.get_next_timer_interrupt
0.00 +0.1 0.08 ± 40% perf-profile.self.cycles-pp.xas_find_marked
0.00 +0.1 0.08 ± 17% perf-profile.self.cycles-pp.btrfs_super_csum_size
0.00 +0.1 0.08 ± 13% perf-profile.self.cycles-pp.btrfs_search_slot
0.00 +0.1 0.08 ± 15% perf-profile.self.cycles-pp.clear_page_dirty_for_io
0.00 +0.1 0.09 ± 35% perf-profile.self.cycles-pp.crc_128
0.00 +0.1 0.09 ± 34% perf-profile.self.cycles-pp.kmem_cache_free
0.00 +0.1 0.09 ± 8% perf-profile.self.cycles-pp.kmem_cache_alloc
0.00 +0.1 0.09 ± 25% perf-profile.self.cycles-pp.asm_sysvec_apic_timer_interrupt
0.00 +0.1 0.10 ± 15% perf-profile.self.cycles-pp.tick_sched_timer
0.00 +0.1 0.10 ± 46% perf-profile.self.cycles-pp.load_balance
0.00 +0.1 0.10 ± 33% perf-profile.self.cycles-pp.idle_cpu
0.00 +0.1 0.10 ± 39% perf-profile.self.cycles-pp.calc_global_load_tick
0.00 +0.1 0.10 ± 44% perf-profile.self.cycles-pp.memcpy_erms
0.00 +0.1 0.11 ± 10% perf-profile.self.cycles-pp.call_cpuidle
0.00 +0.1 0.11 ± 87% perf-profile.self.cycles-pp.sched_clock_cpu
0.00 +0.1 0.11 ± 39% perf-profile.self.cycles-pp.rb_erase
0.00 +0.1 0.11 ± 14% perf-profile.self.cycles-pp.rcu_dynticks_eqs_exit
0.00 +0.1 0.11 ± 27% perf-profile.self.cycles-pp.find_extent_buffer
0.00 +0.1 0.11 ± 21% perf-profile.self.cycles-pp.setup_items_for_insert
0.00 +0.1 0.11 ± 14% perf-profile.self.cycles-pp.rcu_dynticks_eqs_enter
0.00 +0.1 0.11 ± 19% perf-profile.self.cycles-pp.hrtimer_interrupt
0.00 +0.1 0.12 ± 25% perf-profile.self.cycles-pp.___might_sleep
0.00 +0.1 0.12 ± 20% perf-profile.self.cycles-pp.account_process_tick
0.00 +0.1 0.12 ± 25% perf-profile.self.cycles-pp.down_read
0.00 +0.1 0.12 ± 21% perf-profile.self.cycles-pp.__sched_text_start
0.00 +0.1 0.12 ± 19% perf-profile.self.cycles-pp.tsc_verify_tsc_adjust
0.00 +0.1 0.12 ± 48% perf-profile.self.cycles-pp.__hrtimer_run_queues
0.00 +0.1 0.13 ± 23% perf-profile.self.cycles-pp.rebalance_domains
0.00 +0.1 0.13 ± 78% perf-profile.self.cycles-pp.rb_next
0.00 +0.1 0.13 ± 49% perf-profile.self.cycles-pp.run_timer_softirq
0.00 +0.1 0.14 ± 23% perf-profile.self.cycles-pp.queued_write_lock_slowpath
0.00 +0.1 0.14 ± 26% perf-profile.self.cycles-pp.find_free_extent
0.00 +0.1 0.14 ± 64% perf-profile.self.cycles-pp.nr_iowait_cpu
0.00 +0.1 0.15 ± 51% perf-profile.self.cycles-pp.rb_insert_color
0.00 +0.2 0.15 ± 48% perf-profile.self.cycles-pp.update_rq_clock
0.00 +0.2 0.15 ± 17% perf-profile.self.cycles-pp.btrfs_comp_cpu_keys
0.00 +0.2 0.15 ± 26% perf-profile.self.cycles-pp.__hrtimer_next_event_base
0.00 +0.2 0.16 ± 24% perf-profile.self.cycles-pp._raw_spin_lock_irq
0.00 +0.2 0.17 ± 19% perf-profile.self.cycles-pp.run_local_timers
0.00 +0.2 0.17 ± 27% perf-profile.self.cycles-pp.xas_load
0.00 +0.2 0.17 ± 28% perf-profile.self.cycles-pp.__radix_tree_lookup
0.00 +0.2 0.17 ± 27% perf-profile.self.cycles-pp.__orc_find
0.00 +0.2 0.17 ± 43% perf-profile.self.cycles-pp.update_irq_load_avg
0.00 +0.2 0.18 ± 18% perf-profile.self.cycles-pp.add_delayed_ref_head
0.00 +0.2 0.20 ± 9% perf-profile.self.cycles-pp.__softirqentry_text_start
0.00 +0.2 0.20 ± 4% perf-profile.self.cycles-pp.perf_mux_hrtimer_handler
0.00 +0.2 0.20 ± 29% perf-profile.self.cycles-pp.arch_scale_freq_tick
0.00 +0.2 0.21 ± 17% perf-profile.self.cycles-pp.unwind_next_frame
0.00 +0.2 0.21 ± 55% perf-profile.self.cycles-pp.io_serial_in
0.00 +0.2 0.22 ± 25% perf-profile.self.cycles-pp.memmove
0.00 +0.2 0.25 ± 5% perf-profile.self.cycles-pp.__intel_pmu_enable_all
0.00 +0.3 0.27 ± 15% perf-profile.self.cycles-pp.__next_timer_interrupt
0.00 +0.3 0.27 ± 39% perf-profile.self.cycles-pp.generic_bin_search
0.00 +0.3 0.29 ± 8% perf-profile.self.cycles-pp.update_sd_lb_stats
0.00 +0.3 0.30 ± 14% perf-profile.self.cycles-pp.do_idle
0.00 +0.3 0.30 ± 24% perf-profile.self.cycles-pp.btrfs_get_8
0.00 +0.3 0.33 ± 21% perf-profile.self.cycles-pp.check_extent_data_item
0.00 +0.4 0.36 ± 30% perf-profile.self.cycles-pp.tick_nohz_next_event
0.00 +0.4 0.36 ± 12% perf-profile.self.cycles-pp.__lock_text_start
0.00 +0.4 0.37 ± 23% perf-profile.self.cycles-pp.btrfs_set_token_32
0.00 +0.4 0.42 ± 13% perf-profile.self.cycles-pp.native_sched_clock
0.00 +0.4 0.42 ± 16% perf-profile.self.cycles-pp.ktime_get_update_offsets_now
0.00 +0.4 0.44 ± 7% perf-profile.self.cycles-pp.read_tsc
0.00 +0.5 0.46 ± 34% perf-profile.self.cycles-pp.btrfs_get_token_32
0.00 +0.5 0.47 ±106% perf-profile.self.cycles-pp.rcu_sched_clock_irq
0.00 +0.5 0.49 ± 10% perf-profile.self.cycles-pp.lapic_next_deadline
0.00 +0.5 0.51 ± 41% perf-profile.self.cycles-pp._raw_spin_trylock
0.00 +0.5 0.51 ± 38% perf-profile.self.cycles-pp.__etree_search
0.00 +0.5 0.52 ± 41% perf-profile.self.cycles-pp.__lookup_extent_mapping
0.00 +0.6 0.65 ± 17% perf-profile.self.cycles-pp.irqtime_account_irq
0.00 +0.7 0.65 ± 51% perf-profile.self.cycles-pp.timekeeping_max_deferment
0.00 +0.8 0.84 ± 19% perf-profile.self.cycles-pp.read_extent_buffer
0.00 +1.1 1.12 ± 44% perf-profile.self.cycles-pp.copy_page
0.00 +1.1 1.13 ± 23% perf-profile.self.cycles-pp.check_setget_bounds
0.00 +1.2 1.16 ± 22% perf-profile.self.cycles-pp.check_leaf
0.00 +1.2 1.22 ± 20% perf-profile.self.cycles-pp.btrfs_get_64
0.00 +2.0 1.96 ± 16% perf-profile.self.cycles-pp.native_queued_spin_lock_slowpath
0.00 +2.0 2.04 ± 18% perf-profile.self.cycles-pp.btrfs_get_32
0.00 +2.2 2.23 ± 34% perf-profile.self.cycles-pp.ktime_get
0.00 +11.6 11.63 ± 50% perf-profile.self.cycles-pp.cpuidle_enter_state
0.00 +13.3 13.34 ± 51% perf-profile.self.cycles-pp.menu_select
9929 ± 19% +275.1% 37243 ± 8% softirqs.CPU0.RCU
7725 ± 4% +446.9% 42252 ± 4% softirqs.CPU0.SCHED
24234 ± 2% +480.7% 140715 ± 10% softirqs.CPU0.TIMER
7563 ± 7% +376.2% 36020 ± 8% softirqs.CPU1.RCU
5632 ± 3% +592.5% 39000 ± 12% softirqs.CPU1.SCHED
19850 +585.1% 136004 ± 9% softirqs.CPU1.TIMER
7338 ± 5% +385.9% 35657 ± 12% softirqs.CPU10.RCU
4924 ± 10% +664.1% 37626 ± 9% softirqs.CPU10.SCHED
18937 ± 3% +605.7% 133629 ± 10% softirqs.CPU10.TIMER
6745 ± 6% +435.1% 36096 ± 16% softirqs.CPU100.RCU
5306 ± 2% +567.5% 35423 ± 20% softirqs.CPU100.SCHED
18790 ± 4% +586.9% 129065 ± 11% softirqs.CPU100.TIMER
6804 ± 7% +432.2% 36211 ± 16% softirqs.CPU101.RCU
5317 ± 3% +644.8% 39603 ± 2% softirqs.CPU101.SCHED
19083 ± 4% +579.8% 129734 ± 11% softirqs.CPU101.TIMER
7404 ± 11% +349.3% 33265 ± 3% softirqs.CPU102.RCU
5550 ± 7% +580.5% 37773 ± 4% softirqs.CPU102.SCHED
18967 ± 3% +577.1% 128424 ± 10% softirqs.CPU102.TIMER
6864 ± 5% +422.2% 35845 ± 14% softirqs.CPU103.RCU
5330 ± 4% +635.8% 39220 ± 2% softirqs.CPU103.SCHED
18679 ± 5% +594.3% 129694 ± 10% softirqs.CPU103.TIMER
6945 ± 6% +393.1% 34249 ± 6% softirqs.CPU104.RCU
5287 ± 4% +645.7% 39430 softirqs.CPU104.SCHED
18858 ± 2% +586.2% 129410 ± 10% softirqs.CPU104.TIMER
6717 ± 4% +440.1% 36280 ± 15% softirqs.CPU105.RCU
4916 ± 10% +697.7% 39221 softirqs.CPU105.SCHED
18633 ± 2% +600.2% 130475 ± 10% softirqs.CPU105.TIMER
6933 ± 5% +421.7% 36171 ± 18% softirqs.CPU106.RCU
4959 ± 8% +707.5% 40046 ± 4% softirqs.CPU106.SCHED
18385 ± 2% +611.4% 130796 ± 12% softirqs.CPU106.TIMER
6815 ± 4% +424.8% 35766 ± 14% softirqs.CPU107.RCU
5342 ± 2% +639.0% 39480 ± 2% softirqs.CPU107.SCHED
18823 ± 2% +587.4% 129388 ± 11% softirqs.CPU107.TIMER
6677 ± 4% +429.1% 35335 ± 12% softirqs.CPU108.RCU
5081 ± 9% +676.4% 39454 softirqs.CPU108.SCHED
18551 ± 2% +594.5% 128846 ± 10% softirqs.CPU108.TIMER
6563 ± 3% +429.7% 34768 ± 12% softirqs.CPU109.RCU
5297 ± 2% +609.6% 37590 ± 8% softirqs.CPU109.SCHED
18539 ± 2% +604.9% 130674 ± 9% softirqs.CPU109.TIMER
7671 ± 8% +360.1% 35292 ± 11% softirqs.CPU11.RCU
5481 ± 5% +603.4% 38557 ± 5% softirqs.CPU11.SCHED
19914 ± 5% +565.8% 132592 ± 11% softirqs.CPU11.TIMER
6707 ± 4% +426.4% 35310 ± 13% softirqs.CPU110.RCU
4972 ± 11% +703.5% 39953 ± 2% softirqs.CPU110.SCHED
18639 ± 2% +603.4% 131105 ± 10% softirqs.CPU110.TIMER
6526 ± 3% +435.8% 34964 ± 15% softirqs.CPU111.RCU
5261 ± 2% +648.7% 39393 softirqs.CPU111.SCHED
18487 ± 2% +604.9% 130317 ± 10% softirqs.CPU111.TIMER
6375 ± 6% +402.7% 32049 ± 7% softirqs.CPU112.RCU
5288 ± 3% +644.6% 39379 ± 2% softirqs.CPU112.SCHED
18429 ± 3% +596.5% 128361 ± 11% softirqs.CPU112.TIMER
6376 ± 5% +407.6% 32366 ± 7% softirqs.CPU113.RCU
5106 ± 7% +670.7% 39354 softirqs.CPU113.SCHED
18514 ± 2% +598.1% 129257 ± 11% softirqs.CPU113.TIMER
6447 ± 3% +413.3% 33093 ± 11% softirqs.CPU114.RCU
5258 ± 2% +646.2% 39240 ± 2% softirqs.CPU114.SCHED
18546 ± 2% +601.2% 130050 ± 11% softirqs.CPU114.TIMER
6361 ± 4% +418.0% 32950 ± 11% softirqs.CPU115.RCU
5268 ± 2% +644.8% 39237 ± 2% softirqs.CPU115.SCHED
18389 ± 3% +607.5% 130112 ± 11% softirqs.CPU115.TIMER
6377 ± 4% +404.8% 32196 ± 8% softirqs.CPU116.RCU
4865 ± 14% +712.1% 39510 ± 2% softirqs.CPU116.SCHED
18637 ± 3% +600.1% 130485 ± 12% softirqs.CPU116.TIMER
6664 ± 9% +381.7% 32102 ± 8% softirqs.CPU117.RCU
5246 ± 3% +524.5% 32764 ± 32% softirqs.CPU117.SCHED
19461 ± 8% +565.4% 129501 ± 10% softirqs.CPU117.TIMER
6405 ± 4% +410.6% 32705 ± 10% softirqs.CPU118.RCU
5297 ± 2% +650.9% 39781 ± 2% softirqs.CPU118.SCHED
18629 ± 2% +601.0% 130592 ± 11% softirqs.CPU118.TIMER
6370 ± 6% +406.1% 32240 ± 8% softirqs.CPU119.RCU
5270 ± 3% +643.2% 39170 softirqs.CPU119.SCHED
18376 ± 2% +603.0% 129194 ± 11% softirqs.CPU119.TIMER
158.25 ± 15% +2643.0% 4340 ±150% softirqs.CPU12.NET_RX
7430 ± 4% +384.3% 35986 ± 10% softirqs.CPU12.RCU
4872 ± 11% +702.9% 39119 ± 3% softirqs.CPU12.SCHED
19056 ± 2% +603.6% 134089 ± 11% softirqs.CPU12.TIMER
6417 ± 3% +390.5% 31473 ± 5% softirqs.CPU120.RCU
5418 +603.9% 38137 ± 9% softirqs.CPU120.SCHED
19298 ± 3% +563.0% 127952 ± 8% softirqs.CPU120.TIMER
6483 ± 4% +389.2% 31716 ± 5% softirqs.CPU121.RCU
5080 ± 9% +666.6% 38946 ± 4% softirqs.CPU121.SCHED
19017 ± 3% +565.5% 126565 ± 11% softirqs.CPU121.TIMER
6424 ± 3% +398.1% 31998 ± 6% softirqs.CPU122.RCU
5288 ± 4% +644.3% 39362 ± 2% softirqs.CPU122.SCHED
18685 ± 4% +582.5% 127527 ± 11% softirqs.CPU122.TIMER
7462 ± 27% +321.6% 31461 ± 6% softirqs.CPU123.RCU
5454 ± 3% +612.9% 38887 ± 4% softirqs.CPU123.SCHED
20044 ± 10% +529.7% 126218 ± 11% softirqs.CPU123.TIMER
6664 ± 7% +384.0% 32255 ± 6% softirqs.CPU124.RCU
5807 ± 8% +571.2% 38979 ± 5% softirqs.CPU124.SCHED
19465 ± 3% +556.5% 127800 ± 10% softirqs.CPU124.TIMER
6528 ± 3% +385.6% 31705 ± 6% softirqs.CPU125.RCU
5413 ± 2% +616.6% 38791 ± 7% softirqs.CPU125.SCHED
18994 +569.8% 127225 ± 9% softirqs.CPU125.TIMER
6753 ± 3% +370.0% 31740 ± 6% softirqs.CPU126.RCU
5406 +606.8% 38212 ± 8% softirqs.CPU126.SCHED
19088 +559.9% 125967 ± 10% softirqs.CPU126.TIMER
6823 ± 10% +359.2% 31335 ± 5% softirqs.CPU127.RCU
5385 +593.8% 37364 ± 12% softirqs.CPU127.SCHED
18756 +566.3% 124972 ± 9% softirqs.CPU127.TIMER
6561 ± 3% +423.1% 34321 ± 7% softirqs.CPU128.RCU
5362 +595.0% 37267 ± 12% softirqs.CPU128.SCHED
18749 +562.2% 124165 ± 8% softirqs.CPU128.TIMER
6607 ± 3% +420.0% 34358 ± 12% softirqs.CPU129.RCU
5371 ± 2% +609.2% 38092 ± 8% softirqs.CPU129.SCHED
18808 +572.1% 126407 ± 12% softirqs.CPU129.TIMER
7340 ± 4% +378.8% 35147 ± 10% softirqs.CPU13.RCU
5315 ± 2% +640.0% 39332 ± 2% softirqs.CPU13.SCHED
19274 ± 4% +598.9% 134712 ± 10% softirqs.CPU13.TIMER
6551 ± 2% +422.0% 34203 ± 7% softirqs.CPU130.RCU
5058 ± 13% +651.7% 38026 ± 9% softirqs.CPU130.SCHED
18805 +571.1% 126203 ± 9% softirqs.CPU130.TIMER
6971 ± 11% +406.9% 35337 ± 13% softirqs.CPU131.RCU
5407 ± 2% +633.0% 39635 ± 2% softirqs.CPU131.SCHED
19024 +574.8% 128382 ± 10% softirqs.CPU131.TIMER
6523 ± 2% +422.6% 34092 ± 10% softirqs.CPU132.RCU
5393 +531.7% 34068 ± 20% softirqs.CPU132.SCHED
18699 +576.7% 126544 ± 9% softirqs.CPU132.TIMER
6872 ± 7% +407.9% 34906 ± 10% softirqs.CPU133.RCU
5390 +627.2% 39197 ± 3% softirqs.CPU133.SCHED
18833 +573.1% 126762 ± 11% softirqs.CPU133.TIMER
6599 ± 3% +423.6% 34554 ± 9% softirqs.CPU134.RCU
5405 ± 2% +631.6% 39549 softirqs.CPU134.SCHED
18731 +571.7% 125829 ± 11% softirqs.CPU134.TIMER
6543 ± 3% +427.1% 34491 ± 9% softirqs.CPU135.RCU
5466 ± 2% +568.6% 36548 ± 16% softirqs.CPU135.SCHED
18926 ± 2% +565.2% 125886 ± 10% softirqs.CPU135.TIMER
6535 ± 3% +435.1% 34966 ± 10% softirqs.CPU136.RCU
5380 ± 2% +611.0% 38254 ± 8% softirqs.CPU136.SCHED
18901 ± 3% +569.8% 126604 ± 10% softirqs.CPU136.TIMER
6585 ± 3% +426.6% 34680 ± 7% softirqs.CPU137.RCU
5429 ± 2% +594.1% 37684 ± 11% softirqs.CPU137.SCHED
18988 ± 2% +562.2% 125741 ± 9% softirqs.CPU137.TIMER
6567 ± 3% +428.7% 34720 ± 8% softirqs.CPU138.RCU
5415 ± 2% +605.1% 38183 ± 9% softirqs.CPU138.SCHED
18872 ± 2% +572.8% 126980 ± 8% softirqs.CPU138.TIMER
7237 ± 9% +382.5% 34920 ± 8% softirqs.CPU139.RCU
5319 ± 3% +611.6% 37855 ± 9% softirqs.CPU139.SCHED
19559 ± 6% +539.8% 125130 ± 8% softirqs.CPU139.TIMER
7400 ± 4% +380.9% 35586 ± 11% softirqs.CPU14.RCU
5214 +654.4% 39333 ± 2% softirqs.CPU14.SCHED
19159 ± 3% +604.4% 134951 ± 10% softirqs.CPU14.TIMER
6535 ± 3% +431.8% 34757 ± 10% softirqs.CPU140.RCU
5424 ± 2% +606.8% 38340 ± 7% softirqs.CPU140.SCHED
18805 +568.7% 125746 ± 10% softirqs.CPU140.TIMER
6632 +421.9% 34613 ± 9% softirqs.CPU141.RCU
5461 ± 2% +607.2% 38619 ± 6% softirqs.CPU141.SCHED
18835 ± 2% +572.5% 126669 ± 11% softirqs.CPU141.TIMER
6607 ± 3% +418.0% 34228 ± 8% softirqs.CPU142.RCU
5410 +603.0% 38037 ± 9% softirqs.CPU142.SCHED
19045 +565.3% 126713 ± 9% softirqs.CPU142.TIMER
6552 ± 3% +427.9% 34593 ± 10% softirqs.CPU143.RCU
5408 +616.1% 38733 ± 5% softirqs.CPU143.SCHED
18719 ± 2% +571.1% 125618 ± 10% softirqs.CPU143.TIMER
6889 ± 8% +407.1% 34934 ± 8% softirqs.CPU144.RCU
5369 +540.7% 34401 ± 20% softirqs.CPU144.SCHED
18794 ± 2% +582.6% 128291 ± 8% softirqs.CPU144.TIMER
6481 ± 3% +431.9% 34474 ± 7% softirqs.CPU145.RCU
5377 ± 2% +641.0% 39844 softirqs.CPU145.SCHED
18616 ± 2% +578.1% 126234 ± 10% softirqs.CPU145.TIMER
6439 ± 3% +431.8% 34246 ± 7% softirqs.CPU146.RCU
5367 +636.2% 39513 softirqs.CPU146.SCHED
18650 ± 2% +579.4% 126710 ± 10% softirqs.CPU146.TIMER
6489 ± 3% +437.0% 34847 ± 10% softirqs.CPU147.RCU
5353 +644.6% 39859 ± 2% softirqs.CPU147.SCHED
18706 +573.9% 126072 ± 10% softirqs.CPU147.TIMER
6558 ± 3% +430.9% 34813 ± 10% softirqs.CPU148.RCU
5438 +636.0% 40022 softirqs.CPU148.SCHED
18809 ± 2% +579.1% 127744 ± 10% softirqs.CPU148.TIMER
6481 ± 2% +437.3% 34823 ± 10% softirqs.CPU149.RCU
5398 +562.0% 35736 ± 20% softirqs.CPU149.SCHED
18837 +578.6% 127835 ± 9% softirqs.CPU149.TIMER
7192 ± 3% +385.8% 34937 ± 12% softirqs.CPU15.RCU
4933 ± 13% +692.4% 39093 ± 2% softirqs.CPU15.SCHED
19002 ± 2% +605.3% 134017 ± 10% softirqs.CPU15.TIMER
6503 ± 2% +435.7% 34839 ± 9% softirqs.CPU150.RCU
5343 +648.6% 39995 softirqs.CPU150.SCHED
18649 ± 2% +579.3% 126678 ± 10% softirqs.CPU150.TIMER
6838 ± 9% +419.3% 35514 ± 13% softirqs.CPU151.RCU
5313 ± 2% +651.2% 39914 softirqs.CPU151.SCHED
19167 ± 4% +567.0% 127856 ± 10% softirqs.CPU151.TIMER
6664 ± 3% +442.0% 36120 ± 12% softirqs.CPU152.RCU
5370 ± 15% +650.9% 40327 softirqs.CPU152.SCHED
18888 ± 3% +575.6% 127615 ± 10% softirqs.CPU152.TIMER
6779 ± 7% +418.5% 35152 ± 13% softirqs.CPU153.RCU
5505 ± 4% +622.4% 39770 softirqs.CPU153.SCHED
19241 ± 6% +553.3% 125704 ± 10% softirqs.CPU153.TIMER
6446 ± 3% +446.5% 35227 ± 13% softirqs.CPU154.RCU
5350 +644.8% 39848 softirqs.CPU154.SCHED
18631 +576.1% 125962 ± 10% softirqs.CPU154.TIMER
6470 ± 2% +446.0% 35329 ± 13% softirqs.CPU155.RCU
5437 +633.9% 39906 softirqs.CPU155.SCHED
18909 ± 2% +568.7% 126453 ± 9% softirqs.CPU155.TIMER
6486 ± 3% +448.2% 35554 ± 13% softirqs.CPU156.RCU
5395 +642.1% 40035 softirqs.CPU156.SCHED
18659 ± 2% +576.5% 126240 ± 10% softirqs.CPU156.TIMER
6373 ± 3% +447.8% 34918 ± 10% softirqs.CPU157.RCU
5327 +565.4% 35450 ± 20% softirqs.CPU157.SCHED
18452 +585.6% 126513 ± 9% softirqs.CPU157.TIMER
6426 ± 3% +445.7% 35065 ± 9% softirqs.CPU158.RCU
5428 ± 2% +636.3% 39970 softirqs.CPU158.SCHED
18536 +580.7% 126181 ± 10% softirqs.CPU158.TIMER
7106 ± 10% +389.2% 34765 ± 9% softirqs.CPU159.RCU
5371 +646.6% 40107 softirqs.CPU159.SCHED
19996 ± 7% +533.6% 126694 ± 9% softirqs.CPU159.TIMER
7066 ± 2% +357.3% 32318 ± 4% softirqs.CPU16.RCU
5291 ± 3% +552.3% 34517 ± 20% softirqs.CPU16.SCHED
19126 +583.2% 130671 ± 10% softirqs.CPU16.TIMER
6414 ± 4% +424.5% 33643 ± 7% softirqs.CPU160.RCU
5356 +637.7% 39515 softirqs.CPU160.SCHED
18510 +579.2% 125728 ± 10% softirqs.CPU160.TIMER
6753 ± 6% +422.3% 35271 ± 12% softirqs.CPU161.RCU
5530 ± 4% +622.5% 39956 softirqs.CPU161.SCHED
18837 +566.6% 125572 ± 10% softirqs.CPU161.TIMER
6398 ± 4% +451.6% 35291 ± 11% softirqs.CPU162.RCU
5365 ± 2% +645.9% 40019 softirqs.CPU162.SCHED
18641 +579.4% 126643 ± 9% softirqs.CPU162.TIMER
6386 ± 3% +451.9% 35245 ± 11% softirqs.CPU163.RCU
5312 ± 2% +648.6% 39766 softirqs.CPU163.SCHED
18426 +584.7% 126169 ± 10% softirqs.CPU163.TIMER
6368 ± 3% +458.9% 35594 ± 11% softirqs.CPU164.RCU
5369 +644.0% 39951 softirqs.CPU164.SCHED
18559 ± 2% +577.3% 125695 ± 10% softirqs.CPU164.TIMER
6574 ± 5% +434.4% 35133 ± 10% softirqs.CPU165.RCU
5481 ± 5% +629.5% 39986 softirqs.CPU165.SCHED
18637 ± 2% +575.6% 125918 ± 10% softirqs.CPU165.TIMER
6582 ± 2% +426.9% 34681 ± 10% softirqs.CPU166.RCU
5463 +631.9% 39988 softirqs.CPU166.SCHED
18982 ± 2% +568.0% 126810 ± 9% softirqs.CPU166.TIMER
6459 ± 3% +427.7% 34084 ± 10% softirqs.CPU167.RCU
5475 ± 2% +628.5% 39891 ± 2% softirqs.CPU167.SCHED
18716 ± 2% +570.7% 125523 ± 10% softirqs.CPU167.TIMER
6286 ± 2% +437.8% 33808 ± 10% softirqs.CPU168.RCU
5320 +494.8% 31643 ± 26% softirqs.CPU168.SCHED
18449 +596.5% 128501 ± 9% softirqs.CPU168.TIMER
6145 ± 4% +427.7% 32430 ± 6% softirqs.CPU169.RCU
5326 ± 2% +654.2% 40176 softirqs.CPU169.SCHED
18200 ± 3% +600.8% 127549 ± 9% softirqs.CPU169.TIMER
7028 ± 2% +373.9% 33305 ± 5% softirqs.CPU17.RCU
5303 ± 4% +631.0% 38765 softirqs.CPU17.SCHED
18849 +597.6% 131488 ± 11% softirqs.CPU17.TIMER
6537 ± 6% +395.2% 32373 ± 7% softirqs.CPU170.RCU
5383 +609.7% 38202 ± 9% softirqs.CPU170.SCHED
18379 +587.9% 126439 ± 7% softirqs.CPU170.TIMER
6064 ± 4% +450.9% 33409 ± 12% softirqs.CPU171.RCU
5359 +653.0% 40355 softirqs.CPU171.SCHED
18159 ± 2% +606.6% 128309 ± 10% softirqs.CPU171.TIMER
6120 ± 5% +446.2% 33434 ± 12% softirqs.CPU172.RCU
5077 ± 11% +695.6% 40394 softirqs.CPU172.SCHED
18276 ± 2% +600.4% 128011 ± 10% softirqs.CPU172.TIMER
6183 ± 4% +448.1% 33889 ± 12% softirqs.CPU173.RCU
5391 +592.0% 37306 ± 14% softirqs.CPU173.SCHED
18435 ± 2% +601.3% 129282 ± 10% softirqs.CPU173.TIMER
6546 ± 15% +407.8% 33243 ± 12% softirqs.CPU174.RCU
5379 +648.1% 40246 softirqs.CPU174.SCHED
18359 ± 2% +598.5% 128239 ± 10% softirqs.CPU174.TIMER
6245 ± 5% +426.1% 32855 ± 9% softirqs.CPU175.RCU
5040 ± 12% +696.7% 40155 softirqs.CPU175.SCHED
18342 ± 2% +585.9% 125800 ± 9% softirqs.CPU175.TIMER
6294 ± 7% +430.5% 33386 ± 9% softirqs.CPU176.RCU
5377 ± 2% +646.6% 40152 softirqs.CPU176.SCHED
18322 ± 2% +594.7% 127276 ± 10% softirqs.CPU176.TIMER
6226 ± 7% +439.2% 33572 ± 7% softirqs.CPU177.RCU
5373 ± 2% +649.5% 40276 softirqs.CPU177.SCHED
18166 ± 4% +601.1% 127367 ± 10% softirqs.CPU177.TIMER
6550 ± 12% +423.0% 34261 ± 8% softirqs.CPU178.RCU
5384 ± 2% +572.9% 36234 ± 21% softirqs.CPU178.SCHED
18267 ± 2% +604.0% 128610 ± 10% softirqs.CPU178.TIMER
6185 ± 6% +463.1% 34826 ± 13% softirqs.CPU179.RCU
5360 ± 2% +652.0% 40311 softirqs.CPU179.SCHED
18141 ± 3% +600.4% 127053 ± 10% softirqs.CPU179.TIMER
7371 +359.3% 33855 ± 7% softirqs.CPU18.RCU
5200 ± 2% +642.0% 38581 ± 2% softirqs.CPU18.SCHED
19031 +592.7% 131837 ± 11% softirqs.CPU18.TIMER
6486 ± 7% +431.1% 34452 ± 11% softirqs.CPU180.RCU
5294 +660.6% 40270 softirqs.CPU180.SCHED
18554 ± 4% +583.9% 126890 ± 9% softirqs.CPU180.TIMER
6203 ± 6% +467.8% 35224 ± 12% softirqs.CPU181.RCU
5347 ± 2% +647.3% 39959 softirqs.CPU181.SCHED
18030 ± 3% +605.8% 127252 ± 9% softirqs.CPU181.TIMER
6192 ± 6% +460.3% 34699 ± 14% softirqs.CPU182.RCU
5368 ± 2% +648.7% 40196 softirqs.CPU182.SCHED
18378 ± 3% +593.5% 127450 ± 10% softirqs.CPU182.TIMER
6173 ± 6% +464.0% 34817 ± 13% softirqs.CPU183.RCU
5338 ± 2% +652.1% 40144 softirqs.CPU183.SCHED
18127 +599.2% 126746 ± 10% softirqs.CPU183.TIMER
7264 ± 21% +385.9% 35293 ± 14% softirqs.CPU184.RCU
5241 ± 5% +666.4% 40170 softirqs.CPU184.SCHED
19299 ± 7% +558.9% 127161 ± 10% softirqs.CPU184.TIMER
6283 ± 3% +468.2% 35700 ± 14% softirqs.CPU185.RCU
5384 +651.3% 40451 softirqs.CPU185.SCHED
18454 +602.1% 129562 ± 9% softirqs.CPU185.TIMER
6214 ± 6% +453.4% 34389 ± 10% softirqs.CPU186.RCU
5344 ± 2% +650.9% 40128 softirqs.CPU186.SCHED
18212 ± 3% +594.7% 126516 ± 11% softirqs.CPU186.TIMER
6293 ± 3% +468.0% 35747 ± 15% softirqs.CPU187.RCU
5431 +644.0% 40413 softirqs.CPU187.SCHED
18195 ± 3% +598.3% 127060 ± 10% softirqs.CPU187.TIMER
6437 ± 6% +438.7% 34679 ± 15% softirqs.CPU188.RCU
5504 ± 2% +628.9% 40124 softirqs.CPU188.SCHED
18667 ± 4% +578.5% 126653 ± 10% softirqs.CPU188.TIMER
6336 ± 8% +451.9% 34968 ± 13% softirqs.CPU189.RCU
5117 ± 14% +684.3% 40137 softirqs.CPU189.SCHED
18430 ± 5% +587.5% 126708 ± 10% softirqs.CPU189.TIMER
6964 ± 3% +380.5% 33463 ± 5% softirqs.CPU19.RCU
5029 ± 4% +667.7% 38614 softirqs.CPU19.SCHED
18684 ± 3% +604.9% 131704 ± 10% softirqs.CPU19.TIMER
6346 ± 7% +436.3% 34033 ± 7% softirqs.CPU190.RCU
5459 ± 4% +643.2% 40576 softirqs.CPU190.SCHED
18874 ± 4% +596.8% 131509 ± 8% softirqs.CPU190.TIMER
6163 ± 5% +453.2% 34093 ± 8% softirqs.CPU191.RCU
5375 +648.7% 40245 softirqs.CPU191.SCHED
18150 +600.9% 127220 ± 10% softirqs.CPU191.TIMER
7507 ± 7% +385.5% 36444 ± 7% softirqs.CPU2.RCU
5009 ± 12% +682.2% 39187 ± 3% softirqs.CPU2.SCHED
19156 ± 3% +599.4% 133988 ± 10% softirqs.CPU2.TIMER
7041 ± 4% +367.9% 32946 ± 6% softirqs.CPU20.RCU
5121 +663.2% 39085 ± 2% softirqs.CPU20.SCHED
18825 ± 2% +604.8% 132679 ± 10% softirqs.CPU20.TIMER
6890 ± 5% +381.1% 33151 ± 6% softirqs.CPU21.RCU
5120 ± 3% +664.5% 39146 softirqs.CPU21.SCHED
18735 ± 3% +617.9% 134493 ± 10% softirqs.CPU21.TIMER
7314 ± 8% +354.1% 33212 ± 7% softirqs.CPU22.RCU
5151 ± 2% +659.4% 39123 softirqs.CPU22.SCHED
19722 ± 5% +574.1% 132956 ± 9% softirqs.CPU22.TIMER
7556 ± 13% +342.6% 33446 ± 6% softirqs.CPU23.RCU
5106 ± 3% +658.3% 38718 softirqs.CPU23.SCHED
19588 ± 5% +581.4% 133477 ± 10% softirqs.CPU23.TIMER
6818 +381.5% 32828 ± 7% softirqs.CPU24.RCU
5272 ± 2% +681.0% 41175 ± 10% softirqs.CPU24.SCHED
19857 ± 10% +511.9% 121506 ± 16% softirqs.CPU24.TIMER
6943 ± 2% +367.1% 32433 ± 4% softirqs.CPU25.RCU
5259 +673.5% 40679 ± 9% softirqs.CPU25.SCHED
18748 +543.6% 120658 ± 17% softirqs.CPU25.TIMER
6986 ± 3% +363.3% 32369 ± 6% softirqs.CPU26.RCU
3439 ± 32% +1067.5% 40149 ± 13% softirqs.CPU26.SCHED
15431 ± 12% +663.2% 117776 ± 18% softirqs.CPU26.TIMER
6893 ± 3% +372.8% 32594 ± 6% softirqs.CPU27.RCU
4837 ± 16% +777.4% 42444 ± 17% softirqs.CPU27.SCHED
17406 ± 13% +591.6% 120379 ± 16% softirqs.CPU27.TIMER
6818 ± 3% +370.9% 32110 ± 6% softirqs.CPU28.RCU
4980 ± 7% +692.5% 39468 ± 5% softirqs.CPU28.SCHED
18791 +537.9% 119870 ± 16% softirqs.CPU28.TIMER
6883 ± 3% +371.9% 32480 ± 5% softirqs.CPU29.RCU
5198 +653.0% 39145 ± 3% softirqs.CPU29.SCHED
18484 +548.4% 119852 ± 16% softirqs.CPU29.TIMER
7446 ± 5% +383.3% 35992 ± 14% softirqs.CPU3.RCU
4851 ± 9% +707.3% 39166 ± 4% softirqs.CPU3.SCHED
19043 ± 2% +592.4% 131862 ± 12% softirqs.CPU3.TIMER
7467 ± 14% +335.1% 32491 ± 5% softirqs.CPU30.RCU
5471 ± 8% +615.1% 39125 ± 2% softirqs.CPU30.SCHED
19625 ± 9% +515.5% 120800 ± 16% softirqs.CPU30.TIMER
6878 ± 2% +383.4% 33248 ± 6% softirqs.CPU31.RCU
5193 +651.4% 39026 ± 2% softirqs.CPU31.SCHED
18574 +552.9% 121262 ± 18% softirqs.CPU31.TIMER
7160 ± 2% +405.9% 36226 ± 7% softirqs.CPU32.RCU
5187 +649.5% 38874 ± 2% softirqs.CPU32.SCHED
18474 +557.3% 121434 ± 15% softirqs.CPU32.TIMER
7455 ± 6% +349.4% 33503 ± 11% softirqs.CPU33.RCU
5261 +562.3% 34843 ± 23% softirqs.CPU33.SCHED
19872 ± 10% +569.1% 132966 ± 17% softirqs.CPU33.TIMER
7114 ± 2% +388.6% 34758 ± 7% softirqs.CPU34.RCU
5246 +613.5% 37434 ± 3% softirqs.CPU34.SCHED
18812 ± 2% +534.3% 119321 ± 15% softirqs.CPU34.TIMER
7154 ± 2% +388.6% 34957 ± 8% softirqs.CPU35.RCU
5283 +634.7% 38811 ± 2% softirqs.CPU35.SCHED
18712 +540.0% 119754 ± 16% softirqs.CPU35.TIMER
7044 ± 2% +382.8% 34011 ± 12% softirqs.CPU36.RCU
5187 +644.3% 38609 ± 5% softirqs.CPU36.SCHED
18426 +616.8% 132080 ± 17% softirqs.CPU36.TIMER
7089 ± 3% +393.7% 35003 ± 9% softirqs.CPU37.RCU
5205 +625.3% 37755 ± 2% softirqs.CPU37.SCHED
18508 +544.0% 119194 ± 17% softirqs.CPU37.TIMER
7023 ± 2% +398.7% 35023 ± 9% softirqs.CPU38.RCU
5223 +630.5% 38159 softirqs.CPU38.SCHED
18580 +548.7% 120538 ± 15% softirqs.CPU38.TIMER
7070 ± 3% +398.8% 35266 ± 10% softirqs.CPU39.RCU
5129 +642.9% 38104 softirqs.CPU39.SCHED
18485 +547.4% 119677 ± 16% softirqs.CPU39.TIMER
7348 ± 6% +394.6% 36341 ± 13% softirqs.CPU4.RCU
5319 ± 3% +636.0% 39151 ± 3% softirqs.CPU4.SCHED
19085 ± 2% +590.6% 131807 ± 11% softirqs.CPU4.TIMER
7004 ± 2% +408.9% 35644 ± 11% softirqs.CPU40.RCU
5194 +646.1% 38756 ± 2% softirqs.CPU40.SCHED
18435 +554.0% 120559 ± 17% softirqs.CPU40.TIMER
7292 ± 5% +382.1% 35157 ± 8% softirqs.CPU41.RCU
5185 ± 3% +646.8% 38726 softirqs.CPU41.SCHED
19538 ± 6% +512.3% 119632 ± 15% softirqs.CPU41.TIMER
6998 ± 2% +404.5% 35310 ± 9% softirqs.CPU42.RCU
5232 +562.1% 34648 ± 20% softirqs.CPU42.SCHED
18573 ± 2% +544.9% 119768 ± 16% softirqs.CPU42.TIMER
7116 ± 3% +402.2% 35739 ± 9% softirqs.CPU43.RCU
5170 +647.5% 38644 softirqs.CPU43.SCHED
18410 +552.6% 120139 ± 17% softirqs.CPU43.TIMER
7471 ± 11% +368.1% 34978 ± 9% softirqs.CPU44.RCU
5249 +618.2% 37697 ± 3% softirqs.CPU44.SCHED
18597 +538.6% 118761 ± 15% softirqs.CPU44.TIMER
6995 ± 3% +398.0% 34833 ± 8% softirqs.CPU45.RCU
5191 +635.2% 38167 softirqs.CPU45.SCHED
18439 +547.3% 119356 ± 15% softirqs.CPU45.TIMER
7331 ± 11% +373.5% 34718 ± 9% softirqs.CPU46.RCU
5314 ± 3% +619.3% 38226 ± 2% softirqs.CPU46.SCHED
19745 ± 10% +508.5% 120149 ± 14% softirqs.CPU46.TIMER
6900 ± 2% +408.3% 35073 ± 9% softirqs.CPU47.RCU
5149 +562.4% 34113 ± 19% softirqs.CPU47.SCHED
18375 +551.3% 119683 ± 15% softirqs.CPU47.TIMER
7198 ± 3% +407.5% 36527 ± 12% softirqs.CPU48.RCU
5221 +662.4% 39807 softirqs.CPU48.SCHED
18427 +596.8% 128396 ± 13% softirqs.CPU48.TIMER
7248 ± 2% +398.1% 36107 ± 9% softirqs.CPU49.RCU
5321 ± 3% +634.1% 39065 ± 2% softirqs.CPU49.SCHED
18799 ± 4% +572.8% 126478 ± 15% softirqs.CPU49.TIMER
7486 ± 9% +387.1% 36467 ± 13% softirqs.CPU5.RCU
5495 ± 5% +607.0% 38851 ± 3% softirqs.CPU5.SCHED
20673 ± 7% +537.7% 131828 ± 11% softirqs.CPU5.TIMER
7071 ± 2% +419.5% 36731 ± 9% softirqs.CPU50.RCU
5236 +643.2% 38915 softirqs.CPU50.SCHED
18433 ± 2% +583.4% 125966 ± 15% softirqs.CPU50.TIMER
7195 ± 3% +405.2% 36353 ± 10% softirqs.CPU51.RCU
5322 ± 2% +636.1% 39178 ± 2% softirqs.CPU51.SCHED
18960 ± 3% +560.2% 125181 ± 14% softirqs.CPU51.TIMER
6997 ± 2% +421.7% 36501 ± 10% softirqs.CPU52.RCU
5322 +640.4% 39404 ± 2% softirqs.CPU52.SCHED
18851 +573.4% 126948 ± 15% softirqs.CPU52.TIMER
6943 ± 3% +428.5% 36693 ± 10% softirqs.CPU53.RCU
5286 +643.6% 39310 ± 2% softirqs.CPU53.SCHED
18709 +576.0% 126481 ± 15% softirqs.CPU53.TIMER
7009 ± 2% +426.8% 36927 ± 9% softirqs.CPU54.RCU
5189 +648.7% 38854 softirqs.CPU54.SCHED
18335 +590.2% 126541 ± 15% softirqs.CPU54.TIMER
7287 ± 9% +411.8% 37301 ± 12% softirqs.CPU55.RCU
5172 +661.6% 39388 ± 2% softirqs.CPU55.SCHED
19102 ± 7% +564.0% 126831 ± 16% softirqs.CPU55.TIMER
7061 ± 2% +423.2% 36947 ± 10% softirqs.CPU56.RCU
5249 +562.7% 34790 ± 19% softirqs.CPU56.SCHED
18566 ± 2% +583.8% 126964 ± 15% softirqs.CPU56.TIMER
7629 ± 12% +385.0% 37001 ± 10% softirqs.CPU57.RCU
5282 ± 2% +644.6% 39331 ± 2% softirqs.CPU57.SCHED
19675 ± 6% +539.1% 125734 ± 14% softirqs.CPU57.TIMER
7975 ± 21% +365.8% 37150 ± 12% softirqs.CPU58.RCU
5017 ± 10% +686.5% 39466 ± 2% softirqs.CPU58.SCHED
20555 ± 15% +515.3% 126483 ± 15% softirqs.CPU58.TIMER
7401 ± 5% +397.4% 36813 ± 12% softirqs.CPU59.RCU
5555 ± 8% +605.6% 39199 ± 2% softirqs.CPU59.SCHED
18748 +571.3% 125861 ± 15% softirqs.CPU59.TIMER
7303 ± 3% +360.3% 33615 ± 3% softirqs.CPU6.RCU
5233 ± 2% +634.8% 38451 ± 4% softirqs.CPU6.SCHED
19087 ± 2% +594.9% 132637 ± 10% softirqs.CPU6.TIMER
6999 ± 3% +427.1% 36891 ± 11% softirqs.CPU60.RCU
5261 ± 2% +646.8% 39290 ± 2% softirqs.CPU60.SCHED
18444 ± 2% +583.1% 125991 ± 15% softirqs.CPU60.TIMER
6913 ± 3% +422.2% 36101 ± 8% softirqs.CPU61.RCU
5222 ± 2% +648.2% 39072 ± 2% softirqs.CPU61.SCHED
18391 ± 2% +585.6% 126094 ± 15% softirqs.CPU61.TIMER
6922 ± 2% +407.4% 35121 ± 4% softirqs.CPU62.RCU
5193 +647.5% 38824 softirqs.CPU62.SCHED
18255 +591.5% 126235 ± 15% softirqs.CPU62.TIMER
6941 ± 2% +412.8% 35596 ± 5% softirqs.CPU63.RCU
5221 ± 2% +607.7% 36951 ± 11% softirqs.CPU63.SCHED
18317 ± 2% +586.8% 125808 ± 15% softirqs.CPU63.TIMER
6975 ± 4% +397.8% 34722 ± 4% softirqs.CPU64.RCU
5185 +655.1% 39154 ± 2% softirqs.CPU64.SCHED
18105 +591.9% 125278 ± 15% softirqs.CPU64.TIMER
6980 ± 5% +411.2% 35689 ± 11% softirqs.CPU65.RCU
5259 +644.0% 39125 ± 2% softirqs.CPU65.SCHED
18289 +586.4% 125541 ± 15% softirqs.CPU65.TIMER
6963 ± 3% +417.0% 36002 ± 11% softirqs.CPU66.RCU
5241 +646.0% 39105 softirqs.CPU66.SCHED
18198 ± 2% +585.8% 124796 ± 14% softirqs.CPU66.TIMER
6961 ± 3% +415.7% 35897 ± 11% softirqs.CPU67.RCU
5215 +651.5% 39188 ± 2% softirqs.CPU67.SCHED
18260 ± 2% +588.1% 125653 ± 15% softirqs.CPU67.TIMER
7021 ± 5% +410.8% 35861 ± 10% softirqs.CPU68.RCU
5228 +646.3% 39020 ± 2% softirqs.CPU68.SCHED
18242 ± 2% +585.0% 124958 ± 15% softirqs.CPU68.TIMER
6989 ± 4% +408.6% 35549 ± 9% softirqs.CPU69.RCU
5238 ± 2% +642.8% 38909 softirqs.CPU69.SCHED
18308 ± 2% +586.0% 125595 ± 15% softirqs.CPU69.TIMER
7279 ± 3% +387.2% 35462 ± 13% softirqs.CPU7.RCU
4828 ± 9% +710.4% 39130 ± 3% softirqs.CPU7.SCHED
19112 ± 2% +599.6% 133702 ± 10% softirqs.CPU7.TIMER
6870 ± 3% +412.2% 35187 ± 7% softirqs.CPU70.RCU
5262 +643.7% 39141 ± 2% softirqs.CPU70.SCHED
18395 ± 2% +582.2% 125500 ± 14% softirqs.CPU70.TIMER
6886 ± 4% +405.7% 34825 ± 6% softirqs.CPU71.RCU
5249 ± 2% +640.9% 38895 ± 2% softirqs.CPU71.SCHED
18181 +591.2% 125664 ± 15% softirqs.CPU71.TIMER
6828 ± 3% +397.7% 33985 ± 9% softirqs.CPU72.RCU
5204 ± 2% +649.5% 39004 ± 2% softirqs.CPU72.SCHED
18047 ± 3% +561.9% 119452 ± 14% softirqs.CPU72.TIMER
6745 ± 3% +402.2% 33874 ± 8% softirqs.CPU73.RCU
5268 +635.7% 38763 ± 3% softirqs.CPU73.SCHED
18249 ± 2% +550.8% 118765 ± 14% softirqs.CPU73.TIMER
6708 ± 3% +390.6% 32907 ± 4% softirqs.CPU74.RCU
5246 +635.3% 38576 ± 2% softirqs.CPU74.SCHED
18174 ± 2% +543.2% 116900 ± 12% softirqs.CPU74.TIMER
6831 ± 3% +402.3% 34314 ± 10% softirqs.CPU75.RCU
5359 ± 2% +621.6% 38672 ± 2% softirqs.CPU75.SCHED
18384 ± 2% +545.8% 118734 ± 15% softirqs.CPU75.TIMER
6913 ± 6% +398.6% 34466 ± 10% softirqs.CPU76.RCU
4941 ± 12% +681.9% 38637 ± 2% softirqs.CPU76.SCHED
18146 ± 2% +560.3% 119817 ± 15% softirqs.CPU76.TIMER
6707 ± 3% +406.0% 33936 ± 12% softirqs.CPU77.RCU
5487 ± 5% +611.3% 39033 ± 2% softirqs.CPU77.SCHED
18877 ± 6% +538.9% 120609 ± 14% softirqs.CPU77.TIMER
6824 ± 5% +396.7% 33895 ± 11% softirqs.CPU78.RCU
5315 +566.7% 35436 ± 21% softirqs.CPU78.SCHED
19213 ± 7% +589.8% 132542 ± 18% softirqs.CPU78.TIMER
6708 ± 4% +397.5% 33372 ± 6% softirqs.CPU79.RCU
5237 ± 2% +637.1% 38609 ± 2% softirqs.CPU79.SCHED
18173 ± 2% +547.3% 117633 ± 13% softirqs.CPU79.TIMER
7620 ± 6% +352.5% 34482 ± 7% softirqs.CPU8.RCU
5351 ± 5% +558.1% 35220 ± 21% softirqs.CPU8.SCHED
19298 ± 2% +590.4% 133242 ± 10% softirqs.CPU8.TIMER
6646 ± 4% +410.4% 33919 ± 10% softirqs.CPU80.RCU
5213 ± 2% +640.6% 38613 ± 3% softirqs.CPU80.SCHED
18279 ± 3% +553.7% 119493 ± 15% softirqs.CPU80.TIMER
6671 ± 4% +419.3% 34643 ± 9% softirqs.CPU81.RCU
5180 ± 2% +650.2% 38862 ± 2% softirqs.CPU81.SCHED
18178 ± 3% +560.2% 120009 ± 14% softirqs.CPU81.TIMER
6669 ± 4% +418.4% 34571 ± 11% softirqs.CPU82.RCU
5192 +646.3% 38752 ± 3% softirqs.CPU82.SCHED
18039 ± 2% +563.6% 119710 ± 15% softirqs.CPU82.TIMER
6677 ± 5% +419.3% 34671 ± 12% softirqs.CPU83.RCU
4875 ± 11% +689.8% 38503 ± 2% softirqs.CPU83.SCHED
18180 ± 3% +556.8% 119397 ± 15% softirqs.CPU83.TIMER
6670 ± 4% +408.2% 33899 ± 8% softirqs.CPU84.RCU
5149 ± 2% +650.3% 38636 ± 3% softirqs.CPU84.SCHED
18130 ± 2% +553.7% 118527 ± 14% softirqs.CPU84.TIMER
6656 ± 4% +423.3% 34831 ± 12% softirqs.CPU85.RCU
5162 +646.8% 38549 ± 3% softirqs.CPU85.SCHED
18116 ± 2% +556.6% 118947 ± 14% softirqs.CPU85.TIMER
6622 ± 3% +428.3% 34987 ± 14% softirqs.CPU86.RCU
5187 +644.7% 38630 ± 2% softirqs.CPU86.SCHED
18211 ± 2% +557.2% 119680 ± 15% softirqs.CPU86.TIMER
6718 ± 4% +416.6% 34707 ± 12% softirqs.CPU87.RCU
5244 ± 2% +635.9% 38593 ± 3% softirqs.CPU87.SCHED
18225 ± 3% +551.8% 118792 ± 14% softirqs.CPU87.TIMER
6677 ± 3% +429.1% 35330 ± 15% softirqs.CPU88.RCU
5196 +635.7% 38234 softirqs.CPU88.SCHED
18176 +556.6% 119354 ± 15% softirqs.CPU88.TIMER
6743 ± 2% +424.0% 35336 ± 15% softirqs.CPU89.RCU
5204 +638.9% 38455 ± 2% softirqs.CPU89.SCHED
18042 ± 2% +559.7% 119015 ± 15% softirqs.CPU89.TIMER
7417 ± 5% +383.6% 35865 ± 13% softirqs.CPU9.RCU
4984 ± 11% +687.6% 39257 ± 3% softirqs.CPU9.SCHED
19419 ± 2% +593.1% 134592 ± 10% softirqs.CPU9.TIMER
6678 ± 3% +411.1% 34131 ± 9% softirqs.CPU90.RCU
5188 ± 2% +644.6% 38635 ± 2% softirqs.CPU90.SCHED
18061 ± 2% +557.0% 118655 ± 13% softirqs.CPU90.TIMER
6699 +430.3% 35524 ± 15% softirqs.CPU91.RCU
5154 +648.4% 38571 ± 3% softirqs.CPU91.SCHED
18054 +557.9% 118780 ± 15% softirqs.CPU91.TIMER
6545 ± 4% +436.3% 35100 ± 15% softirqs.CPU92.RCU
5183 ± 2% +640.6% 38391 softirqs.CPU92.SCHED
18097 ± 3% +561.2% 119656 ± 15% softirqs.CPU92.TIMER
6649 ± 2% +423.6% 34819 ± 13% softirqs.CPU93.RCU
5210 +642.1% 38671 ± 2% softirqs.CPU93.SCHED
18095 ± 2% +557.9% 119046 ± 14% softirqs.CPU93.TIMER
6759 +392.2% 33267 ± 9% softirqs.CPU94.RCU
5210 +653.1% 39241 ± 2% softirqs.CPU94.SCHED
18143 ± 2% +573.4% 122174 ± 13% softirqs.CPU94.TIMER
6677 ± 2% +414.8% 34374 ± 10% softirqs.CPU95.RCU
5172 ± 2% +418.0% 26794 ± 11% softirqs.CPU95.SCHED
18063 ± 2% +559.7% 119167 ± 15% softirqs.CPU95.TIMER
6785 ± 10% +382.8% 32757 ± 6% softirqs.CPU96.RCU
5193 ± 20% +576.9% 35154 ± 17% softirqs.CPU96.SCHED
18836 ± 6% +602.7% 132354 ± 10% softirqs.CPU96.TIMER
6724 ± 7% +417.1% 34773 ± 10% softirqs.CPU97.RCU
5270 ± 3% +640.4% 39023 softirqs.CPU97.SCHED
18469 ± 4% +597.1% 128754 ± 9% softirqs.CPU97.TIMER
6657 ± 5% +428.6% 35189 ± 13% softirqs.CPU98.RCU
5282 ± 3% +642.8% 39237 softirqs.CPU98.SCHED
18584 ± 3% +594.1% 129004 ± 10% softirqs.CPU98.TIMER
6905 ± 8% +422.5% 36081 ± 16% softirqs.CPU99.RCU
5308 ± 2% +648.2% 39719 ± 2% softirqs.CPU99.SCHED
18905 ± 3% +581.1% 128757 ± 11% softirqs.CPU99.TIMER
697.00 ± 38% +1367.1% 10226 ± 62% softirqs.NET_RX
1309285 ± 3% +407.1% 6639778 ± 9% softirqs.RCU
1012468 +634.4% 7435362 softirqs.SCHED
3593340 +575.0% 24256691 ± 11% softirqs.TIMER
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Rong Chen
Greetings,
FYI, we noticed the following commit (built with gcc-9):
commit: 54529aac984de8d3928810c85b575efac0f9d616 ("[PATCH 07/14] block: make QUEUE_SYSFS_BIT_FNS a little more useful")
url: https://github.com/0day-ci/linux/commits/Christoph-Hellwig/fs-remove-the-unused-SB_I_MULTIROOT-flag/20200727-000342
base: https://git.kernel.org/cgit/linux/kernel/git/axboe/linux-block.git for-next
in testcase: blktests
with the following parameters:
disk: 1SSD
test: block-group1
ucode: 0x21
on test machine: 4 threads Intel(R) Core(TM) i3-3220 CPU @ 3.30GHz with 8G memory
caused the changes below (please refer to the attached dmesg/kmsg for the entire log/backtrace):
If you fix the issue, kindly add the following tag
Reported-by: kernel test robot <[email protected]>
block/005 => sdb1 (switch schedulers while doing IO)
block/005 => sdb1 (switch schedulers while doing IO) [failed]
read iops ... 31828
runtime ... 33.620s
--- tests/block/005.out 2020-07-13 05:13:39.000000000 +0000
+++ /lkp/benchmarks/blktests/results/sdb1/block/005.out.bad 2020-07-29 09:54:17.895325186 +0000
@@ -1,2 +1,3 @@
Running block/005
+cat: '/sys/devices/pci0000:00/0000:00:1f.2/ata5/host4/target4:0:0/4:0:0:0/block/sdb/queue/rotational': No such file or directory
Test complete
To reproduce:
git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml
Thanks,
Rong Chen
On Sun, Jul 26 2020 at 11:03am -0400,
Christoph Hellwig <[email protected]> wrote:
> Drivers shouldn't really mess with the readahead size, as that is a VM
> concept. Instead set it based on the optimal I/O size by lifting the
> algorithm from the md driver when registering the disk. Also set
> bdi->io_pages there as well by applying the same scheme based on
> max_sectors.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
> block/blk-settings.c | 5 ++---
> block/blk-sysfs.c | 1 -
> block/genhd.c | 13 +++++++++++--
> drivers/block/aoe/aoeblk.c | 2 --
> drivers/block/drbd/drbd_nl.c | 12 +-----------
> drivers/md/bcache/super.c | 4 ----
> drivers/md/dm-table.c | 3 ---
> drivers/md/raid0.c | 16 ----------------
> drivers/md/raid10.c | 24 +-----------------------
> drivers/md/raid5.c | 13 +------------
> 10 files changed, 16 insertions(+), 77 deletions(-)
In general these changes need a solid audit relative to stacking
drivers: that is, the limits-stacking methods (blk_stack_limits)
vs. the lower-level allocation methods (__device_add_disk).
You optimized for the low-level __device_add_disk establishing the bdi's
ra_pages and io_pages. That happens at the beginning of disk allocation,
well before any build-up of a stacking driver's queue_io_opt() -- which
was previously done in disk_stack_limits or driver-specific methods
(e.g. dm_table_set_restrictions) that are called _after_ all the limits
stacking occurs.
By moving the setting of the bdi's ra_pages and io_pages this early
into __device_add_disk, these values will no longer be set properly
for at least DM, afaict.
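To make the ordering concrete, here is a rough user-space toy model of
what happens for a bio-based DM device under this patch (the structs and
helpers below are illustrative stand-ins, not the real kernel code, and
the numbers are made up): add_disk() runs at DM device creation while
io_opt is still 0, so ra_pages falls back to VM_READAHEAD_PAGES, and the
later limits stacking only fills in io_opt (blk_stack_limits writes the
queue_limits directly and never goes through blk_queue_io_opt), so
nothing ever recomputes ra_pages from the stacked optimal I/O size:

#include <stdio.h>

#define TOY_PAGE_SIZE           4096UL
#define VM_READAHEAD_PAGES      (128UL * 1024 / TOY_PAGE_SIZE) /* 32 pages with 4k pages */

/* Toy stand-in for the queue/bdi state; not the real kernel structs. */
struct toy_queue {
        unsigned long io_opt;   /* bytes, filled in by limits stacking later */
        unsigned long ra_pages; /* what __device_add_disk() would compute    */
};

/* Models the new __device_add_disk() behaviour from the patch. */
static void toy_device_add_disk(struct toy_queue *q)
{
        unsigned long from_opt = q->io_opt * 2 / TOY_PAGE_SIZE;

        q->ra_pages = from_opt > VM_READAHEAD_PAGES ?
                        from_opt : VM_READAHEAD_PAGES;
}

/* Models blk_stack_limits()/dm_table_set_restrictions() running later. */
static void toy_stack_limits(struct toy_queue *q, unsigned long io_opt)
{
        q->io_opt = io_opt;     /* ra_pages is never revisited here */
}

int main(void)
{
        struct toy_queue q = { 0, 0 };

        toy_device_add_disk(&q);                  /* DM adds the disk with no table yet */
        toy_stack_limits(&q, 2UL * 1024 * 1024);  /* stacked io_opt, e.g. a 2 MiB stripe */

        printf("io_opt=%lu bytes, ra_pages=%lu (still the default %lu)\n",
               q.io_opt, q.ra_pages, VM_READAHEAD_PAGES);
        return 0;
}

Run, this prints ra_pages=32 no matter what the stacked io_opt ends up
being, which is the DM breakage described above.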
Mike
> diff --git a/block/blk-settings.c b/block/blk-settings.c
> index 76a7e03bcd6cac..01049e9b998f1d 100644
> --- a/block/blk-settings.c
> +++ b/block/blk-settings.c
> @@ -452,6 +452,8 @@ EXPORT_SYMBOL(blk_limits_io_opt);
> void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
> {
> blk_limits_io_opt(&q->limits, opt);
> + q->backing_dev_info->ra_pages =
> + max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
> }
> EXPORT_SYMBOL(blk_queue_io_opt);
>
> @@ -628,9 +630,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
> printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
> top, bottom);
> }
> -
> - t->backing_dev_info->io_pages =
> - t->limits.max_sectors >> (PAGE_SHIFT - 9);
> }
> EXPORT_SYMBOL(disk_stack_limits);
>
> diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
> index 7dda709f3ccb6f..ce418d9128a0b2 100644
> --- a/block/blk-sysfs.c
> +++ b/block/blk-sysfs.c
> @@ -245,7 +245,6 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
>
> spin_lock_irq(&q->queue_lock);
> q->limits.max_sectors = max_sectors_kb << 1;
> - q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
> spin_unlock_irq(&q->queue_lock);
>
> return ret;
> diff --git a/block/genhd.c b/block/genhd.c
> index 8b1e9f48957cb5..097d4e4bc0b8a2 100644
> --- a/block/genhd.c
> +++ b/block/genhd.c
> @@ -775,6 +775,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
> const struct attribute_group **groups,
> bool register_queue)
> {
> + struct request_queue *q = disk->queue;
> dev_t devt;
> int retval;
>
> @@ -785,7 +786,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
> * registration.
> */
> if (register_queue)
> - elevator_init_mq(disk->queue);
> + elevator_init_mq(q);
>
> /* minors == 0 indicates to use ext devt from part0 and should
> * be accompanied with EXT_DEVT flag. Make sure all
> @@ -815,10 +816,18 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
> disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
> disk->flags |= GENHD_FL_NO_PART_SCAN;
> } else {
> - struct backing_dev_info *bdi = disk->queue->backing_dev_info;
> + struct backing_dev_info *bdi = q->backing_dev_info;
> struct device *dev = disk_to_dev(disk);
> int ret;
>
> + /*
> + * For read-ahead of large files to be effective, we need to
> + * readahead at least twice the optimal I/O size.
> + */
> + bdi->ra_pages = max(queue_io_opt(q) * 2 / PAGE_SIZE,
> + VM_READAHEAD_PAGES);
> + bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
> +
> /* Register BDI before referencing it from bdev */
> dev->devt = devt;
> ret = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
> diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
> index 5ca7216e9e01f3..89b33b402b4e52 100644
> --- a/drivers/block/aoe/aoeblk.c
> +++ b/drivers/block/aoe/aoeblk.c
> @@ -347,7 +347,6 @@ aoeblk_gdalloc(void *vp)
> mempool_t *mp;
> struct request_queue *q;
> struct blk_mq_tag_set *set;
> - enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
> ulong flags;
> int late = 0;
> int err;
> @@ -407,7 +406,6 @@ aoeblk_gdalloc(void *vp)
> WARN_ON(d->gd);
> WARN_ON(d->flags & DEVFL_UP);
> blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
> - q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
> d->bufpool = mp;
> d->blkq = gd->queue = q;
> q->queuedata = d;
> diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
> index 650372ee2c7822..212bf711fb6b41 100644
> --- a/drivers/block/drbd/drbd_nl.c
> +++ b/drivers/block/drbd/drbd_nl.c
> @@ -1360,18 +1360,8 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
> decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
> decide_on_write_same_support(device, q, b, o, disable_write_same);
>
> - if (b) {
> + if (b)
> blk_stack_limits(&q->limits, &b->limits, 0);
> -
> - if (q->backing_dev_info->ra_pages !=
> - b->backing_dev_info->ra_pages) {
> - drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
> - q->backing_dev_info->ra_pages,
> - b->backing_dev_info->ra_pages);
> - q->backing_dev_info->ra_pages =
> - b->backing_dev_info->ra_pages;
> - }
> - }
> fixup_discard_if_not_supported(q);
> fixup_write_zeroes(device, q);
> }
> diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
> index 9e45faa054b6f4..9d3f0711be030f 100644
> --- a/drivers/md/bcache/super.c
> +++ b/drivers/md/bcache/super.c
> @@ -1367,10 +1367,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
> if (ret)
> return ret;
>
> - dc->disk.disk->queue->backing_dev_info->ra_pages =
> - max(dc->disk.disk->queue->backing_dev_info->ra_pages,
> - q->backing_dev_info->ra_pages);
> -
> atomic_set(&dc->io_errors, 0);
> dc->io_disable = false;
> dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
> diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
> index aac4c31cfc8498..324a42ed2f8894 100644
> --- a/drivers/md/dm-table.c
> +++ b/drivers/md/dm-table.c
> @@ -1924,9 +1924,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
> q->nr_zones = blkdev_nr_zones(t->md->disk);
> }
> #endif
> -
> - /* Allow reads to exceed readahead limits */
> - q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
> }
>
> unsigned int dm_table_get_num_targets(struct dm_table *t)
> diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
> index f54a449f97aa79..aa2d7279176880 100644
> --- a/drivers/md/raid0.c
> +++ b/drivers/md/raid0.c
> @@ -410,22 +410,6 @@ static int raid0_run(struct mddev *mddev)
> mdname(mddev),
> (unsigned long long)mddev->array_sectors);
>
> - if (mddev->queue) {
> - /* calculate the max read-ahead size.
> - * For read-ahead of large files to be effective, we need to
> - * readahead at least twice a whole stripe. i.e. number of devices
> - * multiplied by chunk size times 2.
> - * If an individual device has an ra_pages greater than the
> - * chunk size, then we will not drive that device as hard as it
> - * wants. We consider this a configuration error: a larger
> - * chunksize should be used in that case.
> - */
> - int stripe = mddev->raid_disks *
> - (mddev->chunk_sectors << 9) / PAGE_SIZE;
> - if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
> - mddev->queue->backing_dev_info->ra_pages = 2* stripe;
> - }
> -
> dump_zones(mddev);
>
> ret = md_integrity_register(mddev);
> diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
> index 9f88ff9bdee437..23d15acbf457d4 100644
> --- a/drivers/md/raid10.c
> +++ b/drivers/md/raid10.c
> @@ -3865,19 +3865,6 @@ static int raid10_run(struct mddev *mddev)
> mddev->resync_max_sectors = size;
> set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
>
> - if (mddev->queue) {
> - int stripe = conf->geo.raid_disks *
> - ((mddev->chunk_sectors << 9) / PAGE_SIZE);
> -
> - /* Calculate max read-ahead size.
> - * We need to readahead at least twice a whole stripe....
> - * maybe...
> - */
> - stripe /= conf->geo.near_copies;
> - if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
> - mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
> - }
> -
> if (md_integrity_register(mddev))
> goto out_free_conf;
>
> @@ -4715,17 +4702,8 @@ static void end_reshape(struct r10conf *conf)
> conf->reshape_safe = MaxSector;
> spin_unlock_irq(&conf->device_lock);
>
> - /* read-ahead size must cover two whole stripes, which is
> - * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
> - */
> - if (conf->mddev->queue) {
> - int stripe = conf->geo.raid_disks *
> - ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
> - stripe /= conf->geo.near_copies;
> - if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
> - conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
> + if (conf->mddev->queue)
> raid10_set_io_opt(conf);
> - }
> conf->fullsync = 0;
> }
>
> diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
> index 68e41ce3ca75cc..415ce3cc155698 100644
> --- a/drivers/md/raid5.c
> +++ b/drivers/md/raid5.c
> @@ -7413,8 +7413,6 @@ static int raid5_run(struct mddev *mddev)
> int data_disks = conf->previous_raid_disks - conf->max_degraded;
> int stripe = data_disks *
> ((mddev->chunk_sectors << 9) / PAGE_SIZE);
> - if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
> - mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
>
> chunk_size = mddev->chunk_sectors << 9;
> blk_queue_io_min(mddev->queue, chunk_size);
> @@ -8002,17 +8000,8 @@ static void end_reshape(struct r5conf *conf)
> spin_unlock_irq(&conf->device_lock);
> wake_up(&conf->wait_for_overlap);
>
> - /* read-ahead size must cover two whole stripes, which is
> - * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
> - */
> - if (conf->mddev->queue) {
> - int data_disks = conf->raid_disks - conf->max_degraded;
> - int stripe = data_disks * ((conf->chunk_sectors << 9)
> - / PAGE_SIZE);
> - if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
> - conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
> + if (conf->mddev->queue)
> raid5_set_io_opt(conf);
> - }
> }
> }
>
> --
> 2.27.0
>
On Wed, Aug 26, 2020 at 06:07:38PM -0400, Mike Snitzer wrote:
> On Sun, Jul 26 2020 at 11:03am -0400,
> Christoph Hellwig <[email protected]> wrote:
>
> > Drivers shouldn't really mess with the readahead size, as that is a VM
> > concept. Instead set it based on the optimal I/O size by lifting the
> > algorithm from the md driver when registering the disk. Also set
> > bdi->io_pages there as well by applying the same scheme based on
> > max_sectors.
> >
> > Signed-off-by: Christoph Hellwig <[email protected]>
> > ---
> > block/blk-settings.c | 5 ++---
> > block/blk-sysfs.c | 1 -
> > block/genhd.c | 13 +++++++++++--
> > drivers/block/aoe/aoeblk.c | 2 --
> > drivers/block/drbd/drbd_nl.c | 12 +-----------
> > drivers/md/bcache/super.c | 4 ----
> > drivers/md/dm-table.c | 3 ---
> > drivers/md/raid0.c | 16 ----------------
> > drivers/md/raid10.c | 24 +-----------------------
> > drivers/md/raid5.c | 13 +------------
> > 10 files changed, 16 insertions(+), 77 deletions(-)
>
>
> In general these changes need a solid audit relative to stacking
> drivers. That is, the limits stacking methods (blk_stack_limits)
> vs lower level allocation methods (__device_add_disk).
>
> You optimized for the low-level __device_add_disk establishing the bdi's
> ra_pages and io_pages. That is at the beginning of disk allocation,
> well before any build up of stacking driver's queue_io_opt() -- which
> was previously done in disk_stack_limits or driver specific methods
> (e.g. dm_table_set_restrictions) that are called _after_ all the limits
> stacking occurs.
>
> By inverting the setting of the bdi's ra_pages and io_pages to be done
> so early in __device_add_disk, it'll break properly setting these values
> for at least DM afaict.
ra_pages never got inherited by stacking drivers; check it by modifying
it on an underlying device and then creating a trivial dm or md one.
And I think that is a good thing - in general we shouldn't really mess
with this thing from drivers if we can avoid it. I've kept the legacy
aoe and md parity raid cases, out of which the first looks pretty weird
and the md one at least remotely sensible.
->io_pages is still inherited in disk_stack_limits, just like before,
so no change there either.
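For context on the md parity raid cases kept here: the raid5/raid10 hunks
above replace the open-coded ra_pages bumps with calls to
raid5_set_io_opt()/raid10_set_io_opt(), whose definitions are not shown in
this excerpt. Presumably they express the old "read ahead two whole stripes"
heuristic as the queue's optimal I/O size, from which the block layer now
derives the readahead. A minimal sketch of the raid5 variant, assuming
exactly that:

static void raid5_set_io_opt(struct r5conf *conf)
{
	/* one full stripe: chunk size times the number of data disks */
	blk_queue_io_opt(conf->mddev->queue,
			 (conf->chunk_sectors << 9) *
			 (conf->raid_disks - conf->max_degraded));
}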
On Wed, Sep 02 2020 at 11:11am -0400,
Christoph Hellwig <[email protected]> wrote:
> ra_pages never got inherited by stacking drivers; check it by modifying
> it on an underlying device and then creating a trivial dm or md one.
Sure, not saying that it did. But if the goal is to set ra_pages based
on io_opt, then to do that correctly on stacking drivers it must be done
in terms of limits stacking, right? Or at least done at a location that
is after the limits stacking has occurred? So should DM just open-code
setting ra_pages like it did for io_pages?
Because setting ra_pages in __device_add_disk() is way too early for DM
-- given it uses device_add_disk_no_queue_reg via add_disk_no_queue_reg
at DM device creation (before stacking all underlying devices' limits).
> And I think that is a good thing - in general we shouldn't really mess
> with this thing from drivers if we can avoid it. I've kept the legacy
> aoe and md parity raid cases, out of which the first looks pretty weird
> and the md one at least remotely sensible.
I don't want drivers, like DM, to have to worry about these. So I agree
with that goal ;)
> ->io_pages is still inherited in disk_stack_limits, just like before,
> so no change there either.
I'm missing where, but I've only looked closely at this 06/14 patch.
In it I see io_pages is no longer adjusted in disk_stack_limits().
Mike
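To make the question concrete, the open-coding Mike is suggesting would
presumably live at the end of dm_table_set_restrictions(), next to where
the ->io_pages assignment removed above used to sit. A sketch of the idea
only; the exact scaling is an assumed choice, not something taken from the
series:

	/*
	 * Sketch: alongside re-adding the ->io_pages assignment removed
	 * above, open-code ra_pages here as well, once all limits stacking
	 * has happened.  Scaling io_opt by two with VM_READAHEAD_PAGES as
	 * the floor is an assumption.
	 */
	q->backing_dev_info->ra_pages =
		max(limits->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);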
On Wed, Sep 02, 2020 at 12:20:07PM -0400, Mike Snitzer wrote:
> Sure, not saying that it did. But if the goal is to set ra_pages based
> on io_opt, then to do that correctly on stacking drivers it must be done
> in terms of limits stacking, right? Or at least done at a location that
> is after the limits stacking has occurred? So should DM just open-code
> setting ra_pages like it did for io_pages?
>
> Because setting ra_pages in __device_add_disk() is way too early for DM
> -- given it uses device_add_disk_no_queue_reg via add_disk_no_queue_reg
> at DM device creation (before stacking all underlying devices' limits).
I'll move it to blk_register_queue, which should work just fine.
On Thu, Sep 10 2020 at 5:28am -0400,
Christoph Hellwig <[email protected]> wrote:
> I'll move it to blk_register_queue, which should work just fine.
That'll work for initial DM table load as part of DM device creation
(dm_setup_md_queue). But it won't account for DM table reloads that
might change underlying devices on a live DM device (done using
__bind).
Both dm_setup_md_queue() and __bind() call dm_table_set_restrictions()
to set/update queue_limits. It feels like __bind() will need to call a
new block helper to set/update the bdi settings derived from queue_limits
(e.g. ra_pages and io_pages).
Any chance you're open to factoring out that block function as an
exported symbol for use by blk_register_queue() and code like DM's
__bind()?
Thanks,
Mike
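The helper Mike is asking for would boil down to the two assignments
Christoph mentions in his reply below. A sketch, with
blk_queue_update_readahead() as a purely illustrative name and the scaling
of io_opt as an assumption: blk_register_queue() could call it once at
registration time, and DM's __bind() could call it again after a table
reload has restacked the limits.

void blk_queue_update_readahead(struct request_queue *q)
{
	/* re-derive the bdi hints from the final, fully stacked limits */
	q->backing_dev_info->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	q->backing_dev_info->io_pages =
		queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(blk_queue_update_readahead);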
On Thu, Sep 10, 2020 at 01:15:41PM -0400, Mike Snitzer wrote:
> > I'll move it to blk_register_queue, which should work just fine.
>
> That'll work for initial DM table load as part of DM device creation
> (dm_setup_md_queue). But it won't account for DM table reloads that
> might change underlying devices on a live DM device (done using
> __bind).
>
> Both dm_setup_md_queue() and __bind() call dm_table_set_restrictions()
> to set/update queue_limits. It feels like __bind() will need to call a
> new block helper to set/update the bdi settings derived from queue_limits
> (e.g. ra_pages and io_pages).
>
> Any chance you're open to factoring out that block function as an
> exported symbol for use by blk_register_queue() and code like DM's
> __bind()?
I agree with the problem statement. OTOH adding an exported helper
for two trivial assignments seems a little silly.
For now I'll just keep the open-coded ->io_pages assignment in
dm. Note that dm doesn't currently update the ->ra_pages value
based on the underlying devices, so an incremental patch to do that
might be useful as well.