Hi Jens,
this series is posted early because it has wide-ranging changes and could use some
early ACKs before -rc1.
It changes the interface to the bio allocators to always pass a block_device and
the operation, which is information needed for every bio submitted through
submit_bio. This means the fields can be initialized directly in bio_init instead
of first being zeroed, and thus should allow even better micro-optimization than
the __bio_set_dev that Pavel proposed, while also cleaning up the code.
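To make that concrete, here is a rough before/after sketch of a typical caller
conversion, modeled on the drbd_actlog.c hunk later in this series (argument
order is bdev, nr_vecs, opf, gfp_mask, bs as introduced in the bio_alloc_bioset
patch):

	/* before: allocate, then set up the device and operation by hand */
	bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
	bio_set_dev(bio, bdev->md_bdev);
	bio_set_op_attrs(bio, op, op_flags);

	/* after: bdev and opf are passed at allocation time and set in bio_init */
	bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO,
			       &drbd_md_io_bio_set);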
I have a follow-on series to also deal with the bio cloning interfaces, which
need even more love, and additional cleanups for the callers, which might be
material for the next merge window.
Diffstat:
block/bio.c | 73 ++++++++++++++++++++++++------------
block/blk-flush.c | 4 -
block/blk-lib.c | 32 ++-------------
block/blk-zoned.c | 14 +-----
block/blk.h | 2
block/bounce.c | 6 --
block/fops.c | 35 +++++++----------
drivers/block/drbd/drbd_actlog.c | 5 --
drivers/block/drbd/drbd_bitmap.c | 7 +--
drivers/block/drbd/drbd_receiver.c | 31 +++------------
drivers/block/floppy.c | 4 -
drivers/block/pktcdvd.c | 8 ---
drivers/block/rnbd/rnbd-srv-dev.c | 61 ------------------------------
drivers/block/rnbd/rnbd-srv-dev.h | 18 --------
drivers/block/rnbd/rnbd-srv.c | 45 ++++++++--------------
drivers/block/rnbd/rnbd-srv.h | 1
drivers/block/xen-blkback/blkback.c | 25 ++----------
drivers/block/zram/zram_drv.c | 16 ++-----
drivers/md/bcache/io.c | 3 -
drivers/md/bcache/journal.c | 16 ++-----
drivers/md/bcache/movinggc.c | 4 -
drivers/md/bcache/request.c | 18 +++-----
drivers/md/bcache/super.c | 8 +--
drivers/md/bcache/writeback.c | 4 -
drivers/md/dm-crypt.c | 27 ++++---------
drivers/md/dm-io.c | 5 --
drivers/md/dm-log-writes.c | 39 +++----------------
drivers/md/dm-snap.c | 21 ----------
drivers/md/dm-thin.c | 41 +++++---------------
drivers/md/dm-writecache.c | 7 +--
drivers/md/dm-zoned-metadata.c | 26 ++----------
drivers/md/dm.c | 12 ++---
drivers/md/md-multipath.c | 2
drivers/md/md.c | 24 +++++------
drivers/md/raid1.c | 8 +--
drivers/md/raid10.c | 14 ++----
drivers/md/raid5-cache.c | 19 +++------
drivers/md/raid5-ppl.c | 13 ++----
drivers/md/raid5.c | 12 ++---
drivers/nvdimm/nd_virtio.c | 6 +-
drivers/nvme/target/io-cmd-bdev.c | 18 +++-----
drivers/nvme/target/passthru.c | 7 +--
drivers/nvme/target/zns.c | 14 +++---
drivers/scsi/ufs/ufshpb.c | 4 -
drivers/target/target_core_iblock.c | 11 +----
fs/btrfs/disk-io.c | 10 +---
fs/btrfs/extent_io.c | 2
fs/buffer.c | 14 ++----
fs/crypto/bio.c | 13 ++----
fs/direct-io.c | 5 --
fs/erofs/zdata.c | 5 --
fs/ext4/page-io.c | 3 -
fs/ext4/readpage.c | 8 +--
fs/f2fs/data.c | 7 +--
fs/gfs2/lops.c | 8 +--
fs/gfs2/meta_io.c | 4 -
fs/gfs2/ops_fstype.c | 4 -
fs/hfsplus/wrapper.c | 4 -
fs/iomap/buffered-io.c | 26 +++++-------
fs/iomap/direct-io.c | 8 ---
fs/jfs/jfs_logmgr.c | 11 -----
fs/jfs/jfs_metapage.c | 9 +---
fs/mpage.c | 34 ++--------------
fs/nfs/blocklayout/blocklayout.c | 26 +-----------
fs/nilfs2/segbuf.c | 31 +--------------
fs/ntfs3/fsntfs.c | 27 -------------
fs/ocfs2/cluster/heartbeat.c | 4 -
fs/squashfs/block.c | 11 ++---
fs/xfs/xfs_bio_io.c | 14 ++----
fs/xfs/xfs_buf.c | 4 -
fs/xfs/xfs_log.c | 14 +++---
fs/zonefs/super.c | 9 +---
include/linux/bio.h | 30 ++++++--------
kernel/power/swap.c | 5 --
mm/page_io.c | 10 +---
75 files changed, 372 insertions(+), 758 deletions(-)
Open code mpage_alloc in its two callers and simplify the results
based on the calling context:
- __mpage_writepage always passes GFP_NOFS and can thus always sleep and
will never get a NULL return from bio_alloc at all.
- do_mpage_readpage can only get a non-sleeping context for readahead
which never sets PF_MEMALLOC and thus doesn't need the retry loop
either.
Both cases will never have __GFP_HIGH set.
Signed-off-by: Christoph Hellwig <[email protected]>
---
fs/mpage.c | 35 ++++++-----------------------------
1 file changed, 6 insertions(+), 29 deletions(-)
diff --git a/fs/mpage.c b/fs/mpage.c
index 334e7d09aa652..c5817699b369b 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -67,29 +67,6 @@ static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
return NULL;
}
-static struct bio *
-mpage_alloc(struct block_device *bdev,
- sector_t first_sector, int nr_vecs,
- gfp_t gfp_flags)
-{
- struct bio *bio;
-
- /* Restrict the given (page cache) mask for slab allocations */
- gfp_flags &= GFP_KERNEL;
- bio = bio_alloc(gfp_flags, nr_vecs);
-
- if (bio == NULL && (current->flags & PF_MEMALLOC)) {
- while (!bio && (nr_vecs /= 2))
- bio = bio_alloc(gfp_flags, nr_vecs);
- }
-
- if (bio) {
- bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector = first_sector;
- }
- return bio;
-}
-
/*
* support function for mpage_readahead. The fs supplied get_block might
* return an up to date buffer. This is used to map that buffer into
@@ -303,10 +280,11 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
page))
goto out;
}
- args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
- bio_max_segs(args->nr_pages), gfp);
+ args->bio = bio_alloc(gfp, bio_max_segs(args->nr_pages));
if (args->bio == NULL)
goto confused;
+ bio_set_dev(args->bio, bdev);
+ args->bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
}
length = first_hole << blkbits;
@@ -615,10 +593,9 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
page, wbc))
goto out;
}
- bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
- BIO_MAX_VECS, GFP_NOFS|__GFP_HIGH);
- if (bio == NULL)
- goto confused;
+ bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
+ bio_set_dev(bio, bdev);
+ bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
wbc_init_bio(wbc, bio);
bio->bi_write_hint = inode->i_write_hint;
--
2.30.2
bio_alloc will never fail when it can sleep. Remove the now simple
nilfs_alloc_seg_bio helper and open code it in the only caller.
Signed-off-by: Christoph Hellwig <[email protected]>
---
fs/nilfs2/segbuf.c | 31 ++++---------------------------
1 file changed, 4 insertions(+), 27 deletions(-)
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 43287b0d3e9b6..53b7c6d21cdd8 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -371,29 +371,6 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
return err;
}
-/**
- * nilfs_alloc_seg_bio - allocate a new bio for writing log
- * @nilfs: nilfs object
- * @start: start block number of the bio
- * @nr_vecs: request size of page vector.
- *
- * Return Value: On success, pointer to the struct bio is returned.
- * On error, NULL is returned.
- */
-static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
- int nr_vecs)
-{
- struct bio *bio;
-
- bio = bio_alloc(GFP_NOIO, nr_vecs);
- if (likely(bio)) {
- bio_set_dev(bio, nilfs->ns_bdev);
- bio->bi_iter.bi_sector =
- start << (nilfs->ns_blocksize_bits - 9);
- }
- return bio;
-}
-
static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
struct nilfs_write_info *wi)
{
@@ -414,10 +391,10 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
BUG_ON(wi->nr_vecs <= 0);
repeat:
if (!wi->bio) {
- wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
- wi->nr_vecs);
- if (unlikely(!wi->bio))
- return -ENOMEM;
+ wi->bio = bio_alloc(GFP_NOIO, wi->nr_vecs);
+ bio_set_dev(wi->bio, wi->nilfs->ns_bdev);
+ wi->bio->bi_iter.bi_sector = (wi->blocknr + wi->end) <<
+ (wi->nilfs->ns_blocksize_bits - 9);
}
len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
--
2.30.2
bio_alloc will never fail if it is allowed to sleep, so there is no
need for this loop. Also remove the __GFP_HIGH specifier as it doesn't
make sense here given that we'll always fall back to the mempool anyway.
Signed-off-by: Christoph Hellwig <[email protected]>
---
fs/ntfs3/fsntfs.c | 23 ++---------------------
1 file changed, 2 insertions(+), 21 deletions(-)
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 4de9acb169689..4a255e21ecf5f 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -1443,17 +1443,6 @@ int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
return err;
}
-static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
-{
- struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
-
- if (!bio && (current->flags & PF_MEMALLOC)) {
- while (!bio && (nr_vecs /= 2))
- bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
- }
- return bio;
-}
-
/*
* ntfs_bio_pages - Read/write pages from/to disk.
*/
@@ -1496,11 +1485,7 @@ int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
lbo = ((u64)lcn << cluster_bits) + off;
len = ((u64)clen << cluster_bits) - off;
new_bio:
- new = ntfs_alloc_bio(nr_pages - page_idx);
- if (!new) {
- err = -ENOMEM;
- goto out;
- }
+ new = bio_alloc(GFP_NOFS, nr_pages - page_idx);
if (bio) {
bio_chain(bio, new);
submit_bio(bio);
@@ -1599,11 +1584,7 @@ int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
lbo = (u64)lcn << cluster_bits;
len = (u64)clen << cluster_bits;
new_bio:
- new = ntfs_alloc_bio(BIO_MAX_VECS);
- if (!new) {
- err = -ENOMEM;
- break;
- }
+ new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
if (bio) {
bio_chain(bio, new);
submit_bio(bio);
--
2.30.2
bio_alloc will never fail when it can sleep. Remove the now simple
bl_alloc_init_bio helper and open code it in the only caller.
Signed-off-by: Christoph Hellwig <[email protected]>
---
fs/nfs/blocklayout/blocklayout.c | 26 +++++---------------------
1 file changed, 5 insertions(+), 21 deletions(-)
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index fe860c5387476..38e063af7e98a 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -115,23 +115,6 @@ bl_submit_bio(struct bio *bio)
return NULL;
}
-static struct bio *bl_alloc_init_bio(unsigned int npg,
- struct block_device *bdev, sector_t disk_sector,
- bio_end_io_t end_io, struct parallel_io *par)
-{
- struct bio *bio;
-
- npg = bio_max_segs(npg);
- bio = bio_alloc(GFP_NOIO, npg);
- if (bio) {
- bio->bi_iter.bi_sector = disk_sector;
- bio_set_dev(bio, bdev);
- bio->bi_end_io = end_io;
- bio->bi_private = par;
- }
- return bio;
-}
-
static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map)
{
return offset >= map->start && offset < map->start + map->len;
@@ -171,10 +154,11 @@ do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
retry:
if (!bio) {
- bio = bl_alloc_init_bio(npg, map->bdev,
- disk_addr >> SECTOR_SHIFT, end_io, par);
- if (!bio)
- return ERR_PTR(-ENOMEM);
+ bio = bio_alloc(GFP_NOIO, bio_max_segs(npg));
+ bio->bi_iter.bi_sector = disk_addr >> SECTOR_SHIFT;
+ bio_set_dev(bio, map->bdev);
+ bio->bi_end_io = end_io;
+ bio->bi_private = par;
bio_set_op_attrs(bio, rw, 0);
}
if (bio_add_page(bio, page, *len, offset) < *len) {
--
2.30.2
Open code clone_init next to the bio allocations, which saves a few lines
of code, prepares for future changes, and allows removing the duplicate
bi_opf assignment for the bio_clone_fast case in kcryptd_io_read.
Signed-off-by: Christoph Hellwig <[email protected]>
---
drivers/md/dm-crypt.c | 21 ++++++++-------------
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 20abe3486aba1..3c5ecd35d3483 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -234,7 +234,7 @@ static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT 2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16)
-static void clone_init(struct dm_crypt_io *, struct bio *);
+static void crypt_endio(struct bio *clone);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
struct scatterlist *sg);
@@ -1673,7 +1673,10 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
mutex_lock(&cc->bio_alloc_lock);
clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
- clone_init(io, clone);
+ clone->bi_private = io;
+ clone->bi_end_io = crypt_endio;
+ bio_set_dev(clone, cc->dev->bdev);
+ clone->bi_opf = io->base_bio->bi_opf;
remaining_size = size;
@@ -1826,16 +1829,6 @@ static void crypt_endio(struct bio *clone)
crypt_dec_pending(io);
}
-static void clone_init(struct dm_crypt_io *io, struct bio *clone)
-{
- struct crypt_config *cc = io->cc;
-
- clone->bi_private = io;
- clone->bi_end_io = crypt_endio;
- bio_set_dev(clone, cc->dev->bdev);
- clone->bi_opf = io->base_bio->bi_opf;
-}
-
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
struct crypt_config *cc = io->cc;
@@ -1850,10 +1843,12 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
if (!clone)
return 1;
+ clone->bi_private = io;
+ clone->bi_end_io = crypt_endio;
+ bio_set_dev(clone, cc->dev->bdev);
crypt_inc_pending(io);
- clone_init(io, clone);
clone->bi_iter.bi_sector = cc->start + io->sector;
if (dm_crypt_integrity_io_alloc(io, clone)) {
--
2.30.2
Remove handling of NULL returns from sleeping bio_alloc calls given that
those can't fail.
Signed-off-by: Christoph Hellwig <[email protected]>
---
drivers/md/dm-crypt.c | 5 +----
drivers/md/dm-log-writes.c | 18 ------------------
drivers/md/dm-thin.c | 25 +++++++++----------------
drivers/md/dm-zoned-metadata.c | 11 -----------
drivers/md/dm.c | 2 --
5 files changed, 10 insertions(+), 51 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d4ae31558826a..20abe3486aba1 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1673,9 +1673,6 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
mutex_lock(&cc->bio_alloc_lock);
clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
- if (!clone)
- goto out;
-
clone_init(io, clone);
remaining_size = size;
@@ -1702,7 +1699,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
bio_put(clone);
clone = NULL;
}
-out:
+
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_unlock(&cc->bio_alloc_lock);
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 139b09b06eda9..25f5e8d2d417b 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -218,10 +218,6 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
size_t ret;
bio = bio_alloc(GFP_KERNEL, 1);
- if (!bio) {
- DMERR("Couldn't alloc log bio");
- goto error;
- }
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, lc->logdev->bdev);
@@ -276,11 +272,6 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
atomic_inc(&lc->io_blocks);
bio = bio_alloc(GFP_KERNEL, bio_pages);
- if (!bio) {
- DMERR("Couldn't alloc inline data bio");
- goto error;
- }
-
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, lc->logdev->bdev);
@@ -322,7 +313,6 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
error_bio:
bio_free_pages(bio);
bio_put(bio);
-error:
put_io_block(lc);
return -1;
}
@@ -364,10 +354,6 @@ static int log_one_block(struct log_writes_c *lc,
atomic_inc(&lc->io_blocks);
bio = bio_alloc(GFP_KERNEL, bio_max_segs(block->vec_cnt));
- if (!bio) {
- DMERR("Couldn't alloc log bio");
- goto error;
- }
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, lc->logdev->bdev);
@@ -387,10 +373,6 @@ static int log_one_block(struct log_writes_c *lc,
submit_bio(bio);
bio = bio_alloc(GFP_KERNEL,
bio_max_segs(block->vec_cnt - i));
- if (!bio) {
- DMERR("Couldn't alloc log bio");
- goto error;
- }
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, lc->logdev->bdev);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ec119d2422d5d..76a9c2e9aeeea 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1180,24 +1180,17 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
}
discard_parent = bio_alloc(GFP_NOIO, 1);
- if (!discard_parent) {
- DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
- dm_device_name(tc->pool->pool_md));
- queue_passdown_pt2(m);
+ discard_parent->bi_end_io = passdown_endio;
+ discard_parent->bi_private = m;
- } else {
- discard_parent->bi_end_io = passdown_endio;
- discard_parent->bi_private = m;
-
- if (m->maybe_shared)
- passdown_double_checking_shared_status(m, discard_parent);
- else {
- struct discard_op op;
+ if (m->maybe_shared)
+ passdown_double_checking_shared_status(m, discard_parent);
+ else {
+ struct discard_op op;
- begin_discard(&op, tc, discard_parent);
- r = issue_discard(&op, m->data_block, data_end);
- end_discard(&op, r);
- }
+ begin_discard(&op, tc, discard_parent);
+ r = issue_discard(&op, m->data_block, data_end);
+ end_discard(&op, r);
}
}
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index ee4626d085574..5718b83cc7182 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -551,10 +551,6 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
return ERR_PTR(-ENOMEM);
bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- dmz_free_mblock(zmd, mblk);
- return ERR_PTR(-ENOMEM);
- }
spin_lock(&zmd->mblk_lock);
@@ -726,10 +722,6 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
return -EIO;
bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- set_bit(DMZ_META_ERROR, &mblk->state);
- return -ENOMEM;
- }
set_bit(DMZ_META_WRITING, &mblk->state);
@@ -760,9 +752,6 @@ static int dmz_rdwr_block(struct dmz_dev *dev, int op,
return -EIO;
bio = bio_alloc(GFP_NOIO, 1);
- if (!bio)
- return -ENOMEM;
-
bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio_set_dev(bio, dev->bdev);
bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c0ae8087c6027..81449cbdafa81 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -520,8 +520,6 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
struct bio *clone;
clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
- if (!clone)
- return NULL;
tio = container_of(clone, struct dm_target_io, clone);
tio->inside_dm_io = true;
--
2.30.2
Use blkdev_issue_flush, which uses an on-stack bio, instead of the
open-coded version with a bio embedded into struct pool.
Signed-off-by: Christoph Hellwig <[email protected]>
---
drivers/md/dm-thin.c | 11 +----------
1 file changed, 1 insertion(+), 10 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 76a9c2e9aeeea..411a3f56ed90c 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -282,8 +282,6 @@ struct pool {
struct dm_bio_prison_cell **cell_sort_array;
mempool_t mapping_pool;
-
- struct bio flush_bio;
};
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
@@ -2906,7 +2904,6 @@ static void __pool_destroy(struct pool *pool)
if (pool->next_mapping)
mempool_free(pool->next_mapping, &pool->mapping_pool);
mempool_exit(&pool->mapping_pool);
- bio_uninit(&pool->flush_bio);
dm_deferred_set_destroy(pool->shared_read_ds);
dm_deferred_set_destroy(pool->all_io_ds);
kfree(pool);
@@ -2987,7 +2984,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pool->low_water_triggered = false;
pool->suspended = true;
pool->out_of_data_space = false;
- bio_init(&pool->flush_bio, NULL, 0);
pool->shared_read_ds = dm_deferred_set_create();
if (!pool->shared_read_ds) {
@@ -3194,13 +3190,8 @@ static void metadata_low_callback(void *context)
static int metadata_pre_commit_callback(void *context)
{
struct pool *pool = context;
- struct bio *flush_bio = &pool->flush_bio;
-
- bio_reset(flush_bio);
- bio_set_dev(flush_bio, pool->data_dev);
- flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
- return submit_bio_wait(flush_bio);
+ return blkdev_issue_flush(pool->data_dev);
}
static sector_t get_dev_size(struct block_device *bdev)
--
2.30.2
Use blkdev_issue_flush, which uses an on-stack bio, instead of the
open-coded version with a bio embedded into struct dm_snapshot.
Signed-off-by: Christoph Hellwig <[email protected]>
---
drivers/md/dm-snap.c | 21 +--------------------
1 file changed, 1 insertion(+), 20 deletions(-)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index dcf34c6b05ad3..0d336b5ec5714 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -141,11 +141,6 @@ struct dm_snapshot {
* for them to be committed.
*/
struct bio_list bios_queued_during_merge;
-
- /*
- * Flush data after merge.
- */
- struct bio flush_bio;
};
/*
@@ -1127,17 +1122,6 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
static void error_bios(struct bio *bio);
-static int flush_data(struct dm_snapshot *s)
-{
- struct bio *flush_bio = &s->flush_bio;
-
- bio_reset(flush_bio);
- bio_set_dev(flush_bio, s->origin->bdev);
- flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
- return submit_bio_wait(flush_bio);
-}
-
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
struct dm_snapshot *s = context;
@@ -1151,7 +1135,7 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
goto shut;
}
- if (flush_data(s) < 0) {
+ if (blkdev_issue_flush(s->origin->bdev) < 0) {
DMERR("Flush after merge failed: shutting down merge");
goto shut;
}
@@ -1340,7 +1324,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
- bio_init(&s->flush_bio, NULL, 0);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
@@ -1528,8 +1511,6 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
- bio_uninit(&s->flush_bio);
-
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
--
2.30.2
Remove handling of NULL returns from sleeping bio_alloc calls given that
those can't fail.
Signed-off-by: Christoph Hellwig <[email protected]>
---
drivers/block/drbd/drbd_receiver.c | 21 ++++-----------------
1 file changed, 4 insertions(+), 17 deletions(-)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6df2539e215ba..90402abb77e93 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1281,14 +1281,13 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
{
struct bio *bio = bio_alloc(GFP_NOIO, 0);
struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
- if (!bio || !octx) {
- drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
+
+ if (!octx) {
+ drbd_warn(device, "Could not allocate a octx, CANNOT ISSUE FLUSH\n");
/* FIXME: what else can I do now? disconnecting or detaching
* really does not help to improve the state of the world, either.
*/
- kfree(octx);
- if (bio)
- bio_put(bio);
+ bio_put(bio);
ctx->error = -ENOMEM;
put_ldev(device);
@@ -1688,10 +1687,6 @@ int drbd_submit_peer_request(struct drbd_device *device,
*/
next_bio:
bio = bio_alloc(GFP_NOIO, nr_pages);
- if (!bio) {
- drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages);
- goto fail;
- }
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, device->ldev->backing_bdev);
@@ -1726,14 +1721,6 @@ int drbd_submit_peer_request(struct drbd_device *device,
drbd_submit_bio_noacct(device, fault_type, bio);
} while (bios);
return 0;
-
-fail:
- while (bios) {
- bio = bios;
- bios = bios->bi_next;
- bio_put(bio);
- }
- return err;
}
static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
--
2.30.2
Only the priv field of rnbd_dev_blk_io is used, so store that value in
bio->bi_private directly and remove the entire bio_set overhead.
Signed-off-by: Christoph Hellwig <[email protected]>
---
drivers/block/rnbd/rnbd-srv-dev.c | 4 +---
drivers/block/rnbd/rnbd-srv-dev.h | 13 ++-----------
drivers/block/rnbd/rnbd-srv.c | 30 +++++-------------------------
drivers/block/rnbd/rnbd-srv.h | 1 -
4 files changed, 8 insertions(+), 40 deletions(-)
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
index 98d3e591a0885..c5d0a03911659 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.c
+++ b/drivers/block/rnbd/rnbd-srv-dev.c
@@ -12,8 +12,7 @@
#include "rnbd-srv-dev.h"
#include "rnbd-log.h"
-struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
- struct bio_set *bs)
+struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags)
{
struct rnbd_dev *dev;
int ret;
@@ -30,7 +29,6 @@ struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
dev->blk_open_flags = flags;
bdevname(dev->bdev, dev->name);
- dev->ibd_bio_set = bs;
return dev;
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
index 1a14ece0be726..2c3df02b5e8ec 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.h
+++ b/drivers/block/rnbd/rnbd-srv-dev.h
@@ -14,25 +14,16 @@
struct rnbd_dev {
struct block_device *bdev;
- struct bio_set *ibd_bio_set;
fmode_t blk_open_flags;
char name[BDEVNAME_SIZE];
};
-struct rnbd_dev_blk_io {
- struct rnbd_dev *dev;
- void *priv;
- /* have to be last member for front_pad usage of bioset_init */
- struct bio bio;
-};
-
/**
* rnbd_dev_open() - Open a device
+ * @path: path to open
* @flags: open flags
- * @bs: bio_set to use during block io,
*/
-struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
- struct bio_set *bs);
+struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags);
/**
* rnbd_dev_close() - Close a device
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 65c670e96075b..b1ac1414b56d5 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -116,9 +116,7 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
static void rnbd_dev_bi_end_io(struct bio *bio)
{
- struct rnbd_dev_blk_io *io = bio->bi_private;
-
- rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
+ rnbd_endio(bio->bi_private, blk_status_to_errno(bio->bi_status));
bio_put(bio);
}
@@ -131,7 +129,6 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
struct rnbd_srv_sess_dev *sess_dev;
u32 dev_id;
int err;
- struct rnbd_dev_blk_io *io;
struct bio *bio;
short prio;
@@ -152,20 +149,16 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
priv->sess_dev = sess_dev;
priv->id = id;
- bio = bio_alloc_bioset(GFP_KERNEL, 1, sess_dev->rnbd_dev->ibd_bio_set);
+ bio = bio_alloc(GFP_KERNEL, 1);
if (bio_add_page(bio, virt_to_page(data), datalen,
offset_in_page(data)) != datalen) {
rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
err = -EINVAL;
- goto sess_dev_put;
+ goto bio_put;
}
- io = container_of(bio, struct rnbd_dev_blk_io, bio);
- io->dev = sess_dev->rnbd_dev;
- io->priv = priv;
-
bio->bi_end_io = rnbd_dev_bi_end_io;
- bio->bi_private = io;
+ bio->bi_private = priv;
bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
@@ -180,7 +173,6 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
bio_put:
bio_put(bio);
-sess_dev_put:
rnbd_put_sess_dev(sess_dev);
err:
kfree(priv);
@@ -261,7 +253,6 @@ static void destroy_sess(struct rnbd_srv_session *srv_sess)
out:
xa_destroy(&srv_sess->index_idr);
- bioset_exit(&srv_sess->sess_bio_set);
pr_info("RTRS Session %s disconnected\n", srv_sess->sessname);
@@ -290,16 +281,6 @@ static int create_sess(struct rtrs_srv_sess *rtrs)
return -ENOMEM;
srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
- err = bioset_init(&srv_sess->sess_bio_set, srv_sess->queue_depth,
- offsetof(struct rnbd_dev_blk_io, bio),
- BIOSET_NEED_BVECS);
- if (err) {
- pr_err("Allocating srv_session for path %s failed\n",
- pathname);
- kfree(srv_sess);
- return err;
- }
-
xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC);
INIT_LIST_HEAD(&srv_sess->sess_dev_list);
mutex_init(&srv_sess->lock);
@@ -748,8 +729,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto reject;
}
- rnbd_dev = rnbd_dev_open(full_path, open_flags,
- &srv_sess->sess_bio_set);
+ rnbd_dev = rnbd_dev_open(full_path, open_flags);
if (IS_ERR(rnbd_dev)) {
pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %ld\n",
full_path, srv_sess->sessname, PTR_ERR(rnbd_dev));
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index e5604bce123ab..be2ae486d407e 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -23,7 +23,6 @@ struct rnbd_srv_session {
struct rtrs_srv_sess *rtrs;
char sessname[NAME_MAX];
int queue_depth;
- struct bio_set sess_bio_set;
struct xarray index_idr;
/* List of struct rnbd_srv_sess_dev */
--
2.30.2
The memory mapped in process_rdma is contiguous, so there is no need
to loop over bio_add_page. Remove rnbd_bio_map_kern and just open code
the bio allocation and mapping in the caller.
Signed-off-by: Christoph Hellwig <[email protected]>
---
drivers/block/rnbd/rnbd-srv-dev.c | 57 -------------------------------
drivers/block/rnbd/rnbd-srv-dev.h | 5 ---
drivers/block/rnbd/rnbd-srv.c | 20 ++++++++---
3 files changed, 15 insertions(+), 67 deletions(-)
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
index b241a099aeae2..98d3e591a0885 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.c
+++ b/drivers/block/rnbd/rnbd-srv-dev.c
@@ -44,60 +44,3 @@ void rnbd_dev_close(struct rnbd_dev *dev)
blkdev_put(dev->bdev, dev->blk_open_flags);
kfree(dev);
}
-
-void rnbd_dev_bi_end_io(struct bio *bio)
-{
- struct rnbd_dev_blk_io *io = bio->bi_private;
-
- rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
- bio_put(bio);
-}
-
-/**
- * rnbd_bio_map_kern - map kernel address into bio
- * @data: pointer to buffer to map
- * @bs: bio_set to use.
- * @len: length in bytes
- * @gfp_mask: allocation flags for bio allocation
- *
- * Map the kernel address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
-struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
- unsigned int len, gfp_t gfp_mask)
-{
- unsigned long kaddr = (unsigned long)data;
- unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = kaddr >> PAGE_SHIFT;
- const int nr_pages = end - start;
- int offset, i;
- struct bio *bio;
-
- bio = bio_alloc_bioset(gfp_mask, nr_pages, bs);
- if (!bio)
- return ERR_PTR(-ENOMEM);
-
- offset = offset_in_page(kaddr);
- for (i = 0; i < nr_pages; i++) {
- unsigned int bytes = PAGE_SIZE - offset;
-
- if (len <= 0)
- break;
-
- if (bytes > len)
- bytes = len;
-
- if (bio_add_page(bio, virt_to_page(data), bytes,
- offset) < bytes) {
- /* we don't support partial mappings */
- bio_put(bio);
- return ERR_PTR(-EINVAL);
- }
-
- data += bytes;
- len -= bytes;
- offset = 0;
- }
-
- return bio;
-}
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
index 0eb23850afb95..1a14ece0be726 100644
--- a/drivers/block/rnbd/rnbd-srv-dev.h
+++ b/drivers/block/rnbd/rnbd-srv-dev.h
@@ -41,11 +41,6 @@ void rnbd_dev_close(struct rnbd_dev *dev);
void rnbd_endio(void *priv, int error);
-void rnbd_dev_bi_end_io(struct bio *bio);
-
-struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
- unsigned int len, gfp_t gfp_mask);
-
static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
{
return queue_max_segments(bdev_get_queue(dev->bdev));
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 1ee808fc600cf..65c670e96075b 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -114,6 +114,14 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
return sess_dev;
}
+static void rnbd_dev_bi_end_io(struct bio *bio)
+{
+ struct rnbd_dev_blk_io *io = bio->bi_private;
+
+ rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
+ bio_put(bio);
+}
+
static int process_rdma(struct rnbd_srv_session *srv_sess,
struct rtrs_srv_op *id, void *data, u32 datalen,
const void *usr, size_t usrlen)
@@ -144,11 +152,11 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
priv->sess_dev = sess_dev;
priv->id = id;
- /* Generate bio with pages pointing to the rdma buffer */
- bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL);
- if (IS_ERR(bio)) {
- err = PTR_ERR(bio);
- rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", err);
+ bio = bio_alloc_bioset(GFP_KERNEL, 1, sess_dev->rnbd_dev->ibd_bio_set);
+ if (bio_add_page(bio, virt_to_page(data), datalen,
+ offset_in_page(data)) != datalen) {
+ rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
+ err = -EINVAL;
goto sess_dev_put;
}
@@ -170,6 +178,8 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
return 0;
+bio_put:
+ bio_put(bio);
sess_dev_put:
rnbd_put_sess_dev(sess_dev);
err:
--
2.30.2
Keep blk_next_bio next to the core bio infrastructure.
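For reference, the helper encapsulates the chain-and-submit pattern used by the
blk-lib helpers, which is why it belongs next to bio_chain; a simplified sketch
of a caller along the lines of __blkdev_issue_discard (queue-limit handling
omitted, req_sects stands for the clamped per-bio chunk):

	while (nr_sects) {
		/* chain and submit the previous bio, get a fresh one */
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;
	}
	*biop = bio;	/* the final bio is left for the caller to submit */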
Signed-off-by: Christoph Hellwig <[email protected]>
---
block/bio.c | 13 +++++++++++++
block/blk-lib.c | 13 -------------
2 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 0d400ba2dbd18..43fb28ac6b44e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -344,6 +344,19 @@ void bio_chain(struct bio *bio, struct bio *parent)
}
EXPORT_SYMBOL(bio_chain);
+struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
+{
+ struct bio *new = bio_alloc(gfp, nr_pages);
+
+ if (bio) {
+ bio_chain(bio, new);
+ submit_bio(bio);
+ }
+
+ return new;
+}
+EXPORT_SYMBOL_GPL(blk_next_bio);
+
static void bio_alloc_rescue(struct work_struct *work)
{
struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9f09beadcbe30..9245b300ef73e 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -10,19 +10,6 @@
#include "blk.h"
-struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
-{
- struct bio *new = bio_alloc(gfp, nr_pages);
-
- if (bio) {
- bio_chain(bio, new);
- submit_bio(bio);
- }
-
- return new;
-}
-EXPORT_SYMBOL_GPL(blk_next_bio);
-
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, int flags,
struct bio **biop)
--
2.30.2
Remove handling of NULL returns from sleeping bio_alloc calls given that
those can't fail.
Signed-off-by: Christoph Hellwig <[email protected]>
---
drivers/block/xen-blkback/blkback.c | 14 --------------
1 file changed, 14 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 14e452896d04c..6bb2ad7692065 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1327,9 +1327,6 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
seg[i].nsec << 9,
seg[i].offset) == 0)) {
bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i));
- if (unlikely(bio == NULL))
- goto fail_put_bio;
-
biolist[nbio++] = bio;
bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
@@ -1346,9 +1343,6 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
BUG_ON(operation_flags != REQ_PREFLUSH);
bio = bio_alloc(GFP_KERNEL, 0);
- if (unlikely(bio == NULL))
- goto fail_put_bio;
-
biolist[nbio++] = bio;
bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
@@ -1381,14 +1375,6 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
free_req(ring, pending_req);
msleep(1); /* back off a bit */
return -EIO;
-
- fail_put_bio:
- for (i = 0; i < nbio; i++)
- bio_put(biolist[i]);
- atomic_set(&pending_req->pendcnt, 1);
- __end_block_io_op(pending_req, BLK_STS_RESOURCE);
- msleep(1); /* back off a bit */
- return -EIO;
}
--
2.30.2
Pass the block_device and operation that we plan to use this bio for to
bio_alloc_kiocb to optimize the assignment.
Signed-off-by: Christoph Hellwig <[email protected]>
---
block/bio.c | 12 ++++++++----
block/fops.c | 17 ++++++++---------
include/linux/bio.h | 4 ++--
3 files changed, 18 insertions(+), 15 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index a58bc82d3c85f..8853ed6260cfd 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1729,7 +1729,9 @@ EXPORT_SYMBOL(bioset_init_from_src);
/**
* bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
* @kiocb: kiocb describing the IO
+ * @bdev: block device to allocate the bio for (can be %NULL)
* @nr_vecs: number of iovecs to pre-allocate
+ * @opf: operation and flags for bio
* @bs: bio_set to allocate from
*
* Description:
@@ -1740,14 +1742,14 @@ EXPORT_SYMBOL(bioset_init_from_src);
* MUST be done from process context, not hard/soft IRQ.
*
*/
-struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
- struct bio_set *bs)
+struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
+ unsigned short nr_vecs, unsigned int opf, struct bio_set *bs)
{
struct bio_alloc_cache *cache;
struct bio *bio;
if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
- return bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs);
+ return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
cache = per_cpu_ptr(bs->cache, get_cpu());
if (cache->free_list) {
@@ -1756,12 +1758,14 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
cache->nr--;
put_cpu();
bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs);
+ bio_set_dev(bio, bdev);
+ bio->bi_opf = opf;
bio->bi_pool = bs;
bio_set_flag(bio, BIO_PERCPU_CACHE);
return bio;
}
put_cpu();
- bio = bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs);
+ bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
bio_set_flag(bio, BIO_PERCPU_CACHE);
return bio;
}
diff --git a/block/fops.c b/block/fops.c
index 26bf15c770d21..3a62b8b912750 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -190,6 +190,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
struct blkdev_dio *dio;
struct bio *bio;
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
+ unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
loff_t pos = iocb->ki_pos;
int ret = 0;
@@ -197,7 +198,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
(bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
+ bio = bio_alloc_kiocb(iocb, bdev, nr_pages, opf, &blkdev_dio_pool);
dio = container_of(bio, struct blkdev_dio, bio);
atomic_set(&dio->ref, 1);
@@ -223,7 +224,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
blk_start_plug(&plug);
for (;;) {
- bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
bio->bi_write_hint = iocb->ki_hint;
bio->bi_private = dio;
@@ -238,11 +238,9 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
}
if (is_read) {
- bio->bi_opf = REQ_OP_READ;
if (dio->flags & DIO_SHOULD_DIRTY)
bio_set_pages_dirty(bio);
} else {
- bio->bi_opf = dio_bio_write_op(iocb);
task_io_account_write(bio->bi_iter.bi_size);
}
if (iocb->ki_flags & IOCB_NOWAIT)
@@ -259,6 +257,8 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
atomic_inc(&dio->ref);
submit_bio(bio);
bio = bio_alloc(GFP_KERNEL, nr_pages);
+ bio_set_dev(bio, bdev);
+ bio->bi_opf = opf;
}
blk_finish_plug(&plug);
@@ -311,6 +311,8 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
unsigned int nr_pages)
{
struct block_device *bdev = iocb->ki_filp->private_data;
+ bool is_read = iov_iter_rw(iter) == READ;
+ unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
struct blkdev_dio *dio;
struct bio *bio;
loff_t pos = iocb->ki_pos;
@@ -320,11 +322,10 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
(bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
+ bio = bio_alloc_kiocb(iocb, bdev, nr_pages, opf, &blkdev_dio_pool);
dio = container_of(bio, struct blkdev_dio, bio);
dio->flags = 0;
dio->iocb = iocb;
- bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
bio->bi_write_hint = iocb->ki_hint;
bio->bi_end_io = blkdev_bio_end_io_async;
@@ -347,14 +348,12 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
}
dio->size = bio->bi_iter.bi_size;
- if (iov_iter_rw(iter) == READ) {
- bio->bi_opf = REQ_OP_READ;
+ if (is_read) {
if (iter_is_iovec(iter)) {
dio->flags |= DIO_SHOULD_DIRTY;
bio_set_pages_dirty(bio);
}
} else {
- bio->bi_opf = dio_bio_write_op(iocb);
task_io_account_write(bio->bi_iter.bi_size);
}
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 2f63ae9a71e1a..5c5ada2ebb270 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -408,8 +408,8 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
unsigned int opf, gfp_t gfp_mask,
struct bio_set *bs);
-struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
- struct bio_set *bs);
+struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
+ unsigned short nr_vecs, unsigned int opf, struct bio_set *bs);
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
extern void bio_put(struct bio *);
--
2.30.2
Pass the block_device and operation that we plan to use this bio for to
bio_alloc_bioset to optimize the assignment. NULL/0 can be passed, both
for the passthrough case on a raw request_queue and to temporarily avoid
refactoring some nasty code.
Also move the gfp_mask argument after the nr_vecs argument for a much
more logical calling convention matching what most of the kernel does.
Signed-off-by: Christoph Hellwig <[email protected]>
---
block/bio.c | 29 +++++++++++++++++------------
block/bounce.c | 6 ++----
drivers/block/drbd/drbd_actlog.c | 5 ++---
drivers/block/drbd/drbd_bitmap.c | 7 +++----
drivers/md/bcache/request.c | 12 +++++-------
drivers/md/dm-crypt.c | 5 ++---
drivers/md/dm-io.c | 5 ++---
drivers/md/dm-writecache.c | 7 ++++---
drivers/md/dm.c | 5 +++--
drivers/md/md.c | 16 ++++++++--------
drivers/md/raid1.c | 3 ++-
drivers/md/raid10.c | 6 ++----
drivers/md/raid5-cache.c | 8 ++++----
drivers/md/raid5-ppl.c | 11 +++++------
drivers/target/target_core_iblock.c | 6 ++----
fs/btrfs/extent_io.c | 2 +-
fs/f2fs/data.c | 7 +++----
fs/iomap/buffered-io.c | 6 +++---
include/linux/bio.h | 7 ++++---
19 files changed, 74 insertions(+), 79 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 52c99bfa8008d..a58bc82d3c85f 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -417,8 +417,10 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
/**
* bio_alloc_bioset - allocate a bio for I/O
+ * @bdev: block device to allocate the bio for (can be %NULL)
+ * @nr_vecs: number of bvecs to pre-allocate
+ * @opf: operation and flags for bio
* @gfp_mask: the GFP_* mask given to the slab allocator
- * @nr_iovecs: number of iovecs to pre-allocate
* @bs: the bio_set to allocate from.
*
* Allocate a bio from the mempools in @bs.
@@ -447,15 +449,16 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
*
* Returns: Pointer to new bio on success, NULL on failure.
*/
-struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
+struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
+ unsigned int opf, gfp_t gfp_mask,
struct bio_set *bs)
{
gfp_t saved_gfp = gfp_mask;
struct bio *bio;
void *p;
- /* should not use nobvec bioset for nr_iovecs > 0 */
- if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0))
+ /* should not use nobvec bioset for nr_vecs > 0 */
+ if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
return NULL;
/*
@@ -492,26 +495,28 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
return NULL;
bio = p + bs->front_pad;
- if (nr_iovecs > BIO_INLINE_VECS) {
+ if (nr_vecs > BIO_INLINE_VECS) {
struct bio_vec *bvl = NULL;
- bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
+ bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
if (!bvl && gfp_mask != saved_gfp) {
punt_bios_to_rescuer(bs);
gfp_mask = saved_gfp;
- bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
+ bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
}
if (unlikely(!bvl))
goto err_free;
- bio_init(bio, bvl, nr_iovecs);
- } else if (nr_iovecs) {
+ bio_init(bio, bvl, nr_vecs);
+ } else if (nr_vecs) {
bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
} else {
bio_init(bio, NULL, 0);
}
bio->bi_pool = bs;
+ bio_set_dev(bio, bdev);
+ bio->bi_opf = opf;
return bio;
err_free:
@@ -766,7 +771,7 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
struct bio *b;
- b = bio_alloc_bioset(gfp_mask, 0, bs);
+ b = bio_alloc_bioset(NULL, 0, 0, gfp_mask, bs);
if (!b)
return NULL;
@@ -1742,7 +1747,7 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
struct bio *bio;
if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
- return bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
+ return bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs);
cache = per_cpu_ptr(bs->cache, get_cpu());
if (cache->free_list) {
@@ -1756,7 +1761,7 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
return bio;
}
put_cpu();
- bio = bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
+ bio = bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs);
bio_set_flag(bio, BIO_PERCPU_CACHE);
return bio;
}
diff --git a/block/bounce.c b/block/bounce.c
index 7af1a72835b99..330ddde25b460 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -165,12 +165,10 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
* asking for trouble and would force extra work on
* __bio_clone_fast() anyways.
*/
- bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src),
- &bounce_bio_set);
- bio->bi_bdev = bio_src->bi_bdev;
+ bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src),
+ bio_src->bi_opf, GFP_NOIO, &bounce_bio_set);
if (bio_flagged(bio_src, BIO_REMAPPED))
bio_set_flag(bio, BIO_REMAPPED);
- bio->bi_opf = bio_src->bi_opf;
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 72cf7603d51fc..f5bcded3640da 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -138,15 +138,14 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
op_flags |= REQ_FUA | REQ_PREFLUSH;
op_flags |= REQ_SYNC;
- bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
- bio_set_dev(bio, bdev->md_bdev);
+ bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO,
+ &drbd_md_io_bio_set);
bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, device->md_io.page, size, 0) != size)
goto out;
bio->bi_private = device;
bio->bi_end_io = drbd_md_endio;
- bio_set_op_attrs(bio, op, op_flags);
if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index c1f816f896a89..df25eecf80af0 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -976,12 +976,13 @@ static void drbd_bm_endio(struct bio *bio)
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
- struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
struct drbd_device *device = ctx->device;
+ unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
+ struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op,
+ GFP_NOIO, &drbd_md_io_bio_set);
struct drbd_bitmap *b = device->bitmap;
struct page *page;
unsigned int len;
- unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
sector_t on_disk_sector =
device->ldev->md.md_offset + device->ldev->md.bm_offset;
@@ -1006,14 +1007,12 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
- bio_set_dev(bio, device->ldev->md_bdev);
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = drbd_bm_endio;
- bio_set_op_attrs(bio, op, 0);
if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
bio_io_error(bio);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index d15aae6c51c13..c4b7e434de8ac 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -913,14 +913,13 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
- cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+ cache_bio = bio_alloc_bioset(miss->bi_bdev,
DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
- &dc->disk.bio_split);
+ 0, GFP_NOWAIT, &dc->disk.bio_split);
if (!cache_bio)
goto out_submit;
cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
- bio_copy_dev(cache_bio, miss);
cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = backing_request_endio;
@@ -1025,16 +1024,15 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
*/
struct bio *flush;
- flush = bio_alloc_bioset(GFP_NOIO, 0,
- &dc->disk.bio_split);
+ flush = bio_alloc_bioset(bio->bi_bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &dc->disk.bio_split);
if (!flush) {
s->iop.status = BLK_STS_RESOURCE;
goto insert_data;
}
- bio_copy_dev(flush, bio);
flush->bi_end_io = backing_request_endio;
flush->bi_private = cl;
- flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
/* I/O request sent to backing device */
closure_bio_submit(s->iop.c, flush, cl);
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3c5ecd35d3483..f7e4435b7439a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1672,11 +1672,10 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_lock(&cc->bio_alloc_lock);
- clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
+ clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf,
+ GFP_NOIO, &cc->bs);
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
- bio_set_dev(clone, cc->dev->bdev);
- clone->bi_opf = io->base_bio->bi_opf;
remaining_size = size;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2d3cda0acacb6..23e038f8dc845 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -345,11 +345,10 @@ static void do_region(int op, int op_flags, unsigned region,
(PAGE_SIZE >> SECTOR_SHIFT)));
}
- bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
+ bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags,
+ GFP_NOIO, &io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
- bio_set_dev(bio, where->bdev);
bio->bi_end_io = endio;
- bio_set_op_attrs(bio, op, op_flags);
store_io_and_region_in_bio(bio, io, region);
if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 4f31591d2d25e..5630b470ba429 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1821,11 +1821,11 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
max_pages = e->wc_list_contiguous;
- bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
+ bio = bio_alloc_bioset(wc->dev->bdev, max_pages, REQ_OP_WRITE,
+ GFP_NOIO, &wc->bio_set);
wb = container_of(bio, struct writeback_struct, bio);
wb->wc = wc;
bio->bi_end_io = writecache_writeback_endio;
- bio_set_dev(bio, wc->dev->bdev);
bio->bi_iter.bi_sector = read_original_sector(wc, e);
if (max_pages <= WB_LIST_INLINE ||
unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
@@ -1852,7 +1852,8 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
wb->wc_list[wb->wc_list_n++] = f;
e = f;
}
- bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
+ if (WC_MODE_FUA(wc))
+ bio->bi_opf |= REQ_FUA;
if (writecache_has_error(wc)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 81449cbdafa81..84f3dd58d1a16 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -519,7 +519,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
struct dm_target_io *tio;
struct bio *clone;
- clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
+ clone = bio_alloc_bioset(NULL, 0, 0, GFP_NOIO, &md->io_bs);
tio = container_of(clone, struct dm_target_io, clone);
tio->inside_dm_io = true;
@@ -552,7 +552,8 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *t
/* the dm_target_io embedded in ci->io is available */
tio = &ci->io->tio;
} else {
- struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
+ struct bio *clone = bio_alloc_bioset(NULL, 0, 0, gfp_mask,
+ &ci->io->md->bs);
if (!clone)
return NULL;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5881d05a76ebc..40fc1f7e65c5d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -562,11 +562,11 @@ static void submit_flushes(struct work_struct *ws)
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set);
+ bi = bio_alloc_bioset(rdev->bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &mddev->bio_set);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
- bio_set_dev(bi, rdev->bdev);
- bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
atomic_inc(&mddev->flush_pending);
submit_bio(bi);
rcu_read_lock();
@@ -955,7 +955,6 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
* If an error occurred, call md_error
*/
struct bio *bio;
- int ff = 0;
if (!page)
return;
@@ -963,11 +962,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
if (test_bit(Faulty, &rdev->flags))
return;
- bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
+ bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
+ 1,
+ REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA,
+ GFP_NOIO, &mddev->sync_set);
atomic_inc(&rdev->nr_pending);
- bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
@@ -976,8 +977,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
test_bit(FailFast, &rdev->flags) &&
!test_bit(LastDev, &rdev->flags))
- ff = MD_FAILFAST;
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
+ bio->bi_opf |= MD_FAILFAST;
atomic_inc(&mddev->pending_writes);
submit_bio(bio);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e2d8acb1e9881..43276f8fdc815 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1126,7 +1126,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
int i = 0;
struct bio *behind_bio = NULL;
- behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
+ behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO,
+ &r1_bio->mddev->bio_set);
if (!behind_bio)
return;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 2b969f70a31fb..cb7c58050708e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4892,14 +4892,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
return sectors_done;
}
- read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set);
-
- bio_set_dev(read_bio, rdev->bdev);
+ read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
+ GFP_KERNEL, &mddev->bio_set);
read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_reshape_read;
- bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 0b5dcaabbc155..66313adf99875 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -735,10 +735,9 @@ static void r5l_submit_current_io(struct r5l_log *log)
static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
- struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, &log->bs);
+ struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
+ REQ_OP_WRITE, GFP_NOIO, &log->bs);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
return bio;
@@ -1634,7 +1633,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
{
struct page *page;
- ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_VECS, &log->bs);
+ ctx->ra_bio = bio_alloc_bioset(NULL, BIO_MAX_VECS, 0, GFP_KERNEL,
+ &log->bs);
if (!ctx->ra_bio)
return -ENOMEM;
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 4ab417915d7f1..054d3bb252d48 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -496,11 +496,10 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
struct bio *prev = bio;
- bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS,
+ bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS,
+ prev->bi_opf, GFP_NOIO,
&ppl_conf->bs);
- bio->bi_opf = prev->bi_opf;
bio->bi_write_hint = prev->bi_write_hint;
- bio_copy_dev(bio, prev);
bio->bi_iter.bi_sector = bio_end_sector(prev);
bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
@@ -637,10 +636,10 @@ static void ppl_do_flush(struct ppl_io_unit *io)
struct bio *bio;
char b[BDEVNAME_SIZE];
- bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
- bio_set_dev(bio, bdev);
+ bio = bio_alloc_bioset(bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &ppl_conf->flush_bs);
bio->bi_private = io;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
bio->bi_end_io = ppl_flush_endio;
pr_debug("%s: dev: %s\n", __func__,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index bf8ae4825a06e..f7c6e822f345a 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -353,18 +353,16 @@ static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
* Only allocate as many vector entries as the bio code allows us to,
* we'll loop later on until we have handled the whole request.
*/
- bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num),
- &ib_dev->ibd_bio_set);
+ bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
+ GFP_NOIO, &ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
return NULL;
}
- bio_set_dev(bio, ib_dev->ibd_bd);
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
bio->bi_iter.bi_sector = lba;
- bio->bi_opf = opf;
return bio;
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d6d48ecf823c9..f982687c5b00d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3144,7 +3144,7 @@ struct bio *btrfs_bio_alloc(unsigned int nr_iovecs)
struct bio *bio;
ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS);
- bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
+ bio = bio_alloc_bioset(NULL, nr_iovecs, 0, GFP_NOFS, &btrfs_bioset);
btrfs_bio_init(btrfs_bio(bio));
return bio;
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index aacf5e4dcc576..5cb07ca7c4f52 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -394,7 +394,7 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
struct f2fs_sb_info *sbi = fio->sbi;
struct bio *bio;
- bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);
+ bio = bio_alloc_bioset(NULL, npages, 0, GFP_NOIO, &f2fs_bioset);
f2fs_target_device(sbi, fio->new_blkaddr, bio);
if (is_read_io(fio->op)) {
@@ -985,8 +985,8 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
struct bio_post_read_ctx *ctx = NULL;
unsigned int post_read_steps = 0;
- bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
- bio_max_segs(nr_pages), &f2fs_bioset);
+ bio = bio_alloc_bioset(NULL, bio_max_segs(nr_pages), REQ_OP_READ | op_flag,
+ for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
if (!bio)
return ERR_PTR(-ENOMEM);
@@ -994,7 +994,6 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
if (fscrypt_inode_uses_fs_layer_crypto(inode))
post_read_steps |= STEP_DECRYPT;
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index c938bbad075e1..340d373cb1bf9 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1196,10 +1196,10 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
struct iomap_ioend *ioend;
struct bio *bio;
- bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
- bio_set_dev(bio, wpc->iomap.bdev);
+ bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
+ REQ_OP_WRITE | wbc_to_write_flags(wbc),
+ GFP_NOFS, &iomap_ioend_bioset);
bio->bi_iter.bi_sector = sector;
- bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
bio->bi_write_hint = inode->i_write_hint;
wbc_init_bio(wbc, bio);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index edeae54074ede..2f63ae9a71e1a 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -405,8 +405,9 @@ extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
-struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs,
- struct bio_set *bs);
+struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
+ unsigned int opf, gfp_t gfp_mask,
+ struct bio_set *bs);
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
struct bio_set *bs);
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
@@ -419,7 +420,7 @@ extern struct bio_set fs_bio_set;
static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs)
{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
+ return bio_alloc_bioset(NULL, nr_iovecs, 0, gfp_mask, &fs_bio_set);
}
void submit_bio(struct bio *bio);
--
2.30.2
Pass the block_device that we plan to use this bio for and the
operation to bio_reset to optimize the assignment. A NULL block_device
can be passed, both for the passthrough case on a raw request_queue and
to temporarily avoid refactoring some nasty code.
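For reference, the caller conversion pattern looks roughly like this (illustrative
sketch only, with made-up variable names; the real conversions are in the hunks
below):

	/* before: reset, then set up the device and operation separately */
	bio_reset(bio);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;

	/* after: bio_reset() initializes the device and operation directly */
	bio_reset(bio, bdev, REQ_OP_READ);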
Signed-off-by: Christoph Hellwig <[email protected]>
---
block/bio.c | 6 +++++-
drivers/block/pktcdvd.c | 8 ++------
drivers/md/bcache/journal.c | 12 ++++--------
drivers/md/bcache/request.c | 4 ++--
drivers/md/raid1.c | 5 ++---
drivers/md/raid10.c | 8 +++-----
drivers/md/raid5-cache.c | 9 +++------
drivers/md/raid5.c | 8 ++++----
fs/btrfs/disk-io.c | 4 +---
fs/crypto/bio.c | 8 ++------
include/linux/bio.h | 9 +--------
11 files changed, 29 insertions(+), 52 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 2480f80296eac..9050b0cc1e43e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -295,6 +295,8 @@ EXPORT_SYMBOL(bio_init);
/**
* bio_reset - reinitialize a bio
* @bio: bio to reset
+ * @bdev: block device to use the bio for
+ * @opf: operation and flags for bio
*
* Description:
* After calling bio_reset(), @bio will be in the same state as a freshly
@@ -302,11 +304,13 @@ EXPORT_SYMBOL(bio_init);
* preserved are the ones that are initialized by bio_alloc_bioset(). See
* comment in struct bio.
*/
-void bio_reset(struct bio *bio)
+void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
{
bio_uninit(bio);
memset(bio, 0, BIO_RESET_BYTES);
atomic_set(&bio->__bi_remaining, 1);
+ bio->bi_bdev = bdev;
+ bio->bi_opf = opf;
}
EXPORT_SYMBOL(bio_reset);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 2b6b70a39e760..3aa5954429462 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1020,9 +1020,8 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
continue;
bio = pkt->r_bios[f];
- bio_reset(bio);
+ bio_reset(bio, pd->bdev, REQ_OP_READ);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
- bio_set_dev(bio, pd->bdev);
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1034,7 +1033,6 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
BUG();
atomic_inc(&pkt->io_wait);
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
pkt_queue_bio(pd, bio);
frames_read++;
}
@@ -1235,9 +1233,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
int f;
- bio_reset(pkt->w_bio);
+ bio_reset(pkt->w_bio, pd->bdev, REQ_OP_WRITE);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
- bio_set_dev(pkt->w_bio, pd->bdev);
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -1270,7 +1267,6 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
/* Start the write request */
atomic_set(&pkt->io_wait, 1);
- bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
pkt_queue_bio(pd, pkt->w_bio);
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6d26c5b06e2b6..7c2ca52ca3e43 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -53,14 +53,12 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
reread: left = ca->sb.bucket_size - offset;
len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
- bio_reset(bio);
+ bio_reset(bio, ca->bdev, REQ_OP_READ);
bio->bi_iter.bi_sector = bucket + offset;
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
bch_bio_map(bio, data);
closure_bio_submit(ca->set, bio, &cl);
@@ -771,16 +769,14 @@ static void journal_write_unlocked(struct closure *cl)
atomic_long_add(sectors, &ca->meta_sectors_written);
- bio_reset(bio);
+ bio_reset(bio, ca->bdev, REQ_OP_WRITE |
+ REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
- bio_set_op_attrs(bio, REQ_OP_WRITE,
- REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
bch_bio_map(bio, w->data);
trace_bcache_journal_write(bio, w->data->keys);
bio_list_add(&list, bio);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index d4b98ebffd948..7ba59d08ed870 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -831,11 +831,11 @@ static void cached_dev_read_done(struct closure *cl)
*/
if (s->iop.bio) {
- bio_reset(s->iop.bio);
+ bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ);
s->iop.bio->bi_iter.bi_sector =
s->cache_miss->bi_iter.bi_sector;
- bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
+ bio_clone_blkg_association(s->iop.bio, s->cache_miss);
bch_bio_map(s->iop.bio, NULL);
bio_copy_data(s->cache_miss, s->iop.bio);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 43276f8fdc815..e7710fb5befb4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2166,11 +2166,10 @@ static void process_checks(struct r1bio *r1_bio)
continue;
/* fixup the bio for reuse, but preserve errno */
status = b->bi_status;
- bio_reset(b);
+ bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
b->bi_status = status;
b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
- bio_set_dev(b, conf->mirrors[i].rdev->bdev);
b->bi_end_io = end_sync_read;
rp->raid_bio = r1_bio;
b->bi_private = rp;
@@ -2651,7 +2650,7 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
for (i = conf->poolinfo->raid_disks; i--; ) {
bio = r1bio->bios[i];
rps = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rps;
}
r1bio->master_bio = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index cb7c58050708e..da07bcbc06d08 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2422,7 +2422,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* bi_vecs, as the read request might have corrupted these
*/
rp = get_resync_pages(tbio);
- bio_reset(tbio);
+ bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
@@ -2430,7 +2430,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
tbio->bi_private = rp;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
tbio->bi_end_io = end_sync_write;
- bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
bio_copy_data(tbio, fbio);
@@ -2441,7 +2440,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
- bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
submit_bio_noacct(tbio);
}
@@ -3160,12 +3158,12 @@ static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
for (i = 0; i < nalloc; i++) {
bio = r10bio->devs[i].bio;
rp = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rp;
bio = r10bio->devs[i].repl_bio;
if (bio) {
rp = bio->bi_private;
- bio_reset(bio);
+ bio_reset(bio, NULL, 0);
bio->bi_private = rp;
}
}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 98b9ca11c28d8..86e2bb89d9c7b 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1301,10 +1301,9 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
if (!do_flush)
return;
- bio_reset(&log->flush_bio);
- bio_set_dev(&log->flush_bio, log->rdev->bdev);
+ bio_reset(&log->flush_bio, log->rdev->bdev,
+ REQ_OP_WRITE | REQ_PREFLUSH);
log->flush_bio.bi_end_io = r5l_log_flush_endio;
- log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio);
}
@@ -1678,9 +1677,7 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
struct r5l_recovery_ctx *ctx,
sector_t offset)
{
- bio_reset(ctx->ra_bio);
- bio_set_dev(ctx->ra_bio, log->rdev->bdev);
- bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
+ bio_reset(ctx->ra_bio, log->rdev->bdev, REQ_OP_READ);
ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
ctx->valid_pages = 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a9dcc5bc9c329..7c119208a2143 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2677,7 +2677,7 @@ static void raid5_end_read_request(struct bio * bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_status);
if (i == disks) {
- bio_reset(bi);
+ bio_reset(bi, NULL, 0);
BUG();
return;
}
@@ -2785,7 +2785,7 @@ static void raid5_end_read_request(struct bio * bi)
}
}
rdev_dec_pending(rdev, conf->mddev);
- bio_reset(bi);
+ bio_reset(bi, NULL, 0);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
@@ -2823,7 +2823,7 @@ static void raid5_end_write_request(struct bio *bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_status);
if (i == disks) {
- bio_reset(bi);
+ bio_reset(bi, NULL, 0);
BUG();
return;
}
@@ -2860,7 +2860,7 @@ static void raid5_end_write_request(struct bio *bi)
if (sh->batch_head && bi->bi_status && !replacement)
set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
- bio_reset(bi);
+ bio_reset(bi, NULL, 0);
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f45aa506f9a6f..505ba21230b1f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4154,10 +4154,8 @@ static void write_dev_flush(struct btrfs_device *device)
return;
#endif
- bio_reset(bio);
+ bio_reset(bio, device->bdev, REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
bio->bi_end_io = btrfs_end_empty_barrier;
- bio_set_dev(bio, device->bdev);
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait;
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 755e985a42e0b..2217fe5ece6f9 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -80,9 +80,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
err = submit_bio_wait(bio);
if (err)
goto out;
- bio_reset(bio);
- bio_set_dev(bio, inode->i_sb->s_bdev);
- bio->bi_opf = REQ_OP_WRITE;
+ bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
num_pages = 0;
}
}
@@ -181,9 +179,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
err = submit_bio_wait(bio);
if (err)
goto out;
- bio_reset(bio);
- bio_set_dev(bio, inode->i_sb->s_bdev);
- bio->bi_opf = REQ_OP_WRITE;
+ bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
} while (len != 0);
err = 0;
out:
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 41bedf727f59c..18cfe5bb41ea8 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -459,7 +459,7 @@ extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
unsigned short max_vecs, unsigned int opf);
extern void bio_uninit(struct bio *);
-extern void bio_reset(struct bio *);
+void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf);
void bio_chain(struct bio *, struct bio *);
int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
@@ -517,13 +517,6 @@ static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
bio_associate_blkg(bio);
}
-static inline void bio_copy_dev(struct bio *dst, struct bio *src)
-{
- bio_clear_flag(dst, BIO_REMAPPED);
- dst->bi_bdev = src->bi_bdev;
- bio_clone_blkg_association(dst, src);
-}
-
/*
* BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
*
--
2.30.2
Pass the block_device that we plan to use this bio for and the
operation to bio_init to optimize the assignment. A NULL block_device
can be passed, both for the passthrough case on a raw request_queue and
to temporarily avoid refactoring some nasty code.
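As a rough illustration of the conversion (hypothetical on-stack example, not
taken verbatim from any hunk below):

	struct bio bio;
	struct bio_vec bvec;

	/* before: three steps to set up a bio embedded in another structure */
	bio_init(&bio, &bvec, 1);
	bio_set_dev(&bio, bdev);
	bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	/* after: one call fills in the device and operation as well */
	bio_init(&bio, bdev, &bvec, 1, REQ_OP_WRITE | REQ_PREFLUSH);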
Signed-off-by: Christoph Hellwig <[email protected]>
---
block/bio.c | 26 +++++++++++++-------------
block/blk-flush.c | 4 +---
block/blk-zoned.c | 5 +----
block/fops.c | 18 +++++++++---------
drivers/block/floppy.c | 4 +---
drivers/block/zram/zram_drv.c | 5 ++---
drivers/md/bcache/io.c | 3 ++-
drivers/md/bcache/journal.c | 4 +---
drivers/md/bcache/movinggc.c | 4 ++--
drivers/md/bcache/request.c | 2 +-
drivers/md/bcache/super.c | 8 +++-----
drivers/md/bcache/writeback.c | 4 ++--
drivers/md/dm.c | 5 ++---
drivers/md/md-multipath.c | 2 +-
drivers/md/md.c | 8 +++-----
drivers/md/raid5-cache.c | 2 +-
drivers/md/raid5-ppl.c | 2 +-
drivers/md/raid5.c | 4 ++--
drivers/nvme/target/io-cmd-bdev.c | 10 ++++------
drivers/nvme/target/passthru.c | 4 ++--
drivers/nvme/target/zns.c | 4 ++--
fs/iomap/buffered-io.c | 4 +---
fs/xfs/xfs_bio_io.c | 4 +---
fs/xfs/xfs_log.c | 14 +++++++-------
fs/zonefs/super.c | 4 +---
include/linux/bio.h | 4 ++--
26 files changed, 68 insertions(+), 90 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 5c97f56c1ee50..2480f80296eac 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -249,12 +249,14 @@ static void bio_free(struct bio *bio)
* they must remember to pair any call to bio_init() with bio_uninit()
* when IO has completed, or when the bio is released.
*/
-void bio_init(struct bio *bio, struct bio_vec *table,
- unsigned short max_vecs)
+void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
+ unsigned short max_vecs, unsigned int opf)
{
bio->bi_next = NULL;
- bio->bi_bdev = NULL;
- bio->bi_opf = 0;
+ bio->bi_bdev = bdev;
+ if (bdev)
+ bio_associate_blkg(bio);
+ bio->bi_opf = opf;
bio->bi_flags = 0;
bio->bi_ioprio = 0;
bio->bi_write_hint = 0;
@@ -504,16 +506,14 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
if (unlikely(!bvl))
goto err_free;
- bio_init(bio, bvl, nr_vecs);
+ bio_init(bio, bdev, bvl, nr_vecs, opf);
} else if (nr_vecs) {
- bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
+ bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
} else {
- bio_init(bio, NULL, 0);
+ bio_init(bio, bdev, NULL, 0, opf);
}
bio->bi_pool = bs;
- bio_set_dev(bio, bdev);
- bio->bi_opf = opf;
return bio;
err_free:
@@ -541,7 +541,8 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
if (unlikely(!bio))
return NULL;
- bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
+ bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs,
+ 0);
bio->bi_pool = NULL;
return bio;
}
@@ -1754,9 +1755,8 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
cache->free_list = bio->bi_next;
cache->nr--;
put_cpu();
- bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs);
- bio_set_dev(bio, bdev);
- bio->bi_opf = opf;
+ bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL,
+ nr_vecs, opf);
bio->bi_pool = bs;
bio_set_flag(bio, BIO_PERCPU_CACHE);
return bio;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index e4df894189ced..c689687248706 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -460,9 +460,7 @@ int blkdev_issue_flush(struct block_device *bdev)
{
struct bio bio;
- bio_init(&bio, NULL, 0);
- bio_set_dev(&bio, bdev);
- bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 5ab755d792c81..602bef54c8134 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -238,10 +238,7 @@ static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask)
{
struct bio bio;
- bio_init(&bio, NULL, 0);
- bio_set_dev(&bio, bdev);
- bio.bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;
-
+ bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
return submit_bio_wait(&bio);
}
diff --git a/block/fops.c b/block/fops.c
index c683596847731..3696665e586a8 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -75,8 +75,13 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
return -ENOMEM;
}
- bio_init(&bio, vecs, nr_pages);
- bio_set_dev(&bio, bdev);
+ if (iov_iter_rw(iter) == READ) {
+ bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
+ if (iter_is_iovec(iter))
+ should_dirty = true;
+ } else {
+ bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
+ }
bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
bio.bi_write_hint = iocb->ki_hint;
bio.bi_private = current;
@@ -88,14 +93,9 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
goto out;
ret = bio.bi_iter.bi_size;
- if (iov_iter_rw(iter) == READ) {
- bio.bi_opf = REQ_OP_READ;
- if (iter_is_iovec(iter))
- should_dirty = true;
- } else {
- bio.bi_opf = dio_bio_write_op(iocb);
+ if (iov_iter_rw(iter) == WRITE)
task_io_account_write(ret);
- }
+
if (iocb->ki_flags & IOCB_NOWAIT)
bio.bi_opf |= REQ_NOWAIT;
if (iocb->ki_flags & IOCB_HIPRI)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e611411a934ce..19c2d0327e157 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4129,15 +4129,13 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
cbdata.drive = drive;
- bio_init(&bio, &bio_vec, 1);
- bio_set_dev(&bio, bdev);
+ bio_init(&bio, bdev, &bio_vec, 1, REQ_OP_READ);
bio_add_page(&bio, page, block_size(bdev), 0);
bio.bi_iter.bi_sector = 0;
bio.bi_flags |= (1 << BIO_QUIET);
bio.bi_private = &cbdata;
bio.bi_end_io = floppy_rb0_cb;
- bio_set_op_attrs(&bio, REQ_OP_READ, 0);
init_completion(&cbdata.complete);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d8dc7ed1c78fc..47c966cffe690 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -744,10 +744,9 @@ static ssize_t writeback_store(struct device *dev,
continue;
}
- bio_init(&bio, &bio_vec, 1);
- bio_set_dev(&bio, zram->bdev);
+ bio_init(&bio, zram->bdev, &bio_vec, 1,
+ REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
- bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;
bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
bvec.bv_offset);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9c6f9ec55b724..020712c5203fd 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -26,7 +26,8 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ meta_bucket_pages(&c->cache->sb), 0);
return bio;
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 61bd79babf7ae..6d26c5b06e2b6 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -611,11 +611,9 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
- bio_init(bio, bio->bi_inline_vecs, 1);
- bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
+ bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
- bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index b9c3d27ec093a..99499d1f6e666 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -79,8 +79,8 @@ static void moving_init(struct moving_io *io)
{
struct bio *bio = &io->bio.bio;
- bio_init(bio, bio->bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index c4b7e434de8ac..d4b98ebffd948 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -685,7 +685,7 @@ static void do_bio_hook(struct search *s,
{
struct bio *bio = &s->bio.bio;
- bio_init(bio, NULL, 0);
+ bio_init(bio, NULL, NULL, 0, 0);
__bio_clone_fast(bio, orig_bio);
/*
* bi_end_io can be set separately somewhere else, e.g. the
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 140f35dc0c457..85577c9f971d2 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -343,8 +343,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
down(&dc->sb_write_mutex);
closure_init(cl, parent);
- bio_init(bio, dc->sb_bv, 1);
- bio_set_dev(bio, dc->bdev);
+ bio_init(bio, dc->bdev, dc->sb_bv, 1, 0);
bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc;
@@ -387,8 +386,7 @@ void bcache_write_super(struct cache_set *c)
if (ca->sb.version < version)
ca->sb.version = version;
- bio_init(bio, ca->sb_bv, 1);
- bio_set_dev(bio, ca->bdev);
+ bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
@@ -2240,7 +2238,7 @@ static int cache_alloc(struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
+ bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
/*
* when ca->sb.njournal_buckets is not zero, journal exists,
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index c7560f66dca88..d42301e6309d4 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -292,8 +292,8 @@ static void dirty_init(struct keybuf_key *w)
struct dirty_io *io = w->private;
struct bio *bio = &io->bio;
- bio_init(bio, bio->bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
+ bio_init(bio, NULL, bio->bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 84f3dd58d1a16..09d9b674bd851 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1303,9 +1303,8 @@ static int __send_empty_flush(struct clone_info *ci)
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
- bio_init(&flush_bio, NULL, 0);
- flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
- bio_set_dev(&flush_bio, ci->io->md->disk->part0);
+ bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
ci->bio = &flush_bio;
ci->sector_count = 0;
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index e7d6486f090ff..5e15940634d85 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -121,7 +121,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
}
multipath = conf->multipaths + mp_bh->path;
- bio_init(&mp_bh->bio, NULL, 0);
+ bio_init(&mp_bh->bio, NULL, NULL, 0, 0);
__bio_clone_fast(&mp_bh->bio, bio);
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 40fc1f7e65c5d..0a89f072dae0d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -998,13 +998,11 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct bio bio;
struct bio_vec bvec;
- bio_init(&bio, &bvec, 1);
-
if (metadata_op && rdev->meta_bdev)
- bio_set_dev(&bio, rdev->meta_bdev);
+ bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags);
else
- bio_set_dev(&bio, rdev->bdev);
- bio.bi_opf = op | op_flags;
+ bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags);
+
if (metadata_op)
bio.bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 66313adf99875..98b9ca11c28d8 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -3108,7 +3108,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
INIT_LIST_HEAD(&log->io_end_ios);
INIT_LIST_HEAD(&log->flushing_ios);
INIT_LIST_HEAD(&log->finished_ios);
- bio_init(&log->flush_bio, NULL, 0);
+ bio_init(&log->flush_bio, NULL, NULL, 0, 0);
log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
if (!log->io_kc)
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 054d3bb252d48..3446797fa0aca 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -250,7 +250,7 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
INIT_LIST_HEAD(&io->stripe_list);
atomic_set(&io->pending_stripes, 0);
atomic_set(&io->pending_flushes, 0);
- bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
+ bio_init(&io->bio, NULL, io->biovec, PPL_IO_INLINE_BVECS, 0);
pplhdr = page_address(io->header_page);
clear_page(pplhdr);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ffe720c73b0a5..a9dcc5bc9c329 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2310,8 +2310,8 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
for (i = 0; i < disks; i++) {
struct r5dev *dev = &sh->dev[i];
- bio_init(&dev->req, &dev->vec, 1);
- bio_init(&dev->rreq, &dev->rvec, 1);
+ bio_init(&dev->req, NULL, &dev->vec, 1, 0);
+ bio_init(&dev->rreq, NULL, &dev->rvec, 1, 0);
}
if (raid5_has_ppl(conf)) {
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index e092af3abc710..95c2bbb0b2f5f 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -267,9 +267,8 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
if (nvmet_use_inline_bvec(req)) {
bio = &req->b.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
- bio_set_dev(bio, req->ns->bdev);
- bio->bi_opf = op;
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), op);
} else {
bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op,
GFP_KERNEL);
@@ -328,11 +327,10 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
- bio_set_dev(bio, req->ns->bdev);
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH);
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(bio);
}
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 38f72968c3fde..a810bf569fff8 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -206,8 +206,8 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
if (nvmet_use_inline_bvec(req)) {
bio = &req->p.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
- bio->bi_opf = req_op(rq);
+ bio_init(bio, NULL, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), req_op(rq));
} else {
bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
GFP_KERNEL);
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 62c53e8f26d35..3e421217a7ade 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -552,8 +552,8 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
if (nvmet_use_inline_bvec(req)) {
bio = &req->z.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
- bio->bi_opf = op;
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), op);
} else {
bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
}
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 70f3657a6ec06..491534e908615 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -549,10 +549,8 @@ static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
struct bio_vec bvec;
struct bio bio;
- bio_init(&bio, &bvec, 1);
- bio.bi_opf = REQ_OP_READ;
+ bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
- bio_set_dev(&bio, iomap->bdev);
bio_add_folio(&bio, folio, plen, poff);
return submit_bio_wait(&bio);
}
diff --git a/fs/xfs/xfs_bio_io.c b/fs/xfs/xfs_bio_io.c
index eff4a9f21dcff..32fa02945f739 100644
--- a/fs/xfs/xfs_bio_io.c
+++ b/fs/xfs/xfs_bio_io.c
@@ -36,9 +36,7 @@ xfs_flush_bdev_async(
return;
}
- bio_init(bio, NULL, 0);
- bio_set_dev(bio, bdev);
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+ bio_init(bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
bio->bi_private = done;
bio->bi_end_io = xfs_flush_bdev_async_endio;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 89fec9a18c349..16f9edbda4eb3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1883,19 +1883,19 @@ xlog_write_iclog(
return;
}
- bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE));
- bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev);
- iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
- iclog->ic_bio.bi_end_io = xlog_bio_end_io;
- iclog->ic_bio.bi_private = iclog;
-
/*
* We use REQ_SYNC | REQ_IDLE here to tell the block layer the are more
* IOs coming immediately after this one. This prevents the block layer
* writeback throttle from throttling log writes behind background
* metadata writeback and causing priority inversions.
*/
- iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE;
+ bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
+ howmany(count, PAGE_SIZE),
+ REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
+ iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
+ iclog->ic_bio.bi_end_io = xlog_bio_end_io;
+ iclog->ic_bio.bi_private = iclog;
+
if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
/*
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index c0fc2c326dcee..d331b52592a0a 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -1540,10 +1540,8 @@ static int zonefs_read_super(struct super_block *sb)
if (!page)
return -ENOMEM;
- bio_init(&bio, &bio_vec, 1);
+ bio_init(&bio, sb->s_bdev, &bio_vec, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = 0;
- bio.bi_opf = REQ_OP_READ;
- bio_set_dev(&bio, sb->s_bdev);
bio_add_page(&bio, page, PAGE_SIZE, 0);
ret = submit_bio_wait(&bio);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index be6ac92913d48..41bedf727f59c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -456,8 +456,8 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
struct request_queue;
extern int submit_bio_wait(struct bio *bio);
-extern void bio_init(struct bio *bio, struct bio_vec *table,
- unsigned short max_vecs);
+void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
+ unsigned short max_vecs, unsigned int opf);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);
--
2.30.2
Pass the block_device and operation that we plan to use this bio for to
bio_alloc to optimize the assignment. NULL/0 can be passed, both for the
passthrough case on a raw request_queue and to temporarily avoid
refactoring some nasty code.
Also move the gfp_mask argument after the nr_vecs argument for a much
more logical calling convention matching what most of the kernel does.
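To sketch the new calling convention (illustrative example only; note that
gfp_mask now comes after nr_vecs):

	/* before */
	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;

	/* after: device and operation up front, gfp_mask last */
	bio = bio_alloc(bdev, nr_pages, REQ_OP_READ, GFP_NOIO);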
Signed-off-by: Christoph Hellwig <[email protected]>
---
block/bio.c | 5 +----
block/fops.c | 4 +---
drivers/block/drbd/drbd_receiver.c | 10 ++++------
drivers/block/rnbd/rnbd-srv.c | 5 ++---
drivers/block/xen-blkback/blkback.c | 11 +++++------
drivers/block/zram/zram_drv.c | 11 ++++-------
drivers/md/dm-log-writes.c | 21 ++++++++-------------
drivers/md/dm-thin.c | 9 ++++-----
drivers/md/dm-zoned-metadata.c | 15 ++++++---------
drivers/nvdimm/nd_virtio.c | 6 +++---
drivers/nvme/target/io-cmd-bdev.c | 12 ++++++------
drivers/nvme/target/passthru.c | 5 +++--
drivers/nvme/target/zns.c | 6 +++---
drivers/scsi/ufs/ufshpb.c | 4 ++--
drivers/target/target_core_iblock.c | 5 ++---
fs/btrfs/disk-io.c | 6 +++---
fs/buffer.c | 14 ++++++--------
fs/crypto/bio.c | 13 +++++++------
fs/direct-io.c | 5 +----
fs/erofs/zdata.c | 5 ++---
fs/ext4/page-io.c | 3 +--
fs/ext4/readpage.c | 8 ++++----
fs/gfs2/lops.c | 8 +++-----
fs/gfs2/meta_io.c | 4 +---
fs/gfs2/ops_fstype.c | 4 +---
fs/hfsplus/wrapper.c | 4 +---
fs/iomap/buffered-io.c | 16 ++++++++--------
fs/iomap/direct-io.c | 8 ++------
fs/jfs/jfs_logmgr.c | 11 ++---------
fs/jfs/jfs_metapage.c | 9 +++------
fs/mpage.c | 7 +++----
fs/nfs/blocklayout/blocklayout.c | 4 +---
fs/nilfs2/segbuf.c | 4 ++--
fs/ntfs3/fsntfs.c | 8 ++------
fs/ocfs2/cluster/heartbeat.c | 4 +---
fs/squashfs/block.c | 11 ++++++-----
fs/xfs/xfs_bio_io.c | 10 ++++------
fs/xfs/xfs_buf.c | 4 +---
fs/zonefs/super.c | 5 ++---
include/linux/bio.h | 5 +++--
kernel/power/swap.c | 5 ++---
mm/page_io.c | 10 ++++------
42 files changed, 130 insertions(+), 194 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 8853ed6260cfd..5c97f56c1ee50 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -347,10 +347,7 @@ EXPORT_SYMBOL(bio_chain);
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
unsigned int nr_pages, unsigned int opf, gfp_t gfp)
{
- struct bio *new = bio_alloc(gfp, nr_pages);
-
- bio_set_dev(new, bdev);
- new->bi_opf = opf;
+ struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);
if (bio) {
bio_chain(bio, new);
diff --git a/block/fops.c b/block/fops.c
index 3a62b8b912750..c683596847731 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -256,9 +256,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
}
atomic_inc(&dio->ref);
submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL, nr_pages);
- bio_set_dev(bio, bdev);
- bio->bi_opf = opf;
+ bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
}
blk_finish_plug(&plug);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 90402abb77e93..6f589ed9fa0c5 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1279,7 +1279,8 @@ static void one_flush_endio(struct bio *bio)
static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
- struct bio *bio = bio_alloc(GFP_NOIO, 0);
+ struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
+ REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
if (!octx) {
@@ -1297,10 +1298,8 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
octx->device = device;
octx->ctx = ctx;
- bio_set_dev(bio, device->ldev->backing_bdev);
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
- bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
device->flush_jif = jiffies;
set_bit(FLUSH_PENDING, &device->flags);
@@ -1686,11 +1685,10 @@ int drbd_submit_peer_request(struct drbd_device *device,
* generated bio, but a bio allocated on behalf of the peer.
*/
next_bio:
- bio = bio_alloc(GFP_NOIO, nr_pages);
+ bio = bio_alloc(device->ldev->backing_bdev, nr_pages, op | op_flags,
+ GFP_NOIO);
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, device->ldev->backing_bdev);
- bio_set_op_attrs(bio, op, op_flags);
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index b1ac1414b56d5..021532ab9dc30 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -149,7 +149,8 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
priv->sess_dev = sess_dev;
priv->id = id;
- bio = bio_alloc(GFP_KERNEL, 1);
+ bio = bio_alloc(sess_dev->rnbd_dev->bdev, 1,
+ rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
if (bio_add_page(bio, virt_to_page(data), datalen,
 offset_in_page(data)) != datalen) {
rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
@@ -159,13 +160,11 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
bio->bi_end_io = rnbd_dev_bi_end_io;
bio->bi_private = priv;
- bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
bio_set_prio(bio, prio);
- bio_set_dev(bio, sess_dev->rnbd_dev->bdev);
submit_bio(bio);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 6bb2ad7692065..d1e26461a64ed 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1326,13 +1326,13 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
pages[i]->page,
seg[i].nsec << 9,
seg[i].offset) == 0)) {
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i));
+ bio = bio_alloc(preq.bdev, bio_max_segs(nseg - i),
+ operation | operation_flags,
+ GFP_KERNEL);
biolist[nbio++] = bio;
- bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number;
- bio_set_op_attrs(bio, operation, operation_flags);
}
preq.sector_number += seg[i].nsec;
@@ -1342,12 +1342,11 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
if (!bio) {
BUG_ON(operation_flags != REQ_PREFLUSH);
- bio = bio_alloc(GFP_KERNEL, 0);
+ bio = bio_alloc(preq.bdev, 0, operation | operation_flags,
+ GFP_KERNEL);
biolist[nbio++] = bio;
- bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
- bio_set_op_attrs(bio, operation, operation_flags);
}
atomic_set(&pending_req->pendcnt, nbio);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index cb253d80d72b9..d8dc7ed1c78fc 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -617,24 +617,21 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
{
struct bio *bio;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(zram->bdev, 1, parent ? parent->bi_opf : REQ_OP_READ,
+ GFP_NOIO);
if (!bio)
return -ENOMEM;
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
- bio_set_dev(bio, zram->bdev);
if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
bio_put(bio);
return -EIO;
}
- if (!parent) {
- bio->bi_opf = REQ_OP_READ;
+ if (!parent)
bio->bi_end_io = zram_page_end_io;
- } else {
- bio->bi_opf = parent->bi_opf;
+ else
bio_chain(bio, parent);
- }
submit_bio(bio);
return 1;
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 25f5e8d2d417b..c9d036d6bb2ee 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -217,14 +217,12 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
void *ptr;
size_t ret;
- bio = bio_alloc(GFP_KERNEL, 1);
+ bio = bio_alloc(lc->logdev->bdev, 1, REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
log_end_super : log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
page = alloc_page(GFP_KERNEL);
if (!page) {
@@ -271,13 +269,12 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
atomic_inc(&lc->io_blocks);
- bio = bio_alloc(GFP_KERNEL, bio_pages);
+ bio = bio_alloc(lc->logdev->bdev, bio_pages, REQ_OP_WRITE,
+ GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
for (i = 0; i < bio_pages; i++) {
pg_datalen = min_t(int, datalen, PAGE_SIZE);
@@ -353,13 +350,12 @@ static int log_one_block(struct log_writes_c *lc,
goto out;
atomic_inc(&lc->io_blocks);
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(block->vec_cnt));
+ bio = bio_alloc(lc->logdev->bdev, bio_max_segs(block->vec_cnt),
+ REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
for (i = 0; i < block->vec_cnt; i++) {
/*
@@ -371,14 +367,13 @@ static int log_one_block(struct log_writes_c *lc,
if (ret != block->vecs[i].bv_len) {
atomic_inc(&lc->io_blocks);
submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL,
- bio_max_segs(block->vec_cnt - i));
+ bio = bio_alloc(lc->logdev->bdev,
+ bio_max_segs(block->vec_cnt - i),
+ REQ_OP_WRITE, GFP_KERNEL);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
ret = bio_add_page(bio, block->vecs[i].bv_page,
block->vecs[i].bv_len, 0);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 411a3f56ed90c..f4234d615aa1b 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1177,13 +1177,12 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
return;
}
- discard_parent = bio_alloc(GFP_NOIO, 1);
+ discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO);
discard_parent->bi_end_io = passdown_endio;
discard_parent->bi_private = m;
-
- if (m->maybe_shared)
- passdown_double_checking_shared_status(m, discard_parent);
- else {
+ if (m->maybe_shared)
+ passdown_double_checking_shared_status(m, discard_parent);
+ else {
struct discard_op op;
begin_discard(&op, tc, discard_parent);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 5718b83cc7182..e5f1eb27ce2e9 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -550,7 +550,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
if (!mblk)
return ERR_PTR(-ENOMEM);
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO,
+ GFP_NOIO);
spin_lock(&zmd->mblk_lock);
@@ -574,10 +575,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
/* Submit read BIO */
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
@@ -721,15 +720,14 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
if (dmz_bdev_is_dying(dev))
return -EIO;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO,
+ GFP_NOIO);
set_bit(DMZ_META_WRITING, &mblk->state);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
- bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
@@ -751,10 +749,9 @@ static int dmz_rdwr_block(struct dmz_dev *dev, int op,
if (dmz_bdev_is_dying(dev))
return -EIO;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
+ GFP_NOIO);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, dev->bdev);
- bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
ret = submit_bio_wait(bio);
bio_put(bio);
diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index 10351d5b49fac..c6a648fd8744a 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -105,12 +105,12 @@ int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
* parent bio. Otherwise directly call nd_region flush.
*/
if (bio && bio->bi_iter.bi_sector != -1) {
- struct bio *child = bio_alloc(GFP_ATOMIC, 0);
+ struct bio *child = bio_alloc(bio->bi_bdev, 0, REQ_PREFLUSH,
+ GFP_ATOMIC);
if (!child)
return -ENOMEM;
- bio_copy_dev(child, bio);
- child->bi_opf = REQ_PREFLUSH;
+ bio_clone_blkg_association(child, bio);
child->bi_iter.bi_sector = -1;
bio_chain(child, bio);
submit_bio(child);
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 70ca9dfc1771a..e092af3abc710 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -268,14 +268,15 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
if (nvmet_use_inline_bvec(req)) {
bio = &req->b.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio_set_dev(bio, req->ns->bdev);
+ bio->bi_opf = op;
} else {
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op,
+ GFP_KERNEL);
}
- bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio->bi_opf = op;
blk_start_plug(&plug);
if (req->metadata_len)
@@ -296,10 +297,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
}
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
- bio_set_dev(bio, req->ns->bdev);
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
+ op, GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
- bio->bi_opf = op;
bio_chain(bio, prev);
submit_bio(prev);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 9e5b89ae29dfe..38f72968c3fde 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -207,11 +207,12 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
if (nvmet_use_inline_bvec(req)) {
bio = &req->p.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio->bi_opf = req_op(rq);
} else {
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt));
+ bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
+ GFP_KERNEL);
bio->bi_end_io = bio_put;
}
- bio->bi_opf = req_op(rq);
for_each_sg(req->sg, sg, req->sg_cnt, i) {
if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 247de74247fab..62c53e8f26d35 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -522,6 +522,7 @@ static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+ const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
u16 status = NVME_SC_SUCCESS;
unsigned int total_len = 0;
struct scatterlist *sg;
@@ -552,13 +553,12 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
if (nvmet_use_inline_bvec(req)) {
bio = &req->z.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio->bi_opf = op;
} else {
- bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
+ bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
}
- bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
- bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sect;
bio->bi_private = req;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 2d36a0715fca6..8970068314ef2 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -494,7 +494,7 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
if (!map_req)
return NULL;
- bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
+ bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
if (!bio) {
ufshpb_put_req(hpb, map_req);
return NULL;
@@ -2050,7 +2050,7 @@ static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
INIT_LIST_HEAD(&pre_req->list_req);
pre_req->req = NULL;
- pre_req->bio = bio_alloc(GFP_KERNEL, 1);
+ pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
if (!pre_req->bio)
goto release_mem;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index f7c6e822f345a..0907fa3cf2b43 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -416,10 +416,9 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
if (immed)
target_complete_cmd(cmd, SAM_STAT_GOOD);
- bio = bio_alloc(GFP_KERNEL, 0);
+ bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_KERNEL);
bio->bi_end_io = iblock_end_io_flush;
- bio_set_dev(bio, ib_dev->ibd_bd);
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
if (!immed)
bio->bi_private = cmd;
submit_bio(bio);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 87a5addbedf6d..f45aa506f9a6f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4029,8 +4029,9 @@ static int write_dev_supers(struct btrfs_device *device,
* to do I/O, so we don't lose the ability to do integrity
* checking.
*/
- bio = bio_alloc(GFP_NOFS, 1);
- bio_set_dev(bio, device->bdev);
+ bio = bio_alloc(device->bdev, 1,
+ REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
+ GFP_NOFS);
bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
bio->bi_private = device;
bio->bi_end_io = btrfs_end_super_write;
@@ -4042,7 +4043,6 @@ static int write_dev_supers(struct btrfs_device *device,
* go down lazy and there's a short window where the on-disk
* copies might still contain the older version.
*/
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO;
if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
bio->bi_opf |= REQ_FUA;
diff --git a/fs/buffer.c b/fs/buffer.c
index 8e112b6bd3719..a17c386a142c7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3024,12 +3024,16 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
clear_buffer_write_io_error(bh);
- bio = bio_alloc(GFP_NOIO, 1);
+ if (buffer_meta(bh))
+ op_flags |= REQ_META;
+ if (buffer_prio(bh))
+ op_flags |= REQ_PRIO;
+
+ bio = bio_alloc(bh->b_bdev, 1, op | op_flags, GFP_NOIO);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio_set_dev(bio, bh->b_bdev);
bio->bi_write_hint = write_hint;
bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
@@ -3038,12 +3042,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
- if (buffer_meta(bh))
- op_flags |= REQ_META;
- if (buffer_prio(bh))
- op_flags |= REQ_PRIO;
- bio_set_op_attrs(bio, op, op_flags);
-
/* Take care of bh's that straddle the end of the device */
guard_bio_eod(bio);
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index bfc2a5b74ed39..755e985a42e0b 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -54,7 +54,8 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
int num_pages = 0;
/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
- bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
+ bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS, REQ_OP_WRITE,
+ GFP_NOFS);
while (len) {
unsigned int blocks_this_page = min(len, blocks_per_page);
@@ -62,10 +63,8 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
if (num_pages == 0) {
fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
- bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector =
pblk << (blockbits - SECTOR_SHIFT);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
}
ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
if (WARN_ON(ret != bytes_this_page)) {
@@ -82,6 +81,8 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
if (err)
goto out;
bio_reset(bio);
+ bio_set_dev(bio, inode->i_sb->s_bdev);
+ bio->bi_opf = REQ_OP_WRITE;
num_pages = 0;
}
}
@@ -150,12 +151,10 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
return -EINVAL;
/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
- bio = bio_alloc(GFP_NOFS, nr_pages);
+ bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS);
do {
- bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector = pblk << (blockbits - 9);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
i = 0;
offset = 0;
@@ -183,6 +182,8 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
if (err)
goto out;
bio_reset(bio);
+ bio_set_dev(bio, inode->i_sb->s_bdev);
+ bio->bi_opf = REQ_OP_WRITE;
} while (len != 0);
err = 0;
out:
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 6544435580470..38bca4980a1ca 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -396,11 +396,8 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
* bio_alloc() is guaranteed to return a bio when allowed to sleep and
* we request a valid number of vectors.
*/
- bio = bio_alloc(GFP_KERNEL, nr_vecs);
-
- bio_set_dev(bio, bdev);
+ bio = bio_alloc(bdev, nr_vecs, dio->op | dio->op_flags, GFP_KERNEL);
bio->bi_iter.bi_sector = first_sector;
- bio_set_op_attrs(bio, dio->op, dio->op_flags);
if (dio->is_async)
bio->bi_end_io = dio_bio_end_aio;
else
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 498b7666efe85..db7de2dbac739 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1371,15 +1371,14 @@ static void z_erofs_submit_queue(struct super_block *sb,
}
if (!bio) {
- bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
+ bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
+ REQ_OP_READ, GFP_NOIO);
bio->bi_end_io = z_erofs_decompressqueue_endio;
- bio_set_dev(bio, mdev.m_bdev);
last_bdev = mdev.m_bdev;
bio->bi_iter.bi_sector = (sector_t)cur <<
LOG_SECTORS_PER_BLOCK;
bio->bi_private = bi_private;
- bio->bi_opf = REQ_OP_READ;
if (f->readahead)
bio->bi_opf |= REQ_RAHEAD;
++nr_bios;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 1d370364230e8..1253982268730 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -398,10 +398,9 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
* bio_alloc will _always_ be able to allocate a bio if
* __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
*/
- bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
+ bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, 0, GFP_NOIO);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio_set_dev(bio, bh->b_bdev);
bio->bi_end_io = ext4_end_bio;
bio->bi_private = ext4_get_io_end(io->io_end);
io->io_bio = bio;
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 3db9234035053..812b1871d8900 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -371,15 +371,15 @@ int ext4_mpage_readpages(struct inode *inode,
* bio_alloc will _always_ be able to allocate a bio if
* __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
*/
- bio = bio_alloc(GFP_KERNEL, bio_max_segs(nr_pages));
+ bio = bio_alloc(bdev, bio_max_segs(nr_pages),
+ REQ_OP_READ, GFP_KERNEL);
fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
GFP_KERNEL);
ext4_set_bio_post_read_ctx(bio, inode, page->index);
- bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ,
- rac ? REQ_RAHEAD : 0);
+ if (rac)
+ bio->bi_opf |= REQ_RAHEAD;
}
length = first_hole << blkbits;
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index ca0bb3a73912a..4ae1eefae616d 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -265,10 +265,9 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
bio_end_io_t *end_io)
{
struct super_block *sb = sdp->sd_vfs;
- struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
+ struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO);
bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
- bio_set_dev(bio, sb->s_bdev);
bio->bi_end_io = end_io;
bio->bi_private = sdp;
@@ -489,10 +488,9 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
struct bio *new;
- new = bio_alloc(GFP_NOIO, nr_iovecs);
- bio_copy_dev(new, prev);
+ new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
+ bio_clone_blkg_association(new, prev);
new->bi_iter.bi_sector = bio_end_sector(prev);
- new->bi_opf = prev->bi_opf;
new->bi_write_hint = prev->bi_write_hint;
bio_chain(new, prev);
submit_bio(prev);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 72d30a682ecec..a580b90b75222 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -222,9 +222,8 @@ static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
struct buffer_head *bh = *bhs;
struct bio *bio;
- bio = bio_alloc(GFP_NOIO, num);
+ bio = bio_alloc(bh->b_bdev, num, op | op_flags, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio_set_dev(bio, bh->b_bdev);
while (num > 0) {
bh = *bhs;
if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
@@ -235,7 +234,6 @@ static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
num--;
}
bio->bi_end_io = gfs2_meta_read_endio;
- bio_set_op_attrs(bio, op, op_flags);
submit_bio(bio);
}
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 7f8410d8fdc1d..c9b423c874a32 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -251,14 +251,12 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
ClearPageDirty(page);
lock_page(page);
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = bio_alloc(sb->s_bdev, 1, REQ_OP_READ | REQ_META, GFP_NOFS);
bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
- bio_set_dev(bio, sb->s_bdev);
bio_add_page(bio, page, PAGE_SIZE, 0);
bio->bi_end_io = end_bio_io_page;
bio->bi_private = page;
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
submit_bio(bio);
wait_on_page_locked(page);
bio_put(bio);
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 51ae6f1eb4a55..6acce8563031d 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -64,10 +64,8 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
offset = start & (io_size - 1);
sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(sb->s_bdev, 1, op | op_flags, GFP_NOIO);
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, sb->s_bdev);
- bio_set_op_attrs(bio, op, op_flags);
if (op != WRITE && data)
*data = (u8 *)buf + offset;
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 340d373cb1bf9..70f3657a6ec06 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -290,19 +290,20 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
if (ctx->rac) /* same as readahead_gfp_mask */
gfp |= __GFP_NORETRY | __GFP_NOWARN;
- ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
+ ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
+ REQ_OP_READ, gfp);
/*
* If the bio_alloc fails, try it again for a single page to
* avoid having to deal with partial page reads. This emulates
* what do_mpage_readpage does.
*/
- if (!ctx->bio)
- ctx->bio = bio_alloc(orig_gfp, 1);
- ctx->bio->bi_opf = REQ_OP_READ;
+ if (!ctx->bio) {
+ ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
+ orig_gfp);
+ }
if (ctx->rac)
ctx->bio->bi_opf |= REQ_RAHEAD;
ctx->bio->bi_iter.bi_sector = sector;
- bio_set_dev(ctx->bio, iomap->bdev);
ctx->bio->bi_end_io = iomap_read_end_io;
bio_add_folio(ctx->bio, folio, plen, poff);
}
@@ -1226,10 +1227,9 @@ iomap_chain_bio(struct bio *prev)
{
struct bio *new;
- new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
- bio_copy_dev(new, prev);/* also copies over blkcg information */
+ new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
+ bio_clone_blkg_association(new, prev);
new->bi_iter.bi_sector = bio_end_sector(prev);
- new->bi_opf = prev->bi_opf;
new->bi_write_hint = prev->bi_write_hint;
bio_chain(prev, new);
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 03ea367df19a4..e2ba13645ef28 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -183,15 +183,13 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
int flags = REQ_SYNC | REQ_IDLE;
struct bio *bio;
- bio = bio_alloc(GFP_KERNEL, 1);
- bio_set_dev(bio, iter->iomap.bdev);
+ bio = bio_alloc(iter->iomap.bdev, 1, REQ_OP_WRITE | flags, GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
get_page(page);
__bio_add_page(bio, page, len, 0);
- bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
iomap_dio_submit_bio(iter, dio, bio, pos);
}
@@ -309,14 +307,12 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
goto out;
}
- bio = bio_alloc(GFP_KERNEL, nr_pages);
- bio_set_dev(bio, iomap->bdev);
+ bio = bio_alloc(iomap->bdev, nr_pages, bio_opf, GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
bio->bi_write_hint = dio->iocb->ki_hint;
bio->bi_ioprio = dio->iocb->ki_ioprio;
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
- bio->bi_opf = bio_opf;
ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
if (unlikely(ret)) {
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 78fd136ac13b9..997c81fcea349 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1980,17 +1980,13 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
bp->l_flag |= lbmREAD;
- bio = bio_alloc(GFP_NOFS, 1);
-
+ bio = bio_alloc(log->bdev, 1, REQ_OP_READ, GFP_NOFS);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
- bio_set_dev(bio, log->bdev);
-
bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
- bio->bi_opf = REQ_OP_READ;
/*check if journaling to disk has been disabled*/
if (log->no_integrity) {
bio->bi_iter.bi_size = 0;
@@ -2125,16 +2121,13 @@ static void lbmStartIO(struct lbuf * bp)
jfs_info("lbmStartIO");
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = bio_alloc(log->bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
- bio_set_dev(bio, log->bdev);
-
bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
/* check if journaling to disk has been disabled */
if (log->no_integrity) {
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 104ae698443ed..fde1a9cf902e8 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -417,12 +417,10 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
}
len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);
- bio = bio_alloc(GFP_NOFS, 1);
- bio_set_dev(bio, inode->i_sb->s_bdev);
+ bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_write_end_io;
bio->bi_private = page;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
/* Don't call bio_add_page yet, we may add to this vec */
bio_offset = offset;
@@ -497,13 +495,12 @@ static int metapage_readpage(struct file *fp, struct page *page)
if (bio)
submit_bio(bio);
- bio = bio_alloc(GFP_NOFS, 1);
- bio_set_dev(bio, inode->i_sb->s_bdev);
+ bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
+ GFP_NOFS);
bio->bi_iter.bi_sector =
pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_read_end_io;
bio->bi_private = page;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
len = xlen << inode->i_blkbits;
offset = block_offset << inode->i_blkbits;
if (bio_add_page(bio, page, len, offset) < len)
diff --git a/fs/mpage.c b/fs/mpage.c
index c5817699b369b..12f41de78c539 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -280,10 +280,10 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
page))
goto out;
}
- args->bio = bio_alloc(gfp, bio_max_segs(args->nr_pages));
+ args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), 0,
+ gfp);
if (args->bio == NULL)
goto confused;
- bio_set_dev(args->bio, bdev);
args->bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
}
@@ -593,8 +593,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
page, wbc))
goto out;
}
- bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
- bio_set_dev(bio, bdev);
+ bio = bio_alloc(bdev, BIO_MAX_VECS, 0, GFP_NOFS);
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
wbc_init_bio(wbc, bio);
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 38e063af7e98a..79a8b451791f5 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -154,12 +154,10 @@ do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
retry:
if (!bio) {
- bio = bio_alloc(GFP_NOIO, bio_max_segs(npg));
+ bio = bio_alloc(map->bdev, bio_max_segs(npg), rw, GFP_NOIO);
bio->bi_iter.bi_sector = disk_addr >> SECTOR_SHIFT;
- bio_set_dev(bio, map->bdev);
bio->bi_end_io = end_io;
bio->bi_private = par;
- bio_set_op_attrs(bio, rw, 0);
}
if (bio_add_page(bio, page, *len, offset) < *len) {
bio = bl_submit_bio(bio);
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 53b7c6d21cdd8..4f71faacd8253 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -391,8 +391,8 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
BUG_ON(wi->nr_vecs <= 0);
repeat:
if (!wi->bio) {
- wi->bio = bio_alloc(GFP_NOIO, wi->nr_vecs);
- bio_set_dev(wi->bio, wi->nilfs->ns_bdev);
+ wi->bio = bio_alloc(wi->nilfs->ns_bdev, wi->nr_vecs, 0,
+ GFP_NOIO);
wi->bio->bi_iter.bi_sector = (wi->blocknr + wi->end) <<
(wi->nilfs->ns_blocksize_bits - 9);
}
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 4a255e21ecf5f..0660a07c5a96e 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -1485,15 +1485,13 @@ int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
lbo = ((u64)lcn << cluster_bits) + off;
len = ((u64)clen << cluster_bits) - off;
new_bio:
- new = bio_alloc(GFP_NOFS, nr_pages - page_idx);
+ new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
if (bio) {
bio_chain(bio, new);
submit_bio(bio);
}
bio = new;
- bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = lbo >> 9;
- bio->bi_opf = op;
while (len) {
off = vbo & (PAGE_SIZE - 1);
@@ -1584,14 +1582,12 @@ int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
lbo = (u64)lcn << cluster_bits;
len = (u64)clen << cluster_bits;
new_bio:
- new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
+ new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
if (bio) {
bio_chain(bio, new);
submit_bio(bio);
}
bio = new;
- bio_set_dev(bio, bdev);
- bio->bi_opf = REQ_OP_WRITE;
bio->bi_iter.bi_sector = lbo >> 9;
for (;;) {
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index f89ffcbd585ff..5cef858264b60 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -518,7 +518,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
* GFP_KERNEL that the local node can get fenced. It would be
* nicest if we could pre-allocate these bios and avoid this
* all together. */
- bio = bio_alloc(GFP_ATOMIC, 16);
+ bio = bio_alloc(reg->hr_bdev, 16, op | op_flags, GFP_ATOMIC);
if (!bio) {
mlog(ML_ERROR, "Could not alloc slots BIO!\n");
bio = ERR_PTR(-ENOMEM);
@@ -527,10 +527,8 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
/* Must put everything in 512 byte sectors for the bio... */
bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
- bio_set_dev(bio, reg->hr_bdev);
bio->bi_private = wc;
bio->bi_end_io = o2hb_bio_end_io;
- bio_set_op_attrs(bio, op, op_flags);
vec_start = (cs << bits) % PAGE_SIZE;
while(cs < max_slots) {
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 2db8bcf7ff859..622c844f6d118 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -86,16 +86,17 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
int error, i;
struct bio *bio;
- if (page_count <= BIO_MAX_VECS)
- bio = bio_alloc(GFP_NOIO, page_count);
- else
+ if (page_count <= BIO_MAX_VECS) {
+ bio = bio_alloc(sb->s_bdev, page_count, REQ_OP_READ, GFP_NOIO);
+ } else {
bio = bio_kmalloc(GFP_NOIO, page_count);
+ bio_set_dev(bio, sb->s_bdev);
+ bio->bi_opf = REQ_OP_READ;
+ }
if (!bio)
return -ENOMEM;
- bio_set_dev(bio, sb->s_bdev);
- bio->bi_opf = READ;
bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);
for (i = 0; i < page_count; ++i) {
diff --git a/fs/xfs/xfs_bio_io.c b/fs/xfs/xfs_bio_io.c
index 667e297f59b16..eff4a9f21dcff 100644
--- a/fs/xfs/xfs_bio_io.c
+++ b/fs/xfs/xfs_bio_io.c
@@ -61,10 +61,9 @@ xfs_rw_bdev(
if (is_vmalloc && op == REQ_OP_WRITE)
flush_kernel_vmap_range(data, count);
- bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
- bio_set_dev(bio, bdev);
+ bio = bio_alloc(bdev, bio_max_vecs(left), op | REQ_META | REQ_SYNC,
+ GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
- bio->bi_opf = op | REQ_META | REQ_SYNC;
do {
struct page *page = kmem_to_page(data);
@@ -74,10 +73,9 @@ xfs_rw_bdev(
while (bio_add_page(bio, page, len, off) != len) {
struct bio *prev = bio;
- bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
- bio_copy_dev(bio, prev);
+ bio = bio_alloc(prev->bi_bdev, bio_max_vecs(left),
+ prev->bi_opf, GFP_KERNEL);
bio->bi_iter.bi_sector = bio_end_sector(prev);
- bio->bi_opf = prev->bi_opf;
bio_chain(prev, bio);
submit_bio(prev);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index b45e0d50a4052..ae87fd95b17e2 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1440,12 +1440,10 @@ xfs_buf_ioapply_map(
atomic_inc(&bp->b_io_remaining);
nr_pages = bio_max_segs(total_nr_pages);
- bio = bio_alloc(GFP_NOIO, nr_pages);
- bio_set_dev(bio, bp->b_target->bt_bdev);
+ bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, op, GFP_NOIO);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = bp;
- bio->bi_opf = op;
for (; size && nr_pages; nr_pages--, page_index++) {
int rbytes, nbytes = PAGE_SIZE - offset;
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index b76dfb310ab65..c0fc2c326dcee 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -692,12 +692,11 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
if (!nr_pages)
return 0;
- bio = bio_alloc(GFP_NOFS, nr_pages);
- bio_set_dev(bio, bdev);
+ bio = bio_alloc(bdev, nr_pages,
+ REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
bio->bi_iter.bi_sector = zi->i_zsector;
bio->bi_write_hint = iocb->ki_hint;
bio->bi_ioprio = iocb->ki_ioprio;
- bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
if (iocb->ki_flags & IOCB_DSYNC)
bio->bi_opf |= REQ_FUA;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5c5ada2ebb270..be6ac92913d48 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -418,9 +418,10 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio_set fs_bio_set;
-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs)
+static inline struct bio *bio_alloc(struct block_device *bdev,
+ unsigned short nr_vecs, unsigned int opf, gfp_t gfp_mask)
{
- return bio_alloc_bioset(NULL, nr_iovecs, 0, gfp_mask, &fs_bio_set);
+ return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}
void submit_bio(struct bio *bio);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index ad10359030a4c..9bce177454b82 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -277,10 +277,9 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
struct bio *bio;
int error = 0;
- bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
+ bio = bio_alloc(hib_resume_bdev, 1, op | op_flags,
+ GFP_NOIO | __GFP_HIGH);
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
- bio_set_dev(bio, hib_resume_bdev);
- bio_set_op_attrs(bio, op, op_flags);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
pr_err("Adding page to bio failed at %llu\n",
diff --git a/mm/page_io.c b/mm/page_io.c
index 9725c7e1eeea1..874f134d6d79c 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -337,10 +337,10 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
return 0;
}
- bio = bio_alloc(GFP_NOIO, 1);
- bio_set_dev(bio, sis->bdev);
+ bio = bio_alloc(sis->bdev, 1,
+ REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
+ GFP_NOIO);
bio->bi_iter.bi_sector = swap_page_sector(page);
- bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
bio->bi_end_io = end_write_func;
bio_add_page(bio, page, thp_size(page), 0);
@@ -401,9 +401,7 @@ int swap_readpage(struct page *page, bool synchronous)
}
ret = 0;
- bio = bio_alloc(GFP_KERNEL, 1);
- bio_set_dev(bio, sis->bdev);
- bio->bi_opf = REQ_OP_READ;
+ bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
bio->bi_iter.bi_sector = swap_page_sector(page);
bio->bi_end_io = end_swap_bio_read;
bio_add_page(bio, page, thp_size(page), 0);
--
2.30.2
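Taken together, the conversions above all follow the same before/after pattern; as a rough
illustration with a hypothetical caller (names are placeholders, not taken from any particular
hunk):

	/* before: allocate, then set the device and operation by hand */
	bio = bio_alloc(GFP_NOIO, nr_vecs);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;

	/* after: bdev and opf are passed at allocation time, so bio_init()
	 * can fill the fields in directly instead of zeroing them first
	 */
	bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE | REQ_SYNC, GFP_NOIO);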
All callers need to set the block_device and operation, so lift that into
the common code.
Signed-off-by: Christoph Hellwig <[email protected]>
---
block/bio.c | 6 +++++-
block/blk-lib.c | 19 +++++--------------
block/blk-zoned.c | 9 +++------
block/blk.h | 2 --
drivers/nvme/target/zns.c | 6 +++---
include/linux/bio.h | 3 ++-
6 files changed, 18 insertions(+), 27 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 43fb28ac6b44e..52c99bfa8008d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -344,10 +344,14 @@ void bio_chain(struct bio *bio, struct bio *parent)
}
EXPORT_SYMBOL(bio_chain);
-struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
+struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+ unsigned int nr_pages, unsigned int opf, gfp_t gfp)
{
struct bio *new = bio_alloc(gfp, nr_pages);
+ bio_set_dev(new, bdev);
+ new->bi_opf = opf;
+
if (bio) {
bio_chain(bio, new);
submit_bio(bio);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9245b300ef73e..1b8ced45e4e55 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -82,11 +82,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
- bio = blk_next_bio(bio, 0, gfp_mask);
+ bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, bdev);
- bio_set_op_attrs(bio, op, 0);
-
bio->bi_iter.bi_size = req_sects << 9;
sector += req_sects;
nr_sects -= req_sects;
@@ -176,14 +173,12 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
max_write_same_sectors = bio_allowed_max_sectors(q);
while (nr_sects) {
- bio = blk_next_bio(bio, 1, gfp_mask);
+ bio = blk_next_bio(bio, bdev, 1, REQ_OP_WRITE_SAME, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, bdev);
bio->bi_vcnt = 1;
bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0;
bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
- bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);
if (nr_sects > max_write_same_sectors) {
bio->bi_iter.bi_size = max_write_same_sectors << 9;
@@ -252,10 +247,8 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
return -EOPNOTSUPP;
while (nr_sects) {
- bio = blk_next_bio(bio, 0, gfp_mask);
+ bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, bdev);
- bio->bi_opf = REQ_OP_WRITE_ZEROES;
if (flags & BLKDEV_ZERO_NOUNMAP)
bio->bi_opf |= REQ_NOUNMAP;
@@ -303,11 +296,9 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
return -EPERM;
while (nr_sects != 0) {
- bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
- gfp_mask);
+ bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
+ REQ_OP_WRITE, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, bdev);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
while (nr_sects != 0) {
sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 774ecc598bee2..5ab755d792c81 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -215,9 +215,8 @@ static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
continue;
}
- bio = blk_next_bio(bio, 0, gfp_mask);
- bio_set_dev(bio, bdev);
- bio->bi_opf = REQ_OP_ZONE_RESET | REQ_SYNC;
+ bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
+ gfp_mask);
bio->bi_iter.bi_sector = sector;
sector += zone_sectors;
@@ -306,9 +305,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
}
while (sector < end_sector) {
- bio = blk_next_bio(bio, 0, gfp_mask);
- bio_set_dev(bio, bdev);
- bio->bi_opf = op | REQ_SYNC;
+ bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, gfp_mask);
bio->bi_iter.bi_sector = sector;
sector += zone_sectors;
diff --git a/block/blk.h b/block/blk.h
index 8bd43b3ad33d5..6b93de33e8a43 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -406,8 +406,6 @@ extern int blk_iolatency_init(struct request_queue *q);
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif
-struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
-
#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 46bc30fe85d2b..247de74247fab 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -412,10 +412,10 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
while (sector < get_capacity(bdev->bd_disk)) {
if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
- bio = blk_next_bio(bio, 0, GFP_KERNEL);
- bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC;
+ bio = blk_next_bio(bio, bdev, 0,
+ zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
+ GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, bdev);
/* This may take a while, so be nice to others */
cond_resched();
}
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 117d7f248ac96..edeae54074ede 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -790,6 +790,7 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
bio->bi_opf |= REQ_NOWAIT;
}
-struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
+struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+ unsigned int nr_pages, unsigned int opf, gfp_t gfp);
#endif /* __LINUX_BIO_H */
--
2.30.2
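The caller-side change mirrors the bio_alloc one: the device and operation move into the
allocation call. A condensed sketch modeled on the write-zeroes loop above, for illustration
only:

	/* before */
	bio = blk_next_bio(bio, 0, gfp_mask);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE_ZEROES;

	/* after: blk_next_bio() sets the device and opf itself */
	bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);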
On 1/17/22 11:19 PM, Christoph Hellwig wrote:
> All callers need to set the block_device and operation, so lift that into
> the common code.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
I sent out the exact same patch for this one; anyway, it looks good.
Reviewed-by: Chaitanya Kulkarni <[email protected]>
On 1/17/22 11:19 PM, Christoph Hellwig wrote:
> Keep blk_next_bio next to the core bio infrastructure.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
Looks good.
Reviewed-by: Chaitanya Kulkarni <[email protected]>
On 1/17/22 11:19 PM, Christoph Hellwig wrote:
> Pass the block_device and operation that we plan to use this bio for to
> bio_alloc_bioset to optimize the assigment. NULL/0 can be passed, both
> for the passthrough case on a raw request_queue and to temporarily avoid
> refactoring some nasty code.
>
> Also move the gfp_mask argument after the nr_vecs argument for a much
> more logical calling convention matching what most of the kernel does.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
Looks good.
Reviewed-by: Chaitanya Kulkarni <[email protected]>
On 1/17/22 11:19 PM, Christoph Hellwig wrote:
> Pass the block_device and operation that we plan to use this bio for to
> bio_alloc_kiocb to optimize the assigment.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
Looks good.
Reviewed-by: Chaitanya Kulkarni <[email protected]>
On 1/17/22 11:19 PM, Christoph Hellwig wrote:
> Pass the block_device and operation that we plan to use this bio for to
> bio_alloc to optimize the assigment. NULL/0 can be passed, both for the
's/assigment/assignment/'
> passthrough case on a raw request_queue and to temporarily avoid
> refactoring some nasty code.
>
> Also move the gfp_mask argument after the nr_vecs argument for a much
> more logical calling convention matching what most of the kernel does.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
Looks good.
Reviewed-by: Chaitanya Kulkarni <[email protected]>
On 1/17/22 11:19 PM, Christoph Hellwig wrote:
> Pass the block_device that we plan to use this bio for and the
> operation to bio_init to optimize the assigment. A NULL block_device
's/assigment/assignment/' ?
> can be passed, both for the passthrough case on a raw request_queue and
> to temporarily avoid refactoring some nasty code.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
Looks good.
Reviewed-by: Chaitanya Kulkarni <[email protected]>
On 1/17/22 11:19 PM, Christoph Hellwig wrote:
> Pass the block_device that we plan to use this bio for and the
> operation to bio_reset to optimize the assigment. A NULL block_device
> can be passed, both for the passthrough case on a raw request_queue and
> to temporarily avoid refactoring some nasty code.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
Looks good.
Reviewed-by: Chaitanya Kulkarni <[email protected]>
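The fscrypt hunks earlier in the series show what this removes: after every bio_reset() the
caller currently has to re-set the device and opf by hand. Assuming the new bio_reset() takes
the bdev and opf after the bio, the reuse path shrinks to something like this sketch:

	/* before (as in fs/crypto/bio.c above) */
	bio_reset(bio);
	bio_set_dev(bio, inode->i_sb->s_bdev);
	bio->bi_opf = REQ_OP_WRITE;

	/* after (sketch, assuming a bio_reset(bio, bdev, opf) signature) */
	bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);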
Hi Christoph,
Thanks for the patch.
On Tue, Jan 18, 2022 at 8:20 AM Christoph Hellwig <[email protected]> wrote:
>
> The memory mapped in process_rdma is contiguous, so there is no need
> to loop over bio_add_page. Remove rnbd_bio_map_kern and just open code
> the bio allocation and mapping in the caller.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
> drivers/block/rnbd/rnbd-srv-dev.c | 57 -------------------------------
> drivers/block/rnbd/rnbd-srv-dev.h | 5 ---
> drivers/block/rnbd/rnbd-srv.c | 20 ++++++++---
> 3 files changed, 15 insertions(+), 67 deletions(-)
>
> diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
> index b241a099aeae2..98d3e591a0885 100644
> --- a/drivers/block/rnbd/rnbd-srv-dev.c
> +++ b/drivers/block/rnbd/rnbd-srv-dev.c
> @@ -44,60 +44,3 @@ void rnbd_dev_close(struct rnbd_dev *dev)
> blkdev_put(dev->bdev, dev->blk_open_flags);
> kfree(dev);
> }
> -
> -void rnbd_dev_bi_end_io(struct bio *bio)
> -{
> - struct rnbd_dev_blk_io *io = bio->bi_private;
> -
> - rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
> - bio_put(bio);
> -}
> -
> -/**
> - * rnbd_bio_map_kern - map kernel address into bio
> - * @data: pointer to buffer to map
> - * @bs: bio_set to use.
> - * @len: length in bytes
> - * @gfp_mask: allocation flags for bio allocation
> - *
> - * Map the kernel address into a bio suitable for io to a block
> - * device. Returns an error pointer in case of error.
> - */
> -struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
> - unsigned int len, gfp_t gfp_mask)
> -{
> - unsigned long kaddr = (unsigned long)data;
> - unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
> - unsigned long start = kaddr >> PAGE_SHIFT;
> - const int nr_pages = end - start;
> - int offset, i;
> - struct bio *bio;
> -
> - bio = bio_alloc_bioset(gfp_mask, nr_pages, bs);
> - if (!bio)
> - return ERR_PTR(-ENOMEM);
> -
> - offset = offset_in_page(kaddr);
> - for (i = 0; i < nr_pages; i++) {
> - unsigned int bytes = PAGE_SIZE - offset;
> -
> - if (len <= 0)
> - break;
> -
> - if (bytes > len)
> - bytes = len;
> -
> - if (bio_add_page(bio, virt_to_page(data), bytes,
> - offset) < bytes) {
> - /* we don't support partial mappings */
> - bio_put(bio);
> - return ERR_PTR(-EINVAL);
> - }
> -
> - data += bytes;
> - len -= bytes;
> - offset = 0;
> - }
> -
> - return bio;
> -}
> diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
> index 0eb23850afb95..1a14ece0be726 100644
> --- a/drivers/block/rnbd/rnbd-srv-dev.h
> +++ b/drivers/block/rnbd/rnbd-srv-dev.h
> @@ -41,11 +41,6 @@ void rnbd_dev_close(struct rnbd_dev *dev);
>
> void rnbd_endio(void *priv, int error);
>
> -void rnbd_dev_bi_end_io(struct bio *bio);
> -
> -struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
> - unsigned int len, gfp_t gfp_mask);
> -
> static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
> {
> return queue_max_segments(bdev_get_queue(dev->bdev));
> diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
> index 1ee808fc600cf..65c670e96075b 100644
> --- a/drivers/block/rnbd/rnbd-srv.c
> +++ b/drivers/block/rnbd/rnbd-srv.c
> @@ -114,6 +114,14 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
> return sess_dev;
> }
>
> +static void rnbd_dev_bi_end_io(struct bio *bio)
> +{
> + struct rnbd_dev_blk_io *io = bio->bi_private;
> +
> + rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
> + bio_put(bio);
> +}
> +
> static int process_rdma(struct rnbd_srv_session *srv_sess,
> struct rtrs_srv_op *id, void *data, u32 datalen,
> const void *usr, size_t usrlen)
> @@ -144,11 +152,11 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
> priv->sess_dev = sess_dev;
> priv->id = id;
>
> - /* Generate bio with pages pointing to the rdma buffer */
> - bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL);
> - if (IS_ERR(bio)) {
> - err = PTR_ERR(bio);
> - rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", err);
> + bio = bio_alloc_bioset(GFP_KERNEL, 1, sess_dev->rnbd_dev->ibd_bio_set);
> + if (bio_add_page(bio, virt_to_page(data), datalen,
> + offset_in_page(data))) {
this change leads to IO errors all the time, because bio_add_page returns len.
We need if (bio_add_page(bio, virt_to_page(data), datalen,
offset_in_page(data)) < datalen); a sketch of the corrected check follows this message.
Thanks!
> + rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
> + err = -EINVAL;
> goto sess_dev_put;
> }
>
> @@ -170,6 +178,8 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
>
> return 0;
>
> +bio_put:
> + bio_put(bio);
> sess_dev_put:
> rnbd_put_sess_dev(sess_dev);
> err:
> --
> 2.30.2
>
On Wed, Jan 19, 2022 at 1:20 AM Jinpu Wang <[email protected]> wrote:
>
> Hi Christoph,
>
> Thanks for the patch.
>
> On Tue, Jan 18, 2022 at 8:20 AM Christoph Hellwig <[email protected]> wrote:
> > [...]
> > @@ -170,6 +178,8 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
> >
> > return 0;
> >
> > +bio_put:
> > + bio_put(bio);
and bio_put is not used in this patch; bio_put(bio); should be moved below, so there is
no need for the bio_put: label here.
> > sess_dev_put:
> > rnbd_put_sess_dev(sess_dev);
> > err:
> > --
> > 2.30.2
> >
On Tue, Jan 18, 2022 at 8:20 AM Christoph Hellwig <[email protected]> wrote:
>
> Only the priv field of rnbd_dev_blk_io is used, so store the value of
> that in bio->bi_private directly and remove the entire bio_set overhead.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
there is one typo in the subject line, should be rnbd-srv.
> ---
> drivers/block/rnbd/rnbd-srv-dev.c | 4 +---
> drivers/block/rnbd/rnbd-srv-dev.h | 13 ++-----------
> drivers/block/rnbd/rnbd-srv.c | 30 +++++-------------------------
> drivers/block/rnbd/rnbd-srv.h | 1 -
> 4 files changed, 8 insertions(+), 40 deletions(-)
>
> diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
> index 98d3e591a0885..c5d0a03911659 100644
> --- a/drivers/block/rnbd/rnbd-srv-dev.c
> +++ b/drivers/block/rnbd/rnbd-srv-dev.c
> @@ -12,8 +12,7 @@
> #include "rnbd-srv-dev.h"
> #include "rnbd-log.h"
>
> -struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
> - struct bio_set *bs)
> +struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags)
> {
> struct rnbd_dev *dev;
> int ret;
> @@ -30,7 +29,6 @@ struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
>
> dev->blk_open_flags = flags;
> bdevname(dev->bdev, dev->name);
> - dev->ibd_bio_set = bs;
>
> return dev;
>
> diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
> index 1a14ece0be726..2c3df02b5e8ec 100644
> --- a/drivers/block/rnbd/rnbd-srv-dev.h
> +++ b/drivers/block/rnbd/rnbd-srv-dev.h
> @@ -14,25 +14,16 @@
>
> struct rnbd_dev {
> struct block_device *bdev;
> - struct bio_set *ibd_bio_set;
> fmode_t blk_open_flags;
> char name[BDEVNAME_SIZE];
> };
>
> -struct rnbd_dev_blk_io {
> - struct rnbd_dev *dev;
> - void *priv;
> - /* have to be last member for front_pad usage of bioset_init */
> - struct bio bio;
> -};
> -
> /**
> * rnbd_dev_open() - Open a device
> + * @path: path to open
> * @flags: open flags
> - * @bs: bio_set to use during block io,
> */
> -struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
> - struct bio_set *bs);
> +struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags);
>
> /**
> * rnbd_dev_close() - Close a device
> diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
> index 65c670e96075b..b1ac1414b56d5 100644
> --- a/drivers/block/rnbd/rnbd-srv.c
> +++ b/drivers/block/rnbd/rnbd-srv.c
> @@ -116,9 +116,7 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
>
> static void rnbd_dev_bi_end_io(struct bio *bio)
> {
> - struct rnbd_dev_blk_io *io = bio->bi_private;
> -
> - rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
> + rnbd_endio(bio->bi_private, blk_status_to_errno(bio->bi_status));
> bio_put(bio);
> }
>
> @@ -131,7 +129,6 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
> struct rnbd_srv_sess_dev *sess_dev;
> u32 dev_id;
> int err;
> - struct rnbd_dev_blk_io *io;
> struct bio *bio;
> short prio;
>
> @@ -152,20 +149,16 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
> priv->sess_dev = sess_dev;
> priv->id = id;
>
> - bio = bio_alloc_bioset(GFP_KERNEL, 1, sess_dev->rnbd_dev->ibd_bio_set);
> + bio = bio_alloc(GFP_KERNEL, 1);
> if (bio_add_page(bio, virt_to_page(data), datalen,
> offset_in_page(data))) {
> rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
> err = -EINVAL;
> - goto sess_dev_put;
> + goto bio_put;
ok, bio_put is used here; I think it's better to move it to patch 10.
> }
>
> - io = container_of(bio, struct rnbd_dev_blk_io, bio);
> - io->dev = sess_dev->rnbd_dev;
> - io->priv = priv;
> -
> bio->bi_end_io = rnbd_dev_bi_end_io;
> - bio->bi_private = io;
> + bio->bi_private = priv;
> bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
> bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
> bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
> @@ -180,7 +173,6 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
>
> bio_put:
> bio_put(bio);
> -sess_dev_put:
> rnbd_put_sess_dev(sess_dev);
> err:
> kfree(priv);
> @@ -261,7 +253,6 @@ static void destroy_sess(struct rnbd_srv_session *srv_sess)
>
> out:
> xa_destroy(&srv_sess->index_idr);
> - bioset_exit(&srv_sess->sess_bio_set);
>
> pr_info("RTRS Session %s disconnected\n", srv_sess->sessname);
>
> @@ -290,16 +281,6 @@ static int create_sess(struct rtrs_srv_sess *rtrs)
> return -ENOMEM;
>
> srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
> - err = bioset_init(&srv_sess->sess_bio_set, srv_sess->queue_depth,
> - offsetof(struct rnbd_dev_blk_io, bio),
> - BIOSET_NEED_BVECS);
> - if (err) {
> - pr_err("Allocating srv_session for path %s failed\n",
> - pathname);
> - kfree(srv_sess);
> - return err;
> - }
> -
> xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC);
> INIT_LIST_HEAD(&srv_sess->sess_dev_list);
> mutex_init(&srv_sess->lock);
> @@ -748,8 +729,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
> goto reject;
> }
>
> - rnbd_dev = rnbd_dev_open(full_path, open_flags,
> - &srv_sess->sess_bio_set);
> + rnbd_dev = rnbd_dev_open(full_path, open_flags);
> if (IS_ERR(rnbd_dev)) {
> pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %ld\n",
> full_path, srv_sess->sessname, PTR_ERR(rnbd_dev));
> diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
> index e5604bce123ab..be2ae486d407e 100644
> --- a/drivers/block/rnbd/rnbd-srv.h
> +++ b/drivers/block/rnbd/rnbd-srv.h
> @@ -23,7 +23,6 @@ struct rnbd_srv_session {
> struct rtrs_srv_sess *rtrs;
> char sessname[NAME_MAX];
> int queue_depth;
> - struct bio_set sess_bio_set;
>
> struct xarray index_idr;
> /* List of struct rnbd_srv_sess_dev */
> --
> 2.30.2
>
with it fixed, the patch looks good to me.
Thanks!
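For reference, folding the two rnbd-srv hunks together (and the bio_add_page fix discussed
above), the submit and error paths of process_rdma come out roughly as below; this is a
sketch assembled from the diffs, not a quote of the final tree:

	bio = bio_alloc(GFP_KERNEL, 1);
	if (bio_add_page(bio, virt_to_page(data), datalen,
			 offset_in_page(data)) < datalen) {
		rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
		err = -EINVAL;
		goto bio_put;
	}

	bio->bi_end_io = rnbd_dev_bi_end_io;
	bio->bi_private = priv;	/* no rnbd_dev_blk_io container any more */
	bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
	bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
	bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
	/* ... set priority and submit_bio(bio) as before ... */
	return 0;

bio_put:
	bio_put(bio);
	rnbd_put_sess_dev(sess_dev);
err:
	kfree(priv);
	return err;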
On Wed, Jan 19, 2022 at 01:20:54AM +0100, Jinpu Wang wrote:
> this changes lead to IO error all the time, because bio_add_page return len.
> We need if (bio_add_page(bio, virt_to_page(data), datalen,
> offset_in_page(data)) < datalen)
Does this version look good to you?
http://git.infradead.org/users/hch/block.git/commitdiff/62adb08e765b889dd8db4227cad33a710e36d631
On Thu, Jan 20, 2022 at 9:37 AM Christoph Hellwig <[email protected]> wrote:
>
> On Wed, Jan 19, 2022 at 01:20:54AM +0100, Jinpu Wang wrote:
> > this changes lead to IO error all the time, because bio_add_page return len.
> > We need if (bio_add_page(bio, virt_to_page(data), datalen,
> > offset_in_page(data)) < datalen)
>
> Does this version look good to you?
>
> http://git.infradead.org/users/hch/block.git/commitdiff/62adb08e765b889dd8db4227cad33a710e36d631
Yes, lgtm, thank you!
Reviewed-by: Jack Wang <[email protected]>
Tested-by: Jack Wang <[email protected]>
On Tue, Jan 18, 2022 at 10:11:06PM +0000, Chaitanya Kulkarni wrote:
> On 1/17/22 11:19 PM, Christoph Hellwig wrote:
> > All callers need to set the block_device and operation, so lift that into
> > the common code.
> >
> > Signed-off-by: Christoph Hellwig <[email protected]>
> > ---
>
> I sent out the exact patch for this one, anyways looks good.
>
> Reviewed-by: Chaitanya Kulkarni <[email protected]>
I'll switch attribution to you.