2023-06-05 21:36:41

by Kent Overstreet

Subject: [PATCH v2 1/5] block: Rework bio_for_each_segment_all()

This patch reworks bio_for_each_segment_all() to be more in line with how
the other bio iterators work:

- bio_iter_all_peek() now returns a synthesized bio_vec by value, instead
of stashing one in the iterator and passing a pointer to it. This makes it
clearer what is a constructed value vs. a reference to something
pre-existing, and it will also help with cleaning up and consolidating
code with bio_for_each_folio_all().

- We now provide bio_for_each_segment_all_continue() for squashfs, which
makes its code clearer; a short usage sketch follows below.
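
As a usage sketch (illustrative only, not part of the diff; it assumes a
bio and a byte offset already in scope), the reworked interface is used
roughly like this:

	struct bvec_iter_all iter;
	struct bio_vec bv;

	/* bv is a value synthesized by the iterator, not a pointer into the bio */
	bio_for_each_segment_all(bv, bio, iter)
		put_page(bv.bv_page);

	/*
	 * The _continue variant resumes from wherever the iterator was left,
	 * e.g. after skipping an initial byte offset:
	 */
	bvec_iter_all_init(&iter);
	bio_iter_all_advance(bio, &iter, offset);
	bio_for_each_segment_all_continue(bv, bio, iter) {
		/* process bv.bv_page, bv.bv_offset, bv.bv_len */
	}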

Signed-off-by: Kent Overstreet <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: [email protected]
Cc: Ming Lei <[email protected]>
Cc: Phillip Lougher <[email protected]>
---
block/bio.c | 38 ++++++++++++------------
block/blk-map.c | 38 ++++++++++++------------
block/bounce.c | 12 ++++----
drivers/md/bcache/btree.c | 8 ++---
drivers/md/dm-crypt.c | 10 +++----
drivers/md/raid1.c | 4 +--
fs/btrfs/disk-io.c | 4 +--
fs/btrfs/extent_io.c | 50 +++++++++++++++----------------
fs/btrfs/raid56.c | 14 ++++-----
fs/erofs/zdata.c | 4 +--
fs/f2fs/data.c | 20 ++++++-------
fs/gfs2/lops.c | 10 +++----
fs/gfs2/meta_io.c | 8 ++---
fs/squashfs/block.c | 48 +++++++++++++++++-------------
fs/squashfs/lz4_wrapper.c | 17 ++++++-----
fs/squashfs/lzo_wrapper.c | 17 ++++++-----
fs/squashfs/xz_wrapper.c | 19 ++++++------
fs/squashfs/zlib_wrapper.c | 18 ++++++-----
fs/squashfs/zstd_wrapper.c | 19 ++++++------
include/linux/bio.h | 28 ++++++++++++-----
include/linux/bvec.h | 61 ++++++++++++++++++++++----------------
21 files changed, 242 insertions(+), 205 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index 798cc4cf3b..2af9f50643 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1163,13 +1163,13 @@ EXPORT_SYMBOL(bio_add_folio);

void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
- struct bvec_iter_all iter_all;
- struct bio_vec *bvec;
+ struct bvec_iter_all iter;
+ struct bio_vec bvec;

- bio_for_each_segment_all(bvec, bio, iter_all) {
- if (mark_dirty && !PageCompound(bvec->bv_page))
- set_page_dirty_lock(bvec->bv_page);
- bio_release_page(bio, bvec->bv_page);
+ bio_for_each_segment_all(bvec, bio, iter) {
+ if (mark_dirty && !PageCompound(bvec.bv_page))
+ set_page_dirty_lock(bvec.bv_page);
+ bio_release_page(bio, bvec.bv_page);
}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);
@@ -1435,11 +1435,11 @@ EXPORT_SYMBOL(bio_copy_data);

void bio_free_pages(struct bio *bio)
{
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct bvec_iter_all iter;
+ struct bio_vec bvec;

- bio_for_each_segment_all(bvec, bio, iter_all)
- __free_page(bvec->bv_page);
+ bio_for_each_segment_all(bvec, bio, iter)
+ __free_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

@@ -1474,12 +1474,12 @@ EXPORT_SYMBOL(bio_free_pages);
*/
void bio_set_pages_dirty(struct bio *bio)
{
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct bvec_iter_all iter;
+ struct bio_vec bvec;

- bio_for_each_segment_all(bvec, bio, iter_all) {
- if (!PageCompound(bvec->bv_page))
- set_page_dirty_lock(bvec->bv_page);
+ bio_for_each_segment_all(bvec, bio, iter) {
+ if (!PageCompound(bvec.bv_page))
+ set_page_dirty_lock(bvec.bv_page);
}
}

@@ -1522,12 +1522,12 @@ static void bio_dirty_fn(struct work_struct *work)

void bio_check_pages_dirty(struct bio *bio)
{
- struct bio_vec *bvec;
+ struct bvec_iter_all iter;
+ struct bio_vec bvec;
unsigned long flags;
- struct bvec_iter_all iter_all;

- bio_for_each_segment_all(bvec, bio, iter_all) {
- if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
+ bio_for_each_segment_all(bvec, bio, iter) {
+ if (!PageDirty(bvec.bv_page) && !PageCompound(bvec.bv_page))
goto defer;
}

diff --git a/block/blk-map.c b/block/blk-map.c
index 3551c3ff17..524d84011f 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -47,21 +47,21 @@ static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
*/
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct bvec_iter_all bv_iter;
+ struct bio_vec bvec;

- bio_for_each_segment_all(bvec, bio, iter_all) {
+ bio_for_each_segment_all(bvec, bio, bv_iter) {
ssize_t ret;

- ret = copy_page_from_iter(bvec->bv_page,
- bvec->bv_offset,
- bvec->bv_len,
+ ret = copy_page_from_iter(bvec.bv_page,
+ bvec.bv_offset,
+ bvec.bv_len,
iter);

if (!iov_iter_count(iter))
break;

- if (ret < bvec->bv_len)
+ if (ret < bvec.bv_len)
return -EFAULT;
}

@@ -78,21 +78,21 @@ static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
*/
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct bvec_iter_all bv_iter;
+ struct bio_vec bvec;

- bio_for_each_segment_all(bvec, bio, iter_all) {
+ bio_for_each_segment_all(bvec, bio, bv_iter) {
ssize_t ret;

- ret = copy_page_to_iter(bvec->bv_page,
- bvec->bv_offset,
- bvec->bv_len,
+ ret = copy_page_to_iter(bvec.bv_page,
+ bvec.bv_offset,
+ bvec.bv_len,
&iter);

if (!iov_iter_count(&iter))
break;

- if (ret < bvec->bv_len)
+ if (ret < bvec.bv_len)
return -EFAULT;
}

@@ -443,12 +443,12 @@ static void bio_copy_kern_endio(struct bio *bio)
static void bio_copy_kern_endio_read(struct bio *bio)
{
char *p = bio->bi_private;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct bvec_iter_all iter;
+ struct bio_vec bvec;

- bio_for_each_segment_all(bvec, bio, iter_all) {
- memcpy_from_bvec(p, bvec);
- p += bvec->bv_len;
+ bio_for_each_segment_all(bvec, bio, iter) {
+ memcpy_from_bvec(p, &bvec);
+ p += bvec.bv_len;
}

bio_copy_kern_endio(bio);
diff --git a/block/bounce.c b/block/bounce.c
index 7cfcb242f9..e701832d76 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -102,18 +102,18 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
static void bounce_end_io(struct bio *bio)
{
struct bio *bio_orig = bio->bi_private;
- struct bio_vec *bvec, orig_vec;
+ struct bio_vec bvec, orig_vec;
struct bvec_iter orig_iter = bio_orig->bi_iter;
- struct bvec_iter_all iter_all;
+ struct bvec_iter_all iter;

/*
* free up bounce indirect pages used
*/
- bio_for_each_segment_all(bvec, bio, iter_all) {
+ bio_for_each_segment_all(bvec, bio, iter) {
orig_vec = bio_iter_iovec(bio_orig, orig_iter);
- if (bvec->bv_page != orig_vec.bv_page) {
- dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
- mempool_free(bvec->bv_page, &page_pool);
+ if (bvec.bv_page != orig_vec.bv_page) {
+ dec_zone_page_state(bvec.bv_page, NR_BOUNCE);
+ mempool_free(bvec.bv_page, &page_pool);
}
bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
}
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 147c493a98..98ce12b239 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -373,12 +373,12 @@ static void do_btree_node_write(struct btree *b)
bset_sector_offset(&b->keys, i));

if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
- struct bio_vec *bv;
+ struct bio_vec bv;
void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
- struct bvec_iter_all iter_all;
+ struct bvec_iter_all iter;

- bio_for_each_segment_all(bv, b->bio, iter_all) {
- memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
+ bio_for_each_segment_all(bv, b->bio, iter) {
+ memcpy(page_address(bv.bv_page), addr, PAGE_SIZE);
addr += PAGE_SIZE;
}

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8b47b913ee..8ffc225f7b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1713,12 +1713,12 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct bvec_iter_all iter;
+ struct bio_vec bv;

- bio_for_each_segment_all(bv, clone, iter_all) {
- BUG_ON(!bv->bv_page);
- mempool_free(bv->bv_page, &cc->page_pool);
+ bio_for_each_segment_all(bv, clone, iter) {
+ BUG_ON(!bv.bv_page);
+ mempool_free(bv.bv_page, &cc->page_pool);
}
}

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 68a9e2d998..4f58cae37e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2188,7 +2188,7 @@ static void process_checks(struct r1bio *r1_bio)
blk_status_t status = sbio->bi_status;
struct page **ppages = get_resync_pages(pbio)->pages;
struct page **spages = get_resync_pages(sbio)->pages;
- struct bio_vec *bi;
+ struct bio_vec bi;
int page_len[RESYNC_PAGES] = { 0 };
struct bvec_iter_all iter_all;

@@ -2198,7 +2198,7 @@ static void process_checks(struct r1bio *r1_bio)
sbio->bi_status = 0;

bio_for_each_segment_all(bi, sbio, iter_all)
- page_len[j++] = bi->bv_len;
+ page_len[j++] = bi.bv_len;

if (!status) {
for (j = vcnt; j-- ; ) {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index fbf9006c62..10326f978a 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3807,12 +3807,12 @@ ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
static void btrfs_end_super_write(struct bio *bio)
{
struct btrfs_device *device = bio->bi_private;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
struct bvec_iter_all iter_all;
struct page *page;

bio_for_each_segment_all(bvec, bio, iter_all) {
- page = bvec->bv_page;
+ page = bvec.bv_page;

if (bio->bi_status) {
btrfs_warn_rl_in_rcu(device->fs_info,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index a1adadd5d2..e3b7fbbad7 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -573,34 +573,34 @@ static void end_bio_extent_writepage(struct btrfs_bio *bbio)
{
struct bio *bio = &bbio->bio;
int error = blk_status_to_errno(bio->bi_status);
- struct bio_vec *bvec;
+ struct bio_vec bvec;
u64 start;
u64 end;
struct bvec_iter_all iter_all;

ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
+ struct page *page = bvec.bv_page;
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
const u32 sectorsize = fs_info->sectorsize;

/* Our read/write should always be sector aligned. */
- if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
+ if (!IS_ALIGNED(bvec.bv_offset, sectorsize))
btrfs_err(fs_info,
"partial page write in btrfs with offset %u and length %u",
- bvec->bv_offset, bvec->bv_len);
- else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
+ bvec.bv_offset, bvec.bv_len);
+ else if (!IS_ALIGNED(bvec.bv_len, sectorsize))
btrfs_info(fs_info,
"incomplete page write with offset %u and length %u",
- bvec->bv_offset, bvec->bv_len);
+ bvec.bv_offset, bvec.bv_len);

- start = page_offset(page) + bvec->bv_offset;
- end = start + bvec->bv_len - 1;
+ start = page_offset(page) + bvec.bv_offset;
+ end = start + bvec.bv_len - 1;

end_extent_writepage(page, error, start, end);

- btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
+ btrfs_page_clear_writeback(fs_info, page, start, bvec.bv_len);
}

bio_put(bio);
@@ -728,7 +728,7 @@ static struct extent_buffer *find_extent_buffer_readpage(
static void end_bio_extent_readpage(struct btrfs_bio *bbio)
{
struct bio *bio = &bbio->bio;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
struct processed_extent processed = { 0 };
/*
* The offset to the beginning of a bio, since one bio can never be
@@ -741,7 +741,7 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio)
ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, iter_all) {
bool uptodate = !bio->bi_status;
- struct page *page = bvec->bv_page;
+ struct page *page = bvec.bv_page;
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
const u32 sectorsize = fs_info->sectorsize;
@@ -761,19 +761,19 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio)
* for unaligned offsets, and an error if they don't add up to
* a full sector.
*/
- if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
+ if (!IS_ALIGNED(bvec.bv_offset, sectorsize))
btrfs_err(fs_info,
"partial page read in btrfs with offset %u and length %u",
- bvec->bv_offset, bvec->bv_len);
- else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
+ bvec.bv_offset, bvec.bv_len);
+ else if (!IS_ALIGNED(bvec.bv_offset + bvec.bv_len,
sectorsize))
btrfs_info(fs_info,
"incomplete page read with offset %u and length %u",
- bvec->bv_offset, bvec->bv_len);
+ bvec.bv_offset, bvec.bv_len);

- start = page_offset(page) + bvec->bv_offset;
- end = start + bvec->bv_len - 1;
- len = bvec->bv_len;
+ start = page_offset(page) + bvec.bv_offset;
+ end = start + bvec.bv_len - 1;
+ len = bvec.bv_len;

mirror = bbio->mirror_num;
if (uptodate && !is_data_inode(inode) &&
@@ -1879,7 +1879,7 @@ static void end_bio_subpage_eb_writepage(struct btrfs_bio *bbio)
{
struct bio *bio = &bbio->bio;
struct btrfs_fs_info *fs_info;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
struct bvec_iter_all iter_all;

fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
@@ -1887,12 +1887,12 @@ static void end_bio_subpage_eb_writepage(struct btrfs_bio *bbio)

ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
- u64 bvec_start = page_offset(page) + bvec->bv_offset;
- u64 bvec_end = bvec_start + bvec->bv_len - 1;
+ struct page *page = bvec.bv_page;
+ u64 bvec_start = page_offset(page) + bvec.bv_offset;
+ u64 bvec_end = bvec_start + bvec.bv_len - 1;
u64 cur_bytenr = bvec_start;

- ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));
+ ASSERT(IS_ALIGNED(bvec.bv_len, fs_info->nodesize));

/* Iterate through all extent buffers in the range */
while (cur_bytenr <= bvec_end) {
@@ -1936,14 +1936,14 @@ static void end_bio_subpage_eb_writepage(struct btrfs_bio *bbio)
static void end_bio_extent_buffer_writepage(struct btrfs_bio *bbio)
{
struct bio *bio = &bbio->bio;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
struct extent_buffer *eb;
int done;
struct bvec_iter_all iter_all;

ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
+ struct page *page = bvec.bv_page;

eb = (struct extent_buffer *)page->private;
BUG_ON(!eb);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 2fab37f062..8da6097c42 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1393,7 +1393,7 @@ static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
{
const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
struct bvec_iter_all iter_all;

ASSERT(!bio_flagged(bio, BIO_CLONED));
@@ -1402,9 +1402,9 @@ static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
struct sector_ptr *sector;
int pgoff;

- for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
+ for (pgoff = bvec.bv_offset; pgoff - bvec.bv_offset < bvec.bv_len;
pgoff += sectorsize) {
- sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
+ sector = find_stripe_sector(rbio, bvec.bv_page, pgoff);
ASSERT(sector);
if (sector)
sector->uptodate = 1;
@@ -1458,7 +1458,7 @@ static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
{
struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
int total_sector_nr = get_bio_sector_nr(rbio, bio);
- struct bio_vec *bvec;
+ struct bio_vec bvec;
struct bvec_iter_all iter_all;

/* No data csum for the whole stripe, no need to verify. */
@@ -1472,8 +1472,8 @@ static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
bio_for_each_segment_all(bvec, bio, iter_all) {
int bv_offset;

- for (bv_offset = bvec->bv_offset;
- bv_offset < bvec->bv_offset + bvec->bv_len;
+ for (bv_offset = bvec.bv_offset;
+ bv_offset < bvec.bv_offset + bvec.bv_len;
bv_offset += fs_info->sectorsize, total_sector_nr++) {
u8 csum_buf[BTRFS_CSUM_SIZE];
u8 *expected_csum = rbio->csum_buf +
@@ -1484,7 +1484,7 @@ static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
if (!test_bit(total_sector_nr, rbio->csum_bitmap))
continue;

- ret = btrfs_check_sector_csum(fs_info, bvec->bv_page,
+ ret = btrfs_check_sector_csum(fs_info, bvec.bv_page,
bv_offset, csum_buf, expected_csum);
if (ret < 0)
set_bit(total_sector_nr, rbio->error_bitmap);
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 45f21db230..fc0ce6d685 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1648,11 +1648,11 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
{
struct z_erofs_decompressqueue *q = bio->bi_private;
blk_status_t err = bio->bi_status;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
struct bvec_iter_all iter_all;

bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
+ struct page *page = bvec.bv_page;

DBG_BUGON(PageUptodate(page));
DBG_BUGON(z_erofs_page_is_invalidated(page));
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7165b1202f..d687e11bdf 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -139,12 +139,12 @@ struct bio_post_read_ctx {
*/
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
struct bvec_iter_all iter_all;
struct bio_post_read_ctx *ctx = bio->bi_private;

bio_for_each_segment_all(bv, bio, iter_all) {
- struct page *page = bv->bv_page;
+ struct page *page = bv.bv_page;

if (f2fs_is_compressed_page(page)) {
if (ctx && !ctx->decompression_attempted)
@@ -189,11 +189,11 @@ static void f2fs_verify_bio(struct work_struct *work)
* as those were handled separately by f2fs_end_read_compressed_page().
*/
if (may_have_compressed_pages) {
- struct bio_vec *bv;
+ struct bio_vec bv;
struct bvec_iter_all iter_all;

bio_for_each_segment_all(bv, bio, iter_all) {
- struct page *page = bv->bv_page;
+ struct page *page = bv.bv_page;

if (!f2fs_is_compressed_page(page) &&
!fsverity_verify_page(page)) {
@@ -241,13 +241,13 @@ static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
bool in_task)
{
- struct bio_vec *bv;
+ struct bio_vec bv;
struct bvec_iter_all iter_all;
bool all_compressed = true;
block_t blkaddr = ctx->fs_blkaddr;

bio_for_each_segment_all(bv, ctx->bio, iter_all) {
- struct page *page = bv->bv_page;
+ struct page *page = bv.bv_page;

if (f2fs_is_compressed_page(page))
f2fs_end_read_compressed_page(page, false, blkaddr,
@@ -327,7 +327,7 @@ static void f2fs_read_end_io(struct bio *bio)
static void f2fs_write_end_io(struct bio *bio)
{
struct f2fs_sb_info *sbi;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
struct bvec_iter_all iter_all;

iostat_update_and_unbind_ctx(bio);
@@ -337,7 +337,7 @@ static void f2fs_write_end_io(struct bio *bio)
bio->bi_status = BLK_STS_IOERR;

bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
+ struct page *page = bvec.bv_page;
enum count_type type = WB_DATA_TYPE(page);

if (page_private_dummy(page)) {
@@ -583,7 +583,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
static bool __has_merged_page(struct bio *bio, struct inode *inode,
struct page *page, nid_t ino)
{
- struct bio_vec *bvec;
+ struct bio_vec bvec;
struct bvec_iter_all iter_all;

if (!bio)
@@ -593,7 +593,7 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
return true;

bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *target = bvec->bv_page;
+ struct page *target = bvec.bv_page;

if (fscrypt_is_bounce_page(target)) {
target = fscrypt_pagecache_page(target);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 1902413d5d..7f62fe8eb7 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -202,7 +202,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
static void gfs2_end_log_write(struct bio *bio)
{
struct gfs2_sbd *sdp = bio->bi_private;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
struct page *page;
struct bvec_iter_all iter_all;

@@ -217,9 +217,9 @@ static void gfs2_end_log_write(struct bio *bio)
}

bio_for_each_segment_all(bvec, bio, iter_all) {
- page = bvec->bv_page;
+ page = bvec.bv_page;
if (page_has_buffers(page))
- gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
+ gfs2_end_log_write_bh(sdp, &bvec, bio->bi_status);
else
mempool_free(page, gfs2_page_pool);
}
@@ -395,11 +395,11 @@ static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
static void gfs2_end_log_read(struct bio *bio)
{
struct page *page;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
struct bvec_iter_all iter_all;

bio_for_each_segment_all(bvec, bio, iter_all) {
- page = bvec->bv_page;
+ page = bvec.bv_page;
if (bio->bi_status) {
int err = blk_status_to_errno(bio->bi_status);

diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 924361fa51..832572784e 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -193,15 +193,15 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)

static void gfs2_meta_read_endio(struct bio *bio)
{
- struct bio_vec *bvec;
+ struct bio_vec bvec;
struct bvec_iter_all iter_all;

bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
+ struct page *page = bvec.bv_page;
struct buffer_head *bh = page_buffers(page);
- unsigned int len = bvec->bv_len;
+ unsigned int len = bvec.bv_len;

- while (bh_offset(bh) < bvec->bv_offset)
+ while (bh_offset(bh) < bvec.bv_offset)
bh = bh->b_this_page;
do {
struct buffer_head *next = bh->b_this_page;
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index bed3bb8b27..83e8b44518 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -35,30 +35,33 @@ static int copy_bio_to_actor(struct bio *bio,
int offset, int req_length)
{
void *actor_addr;
- struct bvec_iter_all iter_all = {};
- struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+ struct bvec_iter_all iter;
+ struct bio_vec bvec;
int copied_bytes = 0;
int actor_offset = 0;
+ int bytes_to_copy;

squashfs_actor_nobuff(actor);
actor_addr = squashfs_first_page(actor);

- if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
- return 0;
+ bvec_iter_all_init(&iter);
+ bio_iter_all_advance(bio, &iter, offset);

- while (copied_bytes < req_length) {
- int bytes_to_copy = min_t(int, bvec->bv_len - offset,
+ while (copied_bytes < req_length &&
+ iter.idx < bio->bi_vcnt) {
+ bvec = bio_iter_all_peek(bio, &iter);
+
+ bytes_to_copy = min_t(int, bvec.bv_len,
PAGE_SIZE - actor_offset);

bytes_to_copy = min_t(int, bytes_to_copy,
req_length - copied_bytes);
if (!IS_ERR(actor_addr))
- memcpy(actor_addr + actor_offset, bvec_virt(bvec) +
- offset, bytes_to_copy);
+ memcpy(actor_addr + actor_offset, bvec_virt(&bvec),
+ bytes_to_copy);

actor_offset += bytes_to_copy;
copied_bytes += bytes_to_copy;
- offset += bytes_to_copy;

if (actor_offset >= PAGE_SIZE) {
actor_addr = squashfs_next_page(actor);
@@ -66,11 +69,8 @@ static int copy_bio_to_actor(struct bio *bio,
break;
actor_offset = 0;
}
- if (offset >= bvec->bv_len) {
- if (!bio_next_segment(bio, &iter_all))
- break;
- offset = 0;
- }
+
+ bio_iter_all_advance(bio, &iter, bytes_to_copy);
}
squashfs_finish_page(actor);
return copied_bytes;
@@ -159,8 +159,10 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
* Metadata block.
*/
const u8 *data;
- struct bvec_iter_all iter_all = {};
- struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+ struct bvec_iter_all iter;
+ struct bio_vec bvec;
+
+ bvec_iter_all_init(&iter);

if (index + 2 > msblk->bytes_used) {
res = -EIO;
@@ -170,21 +172,25 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
if (res)
goto out;

- if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
+ bvec = bio_iter_all_peek(bio, &iter);
+
+ if (WARN_ON_ONCE(!bvec.bv_len)) {
res = -EIO;
goto out_free_bio;
}
/* Extract the length of the metadata block */
- data = bvec_virt(bvec);
+ data = bvec_virt(&bvec);
length = data[offset];
- if (offset < bvec->bv_len - 1) {
+ if (offset < bvec.bv_len - 1) {
length |= data[offset + 1] << 8;
} else {
- if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
+ bio_iter_all_advance(bio, &iter, bvec.bv_len);
+
+ if (WARN_ON_ONCE(!bvec.bv_len)) {
res = -EIO;
goto out_free_bio;
}
- data = bvec_virt(bvec);
+ data = bvec_virt(&bvec);
length |= data[0] << 8;
}
bio_free_pages(bio);
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
index 49797729f1..bd0dd787d2 100644
--- a/fs/squashfs/lz4_wrapper.c
+++ b/fs/squashfs/lz4_wrapper.c
@@ -92,20 +92,23 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct bio *bio, int offset, int length,
struct squashfs_page_actor *output)
{
- struct bvec_iter_all iter_all = {};
- struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+ struct bvec_iter_all iter;
+ struct bio_vec bvec;
struct squashfs_lz4 *stream = strm;
void *buff = stream->input, *data;
int bytes = length, res;

- while (bio_next_segment(bio, &iter_all)) {
- int avail = min(bytes, ((int)bvec->bv_len) - offset);
+ bvec_iter_all_init(&iter);
+ bio_iter_all_advance(bio, &iter, offset);

- data = bvec_virt(bvec);
- memcpy(buff, data + offset, avail);
+ bio_for_each_segment_all_continue(bvec, bio, iter) {
+ unsigned avail = min_t(unsigned, bytes, bvec.bv_len);
+
+ memcpy(buff, bvec_virt(&bvec), avail);
buff += avail;
bytes -= avail;
- offset = 0;
+ if (!bytes)
+ break;
}

res = LZ4_decompress_safe(stream->input, stream->output,
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index d216aeefa8..bccfcfa12e 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -66,21 +66,24 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct bio *bio, int offset, int length,
struct squashfs_page_actor *output)
{
- struct bvec_iter_all iter_all = {};
- struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+ struct bvec_iter_all iter;
+ struct bio_vec bvec;
struct squashfs_lzo *stream = strm;
void *buff = stream->input, *data;
int bytes = length, res;
size_t out_len = output->length;

- while (bio_next_segment(bio, &iter_all)) {
- int avail = min(bytes, ((int)bvec->bv_len) - offset);
+ bvec_iter_all_init(&iter);
+ bio_iter_all_advance(bio, &iter, offset);

- data = bvec_virt(bvec);
- memcpy(buff, data + offset, avail);
+ bio_for_each_segment_all_continue(bvec, bio, iter) {
+ unsigned avail = min_t(unsigned, bytes, bvec.bv_len);
+
+ memcpy(buff, bvec_virt(&bvec), avail);
buff += avail;
bytes -= avail;
- offset = 0;
+ if (!bytes)
+ break;
}

res = lzo1x_decompress_safe(stream->input, (size_t)length,
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index 6c49481a2f..6cf0e11e3b 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -120,8 +120,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct bio *bio, int offset, int length,
struct squashfs_page_actor *output)
{
- struct bvec_iter_all iter_all = {};
- struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+ struct bvec_iter_all iter;
int total = 0, error = 0;
struct squashfs_xz *stream = strm;

@@ -136,26 +135,28 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
goto finish;
}

+ bvec_iter_all_init(&iter);
+ bio_iter_all_advance(bio, &iter, offset);
+
for (;;) {
enum xz_ret xz_err;

if (stream->buf.in_pos == stream->buf.in_size) {
- const void *data;
- int avail;
+ struct bio_vec bvec = bio_iter_all_peek(bio, &iter);
+ unsigned avail = min_t(unsigned, length, bvec.bv_len);

- if (!bio_next_segment(bio, &iter_all)) {
+ if (iter.idx >= bio->bi_vcnt) {
/* XZ_STREAM_END must be reached. */
error = -EIO;
break;
}

- avail = min(length, ((int)bvec->bv_len) - offset);
- data = bvec_virt(bvec);
length -= avail;
- stream->buf.in = data + offset;
+ stream->buf.in = bvec_virt(&bvec);
stream->buf.in_size = avail;
stream->buf.in_pos = 0;
- offset = 0;
+
+ bio_iter_all_advance(bio, &iter, avail);
}

if (stream->buf.out_pos == stream->buf.out_size) {
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index cbb7afe7bc..981ca5e410 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -53,8 +53,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct bio *bio, int offset, int length,
struct squashfs_page_actor *output)
{
- struct bvec_iter_all iter_all = {};
- struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+ struct bvec_iter_all iter;
int zlib_init = 0, error = 0;
z_stream *stream = strm;

@@ -67,25 +66,28 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
goto finish;
}

+ bvec_iter_all_init(&iter);
+ bio_iter_all_advance(bio, &iter, offset);
+
for (;;) {
int zlib_err;

if (stream->avail_in == 0) {
- const void *data;
+ struct bio_vec bvec = bio_iter_all_peek(bio, &iter);
int avail;

- if (!bio_next_segment(bio, &iter_all)) {
+ if (iter.idx >= bio->bi_vcnt) {
/* Z_STREAM_END must be reached. */
error = -EIO;
break;
}

- avail = min(length, ((int)bvec->bv_len) - offset);
- data = bvec_virt(bvec);
+ avail = min_t(unsigned, length, bvec.bv_len);
length -= avail;
- stream->next_in = data + offset;
+ stream->next_in = bvec_virt(&bvec);
stream->avail_in = avail;
- offset = 0;
+
+ bio_iter_all_advance(bio, &iter, avail);
}

if (stream->avail_out == 0) {
diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c
index 0e407c4d8b..658e5d462a 100644
--- a/fs/squashfs/zstd_wrapper.c
+++ b/fs/squashfs/zstd_wrapper.c
@@ -68,8 +68,7 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
int error = 0;
zstd_in_buffer in_buf = { NULL, 0, 0 };
zstd_out_buffer out_buf = { NULL, 0, 0 };
- struct bvec_iter_all iter_all = {};
- struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+ struct bvec_iter_all iter;

stream = zstd_init_dstream(wksp->window_size, wksp->mem, wksp->mem_size);

@@ -85,25 +84,27 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
goto finish;
}

+ bvec_iter_all_init(&iter);
+ bio_iter_all_advance(bio, &iter, offset);
+
for (;;) {
size_t zstd_err;

if (in_buf.pos == in_buf.size) {
- const void *data;
- int avail;
+ struct bio_vec bvec = bio_iter_all_peek(bio, &iter);
+ unsigned avail = min_t(unsigned, length, bvec.bv_len);

- if (!bio_next_segment(bio, &iter_all)) {
+ if (iter.idx >= bio->bi_vcnt) {
error = -EIO;
break;
}

- avail = min(length, ((int)bvec->bv_len) - offset);
- data = bvec_virt(bvec);
length -= avail;
- in_buf.src = data + offset;
+ in_buf.src = bvec_virt(&bvec);
in_buf.size = avail;
in_buf.pos = 0;
- offset = 0;
+
+ bio_iter_all_advance(bio, &iter, avail);
}

if (out_buf.pos == out_buf.size) {
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 8588bcfbc6..39d8a1cc6e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -78,22 +78,34 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}

-static inline bool bio_next_segment(const struct bio *bio,
- struct bvec_iter_all *iter)
+static inline struct bio_vec bio_iter_all_peek(const struct bio *bio,
+ struct bvec_iter_all *iter)
{
- if (iter->idx >= bio->bi_vcnt)
- return false;
+ return bvec_iter_all_peek(bio->bi_io_vec, iter);
+}

- bvec_advance(&bio->bi_io_vec[iter->idx], iter);
- return true;
+static inline void bio_iter_all_advance(const struct bio *bio,
+ struct bvec_iter_all *iter,
+ unsigned bytes)
+{
+ bvec_iter_all_advance(bio->bi_io_vec, iter, bytes);
}

+#define bio_for_each_segment_all_continue(bvl, bio, iter) \
+ for (; \
+ iter.idx < bio->bi_vcnt && \
+ ((bvl = bio_iter_all_peek(bio, &iter)), true); \
+ bio_iter_all_advance((bio), &iter, bvl.bv_len))
+
/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
-#define bio_for_each_segment_all(bvl, bio, iter) \
- for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
+#define bio_for_each_segment_all(bvl, bio, iter) \
+ for (bvec_iter_all_init(&iter); \
+ iter.idx < (bio)->bi_vcnt && \
+ ((bvl = bio_iter_all_peek((bio), &iter)), true); \
+ bio_iter_all_advance((bio), &iter, bvl.bv_len))

static inline void bio_advance_iter(const struct bio *bio,
struct bvec_iter *iter, unsigned int bytes)
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 555aae5448..635fb54143 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -85,12 +85,6 @@ struct bvec_iter {
current bvec */
} __packed;

-struct bvec_iter_all {
- struct bio_vec bv;
- int idx;
- unsigned done;
-};
-
/*
* various member access, note that bio_data should of course not be used
* on highmem page vectors
@@ -184,7 +178,10 @@ static inline void bvec_iter_advance_single(const struct bio_vec *bv,
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))

-/* for iterating one bio from start to end */
+/*
+ * bvec_iter_all: for advancing over a bio as it was originally created, but
+ * with the usual bio_for_each_segment interface - nonstandard, do not use:
+ */
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
{ \
.bi_sector = 0, \
@@ -193,33 +190,45 @@ static inline void bvec_iter_advance_single(const struct bio_vec *bv,
.bi_bvec_done = 0, \
}

-static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
+/*
+ * bvec_iter_all: for advancing over individual pages in a bio, as it was when
+ * it was first created:
+ */
+struct bvec_iter_all {
+ int idx;
+ unsigned done;
+};
+
+static inline void bvec_iter_all_init(struct bvec_iter_all *iter_all)
{
iter_all->done = 0;
iter_all->idx = 0;
+}

- return &iter_all->bv;
+static inline struct bio_vec bvec_iter_all_peek(const struct bio_vec *bvec,
+ struct bvec_iter_all *iter)
+{
+ struct bio_vec bv = bvec[iter->idx];
+
+ bv.bv_offset += iter->done;
+ bv.bv_len -= iter->done;
+
+ bv.bv_page += bv.bv_offset >> PAGE_SHIFT;
+ bv.bv_offset &= ~PAGE_MASK;
+ bv.bv_len = min_t(unsigned, PAGE_SIZE - bv.bv_offset, bv.bv_len);
+
+ return bv;
}

-static inline void bvec_advance(const struct bio_vec *bvec,
- struct bvec_iter_all *iter_all)
+static inline void bvec_iter_all_advance(const struct bio_vec *bvec,
+ struct bvec_iter_all *iter,
+ unsigned bytes)
{
- struct bio_vec *bv = &iter_all->bv;
-
- if (iter_all->done) {
- bv->bv_page++;
- bv->bv_offset = 0;
- } else {
- bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT);
- bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;
- }
- bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
- bvec->bv_len - iter_all->done);
- iter_all->done += bv->bv_len;
+ iter->done += bytes;

- if (iter_all->done == bvec->bv_len) {
- iter_all->idx++;
- iter_all->done = 0;
+ while (iter->done && iter->done >= bvec[iter->idx].bv_len) {
+ iter->done -= bvec[iter->idx].bv_len;
+ iter->idx++;
}
}

--
2.40.1



2023-06-05 21:37:27

by Kent Overstreet

Subject: [PATCH v2 5/5] block: CONFIG_BLK_DEBUG

This adds a new Kconfig option that enables some extra debug-mode
assertions. Currently it is only used for some bio iterator assertions,
but hopefully more will be added in the future.
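
For illustration (mirroring the hunks below, not a new interface), an
assertion site guarded by the new macro looks like:

	/*
	 * With CONFIG_BLK_DEBUG=y this expands to BUG_ON(); otherwise it only
	 * type-checks the condition and generates no code.
	 */
	BLK_BUG_ON(iter->idx >= bio->bi_vcnt);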

Signed-off-by: Kent Overstreet <[email protected]>
---
block/Kconfig | 7 +++++++
include/linux/bio.h | 5 +++++
include/linux/bvec.h | 8 ++++++++
3 files changed, 20 insertions(+)

diff --git a/block/Kconfig b/block/Kconfig
index 86122e459f..611dddb59c 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -165,6 +165,13 @@ config BLK_CGROUP_IOPRIO
scheduler and block devices process requests. Only some I/O schedulers
and some block devices support I/O priorities.

+config BLK_DEBUG
+ bool "Extra block layer assertions"
+ depends on DEBUG_KERNEL
+ help
+ Enable extra assertions in the block layer. Currently this is just the
+ bio iterator code, but may be expanded.
+
config BLK_DEBUG_FS
bool "Block layer debugging information in debugfs"
default y
diff --git a/include/linux/bio.h b/include/linux/bio.h
index f599bcf15e..cece57c54d 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -81,6 +81,8 @@ static inline void *bio_data(struct bio *bio)
static inline struct bio_vec bio_iter_all_peek(const struct bio *bio,
struct bvec_iter_all *iter)
{
+ BLK_BUG_ON(iter->idx >= bio->bi_vcnt);
+
return bvec_iter_all_peek(bio->bi_io_vec, iter);
}

@@ -89,6 +91,9 @@ static inline void bio_iter_all_advance(const struct bio *bio,
unsigned bytes)
{
bvec_iter_all_advance(bio->bi_io_vec, iter, bytes);
+
+ BLK_BUG_ON(iter->idx > bio->bi_vcnt ||
+ (iter->idx == bio->bi_vcnt && iter->done));
}

#define bio_for_each_segment_all_continue(bvl, bio, iter) \
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ee5965fedf..2467245da2 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -16,6 +16,12 @@

struct page;

+#ifdef CONFIG_BLK_DEBUG
+#define BLK_BUG_ON(cond) BUG_ON(cond)
+#else
+#define BLK_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#endif
+
/**
* struct bio_vec - a contiguous range of physical memory addresses
* @bv_page: First page associated with the address range.
@@ -210,6 +216,8 @@ static inline struct bio_vec __bvec_iter_all_peek(const struct bio_vec *bvec,
{
struct bio_vec bv = bvec[iter->idx];

+ BLK_BUG_ON(iter->done >= bv.bv_len);
+
bv.bv_offset += iter->done;
bv.bv_len -= iter->done;

--
2.40.1


2023-06-05 22:06:36

by Kent Overstreet

Subject: [PATCH v2 2/5] block: Rework bio_for_each_folio_all()

This reimplements bio_for_each_folio_all() on top of the newly reworked
bvec_iter_all; since the implementation is now trivial, we also provide
bio_for_each_folio.
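
As a usage sketch (illustrative only; bio is assumed to be in scope), the
reworked folio iteration looks roughly like this:

	struct bvec_iter_all iter;
	struct bvec_iter bv_iter;
	struct folio_seg fs;

	/* fs is a folio_seg value: a folio plus an offset and length within it */
	bio_for_each_folio_all(fs, bio, iter)
		folio_mark_uptodate(fs.fs_folio);

	/* bio_for_each_folio() is the same iteration driven by a regular bvec_iter */
	bio_for_each_folio(fs, bio, bv_iter)
		folio_unlock(fs.fs_folio);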

Signed-off-by: Kent Overstreet <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: [email protected]
Signed-off-by: Kent Overstreet <[email protected]>
---
fs/crypto/bio.c | 9 ++--
fs/ext4/page-io.c | 11 ++---
fs/ext4/readpage.c | 7 ++--
fs/iomap/buffered-io.c | 14 ++++---
fs/mpage.c | 22 +++++-----
fs/verity/verify.c | 9 ++--
include/linux/bio.h | 95 ++++++++++++++++++++++--------------------
include/linux/bvec.h | 13 ++++--
8 files changed, 99 insertions(+), 81 deletions(-)

diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 62e1a3dd83..4fed8d175c 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -30,11 +30,12 @@
*/
bool fscrypt_decrypt_bio(struct bio *bio)
{
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;

- bio_for_each_folio_all(fi, bio) {
- int err = fscrypt_decrypt_pagecache_blocks(fi.folio, fi.length,
- fi.offset);
+ bio_for_each_folio_all(fs, bio, iter) {
+ int err = fscrypt_decrypt_pagecache_blocks(fs.fs_folio, fs.fs_len,
+ fs.fs_offset);

if (err) {
bio->bi_status = errno_to_blk_status(err);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 3621f29ec6..4c2fa805b3 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -99,14 +99,15 @@ static void buffer_io_error(struct buffer_head *bh)

static void ext4_finish_bio(struct bio *bio)
{
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;

- bio_for_each_folio_all(fi, bio) {
- struct folio *folio = fi.folio;
+ bio_for_each_folio_all(fs, bio, iter) {
+ struct folio *folio = fs.fs_folio;
struct folio *io_folio = NULL;
struct buffer_head *bh, *head;
- size_t bio_start = fi.offset;
- size_t bio_end = bio_start + fi.length;
+ size_t bio_start = fs.fs_offset;
+ size_t bio_end = bio_start + fs.fs_len;
unsigned under_io = 0;
unsigned long flags;

diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 6f46823fba..8aa560d61c 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -68,10 +68,11 @@ struct bio_post_read_ctx {

static void __read_end_io(struct bio *bio)
{
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;

- bio_for_each_folio_all(fi, bio) {
- struct folio *folio = fi.folio;
+ bio_for_each_folio_all(fs, bio, iter) {
+ struct folio *folio = fs.fs_folio;

if (bio->bi_status)
folio_clear_uptodate(folio);
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 063133ec77..43956529cb 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -187,10 +187,11 @@ static void iomap_finish_folio_read(struct folio *folio, size_t offset,
static void iomap_read_end_io(struct bio *bio)
{
int error = blk_status_to_errno(bio->bi_status);
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;

- bio_for_each_folio_all(fi, bio)
- iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
+ bio_for_each_folio_all(fs, bio, iter)
+ iomap_finish_folio_read(fs.fs_folio, fs.fs_offset, fs.fs_len, error);
bio_put(bio);
}

@@ -1321,7 +1322,8 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
u32 folio_count = 0;

for (bio = &ioend->io_inline_bio; bio; bio = next) {
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;

/*
* For the last bio, bi_private points to the ioend, so we
@@ -1333,8 +1335,8 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
next = bio->bi_private;

/* walk all folios in bio, ending page IO on them */
- bio_for_each_folio_all(fi, bio) {
- iomap_finish_folio_write(inode, fi.folio, fi.length,
+ bio_for_each_folio_all(fs, bio, iter) {
+ iomap_finish_folio_write(inode, fs.fs_folio, fs.fs_len,
error);
folio_count++;
}
diff --git a/fs/mpage.c b/fs/mpage.c
index 242e213ee0..7da2c80f71 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -45,15 +45,16 @@
*/
static void mpage_read_end_io(struct bio *bio)
{
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;
int err = blk_status_to_errno(bio->bi_status);

- bio_for_each_folio_all(fi, bio) {
+ bio_for_each_folio_all(fs, bio, iter) {
if (err)
- folio_set_error(fi.folio);
+ folio_set_error(fs.fs_folio);
else
- folio_mark_uptodate(fi.folio);
- folio_unlock(fi.folio);
+ folio_mark_uptodate(fs.fs_folio);
+ folio_unlock(fs.fs_folio);
}

bio_put(bio);
@@ -61,15 +62,16 @@ static void mpage_read_end_io(struct bio *bio)

static void mpage_write_end_io(struct bio *bio)
{
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;
int err = blk_status_to_errno(bio->bi_status);

- bio_for_each_folio_all(fi, bio) {
+ bio_for_each_folio_all(fs, bio, iter) {
if (err) {
- folio_set_error(fi.folio);
- mapping_set_error(fi.folio->mapping, err);
+ folio_set_error(fs.fs_folio);
+ mapping_set_error(fs.fs_folio->mapping, err);
}
- folio_end_writeback(fi.folio);
+ folio_end_writeback(fs.fs_folio);
}

bio_put(bio);
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
index e250822275..29ebca1006 100644
--- a/fs/verity/verify.c
+++ b/fs/verity/verify.c
@@ -340,7 +340,8 @@ void fsverity_verify_bio(struct bio *bio)
struct inode *inode = bio_first_page_all(bio)->mapping->host;
struct fsverity_info *vi = inode->i_verity_info;
struct ahash_request *req;
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;
unsigned long max_ra_pages = 0;

/* This allocation never fails, since it's mempool-backed. */
@@ -359,9 +360,9 @@ void fsverity_verify_bio(struct bio *bio)
max_ra_pages = bio->bi_iter.bi_size >> (PAGE_SHIFT + 2);
}

- bio_for_each_folio_all(fi, bio) {
- if (!verify_data_blocks(inode, vi, req, fi.folio, fi.length,
- fi.offset, max_ra_pages)) {
+ bio_for_each_folio_all(fs, bio, iter) {
+ if (!verify_data_blocks(inode, vi, req, fs.fs_folio, fs.fs_len,
+ fs.fs_offset, max_ra_pages)) {
bio->bi_status = BLK_STS_IOERR;
break;
}
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 39d8a1cc6e..4b197ea83c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -163,6 +163,46 @@ static inline void bio_advance(struct bio *bio, unsigned int nbytes)
#define bio_for_each_segment(bvl, bio, iter) \
__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

+struct folio_seg {
+ struct folio *fs_folio;
+ size_t fs_offset;
+ size_t fs_len;
+};
+
+/*
+ * Returns the initial portion of @bv that points to a single folio (or all of
+ * @bv, if it points to a single folio)
+ */
+static inline struct folio_seg biovec_to_folioseg(struct bio_vec bv)
+{
+
+ struct folio *folio = page_folio(bv.bv_page);
+ size_t offset = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
+ bv.bv_offset;
+ size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);
+
+ return (struct folio_seg) {
+ .fs_folio = folio,
+ .fs_offset = offset,
+ .fs_len = len,
+ };
+}
+
+static inline struct folio_seg bio_iter_iovec_folio(struct bio *bio,
+ struct bvec_iter iter)
+{
+ return biovec_to_folioseg(bio_iter_iovec(bio, iter));
+}
+
+#define __bio_for_each_folio(fs, bio, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((fs = bio_iter_iovec_folio((bio), (iter))), 1); \
+ bio_advance_iter_single((bio), &(iter), (fs).fs_len))
+
+#define bio_for_each_folio(fs, bio, iter) \
+ __bio_for_each_folio(fs, bio, iter, (bio)->bi_iter)
+
#define __bio_for_each_bvec(bvl, bio, iter, start) \
for (iter = (start); \
(iter).bi_size && \
@@ -271,59 +311,22 @@ static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

-/**
- * struct folio_iter - State for iterating all folios in a bio.
- * @folio: The current folio we're iterating. NULL after the last folio.
- * @offset: The byte offset within the current folio.
- * @length: The number of bytes in this iteration (will not cross folio
- * boundary).
- */
-struct folio_iter {
- struct folio *folio;
- size_t offset;
- size_t length;
- /* private: for use by the iterator */
- struct folio *_next;
- size_t _seg_count;
- int _i;
-};
-
-static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
- int i)
+static inline struct folio_seg bio_folio_iter_all_peek(const struct bio *bio,
+ const struct bvec_iter_all *iter)
{
- struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
-
- fi->folio = page_folio(bvec->bv_page);
- fi->offset = bvec->bv_offset +
- PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
- fi->_seg_count = bvec->bv_len;
- fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
- fi->_next = folio_next(fi->folio);
- fi->_i = i;
-}
-
-static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
-{
- fi->_seg_count -= fi->length;
- if (fi->_seg_count) {
- fi->folio = fi->_next;
- fi->offset = 0;
- fi->length = min(folio_size(fi->folio), fi->_seg_count);
- fi->_next = folio_next(fi->folio);
- } else if (fi->_i + 1 < bio->bi_vcnt) {
- bio_first_folio(fi, bio, fi->_i + 1);
- } else {
- fi->folio = NULL;
- }
+ return biovec_to_folioseg(__bvec_iter_all_peek(bio->bi_io_vec, iter));
}

/**
* bio_for_each_folio_all - Iterate over each folio in a bio.
- * @fi: struct folio_iter which is updated for each folio.
+ * @fi: struct bio_folio_iter_all which is updated for each folio.
* @bio: struct bio to iterate over.
*/
-#define bio_for_each_folio_all(fi, bio) \
- for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
+#define bio_for_each_folio_all(fs, bio, iter) \
+ for (bvec_iter_all_init(&iter); \
+ iter.idx < bio->bi_vcnt && \
+ ((fs = bio_folio_iter_all_peek(bio, &iter)), true); \
+ bio_iter_all_advance((bio), &iter, fs.fs_len))

enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 635fb54143..ebf8b612ba 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -205,8 +205,8 @@ static inline void bvec_iter_all_init(struct bvec_iter_all *iter_all)
iter_all->idx = 0;
}

-static inline struct bio_vec bvec_iter_all_peek(const struct bio_vec *bvec,
- struct bvec_iter_all *iter)
+static inline struct bio_vec __bvec_iter_all_peek(const struct bio_vec *bvec,
+ const struct bvec_iter_all *iter)
{
struct bio_vec bv = bvec[iter->idx];

@@ -215,8 +215,15 @@ static inline struct bio_vec bvec_iter_all_peek(const struct bio_vec *bvec,

bv.bv_page += bv.bv_offset >> PAGE_SHIFT;
bv.bv_offset &= ~PAGE_MASK;
- bv.bv_len = min_t(unsigned, PAGE_SIZE - bv.bv_offset, bv.bv_len);
+ return bv;
+}
+
+static inline struct bio_vec bvec_iter_all_peek(const struct bio_vec *bvec,
+ const struct bvec_iter_all *iter)
+{
+ struct bio_vec bv = __bvec_iter_all_peek(bvec, iter);

+ bv.bv_len = min_t(unsigned, PAGE_SIZE - bv.bv_offset, bv.bv_len);
return bv;
}

--
2.40.1


2023-06-05 22:18:53

by Kent Overstreet

Subject: [PATCH v2 4/5] block: Change memcpy_(to|from)_bvec to pass bvec by value

This is a small code size decrease; memcpy_from_bvec() is inlined, so
there's no benefit to passing the bvec by reference, and taking a pointer
to it may inhibit optimization in the caller.
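
A minimal before/after sketch of the calling convention (illustrative
only; buf and bv are assumed to be in scope, and bv must be a single-page
bvec):

	/* before: pass a pointer */
	memcpy_from_bvec(buf, &bv);
	memcpy_to_bvec(&bv, buf);

	/* after: pass the bvec by value */
	memcpy_from_bvec(buf, bv);
	memcpy_to_bvec(bv, buf);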

Signed-off-by: Kent Overstreet <[email protected]>
---
block/blk-map.c | 2 +-
block/bounce.c | 4 ++--
drivers/block/floppy.c | 4 ++--
drivers/block/ps3disk.c | 2 +-
drivers/block/zram/zram_drv.c | 4 ++--
drivers/md/dm-log-writes.c | 2 +-
drivers/scsi/aha1542.c | 2 +-
include/linux/bvec.h | 8 ++++----
8 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index 524d84011f..50540d0a22 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -447,7 +447,7 @@ static void bio_copy_kern_endio_read(struct bio *bio)
struct bio_vec bvec;

bio_for_each_segment_all(bvec, bio, iter) {
- memcpy_from_bvec(p, &bvec);
+ memcpy_from_bvec(p, bvec);
p += bvec.bv_len;
}

diff --git a/block/bounce.c b/block/bounce.c
index e701832d76..1dc6716084 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -92,7 +92,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
* been modified by the block layer, so use the original
* copy, bounce_copy_vec already uses tovec->bv_len
*/
- memcpy_to_bvec(&tovec, page_address(fromvec.bv_page) +
+ memcpy_to_bvec(tovec, page_address(fromvec.bv_page) +
tovec.bv_offset);
}
bio_advance_iter(from, &from_iter, tovec.bv_len);
@@ -249,7 +249,7 @@ struct bio *__blk_queue_bounce(struct bio *bio_orig, struct request_queue *q)

if (rw == WRITE) {
flush_dcache_page(to->bv_page);
- memcpy_from_bvec(page_address(bounce_page), to);
+ memcpy_from_bvec(page_address(bounce_page), *to);
}
to->bv_page = bounce_page;
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index cec2c20f5e..039e4ebfad 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2483,9 +2483,9 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
}

if (CT(raw_cmd->cmd[COMMAND]) == FD_READ)
- memcpy_to_bvec(&bv, dma_buffer);
+ memcpy_to_bvec(bv, dma_buffer);
else
- memcpy_from_bvec(dma_buffer, &bv);
+ memcpy_from_bvec(dma_buffer, bv);

remaining -= size;
dma_buffer += size;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 36d7b36c60..73db9ca530 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -86,7 +86,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,

rq_for_each_segment(bvec, req, iter) {
if (gather)
- memcpy_from_bvec(dev->bounce_buf + offset, &bvec);
+ memcpy_from_bvec(dev->bounce_buf + offset, bvec);
else
memcpy_to_bvec(&bvec, dev->bounce_buf + offset);
}
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f6d90f1ba5..b68ae64a99 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1396,7 +1396,7 @@ static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
return -ENOMEM;
ret = zram_read_page(zram, page, index, NULL);
if (likely(!ret))
- memcpy_to_bvec(bvec, page_address(page) + offset);
+ memcpy_to_bvec(*bvec, page_address(page) + offset);
__free_page(page);
return ret;
}
@@ -1548,7 +1548,7 @@ static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,

ret = zram_read_page(zram, page, index, bio);
if (!ret) {
- memcpy_from_bvec(page_address(page) + offset, bvec);
+ memcpy_from_bvec(page_address(page) + offset, *bvec);
ret = zram_write_page(zram, page, index);
}
__free_page(page);
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index f17a6cf228..7772466d0a 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -744,7 +744,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
}

dst = kmap_local_page(page);
- memcpy_from_bvec(dst, &bv);
+ memcpy_from_bvec(dst, bv);
kunmap_local(dst);
block->vecs[i].bv_page = page;
block->vecs[i].bv_len = bv.bv_len;
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 9503996c63..5db9e31352 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -450,7 +450,7 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
struct bio_vec bv;

rq_for_each_segment(bv, rq, iter) {
- memcpy_from_bvec(buf, &bv);
+ memcpy_from_bvec(buf, bv);
buf += bv.bv_len;
}
}
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ebf8b612ba..ee5965fedf 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -257,9 +257,9 @@ static inline void *bvec_kmap_local(struct bio_vec *bvec)
*
* Must be called on single-page bvecs only.
*/
-static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
+static inline void memcpy_from_bvec(char *to, struct bio_vec bvec)
{
- memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+ memcpy_from_page(to, bvec.bv_page, bvec.bv_offset, bvec.bv_len);
}

/**
@@ -268,9 +268,9 @@ static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
*
* Must be called on single-page bvecs only.
*/
-static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
+static inline void memcpy_to_bvec(struct bio_vec bvec, const char *from)
{
- memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
+ memcpy_to_page(bvec.bv_page, bvec.bv_offset, from, bvec.bv_len);
}

/**
--
2.40.1


2023-06-05 23:05:02

by Randy Dunlap

Subject: Re: [PATCH v2 5/5] block: CONFIG_BLK_DEBUG

Hi,

On 6/5/23 14:27, Kent Overstreet wrote:
> diff --git a/block/Kconfig b/block/Kconfig
> index 86122e459f..611dddb59c 100644
> --- a/block/Kconfig
> +++ b/block/Kconfig
> @@ -165,6 +165,13 @@ config BLK_CGROUP_IOPRIO
> scheduler and block devices process requests. Only some I/O schedulers
> and some block devices support I/O priorities.
>
> +config BLK_DEBUG
> + bool "Extra block layer assertions"
> + depends on DEBUG_KERNEL
> + help
> + Enable extra assertions in the block layer. Currently this is just the
> + bio iterator code, but may be expanded.

I see that block/Kconfig isn't consistent about indenting help text,
but Documentation/process/coding-style.rst says:

10) Kconfig configuration files
-------------------------------

For all of the Kconfig* configuration files throughout the source tree,
the indentation is somewhat different. Lines under a ``config`` definition
are indented with one tab, while help text is indented an additional two
spaces.


so please add 2 spaces after the tab at the beginning of the help text
lines (but not the "help" line itself).
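
For instance, the fragment quoted above would then be indented like this
(one tab for the lines under "config", one tab plus two spaces for the
help text):

config BLK_DEBUG
	bool "Extra block layer assertions"
	depends on DEBUG_KERNEL
	help
	  Enable extra assertions in the block layer. Currently this is just the
	  bio iterator code, but may be expanded.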

--
~Randy

2023-06-06 02:09:02

by Ming Lei

Subject: Re: [PATCH v2 1/5] block: Rework bio_for_each_segment_all()

On Mon, Jun 05, 2023 at 05:27:13PM -0400, Kent Overstreet wrote:
> This patch reworks bio_for_each_segment_all() to be more inline with how
> the other bio iterators work:
>
> - bio_iter_all_peek() now returns a synthesized bio_vec; we don't stash
> one in the iterator and pass a pointer to it - bad. This way makes it
> clearer what's a constructed value vs. a reference to something
> pre-existing, and it also will help with cleaning up and
> consolidating code with bio_for_each_folio_all().

By-value is more readable and cleaner; the current by-reference form was
just for staying aligned with the original interface.

>
> - We now provide bio_for_each_segment_all_continue(), for squashfs:
> this makes their code clearer.

It would probably be better to move the new helper into a separate patch,
but I guess that would be hard to split out of this one.

>
> Signed-off-by: Kent Overstreet <[email protected]>
> Cc: Jens Axboe <[email protected]>
> Cc: [email protected]
> Cc: Ming Lei <[email protected]>
> Cc: Phillip Lougher <[email protected]>
> ---
> block/bio.c | 38 ++++++++++++------------
> block/blk-map.c | 38 ++++++++++++------------
> block/bounce.c | 12 ++++----
> drivers/md/bcache/btree.c | 8 ++---
> drivers/md/dm-crypt.c | 10 +++----
> drivers/md/raid1.c | 4 +--
> fs/btrfs/disk-io.c | 4 +--
> fs/btrfs/extent_io.c | 50 +++++++++++++++----------------
> fs/btrfs/raid56.c | 14 ++++-----
> fs/erofs/zdata.c | 4 +--
> fs/f2fs/data.c | 20 ++++++-------
> fs/gfs2/lops.c | 10 +++----
> fs/gfs2/meta_io.c | 8 ++---
> fs/squashfs/block.c | 48 +++++++++++++++++-------------
> fs/squashfs/lz4_wrapper.c | 17 ++++++-----
> fs/squashfs/lzo_wrapper.c | 17 ++++++-----
> fs/squashfs/xz_wrapper.c | 19 ++++++------
> fs/squashfs/zlib_wrapper.c | 18 ++++++-----
> fs/squashfs/zstd_wrapper.c | 19 ++++++------
> include/linux/bio.h | 28 ++++++++++++-----
> include/linux/bvec.h | 61 ++++++++++++++++++++++----------------
> 21 files changed, 242 insertions(+), 205 deletions(-)
>
> diff --git a/block/bio.c b/block/bio.c
> index 798cc4cf3b..2af9f50643 100644
> --- a/block/bio.c
> +++ b/block/bio.c
> @@ -1163,13 +1163,13 @@ EXPORT_SYMBOL(bio_add_folio);
>
> void __bio_release_pages(struct bio *bio, bool mark_dirty)
> {
> - struct bvec_iter_all iter_all;
> - struct bio_vec *bvec;
> + struct bvec_iter_all iter;
> + struct bio_vec bvec;
>
> - bio_for_each_segment_all(bvec, bio, iter_all) {
> - if (mark_dirty && !PageCompound(bvec->bv_page))
> - set_page_dirty_lock(bvec->bv_page);
> - bio_release_page(bio, bvec->bv_page);
> + bio_for_each_segment_all(bvec, bio, iter) {
> + if (mark_dirty && !PageCompound(bvec.bv_page))
> + set_page_dirty_lock(bvec.bv_page);
> + bio_release_page(bio, bvec.bv_page);
> }
> }
> EXPORT_SYMBOL_GPL(__bio_release_pages);
> @@ -1435,11 +1435,11 @@ EXPORT_SYMBOL(bio_copy_data);
>
> void bio_free_pages(struct bio *bio)
> {
> - struct bio_vec *bvec;
> - struct bvec_iter_all iter_all;
> + struct bvec_iter_all iter;
> + struct bio_vec bvec;
>
> - bio_for_each_segment_all(bvec, bio, iter_all)
> - __free_page(bvec->bv_page);
> + bio_for_each_segment_all(bvec, bio, iter)
> + __free_page(bvec.bv_page);
> }
> EXPORT_SYMBOL(bio_free_pages);
>
> @@ -1474,12 +1474,12 @@ EXPORT_SYMBOL(bio_free_pages);
> */
> void bio_set_pages_dirty(struct bio *bio)
> {
> - struct bio_vec *bvec;
> - struct bvec_iter_all iter_all;
> + struct bvec_iter_all iter;
> + struct bio_vec bvec;
>
> - bio_for_each_segment_all(bvec, bio, iter_all) {
> - if (!PageCompound(bvec->bv_page))
> - set_page_dirty_lock(bvec->bv_page);
> + bio_for_each_segment_all(bvec, bio, iter) {
> + if (!PageCompound(bvec.bv_page))
> + set_page_dirty_lock(bvec.bv_page);
> }
> }
>
> @@ -1522,12 +1522,12 @@ static void bio_dirty_fn(struct work_struct *work)
>
> void bio_check_pages_dirty(struct bio *bio)
> {
> - struct bio_vec *bvec;
> + struct bvec_iter_all iter;
> + struct bio_vec bvec;
> unsigned long flags;
> - struct bvec_iter_all iter_all;
>
> - bio_for_each_segment_all(bvec, bio, iter_all) {
> - if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
> + bio_for_each_segment_all(bvec, bio, iter) {
> + if (!PageDirty(bvec.bv_page) && !PageCompound(bvec.bv_page))
> goto defer;
> }
>
> diff --git a/block/blk-map.c b/block/blk-map.c
> index 3551c3ff17..524d84011f 100644
> --- a/block/blk-map.c
> +++ b/block/blk-map.c
> @@ -47,21 +47,21 @@ static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
> */
> static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
> {
> - struct bio_vec *bvec;
> - struct bvec_iter_all iter_all;
> + struct bvec_iter_all bv_iter;
> + struct bio_vec bvec;
>
> - bio_for_each_segment_all(bvec, bio, iter_all) {
> + bio_for_each_segment_all(bvec, bio, bv_iter) {
> ssize_t ret;
>
> - ret = copy_page_from_iter(bvec->bv_page,
> - bvec->bv_offset,
> - bvec->bv_len,
> + ret = copy_page_from_iter(bvec.bv_page,
> + bvec.bv_offset,
> + bvec.bv_len,
> iter);
>
> if (!iov_iter_count(iter))
> break;
>
> - if (ret < bvec->bv_len)
> + if (ret < bvec.bv_len)
> return -EFAULT;
> }
>
> @@ -78,21 +78,21 @@ static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
> */
> static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
> {
> - struct bio_vec *bvec;
> - struct bvec_iter_all iter_all;
> + struct bvec_iter_all bv_iter;
> + struct bio_vec bvec;
>
> - bio_for_each_segment_all(bvec, bio, iter_all) {
> + bio_for_each_segment_all(bvec, bio, bv_iter) {
> ssize_t ret;
>
> - ret = copy_page_to_iter(bvec->bv_page,
> - bvec->bv_offset,
> - bvec->bv_len,
> + ret = copy_page_to_iter(bvec.bv_page,
> + bvec.bv_offset,
> + bvec.bv_len,
> &iter);
>
> if (!iov_iter_count(&iter))
> break;
>
> - if (ret < bvec->bv_len)
> + if (ret < bvec.bv_len)
> return -EFAULT;
> }
>
> @@ -443,12 +443,12 @@ static void bio_copy_kern_endio(struct bio *bio)
> static void bio_copy_kern_endio_read(struct bio *bio)
> {
> char *p = bio->bi_private;
> - struct bio_vec *bvec;
> - struct bvec_iter_all iter_all;
> + struct bvec_iter_all iter;
> + struct bio_vec bvec;
>
> - bio_for_each_segment_all(bvec, bio, iter_all) {
> - memcpy_from_bvec(p, bvec);
> - p += bvec->bv_len;
> + bio_for_each_segment_all(bvec, bio, iter) {
> + memcpy_from_bvec(p, &bvec);
> + p += bvec.bv_len;
> }
>
> bio_copy_kern_endio(bio);
> diff --git a/block/bounce.c b/block/bounce.c
> index 7cfcb242f9..e701832d76 100644
> --- a/block/bounce.c
> +++ b/block/bounce.c
> @@ -102,18 +102,18 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
> static void bounce_end_io(struct bio *bio)
> {
> struct bio *bio_orig = bio->bi_private;
> - struct bio_vec *bvec, orig_vec;
> + struct bio_vec bvec, orig_vec;
> struct bvec_iter orig_iter = bio_orig->bi_iter;
> - struct bvec_iter_all iter_all;
> + struct bvec_iter_all iter;
>
> /*
> * free up bounce indirect pages used
> */
> - bio_for_each_segment_all(bvec, bio, iter_all) {
> + bio_for_each_segment_all(bvec, bio, iter) {
> orig_vec = bio_iter_iovec(bio_orig, orig_iter);
> - if (bvec->bv_page != orig_vec.bv_page) {
> - dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
> - mempool_free(bvec->bv_page, &page_pool);
> + if (bvec.bv_page != orig_vec.bv_page) {
> + dec_zone_page_state(bvec.bv_page, NR_BOUNCE);
> + mempool_free(bvec.bv_page, &page_pool);
> }
> bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
> }
> diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
> index 147c493a98..98ce12b239 100644
> --- a/drivers/md/bcache/btree.c
> +++ b/drivers/md/bcache/btree.c
> @@ -373,12 +373,12 @@ static void do_btree_node_write(struct btree *b)
> bset_sector_offset(&b->keys, i));
>
> if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
> - struct bio_vec *bv;
> + struct bio_vec bv;
> void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
> - struct bvec_iter_all iter_all;
> + struct bvec_iter_all iter;
>
> - bio_for_each_segment_all(bv, b->bio, iter_all) {
> - memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
> + bio_for_each_segment_all(bv, b->bio, iter) {
> + memcpy(page_address(bv.bv_page), addr, PAGE_SIZE);
> addr += PAGE_SIZE;
> }
>
> diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
> index 8b47b913ee..8ffc225f7b 100644
> --- a/drivers/md/dm-crypt.c
> +++ b/drivers/md/dm-crypt.c
> @@ -1713,12 +1713,12 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
>
> static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
> {
> - struct bio_vec *bv;
> - struct bvec_iter_all iter_all;
> + struct bvec_iter_all iter;
> + struct bio_vec bv;
>
> - bio_for_each_segment_all(bv, clone, iter_all) {
> - BUG_ON(!bv->bv_page);
> - mempool_free(bv->bv_page, &cc->page_pool);
> + bio_for_each_segment_all(bv, clone, iter) {
> + BUG_ON(!bv.bv_page);
> + mempool_free(bv.bv_page, &cc->page_pool);
> }
> }
>
> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
> index 68a9e2d998..4f58cae37e 100644
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -2188,7 +2188,7 @@ static void process_checks(struct r1bio *r1_bio)
> blk_status_t status = sbio->bi_status;
> struct page **ppages = get_resync_pages(pbio)->pages;
> struct page **spages = get_resync_pages(sbio)->pages;
> - struct bio_vec *bi;
> + struct bio_vec bi;
> int page_len[RESYNC_PAGES] = { 0 };
> struct bvec_iter_all iter_all;
>
> @@ -2198,7 +2198,7 @@ static void process_checks(struct r1bio *r1_bio)
> sbio->bi_status = 0;
>
> bio_for_each_segment_all(bi, sbio, iter_all)
> - page_len[j++] = bi->bv_len;
> + page_len[j++] = bi.bv_len;
>
> if (!status) {
> for (j = vcnt; j-- ; ) {
> diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
> index fbf9006c62..10326f978a 100644
> --- a/fs/btrfs/disk-io.c
> +++ b/fs/btrfs/disk-io.c
> @@ -3807,12 +3807,12 @@ ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
> static void btrfs_end_super_write(struct bio *bio)
> {
> struct btrfs_device *device = bio->bi_private;
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> struct bvec_iter_all iter_all;
> struct page *page;
>
> bio_for_each_segment_all(bvec, bio, iter_all) {
> - page = bvec->bv_page;
> + page = bvec.bv_page;
>
> if (bio->bi_status) {
> btrfs_warn_rl_in_rcu(device->fs_info,
> diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
> index a1adadd5d2..e3b7fbbad7 100644
> --- a/fs/btrfs/extent_io.c
> +++ b/fs/btrfs/extent_io.c
> @@ -573,34 +573,34 @@ static void end_bio_extent_writepage(struct btrfs_bio *bbio)
> {
> struct bio *bio = &bbio->bio;
> int error = blk_status_to_errno(bio->bi_status);
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> u64 start;
> u64 end;
> struct bvec_iter_all iter_all;
>
> ASSERT(!bio_flagged(bio, BIO_CLONED));
> bio_for_each_segment_all(bvec, bio, iter_all) {
> - struct page *page = bvec->bv_page;
> + struct page *page = bvec.bv_page;
> struct inode *inode = page->mapping->host;
> struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
> const u32 sectorsize = fs_info->sectorsize;
>
> /* Our read/write should always be sector aligned. */
> - if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
> + if (!IS_ALIGNED(bvec.bv_offset, sectorsize))
> btrfs_err(fs_info,
> "partial page write in btrfs with offset %u and length %u",
> - bvec->bv_offset, bvec->bv_len);
> - else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
> + bvec.bv_offset, bvec.bv_len);
> + else if (!IS_ALIGNED(bvec.bv_len, sectorsize))
> btrfs_info(fs_info,
> "incomplete page write with offset %u and length %u",
> - bvec->bv_offset, bvec->bv_len);
> + bvec.bv_offset, bvec.bv_len);
>
> - start = page_offset(page) + bvec->bv_offset;
> - end = start + bvec->bv_len - 1;
> + start = page_offset(page) + bvec.bv_offset;
> + end = start + bvec.bv_len - 1;
>
> end_extent_writepage(page, error, start, end);
>
> - btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
> + btrfs_page_clear_writeback(fs_info, page, start, bvec.bv_len);
> }
>
> bio_put(bio);
> @@ -728,7 +728,7 @@ static struct extent_buffer *find_extent_buffer_readpage(
> static void end_bio_extent_readpage(struct btrfs_bio *bbio)
> {
> struct bio *bio = &bbio->bio;
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> struct processed_extent processed = { 0 };
> /*
> * The offset to the beginning of a bio, since one bio can never be
> @@ -741,7 +741,7 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio)
> ASSERT(!bio_flagged(bio, BIO_CLONED));
> bio_for_each_segment_all(bvec, bio, iter_all) {
> bool uptodate = !bio->bi_status;
> - struct page *page = bvec->bv_page;
> + struct page *page = bvec.bv_page;
> struct inode *inode = page->mapping->host;
> struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
> const u32 sectorsize = fs_info->sectorsize;
> @@ -761,19 +761,19 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio)
> * for unaligned offsets, and an error if they don't add up to
> * a full sector.
> */
> - if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
> + if (!IS_ALIGNED(bvec.bv_offset, sectorsize))
> btrfs_err(fs_info,
> "partial page read in btrfs with offset %u and length %u",
> - bvec->bv_offset, bvec->bv_len);
> - else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
> + bvec.bv_offset, bvec.bv_len);
> + else if (!IS_ALIGNED(bvec.bv_offset + bvec.bv_len,
> sectorsize))
> btrfs_info(fs_info,
> "incomplete page read with offset %u and length %u",
> - bvec->bv_offset, bvec->bv_len);
> + bvec.bv_offset, bvec.bv_len);
>
> - start = page_offset(page) + bvec->bv_offset;
> - end = start + bvec->bv_len - 1;
> - len = bvec->bv_len;
> + start = page_offset(page) + bvec.bv_offset;
> + end = start + bvec.bv_len - 1;
> + len = bvec.bv_len;
>
> mirror = bbio->mirror_num;
> if (uptodate && !is_data_inode(inode) &&
> @@ -1879,7 +1879,7 @@ static void end_bio_subpage_eb_writepage(struct btrfs_bio *bbio)
> {
> struct bio *bio = &bbio->bio;
> struct btrfs_fs_info *fs_info;
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> struct bvec_iter_all iter_all;
>
> fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
> @@ -1887,12 +1887,12 @@ static void end_bio_subpage_eb_writepage(struct btrfs_bio *bbio)
>
> ASSERT(!bio_flagged(bio, BIO_CLONED));
> bio_for_each_segment_all(bvec, bio, iter_all) {
> - struct page *page = bvec->bv_page;
> - u64 bvec_start = page_offset(page) + bvec->bv_offset;
> - u64 bvec_end = bvec_start + bvec->bv_len - 1;
> + struct page *page = bvec.bv_page;
> + u64 bvec_start = page_offset(page) + bvec.bv_offset;
> + u64 bvec_end = bvec_start + bvec.bv_len - 1;
> u64 cur_bytenr = bvec_start;
>
> - ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize));
> + ASSERT(IS_ALIGNED(bvec.bv_len, fs_info->nodesize));
>
> /* Iterate through all extent buffers in the range */
> while (cur_bytenr <= bvec_end) {
> @@ -1936,14 +1936,14 @@ static void end_bio_subpage_eb_writepage(struct btrfs_bio *bbio)
> static void end_bio_extent_buffer_writepage(struct btrfs_bio *bbio)
> {
> struct bio *bio = &bbio->bio;
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> struct extent_buffer *eb;
> int done;
> struct bvec_iter_all iter_all;
>
> ASSERT(!bio_flagged(bio, BIO_CLONED));
> bio_for_each_segment_all(bvec, bio, iter_all) {
> - struct page *page = bvec->bv_page;
> + struct page *page = bvec.bv_page;
>
> eb = (struct extent_buffer *)page->private;
> BUG_ON(!eb);
> diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
> index 2fab37f062..8da6097c42 100644
> --- a/fs/btrfs/raid56.c
> +++ b/fs/btrfs/raid56.c
> @@ -1393,7 +1393,7 @@ static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
> static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
> {
> const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> struct bvec_iter_all iter_all;
>
> ASSERT(!bio_flagged(bio, BIO_CLONED));
> @@ -1402,9 +1402,9 @@ static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
> struct sector_ptr *sector;
> int pgoff;
>
> - for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
> + for (pgoff = bvec.bv_offset; pgoff - bvec.bv_offset < bvec.bv_len;
> pgoff += sectorsize) {
> - sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
> + sector = find_stripe_sector(rbio, bvec.bv_page, pgoff);
> ASSERT(sector);
> if (sector)
> sector->uptodate = 1;
> @@ -1458,7 +1458,7 @@ static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
> {
> struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
> int total_sector_nr = get_bio_sector_nr(rbio, bio);
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> struct bvec_iter_all iter_all;
>
> /* No data csum for the whole stripe, no need to verify. */
> @@ -1472,8 +1472,8 @@ static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
> bio_for_each_segment_all(bvec, bio, iter_all) {
> int bv_offset;
>
> - for (bv_offset = bvec->bv_offset;
> - bv_offset < bvec->bv_offset + bvec->bv_len;
> + for (bv_offset = bvec.bv_offset;
> + bv_offset < bvec.bv_offset + bvec.bv_len;
> bv_offset += fs_info->sectorsize, total_sector_nr++) {
> u8 csum_buf[BTRFS_CSUM_SIZE];
> u8 *expected_csum = rbio->csum_buf +
> @@ -1484,7 +1484,7 @@ static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
> if (!test_bit(total_sector_nr, rbio->csum_bitmap))
> continue;
>
> - ret = btrfs_check_sector_csum(fs_info, bvec->bv_page,
> + ret = btrfs_check_sector_csum(fs_info, bvec.bv_page,
> bv_offset, csum_buf, expected_csum);
> if (ret < 0)
> set_bit(total_sector_nr, rbio->error_bitmap);
> diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
> index 45f21db230..fc0ce6d685 100644
> --- a/fs/erofs/zdata.c
> +++ b/fs/erofs/zdata.c
> @@ -1648,11 +1648,11 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
> {
> struct z_erofs_decompressqueue *q = bio->bi_private;
> blk_status_t err = bio->bi_status;
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> struct bvec_iter_all iter_all;
>
> bio_for_each_segment_all(bvec, bio, iter_all) {
> - struct page *page = bvec->bv_page;
> + struct page *page = bvec.bv_page;
>
> DBG_BUGON(PageUptodate(page));
> DBG_BUGON(z_erofs_page_is_invalidated(page));
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 7165b1202f..d687e11bdf 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -139,12 +139,12 @@ struct bio_post_read_ctx {
> */
> static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
> {
> - struct bio_vec *bv;
> + struct bio_vec bv;
> struct bvec_iter_all iter_all;
> struct bio_post_read_ctx *ctx = bio->bi_private;
>
> bio_for_each_segment_all(bv, bio, iter_all) {
> - struct page *page = bv->bv_page;
> + struct page *page = bv.bv_page;
>
> if (f2fs_is_compressed_page(page)) {
> if (ctx && !ctx->decompression_attempted)
> @@ -189,11 +189,11 @@ static void f2fs_verify_bio(struct work_struct *work)
> * as those were handled separately by f2fs_end_read_compressed_page().
> */
> if (may_have_compressed_pages) {
> - struct bio_vec *bv;
> + struct bio_vec bv;
> struct bvec_iter_all iter_all;
>
> bio_for_each_segment_all(bv, bio, iter_all) {
> - struct page *page = bv->bv_page;
> + struct page *page = bv.bv_page;
>
> if (!f2fs_is_compressed_page(page) &&
> !fsverity_verify_page(page)) {
> @@ -241,13 +241,13 @@ static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
> static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
> bool in_task)
> {
> - struct bio_vec *bv;
> + struct bio_vec bv;
> struct bvec_iter_all iter_all;
> bool all_compressed = true;
> block_t blkaddr = ctx->fs_blkaddr;
>
> bio_for_each_segment_all(bv, ctx->bio, iter_all) {
> - struct page *page = bv->bv_page;
> + struct page *page = bv.bv_page;
>
> if (f2fs_is_compressed_page(page))
> f2fs_end_read_compressed_page(page, false, blkaddr,
> @@ -327,7 +327,7 @@ static void f2fs_read_end_io(struct bio *bio)
> static void f2fs_write_end_io(struct bio *bio)
> {
> struct f2fs_sb_info *sbi;
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> struct bvec_iter_all iter_all;
>
> iostat_update_and_unbind_ctx(bio);
> @@ -337,7 +337,7 @@ static void f2fs_write_end_io(struct bio *bio)
> bio->bi_status = BLK_STS_IOERR;
>
> bio_for_each_segment_all(bvec, bio, iter_all) {
> - struct page *page = bvec->bv_page;
> + struct page *page = bvec.bv_page;
> enum count_type type = WB_DATA_TYPE(page);
>
> if (page_private_dummy(page)) {
> @@ -583,7 +583,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
> static bool __has_merged_page(struct bio *bio, struct inode *inode,
> struct page *page, nid_t ino)
> {
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> struct bvec_iter_all iter_all;
>
> if (!bio)
> @@ -593,7 +593,7 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
> return true;
>
> bio_for_each_segment_all(bvec, bio, iter_all) {
> - struct page *target = bvec->bv_page;
> + struct page *target = bvec.bv_page;
>
> if (fscrypt_is_bounce_page(target)) {
> target = fscrypt_pagecache_page(target);
> diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
> index 1902413d5d..7f62fe8eb7 100644
> --- a/fs/gfs2/lops.c
> +++ b/fs/gfs2/lops.c
> @@ -202,7 +202,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
> static void gfs2_end_log_write(struct bio *bio)
> {
> struct gfs2_sbd *sdp = bio->bi_private;
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> struct page *page;
> struct bvec_iter_all iter_all;
>
> @@ -217,9 +217,9 @@ static void gfs2_end_log_write(struct bio *bio)
> }
>
> bio_for_each_segment_all(bvec, bio, iter_all) {
> - page = bvec->bv_page;
> + page = bvec.bv_page;
> if (page_has_buffers(page))
> - gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
> + gfs2_end_log_write_bh(sdp, &bvec, bio->bi_status);
> else
> mempool_free(page, gfs2_page_pool);
> }
> @@ -395,11 +395,11 @@ static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
> static void gfs2_end_log_read(struct bio *bio)
> {
> struct page *page;
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> struct bvec_iter_all iter_all;
>
> bio_for_each_segment_all(bvec, bio, iter_all) {
> - page = bvec->bv_page;
> + page = bvec.bv_page;
> if (bio->bi_status) {
> int err = blk_status_to_errno(bio->bi_status);
>
> diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
> index 924361fa51..832572784e 100644
> --- a/fs/gfs2/meta_io.c
> +++ b/fs/gfs2/meta_io.c
> @@ -193,15 +193,15 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
>
> static void gfs2_meta_read_endio(struct bio *bio)
> {
> - struct bio_vec *bvec;
> + struct bio_vec bvec;
> struct bvec_iter_all iter_all;
>
> bio_for_each_segment_all(bvec, bio, iter_all) {
> - struct page *page = bvec->bv_page;
> + struct page *page = bvec.bv_page;
> struct buffer_head *bh = page_buffers(page);
> - unsigned int len = bvec->bv_len;
> + unsigned int len = bvec.bv_len;
>
> - while (bh_offset(bh) < bvec->bv_offset)
> + while (bh_offset(bh) < bvec.bv_offset)
> bh = bh->b_this_page;
> do {
> struct buffer_head *next = bh->b_this_page;
> diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
> index bed3bb8b27..83e8b44518 100644
> --- a/fs/squashfs/block.c
> +++ b/fs/squashfs/block.c
> @@ -35,30 +35,33 @@ static int copy_bio_to_actor(struct bio *bio,
> int offset, int req_length)
> {
> void *actor_addr;
> - struct bvec_iter_all iter_all = {};
> - struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
> + struct bvec_iter_all iter;
> + struct bio_vec bvec;
> int copied_bytes = 0;
> int actor_offset = 0;
> + int bytes_to_copy;
>
> squashfs_actor_nobuff(actor);
> actor_addr = squashfs_first_page(actor);
>
> - if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
> - return 0;
> + bvec_iter_all_init(&iter);
> + bio_iter_all_advance(bio, &iter, offset);
>
> - while (copied_bytes < req_length) {
> - int bytes_to_copy = min_t(int, bvec->bv_len - offset,
> + while (copied_bytes < req_length &&
> + iter.idx < bio->bi_vcnt) {
> + bvec = bio_iter_all_peek(bio, &iter);
> +
> + bytes_to_copy = min_t(int, bvec.bv_len,
> PAGE_SIZE - actor_offset);
>
> bytes_to_copy = min_t(int, bytes_to_copy,
> req_length - copied_bytes);
> if (!IS_ERR(actor_addr))
> - memcpy(actor_addr + actor_offset, bvec_virt(bvec) +
> - offset, bytes_to_copy);
> + memcpy(actor_addr + actor_offset, bvec_virt(&bvec),
> + bytes_to_copy);
>
> actor_offset += bytes_to_copy;
> copied_bytes += bytes_to_copy;
> - offset += bytes_to_copy;
>
> if (actor_offset >= PAGE_SIZE) {
> actor_addr = squashfs_next_page(actor);
> @@ -66,11 +69,8 @@ static int copy_bio_to_actor(struct bio *bio,
> break;
> actor_offset = 0;
> }
> - if (offset >= bvec->bv_len) {
> - if (!bio_next_segment(bio, &iter_all))
> - break;
> - offset = 0;
> - }
> +
> + bio_iter_all_advance(bio, &iter, bytes_to_copy);
> }
> squashfs_finish_page(actor);
> return copied_bytes;
> @@ -159,8 +159,10 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
> * Metadata block.
> */
> const u8 *data;
> - struct bvec_iter_all iter_all = {};
> - struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
> + struct bvec_iter_all iter;
> + struct bio_vec bvec;
> +
> + bvec_iter_all_init(&iter);
>
> if (index + 2 > msblk->bytes_used) {
> res = -EIO;
> @@ -170,21 +172,25 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
> if (res)
> goto out;
>
> - if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
> + bvec = bio_iter_all_peek(bio, &iter);
> +
> + if (WARN_ON_ONCE(!bvec.bv_len)) {
> res = -EIO;
> goto out_free_bio;
> }
> /* Extract the length of the metadata block */
> - data = bvec_virt(bvec);
> + data = bvec_virt(&bvec);
> length = data[offset];
> - if (offset < bvec->bv_len - 1) {
> + if (offset < bvec.bv_len - 1) {
> length |= data[offset + 1] << 8;
> } else {
> - if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
> + bio_iter_all_advance(bio, &iter, bvec.bv_len);
> +
> + if (WARN_ON_ONCE(!bvec.bv_len)) {
> res = -EIO;
> goto out_free_bio;
> }
> - data = bvec_virt(bvec);
> + data = bvec_virt(&bvec);
> length |= data[0] << 8;
> }
> bio_free_pages(bio);
> diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
> index 49797729f1..bd0dd787d2 100644
> --- a/fs/squashfs/lz4_wrapper.c
> +++ b/fs/squashfs/lz4_wrapper.c
> @@ -92,20 +92,23 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
> struct bio *bio, int offset, int length,
> struct squashfs_page_actor *output)
> {
> - struct bvec_iter_all iter_all = {};
> - struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
> + struct bvec_iter_all iter;
> + struct bio_vec bvec;
> struct squashfs_lz4 *stream = strm;
> void *buff = stream->input, *data;
> int bytes = length, res;
>
> - while (bio_next_segment(bio, &iter_all)) {
> - int avail = min(bytes, ((int)bvec->bv_len) - offset);
> + bvec_iter_all_init(&iter);
> + bio_iter_all_advance(bio, &iter, offset);
>
> - data = bvec_virt(bvec);
> - memcpy(buff, data + offset, avail);
> + bio_for_each_segment_all_continue(bvec, bio, iter) {
> + unsigned avail = min_t(unsigned, bytes, bvec.bv_len);
> +
> + memcpy(buff, bvec_virt(&bvec), avail);
> buff += avail;
> bytes -= avail;
> - offset = 0;
> + if (!bytes)
> + break;
> }
>
> res = LZ4_decompress_safe(stream->input, stream->output,
> diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
> index d216aeefa8..bccfcfa12e 100644
> --- a/fs/squashfs/lzo_wrapper.c
> +++ b/fs/squashfs/lzo_wrapper.c
> @@ -66,21 +66,24 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
> struct bio *bio, int offset, int length,
> struct squashfs_page_actor *output)
> {
> - struct bvec_iter_all iter_all = {};
> - struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
> + struct bvec_iter_all iter;
> + struct bio_vec bvec;
> struct squashfs_lzo *stream = strm;
> void *buff = stream->input, *data;
> int bytes = length, res;
> size_t out_len = output->length;
>
> - while (bio_next_segment(bio, &iter_all)) {
> - int avail = min(bytes, ((int)bvec->bv_len) - offset);
> + bvec_iter_all_init(&iter);
> + bio_iter_all_advance(bio, &iter, offset);
>
> - data = bvec_virt(bvec);
> - memcpy(buff, data + offset, avail);
> + bio_for_each_segment_all_continue(bvec, bio, iter) {
> + unsigned avail = min_t(unsigned, bytes, bvec.bv_len);
> +
> + memcpy(buff, bvec_virt(&bvec), avail);
> buff += avail;
> bytes -= avail;
> - offset = 0;
> + if (!bytes)
> + break;
> }
>
> res = lzo1x_decompress_safe(stream->input, (size_t)length,
> diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
> index 6c49481a2f..6cf0e11e3b 100644
> --- a/fs/squashfs/xz_wrapper.c
> +++ b/fs/squashfs/xz_wrapper.c
> @@ -120,8 +120,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
> struct bio *bio, int offset, int length,
> struct squashfs_page_actor *output)
> {
> - struct bvec_iter_all iter_all = {};
> - struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
> + struct bvec_iter_all iter;
> int total = 0, error = 0;
> struct squashfs_xz *stream = strm;
>
> @@ -136,26 +135,28 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
> goto finish;
> }
>
> + bvec_iter_all_init(&iter);
> + bio_iter_all_advance(bio, &iter, offset);
> +
> for (;;) {
> enum xz_ret xz_err;
>
> if (stream->buf.in_pos == stream->buf.in_size) {
> - const void *data;
> - int avail;
> + struct bio_vec bvec = bio_iter_all_peek(bio, &iter);
> + unsigned avail = min_t(unsigned, length, bvec.bv_len);
>
> - if (!bio_next_segment(bio, &iter_all)) {
> + if (iter.idx >= bio->bi_vcnt) {
> /* XZ_STREAM_END must be reached. */
> error = -EIO;
> break;
> }
>
> - avail = min(length, ((int)bvec->bv_len) - offset);
> - data = bvec_virt(bvec);
> length -= avail;
> - stream->buf.in = data + offset;
> + stream->buf.in = bvec_virt(&bvec);
> stream->buf.in_size = avail;
> stream->buf.in_pos = 0;
> - offset = 0;
> +
> + bio_iter_all_advance(bio, &iter, avail);
> }
>
> if (stream->buf.out_pos == stream->buf.out_size) {
> diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
> index cbb7afe7bc..981ca5e410 100644
> --- a/fs/squashfs/zlib_wrapper.c
> +++ b/fs/squashfs/zlib_wrapper.c
> @@ -53,8 +53,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
> struct bio *bio, int offset, int length,
> struct squashfs_page_actor *output)
> {
> - struct bvec_iter_all iter_all = {};
> - struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
> + struct bvec_iter_all iter;
> int zlib_init = 0, error = 0;
> z_stream *stream = strm;
>
> @@ -67,25 +66,28 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
> goto finish;
> }
>
> + bvec_iter_all_init(&iter);
> + bio_iter_all_advance(bio, &iter, offset);
> +
> for (;;) {
> int zlib_err;
>
> if (stream->avail_in == 0) {
> - const void *data;
> + struct bio_vec bvec = bio_iter_all_peek(bio, &iter);
> int avail;
>
> - if (!bio_next_segment(bio, &iter_all)) {
> + if (iter.idx >= bio->bi_vcnt) {

It is bad to expose bio/iterator internals here; you could probably have
bio_iter_all_peek() return bool by moving this check into the helper, and
pass a bvec reference to bio_iter_all_peek() instead.
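
Just to sketch what I mean (completely untested), something like:

        static inline bool bio_iter_all_peek(const struct bio *bio,
                                             struct bvec_iter_all *iter,
                                             struct bio_vec *bv)
        {
                /* No more segments left in this bio */
                if (iter->idx >= bio->bi_vcnt)
                        return false;

                *bv = bvec_iter_all_peek(bio->bi_io_vec, iter);
                return true;
        }

then callers here could just do:

        if (!bio_iter_all_peek(bio, &iter, &bvec)) {
                /* Z_STREAM_END must be reached. */
                error = -EIO;
                break;
        }

without touching bio->bi_vcnt directly.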

> /* Z_STREAM_END must be reached. */
> error = -EIO;
> break;
> }
>
> - avail = min(length, ((int)bvec->bv_len) - offset);
> - data = bvec_virt(bvec);
> + avail = min_t(unsigned, length, bvec.bv_len);
> length -= avail;
> - stream->next_in = data + offset;
> + stream->next_in = bvec_virt(&bvec);
> stream->avail_in = avail;
> - offset = 0;
> +
> + bio_iter_all_advance(bio, &iter, avail);
> }
>
> if (stream->avail_out == 0) {
> diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c
> index 0e407c4d8b..658e5d462a 100644
> --- a/fs/squashfs/zstd_wrapper.c
> +++ b/fs/squashfs/zstd_wrapper.c
> @@ -68,8 +68,7 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
> int error = 0;
> zstd_in_buffer in_buf = { NULL, 0, 0 };
> zstd_out_buffer out_buf = { NULL, 0, 0 };
> - struct bvec_iter_all iter_all = {};
> - struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
> + struct bvec_iter_all iter;
>
> stream = zstd_init_dstream(wksp->window_size, wksp->mem, wksp->mem_size);
>
> @@ -85,25 +84,27 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
> goto finish;
> }
>
> + bvec_iter_all_init(&iter);
> + bio_iter_all_advance(bio, &iter, offset);
> +
> for (;;) {
> size_t zstd_err;
>
> if (in_buf.pos == in_buf.size) {
> - const void *data;
> - int avail;
> + struct bio_vec bvec = bio_iter_all_peek(bio, &iter);
> + unsigned avail = min_t(unsigned, length, bvec.bv_len);
>
> - if (!bio_next_segment(bio, &iter_all)) {
> + if (iter.idx >= bio->bi_vcnt) {

Same as above.

> error = -EIO;
> break;
> }
>
> - avail = min(length, ((int)bvec->bv_len) - offset);
> - data = bvec_virt(bvec);
> length -= avail;
> - in_buf.src = data + offset;
> + in_buf.src = bvec_virt(&bvec);
> in_buf.size = avail;
> in_buf.pos = 0;
> - offset = 0;
> +
> + bio_iter_all_advance(bio, &iter, avail);
> }
>
> if (out_buf.pos == out_buf.size) {
> diff --git a/include/linux/bio.h b/include/linux/bio.h
> index 8588bcfbc6..39d8a1cc6e 100644
> --- a/include/linux/bio.h
> +++ b/include/linux/bio.h
> @@ -78,22 +78,34 @@ static inline void *bio_data(struct bio *bio)
> return NULL;
> }
>
> -static inline bool bio_next_segment(const struct bio *bio,
> - struct bvec_iter_all *iter)
> +static inline struct bio_vec bio_iter_all_peek(const struct bio *bio,
> + struct bvec_iter_all *iter)
> {
> - if (iter->idx >= bio->bi_vcnt)
> - return false;
> + return bvec_iter_all_peek(bio->bi_io_vec, iter);
> +}
>
> - bvec_advance(&bio->bi_io_vec[iter->idx], iter);
> - return true;
> +static inline void bio_iter_all_advance(const struct bio *bio,
> + struct bvec_iter_all *iter,
> + unsigned bytes)
> +{
> + bvec_iter_all_advance(bio->bi_io_vec, iter, bytes);
> }
>
> +#define bio_for_each_segment_all_continue(bvl, bio, iter) \
> + for (; \
> + iter.idx < bio->bi_vcnt && \
> + ((bvl = bio_iter_all_peek(bio, &iter)), true); \
> + bio_iter_all_advance((bio), &iter, bvl.bv_len))
> +
> /*
> * drivers should _never_ use the all version - the bio may have been split
> * before it got to the driver and the driver won't own all of it
> */
> -#define bio_for_each_segment_all(bvl, bio, iter) \
> - for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
> +#define bio_for_each_segment_all(bvl, bio, iter) \
> + for (bvec_iter_all_init(&iter); \
> + iter.idx < (bio)->bi_vcnt && \
> + ((bvl = bio_iter_all_peek((bio), &iter)), true); \

The 'iter.idx < (bio)->bi_vcnt' check could be moved into
bio_iter_all_peek(), passing &bvl to bio_iter_all_peek() at the same time.
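
Assuming bio_iter_all_peek() is changed to return bool and fill in the
bvec as suggested above, the loop could then become something like
(untested):

#define bio_for_each_segment_all(bvl, bio, iter)			\
	for (bvec_iter_all_init(&iter);					\
	     bio_iter_all_peek((bio), &iter, &(bvl));			\
	     bio_iter_all_advance((bio), &iter, (bvl).bv_len))

so the bi_vcnt check lives in one place.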

> + bio_iter_all_advance((bio), &iter, bvl.bv_len))
>
> static inline void bio_advance_iter(const struct bio *bio,
> struct bvec_iter *iter, unsigned int bytes)
> diff --git a/include/linux/bvec.h b/include/linux/bvec.h
> index 555aae5448..635fb54143 100644
> --- a/include/linux/bvec.h
> +++ b/include/linux/bvec.h
> @@ -85,12 +85,6 @@ struct bvec_iter {
> current bvec */
> } __packed;
>
> -struct bvec_iter_all {
> - struct bio_vec bv;
> - int idx;
> - unsigned done;
> -};
> -
> /*
> * various member access, note that bio_data should of course not be used
> * on highmem page vectors
> @@ -184,7 +178,10 @@ static inline void bvec_iter_advance_single(const struct bio_vec *bv,
> ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
> bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
>
> -/* for iterating one bio from start to end */
> +/*
> + * bvec_iter_all: for advancing over a bio as it was originally created, but
> + * with the usual bio_for_each_segment interface - nonstandard, do not use:
> + */
> #define BVEC_ITER_ALL_INIT (struct bvec_iter) \
> { \
> .bi_sector = 0, \
> @@ -193,33 +190,45 @@ static inline void bvec_iter_advance_single(const struct bio_vec *bv,
> .bi_bvec_done = 0, \
> }
>
> -static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
> +/*
> + * bvec_iter_all: for advancing over individual pages in a bio, as it was when
> + * it was first created:
> + */
> +struct bvec_iter_all {
> + int idx;
> + unsigned done;
> +};
> +
> +static inline void bvec_iter_all_init(struct bvec_iter_all *iter_all)
> {
> iter_all->done = 0;
> iter_all->idx = 0;
> +}
>
> - return &iter_all->bv;
> +static inline struct bio_vec bvec_iter_all_peek(const struct bio_vec *bvec,
> + struct bvec_iter_all *iter)
> +{
> + struct bio_vec bv = bvec[iter->idx];
> +
> + bv.bv_offset += iter->done;
> + bv.bv_len -= iter->done;
> +
> + bv.bv_page += bv.bv_offset >> PAGE_SHIFT;
> + bv.bv_offset &= ~PAGE_MASK;
> + bv.bv_len = min_t(unsigned, PAGE_SIZE - bv.bv_offset, bv.bv_len);
> +
> + return bv;
> }

Here I think the compiler optimizes away the final bvec copy by writing
into the caller's bvec directly, and I ran fio/t/io_uring and did not see
any IOPS degradation.

As mentioned above, this can be made explicit by passing &bvl to
bvec_iter_all_peek(); the caller of bvec_iter_all_peek() then knows whether
a real bvec was peeked, and the 'if (iter.idx >= bio->bi_vcnt)' check can
be dropped from external callers.

Otherwise, this patch looks fine to me.


thanks,
Ming


2023-06-06 07:40:50

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH v2 4/5] block: Change memcpy_(to|from)_bvec to pass bvec by value

Hi Kent,

kernel test robot noticed the following build errors:

[auto build test ERROR on axboe-block/for-next]
[cannot apply to kdave/for-next device-mapper-dm/for-next gfs2/for-next tytso-ext4/dev linus/master v6.4-rc5 next-20230606]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url: https://github.com/intel-lab-lkp/linux/commits/Kent-Overstreet/block-Rework-bio_for_each_folio_all/20230606-052850
base: https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
patch link: https://lore.kernel.org/r/20230605212717.2570570-4-kent.overstreet%40linux.dev
patch subject: [PATCH v2 4/5] block: Change memcpy_(to|from)_bvec to pass bvec by value
config: alpha-allyesconfig (https://download.01.org/0day-ci/archive/20230606/[email protected]/config)
compiler: alpha-linux-gcc (GCC) 12.3.0
reproduce (this is a W=1 build):
mkdir -p ~/bin
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
git remote add axboe-block https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
git fetch axboe-block for-next
git checkout axboe-block/for-next
b4 shazam https://lore.kernel.org/r/[email protected]
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.3.0 ~/bin/make.cross W=1 O=build_dir ARCH=alpha olddefconfig
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.3.0 ~/bin/make.cross W=1 O=build_dir ARCH=alpha SHELL=/bin/bash drivers/scsi/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <[email protected]>
| Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/

All errors (new ones prefixed by >>):

drivers/scsi/aha1542.c: In function 'aha1542_free_cmd':
>> drivers/scsi/aha1542.c:270:40: error: incompatible type for argument 1 of 'memcpy_to_bvec'
270 | memcpy_to_bvec(&bv, buf);
| ^~~
| |
| struct bio_vec *
In file included from include/linux/blk_types.h:10,
from include/linux/blkdev.h:9,
from include/scsi/scsi_cmnd.h:6,
from drivers/scsi/aha1542.c:23:
include/linux/bvec.h:271:50: note: expected 'struct bio_vec' but argument is of type 'struct bio_vec *'
271 | static inline void memcpy_to_bvec(struct bio_vec bvec, const char *from)
| ~~~~~~~~~~~~~~~^~~~


vim +/memcpy_to_bvec +270 drivers/scsi/aha1542.c

^1da177e4c3f41 Linus Torvalds 2005-04-16 258
1794ef2b150dd5 Christoph Hellwig 2018-11-10 259 static void aha1542_free_cmd(struct scsi_cmnd *cmd)
1794ef2b150dd5 Christoph Hellwig 2018-11-10 260 {
1794ef2b150dd5 Christoph Hellwig 2018-11-10 261 struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
1794ef2b150dd5 Christoph Hellwig 2018-11-10 262
2f2fef022c3e7a Christoph Hellwig 2021-03-31 263 if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
11bf4ec580737c Bart Van Assche 2021-08-09 264 struct request *rq = scsi_cmd_to_rq(cmd);
2f2fef022c3e7a Christoph Hellwig 2021-03-31 265 void *buf = acmd->data_buffer;
2f2fef022c3e7a Christoph Hellwig 2021-03-31 266 struct req_iterator iter;
2f2fef022c3e7a Christoph Hellwig 2021-03-31 267 struct bio_vec bv;
2f2fef022c3e7a Christoph Hellwig 2021-03-31 268
11bf4ec580737c Bart Van Assche 2021-08-09 269 rq_for_each_segment(bv, rq, iter) {
e6ab6113526aa4 Christoph Hellwig 2021-10-18 @270 memcpy_to_bvec(&bv, buf);
2f2fef022c3e7a Christoph Hellwig 2021-03-31 271 buf += bv.bv_len;
2f2fef022c3e7a Christoph Hellwig 2021-03-31 272 }
1794ef2b150dd5 Christoph Hellwig 2018-11-10 273 }
1794ef2b150dd5 Christoph Hellwig 2018-11-10 274
1794ef2b150dd5 Christoph Hellwig 2018-11-10 275 scsi_dma_unmap(cmd);
1794ef2b150dd5 Christoph Hellwig 2018-11-10 276 }
1794ef2b150dd5 Christoph Hellwig 2018-11-10 277

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

2023-06-06 07:54:27

by Kent Overstreet

[permalink] [raw]
Subject: Re: [PATCH v2 1/5] block: Rework bio_for_each_segment_all()

On Tue, Jun 06, 2023 at 10:03:45AM +0800, Ming Lei wrote:
> On Mon, Jun 05, 2023 at 05:27:13PM -0400, Kent Overstreet wrote:
> > This patch reworks bio_for_each_segment_all() to be more inline with how
> > the other bio iterators work:
> >
> > - bio_iter_all_peek() now returns a synthesized bio_vec; we don't stash
> > one in the iterator and pass a pointer to it - bad. This way makes it
> > clearer what's a constructed value vs. a reference to something
> > pre-existing, and it also will help with cleaning up and
> > consolidating code with bio_for_each_folio_all().
>
> by-value is more readable & clean, the current by-reference is just for aligning
> with original interface.

This is the fourth or fifth time this patch set has been out on the
list, and I specifically asked you to review it a while back, and you're
just now looking at it?

2023-06-09 21:13:03

by Kent Overstreet

[permalink] [raw]
Subject: Re: [PATCH v2 1/5] block: Rework bio_for_each_segment_all()

Jens - I've updated the branch at
https://evilpiepirate.org/git/bcachefs.git block-for-bcachefs

which now has the patches you'll want to take for now (the bio iterator
rework) first; the only change since the last posting is bio_iter_all_end(),
based on Ming's feedback.