Hi all,
this series cleans up the xfs writepage code and then lifts it to
fs/iomap/ so that it can be used by other file systems.
Note that in this version a lot of the Reviewed-by tags got dropped
as the patch organization changed quite a bit (the actual final
result changes very little, though).
Changes since v6:
- actually add trace.c to the patch
- move back to the old order that massages XFS into shape and then
lifts the code to iomap
- cleanup iomap_ioend_compare
- cleanup the add_to_ioend checks
Changes since v5:
- move the tracing code to fs/iomap/trace.[ch]
- fix a bisection issue with the tracing code
- add an assert that xfs_end_io now only gets "complicated" completions
- better document the iomap_writeback_ops methods in iomap.h
Changes since v4:
- rebased on top of 5.4-rc1
- drop the addition of list_pop / list_pop_entry
- re-split a few patches to better fit Darrick's scheme of keeping the
  iomap additions separate from the XFS switchover
Changes since v3:
- re-split the pages to add new code to iomap and then switch xfs to
it later (Darrick)
Changes since v2:
- rebased to v5.3-rc1
- folded in a few changes from the gfs2 enablement series
Changes since v1:
- rebased to the latest xfs for-next tree
- keep the preallocated transactions for size updates
- rename list_pop to list_pop_entry and related cleanups
- better document the nofs context handling
- document that the iomap tracepoints are not a stable API
Currently we don't overwrite the flags field in the iomap in
xfs_bmbt_to_iomap. This works fine with 0-initialized iomaps on the
stack, but is harmful once we want to be able to reuse an iomap in the
writeback code. Replace the shared parameter with a set of initial
flags and thus ensure the flags field is always reinitialized.
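In caller terms the conversion looks like this (condensed from the
hunks below, using the delalloc path's variables; not new code):

	/* Before: the shared bool only sets IOMAP_F_SHARED, and any
	 * flags already in the iomap leak through. */
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared);

	/* After: callers collect the initial flags up front, so the
	 * flags field is fully reinitialized on every call. */
	u16 iomap_flags = 0;

	if (shared)
		iomap_flags |= IOMAP_F_SHARED;
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);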
Signed-off-by: Christoph Hellwig <[email protected]>
---
fs/xfs/xfs_iomap.c | 28 +++++++++++++++++-----------
fs/xfs/xfs_iomap.h | 2 +-
fs/xfs/xfs_pnfs.c | 2 +-
3 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index f780e223b118..54c9ec7ad337 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -54,7 +54,7 @@ xfs_bmbt_to_iomap(
struct xfs_inode *ip,
struct iomap *iomap,
struct xfs_bmbt_irec *imap,
- bool shared)
+ u16 flags)
{
struct xfs_mount *mp = ip->i_mount;
@@ -79,12 +79,11 @@ xfs_bmbt_to_iomap(
iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
+ iomap->flags = flags;
if (xfs_ipincount(ip) &&
(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
iomap->flags |= IOMAP_F_DIRTY;
- if (shared)
- iomap->flags |= IOMAP_F_SHARED;
return 0;
}
@@ -540,6 +539,7 @@ xfs_file_iomap_begin_delay(
struct xfs_iext_cursor icur, ccur;
xfs_fsblock_t prealloc_blocks = 0;
bool eof = false, cow_eof = false, shared = false;
+ u16 iomap_flags = 0;
int whichfork = XFS_DATA_FORK;
int error = 0;
@@ -707,7 +707,7 @@ xfs_file_iomap_begin_delay(
* Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
* them out if the write happens to fail.
*/
- iomap->flags |= IOMAP_F_NEW;
+ iomap_flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, count, whichfork,
whichfork == XFS_DATA_FORK ? &imap : &cmap);
done:
@@ -715,14 +715,17 @@ xfs_file_iomap_begin_delay(
if (imap.br_startoff > offset_fsb) {
xfs_trim_extent(&cmap, offset_fsb,
imap.br_startoff - offset_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
+ error = xfs_bmbt_to_iomap(ip, iomap, &cmap,
+ IOMAP_F_SHARED);
goto out_unlock;
}
/* ensure we only report blocks we have a reservation for */
xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
shared = true;
}
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
+ if (shared)
+ iomap_flags |= IOMAP_F_SHARED;
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
@@ -930,6 +933,7 @@ xfs_file_iomap_begin(
xfs_fileoff_t offset_fsb, end_fsb;
int nimaps = 1, error = 0;
bool shared = false;
+ u16 iomap_flags = 0;
unsigned lockmode;
if (XFS_FORCED_SHUTDOWN(mp))
@@ -1045,11 +1049,13 @@ xfs_file_iomap_begin(
if (error)
return error;
- iomap->flags |= IOMAP_F_NEW;
+ iomap_flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
out_finish:
- return xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
+ if (shared)
+ iomap_flags |= IOMAP_F_SHARED;
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
out_found:
ASSERT(nimaps);
@@ -1193,7 +1199,7 @@ xfs_seek_iomap_begin(
if (data_fsb < cow_fsb + cmap.br_blockcount)
end_fsb = min(end_fsb, data_fsb);
xfs_trim_extent(&cmap, offset_fsb, end_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
+ error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
/*
* This is a COW extent, so we must probe the page cache
* because there could be dirty page cache being backed
@@ -1215,7 +1221,7 @@ xfs_seek_iomap_begin(
imap.br_state = XFS_EXT_NORM;
done:
xfs_trim_extent(&imap, offset_fsb, end_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
out_unlock:
xfs_iunlock(ip, lockmode);
return error;
@@ -1261,7 +1267,7 @@ xfs_xattr_iomap_begin(
if (error)
return error;
ASSERT(nimaps);
- return xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
}
const struct iomap_ops xfs_xattr_iomap_ops = {
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 5c2f6aa6d78f..71d0ae460c44 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -16,7 +16,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
- struct xfs_bmbt_irec *, bool shared);
+ struct xfs_bmbt_irec *, u16);
xfs_extlen_t xfs_eof_alignment(struct xfs_inode *ip, xfs_extlen_t extsize);
static inline xfs_filblks_t
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index a339bd5fa260..9c96493be9e0 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -178,7 +178,7 @@ xfs_fs_map_blocks(
}
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
*device_generation = mp->m_generation;
return error;
out_unlock:
--
2.20.1
Don't set IOMAP_F_NEW if we COW over an existing allocated range, as
these aren't strictly new allocations. This is required to be able to
use IOMAP_F_NEW to zero newly allocated blocks, which is needed for
the iomap code to fully support file systems that don't do delayed
allocations or use unwritten extents.
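Condensed from the hunk below: only data fork delalloc conversions are
flagged as new, since COW fork allocations sit over existing data that
must not be zeroed:

	if (whichfork == XFS_DATA_FORK)
		iomap_flags |= IOMAP_F_NEW;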
Signed-off-by: Christoph Hellwig <[email protected]>
---
fs/xfs/xfs_iomap.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 54c9ec7ad337..c0a492353826 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -707,9 +707,12 @@ xfs_file_iomap_begin_delay(
* Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
* them out if the write happens to fail.
*/
- iomap_flags |= IOMAP_F_NEW;
- trace_xfs_iomap_alloc(ip, offset, count, whichfork,
- whichfork == XFS_DATA_FORK ? &imap : &cmap);
+ if (whichfork == XFS_DATA_FORK) {
+ iomap_flags |= IOMAP_F_NEW;
+ trace_xfs_iomap_alloc(ip, offset, count, whichfork, &imap);
+ } else {
+ trace_xfs_iomap_alloc(ip, offset, count, whichfork, &cmap);
+ }
done:
if (whichfork == XFS_COW_FORK) {
if (imap.br_startoff > offset_fsb) {
--
2.20.1
In preparation for moving the XFS writeback code to fs/iomap/, switch
it to use struct iomap instead of the XFS-specific struct xfs_bmbt_irec.
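The main difference between the two structures is units: struct
xfs_bmbt_irec counts file system blocks, while struct iomap counts
bytes. A condensed sketch of the translation that xfs_bmbt_to_iomap
already performs (existing helper, not new code):

	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);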
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Carlos Maiolino <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
---
fs/xfs/libxfs/xfs_bmap.c | 14 +++++--
fs/xfs/libxfs/xfs_bmap.h | 3 +-
fs/xfs/xfs_aops.c | 82 +++++++++++++++++++---------------------
fs/xfs/xfs_aops.h | 2 +-
4 files changed, 50 insertions(+), 51 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 02469d59c787..ef75e223cb70 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -34,6 +34,7 @@
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
+#include "xfs_iomap.h"
kmem_zone_t *xfs_bmap_free_item_zone;
@@ -4456,16 +4457,21 @@ int
xfs_bmapi_convert_delalloc(
struct xfs_inode *ip,
int whichfork,
- xfs_fileoff_t offset_fsb,
- struct xfs_bmbt_irec *imap,
+ xfs_off_t offset,
+ struct iomap *iomap,
unsigned int *seq)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
struct xfs_bmalloca bma = { NULL };
+ u16 flags = 0;
struct xfs_trans *tp;
int error;
+ if (whichfork == XFS_COW_FORK)
+ flags |= IOMAP_F_SHARED;
+
/*
* Space for the extent and indirect blocks was reserved when the
* delalloc extent was created so there's no need to do so here.
@@ -4495,7 +4501,7 @@ xfs_bmapi_convert_delalloc(
* the extent. Just return the real extent at this offset.
*/
if (!isnullstartblock(bma.got.br_startblock)) {
- *imap = bma.got;
+ xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
*seq = READ_ONCE(ifp->if_seq);
goto out_trans_cancel;
}
@@ -4528,7 +4534,7 @@ xfs_bmapi_convert_delalloc(
XFS_STATS_INC(mp, xs_xstrat_quick);
ASSERT(!isnullstartblock(bma.got.br_startblock));
- *imap = bma.got;
+ xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
*seq = READ_ONCE(ifp->if_seq);
if (whichfork == XFS_COW_FORK)
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index e2798c6f3a5f..14d25e0b7d9c 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -228,8 +228,7 @@ int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
struct xfs_bmbt_irec *got, struct xfs_iext_cursor *cur,
int eof);
int xfs_bmapi_convert_delalloc(struct xfs_inode *ip, int whichfork,
- xfs_fileoff_t offset_fsb, struct xfs_bmbt_irec *imap,
- unsigned int *seq);
+ xfs_off_t offset, struct iomap *iomap, unsigned int *seq);
int xfs_bmap_add_extent_unwritten_real(struct xfs_trans *tp,
struct xfs_inode *ip, int whichfork,
struct xfs_iext_cursor *icur, struct xfs_btree_cur **curp,
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f16d5f196c6b..91899de2be09 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -22,7 +22,7 @@
* structure owned by writepages passed to individual writepage calls
*/
struct xfs_writepage_ctx {
- struct xfs_bmbt_irec imap;
+ struct iomap iomap;
int fork;
unsigned int data_seq;
unsigned int cow_seq;
@@ -267,7 +267,7 @@ xfs_end_ioend(
*/
if (ioend->io_fork == XFS_COW_FORK)
error = xfs_reflink_end_cow(ip, offset, size);
- else if (ioend->io_state == XFS_EXT_UNWRITTEN)
+ else if (ioend->io_type == IOMAP_UNWRITTEN)
error = xfs_iomap_write_unwritten(ip, offset, size, false);
else
ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
@@ -300,8 +300,8 @@ xfs_ioend_can_merge(
return false;
if ((ioend->io_fork == XFS_COW_FORK) ^ (next->io_fork == XFS_COW_FORK))
return false;
- if ((ioend->io_state == XFS_EXT_UNWRITTEN) ^
- (next->io_state == XFS_EXT_UNWRITTEN))
+ if ((ioend->io_type == IOMAP_UNWRITTEN) ^
+ (next->io_type == IOMAP_UNWRITTEN))
return false;
if (ioend->io_offset + ioend->io_size != next->io_offset)
return false;
@@ -403,7 +403,7 @@ xfs_end_bio(
unsigned long flags;
if (ioend->io_fork == XFS_COW_FORK ||
- ioend->io_state == XFS_EXT_UNWRITTEN ||
+ ioend->io_type == IOMAP_UNWRITTEN ||
ioend->io_append_trans != NULL) {
spin_lock_irqsave(&ip->i_ioend_lock, flags);
if (list_empty(&ip->i_ioend_list))
@@ -423,10 +423,10 @@ static bool
xfs_imap_valid(
struct xfs_writepage_ctx *wpc,
struct xfs_inode *ip,
- xfs_fileoff_t offset_fsb)
+ loff_t offset)
{
- if (offset_fsb < wpc->imap.br_startoff ||
- offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
+ if (offset < wpc->iomap.offset ||
+ offset >= wpc->iomap.offset + wpc->iomap.length)
return false;
/*
* If this is a COW mapping, it is sufficient to check that the mapping
@@ -453,7 +453,7 @@ xfs_imap_valid(
/*
* Pass in a dellalloc extent and convert it to real extents, return the real
- * extent that maps offset_fsb in wpc->imap.
+ * extent that maps offset_fsb in wpc->iomap.
*
* The current page is held locked so nothing could have removed the block
* backing offset_fsb, although it could have moved from the COW to the data
@@ -463,23 +463,23 @@ static int
xfs_convert_blocks(
struct xfs_writepage_ctx *wpc,
struct xfs_inode *ip,
- xfs_fileoff_t offset_fsb)
+ loff_t offset)
{
int error;
/*
- * Attempt to allocate whatever delalloc extent currently backs
- * offset_fsb and put the result into wpc->imap. Allocate in a loop
- * because it may take several attempts to allocate real blocks for a
- * contiguous delalloc extent if free space is sufficiently fragmented.
+ * Attempt to allocate whatever delalloc extent currently backs offset
+ * and put the result into wpc->iomap. Allocate in a loop because it
+ * may take several attempts to allocate real blocks for a contiguous
+ * delalloc extent if free space is sufficiently fragmented.
*/
do {
- error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
- &wpc->imap, wpc->fork == XFS_COW_FORK ?
+ error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset,
+ &wpc->iomap, wpc->fork == XFS_COW_FORK ?
&wpc->cow_seq : &wpc->data_seq);
if (error)
return error;
- } while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);
+ } while (wpc->iomap.offset + wpc->iomap.length <= offset);
return 0;
}
@@ -519,7 +519,7 @@ xfs_map_blocks(
* against concurrent updates and provides a memory barrier on the way
* out that ensures that we always see the current value.
*/
- if (xfs_imap_valid(wpc, ip, offset_fsb))
+ if (xfs_imap_valid(wpc, ip, offset))
return 0;
/*
@@ -552,7 +552,7 @@ xfs_map_blocks(
* No COW extent overlap. Revalidate now that we may have updated
* ->cow_seq. If the data mapping is still valid, we're done.
*/
- if (xfs_imap_valid(wpc, ip, offset_fsb)) {
+ if (xfs_imap_valid(wpc, ip, offset)) {
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return 0;
}
@@ -592,11 +592,11 @@ xfs_map_blocks(
isnullstartblock(imap.br_startblock))
goto allocate_blocks;
- wpc->imap = imap;
+ xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
return 0;
allocate_blocks:
- error = xfs_convert_blocks(wpc, ip, offset_fsb);
+ error = xfs_convert_blocks(wpc, ip, offset);
if (error) {
/*
* If we failed to find the extent in the COW fork we might have
@@ -616,12 +616,15 @@ xfs_map_blocks(
* original delalloc one. Trim the return extent to the next COW
* boundary again to force a re-lookup.
*/
- if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
- cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
- wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;
+ if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
+ loff_t cow_offset = XFS_FSB_TO_B(mp, cow_fsb);
+
+ if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
+ wpc->iomap.length = cow_offset - wpc->iomap.offset;
+ }
- ASSERT(wpc->imap.br_startoff <= offset_fsb);
- ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
+ ASSERT(wpc->iomap.offset <= offset);
+ ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
return 0;
}
@@ -664,7 +667,7 @@ xfs_submit_ioend(
/* Reserve log space if we might write beyond the on-disk inode size. */
if (!status &&
(ioend->io_fork == XFS_COW_FORK ||
- ioend->io_state != XFS_EXT_UNWRITTEN) &&
+ ioend->io_type != IOMAP_UNWRITTEN) &&
xfs_ioend_is_append(ioend) &&
!ioend->io_append_trans)
status = xfs_setfilesize_trans_alloc(ioend);
@@ -693,10 +696,8 @@ xfs_submit_ioend(
static struct xfs_ioend *
xfs_alloc_ioend(
struct inode *inode,
- int fork,
- xfs_exntst_t state,
+ struct xfs_writepage_ctx *wpc,
xfs_off_t offset,
- struct block_device *bdev,
sector_t sector,
struct writeback_control *wbc)
{
@@ -704,7 +705,7 @@ xfs_alloc_ioend(
struct bio *bio;
bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
- bio_set_dev(bio, bdev);
+ bio_set_dev(bio, wpc->iomap.bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
bio->bi_write_hint = inode->i_write_hint;
@@ -712,8 +713,8 @@ xfs_alloc_ioend(
ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
INIT_LIST_HEAD(&ioend->io_list);
- ioend->io_fork = fork;
- ioend->io_state = state;
+ ioend->io_fork = wpc->fork;
+ ioend->io_type = wpc->iomap.type;
ioend->io_inode = inode;
ioend->io_size = 0;
ioend->io_offset = offset;
@@ -761,26 +762,19 @@ xfs_add_to_ioend(
struct writeback_control *wbc,
struct list_head *iolist)
{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- struct block_device *bdev = xfs_find_bdev_for_inode(inode);
+ sector_t sector = iomap_sector(&wpc->iomap, offset);
unsigned len = i_blocksize(inode);
unsigned poff = offset & (PAGE_SIZE - 1);
bool merged, same_page = false;
- sector_t sector;
-
- sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
- ((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);
if (!wpc->ioend ||
wpc->fork != wpc->ioend->io_fork ||
- wpc->imap.br_state != wpc->ioend->io_state ||
+ wpc->iomap.type != wpc->ioend->io_type ||
sector != bio_end_sector(wpc->ioend->io_bio) ||
offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
if (wpc->ioend)
list_add(&wpc->ioend->io_list, iolist);
- wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
- wpc->imap.br_state, offset, bdev, sector, wbc);
+ wpc->ioend = xfs_alloc_ioend(inode, wpc, offset, sector, wbc);
}
merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
@@ -894,7 +888,7 @@ xfs_writepage_map(
error = xfs_map_blocks(wpc, inode, file_offset);
if (error)
break;
- if (wpc->imap.br_startblock == HOLESTARTBLOCK)
+ if (wpc->iomap.type == IOMAP_HOLE)
continue;
xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
&submit_list);
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 45a1ea240cbb..4af8ec0115cd 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -14,7 +14,7 @@ extern struct bio_set xfs_ioend_bioset;
struct xfs_ioend {
struct list_head io_list; /* next ioend in chain */
int io_fork; /* inode fork written back */
- xfs_exntst_t io_state; /* extent state */
+ u16 io_type;
struct inode *io_inode; /* file being written to */
size_t io_size; /* size of the extent */
xfs_off_t io_offset; /* offset in the file */
--
2.20.1
Introduce two nicely abstracted helpers, which can be moved to the
iomap code later. Also use list_first_entry_or_null to simplify the
code a bit.
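The pop-style loop pattern used below (taken from the
xfs_destroy_ioends hunk); list_first_entry_or_null returns NULL once
the list is empty, so no separate list_empty check is needed:

	while ((ioend = list_first_entry_or_null(&tmp, struct xfs_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		xfs_destroy_ioend(ioend, error);
	}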
Signed-off-by: Christoph Hellwig <[email protected]>
---
fs/xfs/xfs_aops.c | 73 +++++++++++++++++++++++++++--------------------
1 file changed, 42 insertions(+), 31 deletions(-)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 91899de2be09..c29ef69d1e51 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -116,6 +116,22 @@ xfs_destroy_ioend(
}
}
+static void
+xfs_destroy_ioends(
+ struct xfs_ioend *ioend,
+ int error)
+{
+ struct list_head tmp;
+
+ list_replace_init(&ioend->io_list, &tmp);
+ xfs_destroy_ioend(ioend, error);
+ while ((ioend = list_first_entry_or_null(&tmp, struct xfs_ioend,
+ io_list))) {
+ list_del_init(&ioend->io_list);
+ xfs_destroy_ioend(ioend, error);
+ }
+}
+
/*
* Fast and loose check if this write could update the on-disk inode size.
*/
@@ -230,7 +246,6 @@ STATIC void
xfs_end_ioend(
struct xfs_ioend *ioend)
{
- struct list_head ioend_list;
struct xfs_inode *ip = XFS_I(ioend->io_inode);
xfs_off_t offset = ioend->io_offset;
size_t size = ioend->io_size;
@@ -275,16 +290,7 @@ xfs_end_ioend(
done:
if (ioend->io_append_trans)
error = xfs_setfilesize_ioend(ioend, error);
- list_replace_init(&ioend->io_list, &ioend_list);
- xfs_destroy_ioend(ioend, error);
-
- while (!list_empty(&ioend_list)) {
- ioend = list_first_entry(&ioend_list, struct xfs_ioend,
- io_list);
- list_del_init(&ioend->io_list);
- xfs_destroy_ioend(ioend, error);
- }
-
+ xfs_destroy_ioends(ioend, error);
memalloc_nofs_restore(nofs_flag);
}
@@ -333,17 +339,18 @@ xfs_ioend_try_merge(
struct xfs_ioend *ioend,
struct list_head *more_ioends)
{
- struct xfs_ioend *next_ioend;
+ struct xfs_ioend *next;
+
+ INIT_LIST_HEAD(&ioend->io_list);
- while (!list_empty(more_ioends)) {
- next_ioend = list_first_entry(more_ioends, struct xfs_ioend,
- io_list);
- if (!xfs_ioend_can_merge(ioend, next_ioend))
+ while ((next = list_first_entry_or_null(more_ioends, struct xfs_ioend,
+ io_list))) {
+ if (!xfs_ioend_can_merge(ioend, next))
break;
- list_move_tail(&next_ioend->io_list, &ioend->io_list);
- ioend->io_size += next_ioend->io_size;
- if (next_ioend->io_append_trans)
- xfs_ioend_merge_append_transactions(ioend, next_ioend);
+ list_move_tail(&next->io_list, &ioend->io_list);
+ ioend->io_size += next->io_size;
+ if (next->io_append_trans)
+ xfs_ioend_merge_append_transactions(ioend, next);
}
}
@@ -366,29 +373,33 @@ xfs_ioend_compare(
return 0;
}
+static void
+xfs_sort_ioends(
+ struct list_head *ioend_list)
+{
+ list_sort(NULL, ioend_list, xfs_ioend_compare);
+}
+
/* Finish all pending io completions. */
void
xfs_end_io(
struct work_struct *work)
{
- struct xfs_inode *ip;
+ struct xfs_inode *ip =
+ container_of(work, struct xfs_inode, i_ioend_work);
struct xfs_ioend *ioend;
- struct list_head completion_list;
+ struct list_head tmp;
unsigned long flags;
- ip = container_of(work, struct xfs_inode, i_ioend_work);
-
spin_lock_irqsave(&ip->i_ioend_lock, flags);
- list_replace_init(&ip->i_ioend_list, &completion_list);
+ list_replace_init(&ip->i_ioend_list, &tmp);
spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
- list_sort(NULL, &completion_list, xfs_ioend_compare);
-
- while (!list_empty(&completion_list)) {
- ioend = list_first_entry(&completion_list, struct xfs_ioend,
- io_list);
+ xfs_sort_ioends(&tmp);
+ while ((ioend = list_first_entry_or_null(&tmp, struct xfs_ioend,
+ io_list))) {
list_del_init(&ioend->io_list);
- xfs_ioend_try_merge(ioend, &completion_list);
+ xfs_ioend_try_merge(ioend, &tmp);
xfs_end_ioend(ioend);
}
}
--
2.20.1
In preparation for moving the writeback code to fs/iomap/, replace the
XFS-specific COW fork concept with the iomap IOMAP_F_SHARED flag.
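The translation applied throughout the hunks below:

	- if (ioend->io_fork == XFS_COW_FORK)
	+ if (ioend->io_flags & IOMAP_F_SHARED)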
Signed-off-by: Christoph Hellwig <[email protected]>
---
fs/xfs/xfs_aops.c | 71 ++++++++++++++++++++++++++++++-----------------
fs/xfs/xfs_aops.h | 2 +-
2 files changed, 46 insertions(+), 27 deletions(-)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index df5955388adc..00fe40b35f72 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -23,7 +23,6 @@
*/
struct xfs_writepage_ctx {
struct iomap iomap;
- int fork;
unsigned int data_seq;
unsigned int cow_seq;
struct xfs_ioend *ioend;
@@ -272,7 +271,7 @@ xfs_end_ioend(
*/
error = blk_status_to_errno(ioend->io_bio->bi_status);
if (unlikely(error)) {
- if (ioend->io_fork == XFS_COW_FORK)
+ if (ioend->io_flags & IOMAP_F_SHARED)
xfs_reflink_cancel_cow_range(ip, offset, size, true);
goto done;
}
@@ -280,7 +279,7 @@ xfs_end_ioend(
/*
* Success: commit the COW or unwritten blocks if needed.
*/
- if (ioend->io_fork == XFS_COW_FORK)
+ if (ioend->io_flags & IOMAP_F_SHARED)
error = xfs_reflink_end_cow(ip, offset, size);
else if (ioend->io_type == IOMAP_UNWRITTEN)
error = xfs_iomap_write_unwritten(ip, offset, size, false);
@@ -304,7 +303,8 @@ xfs_ioend_can_merge(
{
if (ioend->io_bio->bi_status != next->io_bio->bi_status)
return false;
- if ((ioend->io_fork == XFS_COW_FORK) ^ (next->io_fork == XFS_COW_FORK))
+ if ((ioend->io_flags & IOMAP_F_SHARED) ^
+ (next->io_flags & IOMAP_F_SHARED))
return false;
if ((ioend->io_type == IOMAP_UNWRITTEN) ^
(next->io_type == IOMAP_UNWRITTEN))
@@ -404,6 +404,13 @@ xfs_end_io(
}
}
+static inline bool xfs_ioend_needs_workqueue(struct xfs_ioend *ioend)
+{
+ return ioend->io_private ||
+ ioend->io_type == IOMAP_UNWRITTEN ||
+ (ioend->io_flags & IOMAP_F_SHARED);
+}
+
STATIC void
xfs_end_bio(
struct bio *bio)
@@ -413,9 +420,7 @@ xfs_end_bio(
struct xfs_mount *mp = ip->i_mount;
unsigned long flags;
- if (ioend->io_fork == XFS_COW_FORK ||
- ioend->io_type == IOMAP_UNWRITTEN ||
- ioend->io_private) {
+ if (xfs_ioend_needs_workqueue(ioend)) {
spin_lock_irqsave(&ip->i_ioend_lock, flags);
if (list_empty(&ip->i_ioend_list))
WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
@@ -444,7 +449,7 @@ xfs_imap_valid(
* covers the offset. Be careful to check this first because the caller
* can revalidate a COW mapping without updating the data seqno.
*/
- if (wpc->fork == XFS_COW_FORK)
+ if (wpc->iomap.flags & IOMAP_F_SHARED)
return true;
/*
@@ -474,6 +479,7 @@ static int
xfs_convert_blocks(
struct xfs_writepage_ctx *wpc,
struct xfs_inode *ip,
+ int whichfork,
loff_t offset)
{
int error;
@@ -485,8 +491,8 @@ xfs_convert_blocks(
* delalloc extent if free space is sufficiently fragmented.
*/
do {
- error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset,
- &wpc->iomap, wpc->fork == XFS_COW_FORK ?
+ error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
+ &wpc->iomap, whichfork == XFS_COW_FORK ?
&wpc->cow_seq : &wpc->data_seq);
if (error)
return error;
@@ -507,6 +513,7 @@ xfs_map_blocks(
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
xfs_fileoff_t cow_fsb = NULLFILEOFF;
+ int whichfork = XFS_DATA_FORK;
struct xfs_bmbt_irec imap;
struct xfs_iext_cursor icur;
int retries = 0;
@@ -555,7 +562,7 @@ xfs_map_blocks(
wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- wpc->fork = XFS_COW_FORK;
+ whichfork = XFS_COW_FORK;
goto allocate_blocks;
}
@@ -578,8 +585,6 @@ xfs_map_blocks(
wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- wpc->fork = XFS_DATA_FORK;
-
/* landed in a hole or beyond EOF? */
if (imap.br_startoff > offset_fsb) {
imap.br_blockcount = imap.br_startoff - offset_fsb;
@@ -604,10 +609,10 @@ xfs_map_blocks(
goto allocate_blocks;
xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
- trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
+ trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
return 0;
allocate_blocks:
- error = xfs_convert_blocks(wpc, ip, offset);
+ error = xfs_convert_blocks(wpc, ip, whichfork, offset);
if (error) {
/*
* If we failed to find the extent in the COW fork we might have
@@ -616,7 +621,7 @@ xfs_map_blocks(
* the former case, but prevent additional retries to avoid
* looping forever for the latter case.
*/
- if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
+ if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
goto retry;
ASSERT(error != -EAGAIN);
return error;
@@ -627,7 +632,7 @@ xfs_map_blocks(
* original delalloc one. Trim the return extent to the next COW
* boundary again to force a re-lookup.
*/
- if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
+ if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
loff_t cow_offset = XFS_FSB_TO_B(mp, cow_fsb);
if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
@@ -636,7 +641,7 @@ xfs_map_blocks(
ASSERT(wpc->iomap.offset <= offset);
ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
- trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
+ trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
return 0;
}
@@ -670,14 +675,14 @@ xfs_submit_ioend(
nofs_flag = memalloc_nofs_save();
/* Convert CoW extents to regular */
- if (!status && ioend->io_fork == XFS_COW_FORK) {
+ if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
ioend->io_offset, ioend->io_size);
}
/* Reserve log space if we might write beyond the on-disk inode size. */
if (!status &&
- (ioend->io_fork == XFS_COW_FORK ||
+ ((ioend->io_flags & IOMAP_F_SHARED) ||
ioend->io_type != IOMAP_UNWRITTEN) &&
xfs_ioend_is_append(ioend) &&
!ioend->io_private)
@@ -724,8 +729,8 @@ xfs_alloc_ioend(
ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
INIT_LIST_HEAD(&ioend->io_list);
- ioend->io_fork = wpc->fork;
ioend->io_type = wpc->iomap.type;
+ ioend->io_flags = wpc->iomap.flags;
ioend->io_inode = inode;
ioend->io_size = 0;
ioend->io_offset = offset;
@@ -759,6 +764,24 @@ xfs_chain_bio(
return new;
}
+static bool
+xfs_can_add_to_ioend(
+ struct xfs_writepage_ctx *wpc,
+ xfs_off_t offset,
+ sector_t sector)
+{
+ if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
+ (wpc->ioend->io_flags & IOMAP_F_SHARED))
+ return false;
+ if (wpc->iomap.type != wpc->ioend->io_type)
+ return false;
+ if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
+ return false;
+ if (sector != bio_end_sector(wpc->ioend->io_bio))
+ return false;
+ return true;
+}
+
/*
* Test to see if we have an existing ioend structure that we could append to
* first, otherwise finish off the current ioend and start another.
@@ -778,11 +801,7 @@ xfs_add_to_ioend(
unsigned poff = offset & (PAGE_SIZE - 1);
bool merged, same_page = false;
- if (!wpc->ioend ||
- wpc->fork != wpc->ioend->io_fork ||
- wpc->iomap.type != wpc->ioend->io_type ||
- sector != bio_end_sector(wpc->ioend->io_bio) ||
- offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
+ if (!wpc->ioend || !xfs_can_add_to_ioend(wpc, offset, sector)) {
if (wpc->ioend)
list_add(&wpc->ioend->io_list, iolist);
wpc->ioend = xfs_alloc_ioend(inode, wpc, offset, sector, wbc);
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 6a45d675dcba..4a0226cdad4f 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -13,8 +13,8 @@ extern struct bio_set xfs_ioend_bioset;
*/
struct xfs_ioend {
struct list_head io_list; /* next ioend in chain */
- int io_fork; /* inode fork written back */
u16 io_type;
+ u16 io_flags; /* IOMAP_F_* */
struct inode *io_inode; /* file being written to */
size_t io_size; /* size of the extent */
xfs_off_t io_offset; /* offset in the file */
--
2.20.1
File systems like gfs2 don't support delayed allocations or unwritten
extents and thus allocate normal mapped blocks to fill holes. To
cover the case of such file systems allocating new blocks to fill
holes, also zero out mapped blocks that have the IOMAP_F_NEW flag set.
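As a hypothetical sketch (not part of this series), such a file
system's ->iomap_begin would fill a hole with a freshly allocated
mapped extent and mark it new, so the buffered I/O path zeroes the
blocks instead of reading stale disk contents; newly_allocated_addr
is a made-up name:

	iomap->type = IOMAP_MAPPED;
	iomap->addr = newly_allocated_addr;
	iomap->flags |= IOMAP_F_NEW;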
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
---
fs/iomap/buffered-io.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e25901ae3ff4..181ee8477aad 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -203,6 +203,14 @@ iomap_read_inline_data(struct inode *inode, struct page *page,
SetPageUptodate(page);
}
+static inline bool iomap_block_needs_zeroing(struct inode *inode,
+ struct iomap *iomap, loff_t pos)
+{
+ return iomap->type != IOMAP_MAPPED ||
+ (iomap->flags & IOMAP_F_NEW) ||
+ pos >= i_size_read(inode);
+}
+
static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
struct iomap *iomap)
@@ -226,7 +234,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
if (plen == 0)
goto done;
- if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
+ if (iomap_block_needs_zeroing(inode, iomap, pos)) {
zero_user(page, poff, plen);
iomap_set_range_uptodate(page, poff, plen);
goto done;
@@ -532,7 +540,7 @@ iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
struct bio_vec bvec;
struct bio bio;
- if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
+ if (iomap_block_needs_zeroing(inode, iomap, block_start)) {
zero_user_segments(page, poff, from, to, poff + plen);
iomap_set_range_uptodate(page, poff, plen);
return 0;
--
2.20.1
Now that all the writepage code is in the iomap code, there is no
need to keep this structure public.
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Carlos Maiolino <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
---
fs/iomap/buffered-io.c | 17 +++++++++++++++++
include/linux/iomap.h | 17 -----------------
2 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 76e72576f307..c57acc3d3120 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -21,6 +21,23 @@
#include "../internal.h"
+/*
+ * Structure allocated for each page when block size < PAGE_SIZE to track
+ * sub-page uptodate status and I/O completions.
+ */
+struct iomap_page {
+ atomic_t read_count;
+ atomic_t write_count;
+ DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
+};
+
+static inline struct iomap_page *to_iomap_page(struct page *page)
+{
+ if (page_has_private(page))
+ return (struct iomap_page *)page_private(page);
+ return NULL;
+}
+
static struct bio_set iomap_ioend_bioset;
static struct iomap_page *
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 0b399718c387..46ce730b1590 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -134,23 +134,6 @@ loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
unsigned flags, const struct iomap_ops *ops, void *data,
iomap_actor_t actor);
-/*
- * Structure allocate for each page when block size < PAGE_SIZE to track
- * sub-page uptodate status and I/O completions.
- */
-struct iomap_page {
- atomic_t read_count;
- atomic_t write_count;
- DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
-};
-
-static inline struct iomap_page *to_iomap_page(struct page *page)
-{
- if (page_has_private(page))
- return (struct iomap_page *)page_private(page);
- return NULL;
-}
-
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
int iomap_readpage(struct page *page, const struct iomap_ops *ops);
--
2.20.1
Move the initialization of ia and ib to the declaration line and remove
a superfluous else.
Signed-off-by: Christoph Hellwig <[email protected]>
---
fs/iomap/buffered-io.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index c57acc3d3120..0c7f185c8c52 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1226,13 +1226,12 @@ EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
static int
iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b)
{
- struct iomap_ioend *ia, *ib;
+ struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
+ struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
- ia = container_of(a, struct iomap_ioend, io_list);
- ib = container_of(b, struct iomap_ioend, io_list);
if (ia->io_offset < ib->io_offset)
return -1;
- else if (ia->io_offset > ib->io_offset)
+ if (ia->io_offset > ib->io_offset)
return 1;
return 0;
}
--
2.20.1
In preparation for moving the ioend structure to common code, we need
to get rid of the XFS-specific xfs_trans type. Just make it a file
system private void pointer instead.
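The rename applied throughout the hunks below; XFS keeps using the
pointer for its size-update transaction:

	- ioend->io_append_trans = tp;
	+ ioend->io_private = tp;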
Signed-off-by: Christoph Hellwig <[email protected]>
---
fs/xfs/xfs_aops.c | 26 +++++++++++++-------------
fs/xfs/xfs_aops.h | 2 +-
2 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index c29ef69d1e51..df5955388adc 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -153,7 +153,7 @@ xfs_setfilesize_trans_alloc(
if (error)
return error;
- ioend->io_append_trans = tp;
+ ioend->io_private = tp;
/*
* We may pass freeze protection with a transaction. So tell lockdep
@@ -220,7 +220,7 @@ xfs_setfilesize_ioend(
int error)
{
struct xfs_inode *ip = XFS_I(ioend->io_inode);
- struct xfs_trans *tp = ioend->io_append_trans;
+ struct xfs_trans *tp = ioend->io_private;
/*
* The transaction may have been allocated in the I/O submission thread,
@@ -285,10 +285,10 @@ xfs_end_ioend(
else if (ioend->io_type == IOMAP_UNWRITTEN)
error = xfs_iomap_write_unwritten(ip, offset, size, false);
else
- ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
+ ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);
done:
- if (ioend->io_append_trans)
+ if (ioend->io_private)
error = xfs_setfilesize_ioend(ioend, error);
xfs_destroy_ioends(ioend, error);
memalloc_nofs_restore(nofs_flag);
@@ -321,13 +321,13 @@ xfs_ioend_can_merge(
* as it is guaranteed to be clean.
*/
static void
-xfs_ioend_merge_append_transactions(
+xfs_ioend_merge_private(
struct xfs_ioend *ioend,
struct xfs_ioend *next)
{
- if (!ioend->io_append_trans) {
- ioend->io_append_trans = next->io_append_trans;
- next->io_append_trans = NULL;
+ if (!ioend->io_private) {
+ ioend->io_private = next->io_private;
+ next->io_private = NULL;
} else {
xfs_setfilesize_ioend(next, -ECANCELED);
}
@@ -349,8 +349,8 @@ xfs_ioend_try_merge(
break;
list_move_tail(&next->io_list, &ioend->io_list);
ioend->io_size += next->io_size;
- if (next->io_append_trans)
- xfs_ioend_merge_append_transactions(ioend, next);
+ if (next->io_private)
+ xfs_ioend_merge_private(ioend, next);
}
}
@@ -415,7 +415,7 @@ xfs_end_bio(
if (ioend->io_fork == XFS_COW_FORK ||
ioend->io_type == IOMAP_UNWRITTEN ||
- ioend->io_append_trans != NULL) {
+ ioend->io_private) {
spin_lock_irqsave(&ip->i_ioend_lock, flags);
if (list_empty(&ip->i_ioend_list))
WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
@@ -680,7 +680,7 @@ xfs_submit_ioend(
(ioend->io_fork == XFS_COW_FORK ||
ioend->io_type != IOMAP_UNWRITTEN) &&
xfs_ioend_is_append(ioend) &&
- !ioend->io_append_trans)
+ !ioend->io_private)
status = xfs_setfilesize_trans_alloc(ioend);
memalloc_nofs_restore(nofs_flag);
@@ -729,7 +729,7 @@ xfs_alloc_ioend(
ioend->io_inode = inode;
ioend->io_size = 0;
ioend->io_offset = offset;
- ioend->io_append_trans = NULL;
+ ioend->io_private = NULL;
ioend->io_bio = bio;
return ioend;
}
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 4af8ec0115cd..6a45d675dcba 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -18,7 +18,7 @@ struct xfs_ioend {
struct inode *io_inode; /* file being written to */
size_t io_size; /* size of the extent */
xfs_off_t io_offset; /* offset in the file */
- struct xfs_trans *io_append_trans;/* xact. for size update */
+ void *io_private; /* file system private data */
struct bio *io_bio; /* bio being built */
struct bio io_inline_bio; /* MUST BE LAST! */
};
--
2.20.1
Take the xfs writeback code and move it to fs/iomap. A new structure
with three methods is added as the abstraction from the generic writeback
code to the file system. These methods are used to map blocks, submit an
ioend, and cancel a page that encountered an error before it was added to
an ioend.
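A minimal sketch of a consumer of the new interface, using only names
added by this patch; my_map_blocks, my_submit_ioend, my_discard_page
and my_writepages are hypothetical:

	static const struct iomap_writeback_ops my_writeback_ops = {
		.map_blocks	= my_map_blocks,   /* fill wpc->iomap for offset */
		.submit_ioend	= my_submit_ioend, /* optional: completion setup */
		.discard_page	= my_discard_page, /* optional: error cleanup */
	};

	static int
	my_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
	{
		struct iomap_writepage_ctx wpc = { };

		return iomap_writepages(mapping, wbc, &wpc, &my_writeback_ops);
	}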
Signed-off-by: Christoph Hellwig <[email protected]>
---
fs/iomap/buffered-io.c | 564 ++++++++++++++++++++++++++++++++++-
fs/iomap/trace.h | 39 +++
fs/xfs/xfs_aops.c | 662 ++++-------------------------------------
fs/xfs/xfs_aops.h | 17 --
fs/xfs/xfs_super.c | 11 +-
fs/xfs/xfs_trace.h | 39 ---
include/linux/iomap.h | 59 ++++
7 files changed, 722 insertions(+), 669 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index d1620c3f2a4c..00af08006cd3 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016-2018 Christoph Hellwig.
+ * Copyright (c) 2016-2019 Christoph Hellwig.
*/
#include <linux/module.h>
#include <linux/compiler.h>
@@ -12,6 +12,7 @@
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
+#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
@@ -20,6 +21,8 @@
#include "../internal.h"
+static struct bio_set iomap_ioend_bioset;
+
static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
@@ -468,6 +471,8 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
+ trace_iomap_releasepage(page->mapping->host, page, 0, 0);
+
/*
* mm accommodates an old ext3 case where clean pages might not have had
* the dirty bit cleared. Thus, it can send actual dirty pages to
@@ -483,6 +488,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
+ trace_iomap_invalidatepage(page->mapping->host, page, offset, len);
+
/*
* If we are invalidating the entire page, clear the dirty state from it
* and release it to avoid unnecessary buildup of the LRU.
@@ -1084,3 +1091,558 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
+
+static void
+iomap_finish_page_writeback(struct inode *inode, struct bio_vec *bvec,
+ int error)
+{
+ struct iomap_page *iop = to_iomap_page(bvec->bv_page);
+
+ if (error) {
+ SetPageError(bvec->bv_page);
+ mapping_set_error(inode->i_mapping, -EIO);
+ }
+
+ WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
+ WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);
+
+ if (!iop || atomic_dec_and_test(&iop->write_count))
+ end_page_writeback(bvec->bv_page);
+}
+
+/*
+ * We're now finished for good with this ioend structure. Update the page
+ * state, release holds on bios, and finally free up memory. Do not use the
+ * ioend after this.
+ */
+static void
+iomap_finish_ioend(struct iomap_ioend *ioend, int error)
+{
+ struct inode *inode = ioend->io_inode;
+ struct bio *bio = &ioend->io_inline_bio;
+ struct bio *last = ioend->io_bio, *next;
+ u64 start = bio->bi_iter.bi_sector;
+ bool quiet = bio_flagged(bio, BIO_QUIET);
+
+ for (bio = &ioend->io_inline_bio; bio; bio = next) {
+ struct bio_vec *bvec;
+ struct bvec_iter_all iter_all;
+
+ /*
+ * For the last bio, bi_private points to the ioend, so we
+ * need to explicitly end the iteration here.
+ */
+ if (bio == last)
+ next = NULL;
+ else
+ next = bio->bi_private;
+
+ /* walk each page on bio, ending page IO on them */
+ bio_for_each_segment_all(bvec, bio, iter_all)
+ iomap_finish_page_writeback(inode, bvec, error);
+ bio_put(bio);
+ }
+
+ if (unlikely(error && !quiet)) {
+ printk_ratelimited(KERN_ERR
+ "%s: writeback error on sector %llu",
+ inode->i_sb->s_id, start);
+ }
+}
+
+void
+iomap_finish_ioends(struct iomap_ioend *ioend, int error)
+{
+ struct list_head tmp;
+
+ list_replace_init(&ioend->io_list, &tmp);
+ iomap_finish_ioend(ioend, error);
+
+ while (!list_empty(&tmp)) {
+ ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
+ list_del_init(&ioend->io_list);
+ iomap_finish_ioend(ioend, error);
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_finish_ioends);
+
+/*
+ * We can merge two adjacent ioends if they have the same set of work to do.
+ */
+static bool
+iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
+{
+ if (ioend->io_bio->bi_status != next->io_bio->bi_status)
+ return false;
+ if ((ioend->io_flags & IOMAP_F_SHARED) ^
+ (next->io_flags & IOMAP_F_SHARED))
+ return false;
+ if ((ioend->io_type == IOMAP_UNWRITTEN) ^
+ (next->io_type == IOMAP_UNWRITTEN))
+ return false;
+ if (ioend->io_offset + ioend->io_size != next->io_offset)
+ return false;
+ return true;
+}
+
+void
+iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends,
+ void (*merge_private)(struct iomap_ioend *ioend,
+ struct iomap_ioend *next))
+{
+ struct iomap_ioend *next;
+
+ INIT_LIST_HEAD(&ioend->io_list);
+
+ while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
+ io_list))) {
+ if (!iomap_ioend_can_merge(ioend, next))
+ break;
+ list_move_tail(&next->io_list, &ioend->io_list);
+ ioend->io_size += next->io_size;
+ if (next->io_private && merge_private)
+ merge_private(ioend, next);
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
+
+static int
+iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct iomap_ioend *ia, *ib;
+
+ ia = container_of(a, struct iomap_ioend, io_list);
+ ib = container_of(b, struct iomap_ioend, io_list);
+ if (ia->io_offset < ib->io_offset)
+ return -1;
+ else if (ia->io_offset > ib->io_offset)
+ return 1;
+ return 0;
+}
+
+void
+iomap_sort_ioends(struct list_head *ioend_list)
+{
+ list_sort(NULL, ioend_list, iomap_ioend_compare);
+}
+EXPORT_SYMBOL_GPL(iomap_sort_ioends);
+
+static void iomap_writepage_end_bio(struct bio *bio)
+{
+ struct iomap_ioend *ioend = bio->bi_private;
+
+ iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
+}
+
+/*
+ * Submit the bio for an ioend. We are passed an ioend with a bio attached to
+ * it, and we submit that bio. The ioend may be used for multiple bio
+ * submissions, so we only want to allocate an append transaction for the ioend
+ * once. In the case of multiple bio submission, each bio will take an IO
+ * reference to the ioend to ensure that the ioend completion is only done once
+ * all bios have been submitted and the ioend is really done.
+ *
+ * If @error is non-zero, it means that we have a situation where some part of
+ * the submission process has failed after we have marked pages for writeback
+ * and unlocked them. In this situation, we need to fail the bio and ioend
+ * rather than submit it to IO. This typically only happens on a filesystem
+ * shutdown.
+ */
+static int
+iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
+ int error)
+{
+ ioend->io_bio->bi_private = ioend;
+ ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
+
+ if (wpc->ops->submit_ioend)
+ error = wpc->ops->submit_ioend(ioend, error);
+ if (error) {
+ /*
+ * If we are failing the IO now, just mark the ioend with an
+ * error and finish it. This will run IO completion immediately
+ * as there is only one reference to the ioend at this point in
+ * time.
+ */
+ ioend->io_bio->bi_status = errno_to_blk_status(error);
+ bio_endio(ioend->io_bio);
+ return error;
+ }
+
+ submit_bio(ioend->io_bio);
+ return 0;
+}
+
+static struct iomap_ioend *
+iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
+ loff_t offset, sector_t sector, struct writeback_control *wbc)
+{
+ struct iomap_ioend *ioend;
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset);
+ bio_set_dev(bio, wpc->iomap.bdev);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
+ bio->bi_write_hint = inode->i_write_hint;
+ wbc_init_bio(wbc, bio);
+
+ ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
+ INIT_LIST_HEAD(&ioend->io_list);
+ ioend->io_type = wpc->iomap.type;
+ ioend->io_flags = wpc->iomap.flags;
+ ioend->io_inode = inode;
+ ioend->io_size = 0;
+ ioend->io_offset = offset;
+ ioend->io_private = NULL;
+ ioend->io_bio = bio;
+ return ioend;
+}
+
+/*
+ * Allocate a new bio, and chain the old bio to the new one.
+ *
+ * Note that we have to perform the chaining in this unintuitive order
+ * so that the bi_private linkage is set up in the right direction for the
+ * traversal in iomap_finish_ioend().
+ */
+static struct bio *
+iomap_chain_bio(struct bio *prev)
+{
+ struct bio *new;
+
+ new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
+ bio_copy_dev(new, prev); /* also copies over blkcg information */
+ new->bi_iter.bi_sector = bio_end_sector(prev);
+ new->bi_opf = prev->bi_opf;
+ new->bi_write_hint = prev->bi_write_hint;
+
+ bio_chain(prev, new);
+ bio_get(prev); /* for iomap_finish_ioend */
+ submit_bio(prev);
+ return new;
+}
+
+static bool
+iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
+ sector_t sector)
+{
+ if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
+ (wpc->ioend->io_flags & IOMAP_F_SHARED))
+ return false;
+ if (wpc->iomap.type != wpc->ioend->io_type)
+ return false;
+ if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
+ return false;
+ if (sector != bio_end_sector(wpc->ioend->io_bio))
+ return false;
+ return true;
+}
+
+/*
+ * Test to see if we have an existing ioend structure that we could append to
+ * first, otherwise finish off the current ioend and start another.
+ */
+static void
+iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
+ struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
+ struct writeback_control *wbc, struct list_head *iolist)
+{
+ sector_t sector = iomap_sector(&wpc->iomap, offset);
+ unsigned len = i_blocksize(inode);
+ unsigned poff = offset & (PAGE_SIZE - 1);
+ bool merged, same_page = false;
+
+ if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
+ if (wpc->ioend)
+ list_add(&wpc->ioend->io_list, iolist);
+ wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
+ }
+
+ merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
+ &same_page);
+
+ if (iop && !same_page)
+ atomic_inc(&iop->write_count);
+ if (!merged) {
+ if (bio_full(wpc->ioend->io_bio, len)) {
+ wpc->ioend->io_bio =
+ iomap_chain_bio(wpc->ioend->io_bio);
+ }
+ bio_add_page(wpc->ioend->io_bio, page, len, poff);
+ }
+
+ wpc->ioend->io_size += len;
+ wbc_account_cgroup_owner(wbc, page, len);
+}
+
+/*
+ * We implement an immediate ioend submission policy here to avoid needing to
+ * chain multiple ioends and hence nest mempool allocations which can violate
+ * forward progress guarantees we need to provide. The current ioend we are
+ * adding blocks to is cached on the writepage context, and if the new block
+ * does not append to the cached ioend it will create a new ioend and cache that
+ * instead.
+ *
+ * If a new ioend is created and cached, the old ioend is returned and queued
+ * locally for submission once the entire page is processed or an error has been
+ * detected. While ioends are submitted immediately after they are completed,
+ * batching optimisations are provided by higher level block plugging.
+ *
+ * At the end of a writeback pass, there will be a cached ioend remaining on the
+ * writepage context that the caller will need to submit.
+ */
+static int
+iomap_writepage_map(struct iomap_writepage_ctx *wpc,
+ struct writeback_control *wbc, struct inode *inode,
+ struct page *page, u64 end_offset)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+ struct iomap_ioend *ioend, *next;
+ unsigned len = i_blocksize(inode);
+ u64 file_offset; /* file offset of page */
+ int error = 0, count = 0, i;
+ LIST_HEAD(submit_list);
+
+ WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
+ WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);
+
+ /*
+ * Walk through the page to find areas to write back. If we run off the
+ * end of the current map or find the current map invalid, grab a new
+ * one.
+ */
+ for (i = 0, file_offset = page_offset(page);
+ i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
+ i++, file_offset += len) {
+ if (iop && !test_bit(i, iop->uptodate))
+ continue;
+
+ error = wpc->ops->map_blocks(wpc, inode, file_offset);
+ if (error)
+ break;
+ if (wpc->iomap.type == IOMAP_HOLE)
+ continue;
+ iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
+ &submit_list);
+ count++;
+ }
+
+ WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
+ WARN_ON_ONCE(!PageLocked(page));
+ WARN_ON_ONCE(PageWriteback(page));
+
+ /*
+ * On error, we have to fail the ioend here because we may have set
+ * pages under writeback, we have to make sure we run IO completion to
+ * mark the error state of the IO appropriately, so we can't cancel the
+ * ioend directly here. That means we have to mark this page as under
+ * writeback if we included any blocks from it in the ioend chain so
+ * that completion treats it correctly.
+ *
+ * If we didn't include the page in the ioend, then on error we can
+ * simply discard and unlock it as there are no other users of the page
+ * now. The caller will still need to trigger submission of outstanding
+ * ioends on the writepage context so they are treated correctly on
+ * error.
+ */
+ if (unlikely(error)) {
+ if (!count) {
+ if (wpc->ops->discard_page)
+ wpc->ops->discard_page(page);
+ ClearPageUptodate(page);
+ unlock_page(page);
+ goto done;
+ }
+
+ /*
+ * If the page was not fully cleaned, we need to ensure that the
+ * higher layers come back to it correctly. That means we need
+ * to keep the page dirty, and for WB_SYNC_ALL writeback we need
+ * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
+ * so another attempt to write this page in this writeback sweep
+ * will be made.
+ */
+ set_page_writeback_keepwrite(page);
+ } else {
+ clear_page_dirty_for_io(page);
+ set_page_writeback(page);
+ }
+
+ unlock_page(page);
+
+ /*
+ * Preserve the original error if there was one, otherwise catch
+ * submission errors here and propagate into subsequent ioend
+ * submissions.
+ */
+ list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
+ int error2;
+
+ list_del_init(&ioend->io_list);
+ error2 = iomap_submit_ioend(wpc, ioend, error);
+ if (error2 && !error)
+ error = error2;
+ }
+
+ /*
+ * We can end up here with no error and nothing to write only if we race
+ * with a partial page truncate on a sub-page block sized filesystem.
+ */
+ if (!count)
+ end_page_writeback(page);
+done:
+ mapping_set_error(page->mapping, error);
+ return error;
+}
+
+/*
+ * Write out a dirty page.
+ *
+ * For delalloc space on the page we need to allocate space and flush it.
+ * For unwritten space on the page we need to start the conversion to
+ * regular allocated space.
+ */
+static int
+iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
+{
+ struct iomap_writepage_ctx *wpc = data;
+ struct inode *inode = page->mapping->host;
+ pgoff_t end_index;
+ u64 end_offset;
+ loff_t offset;
+
+ trace_iomap_writepage(inode, page, 0, 0);
+
+ /*
+ * Refuse to write the page out if we are called from reclaim context.
+ *
+ * This avoids stack overflows when called from deeply used stacks in
+ * random callers for direct reclaim or memcg reclaim. We explicitly
+ * allow reclaim from kswapd as the stack usage there is relatively low.
+ *
+ * This should never happen except in the case of a VM regression so
+ * warn about it.
+ */
+ if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
+ PF_MEMALLOC))
+ goto redirty;
+
+ /*
+ * Given that we do not allow direct reclaim to call us, we should
+ * never be called while in a filesystem transaction.
+ */
+ if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
+ goto redirty;
+
+ /*
+ * Is this page beyond the end of the file?
+ *
+ * The page index is less than the end_index, adjust the end_offset
+ * to the highest offset that this page should represent.
+ * -----------------------------------------------------
+ * | file mapping | <EOF> |
+ * -----------------------------------------------------
+ * | Page ... | Page N-2 | Page N-1 | Page N | |
+ * ^--------------------------------^----------|--------
+ * | desired writeback range | see else |
+ * ---------------------------------^------------------|
+ */
+ offset = i_size_read(inode);
+ end_index = offset >> PAGE_SHIFT;
+ if (page->index < end_index)
+ end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
+ else {
+ /*
+ * Check whether the page to write out is beyond or straddles
+ * i_size or not.
+ * -------------------------------------------------------
+ * | file mapping | <EOF> |
+ * -------------------------------------------------------
+ * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
+ * ^--------------------------------^-----------|---------
+ * | | Straddles |
+ * ---------------------------------^-----------|--------|
+ */
+ unsigned offset_into_page = offset & (PAGE_SIZE - 1);
+
+ /*
+ * Skip the page if it is fully outside i_size, e.g. due to a
+ * truncate operation that is in progress. We must redirty the
+ * page so that reclaim stops reclaiming it. Otherwise
+ * iomap_vm_releasepage() is called on it and gets confused.
+ *
+ * Note that the end_index is unsigned long, it would overflow
+ * if the given offset is greater than 16TB on 32-bit system
+ * and if we do check the page is fully outside i_size or not
+ * via "if (page->index >= end_index + 1)" as "end_index + 1"
+ * will be evaluated to 0. Hence this page will be redirtied
+ * and be written out repeatedly which would result in an
+ * infinite loop, and the user program that performs this operation
+ * will hang. Instead, we can verify this situation by checking
+ * if the page to write is totally beyond the i_size or if its
+ * offset is just equal to the EOF.
+ */
+ if (page->index > end_index ||
+ (page->index == end_index && offset_into_page == 0))
+ goto redirty;
+
+ /*
+ * The page straddles i_size. It must be zeroed out on each
+ * and every writepage invocation because it may be mmapped.
+ * "A file is mapped in multiples of the page size. For a file
+ * that is not a multiple of the page size, the remaining
+ * memory is zeroed when mapped, and writes to that region are
+ * not written out to the file."
+ */
+ zero_user_segment(page, offset_into_page, PAGE_SIZE);
+
+ /* Adjust the end_offset to the end of file */
+ end_offset = offset;
+ }
+
+ return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
+
+redirty:
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
+ return 0;
+}
+
+int
+iomap_writepage(struct page *page, struct writeback_control *wbc,
+ struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops)
+{
+ int ret;
+
+ wpc->ops = ops;
+ ret = iomap_do_writepage(page, wbc, wpc);
+ if (!wpc->ioend)
+ return ret;
+ return iomap_submit_ioend(wpc, wpc->ioend, ret);
+}
+EXPORT_SYMBOL_GPL(iomap_writepage);
+
+int
+iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
+ struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops)
+{
+ int ret;
+
+ wpc->ops = ops;
+ ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
+ if (!wpc->ioend)
+ return ret;
+ return iomap_submit_ioend(wpc, wpc->ioend, ret);
+}
+EXPORT_SYMBOL_GPL(iomap_writepages);
+
+static int __init iomap_init(void)
+{
+ return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
+ offsetof(struct iomap_ioend, io_inline_bio),
+ BIOSET_NEED_BVECS);
+}
+fs_initcall(iomap_init);
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
index 3900de1d871d..3bc8d0263020 100644
--- a/fs/iomap/trace.h
+++ b/fs/iomap/trace.h
@@ -41,6 +41,45 @@ DEFINE_EVENT(iomap_readpage_class, name, \
DEFINE_READPAGE_EVENT(iomap_readpage);
DEFINE_READPAGE_EVENT(iomap_readpages);
+DECLARE_EVENT_CLASS(iomap_page_class,
+ TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
+ unsigned int len),
+ TP_ARGS(inode, page, off, len),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, ino)
+ __field(pgoff_t, pgoff)
+ __field(loff_t, size)
+ __field(unsigned long, offset)
+ __field(unsigned int, length)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgoff = page_offset(page);
+ __entry->size = i_size_read(inode);
+ __entry->offset = off;
+ __entry->length = len;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
+ "length %x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->pgoff,
+ __entry->size,
+ __entry->offset,
+ __entry->length)
+)
+
+#define DEFINE_PAGE_EVENT(name) \
+DEFINE_EVENT(iomap_page_class, name, \
+ TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
+ unsigned int len), \
+ TP_ARGS(inode, page, off, len))
+DEFINE_PAGE_EVENT(iomap_writepage);
+DEFINE_PAGE_EVENT(iomap_releasepage);
+DEFINE_PAGE_EVENT(iomap_invalidatepage);
+
#endif /* _IOMAP_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index e2033b070f4a..546bd8a48cad 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -18,16 +18,18 @@
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"
-/*
- * structure owned by writepages passed to individual writepage calls
- */
struct xfs_writepage_ctx {
- struct iomap iomap;
+ struct iomap_writepage_ctx ctx;
unsigned int data_seq;
unsigned int cow_seq;
- struct xfs_ioend *ioend;
};
+static inline struct xfs_writepage_ctx *
+XFS_WPC(struct iomap_writepage_ctx *ctx)
+{
+ return container_of(ctx, struct xfs_writepage_ctx, ctx);
+}
+
struct block_device *
xfs_find_bdev_for_inode(
struct inode *inode)
@@ -54,87 +56,10 @@ xfs_find_daxdev_for_inode(
return mp->m_ddev_targp->bt_daxdev;
}
-static void
-xfs_finish_page_writeback(
- struct inode *inode,
- struct bio_vec *bvec,
- int error)
-{
- struct iomap_page *iop = to_iomap_page(bvec->bv_page);
-
- if (error) {
- SetPageError(bvec->bv_page);
- mapping_set_error(inode->i_mapping, -EIO);
- }
-
- ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
- ASSERT(!iop || atomic_read(&iop->write_count) > 0);
-
- if (!iop || atomic_dec_and_test(&iop->write_count))
- end_page_writeback(bvec->bv_page);
-}
-
-/*
- * We're now finished for good with this ioend structure. Update the page
- * state, release holds on bios, and finally free up memory. Do not use the
- * ioend after this.
- */
-STATIC void
-xfs_destroy_ioend(
- struct xfs_ioend *ioend,
- int error)
-{
- struct inode *inode = ioend->io_inode;
- struct bio *bio = &ioend->io_inline_bio;
- struct bio *last = ioend->io_bio, *next;
- u64 start = bio->bi_iter.bi_sector;
- bool quiet = bio_flagged(bio, BIO_QUIET);
-
- for (bio = &ioend->io_inline_bio; bio; bio = next) {
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
-
- /*
- * For the last bio, bi_private points to the ioend, so we
- * need to explicitly end the iteration here.
- */
- if (bio == last)
- next = NULL;
- else
- next = bio->bi_private;
-
- /* walk each page on bio, ending page IO on them */
- bio_for_each_segment_all(bvec, bio, iter_all)
- xfs_finish_page_writeback(inode, bvec, error);
- bio_put(bio);
- }
-
- if (unlikely(error && !quiet)) {
- xfs_err_ratelimited(XFS_I(inode)->i_mount,
- "writeback error on sector %llu", start);
- }
-}
-
-static void
-xfs_destroy_ioends(
- struct xfs_ioend *ioend,
- int error)
-{
- struct list_head tmp;
-
- list_replace_init(&ioend->io_list, &tmp);
- xfs_destroy_ioend(ioend, error);
- while ((ioend = list_first_entry_or_null(&tmp, struct xfs_ioend,
- io_list))) {
- list_del_init(&ioend->io_list);
- xfs_destroy_ioend(ioend, error);
- }
-}
-
/*
* Fast and loose check if this write could update the on-disk inode size.
*/
-static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
+static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
return ioend->io_offset + ioend->io_size >
XFS_I(ioend->io_inode)->i_d.di_size;
@@ -142,7 +67,7 @@ static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
STATIC int
xfs_setfilesize_trans_alloc(
- struct xfs_ioend *ioend)
+ struct iomap_ioend *ioend)
{
struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
struct xfs_trans *tp;
@@ -215,7 +140,7 @@ xfs_setfilesize(
STATIC int
xfs_setfilesize_ioend(
- struct xfs_ioend *ioend,
+ struct iomap_ioend *ioend,
int error)
{
struct xfs_inode *ip = XFS_I(ioend->io_inode);
@@ -243,7 +168,7 @@ xfs_setfilesize_ioend(
*/
STATIC void
xfs_end_ioend(
- struct xfs_ioend *ioend)
+ struct iomap_ioend *ioend)
{
struct xfs_inode *ip = XFS_I(ioend->io_inode);
xfs_off_t offset = ioend->io_offset;
@@ -289,31 +214,10 @@ xfs_end_ioend(
done:
if (ioend->io_private)
error = xfs_setfilesize_ioend(ioend, error);
- xfs_destroy_ioends(ioend, error);
+ iomap_finish_ioends(ioend, error);
memalloc_nofs_restore(nofs_flag);
}
-/*
- * We can merge two adjacent ioends if they have the same set of work to do.
- */
-static bool
-xfs_ioend_can_merge(
- struct xfs_ioend *ioend,
- struct xfs_ioend *next)
-{
- if (ioend->io_bio->bi_status != next->io_bio->bi_status)
- return false;
- if ((ioend->io_flags & IOMAP_F_SHARED) ^
- (next->io_flags & IOMAP_F_SHARED))
- return false;
- if ((ioend->io_type == IOMAP_UNWRITTEN) ^
- (next->io_type == IOMAP_UNWRITTEN))
- return false;
- if (ioend->io_offset + ioend->io_size != next->io_offset)
- return false;
- return true;
-}
-
/*
* If the to be merged ioend has a preallocated transaction for file
* size updates we need to ensure the ioend it is merged into also
@@ -322,8 +226,8 @@ xfs_ioend_can_merge(
*/
static void
xfs_ioend_merge_private(
- struct xfs_ioend *ioend,
- struct xfs_ioend *next)
+ struct iomap_ioend *ioend,
+ struct iomap_ioend *next)
{
if (!ioend->io_private) {
ioend->io_private = next->io_private;
@@ -333,53 +237,6 @@ xfs_ioend_merge_private(
}
}
-/* Try to merge adjacent completions. */
-STATIC void
-xfs_ioend_try_merge(
- struct xfs_ioend *ioend,
- struct list_head *more_ioends)
-{
- struct xfs_ioend *next;
-
- INIT_LIST_HEAD(&ioend->io_list);
-
- while ((next = list_first_entry_or_null(more_ioends, struct xfs_ioend,
- io_list))) {
- if (!xfs_ioend_can_merge(ioend, next))
- break;
- list_move_tail(&next->io_list, &ioend->io_list);
- ioend->io_size += next->io_size;
- if (next->io_private)
- xfs_ioend_merge_private(ioend, next);
- }
-}
-
-/* list_sort compare function for ioends */
-static int
-xfs_ioend_compare(
- void *priv,
- struct list_head *a,
- struct list_head *b)
-{
- struct xfs_ioend *ia;
- struct xfs_ioend *ib;
-
- ia = container_of(a, struct xfs_ioend, io_list);
- ib = container_of(b, struct xfs_ioend, io_list);
- if (ia->io_offset < ib->io_offset)
- return -1;
- else if (ia->io_offset > ib->io_offset)
- return 1;
- return 0;
-}
-
-static void
-xfs_sort_ioends(
- struct list_head *ioend_list)
-{
- list_sort(NULL, ioend_list, xfs_ioend_compare);
-}
-
/* Finish all pending io completions. */
void
xfs_end_io(
@@ -387,7 +244,7 @@ xfs_end_io(
{
struct xfs_inode *ip =
container_of(work, struct xfs_inode, i_ioend_work);
- struct xfs_ioend *ioend;
+ struct iomap_ioend *ioend;
struct list_head tmp;
unsigned long flags;
@@ -395,16 +252,16 @@ xfs_end_io(
list_replace_init(&ip->i_ioend_list, &tmp);
spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
- xfs_sort_ioends(&tmp);
- while ((ioend = list_first_entry_or_null(&tmp, struct xfs_ioend,
+ iomap_sort_ioends(&tmp);
+ while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
io_list))) {
list_del_init(&ioend->io_list);
- xfs_ioend_try_merge(ioend, &tmp);
+ iomap_ioend_try_merge(ioend, &tmp, xfs_ioend_merge_private);
xfs_end_ioend(ioend);
}
}
-static inline bool xfs_ioend_needs_workqueue(struct xfs_ioend *ioend)
+static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
{
return ioend->io_private ||
ioend->io_type == IOMAP_UNWRITTEN ||
@@ -415,20 +272,18 @@ STATIC void
xfs_end_bio(
struct bio *bio)
{
- struct xfs_ioend *ioend = bio->bi_private;
+ struct iomap_ioend *ioend = bio->bi_private;
struct xfs_inode *ip = XFS_I(ioend->io_inode);
- struct xfs_mount *mp = ip->i_mount;
unsigned long flags;
- if (xfs_ioend_needs_workqueue(ioend)) {
- spin_lock_irqsave(&ip->i_ioend_lock, flags);
- if (list_empty(&ip->i_ioend_list))
- WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
- &ip->i_ioend_work));
- list_add_tail(&ioend->io_list, &ip->i_ioend_list);
- spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
- } else
- xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
+ ASSERT(xfs_ioend_needs_workqueue(ioend));
+
+ spin_lock_irqsave(&ip->i_ioend_lock, flags);
+ if (list_empty(&ip->i_ioend_list))
+ WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
+ &ip->i_ioend_work));
+ list_add_tail(&ioend->io_list, &ip->i_ioend_list);
+ spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}
/*
@@ -437,7 +292,7 @@ xfs_end_bio(
*/
static bool
xfs_imap_valid(
- struct xfs_writepage_ctx *wpc,
+ struct iomap_writepage_ctx *wpc,
struct xfs_inode *ip,
loff_t offset)
{
@@ -459,10 +314,10 @@ xfs_imap_valid(
* checked (and found nothing at this offset) could have added
* overlapping blocks.
*/
- if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
+ if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
return false;
if (xfs_inode_has_cow_data(ip) &&
- wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
+ XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
return false;
return true;
}
@@ -477,12 +332,18 @@ xfs_imap_valid(
*/
static int
xfs_convert_blocks(
- struct xfs_writepage_ctx *wpc,
+ struct iomap_writepage_ctx *wpc,
struct xfs_inode *ip,
int whichfork,
loff_t offset)
{
int error;
+ unsigned *seq;
+
+ if (whichfork == XFS_COW_FORK)
+ seq = &XFS_WPC(wpc)->cow_seq;
+ else
+ seq = &XFS_WPC(wpc)->data_seq;
/*
* Attempt to allocate whatever delalloc extent currently backs offset
@@ -492,8 +353,7 @@ xfs_convert_blocks(
*/
do {
error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
- &wpc->iomap, whichfork == XFS_COW_FORK ?
- &wpc->cow_seq : &wpc->data_seq);
+ &wpc->iomap, seq);
if (error)
return error;
} while (wpc->iomap.offset + wpc->iomap.length <= offset);
@@ -501,9 +361,9 @@ xfs_convert_blocks(
return 0;
}
-STATIC int
+static int
xfs_map_blocks(
- struct xfs_writepage_ctx *wpc,
+ struct iomap_writepage_ctx *wpc,
struct inode *inode,
loff_t offset)
{
@@ -559,7 +419,7 @@ xfs_map_blocks(
xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
cow_fsb = imap.br_startoff;
if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
- wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
+ XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
whichfork = XFS_COW_FORK;
@@ -582,7 +442,7 @@ xfs_map_blocks(
*/
if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
imap.br_startoff = end_fsb; /* fake a hole past EOF */
- wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
+ XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
/* landed in a hole or beyond EOF? */
@@ -645,24 +505,9 @@ xfs_map_blocks(
return 0;
}
-/*
- * Submit the bio for an ioend. We are passed an ioend with a bio attached to
- * it, and we submit that bio. The ioend may be used for multiple bio
- * submissions, so we only want to allocate an append transaction for the ioend
- * once. In the case of multiple bio submission, each bio will take an IO
- * reference to the ioend to ensure that the ioend completion is only done once
- * all bios have been submitted and the ioend is really done.
- *
- * If @status is non-zero, it means that we have a situation where some part of
- * the submission process has failed after we have marked paged for writeback
- * and unlocked them. In this situation, we need to fail the bio and ioend
- * rather than submit it to IO. This typically only happens on a filesystem
- * shutdown.
- */
-STATIC int
+static int
xfs_submit_ioend(
- struct writeback_control *wbc,
- struct xfs_ioend *ioend,
+ struct iomap_ioend *ioend,
int status)
{
unsigned int nofs_flag;
@@ -690,147 +535,9 @@ xfs_submit_ioend(
memalloc_nofs_restore(nofs_flag);
- ioend->io_bio->bi_private = ioend;
- ioend->io_bio->bi_end_io = xfs_end_bio;
-
- /*
- * If we are failing the IO now, just mark the ioend with an
- * error and finish it. This will run IO completion immediately
- * as there is only one reference to the ioend at this point in
- * time.
- */
- if (status) {
- ioend->io_bio->bi_status = errno_to_blk_status(status);
- bio_endio(ioend->io_bio);
- return status;
- }
-
- submit_bio(ioend->io_bio);
- return 0;
-}
-
-static struct xfs_ioend *
-xfs_alloc_ioend(
- struct inode *inode,
- struct xfs_writepage_ctx *wpc,
- xfs_off_t offset,
- sector_t sector,
- struct writeback_control *wbc)
-{
- struct xfs_ioend *ioend;
- struct bio *bio;
-
- bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
- bio_set_dev(bio, wpc->iomap.bdev);
- bio->bi_iter.bi_sector = sector;
- bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
- bio->bi_write_hint = inode->i_write_hint;
- wbc_init_bio(wbc, bio);
-
- ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
- INIT_LIST_HEAD(&ioend->io_list);
- ioend->io_type = wpc->iomap.type;
- ioend->io_flags = wpc->iomap.flags;
- ioend->io_inode = inode;
- ioend->io_size = 0;
- ioend->io_offset = offset;
- ioend->io_private = NULL;
- ioend->io_bio = bio;
- return ioend;
-}
-
-/*
- * Allocate a new bio, and chain the old bio to the new one.
- *
- * Note that we have to do perform the chaining in this unintuitive order
- * so that the bi_private linkage is set up in the right direction for the
- * traversal in xfs_destroy_ioend().
- */
-static struct bio *
-xfs_chain_bio(
- struct bio *prev)
-{
- struct bio *new;
-
- new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
- bio_copy_dev(new, prev);/* also copies over blkcg information */
- new->bi_iter.bi_sector = bio_end_sector(prev);
- new->bi_opf = prev->bi_opf;
- new->bi_write_hint = prev->bi_write_hint;
-
- bio_chain(prev, new);
- bio_get(prev); /* for xfs_destroy_ioend */
- submit_bio(prev);
- return new;
-}
-
-static bool
-xfs_can_add_to_ioend(
- struct xfs_writepage_ctx *wpc,
- xfs_off_t offset,
- sector_t sector)
-{
- if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
- (wpc->ioend->io_flags & IOMAP_F_SHARED))
- return false;
- if (wpc->iomap.type != wpc->ioend->io_type)
- return false;
- if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
- return false;
- if (sector != bio_end_sector(wpc->ioend->io_bio))
- return false;
- return true;
-}
-
-/*
- * Test to see if we have an existing ioend structure that we could append to
- * first, otherwise finish off the current ioend and start another.
- */
-STATIC void
-xfs_add_to_ioend(
- struct inode *inode,
- xfs_off_t offset,
- struct page *page,
- struct iomap_page *iop,
- struct xfs_writepage_ctx *wpc,
- struct writeback_control *wbc,
- struct list_head *iolist)
-{
- sector_t sector = iomap_sector(&wpc->iomap, offset);
- unsigned len = i_blocksize(inode);
- unsigned poff = offset & (PAGE_SIZE - 1);
- bool merged, same_page = false;
-
- if (!wpc->ioend || !xfs_can_add_to_ioend(wpc, offset, sector)) {
- if (wpc->ioend)
- list_add(&wpc->ioend->io_list, iolist);
- wpc->ioend = xfs_alloc_ioend(inode, wpc, offset, sector, wbc);
- }
-
- merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
- &same_page);
-
- if (iop && !same_page)
- atomic_inc(&iop->write_count);
-
- if (!merged) {
- if (bio_full(wpc->ioend->io_bio, len))
- wpc->ioend->io_bio = xfs_chain_bio(wpc->ioend->io_bio);
- bio_add_page(wpc->ioend->io_bio, page, len, poff);
- }
-
- wpc->ioend->io_size += len;
- wbc_account_cgroup_owner(wbc, page, len);
-}
-
-STATIC void
-xfs_vm_invalidatepage(
- struct page *page,
- unsigned int offset,
- unsigned int length)
-{
- trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
- iomap_invalidatepage(page, offset, length);
+ if (xfs_ioend_needs_workqueue(ioend))
+ ioend->io_bio->bi_end_io = xfs_end_bio;
+ return status;
}
/*
@@ -844,8 +551,8 @@ xfs_vm_invalidatepage(
* transaction as there is no space left for block reservation (typically why we
* see a ENOSPC in writeback).
*/
-STATIC void
-xfs_aops_discard_page(
+static void
+xfs_discard_page(
struct page *page)
{
struct inode *inode = page->mapping->host;
@@ -867,246 +574,14 @@ xfs_aops_discard_page(
if (error && !XFS_FORCED_SHUTDOWN(mp))
xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
- xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
+ iomap_invalidatepage(page, 0, PAGE_SIZE);
}
-/*
- * We implement an immediate ioend submission policy here to avoid needing to
- * chain multiple ioends and hence nest mempool allocations which can violate
- * forward progress guarantees we need to provide. The current ioend we are
- * adding blocks to is cached on the writepage context, and if the new block
- * does not append to the cached ioend it will create a new ioend and cache that
- * instead.
- *
- * If a new ioend is created and cached, the old ioend is returned and queued
- * locally for submission once the entire page is processed or an error has been
- * detected. While ioends are submitted immediately after they are completed,
- * batching optimisations are provided by higher level block plugging.
- *
- * At the end of a writeback pass, there will be a cached ioend remaining on the
- * writepage context that the caller will need to submit.
- */
-static int
-xfs_writepage_map(
- struct xfs_writepage_ctx *wpc,
- struct writeback_control *wbc,
- struct inode *inode,
- struct page *page,
- uint64_t end_offset)
-{
- LIST_HEAD(submit_list);
- struct iomap_page *iop = to_iomap_page(page);
- unsigned len = i_blocksize(inode);
- struct xfs_ioend *ioend, *next;
- uint64_t file_offset; /* file offset of page */
- int error = 0, count = 0, i;
-
- ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
- ASSERT(!iop || atomic_read(&iop->write_count) == 0);
-
- /*
- * Walk through the page to find areas to write back. If we run off the
- * end of the current map or find the current map invalid, grab a new
- * one.
- */
- for (i = 0, file_offset = page_offset(page);
- i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
- i++, file_offset += len) {
- if (iop && !test_bit(i, iop->uptodate))
- continue;
-
- error = xfs_map_blocks(wpc, inode, file_offset);
- if (error)
- break;
- if (wpc->iomap.type == IOMAP_HOLE)
- continue;
- xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
- &submit_list);
- count++;
- }
-
- ASSERT(wpc->ioend || list_empty(&submit_list));
- ASSERT(PageLocked(page));
- ASSERT(!PageWriteback(page));
-
- /*
- * On error, we have to fail the ioend here because we may have set
- * pages under writeback, we have to make sure we run IO completion to
- * mark the error state of the IO appropriately, so we can't cancel the
- * ioend directly here. That means we have to mark this page as under
- * writeback if we included any blocks from it in the ioend chain so
- * that completion treats it correctly.
- *
- * If we didn't include the page in the ioend, the on error we can
- * simply discard and unlock it as there are no other users of the page
- * now. The caller will still need to trigger submission of outstanding
- * ioends on the writepage context so they are treated correctly on
- * error.
- */
- if (unlikely(error)) {
- if (!count) {
- xfs_aops_discard_page(page);
- ClearPageUptodate(page);
- unlock_page(page);
- goto done;
- }
-
- /*
- * If the page was not fully cleaned, we need to ensure that the
- * higher layers come back to it correctly. That means we need
- * to keep the page dirty, and for WB_SYNC_ALL writeback we need
- * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
- * so another attempt to write this page in this writeback sweep
- * will be made.
- */
- set_page_writeback_keepwrite(page);
- } else {
- clear_page_dirty_for_io(page);
- set_page_writeback(page);
- }
-
- unlock_page(page);
-
- /*
- * Preserve the original error if there was one, otherwise catch
- * submission errors here and propagate into subsequent ioend
- * submissions.
- */
- list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
- int error2;
-
- list_del_init(&ioend->io_list);
- error2 = xfs_submit_ioend(wbc, ioend, error);
- if (error2 && !error)
- error = error2;
- }
-
- /*
- * We can end up here with no error and nothing to write only if we race
- * with a partial page truncate on a sub-page block sized filesystem.
- */
- if (!count)
- end_page_writeback(page);
-done:
- mapping_set_error(page->mapping, error);
- return error;
-}
-
-/*
- * Write out a dirty page.
- *
- * For delalloc space on the page we need to allocate space and flush it.
- * For unwritten space on the page we need to start the conversion to
- * regular allocated space.
- */
-STATIC int
-xfs_do_writepage(
- struct page *page,
- struct writeback_control *wbc,
- void *data)
-{
- struct xfs_writepage_ctx *wpc = data;
- struct inode *inode = page->mapping->host;
- loff_t offset;
- uint64_t end_offset;
- pgoff_t end_index;
-
- trace_xfs_writepage(inode, page, 0, 0);
-
- /*
- * Refuse to write the page out if we are called from reclaim context.
- *
- * This avoids stack overflows when called from deeply used stacks in
- * random callers for direct reclaim or memcg reclaim. We explicitly
- * allow reclaim from kswapd as the stack usage there is relatively low.
- *
- * This should never happen except in the case of a VM regression so
- * warn about it.
- */
- if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
- PF_MEMALLOC))
- goto redirty;
-
- /*
- * Given that we do not allow direct reclaim to call us, we should
- * never be called while in a filesystem transaction.
- */
- if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
- goto redirty;
-
- /*
- * Is this page beyond the end of the file?
- *
- * The page index is less than the end_index, adjust the end_offset
- * to the highest offset that this page should represent.
- * -----------------------------------------------------
- * | file mapping | <EOF> |
- * -----------------------------------------------------
- * | Page ... | Page N-2 | Page N-1 | Page N | |
- * ^--------------------------------^----------|--------
- * | desired writeback range | see else |
- * ---------------------------------^------------------|
- */
- offset = i_size_read(inode);
- end_index = offset >> PAGE_SHIFT;
- if (page->index < end_index)
- end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
- else {
- /*
- * Check whether the page to write out is beyond or straddles
- * i_size or not.
- * -------------------------------------------------------
- * | file mapping | <EOF> |
- * -------------------------------------------------------
- * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
- * ^--------------------------------^-----------|---------
- * | | Straddles |
- * ---------------------------------^-----------|--------|
- */
- unsigned offset_into_page = offset & (PAGE_SIZE - 1);
-
- /*
- * Skip the page if it is fully outside i_size, e.g. due to a
- * truncate operation that is in progress. We must redirty the
- * page so that reclaim stops reclaiming it. Otherwise
- * xfs_vm_releasepage() is called on it and gets confused.
- *
- * Note that the end_index is unsigned long, it would overflow
- * if the given offset is greater than 16TB on 32-bit system
- * and if we do check the page is fully outside i_size or not
- * via "if (page->index >= end_index + 1)" as "end_index + 1"
- * will be evaluated to 0. Hence this page will be redirtied
- * and be written out repeatedly which would result in an
- * infinite loop, the user program that perform this operation
- * will hang. Instead, we can verify this situation by checking
- * if the page to write is totally beyond the i_size or if it's
- * offset is just equal to the EOF.
- */
- if (page->index > end_index ||
- (page->index == end_index && offset_into_page == 0))
- goto redirty;
-
- /*
- * The page straddles i_size. It must be zeroed out on each
- * and every writepage invocation because it may be mmapped.
- * "A file is mapped in multiples of the page size. For a file
- * that is not a multiple of the page size, the remaining
- * memory is zeroed when mapped, and writes to that region are
- * not written out to the file."
- */
- zero_user_segment(page, offset_into_page, PAGE_SIZE);
-
- /* Adjust the end_offset to the end of file */
- end_offset = offset;
- }
-
- return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
-
-redirty:
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
-}
+static const struct iomap_writeback_ops xfs_writeback_ops = {
+ .map_blocks = xfs_map_blocks,
+ .submit_ioend = xfs_submit_ioend,
+ .discard_page = xfs_discard_page,
+};
STATIC int
xfs_vm_writepage(
@@ -1114,12 +589,8 @@ xfs_vm_writepage(
struct writeback_control *wbc)
{
struct xfs_writepage_ctx wpc = { };
- int ret;
- ret = xfs_do_writepage(page, wbc, &wpc);
- if (wpc.ioend)
- ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
- return ret;
+ return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops);
}
STATIC int
@@ -1128,13 +599,9 @@ xfs_vm_writepages(
struct writeback_control *wbc)
{
struct xfs_writepage_ctx wpc = { };
- int ret;
xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
- ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
- if (wpc.ioend)
- ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
- return ret;
+ return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}
STATIC int
@@ -1147,15 +614,6 @@ xfs_dax_writepages(
xfs_find_bdev_for_inode(mapping->host), wbc);
}
-STATIC int
-xfs_vm_releasepage(
- struct page *page,
- gfp_t gfp_mask)
-{
- trace_xfs_releasepage(page->mapping->host, page, 0, 0);
- return iomap_releasepage(page, gfp_mask);
-}
-
STATIC sector_t
xfs_vm_bmap(
struct address_space *mapping,
@@ -1213,8 +671,8 @@ const struct address_space_operations xfs_address_space_operations = {
.writepage = xfs_vm_writepage,
.writepages = xfs_vm_writepages,
.set_page_dirty = iomap_set_page_dirty,
- .releasepage = xfs_vm_releasepage,
- .invalidatepage = xfs_vm_invalidatepage,
+ .releasepage = iomap_releasepage,
+ .invalidatepage = iomap_invalidatepage,
.bmap = xfs_vm_bmap,
.direct_IO = noop_direct_IO,
.migratepage = iomap_migrate_page,
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 4a0226cdad4f..687b11f34fa2 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -6,23 +6,6 @@
#ifndef __XFS_AOPS_H__
#define __XFS_AOPS_H__
-extern struct bio_set xfs_ioend_bioset;
-
-/*
- * Structure for buffered I/O completions.
- */
-struct xfs_ioend {
- struct list_head io_list; /* next ioend in chain */
- u16 io_type;
- u16 io_flags; /* IOMAP_F_* */
- struct inode *io_inode; /* file being written to */
- size_t io_size; /* size of the extent */
- xfs_off_t io_offset; /* offset in the file */
- void *io_private; /* file system private data */
- struct bio *io_bio; /* bio being built */
- struct bio io_inline_bio; /* MUST BE LAST! */
-};
-
extern const struct address_space_operations xfs_address_space_operations;
extern const struct address_space_operations xfs_dax_aops;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 8d1df9f8be07..0a8cf6b87a21 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -40,7 +40,6 @@
#include <linux/parser.h>
static const struct super_operations xfs_super_operations;
-struct bio_set xfs_ioend_bioset;
static struct kset *xfs_kset; /* top-level xfs sysfs dir */
#ifdef DEBUG
@@ -1853,15 +1852,10 @@ MODULE_ALIAS_FS("xfs");
STATIC int __init
xfs_init_zones(void)
{
- if (bioset_init(&xfs_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
- offsetof(struct xfs_ioend, io_inline_bio),
- BIOSET_NEED_BVECS))
- goto out;
-
xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
"xfs_log_ticket");
if (!xfs_log_ticket_zone)
- goto out_free_ioend_bioset;
+ goto out;
xfs_bmap_free_item_zone = kmem_zone_init(
sizeof(struct xfs_extent_free_item),
@@ -1996,8 +1990,6 @@ xfs_init_zones(void)
kmem_zone_destroy(xfs_bmap_free_item_zone);
out_destroy_log_ticket_zone:
kmem_zone_destroy(xfs_log_ticket_zone);
- out_free_ioend_bioset:
- bioset_exit(&xfs_ioend_bioset);
out:
return -ENOMEM;
}
@@ -2028,7 +2020,6 @@ xfs_destroy_zones(void)
kmem_zone_destroy(xfs_btree_cur_zone);
kmem_zone_destroy(xfs_bmap_free_item_zone);
kmem_zone_destroy(xfs_log_ticket_zone);
- bioset_exit(&xfs_ioend_bioset);
}
STATIC int __init
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index eae4b29c174e..cbb23d7a3554 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -1158,45 +1158,6 @@ DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
DEFINE_RW_EVENT(xfs_file_dax_write);
-DECLARE_EVENT_CLASS(xfs_page_class,
- TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
- unsigned int len),
- TP_ARGS(inode, page, off, len),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(xfs_ino_t, ino)
- __field(pgoff_t, pgoff)
- __field(loff_t, size)
- __field(unsigned long, offset)
- __field(unsigned int, length)
- ),
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = XFS_I(inode)->i_ino;
- __entry->pgoff = page_offset(page);
- __entry->size = i_size_read(inode);
- __entry->offset = off;
- __entry->length = len;
- ),
- TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
- "length %x",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino,
- __entry->pgoff,
- __entry->size,
- __entry->offset,
- __entry->length)
-)
-
-#define DEFINE_PAGE_EVENT(name) \
-DEFINE_EVENT(xfs_page_class, name, \
- TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
- unsigned int len), \
- TP_ARGS(inode, page, off, len))
-DEFINE_PAGE_EVENT(xfs_writepage);
-DEFINE_PAGE_EVENT(xfs_releasepage);
-DEFINE_PAGE_EVENT(xfs_invalidatepage);
-
DECLARE_EVENT_CLASS(xfs_imap_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
int whichfork, struct xfs_bmbt_irec *irec),
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 7aa5d6117936..0b399718c387 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -4,6 +4,7 @@
#include <linux/atomic.h>
#include <linux/bitmap.h>
+#include <linux/blk_types.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mm_types.h>
@@ -12,6 +13,7 @@
struct address_space;
struct fiemap_extent_info;
struct inode;
+struct iomap_writepage_ctx;
struct iov_iter;
struct kiocb;
struct page;
@@ -183,6 +185,63 @@ loff_t iomap_seek_data(struct inode *inode, loff_t offset,
sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
const struct iomap_ops *ops);
+/*
+ * Structure for writeback I/O completions.
+ */
+struct iomap_ioend {
+ struct list_head io_list; /* next ioend in chain */
+ u16 io_type;
+ u16 io_flags; /* IOMAP_F_* */
+ struct inode *io_inode; /* file being written to */
+ size_t io_size; /* size of the extent */
+ loff_t io_offset; /* offset in the file */
+ void *io_private; /* file system private data */
+ struct bio *io_bio; /* bio being built */
+ struct bio io_inline_bio; /* MUST BE LAST! */
+};
+
+struct iomap_writeback_ops {
+ /*
+ * Required, maps the blocks so that writeback can be performed on
+ * the range starting at offset.
+ */
+ int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
+ loff_t offset);
+
+ /*
+ * Optional, allows the file system to perform actions just before
+ * submitting the bio and/or to override the bio end_io handler for
+ * complex operations like copy on write extent manipulation or
+ * unwritten extent conversions.
+ */
+ int (*submit_ioend)(struct iomap_ioend *ioend, int status);
+
+ /*
+ * Optional, allows the file system to discard state on a page where
+ * we failed to submit any I/O.
+ */
+ void (*discard_page)(struct page *page);
+};
+
+struct iomap_writepage_ctx {
+ struct iomap iomap;
+ struct iomap_ioend *ioend;
+ const struct iomap_writeback_ops *ops;
+};
+
+void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
+void iomap_ioend_try_merge(struct iomap_ioend *ioend,
+ struct list_head *more_ioends,
+ void (*merge_private)(struct iomap_ioend *ioend,
+ struct iomap_ioend *next));
+void iomap_sort_ioends(struct list_head *ioend_list);
+int iomap_writepage(struct page *page, struct writeback_control *wbc,
+ struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops);
+int iomap_writepages(struct address_space *mapping,
+ struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops);
+
/*
* Flags for direct I/O ->end_io:
*/
--
2.20.1
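To make the new interface concrete, here is a minimal sketch of how a
file system other than XFS would wire it up.  The myfs_* names are
hypothetical and the actual block mapping is elided; this illustrates
the calling convention only, not a real implementation:

struct myfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;	/* embedded; found via container_of */
	/* file system private revalidation state goes here */
};

static int
myfs_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
		loff_t offset)
{
	/*
	 * Fill wpc->iomap with a mapping that covers @offset.  The core
	 * writeback code keeps using the cached mapping until it runs
	 * off its end, then calls ->map_blocks again.
	 */
	return myfs_get_writeback_mapping(inode, offset, &wpc->iomap);
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.map_blocks	= myfs_map_blocks,
	/* .submit_ioend and .discard_page are optional */
};

static int
myfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct myfs_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc.ctx, &myfs_writeback_ops);
}

This mirrors the xfs_vm_writepages conversion above; the only
requirement is that the private context embeds struct
iomap_writepage_ctx so the ops can get back to it via container_of.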
Lift the xfs code for tracing address space operations to the iomap
layer.
Signed-off-by: Christoph Hellwig <[email protected]>
---
fs/iomap/Makefile | 16 ++++++++------
fs/iomap/buffered-io.c | 5 +++++
fs/iomap/trace.c | 12 +++++++++++
fs/iomap/trace.h | 49 ++++++++++++++++++++++++++++++++++++++++++
fs/xfs/xfs_aops.c | 2 --
fs/xfs/xfs_trace.h | 26 ----------------------
6 files changed, 75 insertions(+), 35 deletions(-)
create mode 100644 fs/iomap/trace.c
create mode 100644 fs/iomap/trace.h
diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile
index 93cd11938bf5..eef2722d93a1 100644
--- a/fs/iomap/Makefile
+++ b/fs/iomap/Makefile
@@ -3,13 +3,15 @@
# Copyright (c) 2019 Oracle.
# All Rights Reserved.
#
-obj-$(CONFIG_FS_IOMAP) += iomap.o
-iomap-y += \
- apply.o \
- buffered-io.o \
- direct-io.o \
- fiemap.o \
- seek.o
+ccflags-y += -I $(srctree)/$(src) # needed for trace events
+
+obj-$(CONFIG_FS_IOMAP) += iomap.o
+iomap-y += trace.o \
+ apply.o \
+ buffered-io.o \
+ direct-io.o \
+ fiemap.o \
+ seek.o
iomap-$(CONFIG_SWAP) += swapfile.o
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 181ee8477aad..d1620c3f2a4c 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -16,6 +16,7 @@
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
+#include "trace.h"
#include "../internal.h"
@@ -301,6 +302,8 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
unsigned poff;
loff_t ret;
+ trace_iomap_readpage(page->mapping->host, 1);
+
for (poff = 0; poff < PAGE_SIZE; poff += ret) {
ret = iomap_apply(inode, page_offset(page) + poff,
PAGE_SIZE - poff, 0, ops, &ctx,
@@ -397,6 +400,8 @@ iomap_readpages(struct address_space *mapping, struct list_head *pages,
loff_t last = page_offset(list_entry(pages->next, struct page, lru));
loff_t length = last - pos + PAGE_SIZE, ret = 0;
+ trace_iomap_readpages(mapping->host, nr_pages);
+
while (length > 0) {
ret = iomap_apply(mapping->host, pos, length, 0, ops,
&ctx, iomap_readpages_actor);
diff --git a/fs/iomap/trace.c b/fs/iomap/trace.c
new file mode 100644
index 000000000000..63ce9f0ce4dc
--- /dev/null
+++ b/fs/iomap/trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Christoph Hellwig
+ */
+#include <linux/iomap.h>
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
new file mode 100644
index 000000000000..3900de1d871d
--- /dev/null
+++ b/fs/iomap/trace.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2009-2019, Christoph Hellwig
+ *
+ * NOTE: none of these tracepoints shall be considered a stable kernel ABI
+ * as they can change at any time.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iomap
+
+#if !defined(_IOMAP_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _IOMAP_TRACE_H
+
+#include <linux/tracepoint.h>
+
+struct inode;
+
+DECLARE_EVENT_CLASS(iomap_readpage_class,
+ TP_PROTO(struct inode *inode, int nr_pages),
+ TP_ARGS(inode, nr_pages),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, ino)
+ __field(int, nr_pages)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->nr_pages = nr_pages;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx nr_pages %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->nr_pages)
+)
+
+#define DEFINE_READPAGE_EVENT(name) \
+DEFINE_EVENT(iomap_readpage_class, name, \
+ TP_PROTO(struct inode *inode, int nr_pages), \
+ TP_ARGS(inode, nr_pages))
+DEFINE_READPAGE_EVENT(iomap_readpage);
+DEFINE_READPAGE_EVENT(iomap_readpages);
+
+#endif /* _IOMAP_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 00fe40b35f72..e2033b070f4a 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1184,7 +1184,6 @@ xfs_vm_readpage(
struct file *unused,
struct page *page)
{
- trace_xfs_vm_readpage(page->mapping->host, 1);
return iomap_readpage(page, &xfs_iomap_ops);
}
@@ -1195,7 +1194,6 @@ xfs_vm_readpages(
struct list_head *pages,
unsigned nr_pages)
{
- trace_xfs_vm_readpages(mapping->host, nr_pages);
return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
}
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index eaae275ed430..eae4b29c174e 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -1197,32 +1197,6 @@ DEFINE_PAGE_EVENT(xfs_writepage);
DEFINE_PAGE_EVENT(xfs_releasepage);
DEFINE_PAGE_EVENT(xfs_invalidatepage);
-DECLARE_EVENT_CLASS(xfs_readpage_class,
- TP_PROTO(struct inode *inode, int nr_pages),
- TP_ARGS(inode, nr_pages),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(xfs_ino_t, ino)
- __field(int, nr_pages)
- ),
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->nr_pages = nr_pages;
- ),
- TP_printk("dev %d:%d ino 0x%llx nr_pages %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino,
- __entry->nr_pages)
-)
-
-#define DEFINE_READPAGE_EVENT(name) \
-DEFINE_EVENT(xfs_readpage_class, name, \
- TP_PROTO(struct inode *inode, int nr_pages), \
- TP_ARGS(inode, nr_pages))
-DEFINE_READPAGE_EVENT(xfs_vm_readpage);
-DEFINE_READPAGE_EVENT(xfs_vm_readpages);
-
DECLARE_EVENT_CLASS(xfs_imap_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
int whichfork, struct xfs_bmbt_irec *irec),
--
2.20.1
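One note on the structure: because the event class is factored out,
extending trace coverage later is a single line per event.  A
hypothetical example (iomap_readahead is not part of this series):

	/* in fs/iomap/trace.h, reusing the class added above */
	DEFINE_READPAGE_EVENT(iomap_readahead);

	/* at a call site in fs/iomap/buffered-io.c */
	trace_iomap_readahead(mapping->host, nr_pages);

The ccflags-y -I addition in the Makefile is what lets
trace/define_trace.h locate this header through the
TRACE_INCLUDE_PATH of "." above.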
An inline mapping should never mark the page dirty and thus never end up
in writepages. Add a check for that condition and warn if it happens.
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]>
---
fs/iomap/buffered-io.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 00af08006cd3..76e72576f307 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1421,6 +1421,8 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
error = wpc->ops->map_blocks(wpc, inode, file_offset);
if (error)
break;
+ if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
+ continue;
if (wpc->iomap.type == IOMAP_HOLE)
continue;
iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
--
2.20.1
On Tue, Oct 15, 2019 at 10:56:19AM -0700, Darrick J. Wong wrote:
> On Tue, Oct 15, 2019 at 05:43:37PM +0200, Christoph Hellwig wrote:
> > Introduce two nicely abstracted helper, which can be moved to the
> > iomap code later. Also use list_pop_entry and list_first_entry_or_null
> > to simplify the code a bit.
>
> No we don't use these.... ^^^^^^^^^^^^^^
list_first_entry_or_null is used; only list_pop_entry isn't, so that
mention needs to be dropped from the commit message.
On Tue, Oct 15, 2019 at 05:43:34PM +0200, Christoph Hellwig wrote:
> Currently we don't overwrite the flags field in the iomap in
> xfs_bmbt_to_iomap. This works fine with 0-initialized iomaps on stack,
> but is harmful once we want to be able to reuse an iomap in the
> writeback code. Replace the shared paramter with a set of initial
"parameter"
Looks ok otherwise,
Reviewed-by: Darrick J. Wong <[email protected]>
--D
> flags an thus ensures the flags field is always reinitialized.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
> fs/xfs/xfs_iomap.c | 28 +++++++++++++++++-----------
> fs/xfs/xfs_iomap.h | 2 +-
> fs/xfs/xfs_pnfs.c | 2 +-
> 3 files changed, 19 insertions(+), 13 deletions(-)
>
> diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
> index f780e223b118..54c9ec7ad337 100644
> --- a/fs/xfs/xfs_iomap.c
> +++ b/fs/xfs/xfs_iomap.c
> @@ -54,7 +54,7 @@ xfs_bmbt_to_iomap(
> struct xfs_inode *ip,
> struct iomap *iomap,
> struct xfs_bmbt_irec *imap,
> - bool shared)
> + u16 flags)
> {
> struct xfs_mount *mp = ip->i_mount;
>
> @@ -79,12 +79,11 @@ xfs_bmbt_to_iomap(
> iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
> iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
> iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
> + iomap->flags = flags;
>
> if (xfs_ipincount(ip) &&
> (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
> iomap->flags |= IOMAP_F_DIRTY;
> - if (shared)
> - iomap->flags |= IOMAP_F_SHARED;
> return 0;
> }
>
> @@ -540,6 +539,7 @@ xfs_file_iomap_begin_delay(
> struct xfs_iext_cursor icur, ccur;
> xfs_fsblock_t prealloc_blocks = 0;
> bool eof = false, cow_eof = false, shared = false;
> + u16 iomap_flags = 0;
> int whichfork = XFS_DATA_FORK;
> int error = 0;
>
> @@ -707,7 +707,7 @@ xfs_file_iomap_begin_delay(
> * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
> * them out if the write happens to fail.
> */
> - iomap->flags |= IOMAP_F_NEW;
> + iomap_flags |= IOMAP_F_NEW;
> trace_xfs_iomap_alloc(ip, offset, count, whichfork,
> whichfork == XFS_DATA_FORK ? &imap : &cmap);
> done:
> @@ -715,14 +715,17 @@ xfs_file_iomap_begin_delay(
> if (imap.br_startoff > offset_fsb) {
> xfs_trim_extent(&cmap, offset_fsb,
> imap.br_startoff - offset_fsb);
> - error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
> + error = xfs_bmbt_to_iomap(ip, iomap, &cmap,
> + IOMAP_F_SHARED);
> goto out_unlock;
> }
> /* ensure we only report blocks we have a reservation for */
> xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
> shared = true;
> }
> - error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
> + if (shared)
> + iomap_flags |= IOMAP_F_SHARED;
> + error = xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
> out_unlock:
> xfs_iunlock(ip, XFS_ILOCK_EXCL);
> return error;
> @@ -930,6 +933,7 @@ xfs_file_iomap_begin(
> xfs_fileoff_t offset_fsb, end_fsb;
> int nimaps = 1, error = 0;
> bool shared = false;
> + u16 iomap_flags = 0;
> unsigned lockmode;
>
> if (XFS_FORCED_SHUTDOWN(mp))
> @@ -1045,11 +1049,13 @@ xfs_file_iomap_begin(
> if (error)
> return error;
>
> - iomap->flags |= IOMAP_F_NEW;
> + iomap_flags |= IOMAP_F_NEW;
> trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
>
> out_finish:
> - return xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
> + if (shared)
> + iomap_flags |= IOMAP_F_SHARED;
> + return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
>
> out_found:
> ASSERT(nimaps);
> @@ -1193,7 +1199,7 @@ xfs_seek_iomap_begin(
> if (data_fsb < cow_fsb + cmap.br_blockcount)
> end_fsb = min(end_fsb, data_fsb);
> xfs_trim_extent(&cmap, offset_fsb, end_fsb);
> - error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
> + error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
> /*
> * This is a COW extent, so we must probe the page cache
> * because there could be dirty page cache being backed
> @@ -1215,7 +1221,7 @@ xfs_seek_iomap_begin(
> imap.br_state = XFS_EXT_NORM;
> done:
> xfs_trim_extent(&imap, offset_fsb, end_fsb);
> - error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
> + error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
> out_unlock:
> xfs_iunlock(ip, lockmode);
> return error;
> @@ -1261,7 +1267,7 @@ xfs_xattr_iomap_begin(
> if (error)
> return error;
> ASSERT(nimaps);
> - return xfs_bmbt_to_iomap(ip, iomap, &imap, false);
> + return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
> }
>
> const struct iomap_ops xfs_xattr_iomap_ops = {
> diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
> index 5c2f6aa6d78f..71d0ae460c44 100644
> --- a/fs/xfs/xfs_iomap.h
> +++ b/fs/xfs/xfs_iomap.h
> @@ -16,7 +16,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
> int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
>
> int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
> - struct xfs_bmbt_irec *, bool shared);
> + struct xfs_bmbt_irec *, u16);
> xfs_extlen_t xfs_eof_alignment(struct xfs_inode *ip, xfs_extlen_t extsize);
>
> static inline xfs_filblks_t
> diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
> index a339bd5fa260..9c96493be9e0 100644
> --- a/fs/xfs/xfs_pnfs.c
> +++ b/fs/xfs/xfs_pnfs.c
> @@ -178,7 +178,7 @@ xfs_fs_map_blocks(
> }
> xfs_iunlock(ip, XFS_IOLOCK_EXCL);
>
> - error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
> + error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
> *device_generation = mp->m_generation;
> return error;
> out_unlock:
> --
> 2.20.1
>
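To spell out the hazard the assignment fixes (illustrative flag
lifecycle, not real code):

	struct iomap iomap = { };	/* on-stack: ->flags starts at 0 */

	/* first call maps a shared extent, old style: */
	iomap.flags |= IOMAP_F_SHARED;	/* fine the first time */

	/* the same iomap is reused for a non-shared extent: */
	/* old style never clears the field, so IOMAP_F_SHARED leaks */

	/* new style in xfs_bmbt_to_iomap: */
	iomap.flags = flags;		/* field reinitialized every call */

With 0-initialized on-stack iomaps the |= version is harmless, which
is why this only shows up once the writeback code starts reusing the
iomap embedded in its context.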
On Tue, Oct 15, 2019 at 05:43:35PM +0200, Christoph Hellwig wrote:
> Don't set IOMAP_F_NEW if we COW over and existing allocated range, as
"..over an existing..."
> these aren't strictly new allocations. This is required to be able to
> use IOMAP_F_NEW to zero newly allocated blocks, which is required for
> the iomap code to fully support file systems that don't do delayed
> allocations or use unwritten extents.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
Other than that,
Reviewed-by: Darrick J. Wong <[email protected]>
--D
> ---
> fs/xfs/xfs_iomap.c | 9 ++++++---
> 1 file changed, 6 insertions(+), 3 deletions(-)
>
> diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
> index 54c9ec7ad337..c0a492353826 100644
> --- a/fs/xfs/xfs_iomap.c
> +++ b/fs/xfs/xfs_iomap.c
> @@ -707,9 +707,12 @@ xfs_file_iomap_begin_delay(
> * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
> * them out if the write happens to fail.
> */
> - iomap_flags |= IOMAP_F_NEW;
> - trace_xfs_iomap_alloc(ip, offset, count, whichfork,
> - whichfork == XFS_DATA_FORK ? &imap : &cmap);
> + if (whichfork == XFS_DATA_FORK) {
> + iomap_flags |= IOMAP_F_NEW;
> + trace_xfs_iomap_alloc(ip, offset, count, whichfork, &imap);
> + } else {
> + trace_xfs_iomap_alloc(ip, offset, count, whichfork, &cmap);
> + }
> done:
> if (whichfork == XFS_COW_FORK) {
> if (imap.br_startoff > offset_fsb) {
> --
> 2.20.1
>
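To make the motivation concrete, the consumer the commit message
alludes to looks roughly like this (simplified sketch; myfs_read_block
is a hypothetical stand-in for the real read path):

	if (iomap->flags & IOMAP_F_NEW) {
		/*
		 * These blocks were only just allocated, so there is
		 * no valid data to read back; zeroing suffices.
		 */
		zero_user_segment(page, poff, poff + plen);
	} else {
		/* Existing blocks may hold data; read them in first. */
		status = myfs_read_block(inode, page, poff, plen);
	}

If a COW over already-allocated blocks still set IOMAP_F_NEW, the
first branch would zero data that is live on disk, which is why the
flag now has to mean a strictly new allocation.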
On Tue, Oct 15, 2019 at 05:43:38PM +0200, Christoph Hellwig wrote:
> In preparation for moving the ioend structure to common code we need
> to get rid of the xfs-specific xfs_trans type. Just make it a file
> system private void pointer instead.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
Looks ok,
Reviewed-by: Darrick J. Wong <[email protected]>
--D
> ---
> fs/xfs/xfs_aops.c | 26 +++++++++++++-------------
> fs/xfs/xfs_aops.h | 2 +-
> 2 files changed, 14 insertions(+), 14 deletions(-)
>
> diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
> index c29ef69d1e51..df5955388adc 100644
> --- a/fs/xfs/xfs_aops.c
> +++ b/fs/xfs/xfs_aops.c
> @@ -153,7 +153,7 @@ xfs_setfilesize_trans_alloc(
> if (error)
> return error;
>
> - ioend->io_append_trans = tp;
> + ioend->io_private = tp;
>
> /*
> * We may pass freeze protection with a transaction. So tell lockdep
> @@ -220,7 +220,7 @@ xfs_setfilesize_ioend(
> int error)
> {
> struct xfs_inode *ip = XFS_I(ioend->io_inode);
> - struct xfs_trans *tp = ioend->io_append_trans;
> + struct xfs_trans *tp = ioend->io_private;
>
> /*
> * The transaction may have been allocated in the I/O submission thread,
> @@ -285,10 +285,10 @@ xfs_end_ioend(
> else if (ioend->io_type == IOMAP_UNWRITTEN)
> error = xfs_iomap_write_unwritten(ip, offset, size, false);
> else
> - ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
> + ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);
>
> done:
> - if (ioend->io_append_trans)
> + if (ioend->io_private)
> error = xfs_setfilesize_ioend(ioend, error);
> xfs_destroy_ioends(ioend, error);
> memalloc_nofs_restore(nofs_flag);
> @@ -321,13 +321,13 @@ xfs_ioend_can_merge(
> * as it is guaranteed to be clean.
> */
> static void
> -xfs_ioend_merge_append_transactions(
> +xfs_ioend_merge_private(
> struct xfs_ioend *ioend,
> struct xfs_ioend *next)
> {
> - if (!ioend->io_append_trans) {
> - ioend->io_append_trans = next->io_append_trans;
> - next->io_append_trans = NULL;
> + if (!ioend->io_private) {
> + ioend->io_private = next->io_private;
> + next->io_private = NULL;
> } else {
> xfs_setfilesize_ioend(next, -ECANCELED);
> }
> @@ -349,8 +349,8 @@ xfs_ioend_try_merge(
> break;
> list_move_tail(&next->io_list, &ioend->io_list);
> ioend->io_size += next->io_size;
> - if (next->io_append_trans)
> - xfs_ioend_merge_append_transactions(ioend, next);
> + if (next->io_private)
> + xfs_ioend_merge_private(ioend, next);
> }
> }
>
> @@ -415,7 +415,7 @@ xfs_end_bio(
>
> if (ioend->io_fork == XFS_COW_FORK ||
> ioend->io_type == IOMAP_UNWRITTEN ||
> - ioend->io_append_trans != NULL) {
> + ioend->io_private) {
> spin_lock_irqsave(&ip->i_ioend_lock, flags);
> if (list_empty(&ip->i_ioend_list))
> WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
> @@ -680,7 +680,7 @@ xfs_submit_ioend(
> (ioend->io_fork == XFS_COW_FORK ||
> ioend->io_type != IOMAP_UNWRITTEN) &&
> xfs_ioend_is_append(ioend) &&
> - !ioend->io_append_trans)
> + !ioend->io_private)
> status = xfs_setfilesize_trans_alloc(ioend);
>
> memalloc_nofs_restore(nofs_flag);
> @@ -729,7 +729,7 @@ xfs_alloc_ioend(
> ioend->io_inode = inode;
> ioend->io_size = 0;
> ioend->io_offset = offset;
> - ioend->io_append_trans = NULL;
> + ioend->io_private = NULL;
> ioend->io_bio = bio;
> return ioend;
> }
> diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
> index 4af8ec0115cd..6a45d675dcba 100644
> --- a/fs/xfs/xfs_aops.h
> +++ b/fs/xfs/xfs_aops.h
> @@ -18,7 +18,7 @@ struct xfs_ioend {
> struct inode *io_inode; /* file being written to */
> size_t io_size; /* size of the extent */
> xfs_off_t io_offset; /* offset in the file */
> - struct xfs_trans *io_append_trans;/* xact. for size update */
> + void *io_private; /* file system private data */
> struct bio *io_bio; /* bio being built */
> struct bio io_inline_bio; /* MUST BE LAST! */
> };
> --
> 2.20.1
>
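For non-XFS readers: io_private becomes an opaque per-ioend cookie.
XFS keeps its size-update transaction there; once the ioend is lifted
to the common code later in the series, another file system could
stash whatever completion state it needs, e.g. (hypothetical sketch):

	struct myfs_ioend_state {
		loff_t	new_isize;
	};

	static void
	myfs_prepare_ioend(struct iomap_ioend *ioend)
	{
		struct myfs_ioend_state *state;

		state = kzalloc(sizeof(*state), GFP_NOFS);
		if (state)
			state->new_isize = ioend->io_offset +
					   ioend->io_size;
		ioend->io_private = state;	/* freed at completion */
	}

The completion side then casts io_private back and is free to
interpret it however it likes, exactly as xfs_setfilesize_ioend does
with its transaction pointer.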
On Tue, Oct 15, 2019 at 05:43:39PM +0200, Christoph Hellwig wrote:
> In preparation for moving the writeback code to iomap.c, replace the
> XFS-specific COW fork concept with the iomap IOMAP_F_SHARED flag.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
Looks ok,
Reviewed-by: Darrick J. Wong <[email protected]>
--D
> ---
> fs/xfs/xfs_aops.c | 71 ++++++++++++++++++++++++++++++-----------------
> fs/xfs/xfs_aops.h | 2 +-
> 2 files changed, 46 insertions(+), 27 deletions(-)
>
> diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
> index df5955388adc..00fe40b35f72 100644
> --- a/fs/xfs/xfs_aops.c
> +++ b/fs/xfs/xfs_aops.c
> @@ -23,7 +23,6 @@
> */
> struct xfs_writepage_ctx {
> struct iomap iomap;
> - int fork;
> unsigned int data_seq;
> unsigned int cow_seq;
> struct xfs_ioend *ioend;
> @@ -272,7 +271,7 @@ xfs_end_ioend(
> */
> error = blk_status_to_errno(ioend->io_bio->bi_status);
> if (unlikely(error)) {
> - if (ioend->io_fork == XFS_COW_FORK)
> + if (ioend->io_flags & IOMAP_F_SHARED)
> xfs_reflink_cancel_cow_range(ip, offset, size, true);
> goto done;
> }
> @@ -280,7 +279,7 @@ xfs_end_ioend(
> /*
> * Success: commit the COW or unwritten blocks if needed.
> */
> - if (ioend->io_fork == XFS_COW_FORK)
> + if (ioend->io_flags & IOMAP_F_SHARED)
> error = xfs_reflink_end_cow(ip, offset, size);
> else if (ioend->io_type == IOMAP_UNWRITTEN)
> error = xfs_iomap_write_unwritten(ip, offset, size, false);
> @@ -304,7 +303,8 @@ xfs_ioend_can_merge(
> {
> if (ioend->io_bio->bi_status != next->io_bio->bi_status)
> return false;
> - if ((ioend->io_fork == XFS_COW_FORK) ^ (next->io_fork == XFS_COW_FORK))
> + if ((ioend->io_flags & IOMAP_F_SHARED) ^
> + (next->io_flags & IOMAP_F_SHARED))
> return false;
> if ((ioend->io_type == IOMAP_UNWRITTEN) ^
> (next->io_type == IOMAP_UNWRITTEN))
> @@ -404,6 +404,13 @@ xfs_end_io(
> }
> }
>
> +static inline bool xfs_ioend_needs_workqueue(struct xfs_ioend *ioend)
> +{
> + return ioend->io_private ||
> + ioend->io_type == IOMAP_UNWRITTEN ||
> + (ioend->io_flags & IOMAP_F_SHARED);
> +}
> +
> STATIC void
> xfs_end_bio(
> struct bio *bio)
> @@ -413,9 +420,7 @@ xfs_end_bio(
> struct xfs_mount *mp = ip->i_mount;
> unsigned long flags;
>
> - if (ioend->io_fork == XFS_COW_FORK ||
> - ioend->io_type == IOMAP_UNWRITTEN ||
> - ioend->io_private) {
> + if (xfs_ioend_needs_workqueue(ioend)) {
> spin_lock_irqsave(&ip->i_ioend_lock, flags);
> if (list_empty(&ip->i_ioend_list))
> WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
> @@ -444,7 +449,7 @@ xfs_imap_valid(
> * covers the offset. Be careful to check this first because the caller
> * can revalidate a COW mapping without updating the data seqno.
> */
> - if (wpc->fork == XFS_COW_FORK)
> + if (wpc->iomap.flags & IOMAP_F_SHARED)
> return true;
>
> /*
> @@ -474,6 +479,7 @@ static int
> xfs_convert_blocks(
> struct xfs_writepage_ctx *wpc,
> struct xfs_inode *ip,
> + int whichfork,
> loff_t offset)
> {
> int error;
> @@ -485,8 +491,8 @@ xfs_convert_blocks(
> * delalloc extent if free space is sufficiently fragmented.
> */
> do {
> - error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset,
> - &wpc->iomap, wpc->fork == XFS_COW_FORK ?
> + error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
> + &wpc->iomap, whichfork == XFS_COW_FORK ?
> &wpc->cow_seq : &wpc->data_seq);
> if (error)
> return error;
> @@ -507,6 +513,7 @@ xfs_map_blocks(
> xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
> xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
> xfs_fileoff_t cow_fsb = NULLFILEOFF;
> + int whichfork = XFS_DATA_FORK;
> struct xfs_bmbt_irec imap;
> struct xfs_iext_cursor icur;
> int retries = 0;
> @@ -555,7 +562,7 @@ xfs_map_blocks(
> wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
> xfs_iunlock(ip, XFS_ILOCK_SHARED);
>
> - wpc->fork = XFS_COW_FORK;
> + whichfork = XFS_COW_FORK;
> goto allocate_blocks;
> }
>
> @@ -578,8 +585,6 @@ xfs_map_blocks(
> wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
> xfs_iunlock(ip, XFS_ILOCK_SHARED);
>
> - wpc->fork = XFS_DATA_FORK;
> -
> /* landed in a hole or beyond EOF? */
> if (imap.br_startoff > offset_fsb) {
> imap.br_blockcount = imap.br_startoff - offset_fsb;
> @@ -604,10 +609,10 @@ xfs_map_blocks(
> goto allocate_blocks;
>
> xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
> - trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
> + trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
> return 0;
> allocate_blocks:
> - error = xfs_convert_blocks(wpc, ip, offset);
> + error = xfs_convert_blocks(wpc, ip, whichfork, offset);
> if (error) {
> /*
> * If we failed to find the extent in the COW fork we might have
> @@ -616,7 +621,7 @@ xfs_map_blocks(
> * the former case, but prevent additional retries to avoid
> * looping forever for the latter case.
> */
> - if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
> + if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
> goto retry;
> ASSERT(error != -EAGAIN);
> return error;
> @@ -627,7 +632,7 @@ xfs_map_blocks(
> * original delalloc one. Trim the return extent to the next COW
> * boundary again to force a re-lookup.
> */
> - if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
> + if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
> loff_t cow_offset = XFS_FSB_TO_B(mp, cow_fsb);
>
> if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
> @@ -636,7 +641,7 @@ xfs_map_blocks(
>
> ASSERT(wpc->iomap.offset <= offset);
> ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
> - trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
> + trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
> return 0;
> }
>
> @@ -670,14 +675,14 @@ xfs_submit_ioend(
> nofs_flag = memalloc_nofs_save();
>
> /* Convert CoW extents to regular */
> - if (!status && ioend->io_fork == XFS_COW_FORK) {
> + if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
> status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
> ioend->io_offset, ioend->io_size);
> }
>
> /* Reserve log space if we might write beyond the on-disk inode size. */
> if (!status &&
> - (ioend->io_fork == XFS_COW_FORK ||
> + ((ioend->io_flags & IOMAP_F_SHARED) ||
> ioend->io_type != IOMAP_UNWRITTEN) &&
> xfs_ioend_is_append(ioend) &&
> !ioend->io_private)
> @@ -724,8 +729,8 @@ xfs_alloc_ioend(
>
> ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
> INIT_LIST_HEAD(&ioend->io_list);
> - ioend->io_fork = wpc->fork;
> ioend->io_type = wpc->iomap.type;
> + ioend->io_flags = wpc->iomap.flags;
> ioend->io_inode = inode;
> ioend->io_size = 0;
> ioend->io_offset = offset;
> @@ -759,6 +764,24 @@ xfs_chain_bio(
> return new;
> }
>
> +static bool
> +xfs_can_add_to_ioend(
> + struct xfs_writepage_ctx *wpc,
> + xfs_off_t offset,
> + sector_t sector)
> +{
> + if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
> + (wpc->ioend->io_flags & IOMAP_F_SHARED))
> + return false;
> + if (wpc->iomap.type != wpc->ioend->io_type)
> + return false;
> + if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
> + return false;
> + if (sector != bio_end_sector(wpc->ioend->io_bio))
> + return false;
> + return true;
> +}
> +
> /*
> * Test to see if we have an existing ioend structure that we could append to
> * first, otherwise finish off the current ioend and start another.
> @@ -778,11 +801,7 @@ xfs_add_to_ioend(
> unsigned poff = offset & (PAGE_SIZE - 1);
> bool merged, same_page = false;
>
> - if (!wpc->ioend ||
> - wpc->fork != wpc->ioend->io_fork ||
> - wpc->iomap.type != wpc->ioend->io_type ||
> - sector != bio_end_sector(wpc->ioend->io_bio) ||
> - offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
> + if (!wpc->ioend || !xfs_can_add_to_ioend(wpc, offset, sector)) {
> if (wpc->ioend)
> list_add(&wpc->ioend->io_list, iolist);
> wpc->ioend = xfs_alloc_ioend(inode, wpc, offset, sector, wbc);
> diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
> index 6a45d675dcba..4a0226cdad4f 100644
> --- a/fs/xfs/xfs_aops.h
> +++ b/fs/xfs/xfs_aops.h
> @@ -13,8 +13,8 @@ extern struct bio_set xfs_ioend_bioset;
> */
> struct xfs_ioend {
> struct list_head io_list; /* next ioend in chain */
> - int io_fork; /* inode fork written back */
> u16 io_type;
> + u16 io_flags; /* IOMAP_F_* */
> struct inode *io_inode; /* file being written to */
> size_t io_size; /* size of the extent */
> xfs_off_t io_offset; /* offset in the file */
> --
> 2.20.1
>
On Tue, Oct 15, 2019 at 05:43:37PM +0200, Christoph Hellwig wrote:
> Introduce two nicely abstracted helpers, which can be moved to the
> iomap code later. Also use list_pop_entry and list_first_entry_or_null
> to simplify the code a bit.
No we don't use these.... ^^^^^^^^^^^^^^
Everything else looks ok.
--D
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
> fs/xfs/xfs_aops.c | 73 +++++++++++++++++++++++++++--------------------
> 1 file changed, 42 insertions(+), 31 deletions(-)
>
> diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
> index 91899de2be09..c29ef69d1e51 100644
> --- a/fs/xfs/xfs_aops.c
> +++ b/fs/xfs/xfs_aops.c
> @@ -116,6 +116,22 @@ xfs_destroy_ioend(
> }
> }
>
> +static void
> +xfs_destroy_ioends(
> + struct xfs_ioend *ioend,
> + int error)
> +{
> + struct list_head tmp;
> +
> + list_replace_init(&ioend->io_list, &tmp);
> + xfs_destroy_ioend(ioend, error);
> + while ((ioend = list_first_entry_or_null(&tmp, struct xfs_ioend,
> + io_list))) {
> + list_del_init(&ioend->io_list);
> + xfs_destroy_ioend(ioend, error);
> + }
> +}
> +
> /*
> * Fast and loose check if this write could update the on-disk inode size.
> */
> @@ -230,7 +246,6 @@ STATIC void
> xfs_end_ioend(
> struct xfs_ioend *ioend)
> {
> - struct list_head ioend_list;
> struct xfs_inode *ip = XFS_I(ioend->io_inode);
> xfs_off_t offset = ioend->io_offset;
> size_t size = ioend->io_size;
> @@ -275,16 +290,7 @@ xfs_end_ioend(
> done:
> if (ioend->io_append_trans)
> error = xfs_setfilesize_ioend(ioend, error);
> - list_replace_init(&ioend->io_list, &ioend_list);
> - xfs_destroy_ioend(ioend, error);
> -
> - while (!list_empty(&ioend_list)) {
> - ioend = list_first_entry(&ioend_list, struct xfs_ioend,
> - io_list);
> - list_del_init(&ioend->io_list);
> - xfs_destroy_ioend(ioend, error);
> - }
> -
> + xfs_destroy_ioends(ioend, error);
> memalloc_nofs_restore(nofs_flag);
> }
>
> @@ -333,17 +339,18 @@ xfs_ioend_try_merge(
> struct xfs_ioend *ioend,
> struct list_head *more_ioends)
> {
> - struct xfs_ioend *next_ioend;
> + struct xfs_ioend *next;
> +
> + INIT_LIST_HEAD(&ioend->io_list);
>
> - while (!list_empty(more_ioends)) {
> - next_ioend = list_first_entry(more_ioends, struct xfs_ioend,
> - io_list);
> - if (!xfs_ioend_can_merge(ioend, next_ioend))
> + while ((next = list_first_entry_or_null(more_ioends, struct xfs_ioend,
> + io_list))) {
> + if (!xfs_ioend_can_merge(ioend, next))
> break;
> - list_move_tail(&next_ioend->io_list, &ioend->io_list);
> - ioend->io_size += next_ioend->io_size;
> - if (next_ioend->io_append_trans)
> - xfs_ioend_merge_append_transactions(ioend, next_ioend);
> + list_move_tail(&next->io_list, &ioend->io_list);
> + ioend->io_size += next->io_size;
> + if (next->io_append_trans)
> + xfs_ioend_merge_append_transactions(ioend, next);
> }
> }
>
> @@ -366,29 +373,33 @@ xfs_ioend_compare(
> return 0;
> }
>
> +static void
> +xfs_sort_ioends(
> + struct list_head *ioend_list)
> +{
> + list_sort(NULL, ioend_list, xfs_ioend_compare);
> +}
> +
> /* Finish all pending io completions. */
> void
> xfs_end_io(
> struct work_struct *work)
> {
> - struct xfs_inode *ip;
> + struct xfs_inode *ip =
> + container_of(work, struct xfs_inode, i_ioend_work);
> struct xfs_ioend *ioend;
> - struct list_head completion_list;
> + struct list_head tmp;
> unsigned long flags;
>
> - ip = container_of(work, struct xfs_inode, i_ioend_work);
> -
> spin_lock_irqsave(&ip->i_ioend_lock, flags);
> - list_replace_init(&ip->i_ioend_list, &completion_list);
> + list_replace_init(&ip->i_ioend_list, &tmp);
> spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
>
> - list_sort(NULL, &completion_list, xfs_ioend_compare);
> -
> - while (!list_empty(&completion_list)) {
> - ioend = list_first_entry(&completion_list, struct xfs_ioend,
> - io_list);
> + xfs_sort_ioends(&tmp);
> + while ((ioend = list_first_entry_or_null(&tmp, struct xfs_ioend,
> + io_list))) {
> list_del_init(&ioend->io_list);
> - xfs_ioend_try_merge(ioend, &completion_list);
> + xfs_ioend_try_merge(ioend, &tmp);
> xfs_end_ioend(ioend);
> }
> }
> --
> 2.20.1
>
On Tue, Oct 15, 2019 at 05:43:45PM +0200, Christoph Hellwig wrote:
> Move the initialization of ia and ib to the declaration line and remove
> a superfluous else.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
Looks ok,
Reviewed-by: Darrick J. Wong <[email protected]>
--D
> ---
> fs/iomap/buffered-io.c | 7 +++----
> 1 file changed, 3 insertions(+), 4 deletions(-)
>
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index c57acc3d3120..0c7f185c8c52 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -1226,13 +1226,12 @@ EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
> static int
> iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b)
> {
> - struct iomap_ioend *ia, *ib;
> + struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
> + struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
>
> - ia = container_of(a, struct iomap_ioend, io_list);
> - ib = container_of(b, struct iomap_ioend, io_list);
> if (ia->io_offset < ib->io_offset)
> return -1;
> - else if (ia->io_offset > ib->io_offset)
> + if (ia->io_offset > ib->io_offset)
> return 1;
> return 0;
> }
> --
> 2.20.1
>
On Tue, Oct 15, 2019 at 05:43:41PM +0200, Christoph Hellwig wrote:
> Lift the xfs code for tracing address space operations to the iomap
> layer.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
Looks ok,
Reviewed-by: Darrick J. Wong <[email protected]>
--D
> ---
> fs/iomap/Makefile | 16 ++++++++------
> fs/iomap/buffered-io.c | 5 +++++
> fs/iomap/trace.c | 12 +++++++++++
> fs/iomap/trace.h | 49 ++++++++++++++++++++++++++++++++++++++++++
> fs/xfs/xfs_aops.c | 2 --
> fs/xfs/xfs_trace.h | 26 ----------------------
> 6 files changed, 75 insertions(+), 35 deletions(-)
> create mode 100644 fs/iomap/trace.c
> create mode 100644 fs/iomap/trace.h
>
> diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile
> index 93cd11938bf5..eef2722d93a1 100644
> --- a/fs/iomap/Makefile
> +++ b/fs/iomap/Makefile
> @@ -3,13 +3,15 @@
> # Copyright (c) 2019 Oracle.
> # All Rights Reserved.
> #
> -obj-$(CONFIG_FS_IOMAP) += iomap.o
>
> -iomap-y += \
> - apply.o \
> - buffered-io.o \
> - direct-io.o \
> - fiemap.o \
> - seek.o
> +ccflags-y += -I $(srctree)/$(src) # needed for trace events
> +
> +obj-$(CONFIG_FS_IOMAP) += iomap.o
>
> +iomap-y += trace.o \
> + apply.o \
> + buffered-io.o \
> + direct-io.o \
> + fiemap.o \
> + seek.o
> iomap-$(CONFIG_SWAP) += swapfile.o
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 181ee8477aad..d1620c3f2a4c 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -16,6 +16,7 @@
> #include <linux/bio.h>
> #include <linux/sched/signal.h>
> #include <linux/migrate.h>
> +#include "trace.h"
>
> #include "../internal.h"
>
> @@ -301,6 +302,8 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
> unsigned poff;
> loff_t ret;
>
> + trace_iomap_readpage(page->mapping->host, 1);
> +
> for (poff = 0; poff < PAGE_SIZE; poff += ret) {
> ret = iomap_apply(inode, page_offset(page) + poff,
> PAGE_SIZE - poff, 0, ops, &ctx,
> @@ -397,6 +400,8 @@ iomap_readpages(struct address_space *mapping, struct list_head *pages,
> loff_t last = page_offset(list_entry(pages->next, struct page, lru));
> loff_t length = last - pos + PAGE_SIZE, ret = 0;
>
> + trace_iomap_readpages(mapping->host, nr_pages);
> +
> while (length > 0) {
> ret = iomap_apply(mapping->host, pos, length, 0, ops,
> &ctx, iomap_readpages_actor);
> diff --git a/fs/iomap/trace.c b/fs/iomap/trace.c
> new file mode 100644
> index 000000000000..63ce9f0ce4dc
> --- /dev/null
> +++ b/fs/iomap/trace.c
> @@ -0,0 +1,12 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (c) 2019, Christoph Hellwig
> + */
> +#include <linux/iomap.h>
> +
> +/*
> + * We include this last to have the helpers above available for the trace
> + * event implementations.
> + */
> +#define CREATE_TRACE_POINTS
> +#include "trace.h"
> diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
> new file mode 100644
> index 000000000000..3900de1d871d
> --- /dev/null
> +++ b/fs/iomap/trace.h
> @@ -0,0 +1,49 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (c) 2009-2019, Christoph Hellwig
> + *
> + * NOTE: none of these tracepoints shall be considered a stable kernel ABI
> + * as they can change at any time.
> + */
> +#undef TRACE_SYSTEM
> +#define TRACE_SYSTEM iomap
> +
> +#if !defined(_IOMAP_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
> +#define _IOMAP_TRACE_H
> +
> +#include <linux/tracepoint.h>
> +
> +struct inode;
> +
> +DECLARE_EVENT_CLASS(iomap_readpage_class,
> + TP_PROTO(struct inode *inode, int nr_pages),
> + TP_ARGS(inode, nr_pages),
> + TP_STRUCT__entry(
> + __field(dev_t, dev)
> + __field(u64, ino)
> + __field(int, nr_pages)
> + ),
> + TP_fast_assign(
> + __entry->dev = inode->i_sb->s_dev;
> + __entry->ino = inode->i_ino;
> + __entry->nr_pages = nr_pages;
> + ),
> + TP_printk("dev %d:%d ino 0x%llx nr_pages %d",
> + MAJOR(__entry->dev), MINOR(__entry->dev),
> + __entry->ino,
> + __entry->nr_pages)
> +)
> +
> +#define DEFINE_READPAGE_EVENT(name) \
> +DEFINE_EVENT(iomap_readpage_class, name, \
> + TP_PROTO(struct inode *inode, int nr_pages), \
> + TP_ARGS(inode, nr_pages))
> +DEFINE_READPAGE_EVENT(iomap_readpage);
> +DEFINE_READPAGE_EVENT(iomap_readpages);
> +
> +#endif /* _IOMAP_TRACE_H */
> +
> +#undef TRACE_INCLUDE_PATH
> +#define TRACE_INCLUDE_PATH .
> +#define TRACE_INCLUDE_FILE trace
> +#include <trace/define_trace.h>
> diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
> index 00fe40b35f72..e2033b070f4a 100644
> --- a/fs/xfs/xfs_aops.c
> +++ b/fs/xfs/xfs_aops.c
> @@ -1184,7 +1184,6 @@ xfs_vm_readpage(
> struct file *unused,
> struct page *page)
> {
> - trace_xfs_vm_readpage(page->mapping->host, 1);
> return iomap_readpage(page, &xfs_iomap_ops);
> }
>
> @@ -1195,7 +1194,6 @@ xfs_vm_readpages(
> struct list_head *pages,
> unsigned nr_pages)
> {
> - trace_xfs_vm_readpages(mapping->host, nr_pages);
> return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
> }
>
> diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
> index eaae275ed430..eae4b29c174e 100644
> --- a/fs/xfs/xfs_trace.h
> +++ b/fs/xfs/xfs_trace.h
> @@ -1197,32 +1197,6 @@ DEFINE_PAGE_EVENT(xfs_writepage);
> DEFINE_PAGE_EVENT(xfs_releasepage);
> DEFINE_PAGE_EVENT(xfs_invalidatepage);
>
> -DECLARE_EVENT_CLASS(xfs_readpage_class,
> - TP_PROTO(struct inode *inode, int nr_pages),
> - TP_ARGS(inode, nr_pages),
> - TP_STRUCT__entry(
> - __field(dev_t, dev)
> - __field(xfs_ino_t, ino)
> - __field(int, nr_pages)
> - ),
> - TP_fast_assign(
> - __entry->dev = inode->i_sb->s_dev;
> - __entry->ino = inode->i_ino;
> - __entry->nr_pages = nr_pages;
> - ),
> - TP_printk("dev %d:%d ino 0x%llx nr_pages %d",
> - MAJOR(__entry->dev), MINOR(__entry->dev),
> - __entry->ino,
> - __entry->nr_pages)
> -)
> -
> -#define DEFINE_READPAGE_EVENT(name) \
> -DEFINE_EVENT(xfs_readpage_class, name, \
> - TP_PROTO(struct inode *inode, int nr_pages), \
> - TP_ARGS(inode, nr_pages))
> -DEFINE_READPAGE_EVENT(xfs_vm_readpage);
> -DEFINE_READPAGE_EVENT(xfs_vm_readpages);
> -
> DECLARE_EVENT_CLASS(xfs_imap_class,
> TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
> int whichfork, struct xfs_bmbt_irec *irec),
> --
> 2.20.1
>
On Tue, Oct 15, 2019 at 05:43:42PM +0200, Christoph Hellwig wrote:
> Take the xfs writeback code and move it to fs/iomap. A new structure
> with three methods is added as the abstraction from the generic writeback
> code to the file system. These methods are used to map blocks, submit an
> ioend, and cancel a page that encountered an error before it was added to
> an ioend.
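For reference, the new abstraction boils down to roughly the following
(a sketch only -- the struct itself lands in include/linux/iomap.h in
this patch, and the prototypes here are inferred from the callers below):

	struct iomap_writeback_ops {
		/* map the blocks covering @offset into wpc->iomap */
		int (*map_blocks)(struct iomap_writepage_ctx *wpc,
				struct inode *inode, loff_t offset);
		/* optional pre-submission hook, sees any earlier error */
		int (*submit_ioend)(struct iomap_ioend *ioend, int status);
		/* optional: cancel a page that never made it into an ioend */
		void (*discard_page)(struct page *page);
	};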
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
> fs/iomap/buffered-io.c | 564 ++++++++++++++++++++++++++++++++++-
> fs/iomap/trace.h | 39 +++
> fs/xfs/xfs_aops.c | 662 ++++-------------------------------------
> fs/xfs/xfs_aops.h | 17 --
> fs/xfs/xfs_super.c | 11 +-
> fs/xfs/xfs_trace.h | 39 ---
> include/linux/iomap.h | 59 ++++
> 7 files changed, 722 insertions(+), 669 deletions(-)
>
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index d1620c3f2a4c..00af08006cd3 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
<snip>
> @@ -1084,3 +1091,558 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
> return block_page_mkwrite_return(ret);
> }
> EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
> +
> +static void
> +iomap_finish_page_writeback(struct inode *inode, struct bio_vec *bvec,
> + int error)
> +{
> + struct iomap_page *iop = to_iomap_page(bvec->bv_page);
> +
> + if (error) {
> + SetPageError(bvec->bv_page);
> + mapping_set_error(inode->i_mapping, -EIO);
> + }
> +
> + WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
> + WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);
> +
> + if (!iop || atomic_dec_and_test(&iop->write_count))
> + end_page_writeback(bvec->bv_page);
> +}
> +
> +/*
> + * We're now finished for good with this ioend structure. Update the page
> + * state, release holds on bios, and finally free up memory. Do not use the
> + * ioend after this.
> + */
> +static void
> +iomap_finish_ioend(struct iomap_ioend *ioend, int error)
> +{
> + struct inode *inode = ioend->io_inode;
> + struct bio *bio = &ioend->io_inline_bio;
> + struct bio *last = ioend->io_bio, *next;
> + u64 start = bio->bi_iter.bi_sector;
> + bool quiet = bio_flagged(bio, BIO_QUIET);
> +
> + for (bio = &ioend->io_inline_bio; bio; bio = next) {
> + struct bio_vec *bvec;
> + struct bvec_iter_all iter_all;
> +
> + /*
> + * For the last bio, bi_private points to the ioend, so we
> + * need to explicitly end the iteration here.
> + */
> + if (bio == last)
> + next = NULL;
> + else
> + next = bio->bi_private;
> +
> + /* walk each page on bio, ending page IO on them */
> + bio_for_each_segment_all(bvec, bio, iter_all)
> + iomap_finish_page_writeback(inode, bvec, error);
> + bio_put(bio);
> + }
> +
> + if (unlikely(error && !quiet)) {
> + printk_ratelimited(KERN_ERR
> + "%s: writeback error on sector %llu",
> + inode->i_sb->s_id, start);
Ugh, /this/ message. It's pretty annoying how it doesn't tell you which
file or where in that file the write was lost.
I want to send in a patch atop your series to fix this, though I'm
also a little inclined to want to keep the message inside XFS.
Thoughts?
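(Purely as a hypothetical sketch of such a follow-up, not part of this
series: the inode and the ioend are both in scope in iomap_finish_ioend(),
so the message could carry the inode number and file offset as well, e.g.

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
			"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino,
			ioend->io_offset, start);
	}

with the exact wording obviously up for debate.)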
<snip>
> +/*
> + * Submit the bio for an ioend. We are passed an ioend with a bio attached to
> + * it, and we submit that bio. The ioend may be used for multiple bio
> + * submissions, so we only want to allocate an append transaction for the ioend
> + * once. In the case of multiple bio submission, each bio will take an IO
> + * reference to the ioend to ensure that the ioend completion is only done once
> + * all bios have been submitted and the ioend is really done.
> + *
> + * If @error is non-zero, it means that we have a situation where some part of
> + * the submission process has failed after we have marked pages for writeback
> + * and unlocked them. In this situation, we need to fail the bio and ioend
> + * rather than submit it to IO. This typically only happens on a filesystem
> + * shutdown.
> + */
> +static int
> +iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
> + int error)
> +{
> + ioend->io_bio->bi_private = ioend;
> + ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
> +
> + if (wpc->ops->submit_ioend)
> + error = wpc->ops->submit_ioend(ioend, error);
IIRC, Brian complained about this name ^^^^^^ in a previous version.
I think the documentation in the ops structure definition makes it clear
that "submit_ioend" is really a pre-submission hook. Everyone ok with
that?
(Everything else in the patch looks ok.)
--D
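For what it's worth, the contract as this patch uses it is: called once
per ioend, before submit_bio(), with any earlier error passed in and the
returned status propagated. A filesystem hook would look roughly like
this (a sketch modeled on xfs_submit_ioend() later in this series; the
myfs_* names are made up):

	static int
	myfs_submit_ioend(
		struct iomap_ioend	*ioend,
		int			status)
	{
		/* pre-submission work, e.g. reserve space for a size update */
		if (!status)
			status = myfs_prepare_append(ioend);

		/* take over completion if this ioend needs deferred work */
		if (myfs_ioend_needs_workqueue(ioend))
			ioend->io_bio->bi_end_io = myfs_end_bio;
		return status;
	}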
> + if (error) {
> + /*
> + * If we are failing the IO now, just mark the ioend with an
> + * error and finish it. This will run IO completion immediately
> + * as there is only one reference to the ioend at this point in
> + * time.
> + */
> + ioend->io_bio->bi_status = errno_to_blk_status(error);
> + bio_endio(ioend->io_bio);
> + return error;
> + }
> +
> + submit_bio(ioend->io_bio);
> + return 0;
> +}
> +
> +static struct iomap_ioend *
> +iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
> + loff_t offset, sector_t sector, struct writeback_control *wbc)
> +{
> + struct iomap_ioend *ioend;
> + struct bio *bio;
> +
> + bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset);
> + bio_set_dev(bio, wpc->iomap.bdev);
> + bio->bi_iter.bi_sector = sector;
> + bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
> + bio->bi_write_hint = inode->i_write_hint;
> + wbc_init_bio(wbc, bio);
> +
> + ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
> + INIT_LIST_HEAD(&ioend->io_list);
> + ioend->io_type = wpc->iomap.type;
> + ioend->io_flags = wpc->iomap.flags;
> + ioend->io_inode = inode;
> + ioend->io_size = 0;
> + ioend->io_offset = offset;
> + ioend->io_private = NULL;
> + ioend->io_bio = bio;
> + return ioend;
> +}
> +
> +/*
> + * Allocate a new bio, and chain the old bio to the new one.
> + *
> + * Note that we have to perform the chaining in this unintuitive order
> + * so that the bi_private linkage is set up in the right direction for the
> + * traversal in iomap_finish_ioend().
> + */
> +static struct bio *
> +iomap_chain_bio(struct bio *prev)
> +{
> + struct bio *new;
> +
> + new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
> + bio_copy_dev(new, prev);/* also copies over blkcg information */
> + new->bi_iter.bi_sector = bio_end_sector(prev);
> + new->bi_opf = prev->bi_opf;
> + new->bi_write_hint = prev->bi_write_hint;
> +
> + bio_chain(prev, new);
> + bio_get(prev); /* for iomap_finish_ioend */
> + submit_bio(prev);
> + return new;
> +}
> +
> +static bool
> +iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
> + sector_t sector)
> +{
> + if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
> + (wpc->ioend->io_flags & IOMAP_F_SHARED))
> + return false;
> + if (wpc->iomap.type != wpc->ioend->io_type)
> + return false;
> + if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
> + return false;
> + if (sector != bio_end_sector(wpc->ioend->io_bio))
> + return false;
> + return true;
> +}
> +
> +/*
> + * Test to see if we have an existing ioend structure that we could append to
> + * first, otherwise finish off the current ioend and start another.
> + */
> +static void
> +iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
> + struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
> + struct writeback_control *wbc, struct list_head *iolist)
> +{
> + sector_t sector = iomap_sector(&wpc->iomap, offset);
> + unsigned len = i_blocksize(inode);
> + unsigned poff = offset & (PAGE_SIZE - 1);
> + bool merged, same_page = false;
> +
> + if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
> + if (wpc->ioend)
> + list_add(&wpc->ioend->io_list, iolist);
> + wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
> + }
> +
> + merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
> + &same_page);
> +
> + if (iop && !same_page)
> + atomic_inc(&iop->write_count);
> + if (!merged) {
> + if (bio_full(wpc->ioend->io_bio, len)) {
> + wpc->ioend->io_bio =
> + iomap_chain_bio(wpc->ioend->io_bio);
> + }
> + bio_add_page(wpc->ioend->io_bio, page, len, poff);
> + }
> +
> + wpc->ioend->io_size += len;
> + wbc_account_cgroup_owner(wbc, page, len);
> +}
> +
> +/*
> + * We implement an immediate ioend submission policy here to avoid needing to
> + * chain multiple ioends and hence nest mempool allocations which can violate
> + * forward progress guarantees we need to provide. The current ioend we are
> + * adding blocks to is cached on the writepage context, and if the new block
> + * does not append to the cached ioend it will create a new ioend and cache that
> + * instead.
> + *
> + * If a new ioend is created and cached, the old ioend is returned and queued
> + * locally for submission once the entire page is processed or an error has been
> + * detected. While ioends are submitted immediately after they are completed,
> + * batching optimisations are provided by higher level block plugging.
> + *
> + * At the end of a writeback pass, there will be a cached ioend remaining on the
> + * writepage context that the caller will need to submit.
> + */
> +static int
> +iomap_writepage_map(struct iomap_writepage_ctx *wpc,
> + struct writeback_control *wbc, struct inode *inode,
> + struct page *page, u64 end_offset)
> +{
> + struct iomap_page *iop = to_iomap_page(page);
> + struct iomap_ioend *ioend, *next;
> + unsigned len = i_blocksize(inode);
> + u64 file_offset; /* file offset of page */
> + int error = 0, count = 0, i;
> + LIST_HEAD(submit_list);
> +
> + WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
> + WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);
> +
> + /*
> + * Walk through the page to find areas to write back. If we run off the
> + * end of the current map or find the current map invalid, grab a new
> + * one.
> + */
> + for (i = 0, file_offset = page_offset(page);
> + i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
> + i++, file_offset += len) {
> + if (iop && !test_bit(i, iop->uptodate))
> + continue;
> +
> + error = wpc->ops->map_blocks(wpc, inode, file_offset);
> + if (error)
> + break;
> + if (wpc->iomap.type == IOMAP_HOLE)
> + continue;
> + iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
> + &submit_list);
> + count++;
> + }
> +
> + WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
> + WARN_ON_ONCE(!PageLocked(page));
> + WARN_ON_ONCE(PageWriteback(page));
> +
> + /*
> + * On error, we have to fail the ioend here because we may have set
> + * pages under writeback, we have to make sure we run IO completion to
> + * mark the error state of the IO appropriately, so we can't cancel the
> + * ioend directly here. That means we have to mark this page as under
> + * writeback if we included any blocks from it in the ioend chain so
> + * that completion treats it correctly.
> + *
> + * If we didn't include the page in the ioend, then on error we can
> + * simply discard and unlock it as there are no other users of the page
> + * now. The caller will still need to trigger submission of outstanding
> + * ioends on the writepage context so they are treated correctly on
> + * error.
> + */
> + if (unlikely(error)) {
> + if (!count) {
> + if (wpc->ops->discard_page)
> + wpc->ops->discard_page(page);
> + ClearPageUptodate(page);
> + unlock_page(page);
> + goto done;
> + }
> +
> + /*
> + * If the page was not fully cleaned, we need to ensure that the
> + * higher layers come back to it correctly. That means we need
> + * to keep the page dirty, and for WB_SYNC_ALL writeback we need
> + * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
> + * so another attempt to write this page in this writeback sweep
> + * will be made.
> + */
> + set_page_writeback_keepwrite(page);
> + } else {
> + clear_page_dirty_for_io(page);
> + set_page_writeback(page);
> + }
> +
> + unlock_page(page);
> +
> + /*
> + * Preserve the original error if there was one, otherwise catch
> + * submission errors here and propagate into subsequent ioend
> + * submissions.
> + */
> + list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
> + int error2;
> +
> + list_del_init(&ioend->io_list);
> + error2 = iomap_submit_ioend(wpc, ioend, error);
> + if (error2 && !error)
> + error = error2;
> + }
> +
> + /*
> + * We can end up here with no error and nothing to write only if we race
> + * with a partial page truncate on a sub-page block sized filesystem.
> + */
> + if (!count)
> + end_page_writeback(page);
> +done:
> + mapping_set_error(page->mapping, error);
> + return error;
> +}
> +
> +/*
> + * Write out a dirty page.
> + *
> + * For delalloc space on the page we need to allocate space and flush it.
> + * For unwritten space on the page we need to start the conversion to
> + * regular allocated space.
> + */
> +static int
> +iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
> +{
> + struct iomap_writepage_ctx *wpc = data;
> + struct inode *inode = page->mapping->host;
> + pgoff_t end_index;
> + u64 end_offset;
> + loff_t offset;
> +
> + trace_iomap_writepage(inode, page, 0, 0);
> +
> + /*
> + * Refuse to write the page out if we are called from reclaim context.
> + *
> + * This avoids stack overflows when called from deeply used stacks in
> + * random callers for direct reclaim or memcg reclaim. We explicitly
> + * allow reclaim from kswapd as the stack usage there is relatively low.
> + *
> + * This should never happen except in the case of a VM regression so
> + * warn about it.
> + */
> + if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
> + PF_MEMALLOC))
> + goto redirty;
> +
> + /*
> + * Given that we do not allow direct reclaim to call us, we should
> + * never be called while in a filesystem transaction.
> + */
> + if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
> + goto redirty;
> +
> + /*
> + * Is this page beyond the end of the file?
> + *
> + * The page index is less than the end_index, adjust the end_offset
> + * to the highest offset that this page should represent.
> + * -----------------------------------------------------
> + * | file mapping | <EOF> |
> + * -----------------------------------------------------
> + * | Page ... | Page N-2 | Page N-1 | Page N | |
> + * ^--------------------------------^----------|--------
> + * | desired writeback range | see else |
> + * ---------------------------------^------------------|
> + */
> + offset = i_size_read(inode);
> + end_index = offset >> PAGE_SHIFT;
> + if (page->index < end_index)
> + end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
> + else {
> + /*
> + * Check whether the page to write out is beyond or straddles
> + * i_size or not.
> + * -------------------------------------------------------
> + * | file mapping | <EOF> |
> + * -------------------------------------------------------
> + * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
> + * ^--------------------------------^-----------|---------
> + * | | Straddles |
> + * ---------------------------------^-----------|--------|
> + */
> + unsigned offset_into_page = offset & (PAGE_SIZE - 1);
> +
> + /*
> + * Skip the page if it is fully outside i_size, e.g. due to a
> + * truncate operation that is in progress. We must redirty the
> + * page so that reclaim stops reclaiming it. Otherwise
> + * iomap_vm_releasepage() is called on it and gets confused.
> + *
> + * Note that the end_index is unsigned long, it would overflow
> + * if the given offset is greater than 16TB on 32-bit system
> + * and if we do check the page is fully outside i_size or not
> + * via "if (page->index >= end_index + 1)" as "end_index + 1"
> + * will be evaluated to 0. Hence this page will be redirtied
> + * and be written out repeatedly which would result in an
> + * infinite loop, the user program that performs this operation
> + * will hang. Instead, we can verify this situation by checking
> + * if the page to write is totally beyond the i_size or if its
> + * offset is just equal to the EOF.
> + */
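(To see the wraparound concretely, a standalone userspace illustration,
assuming 4k pages and a 32-bit unsigned long as on a 32-bit kernel:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* i_size one byte below 16TB; pgoff_t is 32 bits here */
		uint64_t isize = (1ULL << 44) - 1;
		uint32_t end_index = (uint32_t)(isize >> 12);

		/* prints end_index=0xffffffff end_index+1=0x0 */
		printf("end_index=0x%x end_index+1=0x%x\n",
				end_index, end_index + 1);
		return 0;
	}

so a "page->index >= end_index + 1" check can never trigger for such a
file.)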
> + if (page->index > end_index ||
> + (page->index == end_index && offset_into_page == 0))
> + goto redirty;
> +
> + /*
> + * The page straddles i_size. It must be zeroed out on each
> + * and every writepage invocation because it may be mmapped.
> + * "A file is mapped in multiples of the page size. For a file
> + * that is not a multiple of the page size, the remaining
> + * memory is zeroed when mapped, and writes to that region are
> + * not written out to the file."
> + */
> + zero_user_segment(page, offset_into_page, PAGE_SIZE);
> +
> + /* Adjust the end_offset to the end of file */
> + end_offset = offset;
> + }
> +
> + return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
> +
> +redirty:
> + redirty_page_for_writepage(wbc, page);
> + unlock_page(page);
> + return 0;
> +}
> +
> +int
> +iomap_writepage(struct page *page, struct writeback_control *wbc,
> + struct iomap_writepage_ctx *wpc,
> + const struct iomap_writeback_ops *ops)
> +{
> + int ret;
> +
> + wpc->ops = ops;
> + ret = iomap_do_writepage(page, wbc, wpc);
> + if (!wpc->ioend)
> + return ret;
> + return iomap_submit_ioend(wpc, wpc->ioend, ret);
> +}
> +EXPORT_SYMBOL_GPL(iomap_writepage);
> +
> +int
> +iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
> + struct iomap_writepage_ctx *wpc,
> + const struct iomap_writeback_ops *ops)
> +{
> + int ret;
> +
> + wpc->ops = ops;
> + ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
> + if (!wpc->ioend)
> + return ret;
> + return iomap_submit_ioend(wpc, wpc->ioend, ret);
> +}
> +EXPORT_SYMBOL_GPL(iomap_writepages);
> +
> +static int __init iomap_init(void)
> +{
> + return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
> + offsetof(struct iomap_ioend, io_inline_bio),
> + BIOSET_NEED_BVECS);
> +}
> +fs_initcall(iomap_init);
> diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
> index 3900de1d871d..3bc8d0263020 100644
> --- a/fs/iomap/trace.h
> +++ b/fs/iomap/trace.h
> @@ -41,6 +41,45 @@ DEFINE_EVENT(iomap_readpage_class, name, \
> DEFINE_READPAGE_EVENT(iomap_readpage);
> DEFINE_READPAGE_EVENT(iomap_readpages);
>
> +DECLARE_EVENT_CLASS(iomap_page_class,
> + TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
> + unsigned int len),
> + TP_ARGS(inode, page, off, len),
> + TP_STRUCT__entry(
> + __field(dev_t, dev)
> + __field(u64, ino)
> + __field(pgoff_t, pgoff)
> + __field(loff_t, size)
> + __field(unsigned long, offset)
> + __field(unsigned int, length)
> + ),
> + TP_fast_assign(
> + __entry->dev = inode->i_sb->s_dev;
> + __entry->ino = inode->i_ino;
> + __entry->pgoff = page_offset(page);
> + __entry->size = i_size_read(inode);
> + __entry->offset = off;
> + __entry->length = len;
> + ),
> + TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
> + "length %x",
> + MAJOR(__entry->dev), MINOR(__entry->dev),
> + __entry->ino,
> + __entry->pgoff,
> + __entry->size,
> + __entry->offset,
> + __entry->length)
> +)
> +
> +#define DEFINE_PAGE_EVENT(name) \
> +DEFINE_EVENT(iomap_page_class, name, \
> + TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
> + unsigned int len), \
> + TP_ARGS(inode, page, off, len))
> +DEFINE_PAGE_EVENT(iomap_writepage);
> +DEFINE_PAGE_EVENT(iomap_releasepage);
> +DEFINE_PAGE_EVENT(iomap_invalidatepage);
> +
> #endif /* _IOMAP_TRACE_H */
>
> #undef TRACE_INCLUDE_PATH
> diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
> index e2033b070f4a..546bd8a48cad 100644
> --- a/fs/xfs/xfs_aops.c
> +++ b/fs/xfs/xfs_aops.c
> @@ -18,16 +18,18 @@
> #include "xfs_bmap_util.h"
> #include "xfs_reflink.h"
>
> -/*
> - * structure owned by writepages passed to individual writepage calls
> - */
> struct xfs_writepage_ctx {
> - struct iomap iomap;
> + struct iomap_writepage_ctx ctx;
> unsigned int data_seq;
> unsigned int cow_seq;
> - struct xfs_ioend *ioend;
> };
>
> +static inline struct xfs_writepage_ctx *
> +XFS_WPC(struct iomap_writepage_ctx *ctx)
> +{
> + return container_of(ctx, struct xfs_writepage_ctx, ctx);
> +}
> +
> struct block_device *
> xfs_find_bdev_for_inode(
> struct inode *inode)
> @@ -54,87 +56,10 @@ xfs_find_daxdev_for_inode(
> return mp->m_ddev_targp->bt_daxdev;
> }
>
> -static void
> -xfs_finish_page_writeback(
> - struct inode *inode,
> - struct bio_vec *bvec,
> - int error)
> -{
> - struct iomap_page *iop = to_iomap_page(bvec->bv_page);
> -
> - if (error) {
> - SetPageError(bvec->bv_page);
> - mapping_set_error(inode->i_mapping, -EIO);
> - }
> -
> - ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
> - ASSERT(!iop || atomic_read(&iop->write_count) > 0);
> -
> - if (!iop || atomic_dec_and_test(&iop->write_count))
> - end_page_writeback(bvec->bv_page);
> -}
> -
> -/*
> - * We're now finished for good with this ioend structure. Update the page
> - * state, release holds on bios, and finally free up memory. Do not use the
> - * ioend after this.
> - */
> -STATIC void
> -xfs_destroy_ioend(
> - struct xfs_ioend *ioend,
> - int error)
> -{
> - struct inode *inode = ioend->io_inode;
> - struct bio *bio = &ioend->io_inline_bio;
> - struct bio *last = ioend->io_bio, *next;
> - u64 start = bio->bi_iter.bi_sector;
> - bool quiet = bio_flagged(bio, BIO_QUIET);
> -
> - for (bio = &ioend->io_inline_bio; bio; bio = next) {
> - struct bio_vec *bvec;
> - struct bvec_iter_all iter_all;
> -
> - /*
> - * For the last bio, bi_private points to the ioend, so we
> - * need to explicitly end the iteration here.
> - */
> - if (bio == last)
> - next = NULL;
> - else
> - next = bio->bi_private;
> -
> - /* walk each page on bio, ending page IO on them */
> - bio_for_each_segment_all(bvec, bio, iter_all)
> - xfs_finish_page_writeback(inode, bvec, error);
> - bio_put(bio);
> - }
> -
> - if (unlikely(error && !quiet)) {
> - xfs_err_ratelimited(XFS_I(inode)->i_mount,
> - "writeback error on sector %llu", start);
> - }
> -}
> -
> -static void
> -xfs_destroy_ioends(
> - struct xfs_ioend *ioend,
> - int error)
> -{
> - struct list_head tmp;
> -
> - list_replace_init(&ioend->io_list, &tmp);
> - xfs_destroy_ioend(ioend, error);
> - while ((ioend = list_first_entry_or_null(&tmp, struct xfs_ioend,
> - io_list))) {
> - list_del_init(&ioend->io_list);
> - xfs_destroy_ioend(ioend, error);
> - }
> -}
> -
> /*
> * Fast and loose check if this write could update the on-disk inode size.
> */
> -static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
> +static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
> {
> return ioend->io_offset + ioend->io_size >
> XFS_I(ioend->io_inode)->i_d.di_size;
> @@ -142,7 +67,7 @@ static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
>
> STATIC int
> xfs_setfilesize_trans_alloc(
> - struct xfs_ioend *ioend)
> + struct iomap_ioend *ioend)
> {
> struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
> struct xfs_trans *tp;
> @@ -215,7 +140,7 @@ xfs_setfilesize(
>
> STATIC int
> xfs_setfilesize_ioend(
> - struct xfs_ioend *ioend,
> + struct iomap_ioend *ioend,
> int error)
> {
> struct xfs_inode *ip = XFS_I(ioend->io_inode);
> @@ -243,7 +168,7 @@ xfs_setfilesize_ioend(
> */
> STATIC void
> xfs_end_ioend(
> - struct xfs_ioend *ioend)
> + struct iomap_ioend *ioend)
> {
> struct xfs_inode *ip = XFS_I(ioend->io_inode);
> xfs_off_t offset = ioend->io_offset;
> @@ -289,31 +214,10 @@ xfs_end_ioend(
> done:
> if (ioend->io_private)
> error = xfs_setfilesize_ioend(ioend, error);
> - xfs_destroy_ioends(ioend, error);
> + iomap_finish_ioends(ioend, error);
> memalloc_nofs_restore(nofs_flag);
> }
>
> -/*
> - * We can merge two adjacent ioends if they have the same set of work to do.
> - */
> -static bool
> -xfs_ioend_can_merge(
> - struct xfs_ioend *ioend,
> - struct xfs_ioend *next)
> -{
> - if (ioend->io_bio->bi_status != next->io_bio->bi_status)
> - return false;
> - if ((ioend->io_flags & IOMAP_F_SHARED) ^
> - (next->io_flags & IOMAP_F_SHARED))
> - return false;
> - if ((ioend->io_type == IOMAP_UNWRITTEN) ^
> - (next->io_type == IOMAP_UNWRITTEN))
> - return false;
> - if (ioend->io_offset + ioend->io_size != next->io_offset)
> - return false;
> - return true;
> -}
> -
> /*
> * If the to be merged ioend has a preallocated transaction for file
> * size updates we need to ensure the ioend it is merged into also
> @@ -322,8 +226,8 @@ xfs_ioend_can_merge(
> */
> static void
> xfs_ioend_merge_private(
> - struct xfs_ioend *ioend,
> - struct xfs_ioend *next)
> + struct iomap_ioend *ioend,
> + struct iomap_ioend *next)
> {
> if (!ioend->io_private) {
> ioend->io_private = next->io_private;
> @@ -333,53 +237,6 @@ xfs_ioend_merge_private(
> }
> }
>
> -/* Try to merge adjacent completions. */
> -STATIC void
> -xfs_ioend_try_merge(
> - struct xfs_ioend *ioend,
> - struct list_head *more_ioends)
> -{
> - struct xfs_ioend *next;
> -
> - INIT_LIST_HEAD(&ioend->io_list);
> -
> - while ((next = list_first_entry_or_null(more_ioends, struct xfs_ioend,
> - io_list))) {
> - if (!xfs_ioend_can_merge(ioend, next))
> - break;
> - list_move_tail(&next->io_list, &ioend->io_list);
> - ioend->io_size += next->io_size;
> - if (next->io_private)
> - xfs_ioend_merge_private(ioend, next);
> - }
> -}
> -
> -/* list_sort compare function for ioends */
> -static int
> -xfs_ioend_compare(
> - void *priv,
> - struct list_head *a,
> - struct list_head *b)
> -{
> - struct xfs_ioend *ia;
> - struct xfs_ioend *ib;
> -
> - ia = container_of(a, struct xfs_ioend, io_list);
> - ib = container_of(b, struct xfs_ioend, io_list);
> - if (ia->io_offset < ib->io_offset)
> - return -1;
> - else if (ia->io_offset > ib->io_offset)
> - return 1;
> - return 0;
> -}
> -
> -static void
> -xfs_sort_ioends(
> - struct list_head *ioend_list)
> -{
> - list_sort(NULL, ioend_list, xfs_ioend_compare);
> -}
> -
> /* Finish all pending io completions. */
> void
> xfs_end_io(
> @@ -387,7 +244,7 @@ xfs_end_io(
> {
> struct xfs_inode *ip =
> container_of(work, struct xfs_inode, i_ioend_work);
> - struct xfs_ioend *ioend;
> + struct iomap_ioend *ioend;
> struct list_head tmp;
> unsigned long flags;
>
> @@ -395,16 +252,16 @@ xfs_end_io(
> list_replace_init(&ip->i_ioend_list, &tmp);
> spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
>
> - xfs_sort_ioends(&tmp);
> - while ((ioend = list_first_entry_or_null(&tmp, struct xfs_ioend,
> + iomap_sort_ioends(&tmp);
> + while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
> io_list))) {
> list_del_init(&ioend->io_list);
> - xfs_ioend_try_merge(ioend, &tmp);
> + iomap_ioend_try_merge(ioend, &tmp, xfs_ioend_merge_private);
> xfs_end_ioend(ioend);
> }
> }
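Side note: iomap_ioend_try_merge() now takes the filesystem's private-data
merge callback as a third argument; judging from the xfs_ioend_merge_private()
prototype above, the expected callback signature is roughly:

	void (*merge_private)(struct iomap_ioend *ioend,
			struct iomap_ioend *next);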
>
> -static inline bool xfs_ioend_needs_workqueue(struct xfs_ioend *ioend)
> +static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
> {
> return ioend->io_private ||
> ioend->io_type == IOMAP_UNWRITTEN ||
> @@ -415,20 +272,18 @@ STATIC void
> xfs_end_bio(
> struct bio *bio)
> {
> - struct xfs_ioend *ioend = bio->bi_private;
> + struct iomap_ioend *ioend = bio->bi_private;
> struct xfs_inode *ip = XFS_I(ioend->io_inode);
> - struct xfs_mount *mp = ip->i_mount;
> unsigned long flags;
>
> - if (xfs_ioend_needs_workqueue(ioend)) {
> - spin_lock_irqsave(&ip->i_ioend_lock, flags);
> - if (list_empty(&ip->i_ioend_list))
> - WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
> - &ip->i_ioend_work));
> - list_add_tail(&ioend->io_list, &ip->i_ioend_list);
> - spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
> - } else
> - xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
> + ASSERT(xfs_ioend_needs_workqueue(ioend));
> +
> + spin_lock_irqsave(&ip->i_ioend_lock, flags);
> + if (list_empty(&ip->i_ioend_list))
> + WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
> + &ip->i_ioend_work));
> + list_add_tail(&ioend->io_list, &ip->i_ioend_list);
> + spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
> }
>
> /*
> @@ -437,7 +292,7 @@ xfs_end_bio(
> */
> static bool
> xfs_imap_valid(
> - struct xfs_writepage_ctx *wpc,
> + struct iomap_writepage_ctx *wpc,
> struct xfs_inode *ip,
> loff_t offset)
> {
> @@ -459,10 +314,10 @@ xfs_imap_valid(
> * checked (and found nothing at this offset) could have added
> * overlapping blocks.
> */
> - if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
> + if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
> return false;
> if (xfs_inode_has_cow_data(ip) &&
> - wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
> + XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
> return false;
> return true;
> }
> @@ -477,12 +332,18 @@ xfs_imap_valid(
> */
> static int
> xfs_convert_blocks(
> - struct xfs_writepage_ctx *wpc,
> + struct iomap_writepage_ctx *wpc,
> struct xfs_inode *ip,
> int whichfork,
> loff_t offset)
> {
> int error;
> + unsigned *seq;
> +
> + if (whichfork == XFS_COW_FORK)
> + seq = &XFS_WPC(wpc)->cow_seq;
> + else
> + seq = &XFS_WPC(wpc)->data_seq;
>
> /*
> * Attempt to allocate whatever delalloc extent currently backs offset
> @@ -492,8 +353,7 @@ xfs_convert_blocks(
> */
> do {
> error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
> - &wpc->iomap, whichfork == XFS_COW_FORK ?
> - &wpc->cow_seq : &wpc->data_seq);
> + &wpc->iomap, seq);
> if (error)
> return error;
> } while (wpc->iomap.offset + wpc->iomap.length <= offset);
> @@ -501,9 +361,9 @@ xfs_convert_blocks(
> return 0;
> }
>
> -STATIC int
> +static int
> xfs_map_blocks(
> - struct xfs_writepage_ctx *wpc,
> + struct iomap_writepage_ctx *wpc,
> struct inode *inode,
> loff_t offset)
> {
> @@ -559,7 +419,7 @@ xfs_map_blocks(
> xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
> cow_fsb = imap.br_startoff;
> if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
> - wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
> + XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
> xfs_iunlock(ip, XFS_ILOCK_SHARED);
>
> whichfork = XFS_COW_FORK;
> @@ -582,7 +442,7 @@ xfs_map_blocks(
> */
> if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
> imap.br_startoff = end_fsb; /* fake a hole past EOF */
> - wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
> + XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
> xfs_iunlock(ip, XFS_ILOCK_SHARED);
>
> /* landed in a hole or beyond EOF? */
> @@ -645,24 +505,9 @@ xfs_map_blocks(
> return 0;
> }
>
> -/*
> - * Submit the bio for an ioend. We are passed an ioend with a bio attached to
> - * it, and we submit that bio. The ioend may be used for multiple bio
> - * submissions, so we only want to allocate an append transaction for the ioend
> - * once. In the case of multiple bio submission, each bio will take an IO
> - * reference to the ioend to ensure that the ioend completion is only done once
> - * all bios have been submitted and the ioend is really done.
> - *
> - * If @status is non-zero, it means that we have a situation where some part of
> - * the submission process has failed after we have marked paged for writeback
> - * and unlocked them. In this situation, we need to fail the bio and ioend
> - * rather than submit it to IO. This typically only happens on a filesystem
> - * shutdown.
> - */
> -STATIC int
> +static int
> xfs_submit_ioend(
> - struct writeback_control *wbc,
> - struct xfs_ioend *ioend,
> + struct iomap_ioend *ioend,
> int status)
> {
> unsigned int nofs_flag;
> @@ -690,147 +535,9 @@ xfs_submit_ioend(
>
> memalloc_nofs_restore(nofs_flag);
>
> - ioend->io_bio->bi_private = ioend;
> - ioend->io_bio->bi_end_io = xfs_end_bio;
> -
> - /*
> - * If we are failing the IO now, just mark the ioend with an
> - * error and finish it. This will run IO completion immediately
> - * as there is only one reference to the ioend at this point in
> - * time.
> - */
> - if (status) {
> - ioend->io_bio->bi_status = errno_to_blk_status(status);
> - bio_endio(ioend->io_bio);
> - return status;
> - }
> -
> - submit_bio(ioend->io_bio);
> - return 0;
> -}
> -
> -static struct xfs_ioend *
> -xfs_alloc_ioend(
> - struct inode *inode,
> - struct xfs_writepage_ctx *wpc,
> - xfs_off_t offset,
> - sector_t sector,
> - struct writeback_control *wbc)
> -{
> - struct xfs_ioend *ioend;
> - struct bio *bio;
> -
> - bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
> - bio_set_dev(bio, wpc->iomap.bdev);
> - bio->bi_iter.bi_sector = sector;
> - bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
> - bio->bi_write_hint = inode->i_write_hint;
> - wbc_init_bio(wbc, bio);
> -
> - ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
> - INIT_LIST_HEAD(&ioend->io_list);
> - ioend->io_type = wpc->iomap.type;
> - ioend->io_flags = wpc->iomap.flags;
> - ioend->io_inode = inode;
> - ioend->io_size = 0;
> - ioend->io_offset = offset;
> - ioend->io_private = NULL;
> - ioend->io_bio = bio;
> - return ioend;
> -}
> -
> -/*
> - * Allocate a new bio, and chain the old bio to the new one.
> - *
> - * Note that we have to do perform the chaining in this unintuitive order
> - * so that the bi_private linkage is set up in the right direction for the
> - * traversal in xfs_destroy_ioend().
> - */
> -static struct bio *
> -xfs_chain_bio(
> - struct bio *prev)
> -{
> - struct bio *new;
> -
> - new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
> - bio_copy_dev(new, prev);/* also copies over blkcg information */
> - new->bi_iter.bi_sector = bio_end_sector(prev);
> - new->bi_opf = prev->bi_opf;
> - new->bi_write_hint = prev->bi_write_hint;
> -
> - bio_chain(prev, new);
> - bio_get(prev); /* for xfs_destroy_ioend */
> - submit_bio(prev);
> - return new;
> -}
> -
> -static bool
> -xfs_can_add_to_ioend(
> - struct xfs_writepage_ctx *wpc,
> - xfs_off_t offset,
> - sector_t sector)
> -{
> - if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
> - (wpc->ioend->io_flags & IOMAP_F_SHARED))
> - return false;
> - if (wpc->iomap.type != wpc->ioend->io_type)
> - return false;
> - if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
> - return false;
> - if (sector != bio_end_sector(wpc->ioend->io_bio))
> - return false;
> - return true;
> -}
> -
> -/*
> - * Test to see if we have an existing ioend structure that we could append to
> - * first, otherwise finish off the current ioend and start another.
> - */
> -STATIC void
> -xfs_add_to_ioend(
> - struct inode *inode,
> - xfs_off_t offset,
> - struct page *page,
> - struct iomap_page *iop,
> - struct xfs_writepage_ctx *wpc,
> - struct writeback_control *wbc,
> - struct list_head *iolist)
> -{
> - sector_t sector = iomap_sector(&wpc->iomap, offset);
> - unsigned len = i_blocksize(inode);
> - unsigned poff = offset & (PAGE_SIZE - 1);
> - bool merged, same_page = false;
> -
> - if (!wpc->ioend || !xfs_can_add_to_ioend(wpc, offset, sector)) {
> - if (wpc->ioend)
> - list_add(&wpc->ioend->io_list, iolist);
> - wpc->ioend = xfs_alloc_ioend(inode, wpc, offset, sector, wbc);
> - }
> -
> - merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
> - &same_page);
> -
> - if (iop && !same_page)
> - atomic_inc(&iop->write_count);
> -
> - if (!merged) {
> - if (bio_full(wpc->ioend->io_bio, len))
> - wpc->ioend->io_bio = xfs_chain_bio(wpc->ioend->io_bio);
> - bio_add_page(wpc->ioend->io_bio, page, len, poff);
> - }
> -
> - wpc->ioend->io_size += len;
> - wbc_account_cgroup_owner(wbc, page, len);
> -}
> -
> -STATIC void
> -xfs_vm_invalidatepage(
> - struct page *page,
> - unsigned int offset,
> - unsigned int length)
> -{
> - trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
> - iomap_invalidatepage(page, offset, length);
> + if (xfs_ioend_needs_workqueue(ioend))
> + ioend->io_bio->bi_end_io = xfs_end_bio;
> + return status;
> }
>
> /*
> @@ -844,8 +551,8 @@ xfs_vm_invalidatepage(
> * transaction as there is no space left for block reservation (typically why we
> * see a ENOSPC in writeback).
> */
> -STATIC void
> -xfs_aops_discard_page(
> +static void
> +xfs_discard_page(
> struct page *page)
> {
> struct inode *inode = page->mapping->host;
> @@ -867,246 +574,14 @@ xfs_aops_discard_page(
> if (error && !XFS_FORCED_SHUTDOWN(mp))
> xfs_alert(mp, "page discard unable to remove delalloc mapping.");
> out_invalidate:
> - xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
> + iomap_invalidatepage(page, 0, PAGE_SIZE);
> }
>
> -/*
> - * We implement an immediate ioend submission policy here to avoid needing to
> - * chain multiple ioends and hence nest mempool allocations which can violate
> - * forward progress guarantees we need to provide. The current ioend we are
> - * adding blocks to is cached on the writepage context, and if the new block
> - * does not append to the cached ioend it will create a new ioend and cache that
> - * instead.
> - *
> - * If a new ioend is created and cached, the old ioend is returned and queued
> - * locally for submission once the entire page is processed or an error has been
> - * detected. While ioends are submitted immediately after they are completed,
> - * batching optimisations are provided by higher level block plugging.
> - *
> - * At the end of a writeback pass, there will be a cached ioend remaining on the
> - * writepage context that the caller will need to submit.
> - */
> -static int
> -xfs_writepage_map(
> - struct xfs_writepage_ctx *wpc,
> - struct writeback_control *wbc,
> - struct inode *inode,
> - struct page *page,
> - uint64_t end_offset)
> -{
> - LIST_HEAD(submit_list);
> - struct iomap_page *iop = to_iomap_page(page);
> - unsigned len = i_blocksize(inode);
> - struct xfs_ioend *ioend, *next;
> - uint64_t file_offset; /* file offset of page */
> - int error = 0, count = 0, i;
> -
> - ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
> - ASSERT(!iop || atomic_read(&iop->write_count) == 0);
> -
> - /*
> - * Walk through the page to find areas to write back. If we run off the
> - * end of the current map or find the current map invalid, grab a new
> - * one.
> - */
> - for (i = 0, file_offset = page_offset(page);
> - i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
> - i++, file_offset += len) {
> - if (iop && !test_bit(i, iop->uptodate))
> - continue;
> -
> - error = xfs_map_blocks(wpc, inode, file_offset);
> - if (error)
> - break;
> - if (wpc->iomap.type == IOMAP_HOLE)
> - continue;
> - xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
> - &submit_list);
> - count++;
> - }
> -
> - ASSERT(wpc->ioend || list_empty(&submit_list));
> - ASSERT(PageLocked(page));
> - ASSERT(!PageWriteback(page));
> -
> - /*
> - * On error, we have to fail the ioend here because we may have set
> - * pages under writeback, we have to make sure we run IO completion to
> - * mark the error state of the IO appropriately, so we can't cancel the
> - * ioend directly here. That means we have to mark this page as under
> - * writeback if we included any blocks from it in the ioend chain so
> - * that completion treats it correctly.
> - *
> - * If we didn't include the page in the ioend, the on error we can
> - * simply discard and unlock it as there are no other users of the page
> - * now. The caller will still need to trigger submission of outstanding
> - * ioends on the writepage context so they are treated correctly on
> - * error.
> - */
> - if (unlikely(error)) {
> - if (!count) {
> - xfs_aops_discard_page(page);
> - ClearPageUptodate(page);
> - unlock_page(page);
> - goto done;
> - }
> -
> - /*
> - * If the page was not fully cleaned, we need to ensure that the
> - * higher layers come back to it correctly. That means we need
> - * to keep the page dirty, and for WB_SYNC_ALL writeback we need
> - * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
> - * so another attempt to write this page in this writeback sweep
> - * will be made.
> - */
> - set_page_writeback_keepwrite(page);
> - } else {
> - clear_page_dirty_for_io(page);
> - set_page_writeback(page);
> - }
> -
> - unlock_page(page);
> -
> - /*
> - * Preserve the original error if there was one, otherwise catch
> - * submission errors here and propagate into subsequent ioend
> - * submissions.
> - */
> - list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
> - int error2;
> -
> - list_del_init(&ioend->io_list);
> - error2 = xfs_submit_ioend(wbc, ioend, error);
> - if (error2 && !error)
> - error = error2;
> - }
> -
> - /*
> - * We can end up here with no error and nothing to write only if we race
> - * with a partial page truncate on a sub-page block sized filesystem.
> - */
> - if (!count)
> - end_page_writeback(page);
> -done:
> - mapping_set_error(page->mapping, error);
> - return error;
> -}
> -
> -/*
> - * Write out a dirty page.
> - *
> - * For delalloc space on the page we need to allocate space and flush it.
> - * For unwritten space on the page we need to start the conversion to
> - * regular allocated space.
> - */
> -STATIC int
> -xfs_do_writepage(
> - struct page *page,
> - struct writeback_control *wbc,
> - void *data)
> -{
> - struct xfs_writepage_ctx *wpc = data;
> - struct inode *inode = page->mapping->host;
> - loff_t offset;
> - uint64_t end_offset;
> - pgoff_t end_index;
> -
> - trace_xfs_writepage(inode, page, 0, 0);
> -
> - /*
> - * Refuse to write the page out if we are called from reclaim context.
> - *
> - * This avoids stack overflows when called from deeply used stacks in
> - * random callers for direct reclaim or memcg reclaim. We explicitly
> - * allow reclaim from kswapd as the stack usage there is relatively low.
> - *
> - * This should never happen except in the case of a VM regression so
> - * warn about it.
> - */
> - if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
> - PF_MEMALLOC))
> - goto redirty;
> -
> - /*
> - * Given that we do not allow direct reclaim to call us, we should
> - * never be called while in a filesystem transaction.
> - */
> - if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
> - goto redirty;
> -
> - /*
> - * Is this page beyond the end of the file?
> - *
> - * The page index is less than the end_index, adjust the end_offset
> - * to the highest offset that this page should represent.
> - * -----------------------------------------------------
> - * | file mapping | <EOF> |
> - * -----------------------------------------------------
> - * | Page ... | Page N-2 | Page N-1 | Page N | |
> - * ^--------------------------------^----------|--------
> - * | desired writeback range | see else |
> - * ---------------------------------^------------------|
> - */
> - offset = i_size_read(inode);
> - end_index = offset >> PAGE_SHIFT;
> - if (page->index < end_index)
> - end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
> - else {
> - /*
> - * Check whether the page to write out is beyond or straddles
> - * i_size or not.
> - * -------------------------------------------------------
> - * | file mapping | <EOF> |
> - * -------------------------------------------------------
> - * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
> - * ^--------------------------------^-----------|---------
> - * | | Straddles |
> - * ---------------------------------^-----------|--------|
> - */
> - unsigned offset_into_page = offset & (PAGE_SIZE - 1);
> -
> - /*
> - * Skip the page if it is fully outside i_size, e.g. due to a
> - * truncate operation that is in progress. We must redirty the
> - * page so that reclaim stops reclaiming it. Otherwise
> - * xfs_vm_releasepage() is called on it and gets confused.
> - *
> - * Note that the end_index is unsigned long, it would overflow
> - * if the given offset is greater than 16TB on 32-bit system
> - * and if we do check the page is fully outside i_size or not
> - * via "if (page->index >= end_index + 1)" as "end_index + 1"
> - * will be evaluated to 0. Hence this page will be redirtied
> - * and be written out repeatedly which would result in an
> - * infinite loop, the user program that perform this operation
> - * will hang. Instead, we can verify this situation by checking
> - * if the page to write is totally beyond the i_size or if it's
> - * offset is just equal to the EOF.
> - */
> - if (page->index > end_index ||
> - (page->index == end_index && offset_into_page == 0))
> - goto redirty;
> -
> - /*
> - * The page straddles i_size. It must be zeroed out on each
> - * and every writepage invocation because it may be mmapped.
> - * "A file is mapped in multiples of the page size. For a file
> - * that is not a multiple of the page size, the remaining
> - * memory is zeroed when mapped, and writes to that region are
> - * not written out to the file."
> - */
> - zero_user_segment(page, offset_into_page, PAGE_SIZE);
> -
> - /* Adjust the end_offset to the end of file */
> - end_offset = offset;
> - }
> -
> - return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
> -
> -redirty:
> - redirty_page_for_writepage(wbc, page);
> - unlock_page(page);
> - return 0;
> -}
> +static const struct iomap_writeback_ops xfs_writeback_ops = {
> + .map_blocks = xfs_map_blocks,
> + .submit_ioend = xfs_submit_ioend,
> + .discard_page = xfs_discard_page,
> +};
>
> STATIC int
> xfs_vm_writepage(
> @@ -1114,12 +589,8 @@ xfs_vm_writepage(
> struct writeback_control *wbc)
> {
> struct xfs_writepage_ctx wpc = { };
> - int ret;
>
> - ret = xfs_do_writepage(page, wbc, &wpc);
> - if (wpc.ioend)
> - ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
> - return ret;
> + return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops);
> }
>
> STATIC int
> @@ -1128,13 +599,9 @@ xfs_vm_writepages(
> struct writeback_control *wbc)
> {
> struct xfs_writepage_ctx wpc = { };
> - int ret;
>
> xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
> - ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
> - if (wpc.ioend)
> - ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
> - return ret;
> + return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
> }
>
> STATIC int
> @@ -1147,15 +614,6 @@ xfs_dax_writepages(
> xfs_find_bdev_for_inode(mapping->host), wbc);
> }
>
> -STATIC int
> -xfs_vm_releasepage(
> - struct page *page,
> - gfp_t gfp_mask)
> -{
> - trace_xfs_releasepage(page->mapping->host, page, 0, 0);
> - return iomap_releasepage(page, gfp_mask);
> -}
> -
> STATIC sector_t
> xfs_vm_bmap(
> struct address_space *mapping,
> @@ -1213,8 +671,8 @@ const struct address_space_operations xfs_address_space_operations = {
> .writepage = xfs_vm_writepage,
> .writepages = xfs_vm_writepages,
> .set_page_dirty = iomap_set_page_dirty,
> - .releasepage = xfs_vm_releasepage,
> - .invalidatepage = xfs_vm_invalidatepage,
> + .releasepage = iomap_releasepage,
> + .invalidatepage = iomap_invalidatepage,
> .bmap = xfs_vm_bmap,
> .direct_IO = noop_direct_IO,
> .migratepage = iomap_migrate_page,
> diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
> index 4a0226cdad4f..687b11f34fa2 100644
> --- a/fs/xfs/xfs_aops.h
> +++ b/fs/xfs/xfs_aops.h
> @@ -6,23 +6,6 @@
> #ifndef __XFS_AOPS_H__
> #define __XFS_AOPS_H__
>
> -extern struct bio_set xfs_ioend_bioset;
> -
> -/*
> - * Structure for buffered I/O completions.
> - */
> -struct xfs_ioend {
> - struct list_head io_list; /* next ioend in chain */
> - u16 io_type;
> - u16 io_flags; /* IOMAP_F_* */
> - struct inode *io_inode; /* file being written to */
> - size_t io_size; /* size of the extent */
> - xfs_off_t io_offset; /* offset in the file */
> - void *io_private; /* file system private data */
> - struct bio *io_bio; /* bio being built */
> - struct bio io_inline_bio; /* MUST BE LAST! */
> -};
> -
> extern const struct address_space_operations xfs_address_space_operations;
> extern const struct address_space_operations xfs_dax_aops;
>
> diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
> index 8d1df9f8be07..0a8cf6b87a21 100644
> --- a/fs/xfs/xfs_super.c
> +++ b/fs/xfs/xfs_super.c
> @@ -40,7 +40,6 @@
> #include <linux/parser.h>
>
> static const struct super_operations xfs_super_operations;
> -struct bio_set xfs_ioend_bioset;
>
> static struct kset *xfs_kset; /* top-level xfs sysfs dir */
> #ifdef DEBUG
> @@ -1853,15 +1852,10 @@ MODULE_ALIAS_FS("xfs");
> STATIC int __init
> xfs_init_zones(void)
> {
> - if (bioset_init(&xfs_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
> - offsetof(struct xfs_ioend, io_inline_bio),
> - BIOSET_NEED_BVECS))
> - goto out;
> -
> xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
> "xfs_log_ticket");
> if (!xfs_log_ticket_zone)
> - goto out_free_ioend_bioset;
> + goto out;
>
> xfs_bmap_free_item_zone = kmem_zone_init(
> sizeof(struct xfs_extent_free_item),
> @@ -1996,8 +1990,6 @@ xfs_init_zones(void)
> kmem_zone_destroy(xfs_bmap_free_item_zone);
> out_destroy_log_ticket_zone:
> kmem_zone_destroy(xfs_log_ticket_zone);
> - out_free_ioend_bioset:
> - bioset_exit(&xfs_ioend_bioset);
> out:
> return -ENOMEM;
> }
> @@ -2028,7 +2020,6 @@ xfs_destroy_zones(void)
> kmem_zone_destroy(xfs_btree_cur_zone);
> kmem_zone_destroy(xfs_bmap_free_item_zone);
> kmem_zone_destroy(xfs_log_ticket_zone);
> - bioset_exit(&xfs_ioend_bioset);
> }
>
> STATIC int __init
> diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
> index eae4b29c174e..cbb23d7a3554 100644
> --- a/fs/xfs/xfs_trace.h
> +++ b/fs/xfs/xfs_trace.h
> @@ -1158,45 +1158,6 @@ DEFINE_RW_EVENT(xfs_file_buffered_write);
> DEFINE_RW_EVENT(xfs_file_direct_write);
> DEFINE_RW_EVENT(xfs_file_dax_write);
>
> -DECLARE_EVENT_CLASS(xfs_page_class,
> - TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
> - unsigned int len),
> - TP_ARGS(inode, page, off, len),
> - TP_STRUCT__entry(
> - __field(dev_t, dev)
> - __field(xfs_ino_t, ino)
> - __field(pgoff_t, pgoff)
> - __field(loff_t, size)
> - __field(unsigned long, offset)
> - __field(unsigned int, length)
> - ),
> - TP_fast_assign(
> - __entry->dev = inode->i_sb->s_dev;
> - __entry->ino = XFS_I(inode)->i_ino;
> - __entry->pgoff = page_offset(page);
> - __entry->size = i_size_read(inode);
> - __entry->offset = off;
> - __entry->length = len;
> - ),
> - TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
> - "length %x",
> - MAJOR(__entry->dev), MINOR(__entry->dev),
> - __entry->ino,
> - __entry->pgoff,
> - __entry->size,
> - __entry->offset,
> - __entry->length)
> -)
> -
> -#define DEFINE_PAGE_EVENT(name) \
> -DEFINE_EVENT(xfs_page_class, name, \
> - TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
> - unsigned int len), \
> - TP_ARGS(inode, page, off, len))
> -DEFINE_PAGE_EVENT(xfs_writepage);
> -DEFINE_PAGE_EVENT(xfs_releasepage);
> -DEFINE_PAGE_EVENT(xfs_invalidatepage);
> -
> DECLARE_EVENT_CLASS(xfs_imap_class,
> TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
> int whichfork, struct xfs_bmbt_irec *irec),
> diff --git a/include/linux/iomap.h b/include/linux/iomap.h
> index 7aa5d6117936..0b399718c387 100644
> --- a/include/linux/iomap.h
> +++ b/include/linux/iomap.h
> @@ -4,6 +4,7 @@
>
> #include <linux/atomic.h>
> #include <linux/bitmap.h>
> +#include <linux/blk_types.h>
> #include <linux/mm.h>
> #include <linux/types.h>
> #include <linux/mm_types.h>
> @@ -12,6 +13,7 @@
> struct address_space;
> struct fiemap_extent_info;
> struct inode;
> +struct iomap_writepage_ctx;
> struct iov_iter;
> struct kiocb;
> struct page;
> @@ -183,6 +185,63 @@ loff_t iomap_seek_data(struct inode *inode, loff_t offset,
> sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
> const struct iomap_ops *ops);
>
> +/*
> + * Structure for writeback I/O completions.
> + */
> +struct iomap_ioend {
> + struct list_head io_list; /* next ioend in chain */
> + u16 io_type;
> + u16 io_flags; /* IOMAP_F_* */
> + struct inode *io_inode; /* file being written to */
> + size_t io_size; /* size of the extent */
> + loff_t io_offset; /* offset in the file */
> + void *io_private; /* file system private data */
> + struct bio *io_bio; /* bio being built */
> + struct bio io_inline_bio; /* MUST BE LAST! */
> +};
> +
> +struct iomap_writeback_ops {
> + /*
> + * Required, maps the blocks so that writeback can be performed on
> + * the range starting at offset.
> + */
> + int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
> + loff_t offset);
> +
> + /*
> + * Optional, allows the file systems to perform actions just before
> + * submitting the bio and/or override the bio end_io handler for complex
> + * operations like copy on write extent manipulation or unwritten extent
> + * conversions.
> + */
> + int (*submit_ioend)(struct iomap_ioend *ioend, int status);
> +
> + /*
> + * Optional, allows the file system to discard state on a page where
> + * we failed to submit any I/O.
> + */
> + void (*discard_page)(struct page *page);
> +};
> +
> +struct iomap_writepage_ctx {
> + struct iomap iomap;
> + struct iomap_ioend *ioend;
> + const struct iomap_writeback_ops *ops;
> +};
> +
> +void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
> +void iomap_ioend_try_merge(struct iomap_ioend *ioend,
> + struct list_head *more_ioends,
> + void (*merge_private)(struct iomap_ioend *ioend,
> + struct iomap_ioend *next));
> +void iomap_sort_ioends(struct list_head *ioend_list);
> +int iomap_writepage(struct page *page, struct writeback_control *wbc,
> + struct iomap_writepage_ctx *wpc,
> + const struct iomap_writeback_ops *ops);
> +int iomap_writepages(struct address_space *mapping,
> + struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
> + const struct iomap_writeback_ops *ops);
> +
> /*
> * Flags for direct I/O ->end_io:
> */
> --
> 2.20.1
>
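[For orientation, here is a minimal sketch of how a file system other than
XFS could wire up the interface added above. Only the ->map_blocks method
is required; everything prefixed myfs_ is hypothetical.]

/* Hypothetical declaration for the sketch below. */
int myfs_get_extent(struct inode *inode, loff_t offset, struct iomap *iomap);

static int
myfs_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
		loff_t offset)
{
	/* Reuse the cached mapping if it still covers this offset. */
	if (offset >= wpc->iomap.offset &&
	    offset < wpc->iomap.offset + wpc->iomap.length)
		return 0;

	/* Stand-in for the file system's real block mapping lookup. */
	return myfs_get_extent(inode, offset, &wpc->iomap);
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.map_blocks	= myfs_map_blocks,
};

static int
myfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &myfs_writeback_ops);
}
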
On Tue, Oct 15, 2019 at 05:43:34PM +0200, Christoph Hellwig wrote:
> Currently we don't overwrite the flags field in the iomap in
> xfs_bmbt_to_iomap. This works fine with 0-initialized iomaps on stack,
> but is harmful once we want to be able to reuse an iomap in the
> writeback code. Replace the shared parameter with a set of initial
> flags and thus ensure the flags field is always reinitialized.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
Looks fine.
Reviewed-by: Dave Chinner <[email protected]>
--
Dave Chinner
[email protected]
On Tue, Oct 15, 2019 at 05:43:36PM +0200, Christoph Hellwig wrote:
> In preparation for moving the XFS writeback code to fs/iomap.c, switch
> it to use struct iomap instead of the XFS-specific struct xfs_bmbt_irec.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> Reviewed-by: Carlos Maiolino <[email protected]>
> Reviewed-by: Darrick J. Wong <[email protected]>
Pretty straightforward.
Reviewed-by: Dave Chinner <[email protected]>
--
Dave Chinner
[email protected]
On Tue, Oct 15, 2019 at 05:43:37PM +0200, Christoph Hellwig wrote:
> Introduce two nicely abstracted helpers, which can be moved to the
> iomap code later. Also use list_pop_entry and list_first_entry_or_null
> to simplify the code a bit.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
looks ok.
Reviewed-by: Dave Chinner <[email protected]>
--
Dave Chinner
[email protected]
On Tue, Oct 15, 2019 at 05:43:35PM +0200, Christoph Hellwig wrote:
> Don't set IOMAP_F_NEW if we COW over an existing allocated range, as
> these aren't strictly new allocations. This is required to be able to
> use IOMAP_F_NEW to zero newly allocated blocks, which is required for
> the iomap code to fully support file systems that don't do delayed
> allocations or use unwritten extents.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
looks fine.
Reviewed-by: Dave Chinner <[email protected]>
--
Dave Chinner
[email protected]
On Tue, Oct 15, 2019 at 05:43:39PM +0200, Christoph Hellwig wrote:
> In preparation for moving the writeback code to iomap.c, replace the
> XFS-specific COW fork concept with the iomap IOMAP_F_SHARED flag.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
no problems I can spot.
Reviewed-by: Dave Chinner <[email protected]>
--
Dave Chinner
[email protected]
On Tue, Oct 15, 2019 at 05:43:40PM +0200, Christoph Hellwig wrote:
> File systems like gfs2 don't support delayed allocations or unwritten
> extents and thus allocate normal mapped blocks to fill holes. To
> cover the case of such file systems allocating new blocks to fill holes
> also zero out mapped blocks with the new flag.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> Reviewed-by: Darrick J. Wong <[email protected]>
Sensible.
Reviewed-by: Dave Chinner <[email protected]>
--
Dave Chinner
[email protected]
On Tue, Oct 15, 2019 at 05:43:38PM +0200, Christoph Hellwig wrote:
> In preparation for moving the ioend structure to common code we need
> to get rid of the xfs-specific xfs_trans type. Just make it a file
> system private void pointer instead.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
looks good.
Reviewed-by: Dave Chinner <[email protected]>
--
Dave Chinner
[email protected]
On Tue, Oct 15, 2019 at 05:43:41PM +0200, Christoph Hellwig wrote:
> Lift the xfs code for tracing address space operations to the iomap
> layer.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
OK.
Reviewed-by: Dave Chinner <[email protected]>
--
Dave Chinner
[email protected]
On Tue, Oct 15, 2019 at 05:43:42PM +0200, Christoph Hellwig wrote:
> Take the xfs writeback code and move it to fs/iomap. A new structure
> with three methods is added as the abstraction from the generic writeback
> code to the file system. These methods are used to map blocks, submit an
> ioend, and cancel a page that encountered an error before it was added to
> an ioend.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
> fs/iomap/buffered-io.c | 564 ++++++++++++++++++++++++++++++++++-
> fs/iomap/trace.h | 39 +++
> fs/xfs/xfs_aops.c | 662 ++++-------------------------------------
> fs/xfs/xfs_aops.h | 17 --
> fs/xfs/xfs_super.c | 11 +-
> fs/xfs/xfs_trace.h | 39 ---
> include/linux/iomap.h | 59 ++++
> 7 files changed, 722 insertions(+), 669 deletions(-)
.....
> @@ -468,6 +471,8 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
> int
> iomap_releasepage(struct page *page, gfp_t gfp_mask)
> {
> + trace_iomap_releasepage(page->mapping->host, page, 0, 0);
> +
> /*
> * mm accommodates an old ext3 case where clean pages might not have had
> * the dirty bit cleared. Thus, it can send actual dirty pages to
> @@ -483,6 +488,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
> void
> iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
> {
> + trace_iomap_invalidatepage(page->mapping->host, page, offset, len);
> +
These tracepoints should be split out into a separate patch like
the readpage(s) tracepoints. Maybe just lift all the non-writeback
ones in a single patch...
> /*
> * If we are invalidating the entire page, clear the dirty state from it
> * and release it to avoid unnecessary buildup of the LRU.
> @@ -1084,3 +1091,558 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
> return block_page_mkwrite_return(ret);
> }
> EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
> +
> +static void
> +iomap_finish_page_writeback(struct inode *inode, struct bio_vec *bvec,
> + int error)
> +{
> + struct iomap_page *iop = to_iomap_page(bvec->bv_page);
> +
> + if (error) {
> + SetPageError(bvec->bv_page);
> + mapping_set_error(inode->i_mapping, -EIO);
> + }
> +
> + WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
> + WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);
> +
> + if (!iop || atomic_dec_and_test(&iop->write_count))
> + end_page_writeback(bvec->bv_page);
> +}
Can we just pass the struct page into this function?
.....
> +/*
> + * Submit the bio for an ioend. We are passed an ioend with a bio attached to
> + * it, and we submit that bio. The ioend may be used for multiple bio
> + * submissions, so we only want to allocate an append transaction for the ioend
> + * once. In the case of multiple bio submission, each bio will take an IO
This needs to be changed to describe what wpc->ops->submit_ioend()
is used for rather than what XFS might use this hook for.
> + * reference to the ioend to ensure that the ioend completion is only done once
> + * all bios have been submitted and the ioend is really done.
> + *
> + * If @error is non-zero, it means that we have a situation where some part of
> + * the submission process has failed after we have marked pages for writeback
> + * and unlocked them. In this situation, we need to fail the bio and ioend
> + * rather than submit it to IO. This typically only happens on a filesystem
> + * shutdown.
> + */
> +static int
> +iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
> + int error)
> +{
> + ioend->io_bio->bi_private = ioend;
> + ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
> +
> + if (wpc->ops->submit_ioend)
> + error = wpc->ops->submit_ioend(ioend, error);
I'm not sure that "submit_ioend" is the best name for this method,
as it is a pre-bio-submission hook, not an actual IO submission
method. "prepare_ioend_for_submit" is more descriptive, but probably
too long. wpc->ops->prepare_submit(ioend, error) reads pretty well,
though...
> + if (error) {
> + /*
> + * If we are failing the IO now, just mark the ioend with an
> + * error and finish it. This will run IO completion immediately
> + * as there is only one reference to the ioend at this point in
> + * time.
> + */
> + ioend->io_bio->bi_status = errno_to_blk_status(error);
> + bio_endio(ioend->io_bio);
> + return error;
> + }
> +
> + submit_bio(ioend->io_bio);
> + return 0;
> +}
.....
> +/*
> + * We implement an immediate ioend submission policy here to avoid needing to
> + * chain multiple ioends and hence nest mempool allocations which can violate
> + * forward progress guarantees we need to provide. The current ioend we are
> + * adding blocks to is cached on the writepage context, and if the new block
adding pages to ... , and if the new block mapping
> + * does not append to the cached ioend it will create a new ioend and cache that
> + * instead.
> + *
> + * If a new ioend is created and cached, the old ioend is returned and queued
> + * locally for submission once the entire page is processed or an error has been
> + * detected. While ioends are submitted immediately after they are completed,
> + * batching optimisations are provided by higher level block plugging.
> + *
> + * At the end of a writeback pass, there will be a cached ioend remaining on the
> + * writepage context that the caller will need to submit.
> + */
> +static int
> +iomap_writepage_map(struct iomap_writepage_ctx *wpc,
> + struct writeback_control *wbc, struct inode *inode,
> + struct page *page, u64 end_offset)
> +{
> + struct iomap_page *iop = to_iomap_page(page);
> + struct iomap_ioend *ioend, *next;
> + unsigned len = i_blocksize(inode);
> + u64 file_offset; /* file offset of page */
> + int error = 0, count = 0, i;
> + LIST_HEAD(submit_list);
> +
> + WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
> + WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);
> +
> + /*
> + * Walk through the page to find areas to write back. If we run off the
> + * end of the current map or find the current map invalid, grab a new
> + * one.
> + */
> + for (i = 0, file_offset = page_offset(page);
> + i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
> + i++, file_offset += len) {
> + if (iop && !test_bit(i, iop->uptodate))
> + continue;
> +
> + error = wpc->ops->map_blocks(wpc, inode, file_offset);
> + if (error)
> + break;
> + if (wpc->iomap.type == IOMAP_HOLE)
> + continue;
> + iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
> + &submit_list);
> + count++;
> + }
> +
> + WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
> + WARN_ON_ONCE(!PageLocked(page));
> + WARN_ON_ONCE(PageWriteback(page));
> +
> + /*
> + * On error, we have to fail the ioend here because we may have set
> + * pages under writeback, we have to make sure we run IO completion to
> + * mark the error state of the IO appropriately, so we can't cancel the
> + * ioend directly here.
Few too many commas and run-ons here. Maybe reword it like this:
/*
* We cannot cancel the ioend directly here if there is a submission
* error. We may have already set pages under writeback and hence we
* have to run IO completion to mark the error state of the pages under
* writeback appropriately.
>
>
> That means we have to mark this page as under
> + * writeback if we included any blocks from it in the ioend chain so
> + * that completion treats it correctly.
> + *
> + * If we didn't include the page in the ioend, the on error we can
then on error
> + * simply discard and unlock it as there are no other users of the page
> + * now. The caller will still need to trigger submission of outstanding
> + * ioends on the writepage context so they are treated correctly on
> + * error.
> + */
.....
> +static int
> +iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
> +{
> + struct iomap_writepage_ctx *wpc = data;
> + struct inode *inode = page->mapping->host;
> + pgoff_t end_index;
> + u64 end_offset;
> + loff_t offset;
> +
> + trace_iomap_writepage(inode, page, 0, 0);
> +
> + /*
> + * Refuse to write the page out if we are called from reclaim context.
> + *
> + * This avoids stack overflows when called from deeply used stacks in
> + * random callers for direct reclaim or memcg reclaim. We explicitly
> + * allow reclaim from kswapd as the stack usage there is relatively low.
> + *
> + * This should never happen except in the case of a VM regression so
> + * warn about it.
> + */
> + if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
> + PF_MEMALLOC))
> + goto redirty;
> +
> + /*
> + * Given that we do not allow direct reclaim to call us, we should
> + * never be called while in a filesystem transaction.
> + */
never be called in a recursive filesystem reclaim context.
> + if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
> + goto redirty;
> +
Cheers,
Dave.
--
Dave Chinner
[email protected]
On Tue, Oct 15, 2019 at 05:43:43PM +0200, Christoph Hellwig wrote:
> An inline mapping should never mark the page dirty and thus never end up
> in writepages. Add a check for that condition and warn if it happens.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> Reviewed-by: Darrick J. Wong <[email protected]>
> Signed-off-by: Darrick J. Wong <[email protected]>
> ---
> fs/iomap/buffered-io.c | 2 ++
> 1 file changed, 2 insertions(+)
>
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 00af08006cd3..76e72576f307 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -1421,6 +1421,8 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
> error = wpc->ops->map_blocks(wpc, inode, file_offset);
> if (error)
> break;
> + if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
> + continue;
> if (wpc->iomap.type == IOMAP_HOLE)
> continue;
> iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
looks fine.
Reviewed-by: Dave Chinner <[email protected]>
--
Dave Chinner
[email protected]
On Tue, Oct 15, 2019 at 05:43:45PM +0200, Christoph Hellwig wrote:
> Move the initialization of ia and ib to the declaration line and remove
> a superfluous else.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
nice little cleanup.
Reviewed-by: Dave Chinner <[email protected]>
--
Dave Chinner
[email protected]
On Tue, Oct 15, 2019 at 05:43:44PM +0200, Christoph Hellwig wrote:
> Now that all the writepage code is in the iomap code there is no
> need to keep this structure public.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> Reviewed-by: Carlos Maiolino <[email protected]>
> Reviewed-by: Darrick J. Wong <[email protected]>
> ---
> fs/iomap/buffered-io.c | 17 +++++++++++++++++
> include/linux/iomap.h | 17 -----------------
> 2 files changed, 17 insertions(+), 17 deletions(-)
Sensible, nothing should be playing around with internal iomap
per-page state.
Reviewed-by: Dave Chinner <[email protected]>
--
Dave Chinner
[email protected]
On Wed, Oct 16, 2019 at 09:07:21AM +1100, Dave Chinner wrote:
> On Tue, Oct 15, 2019 at 05:43:42PM +0200, Christoph Hellwig wrote:
> > Take the xfs writeback code and move it to fs/iomap. A new structure
> > with three methods is added as the abstraction from the generic writeback
> > code to the file system. These methods are used to map blocks, submit an
> > ioend, and cancel a page that encountered an error before it was added to
> > an ioend.
> >
> > Signed-off-by: Christoph Hellwig <[email protected]>
> > ---
> > fs/iomap/buffered-io.c | 564 ++++++++++++++++++++++++++++++++++-
> > fs/iomap/trace.h | 39 +++
> > fs/xfs/xfs_aops.c | 662 ++++-------------------------------------
> > fs/xfs/xfs_aops.h | 17 --
> > fs/xfs/xfs_super.c | 11 +-
> > fs/xfs/xfs_trace.h | 39 ---
> > include/linux/iomap.h | 59 ++++
> > 7 files changed, 722 insertions(+), 669 deletions(-)
> .....
> > @@ -468,6 +471,8 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
> > int
> > iomap_releasepage(struct page *page, gfp_t gfp_mask)
> > {
> > + trace_iomap_releasepage(page->mapping->host, page, 0, 0);
> > +
> > /*
> > * mm accommodates an old ext3 case where clean pages might not have had
> > * the dirty bit cleared. Thus, it can send actual dirty pages to
> > @@ -483,6 +488,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
> > void
> > iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
> > {
> > + trace_iomap_invalidatepage(page->mapping->host, page, offset, len);
> > +
>
> These tracepoints should be split out into a separate patch like
> the readpage(s) tracepoints. Maybe just lift all the non-writeback
> ones in a single patch...
>
> > /*
> > * If we are invalidating the entire page, clear the dirty state from it
> > * and release it to avoid unnecessary buildup of the LRU.
> > @@ -1084,3 +1091,558 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
> > return block_page_mkwrite_return(ret);
> > }
> > EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
> > +
> > +static void
> > +iomap_finish_page_writeback(struct inode *inode, struct bio_vec *bvec,
> > + int error)
> > +{
> > + struct iomap_page *iop = to_iomap_page(bvec->bv_page);
> > +
> > + if (error) {
> > + SetPageError(bvec->bv_page);
> > + mapping_set_error(inode->i_mapping, -EIO);
> > + }
> > +
> > + WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
> > + WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);
> > +
> > + if (!iop || atomic_dec_and_test(&iop->write_count))
> > + end_page_writeback(bvec->bv_page);
> > +}
>
> Can we just pass the struct page into this function?
>
> .....
>
> > +/*
> > + * Submit the bio for an ioend. We are passed an ioend with a bio attached to
> > + * it, and we submit that bio. The ioend may be used for multiple bio
> > + * submissions, so we only want to allocate an append transaction for the ioend
> > + * once. In the case of multiple bio submission, each bio will take an IO
>
> This needs to be changed to describe what wpc->ops->submit_ioend()
> is used for rather than what XFS might use this hook for.
>
> > + * reference to the ioend to ensure that the ioend completion is only done once
> > + * all bios have been submitted and the ioend is really done.
> > + *
> > + * If @error is non-zero, it means that we have a situation where some part of
> > + * the submission process has failed after we have marked pages for writeback
> > + * and unlocked them. In this situation, we need to fail the bio and ioend
> > + * rather than submit it to IO. This typically only happens on a filesystem
> > + * shutdown.
> > + */
> > +static int
> > +iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
> > + int error)
> > +{
> > + ioend->io_bio->bi_private = ioend;
> > + ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
> > +
> > + if (wpc->ops->submit_ioend)
> > + error = wpc->ops->submit_ioend(ioend, error);
>
> I'm not sure that "submit_ioend" is the best name for this method,
> as it is a pre-bio-submission hook, not an actual IO submission
> method. "prepare_ioend_for_submit" is more descriptive, but probably
> too long. wpc->ops->prepare_submit(ioend, error) reads pretty well,
> though...
->prepare_submission() ?
--D
> > + if (error) {
> > + /*
> > + * If we are failing the IO now, just mark the ioend with an
> > + * error and finish it. This will run IO completion immediately
> > + * as there is only one reference to the ioend at this point in
> > + * time.
> > + */
> > + ioend->io_bio->bi_status = errno_to_blk_status(error);
> > + bio_endio(ioend->io_bio);
> > + return error;
> > + }
> > +
> > + submit_bio(ioend->io_bio);
> > + return 0;
> > +}
>
> .....
> > +/*
> > + * We implement an immediate ioend submission policy here to avoid needing to
> > + * chain multiple ioends and hence nest mempool allocations which can violate
> > + * forward progress guarantees we need to provide. The current ioend we are
> > + * adding blocks to is cached on the writepage context, and if the new block
>
> adding pages to ... , and if the new block mapping
>
> > + * does not append to the cached ioend it will create a new ioend and cache that
> > + * instead.
> > + *
> > + * If a new ioend is created and cached, the old ioend is returned and queued
> > + * locally for submission once the entire page is processed or an error has been
> > + * detected. While ioends are submitted immediately after they are completed,
> > + * batching optimisations are provided by higher level block plugging.
> > + *
> > + * At the end of a writeback pass, there will be a cached ioend remaining on the
> > + * writepage context that the caller will need to submit.
> > + */
> > +static int
> > +iomap_writepage_map(struct iomap_writepage_ctx *wpc,
> > + struct writeback_control *wbc, struct inode *inode,
> > + struct page *page, u64 end_offset)
> > +{
> > + struct iomap_page *iop = to_iomap_page(page);
> > + struct iomap_ioend *ioend, *next;
> > + unsigned len = i_blocksize(inode);
> > + u64 file_offset; /* file offset of page */
> > + int error = 0, count = 0, i;
> > + LIST_HEAD(submit_list);
> > +
> > + WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
> > + WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);
> > +
> > + /*
> > + * Walk through the page to find areas to write back. If we run off the
> > + * end of the current map or find the current map invalid, grab a new
> > + * one.
> > + */
> > + for (i = 0, file_offset = page_offset(page);
> > + i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
> > + i++, file_offset += len) {
> > + if (iop && !test_bit(i, iop->uptodate))
> > + continue;
> > +
> > + error = wpc->ops->map_blocks(wpc, inode, file_offset);
> > + if (error)
> > + break;
> > + if (wpc->iomap.type == IOMAP_HOLE)
> > + continue;
> > + iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
> > + &submit_list);
> > + count++;
> > + }
> > +
> > + WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
> > + WARN_ON_ONCE(!PageLocked(page));
> > + WARN_ON_ONCE(PageWriteback(page));
> > +
> > + /*
> > + * On error, we have to fail the ioend here because we may have set
> > + * pages under writeback, we have to make sure we run IO completion to
> > + * mark the error state of the IO appropriately, so we can't cancel the
> > + * ioend directly here.
>
> Few too many commas and run-ons here. Maybe reword it like this:
>
> /*
> * We cannot cancel the ioend directly here if there is a submission
> * error. We may have already set pages under writeback and hence we
> * have to run IO completion to mark the error state of the pages under
> * writeback appropriately.
>
> >
> >
> > That means we have to mark this page as under
> > + * writeback if we included any blocks from it in the ioend chain so
> > + * that completion treats it correctly.
> > + *
> > + * If we didn't include the page in the ioend, the on error we can
> then on error
>
> > + * simply discard and unlock it as there are no other users of the page
> > + * now. The caller will still need to trigger submission of outstanding
> > + * ioends on the writepage context so they are treated correctly on
> > + * error.
> > + */
>
> .....
>
> > +static int
> > +iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
> > +{
> > + struct iomap_writepage_ctx *wpc = data;
> > + struct inode *inode = page->mapping->host;
> > + pgoff_t end_index;
> > + u64 end_offset;
> > + loff_t offset;
> > +
> > + trace_iomap_writepage(inode, page, 0, 0);
> > +
> > + /*
> > + * Refuse to write the page out if we are called from reclaim context.
> > + *
> > + * This avoids stack overflows when called from deeply used stacks in
> > + * random callers for direct reclaim or memcg reclaim. We explicitly
> > + * allow reclaim from kswapd as the stack usage there is relatively low.
> > + *
> > + * This should never happen except in the case of a VM regression so
> > + * warn about it.
> > + */
> > + if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
> > + PF_MEMALLOC))
> > + goto redirty;
> > +
> > + /*
> > + * Given that we do not allow direct reclaim to call us, we should
> > + * never be called while in a filesystem transaction.
> > + */
>
> never be called in a recursive filesystem reclaim context.
>
> > + if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
> > + goto redirty;
> > +
>
> Cheers,
>
> Dave.
> --
> Dave Chinner
> [email protected]
On Tue, Oct 15, 2019 at 11:40:40AM -0700, Darrick J. Wong wrote:
> > + if (unlikely(error && !quiet)) {
> > + printk_ratelimited(KERN_ERR
> > + "%s: writeback error on sector %llu",
> > + inode->i_sb->s_id, start);
>
> Ugh, /this/ message. It's pretty annoying how it doesn't tell you which
> file or where in that file the write was lost.
Sure, feel free to improve it in a follow on patch.
>
> I want to send in a patch atop your series to fix this, though I'm
> also a little inclined to want to keep the message inside XFS.
>
> Thoughts?
I don't see a sensible way to keep it in the file system, and I also
don't really see what that would buy us. I'd rather use the same
message for all iomap-using file systems than have slightly different
error reporting for each of them inside the iomap call stack.
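[For reference, a more talkative variant along the lines Darrick is asking
for might look like the following; this is a sketch against the structures
quoted in this thread ("start" and the ioend come from the surrounding
completion code), not a posted patch:]

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
	"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino,
			ioend->io_offset, start);
	}
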
On Wed, Oct 16, 2019 at 09:07:21AM +1100, Dave Chinner wrote:
> > + trace_iomap_releasepage(page->mapping->host, page, 0, 0);
> > +
> > /*
> > * mm accommodates an old ext3 case where clean pages might not have had
> > * the dirty bit cleared. Thus, it can send actual dirty pages to
> > @@ -483,6 +488,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
> > void
> > iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
> > {
> > + trace_iomap_invalidatepage(page->mapping->host, page, offset, len);
> > +
>
> These tracepoints should be split out into a separate patch like
> the readpage(s) tracepoints. Maybe just lift all the non-writeback
> ones in a single patch...
I guess that makes sense. Initially I didn't want to duplicate the
trace definition as it is shared with the writeback tracepoints,
but in the overall scheme of things that doesn't really matter.
> > +iomap_finish_page_writeback(struct inode *inode, struct bio_vec *bvec,
> > + int error)
> > +{
> > + struct iomap_page *iop = to_iomap_page(bvec->bv_page);
> > +
> > + if (error) {
> > + SetPageError(bvec->bv_page);
> > + mapping_set_error(inode->i_mapping, -EIO);
> > + }
> > +
> > + WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
> > + WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);
> > +
> > + if (!iop || atomic_dec_and_test(&iop->write_count))
> > + end_page_writeback(bvec->bv_page);
> > +}
>
> Can we just pass the struct page into this function?
I'd rather not change calling conventions in code just moved over for
no good reason. That being said, I agree with passing a page, so I'll
just throw in a follow-on patch like I did for the iomap_ioend_compare
cleanup.
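[A sketch of what such a follow-up might look like, with the same body as
the quoted helper and the page passed directly:]

static void
iomap_finish_page_writeback(struct inode *inode, struct page *page,
		int error)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (error) {
		SetPageError(page);
		mapping_set_error(inode->i_mapping, -EIO);
	}

	WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);

	if (!iop || atomic_dec_and_test(&iop->write_count))
		end_page_writeback(page);
}
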
>
> .....
>
> > +/*
> > + * Submit the bio for an ioend. We are passed an ioend with a bio attached to
> > + * it, and we submit that bio. The ioend may be used for multiple bio
> > + * submissions, so we only want to allocate an append transaction for the ioend
> > + * once. In the case of multiple bio submission, each bio will take an IO
>
> This needs to be changed to describe what wpc->ops->submit_ioend()
> is used for rather than what XFS might use this hook for.
True. The real documentation now is in the header near the ops definition,
but I'll update this one to make more sense as well.
> > +static int
> > +iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
> > + int error)
> > +{
> > + ioend->io_bio->bi_private = ioend;
> > + ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
> > +
> > + if (wpc->ops->submit_ioend)
> > + error = wpc->ops->submit_ioend(ioend, error);
>
> I'm not sure that "submit_ioend" is the best name for this method,
> as it is a pre-bio-submission hook, not an actual IO submission
> method. "prepare_ioend_for_submit" is more descriptive, but probably
> too long. wpc->ops->prepare_submit(ioend, error) reads pretty well,
> though...
Not a huge fan of that name either, but Brian complained. Let's hold
a popular vote for a name and see if we have a winner.
As for the grammar comments - all this is copied over as-is. I'll add
another patch to fix that up.
On Wed, Oct 16, 2019 at 09:07:21AM +1100, Dave Chinner wrote:
> > +/*
> > + * We implement an immediate ioend submission policy here to avoid needing to
> > + * chain multiple ioends and hence nest mempool allocations which can violate
> > + * forward progress guarantees we need to provide. The current ioend we are
> > + * adding blocks to is cached on the writepage context, and if the new block
>
> adding pages to ... , and if the new block mapping
So reviewing this comment, I disagree with the change. We add to the
ioend on a per-block basis, not a per-page one. Similarly for the
second one: it is the block that needs to append (which is defined by
the mapping, but still..).
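[The per-block growth described above is visible in the merge test the
patch uses: a block only continues the cached ioend when it is logically
and physically contiguous and has the same type and flags. Roughly, as
condensed from the patch rather than quoted verbatim:]

static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
		sector_t sector)
{
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (sector != bio_end_sector(wpc->ioend->io_bio))
		return false;
	return true;
}
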
On Wed, Oct 16, 2019 at 09:48:36AM +0200, Christoph Hellwig wrote:
> On Wed, Oct 16, 2019 at 09:07:21AM +1100, Dave Chinner wrote:
...
> > > +/*
> > > + * Submit the bio for an ioend. We are passed an ioend with a bio attached to
> > > + * it, and we submit that bio. The ioend may be used for multiple bio
> > > + * submissions, so we only want to allocate an append transaction for the ioend
> > > + * once. In the case of multiple bio submission, each bio will take an IO
> >
> > This needs to be changed to describe what wpc->ops->submit_ioend()
> > is used for rather than what XFS might use this hook for.
>
> True. The real documentation now is in the header near the ops defintion,
> but I'll update this one to make more sense as well.
>
> > > +static int
> > > +iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
> > > + int error)
> > > +{
> > > + ioend->io_bio->bi_private = ioend;
> > > + ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
> > > +
> > > + if (wpc->ops->submit_ioend)
> > > + error = wpc->ops->submit_ioend(ioend, error);
> >
> > I'm not sure that "submit_ioend" is the best name for this method,
> > as it is a pre-bio-submission hook, not an actual IO submission
> > method. "prepare_ioend_for_submit" is more descriptive, but probably
> > too long. wpc->ops->prepare_submit(ioend, error) reads pretty well,
> > though...
>
> Not a huge fan of that name either, but Brian complained. Let's hold
> a popular vote for a name and see if we have a winner.
>
Just to recall, I suggested something like ->pre_submit_ioend() back in
v5. Short of that, I asked for extra comments to clearly document the
semantics, which I believe Christoph added to the header, and I acked
(it looks like the handful of R-B tags I sent were all dropped, btw...?
Have all of those patches changed?).
To give my .02 on the naming thing, I care about functional clarity more
than aesthetics. In that regard, ->submit_ioend() reads like a
submission hook and thus sounds rather confusing to me when I don't see
it actually submit anything. ->pre_submit_ioend(), ->prepare_ioend() or
->prepare_submission() all clearly indicate semantics to me, so I'm good
with any of those over ->submit_ioend(). :)
Brian
> As for the grammar comments - all this is copied over as-is. I'll add
> another patch to fix that up.
>