A trivial cleanup to make the fragment handling logic clearer.
Signed-off-by: Gao Xiang <[email protected]>
---
fs/erofs/zdata.c | 39 +++++++++++++--------------------------
1 file changed, 13 insertions(+), 26 deletions(-)
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 53820271e538..dc104add0a99 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -932,34 +932,27 @@ static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
return true;
}
-static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
- struct page *page, unsigned int pageofs,
- unsigned int len)
+static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
+ unsigned int cur, unsigned int end, erofs_off_t pos)
{
- struct super_block *sb = inode->i_sb;
- struct inode *packed_inode = EROFS_I_SB(inode)->packed_inode;
+ struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
- u8 *src, *dst;
- unsigned int i, cnt;
+ unsigned int cnt;
+ u8 *src;
if (!packed_inode)
return -EFSCORRUPTED;
buf.inode = packed_inode;
- pos += EROFS_I(inode)->z_fragmentoff;
- for (i = 0; i < len; i += cnt) {
- cnt = min_t(unsigned int, len - i,
+ for (; cur < end; cur += cnt, pos += cnt) {
+ cnt = min_t(unsigned int, end - cur,
sb->s_blocksize - erofs_blkoff(sb, pos));
src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP);
if (IS_ERR(src)) {
erofs_put_metabuf(&buf);
return PTR_ERR(src);
}
-
- dst = kmap_local_page(page);
- memcpy(dst + pageofs + i, src + erofs_blkoff(sb, pos), cnt);
- kunmap_local(dst);
- pos += cnt;
+ memcpy_to_page(page, cur, src + erofs_blkoff(sb, pos), cnt);
}
erofs_put_metabuf(&buf);
return 0;
@@ -972,7 +965,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
struct erofs_map_blocks *const map = &fe->map;
const loff_t offset = page_offset(page);
bool tight = true, exclusive;
- unsigned int cur, end, spiltted;
+ unsigned int cur, end, len, spiltted;
int err = 0;
/* register locked file pages as online pages in pack */
@@ -1041,17 +1034,11 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
goto next_part;
}
if (map->m_flags & EROFS_MAP_FRAGMENT) {
- unsigned int pageofs, skip, len;
+ erofs_off_t fpos = offset + cur - map->m_la;
- if (offset > map->m_la) {
- pageofs = 0;
- skip = offset - map->m_la;
- } else {
- pageofs = map->m_la & ~PAGE_MASK;
- skip = 0;
- }
- len = min_t(unsigned int, map->m_llen - skip, end - cur);
- err = z_erofs_read_fragment(inode, skip, page, pageofs, len);
+ len = min_t(unsigned int, map->m_llen - fpos, end - cur);
+ err = z_erofs_read_fragment(inode->i_sb, page, cur, cur + len,
+ EROFS_I(inode)->z_fragmentoff + fpos);
if (err)
goto out;
++spiltted;
--
2.24.4
z_erofs_page_mark_eio() can be folded into z_erofs_onlinepage_endio() to simplify the code.
Signed-off-by: Gao Xiang <[email protected]>
---
fs/erofs/zdata.c | 29 +++++++++--------------------
1 file changed, 9 insertions(+), 20 deletions(-)
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index a200e99f7d4f..4009283944ca 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -143,22 +143,17 @@ static inline void z_erofs_onlinepage_split(struct page *page)
atomic_inc((atomic_t *)&page->private);
}
-static inline void z_erofs_page_mark_eio(struct page *page)
+static void z_erofs_onlinepage_endio(struct page *page, int err)
{
- int orig;
+ int orig, v;
+
+ DBG_BUGON(!PagePrivate(page));
do {
orig = atomic_read((atomic_t *)&page->private);
- } while (atomic_cmpxchg((atomic_t *)&page->private, orig,
- orig | Z_EROFS_PAGE_EIO) != orig);
-}
-
-static inline void z_erofs_onlinepage_endio(struct page *page)
-{
- unsigned int v;
+ v = (orig - 1) | (err ? Z_EROFS_PAGE_EIO : 0);
+ } while (atomic_cmpxchg((atomic_t *)&page->private, orig, v) != orig);
- DBG_BUGON(!PagePrivate(page));
- v = atomic_dec_return((atomic_t *)&page->private);
if (!(v & ~Z_EROFS_PAGE_EIO)) {
set_page_private(page, 0);
ClearPagePrivate(page);
@@ -1066,9 +1061,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
goto repeat;
out:
- if (err)
- z_erofs_page_mark_eio(page);
- z_erofs_onlinepage_endio(page);
+ z_erofs_onlinepage_endio(page, err);
return err;
}
@@ -1171,9 +1164,7 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
cur += len;
}
kunmap_local(dst);
- if (err)
- z_erofs_page_mark_eio(bvi->bvec.page);
- z_erofs_onlinepage_endio(bvi->bvec.page);
+ z_erofs_onlinepage_endio(bvi->bvec.page, err);
list_del(p);
kfree(bvi);
}
@@ -1344,9 +1335,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
/* recycle all individual short-lived pages */
if (z_erofs_put_shortlivedpage(be->pagepool, page))
continue;
- if (err)
- z_erofs_page_mark_eio(page);
- z_erofs_onlinepage_endio(page);
+ z_erofs_onlinepage_endio(page, err);
}
if (be->decompressed_pages != be->onstack_pages)
--
2.24.4
It's a straightforward conversion with no logic changes (except that
it renames the corresponding tracepoint).
Signed-off-by: Gao Xiang <[email protected]>
---
erofs stress test passes.
fs/erofs/zdata.c | 9 ++++-----
include/trace/events/erofs.h | 16 ++++++++--------
2 files changed, 12 insertions(+), 13 deletions(-)
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 79cadb88e8bf..ace727bfe5b2 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1821,17 +1821,16 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *const inode = page->mapping->host;
+ struct inode *const inode = folio->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
int err;
- trace_erofs_readpage(page, false);
- f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
+ trace_erofs_read_folio(folio, false);
+ f.headoffset = (erofs_off_t)folio->index << folio_shift(folio);
z_erofs_pcluster_readmore(&f, NULL, true);
- err = z_erofs_do_read_page(&f, page);
+ err = z_erofs_do_read_page(&f, &folio->page);
z_erofs_pcluster_readmore(&f, NULL, false);
z_erofs_pcluster_end(&f);
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index 71dbe8bfa7db..e18684b02c3d 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -80,11 +80,11 @@ TRACE_EVENT(erofs_fill_inode,
__entry->blkaddr, __entry->ofs)
);
-TRACE_EVENT(erofs_readpage,
+TRACE_EVENT(erofs_read_folio,
- TP_PROTO(struct page *page, bool raw),
+ TP_PROTO(struct folio *folio, bool raw),
- TP_ARGS(page, raw),
+ TP_ARGS(folio, raw),
TP_STRUCT__entry(
__field(dev_t, dev )
@@ -96,11 +96,11 @@ TRACE_EVENT(erofs_readpage,
),
TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->nid = EROFS_I(page->mapping->host)->nid;
- __entry->dir = S_ISDIR(page->mapping->host->i_mode);
- __entry->index = page->index;
- __entry->uptodate = PageUptodate(page);
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->nid = EROFS_I(folio->mapping->host)->nid;
+ __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
+ __entry->index = folio->index;
+ __entry->uptodate = folio_test_uptodate(folio);
__entry->raw = raw;
),
--
2.24.4
It's a straightforward conversion, except that readahead_folio()
does folio_put() in advance; that doesn't matter since the folios
are still locked.
As before, since file-backed folios (pages for now) are locked, we
can temporarily use folio->private as an internal counter that
indicates the split parts of each folio for the corresponding
pclusters to decompress.
When the counter reaches zero, the folio is finally unlocked
(see compress.h and z_erofs_onlinepage_endio()).
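For illustration only, here is a minimal userspace sketch of that
counter pattern (the split/endio names and the error-bit trick mirror
compress.h, but the bit position, the initial value, and the final
"unlock" step are simplified assumptions, not the kernel code):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

/* error bit folded into the counter (exact bit position is an assumption) */
#define Z_EROFS_PAGE_EIO	(1U << 30)

static atomic_uint private_ctr;		/* stands in for folio->private */

/* one more split part of the folio is owned by some pcluster */
static void onlinepage_split(void)
{
	atomic_fetch_add(&private_ctr, 1);
}

/* one split part finished decompression, possibly with an error */
static void onlinepage_endio(int err)
{
	unsigned int orig, v;

	do {
		orig = atomic_load(&private_ctr);
		v = (orig - 1) | (err ? Z_EROFS_PAGE_EIO : 0);
	} while (!atomic_compare_exchange_weak(&private_ctr, &orig, v));

	/* all parts done: the kernel clears ->private and unlocks the folio */
	if (!(v & ~Z_EROFS_PAGE_EIO))
		printf("folio unlocked, uptodate=%d\n",
		       !(v & Z_EROFS_PAGE_EIO));
}

int main(void)
{
	onlinepage_split();	/* folio split across two pclusters */
	onlinepage_split();
	onlinepage_endio(0);	/* first part succeeded */
	onlinepage_endio(-EIO);	/* second part failed: EIO bit sticks */
	return 0;
}

Note the folio ends up uptodate only if no part ever set the EIO bit.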
Signed-off-by: Gao Xiang <[email protected]>
---
fs/erofs/zdata.c | 32 +++++++++++++++-----------------
1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index c28945532a02..79cadb88e8bf 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1852,37 +1852,35 @@ static void z_erofs_readahead(struct readahead_control *rac)
struct inode *const inode = rac->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
- struct page *head = NULL, *page;
- unsigned int nr_pages;
+ struct folio *head = NULL, *folio;
+ unsigned int nr_folios;
+ int err;
f.headoffset = readahead_pos(rac);
z_erofs_pcluster_readmore(&f, rac, true);
- nr_pages = readahead_count(rac);
- trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
+ nr_folios = readahead_count(rac);
+ trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);
- while ((page = readahead_page(rac))) {
- set_page_private(page, (unsigned long)head);
- head = page;
+ while ((folio = readahead_folio(rac))) {
+ folio->private = head;
+ head = folio;
}
+ /* traverse in reverse order for best metadata I/O performance */
while (head) {
- struct page *page = head;
- int err;
-
- /* traversal in reverse order */
- head = (void *)page_private(page);
+ folio = head;
+ head = folio_get_private(folio);
- err = z_erofs_do_read_page(&f, page);
+ err = z_erofs_do_read_page(&f, &folio->page);
if (err && err != -EINTR)
- erofs_err(inode->i_sb, "readahead error %d @ %lu of nid %llu",
- err, page->index, EROFS_I(inode)->nid);
- put_page(page);
+ erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
+ folio->index, EROFS_I(inode)->nid);
}
z_erofs_pcluster_readmore(&f, rac, false);
z_erofs_pcluster_end(&f);
- z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true);
+ z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_folios), true);
erofs_put_metabuf(&f.map.buf);
erofs_release_pages(&f.pagepool);
}
--
2.24.4
Some preparation logic should be part of z_erofs_pcluster_begin()
instead of z_erofs_do_read_page(). Let's move it now.
Signed-off-by: Gao Xiang <[email protected]>
---
fs/erofs/zdata.c | 59 +++++++++++++++++++++---------------------------
1 file changed, 26 insertions(+), 33 deletions(-)
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 4ed99346c4e1..30ecdfe41836 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -852,7 +852,10 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
{
struct erofs_map_blocks *map = &fe->map;
+ struct super_block *sb = fe->inode->i_sb;
+ erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
struct erofs_workgroup *grp = NULL;
+ void *mptr;
int ret;
DBG_BUGON(fe->pcl);
@@ -861,8 +864,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
if (!(map->m_flags & EROFS_MAP_META)) {
- grp = erofs_find_workgroup(fe->inode->i_sb,
- map->m_pa >> PAGE_SHIFT);
+ grp = erofs_find_workgroup(sb, blknr);
} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
DBG_BUGON(1);
return -EFSCORRUPTED;
@@ -881,9 +883,24 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
} else if (ret) {
return ret;
}
+
z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
- /* since file-backed online pages are traversed in reverse order */
+ if (!z_erofs_is_inline_pcluster(fe->pcl)) {
+ /* bind cache first when cached decompression is preferred */
+ z_erofs_bind_cache(fe);
+ } else {
+ mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP);
+ if (IS_ERR(mptr)) {
+ ret = PTR_ERR(mptr);
+ erofs_err(sb, "failed to get inline data %d", ret);
+ return ret;
+ }
+ get_page(map->buf.page);
+ WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
+ fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
+ }
+ /* file-backed inplace I/O pages are traversed in reverse order */
fe->icur = z_erofs_pclusterpages(fe->pcl);
return 0;
}
@@ -982,39 +999,15 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
err = z_erofs_map_blocks_iter(inode, map, 0);
if (err)
goto out;
- } else {
- if (fe->pcl)
- goto hitted;
- /* didn't get a valid pcluster previously (very rare) */
- }
-
- if (!(map->m_flags & EROFS_MAP_MAPPED) ||
- map->m_flags & EROFS_MAP_FRAGMENT)
+ } else if (fe->pcl) {
goto hitted;
+ }
- err = z_erofs_pcluster_begin(fe);
- if (err)
- goto out;
-
- if (z_erofs_is_inline_pcluster(fe->pcl)) {
- void *mp;
-
- mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
- erofs_blknr(inode->i_sb, map->m_pa),
- EROFS_NO_KMAP);
- if (IS_ERR(mp)) {
- err = PTR_ERR(mp);
- erofs_err(inode->i_sb,
- "failed to get inline page, err %d", err);
+ if ((map->m_flags & EROFS_MAP_MAPPED) &&
+ !(map->m_flags & EROFS_MAP_FRAGMENT)) {
+ err = z_erofs_pcluster_begin(fe);
+ if (err)
goto out;
- }
- get_page(fe->map.buf.page);
- WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
- fe->map.buf.page);
- fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
- } else {
- /* bind cache first when cached decompression is preferred */
- z_erofs_bind_cache(fe);
}
hitted:
/*
--
2.24.4
{collector,collection} were once reserved in order to indicate different
runtime logical extent instances of multi-reference pclusters.
However, de-duplicated decompression has landed in a more flexible
way, thus `struct z_erofs_collection` was formally removed in commit
87ca34a7065d ("erofs: get rid of `struct z_erofs_collection'").
Let's handle the remaining leftovers, for example:
`z_erofs_collector_begin` => `z_erofs_pcluster_begin`
`z_erofs_collector_end` => `z_erofs_pcluster_end`
as well as some comments. No logic changes.
Signed-off-by: Gao Xiang <[email protected]>
---
fs/erofs/zdata.c | 39 ++++++++++++++++++---------------------
1 file changed, 18 insertions(+), 21 deletions(-)
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index dc104add0a99..4ed99346c4e1 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -507,19 +507,17 @@ enum z_erofs_pclustermode {
*/
Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
/*
- * The current collection has been linked with the owned chain, and
- * could also be linked with the remaining collections, which means
- * if the processing page is the tail page of the collection, thus
- * the current collection can safely use the whole page (since
- * the previous collection is under control) for in-place I/O, as
- * illustrated below:
- * ________________________________________________________________
- * | tail (partial) page | head (partial) page |
- * | (of the current cl) | (of the previous collection) |
- * | | |
- * |__PCLUSTER_FOLLOWED___|___________PCLUSTER_FOLLOWED____________|
+ * The pcluster was just linked to a decompression chain by us. It can
+ * also be linked with the remaining pclusters, which means if the
+ * processing page is the tail page of a pcluster, this pcluster can
+ * safely use the whole page (since the previous pcluster is within the
+ * same chain) for in-place I/O, as illustrated below:
+ * ___________________________________________________
+ * | tail (partial) page | head (partial) page |
+ * | (of the current pcl) | (of the previous pcl) |
+ * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
*
- * [ (*) the above page can be used as inplace I/O. ]
+ * [ (*) the page above can be used as inplace I/O. ]
*/
Z_EROFS_PCLUSTER_FOLLOWED,
};
@@ -851,7 +849,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
return err;
}
-static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
+static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
{
struct erofs_map_blocks *map = &fe->map;
struct erofs_workgroup *grp = NULL;
@@ -908,12 +906,12 @@ void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}
-static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
+static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
{
struct z_erofs_pcluster *pcl = fe->pcl;
if (!pcl)
- return false;
+ return;
z_erofs_bvec_iter_end(&fe->biter);
mutex_unlock(&pcl->lock);
@@ -929,7 +927,7 @@ static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
erofs_workgroup_put(&pcl->obj);
fe->pcl = NULL;
- return true;
+ fe->backmost = false;
}
static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
@@ -978,8 +976,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
if (offset + cur < map->m_la ||
offset + cur >= map->m_la + map->m_llen) {
- if (z_erofs_collector_end(fe))
- fe->backmost = false;
+ z_erofs_pcluster_end(fe);
map->m_la = offset + cur;
map->m_llen = 0;
err = z_erofs_map_blocks_iter(inode, map, 0);
@@ -995,7 +992,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
map->m_flags & EROFS_MAP_FRAGMENT)
goto hitted;
- err = z_erofs_collector_begin(fe);
+ err = z_erofs_pcluster_begin(fe);
if (err)
goto out;
@@ -1862,7 +1859,7 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
z_erofs_pcluster_readmore(&f, NULL, true);
err = z_erofs_do_read_page(&f, page);
z_erofs_pcluster_readmore(&f, NULL, false);
- (void)z_erofs_collector_end(&f);
+ z_erofs_pcluster_end(&f);
/* if some compressed cluster ready, need submit them anyway */
z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);
@@ -1909,7 +1906,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
put_page(page);
}
z_erofs_pcluster_readmore(&f, rac, false);
- (void)z_erofs_collector_end(&f);
+ z_erofs_pcluster_end(&f);
z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true);
erofs_put_metabuf(&f.map.buf);
--
2.24.4
On Thu, 17 Aug 2023 16:28:10 +0800
Gao Xiang <[email protected]> wrote:
> z_erofs_page_mark_eio() can be folded into z_erofs_onlinepage_endio() to simplify the code.
>
> Signed-off-by: Gao Xiang <[email protected]>
Reviewed-by: Yue Hu <[email protected]>
On 2023/8/17 16:28, Gao Xiang wrote:
> Some preparation logic should be part of z_erofs_pcluster_begin()
> instead of z_erofs_do_read_page(). Let's move it now.
>
> Signed-off-by: Gao Xiang <[email protected]>
> ---
> fs/erofs/zdata.c | 59 +++++++++++++++++++++---------------------------
> 1 file changed, 26 insertions(+), 33 deletions(-)
>
> diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
> index 4ed99346c4e1..30ecdfe41836 100644
> --- a/fs/erofs/zdata.c
> +++ b/fs/erofs/zdata.c
> @@ -852,7 +852,10 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
> static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
> {
> struct erofs_map_blocks *map = &fe->map;
> + struct super_block *sb = fe->inode->i_sb;
> + erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
> struct erofs_workgroup *grp = NULL;
> + void *mptr;
> int ret;
>
> DBG_BUGON(fe->pcl);
> @@ -861,8 +864,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
> DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
>
> if (!(map->m_flags & EROFS_MAP_META)) {
> - grp = erofs_find_workgroup(fe->inode->i_sb,
> - map->m_pa >> PAGE_SHIFT);
> + grp = erofs_find_workgroup(sb, blknr);
> } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
> DBG_BUGON(1);
> return -EFSCORRUPTED;
> @@ -881,9 +883,24 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
> } else if (ret) {
> return ret;
> }
> +
> z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
> Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
> - /* since file-backed online pages are traversed in reverse order */
> + if (!z_erofs_is_inline_pcluster(fe->pcl)) {
> + /* bind cache first when cached decompression is preferred */
> + z_erofs_bind_cache(fe);
> + } else {
Nitpick: mptr can be defined here.
Reviewed-by: Chao Yu <[email protected]>
Thanks,
> + mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP);
> + if (IS_ERR(mptr)) {
> + ret = PTR_ERR(mptr);
> + erofs_err(sb, "failed to get inline data %d", ret);
> + return ret;
> + }
> + get_page(map->buf.page);
> + WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
> + fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
> + }
> + /* file-backed inplace I/O pages are traversed in reverse order */
> fe->icur = z_erofs_pclusterpages(fe->pcl);
> return 0;
> }
> @@ -982,39 +999,15 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
> err = z_erofs_map_blocks_iter(inode, map, 0);
> if (err)
> goto out;
> - } else {
> - if (fe->pcl)
> - goto hitted;
> - /* didn't get a valid pcluster previously (very rare) */
> - }
> -
> - if (!(map->m_flags & EROFS_MAP_MAPPED) ||
> - map->m_flags & EROFS_MAP_FRAGMENT)
> + } else if (fe->pcl) {
> goto hitted;
> + }
>
> - err = z_erofs_pcluster_begin(fe);
> - if (err)
> - goto out;
> -
> - if (z_erofs_is_inline_pcluster(fe->pcl)) {
> - void *mp;
> -
> - mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
> - erofs_blknr(inode->i_sb, map->m_pa),
> - EROFS_NO_KMAP);
> - if (IS_ERR(mp)) {
> - err = PTR_ERR(mp);
> - erofs_err(inode->i_sb,
> - "failed to get inline page, err %d", err);
> + if ((map->m_flags & EROFS_MAP_MAPPED) &&
> + !(map->m_flags & EROFS_MAP_FRAGMENT)) {
> + err = z_erofs_pcluster_begin(fe);
> + if (err)
> goto out;
> - }
> - get_page(fe->map.buf.page);
> - WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
> - fe->map.buf.page);
> - fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
> - } else {
> - /* bind cache first when cached decompression is preferred */
> - z_erofs_bind_cache(fe);
> }
> hitted:
> /*
On 2023/8/17 16:28, Gao Xiang wrote:
> It's a straightforward conversion with no logic changes (except that
> it renames the corresponding tracepoint).
>
> Signed-off-by: Gao Xiang <[email protected]>
Reviewed-by: Chao Yu <[email protected]>
Thanks,
On 2023/8/23 23:05, Chao Yu wrote:
> On 2023/8/17 16:28, Gao Xiang wrote:
>> Some preparation logic should be part of z_erofs_pcluster_begin()
>> instead of z_erofs_do_read_page(). Let's move it now.
>>
>> Signed-off-by: Gao Xiang <[email protected]>
>> ---
>> fs/erofs/zdata.c | 59 +++++++++++++++++++++---------------------------
>> 1 file changed, 26 insertions(+), 33 deletions(-)
>>
>> diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
>> index 4ed99346c4e1..30ecdfe41836 100644
>> --- a/fs/erofs/zdata.c
>> +++ b/fs/erofs/zdata.c
>> @@ -852,7 +852,10 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
>> static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
>> {
>> struct erofs_map_blocks *map = &fe->map;
>> + struct super_block *sb = fe->inode->i_sb;
>> + erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
>> struct erofs_workgroup *grp = NULL;
>> + void *mptr;
>> int ret;
>> DBG_BUGON(fe->pcl);
>> @@ -861,8 +864,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
>> DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
>> if (!(map->m_flags & EROFS_MAP_META)) {
>> - grp = erofs_find_workgroup(fe->inode->i_sb,
>> - map->m_pa >> PAGE_SHIFT);
>> + grp = erofs_find_workgroup(sb, blknr);
>> } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
>> DBG_BUGON(1);
>> return -EFSCORRUPTED;
>> @@ -881,9 +883,24 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
>> } else if (ret) {
>> return ret;
>> }
>> +
>> z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
>> Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
>> - /* since file-backed online pages are traversed in reverse order */
>> + if (!z_erofs_is_inline_pcluster(fe->pcl)) {
>> + /* bind cache first when cached decompression is preferred */
>> + z_erofs_bind_cache(fe);
>> + } else {
>
> Nitpick, mptr can be defined here.
Okay, will apply the following diff directly:
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 3730775650f4..036f610e044b 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -848,7 +848,6 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
struct super_block *sb = fe->inode->i_sb;
erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
struct erofs_workgroup *grp = NULL;
- void *mptr;
int ret;
DBG_BUGON(fe->pcl);
@@ -883,6 +882,8 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
/* bind cache first when cached decompression is preferred */
z_erofs_bind_cache(fe);
} else {
+ void *mptr;
+
mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP);
if (IS_ERR(mptr)) {
ret = PTR_ERR(mptr);
>
> Reviewed-by: Chao Yu <[email protected]>
Thanks!
Thanks,
Gao Xiang
>
> Thanks,
>
On 2023/8/17 16:28, Gao Xiang wrote:
> z_erofs_page_mark_eio() can be folded into z_erofs_onlinepage_endio() to simplify the code.
>
> Signed-off-by: Gao Xiang <[email protected]>
Reviewed-by: Chao Yu <[email protected]>
Thanks,