2024-04-22 06:24:38

by Chao Yu

Subject: [PATCH v2 1/4] f2fs: convert f2fs_mpage_readpages() to use folio

Convert f2fs_mpage_readpages() to use folio and related
functionality.

Signed-off-by: Chao Yu <[email protected]>
---
v2:
- fix compile warning w/o CONFIG_F2FS_FS_COMPRESSION reported by lkp
fs/f2fs/data.c | 81 +++++++++++++++++++++++++-------------------------
1 file changed, 40 insertions(+), 41 deletions(-)
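
Review note, not part of the diff: the explicit put_page() disappears
from the readahead loop because readahead_folio(), unlike
readahead_page(), drops the reference it takes before returning; the
locked folio stays pinned by the page cache until the read completes.
A rough sketch of the loop-body change:

	/* old: readahead_page() hands the caller a reference */
	page = readahead_page(rac);
	prefetchw(&page->flags);
	/* ... map and submit the page ... */
	if (rac)
		put_page(page);		/* caller drops the reference */

	/* new: readahead_folio() already called folio_put() internally,
	 * so no explicit put is needed after mapping/submitting.
	 */
	folio = readahead_folio(rac);
	prefetchw(&folio->flags);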

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index ed7d08785fcf..6419cf020327 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2345,7 +2345,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
* Major change was from block_size == page_size in f2fs by default.
*/
static int f2fs_mpage_readpages(struct inode *inode,
- struct readahead_control *rac, struct page *page)
+ struct readahead_control *rac, struct folio *folio)
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
@@ -2362,6 +2362,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
.nr_cpages = 0,
};
pgoff_t nc_cluster_idx = NULL_CLUSTER;
+ pgoff_t index;
#endif
unsigned nr_pages = rac ? readahead_count(rac) : 1;
unsigned max_nr_pages = nr_pages;
@@ -2378,64 +2379,62 @@ static int f2fs_mpage_readpages(struct inode *inode,

for (; nr_pages; nr_pages--) {
if (rac) {
- page = readahead_page(rac);
- prefetchw(&page->flags);
+ folio = readahead_folio(rac);
+ prefetchw(&folio->flags);
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (f2fs_compressed_file(inode)) {
- /* there are remained compressed pages, submit them */
- if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
- ret = f2fs_read_multi_pages(&cc, &bio,
- max_nr_pages,
- &last_block_in_bio,
- rac != NULL, false);
- f2fs_destroy_compress_ctx(&cc, false);
- if (ret)
- goto set_error_page;
- }
- if (cc.cluster_idx == NULL_CLUSTER) {
- if (nc_cluster_idx ==
- page->index >> cc.log_cluster_size) {
- goto read_single_page;
- }
-
- ret = f2fs_is_compressed_cluster(inode, page->index);
- if (ret < 0)
- goto set_error_page;
- else if (!ret) {
- nc_cluster_idx =
- page->index >> cc.log_cluster_size;
- goto read_single_page;
- }
-
- nc_cluster_idx = NULL_CLUSTER;
- }
- ret = f2fs_init_compress_ctx(&cc);
+ index = folio_index(folio);
+
+ if (!f2fs_compressed_file(inode))
+ goto read_single_page;
+
+ /* there are remained compressed pages, submit them */
+ if (!f2fs_cluster_can_merge_page(&cc, index)) {
+ ret = f2fs_read_multi_pages(&cc, &bio,
+ max_nr_pages,
+ &last_block_in_bio,
+ rac != NULL, false);
+ f2fs_destroy_compress_ctx(&cc, false);
if (ret)
goto set_error_page;
+ }
+ if (cc.cluster_idx == NULL_CLUSTER) {
+ if (nc_cluster_idx == index >> cc.log_cluster_size)
+ goto read_single_page;

- f2fs_compress_ctx_add_page(&cc, page);
+ ret = f2fs_is_compressed_cluster(inode, index);
+ if (ret < 0)
+ goto set_error_page;
+ else if (!ret) {
+ nc_cluster_idx =
+ index >> cc.log_cluster_size;
+ goto read_single_page;
+ }

- goto next_page;
+ nc_cluster_idx = NULL_CLUSTER;
}
+ ret = f2fs_init_compress_ctx(&cc);
+ if (ret)
+ goto set_error_page;
+
+ f2fs_compress_ctx_add_page(&cc, &folio->page);
+
+ goto next_page;
read_single_page:
#endif
-
- ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
+ ret = f2fs_read_single_page(inode, &folio->page, max_nr_pages, &map,
&bio, &last_block_in_bio, rac);
if (ret) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
set_error_page:
#endif
- zero_user_segment(page, 0, PAGE_SIZE);
- unlock_page(page);
+ folio_zero_segment(folio, 0, folio_size(folio));
+ folio_unlock(folio);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
next_page:
#endif
- if (rac)
- put_page(page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
@@ -2472,7 +2471,7 @@ static int f2fs_read_data_folio(struct file *file, struct folio *folio)
if (f2fs_has_inline_data(inode))
ret = f2fs_read_inline_data(inode, page);
if (ret == -EAGAIN)
- ret = f2fs_mpage_readpages(inode, NULL, page);
+ ret = f2fs_mpage_readpages(inode, NULL, folio);
return ret;
}

--
2.40.1



2024-04-22 06:24:51

by Chao Yu

Subject: [PATCH v2 2/4] f2fs: convert f2fs_read_single_page() to use folio

Convert f2fs_read_single_page() to use folio and related
functionality.

Signed-off-by: Chao Yu <[email protected]>
---
v2:
- no change.
fs/f2fs/data.c | 27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)
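
Review note, not part of the diff: besides the mechanical page -> folio
helper renames, the bio test changes shape because bio_add_page()
returns the number of bytes added (0 on failure) while bio_add_folio()
returns a bool. A sketch of the equivalent checks:

	/* old: failure shows up as "fewer than blocksize bytes added" */
	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
		goto submit_and_realloc;

	/* new: bio_add_folio() returns true on success, false on failure */
	if (!bio_add_folio(bio, folio, blocksize, 0))
		goto submit_and_realloc;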

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 6419cf020327..bb6c0e955d7e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2063,7 +2063,7 @@ static inline loff_t f2fs_readpage_limit(struct inode *inode)
return i_size_read(inode);
}

-static int f2fs_read_single_page(struct inode *inode, struct page *page,
+static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
unsigned nr_pages,
struct f2fs_map_blocks *map,
struct bio **bio_ret,
@@ -2076,9 +2076,10 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
sector_t last_block;
sector_t last_block_in_file;
sector_t block_nr;
+ pgoff_t index = folio_index(folio);
int ret = 0;

- block_in_file = (sector_t)page_index(page);
+ block_in_file = (sector_t)index;
last_block = block_in_file + nr_pages;
last_block_in_file = bytes_to_blks(inode,
f2fs_readpage_limit(inode) + blocksize - 1);
@@ -2109,7 +2110,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
got_it:
if ((map->m_flags & F2FS_MAP_MAPPED)) {
block_nr = map->m_pblk + block_in_file - map->m_lblk;
- SetPageMappedToDisk(page);
+ folio_set_mappedtodisk(folio);

if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
DATA_GENERIC_ENHANCE_READ)) {
@@ -2118,15 +2119,15 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
}
} else {
zero_out:
- zero_user_segment(page, 0, PAGE_SIZE);
- if (f2fs_need_verity(inode, page->index) &&
- !fsverity_verify_page(page)) {
+ folio_zero_segment(folio, 0, folio_size(folio));
+ if (f2fs_need_verity(inode, index) &&
+ !fsverity_verify_folio(folio)) {
ret = -EIO;
goto out;
}
- if (!PageUptodate(page))
- SetPageUptodate(page);
- unlock_page(page);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
goto out;
}

@@ -2136,14 +2137,14 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
*/
if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
*last_block_in_bio, block_nr) ||
- !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
+ !f2fs_crypt_mergeable_bio(bio, inode, index, NULL))) {
submit_and_realloc:
f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
- is_readahead ? REQ_RAHEAD : 0, page->index,
+ is_readahead ? REQ_RAHEAD : 0, index,
false);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
@@ -2158,7 +2159,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
*/
f2fs_wait_on_block_writeback(inode, block_nr);

- if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+ if (!bio_add_folio(bio, folio, blocksize, 0))
goto submit_and_realloc;

inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
@@ -2423,7 +2424,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
goto next_page;
read_single_page:
#endif
- ret = f2fs_read_single_page(inode, &folio->page, max_nr_pages, &map,
+ ret = f2fs_read_single_page(inode, folio, max_nr_pages, &map,
&bio, &last_block_in_bio, rac);
if (ret) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
--
2.40.1


2024-04-22 06:25:09

by Chao Yu

Subject: [PATCH v2 3/4] f2fs: convert f2fs_read_inline_data() to use folio

Convert f2fs_read_inline_data() to use folio and related
functionality, and also convert its caller to use folio.

Signed-off-by: Chao Yu <[email protected]>
---
v2:
- no change.
fs/f2fs/data.c | 11 +++++------
fs/f2fs/f2fs.h | 4 ++--
fs/f2fs/inline.c | 34 +++++++++++++++++-----------------
3 files changed, 24 insertions(+), 25 deletions(-)
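
Review note, not part of the diff: callers that still hold a struct
page bridge to the new prototypes with page_folio(), while the copy
into the inline block moves from memcpy_to_page() to its folio
counterpart memcpy_to_folio(). For example:

	/* call site that still has a struct page (f2fs_convert_inline_page) */
	f2fs_do_read_inline_data(page_folio(page), dn->inode_page);

	/* inside the helper, the page calls map directly onto folio ones */
	folio_zero_segment(folio, MAX_INLINE_DATA(inode), folio_size(folio));
	memcpy_to_folio(folio, 0, inline_data_addr(inode, ipage),
					MAX_INLINE_DATA(inode));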

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index bb6c0e955d7e..24f9a39ffd56 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2457,20 +2457,19 @@ static int f2fs_mpage_readpages(struct inode *inode,

static int f2fs_read_data_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *inode = page_file_mapping(page)->host;
+ struct inode *inode = folio_file_mapping(folio)->host;
int ret = -EAGAIN;

- trace_f2fs_readpage(page, DATA);
+ trace_f2fs_readpage(&folio->page, DATA);

if (!f2fs_is_compress_backend_ready(inode)) {
- unlock_page(page);
+ folio_unlock(folio);
return -EOPNOTSUPP;
}

/* If the file has inline data, try to read it directly */
if (f2fs_has_inline_data(inode))
- ret = f2fs_read_inline_data(inode, page);
+ ret = f2fs_read_inline_data(inode, folio);
if (ret == -EAGAIN)
ret = f2fs_mpage_readpages(inode, NULL, folio);
return ret;
@@ -3399,7 +3398,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,

if (f2fs_has_inline_data(inode)) {
if (pos + len <= MAX_INLINE_DATA(inode)) {
- f2fs_do_read_inline_data(page, ipage);
+ f2fs_do_read_inline_data(page_folio(page), ipage);
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
set_page_private_inline(ipage);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 3f7196122574..a0ae99bcca39 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -4154,10 +4154,10 @@ extern struct kmem_cache *f2fs_inode_entry_slab;
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_sanity_check_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
-void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
+void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
struct page *ipage, u64 from);
-int f2fs_read_inline_data(struct inode *inode, struct page *page);
+int f2fs_read_inline_data(struct inode *inode, struct folio *folio);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 3d3218a4b29d..7638d0d7b7ee 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -61,22 +61,22 @@ bool f2fs_may_inline_dentry(struct inode *inode)
return true;
}

-void f2fs_do_read_inline_data(struct page *page, struct page *ipage)
+void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio_file_mapping(folio)->host;

- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
return;

- f2fs_bug_on(F2FS_P_SB(page), page->index);
+ f2fs_bug_on(F2FS_I_SB(inode), folio_index(folio));

- zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE);
+ folio_zero_segment(folio, MAX_INLINE_DATA(inode), folio_size(folio));

/* Copy the whole inline data block */
- memcpy_to_page(page, 0, inline_data_addr(inode, ipage),
+ memcpy_to_folio(folio, 0, inline_data_addr(inode, ipage),
MAX_INLINE_DATA(inode));
- if (!PageUptodate(page))
- SetPageUptodate(page);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
}

void f2fs_truncate_inline_inode(struct inode *inode,
@@ -97,13 +97,13 @@ void f2fs_truncate_inline_inode(struct inode *inode,
clear_inode_flag(inode, FI_DATA_EXIST);
}

-int f2fs_read_inline_data(struct inode *inode, struct page *page)
+int f2fs_read_inline_data(struct inode *inode, struct folio *folio)
{
struct page *ipage;

ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
if (IS_ERR(ipage)) {
- unlock_page(page);
+ folio_unlock(folio);
return PTR_ERR(ipage);
}

@@ -112,15 +112,15 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
return -EAGAIN;
}

- if (page->index)
- zero_user_segment(page, 0, PAGE_SIZE);
+ if (folio_index(folio))
+ folio_zero_segment(folio, 0, folio_size(folio));
else
- f2fs_do_read_inline_data(page, ipage);
+ f2fs_do_read_inline_data(folio, ipage);

- if (!PageUptodate(page))
- SetPageUptodate(page);
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
f2fs_put_page(ipage, 1);
- unlock_page(page);
+ folio_unlock(folio);
return 0;
}

@@ -166,7 +166,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)

f2fs_bug_on(F2FS_P_SB(page), folio_test_writeback(page_folio(page)));

- f2fs_do_read_inline_data(page, dn->inode_page);
+ f2fs_do_read_inline_data(page_folio(page), dn->inode_page);
set_page_dirty(page);

/* clear dirty state */
--
2.40.1


2024-04-22 06:25:23

by Chao Yu

Subject: [PATCH v2 4/4] f2fs: convert f2fs__page tracepoint class to use folio

Convert the f2fs__page tracepoint class and its instances to use folio
and related functionality, and rename the class to f2fs__folio.

Signed-off-by: Chao Yu <[email protected]>
---
v2:
- no change.
fs/f2fs/checkpoint.c | 4 ++--
fs/f2fs/data.c | 10 ++++-----
fs/f2fs/node.c | 4 ++--
include/trace/events/f2fs.h | 42 ++++++++++++++++++-------------------
4 files changed, 30 insertions(+), 30 deletions(-)
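
Review note, not part of the diff: with the event class taking a
struct folio *, call sites that already have a folio pass it directly,
and call sites that still carry a struct page wrap it with
page_folio(). For example:

	trace_f2fs_set_page_dirty(folio, DATA);		/* folio caller */
	trace_f2fs_writepage(page_folio(page), NODE);	/* page caller */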

diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index eac698b8dd38..5d05a413f451 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -345,7 +345,7 @@ static int __f2fs_write_meta_page(struct page *page,
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);

- trace_f2fs_writepage(page, META);
+ trace_f2fs_writepage(page_folio(page), META);

if (unlikely(f2fs_cp_error(sbi))) {
if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
@@ -492,7 +492,7 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
static bool f2fs_dirty_meta_folio(struct address_space *mapping,
struct folio *folio)
{
- trace_f2fs_set_page_dirty(&folio->page, META);
+ trace_f2fs_set_page_dirty(folio, META);

if (!folio_test_uptodate(folio))
folio_mark_uptodate(folio);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 24f9a39ffd56..21d4c1c9b25b 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2460,7 +2460,7 @@ static int f2fs_read_data_folio(struct file *file, struct folio *folio)
struct inode *inode = folio_file_mapping(folio)->host;
int ret = -EAGAIN;

- trace_f2fs_readpage(&folio->page, DATA);
+ trace_f2fs_readpage(folio, DATA);

if (!f2fs_is_compress_backend_ready(inode)) {
folio_unlock(folio);
@@ -2709,7 +2709,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
} else {
set_inode_flag(inode, FI_UPDATE_WRITE);
}
- trace_f2fs_do_write_data_page(fio->page, IPU);
+ trace_f2fs_do_write_data_page(page_folio(page), IPU);
return err;
}

@@ -2738,7 +2738,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)

/* LFS mode write path */
f2fs_outplace_write_data(&dn, fio);
- trace_f2fs_do_write_data_page(page, OPU);
+ trace_f2fs_do_write_data_page(page_folio(page), OPU);
set_inode_flag(inode, FI_APPEND_WRITE);
out_writepage:
f2fs_put_dnode(&dn);
@@ -2785,7 +2785,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.last_block = last_block,
};

- trace_f2fs_writepage(page, DATA);
+ trace_f2fs_writepage(page_folio(page), DATA);

/* we should bypass data pages to proceed the kworker jobs */
if (unlikely(f2fs_cp_error(sbi))) {
@@ -3759,7 +3759,7 @@ static bool f2fs_dirty_data_folio(struct address_space *mapping,
{
struct inode *inode = mapping->host;

- trace_f2fs_set_page_dirty(&folio->page, DATA);
+ trace_f2fs_set_page_dirty(folio, DATA);

if (!folio_test_uptodate(folio))
folio_mark_uptodate(folio);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 3b9eb5693683..95cecf08cb37 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1624,7 +1624,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
};
unsigned int seq;

- trace_f2fs_writepage(page, NODE);
+ trace_f2fs_writepage(page_folio(page), NODE);

if (unlikely(f2fs_cp_error(sbi))) {
/* keep node pages in remount-ro mode */
@@ -2171,7 +2171,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
static bool f2fs_dirty_node_folio(struct address_space *mapping,
struct folio *folio)
{
- trace_f2fs_set_page_dirty(&folio->page, NODE);
+ trace_f2fs_set_page_dirty(folio, NODE);

if (!folio_test_uptodate(folio))
folio_mark_uptodate(folio);
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 7ed0fc430dc6..371ba28415f5 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1304,11 +1304,11 @@ TRACE_EVENT(f2fs_write_end,
__entry->copied)
);

-DECLARE_EVENT_CLASS(f2fs__page,
+DECLARE_EVENT_CLASS(f2fs__folio,

- TP_PROTO(struct page *page, int type),
+ TP_PROTO(struct folio *folio, int type),

- TP_ARGS(page, type),
+ TP_ARGS(folio, type),

TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1321,14 +1321,14 @@ DECLARE_EVENT_CLASS(f2fs__page,
),

TP_fast_assign(
- __entry->dev = page_file_mapping(page)->host->i_sb->s_dev;
- __entry->ino = page_file_mapping(page)->host->i_ino;
+ __entry->dev = folio_file_mapping(folio)->host->i_sb->s_dev;
+ __entry->ino = folio_file_mapping(folio)->host->i_ino;
__entry->type = type;
__entry->dir =
- S_ISDIR(page_file_mapping(page)->host->i_mode);
- __entry->index = page->index;
- __entry->dirty = PageDirty(page);
- __entry->uptodate = PageUptodate(page);
+ S_ISDIR(folio_file_mapping(folio)->host->i_mode);
+ __entry->index = folio_index(folio);
+ __entry->dirty = folio_test_dirty(folio);
+ __entry->uptodate = folio_test_uptodate(folio);
),

TP_printk("dev = (%d,%d), ino = %lu, %s, %s, index = %lu, "
@@ -1341,32 +1341,32 @@ DECLARE_EVENT_CLASS(f2fs__page,
__entry->uptodate)
);

-DEFINE_EVENT(f2fs__page, f2fs_writepage,
+DEFINE_EVENT(f2fs__folio, f2fs_writepage,

- TP_PROTO(struct page *page, int type),
+ TP_PROTO(struct folio *folio, int type),

- TP_ARGS(page, type)
+ TP_ARGS(folio, type)
);

-DEFINE_EVENT(f2fs__page, f2fs_do_write_data_page,
+DEFINE_EVENT(f2fs__folio, f2fs_do_write_data_page,

- TP_PROTO(struct page *page, int type),
+ TP_PROTO(struct folio *folio, int type),

- TP_ARGS(page, type)
+ TP_ARGS(folio, type)
);

-DEFINE_EVENT(f2fs__page, f2fs_readpage,
+DEFINE_EVENT(f2fs__folio, f2fs_readpage,

- TP_PROTO(struct page *page, int type),
+ TP_PROTO(struct folio *folio, int type),

- TP_ARGS(page, type)
+ TP_ARGS(folio, type)
);

-DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty,
+DEFINE_EVENT(f2fs__folio, f2fs_set_page_dirty,

- TP_PROTO(struct page *page, int type),
+ TP_PROTO(struct folio *folio, int type),

- TP_ARGS(page, type)
+ TP_ARGS(folio, type)
);

TRACE_EVENT(f2fs_replace_atomic_write_block,
--
2.40.1


2024-04-23 17:48:18

by patchwork-bot+f2fs

Subject: Re: [f2fs-dev] [PATCH v2 1/4] f2fs: convert f2fs_mpage_readpages() to use folio

Hello:

This series was applied to jaegeuk/f2fs.git (dev)
by Jaegeuk Kim <[email protected]>:

On Mon, 22 Apr 2024 14:24:14 +0800 you wrote:
> Convert f2fs_mpage_readpages() to use folio and related
> functionality.
>
> Signed-off-by: Chao Yu <[email protected]>
> ---
> v2:
> - fix compile warning w/o CONFIG_F2FS_FS_COMPRESSION reported by lkp
> fs/f2fs/data.c | 81 +++++++++++++++++++++++++-------------------------
> 1 file changed, 40 insertions(+), 41 deletions(-)

Here is the summary with links:
- [f2fs-dev,v2,1/4] f2fs: convert f2fs_mpage_readpages() to use folio
(no matching commit)
- [f2fs-dev,v2,2/4] f2fs: convert f2fs_read_single_page() to use folio
(no matching commit)
- [f2fs-dev,v2,3/4] f2fs: convert f2fs_read_inline_data() to use folio
https://git.kernel.org/jaegeuk/f2fs/c/96ea46f30b26
- [f2fs-dev,v2,4/4] f2fs: convert f2fs__page tracepoint class to use folio
https://git.kernel.org/jaegeuk/f2fs/c/92f750d847c9

You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html