2019-12-23 03:34:12

by Chao Yu

Subject: Re: [RFC PATCH v5] f2fs: support data compression

Hi Jaegeuk,

Sorry for the delay.

On 2019/12/19 5:46, Jaegeuk Kim wrote:
> Hi Chao,
>
> I still see some diffs from my latest testing version, so please check anything
> that you made additionally from here.
>
> https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/commit/?h=dev&id=25d18e19a91e60837d36368ee939db13fd16dc64

I've checked the diff and picked up the valid parts; could you please check
and comment on it?

---
fs/f2fs/compress.c | 8 ++++----
fs/f2fs/data.c | 18 +++++++++++++++---
fs/f2fs/f2fs.h | 3 +++
fs/f2fs/file.c | 1 -
4 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index af23ed6deffd..1bc86a54ad71 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
fgp_flag, GFP_NOFS);
if (!page) {
ret = -ENOMEM;
- goto unlock_pages;
+ goto release_pages;
}

if (PageUptodate(page))
@@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
&last_block_in_bio, false);
if (ret)
- goto release_pages;
+ goto unlock_pages;
if (bio)
f2fs_submit_bio(sbi, bio, DATA);

ret = f2fs_init_compress_ctx(cc);
if (ret)
- goto release_pages;
+ goto unlock_pages;
}

for (i = 0; i < cc->cluster_size; i++) {
@@ -762,7 +762,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (err)
goto out_unlock_op;

- psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
+ psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
if (err)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 19cd03450066..f1f5c701228d 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
+void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
+{
+ f2fs_decompress_end_io(rpages, cluster_size, false, true);
+}
+
static void f2fs_verify_bio(struct bio *bio)
{
struct page *page = bio_first_page_all(bio);
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);

- f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
+ f2fs_verify_pages(dic->rpages, dic->cluster_size);
f2fs_free_dic(dic);
}
#endif
@@ -507,10 +512,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *target = bvec->bv_page;

- if (fscrypt_is_bounce_page(target))
+ if (fscrypt_is_bounce_page(target)) {
target = fscrypt_pagecache_page(target);
- if (f2fs_is_compressed_page(target))
+ if (IS_ERR(target))
+ continue;
+ }
+ if (f2fs_is_compressed_page(target)) {
target = f2fs_compress_control_page(target);
+ if (IS_ERR(target))
+ continue;
+ }

if (inode && inode == target->mapping->host)
return true;
@@ -2039,6 +2050,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
if (ret)
goto out;

+ /* cluster was overwritten as normal cluster */
if (dn.data_blkaddr != COMPRESS_ADDR)
goto out;

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 5d55cef66410..17d2af4eeafb 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
1 << F2FS_I(inode)->i_log_cluster_size;
F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
set_inode_flag(inode, FI_COMPRESSED_FILE);
+ stat_inc_compr_inode(inode);
}

static inline unsigned int addrs_per_inode(struct inode *inode)
@@ -3961,6 +3962,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
return true;
if (f2fs_is_multi_device(sbi))
return true;
+ if (f2fs_compressed_file(inode))
+ return true;
/*
* for blkzoned device, fallback direct IO to buffered IO, so
* all IOs can be serialized by log-structured write.
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index bde5612f37f5..9aeadf14413c 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1828,7 +1828,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
return -EINVAL;

set_compress_context(inode);
- stat_inc_compr_inode(inode);
}
}
if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
--
2.18.0.rc1

Thanks,
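
As an aside on the psize hunk above: the (loff_t) cast matters on 32-bit
kernels, where pgoff_t is 32 bits wide, so without it the index-plus-one and
the PAGE_SHIFT shift are evaluated in 32-bit arithmetic and wrap for offsets
at or beyond 4GiB. A minimal user-space sketch of the difference (purely
illustrative; a plain uint32_t stands in for a 32-bit pgoff_t and 12 for
PAGE_SHIFT):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t index = 0x100000;	/* page index right at the 4GiB boundary */

	/* shift evaluated in 32-bit arithmetic: wraps before being widened */
	long long wrong = (index + 1) << 12;
	/* widen first, as the patched line does via the (loff_t) cast */
	long long right = (long long)(index + 1) << 12;

	printf("wrong=%lld right=%lld\n", wrong, right);
	return 0;
}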


2019-12-23 03:59:46

by Chao Yu

Subject: Re: [f2fs-dev] [RFC PATCH v5] f2fs: support data compression

On 2019/12/23 11:32, Chao Yu wrote:
> Hi Jaegeuk,
>
> Sorry for the delay.
>
> On 2019/12/19 5:46, Jaegeuk Kim wrote:
>> Hi Chao,
>>
>> I still see some diffs from my latest testing version, so please check anything
>> that you made additionally from here.
>>
>> https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/commit/?h=dev&id=25d18e19a91e60837d36368ee939db13fd16dc64
>
> I've checked the diff and picked up valid parts, could you please check and
> comment on it?
>
> ---
> fs/f2fs/compress.c | 8 ++++----
> fs/f2fs/data.c | 18 +++++++++++++++---
> fs/f2fs/f2fs.h | 3 +++
> fs/f2fs/file.c | 1 -
> 4 files changed, 22 insertions(+), 8 deletions(-)
>
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index af23ed6deffd..1bc86a54ad71 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> fgp_flag, GFP_NOFS);
> if (!page) {
> ret = -ENOMEM;
> - goto unlock_pages;
> + goto release_pages;
> }
>
> if (PageUptodate(page))
> @@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
> &last_block_in_bio, false);
> if (ret)
> - goto release_pages;
> + goto unlock_pages;
> if (bio)
> f2fs_submit_bio(sbi, bio, DATA);
>
> ret = f2fs_init_compress_ctx(cc);
> if (ret)
> - goto release_pages;
> + goto unlock_pages;
> }
>
> for (i = 0; i < cc->cluster_size; i++) {
> @@ -762,7 +762,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> if (err)
> goto out_unlock_op;
>
> - psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
> + psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>
> err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
> if (err)
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 19cd03450066..f1f5c701228d 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
> }
>
> #ifdef CONFIG_F2FS_FS_COMPRESSION
> +void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
> +{
> + f2fs_decompress_end_io(rpages, cluster_size, false, true);
> +}
> +
> static void f2fs_verify_bio(struct bio *bio)
> {
> struct page *page = bio_first_page_all(bio);
> struct decompress_io_ctx *dic =
> (struct decompress_io_ctx *)page_private(page);
>
> - f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
> + f2fs_verify_pages(dic->rpages, dic->cluster_size);
> f2fs_free_dic(dic);
> }
> #endif
> @@ -507,10 +512,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
> bio_for_each_segment_all(bvec, bio, iter_all) {
> struct page *target = bvec->bv_page;
>
> - if (fscrypt_is_bounce_page(target))
> + if (fscrypt_is_bounce_page(target)) {
> target = fscrypt_pagecache_page(target);
> - if (f2fs_is_compressed_page(target))
> + if (IS_ERR(target))
> + continue;
> + }
> + if (f2fs_is_compressed_page(target)) {
> target = f2fs_compress_control_page(target);
> + if (IS_ERR(target))
> + continue;
> + }
>
> if (inode && inode == target->mapping->host)
> return true;
> @@ -2039,6 +2050,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
> if (ret)
> goto out;
>
> + /* cluster was overwritten as normal cluster */
> if (dn.data_blkaddr != COMPRESS_ADDR)
> goto out;
>
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 5d55cef66410..17d2af4eeafb 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
> 1 << F2FS_I(inode)->i_log_cluster_size;
> F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
> set_inode_flag(inode, FI_COMPRESSED_FILE);
> + stat_inc_compr_inode(inode);

BTW, if we track the stat here, we need to relocate set_compress_context() to
pass compilation; maybe move it to the place around f2fs_may_compress().
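
To be clear about the build issue: an inline helper can only use a macro that
is already defined above it in the header, and stat_inc_compr_inode() is
defined further down in f2fs.h than where set_compress_context() currently
sits. A stand-alone sketch of the working order (hypothetical int counter in
place of the real inode argument; this is not the actual f2fs.h code):

#include <stdio.h>

/* stand-in for the real stat_inc_compr_inode() macro */
#define stat_inc_compr_inode(counter)	((*(counter))++)

/*
 * Defined after the macro above, so the call below expands fine; if this
 * inline sat above the #define, the compiler would see a call to an
 * undeclared function and the build would fail -- hence the relocation.
 */
static inline void set_compress_context(int *compr_inode_count)
{
	/* the real helper also sets F2FS_COMPR_FL and the cluster size */
	stat_inc_compr_inode(compr_inode_count);
}

int main(void)
{
	int compr_inodes = 0;

	set_compress_context(&compr_inodes);
	printf("compr_inodes=%d\n", compr_inodes);	/* prints 1 */
	return 0;
}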

Thanks,

> }
>
> static inline unsigned int addrs_per_inode(struct inode *inode)
> @@ -3961,6 +3962,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
> return true;
> if (f2fs_is_multi_device(sbi))
> return true;
> + if (f2fs_compressed_file(inode))
> + return true;
> /*
> * for blkzoned device, fallback direct IO to buffered IO, so
> * all IOs can be serialized by log-structured write.
> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> index bde5612f37f5..9aeadf14413c 100644
> --- a/fs/f2fs/file.c
> +++ b/fs/f2fs/file.c
> @@ -1828,7 +1828,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
> return -EINVAL;
>
> set_compress_context(inode);
> - stat_inc_compr_inode(inode);
> }
> }
> if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
>

2019-12-23 19:31:09

by Jaegeuk Kim

Subject: Re: [RFC PATCH v5] f2fs: support data compression

On 12/23, Chao Yu wrote:
> Hi Jaegeuk,
>
> Sorry for the delay.
>
> On 2019/12/19 5:46, Jaegeuk Kim wrote:
> > Hi Chao,
> >
> > I still see some diffs from my latest testing version, so please check anything
> > that you made additionally from here.
> >
> > https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/commit/?h=dev&id=25d18e19a91e60837d36368ee939db13fd16dc64
>
> I've checked the diff and picked up valid parts, could you please check and
> comment on it?

Let me test it first and see the code change soon.

Thanks,

>
> ---
> fs/f2fs/compress.c | 8 ++++----
> fs/f2fs/data.c | 18 +++++++++++++++---
> fs/f2fs/f2fs.h | 3 +++
> fs/f2fs/file.c | 1 -
> 4 files changed, 22 insertions(+), 8 deletions(-)
>
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index af23ed6deffd..1bc86a54ad71 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> fgp_flag, GFP_NOFS);
> if (!page) {
> ret = -ENOMEM;
> - goto unlock_pages;
> + goto release_pages;
> }
>
> if (PageUptodate(page))
> @@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
> &last_block_in_bio, false);
> if (ret)
> - goto release_pages;
> + goto unlock_pages;
> if (bio)
> f2fs_submit_bio(sbi, bio, DATA);
>
> ret = f2fs_init_compress_ctx(cc);
> if (ret)
> - goto release_pages;
> + goto unlock_pages;
> }
>
> for (i = 0; i < cc->cluster_size; i++) {
> @@ -762,7 +762,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> if (err)
> goto out_unlock_op;
>
> - psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
> + psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>
> err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
> if (err)
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 19cd03450066..f1f5c701228d 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
> }
>
> #ifdef CONFIG_F2FS_FS_COMPRESSION
> +void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
> +{
> + f2fs_decompress_end_io(rpages, cluster_size, false, true);
> +}
> +
> static void f2fs_verify_bio(struct bio *bio)
> {
> struct page *page = bio_first_page_all(bio);
> struct decompress_io_ctx *dic =
> (struct decompress_io_ctx *)page_private(page);
>
> - f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
> + f2fs_verify_pages(dic->rpages, dic->cluster_size);
> f2fs_free_dic(dic);
> }
> #endif
> @@ -507,10 +512,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
> bio_for_each_segment_all(bvec, bio, iter_all) {
> struct page *target = bvec->bv_page;
>
> - if (fscrypt_is_bounce_page(target))
> + if (fscrypt_is_bounce_page(target)) {
> target = fscrypt_pagecache_page(target);
> - if (f2fs_is_compressed_page(target))
> + if (IS_ERR(target))
> + continue;
> + }
> + if (f2fs_is_compressed_page(target)) {
> target = f2fs_compress_control_page(target);
> + if (IS_ERR(target))
> + continue;
> + }
>
> if (inode && inode == target->mapping->host)
> return true;
> @@ -2039,6 +2050,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
> if (ret)
> goto out;
>
> + /* cluster was overwritten as normal cluster */
> if (dn.data_blkaddr != COMPRESS_ADDR)
> goto out;
>
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 5d55cef66410..17d2af4eeafb 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
> 1 << F2FS_I(inode)->i_log_cluster_size;
> F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
> set_inode_flag(inode, FI_COMPRESSED_FILE);
> + stat_inc_compr_inode(inode);
> }
>
> static inline unsigned int addrs_per_inode(struct inode *inode)
> @@ -3961,6 +3962,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
> return true;
> if (f2fs_is_multi_device(sbi))
> return true;
> + if (f2fs_compressed_file(inode))
> + return true;
> /*
> * for blkzoned device, fallback direct IO to buffered IO, so
> * all IOs can be serialized by log-structured write.
> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> index bde5612f37f5..9aeadf14413c 100644
> --- a/fs/f2fs/file.c
> +++ b/fs/f2fs/file.c
> @@ -1828,7 +1828,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
> return -EINVAL;
>
> set_compress_context(inode);
> - stat_inc_compr_inode(inode);
> }
> }
> if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
> --
> 2.18.0.rc1
>
> Thanks,

2019-12-31 00:51:16

by Jaegeuk Kim

Subject: Re: [RFC PATCH v5] f2fs: support data compression

On 12/23, Chao Yu wrote:
> Hi Jaegeuk,
>
> Sorry for the delay.
>
> On 2019/12/19 5:46, Jaegeuk Kim wrote:
> > Hi Chao,
> >
> > I still see some diffs from my latest testing version, so please check anything
> > that you made additionally from here.
> >
> > https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/commit/?h=dev&id=25d18e19a91e60837d36368ee939db13fd16dc64
>
> I've checked the diff and picked up valid parts, could you please check and
> comment on it?
>
> ---
> fs/f2fs/compress.c | 8 ++++----
> fs/f2fs/data.c | 18 +++++++++++++++---
> fs/f2fs/f2fs.h | 3 +++
> fs/f2fs/file.c | 1 -
> 4 files changed, 22 insertions(+), 8 deletions(-)
>
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index af23ed6deffd..1bc86a54ad71 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> fgp_flag, GFP_NOFS);
> if (!page) {
> ret = -ENOMEM;
> - goto unlock_pages;
> + goto release_pages;
> }
>
> if (PageUptodate(page))
> @@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
> &last_block_in_bio, false);
> if (ret)
> - goto release_pages;
> + goto unlock_pages;
> if (bio)
> f2fs_submit_bio(sbi, bio, DATA);
>
> ret = f2fs_init_compress_ctx(cc);
> if (ret)
> - goto release_pages;
> + goto unlock_pages;
> }
>
> for (i = 0; i < cc->cluster_size; i++) {
> @@ -762,7 +762,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> if (err)
> goto out_unlock_op;
>
> - psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
> + psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>
> err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
> if (err)
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 19cd03450066..f1f5c701228d 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
> }
>
> #ifdef CONFIG_F2FS_FS_COMPRESSION
> +void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
> +{
> + f2fs_decompress_end_io(rpages, cluster_size, false, true);
> +}
> +
> static void f2fs_verify_bio(struct bio *bio)
> {
> struct page *page = bio_first_page_all(bio);
> struct decompress_io_ctx *dic =
> (struct decompress_io_ctx *)page_private(page);
>
> - f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
> + f2fs_verify_pages(dic->rpages, dic->cluster_size);
> f2fs_free_dic(dic);
> }
> #endif
> @@ -507,10 +512,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
> bio_for_each_segment_all(bvec, bio, iter_all) {
> struct page *target = bvec->bv_page;
>
> - if (fscrypt_is_bounce_page(target))
> + if (fscrypt_is_bounce_page(target)) {
> target = fscrypt_pagecache_page(target);
> - if (f2fs_is_compressed_page(target))
> + if (IS_ERR(target))
> + continue;
> + }
> + if (f2fs_is_compressed_page(target)) {
> target = f2fs_compress_control_page(target);
> + if (IS_ERR(target))
> + continue;
> + }
>
> if (inode && inode == target->mapping->host)
> return true;
> @@ -2039,6 +2050,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
> if (ret)
> goto out;
>
> + /* cluster was overwritten as normal cluster */
> if (dn.data_blkaddr != COMPRESS_ADDR)
> goto out;
>
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 5d55cef66410..17d2af4eeafb 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
> 1 << F2FS_I(inode)->i_log_cluster_size;
> F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
> set_inode_flag(inode, FI_COMPRESSED_FILE);
> + stat_inc_compr_inode(inode);
> }
>
> static inline unsigned int addrs_per_inode(struct inode *inode)
> @@ -3961,6 +3962,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
> return true;
> if (f2fs_is_multi_device(sbi))
> return true;
> + if (f2fs_compressed_file(inode))
> + return true;
> /*
> * for blkzoned device, fallback direct IO to buffered IO, so
> * all IOs can be serialized by log-structured write.
> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> index bde5612f37f5..9aeadf14413c 100644
> --- a/fs/f2fs/file.c
> +++ b/fs/f2fs/file.c
> @@ -1828,7 +1828,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
> return -EINVAL;
>
> set_compress_context(inode);
> - stat_inc_compr_inode(inode);

As this breaks the count, I'll keep it as is.

Thanks,

> }
> }
> if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
> --
> 2.18.0.rc1
>
> Thanks,

2019-12-31 01:47:08

by Chao Yu

Subject: Re: [RFC PATCH v5] f2fs: support data compression

On 2019/12/31 8:46, Jaegeuk Kim wrote:
> On 12/23, Chao Yu wrote:
>> Hi Jaegeuk,
>>
>> Sorry for the delay.
>>
>> On 2019/12/19 5:46, Jaegeuk Kim wrote:
>>> Hi Chao,
>>>
>>> I still see some diffs from my latest testing version, so please check anything
>>> that you made additionally from here.
>>>
>>> https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/commit/?h=dev&id=25d18e19a91e60837d36368ee939db13fd16dc64
>>
>> I've checked the diff and picked up valid parts, could you please check and
>> comment on it?
>>
>> ---
>> fs/f2fs/compress.c | 8 ++++----
>> fs/f2fs/data.c | 18 +++++++++++++++---
>> fs/f2fs/f2fs.h | 3 +++
>> fs/f2fs/file.c | 1 -
>> 4 files changed, 22 insertions(+), 8 deletions(-)
>>
>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>> index af23ed6deffd..1bc86a54ad71 100644
>> --- a/fs/f2fs/compress.c
>> +++ b/fs/f2fs/compress.c
>> @@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>> fgp_flag, GFP_NOFS);
>> if (!page) {
>> ret = -ENOMEM;
>> - goto unlock_pages;
>> + goto release_pages;
>> }
>>
>> if (PageUptodate(page))
>> @@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>> ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
>> &last_block_in_bio, false);
>> if (ret)
>> - goto release_pages;
>> + goto unlock_pages;
>> if (bio)
>> f2fs_submit_bio(sbi, bio, DATA);
>>
>> ret = f2fs_init_compress_ctx(cc);
>> if (ret)
>> - goto release_pages;
>> + goto unlock_pages;
>> }
>>
>> for (i = 0; i < cc->cluster_size; i++) {
>> @@ -762,7 +762,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>> if (err)
>> goto out_unlock_op;
>>
>> - psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>> + psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>>
>> err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
>> if (err)
>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
>> index 19cd03450066..f1f5c701228d 100644
>> --- a/fs/f2fs/data.c
>> +++ b/fs/f2fs/data.c
>> @@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
>> }
>>
>> #ifdef CONFIG_F2FS_FS_COMPRESSION
>> +void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
>> +{
>> + f2fs_decompress_end_io(rpages, cluster_size, false, true);
>> +}
>> +
>> static void f2fs_verify_bio(struct bio *bio)
>> {
>> struct page *page = bio_first_page_all(bio);
>> struct decompress_io_ctx *dic =
>> (struct decompress_io_ctx *)page_private(page);
>>
>> - f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
>> + f2fs_verify_pages(dic->rpages, dic->cluster_size);
>> f2fs_free_dic(dic);
>> }
>> #endif
>> @@ -507,10 +512,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
>> bio_for_each_segment_all(bvec, bio, iter_all) {
>> struct page *target = bvec->bv_page;
>>
>> - if (fscrypt_is_bounce_page(target))
>> + if (fscrypt_is_bounce_page(target)) {
>> target = fscrypt_pagecache_page(target);
>> - if (f2fs_is_compressed_page(target))
>> + if (IS_ERR(target))
>> + continue;
>> + }
>> + if (f2fs_is_compressed_page(target)) {
>> target = f2fs_compress_control_page(target);
>> + if (IS_ERR(target))
>> + continue;
>> + }
>>
>> if (inode && inode == target->mapping->host)
>> return true;
>> @@ -2039,6 +2050,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>> if (ret)
>> goto out;
>>
>> + /* cluster was overwritten as normal cluster */
>> if (dn.data_blkaddr != COMPRESS_ADDR)
>> goto out;
>>
>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>> index 5d55cef66410..17d2af4eeafb 100644
>> --- a/fs/f2fs/f2fs.h
>> +++ b/fs/f2fs/f2fs.h
>> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
>> 1 << F2FS_I(inode)->i_log_cluster_size;
>> F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
>> set_inode_flag(inode, FI_COMPRESSED_FILE);
>> + stat_inc_compr_inode(inode);
>> }
>>
>> static inline unsigned int addrs_per_inode(struct inode *inode)
>> @@ -3961,6 +3962,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
>> return true;
>> if (f2fs_is_multi_device(sbi))
>> return true;
>> + if (f2fs_compressed_file(inode))
>> + return true;
>> /*
>> * for blkzoned device, fallback direct IO to buffered IO, so
>> * all IOs can be serialized by log-structured write.
>> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
>> index bde5612f37f5..9aeadf14413c 100644
>> --- a/fs/f2fs/file.c
>> +++ b/fs/f2fs/file.c
>> @@ -1828,7 +1828,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
>> return -EINVAL;
>>
>> set_compress_context(inode);
>> - stat_inc_compr_inode(inode);
>
> As this breaks the count, I'll keep as is.

@@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
1 << F2FS_I(inode)->i_log_cluster_size;
F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
set_inode_flag(inode, FI_COMPRESSED_FILE);
+ stat_inc_compr_inode(inode);

If I'm not missing anything, stat_inc_compr_inode() should be called inside
set_compress_context(), where we convert a normal inode to a compressed one,
right?

Thanks,

>
> Thanks,
>
>> }
>> }
>> if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
>> --
>> 2.18.0.rc1
>>
>> Thanks,
> .
>

2020-01-02 18:20:51

by Jaegeuk Kim

Subject: Re: [RFC PATCH v5] f2fs: support data compression

On 12/31, Chao Yu wrote:
> On 2019/12/31 8:46, Jaegeuk Kim wrote:
> > On 12/23, Chao Yu wrote:
> >> Hi Jaegeuk,
> >>
> >> Sorry for the delay.
> >>
> >> On 2019/12/19 5:46, Jaegeuk Kim wrote:
> >>> Hi Chao,
> >>>
> >>> I still see some diffs from my latest testing version, so please check anything
> >>> that you made additionally from here.
> >>>
> >>> https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/commit/?h=dev&id=25d18e19a91e60837d36368ee939db13fd16dc64
> >>
> >> I've checked the diff and picked up valid parts, could you please check and
> >> comment on it?
> >>
> >> ---
> >> fs/f2fs/compress.c | 8 ++++----
> >> fs/f2fs/data.c | 18 +++++++++++++++---
> >> fs/f2fs/f2fs.h | 3 +++
> >> fs/f2fs/file.c | 1 -
> >> 4 files changed, 22 insertions(+), 8 deletions(-)
> >>
> >> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> >> index af23ed6deffd..1bc86a54ad71 100644
> >> --- a/fs/f2fs/compress.c
> >> +++ b/fs/f2fs/compress.c
> >> @@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> >> fgp_flag, GFP_NOFS);
> >> if (!page) {
> >> ret = -ENOMEM;
> >> - goto unlock_pages;
> >> + goto release_pages;
> >> }
> >>
> >> if (PageUptodate(page))
> >> @@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> >> ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
> >> &last_block_in_bio, false);
> >> if (ret)
> >> - goto release_pages;
> >> + goto unlock_pages;
> >> if (bio)
> >> f2fs_submit_bio(sbi, bio, DATA);
> >>
> >> ret = f2fs_init_compress_ctx(cc);
> >> if (ret)
> >> - goto release_pages;
> >> + goto unlock_pages;
> >> }
> >>
> >> for (i = 0; i < cc->cluster_size; i++) {
> >> @@ -762,7 +762,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> >> if (err)
> >> goto out_unlock_op;
> >>
> >> - psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
> >> + psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
> >>
> >> err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
> >> if (err)
> >> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> >> index 19cd03450066..f1f5c701228d 100644
> >> --- a/fs/f2fs/data.c
> >> +++ b/fs/f2fs/data.c
> >> @@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
> >> }
> >>
> >> #ifdef CONFIG_F2FS_FS_COMPRESSION
> >> +void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
> >> +{
> >> + f2fs_decompress_end_io(rpages, cluster_size, false, true);
> >> +}
> >> +
> >> static void f2fs_verify_bio(struct bio *bio)
> >> {
> >> struct page *page = bio_first_page_all(bio);
> >> struct decompress_io_ctx *dic =
> >> (struct decompress_io_ctx *)page_private(page);
> >>
> >> - f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
> >> + f2fs_verify_pages(dic->rpages, dic->cluster_size);
> >> f2fs_free_dic(dic);
> >> }
> >> #endif
> >> @@ -507,10 +512,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
> >> bio_for_each_segment_all(bvec, bio, iter_all) {
> >> struct page *target = bvec->bv_page;
> >>
> >> - if (fscrypt_is_bounce_page(target))
> >> + if (fscrypt_is_bounce_page(target)) {
> >> target = fscrypt_pagecache_page(target);
> >> - if (f2fs_is_compressed_page(target))
> >> + if (IS_ERR(target))
> >> + continue;
> >> + }
> >> + if (f2fs_is_compressed_page(target)) {
> >> target = f2fs_compress_control_page(target);
> >> + if (IS_ERR(target))
> >> + continue;
> >> + }
> >>
> >> if (inode && inode == target->mapping->host)
> >> return true;
> >> @@ -2039,6 +2050,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
> >> if (ret)
> >> goto out;
> >>
> >> + /* cluster was overwritten as normal cluster */
> >> if (dn.data_blkaddr != COMPRESS_ADDR)
> >> goto out;
> >>
> >> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> >> index 5d55cef66410..17d2af4eeafb 100644
> >> --- a/fs/f2fs/f2fs.h
> >> +++ b/fs/f2fs/f2fs.h
> >> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
> >> 1 << F2FS_I(inode)->i_log_cluster_size;
> >> F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
> >> set_inode_flag(inode, FI_COMPRESSED_FILE);
> >> + stat_inc_compr_inode(inode);
> >> }
> >>
> >> static inline unsigned int addrs_per_inode(struct inode *inode)
> >> @@ -3961,6 +3962,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
> >> return true;
> >> if (f2fs_is_multi_device(sbi))
> >> return true;
> >> + if (f2fs_compressed_file(inode))
> >> + return true;
> >> /*
> >> * for blkzoned device, fallback direct IO to buffered IO, so
> >> * all IOs can be serialized by log-structured write.
> >> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> >> index bde5612f37f5..9aeadf14413c 100644
> >> --- a/fs/f2fs/file.c
> >> +++ b/fs/f2fs/file.c
> >> @@ -1828,7 +1828,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
> >> return -EINVAL;
> >>
> >> set_compress_context(inode);
> >> - stat_inc_compr_inode(inode);
> >
> > As this breaks the count, I'll keep as is.
>
> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
> 1 << F2FS_I(inode)->i_log_cluster_size;
> F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
> set_inode_flag(inode, FI_COMPRESSED_FILE);
> + stat_inc_compr_inode(inode);
>
> If I'm not missing anything, stat_inc_compr_inode() should be called inside
> set_compress_context() in where we convert normal inode to compress one,
> right?

I don't care much whether that's right or not. If we want to do that, I found
another line to remove in f2fs_create(). Let me give it a try.
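
Roughly, the breakage is double counting: with stat_inc_compr_inode() moved
into set_compress_context(), any caller that still bumps the counter on its
own ends up counting the same inode twice; that is the extra line to remove
in f2fs_create(). A tiny stand-alone sketch of the hazard (hypothetical int
counter and conversion path, not the real call sites):

#include <stdio.h>

/* stand-in for the real stat_inc_compr_inode() macro */
#define stat_inc_compr_inode(c)	((*(c))++)

static inline void set_compress_context(int *compr_inodes)
{
	/* flag and cluster-size setup elided */
	stat_inc_compr_inode(compr_inodes);	/* counter is bumped here now */
}

/* hypothetical conversion path that still has its old stat call */
static void convert_inode(int *compr_inodes)
{
	set_compress_context(compr_inodes);
	stat_inc_compr_inode(compr_inodes);	/* leftover call site */
}

int main(void)
{
	int count = 0;

	convert_inode(&count);
	printf("one inode counted as %d\n", count);	/* 2: the broken count */
	return 0;
}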

Thanks,

>
> Thanks,
>
> >
> > Thanks,
> >
> >> }
> >> }
> >> if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
> >> --
> >> 2.18.0.rc1
> >>
> >> Thanks,
> > .
> >

2020-01-02 19:01:16

by Jaegeuk Kim

Subject: Re: [RFC PATCH v5] f2fs: support data compression

On 01/02, Jaegeuk Kim wrote:
> On 12/31, Chao Yu wrote:
> > On 2019/12/31 8:46, Jaegeuk Kim wrote:
> > > On 12/23, Chao Yu wrote:
> > >> Hi Jaegeuk,
> > >>
> > >> Sorry for the delay.
> > >>
> > >> On 2019/12/19 5:46, Jaegeuk Kim wrote:
> > >>> Hi Chao,
> > >>>
> > >>> I still see some diffs from my latest testing version, so please check anything
> > >>> that you made additionally from here.
> > >>>
> > >>> https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/commit/?h=dev&id=25d18e19a91e60837d36368ee939db13fd16dc64
> > >>
> > >> I've checked the diff and picked up valid parts, could you please check and
> > >> comment on it?
> > >>
> > >> ---
> > >> fs/f2fs/compress.c | 8 ++++----
> > >> fs/f2fs/data.c | 18 +++++++++++++++---
> > >> fs/f2fs/f2fs.h | 3 +++
> > >> fs/f2fs/file.c | 1 -
> > >> 4 files changed, 22 insertions(+), 8 deletions(-)
> > >>
> > >> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> > >> index af23ed6deffd..1bc86a54ad71 100644
> > >> --- a/fs/f2fs/compress.c
> > >> +++ b/fs/f2fs/compress.c
> > >> @@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> > >> fgp_flag, GFP_NOFS);
> > >> if (!page) {
> > >> ret = -ENOMEM;
> > >> - goto unlock_pages;
> > >> + goto release_pages;
> > >> }
> > >>
> > >> if (PageUptodate(page))
> > >> @@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> > >> ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
> > >> &last_block_in_bio, false);
> > >> if (ret)
> > >> - goto release_pages;
> > >> + goto unlock_pages;
> > >> if (bio)
> > >> f2fs_submit_bio(sbi, bio, DATA);
> > >>
> > >> ret = f2fs_init_compress_ctx(cc);
> > >> if (ret)
> > >> - goto release_pages;
> > >> + goto unlock_pages;
> > >> }
> > >>
> > >> for (i = 0; i < cc->cluster_size; i++) {
> > >> @@ -762,7 +762,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> > >> if (err)
> > >> goto out_unlock_op;
> > >>
> > >> - psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
> > >> + psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
> > >>
> > >> err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
> > >> if (err)
> > >> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> > >> index 19cd03450066..f1f5c701228d 100644
> > >> --- a/fs/f2fs/data.c
> > >> +++ b/fs/f2fs/data.c
> > >> @@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
> > >> }
> > >>
> > >> #ifdef CONFIG_F2FS_FS_COMPRESSION
> > >> +void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
> > >> +{
> > >> + f2fs_decompress_end_io(rpages, cluster_size, false, true);
> > >> +}
> > >> +
> > >> static void f2fs_verify_bio(struct bio *bio)
> > >> {
> > >> struct page *page = bio_first_page_all(bio);
> > >> struct decompress_io_ctx *dic =
> > >> (struct decompress_io_ctx *)page_private(page);
> > >>
> > >> - f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
> > >> + f2fs_verify_pages(dic->rpages, dic->cluster_size);
> > >> f2fs_free_dic(dic);
> > >> }
> > >> #endif
> > >> @@ -507,10 +512,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
> > >> bio_for_each_segment_all(bvec, bio, iter_all) {
> > >> struct page *target = bvec->bv_page;
> > >>
> > >> - if (fscrypt_is_bounce_page(target))
> > >> + if (fscrypt_is_bounce_page(target)) {
> > >> target = fscrypt_pagecache_page(target);
> > >> - if (f2fs_is_compressed_page(target))
> > >> + if (IS_ERR(target))
> > >> + continue;
> > >> + }
> > >> + if (f2fs_is_compressed_page(target)) {
> > >> target = f2fs_compress_control_page(target);
> > >> + if (IS_ERR(target))
> > >> + continue;
> > >> + }
> > >>
> > >> if (inode && inode == target->mapping->host)
> > >> return true;
> > >> @@ -2039,6 +2050,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
> > >> if (ret)
> > >> goto out;
> > >>
> > >> + /* cluster was overwritten as normal cluster */
> > >> if (dn.data_blkaddr != COMPRESS_ADDR)
> > >> goto out;
> > >>
> > >> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> > >> index 5d55cef66410..17d2af4eeafb 100644
> > >> --- a/fs/f2fs/f2fs.h
> > >> +++ b/fs/f2fs/f2fs.h
> > >> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
> > >> 1 << F2FS_I(inode)->i_log_cluster_size;
> > >> F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
> > >> set_inode_flag(inode, FI_COMPRESSED_FILE);
> > >> + stat_inc_compr_inode(inode);
> > >> }
> > >>
> > >> static inline unsigned int addrs_per_inode(struct inode *inode)
> > >> @@ -3961,6 +3962,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
> > >> return true;
> > >> if (f2fs_is_multi_device(sbi))
> > >> return true;
> > >> + if (f2fs_compressed_file(inode))
> > >> + return true;
> > >> /*
> > >> * for blkzoned device, fallback direct IO to buffered IO, so
> > >> * all IOs can be serialized by log-structured write.
> > >> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> > >> index bde5612f37f5..9aeadf14413c 100644
> > >> --- a/fs/f2fs/file.c
> > >> +++ b/fs/f2fs/file.c
> > >> @@ -1828,7 +1828,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
> > >> return -EINVAL;
> > >>
> > >> set_compress_context(inode);
> > >> - stat_inc_compr_inode(inode);
> > >
> > > As this breaks the count, I'll keep as is.
> >
> > @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
> > 1 << F2FS_I(inode)->i_log_cluster_size;
> > F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
> > set_inode_flag(inode, FI_COMPRESSED_FILE);
> > + stat_inc_compr_inode(inode);
> >
> > If I'm not missing anything, stat_inc_compr_inode() should be called inside
> > set_compress_context() in where we convert normal inode to compress one,
> > right?
>
> I don't care much whether that's right or not. If we want to do that, I found
> another line to remove in f2fs_create(). Let me give it a try.
>
> Thanks,
>

This works for me. Could you run fsstress tests on a compressed root directory?
It seems there are still some bugs.

---
fs/f2fs/compress.c | 14 ++++++++++----
fs/f2fs/data.c | 25 ++++++++++++++++++++++---
fs/f2fs/f2fs.h | 31 +++++++++++++++++--------------
fs/f2fs/file.c | 1 -
fs/f2fs/namei.c | 1 -
5 files changed, 49 insertions(+), 23 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index af23ed6deffd..fa67ffd9d79d 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
fgp_flag, GFP_NOFS);
if (!page) {
ret = -ENOMEM;
- goto unlock_pages;
+ goto release_pages;
}

if (PageUptodate(page))
@@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
&last_block_in_bio, false);
if (ret)
- goto release_pages;
+ goto unlock_pages;
if (bio)
f2fs_submit_bio(sbi, bio, DATA);

ret = f2fs_init_compress_ctx(cc);
if (ret)
- goto release_pages;
+ goto unlock_pages;
}

for (i = 0; i < cc->cluster_size; i++) {
@@ -762,7 +762,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (err)
goto out_unlock_op;

- psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
+ for (i = 0; i < cc->cluster_size; i++) {
+ if (datablock_addr(dn.inode, dn.node_page,
+ dn.ofs_in_node + i) == NULL_ADDR)
+ goto out_put_dnode;
+ }
+
+ psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
if (err)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 356642e8c3b3..5476d33f2d76 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
+void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
+{
+ f2fs_decompress_end_io(rpages, cluster_size, false, true);
+}
+
static void f2fs_verify_bio(struct bio *bio)
{
struct page *page = bio_first_page_all(bio);
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);

- f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
+ f2fs_verify_pages(dic->rpages, dic->cluster_size);
f2fs_free_dic(dic);
}
#endif
@@ -520,10 +525,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *target = bvec->bv_page;

- if (fscrypt_is_bounce_page(target))
+ if (fscrypt_is_bounce_page(target)) {
target = fscrypt_pagecache_page(target);
- if (f2fs_is_compressed_page(target))
+ if (IS_ERR(target))
+ continue;
+ }
+ if (f2fs_is_compressed_page(target)) {
target = f2fs_compress_control_page(target);
+ if (IS_ERR(target))
+ continue;
+ }

if (inode && inode == target->mapping->host)
return true;
@@ -2049,6 +2060,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
if (ret)
goto out;

+ /* cluster was overwritten as normal cluster */
if (dn.data_blkaddr != COMPRESS_ADDR)
goto out;

@@ -2694,12 +2706,16 @@ static int f2fs_write_data_page(struct page *page,
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct inode *inode = page->mapping->host;

+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ goto out;
+
if (f2fs_compressed_file(inode)) {
if (f2fs_is_compressed_cluster(inode, page->index)) {
redirty_page_for_writepage(wbc, page);
return AOP_WRITEPAGE_ACTIVATE;
}
}
+out:
#endif

return f2fs_write_single_data_page(page, NULL, NULL, NULL,
@@ -2809,6 +2825,9 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
goto result;
}

+ if (unlikely(f2fs_cp_error(sbi)))
+ goto lock_page;
+
if (f2fs_cluster_is_empty(&cc)) {
void *fsdata = NULL;
struct page *pagep;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index de494fc9d596..a95369e32876 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2707,20 +2707,6 @@ static inline int f2fs_compressed_file(struct inode *inode)
is_inode_flag_set(inode, FI_COMPRESSED_FILE);
}

-static inline void set_compress_context(struct inode *inode)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-
- F2FS_I(inode)->i_compress_algorithm =
- F2FS_OPTION(sbi).compress_algorithm;
- F2FS_I(inode)->i_log_cluster_size =
- F2FS_OPTION(sbi).compress_log_size;
- F2FS_I(inode)->i_cluster_size =
- 1 << F2FS_I(inode)->i_log_cluster_size;
- F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
- set_inode_flag(inode, FI_COMPRESSED_FILE);
-}
-
static inline unsigned int addrs_per_inode(struct inode *inode)
{
unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
@@ -3808,6 +3794,21 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
}
#endif

+static inline void set_compress_context(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ F2FS_I(inode)->i_compress_algorithm =
+ F2FS_OPTION(sbi).compress_algorithm;
+ F2FS_I(inode)->i_log_cluster_size =
+ F2FS_OPTION(sbi).compress_log_size;
+ F2FS_I(inode)->i_cluster_size =
+ 1 << F2FS_I(inode)->i_log_cluster_size;
+ F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
+ set_inode_flag(inode, FI_COMPRESSED_FILE);
+ stat_inc_compr_inode(inode);
+}
+
static inline u64 f2fs_disable_compressed_file(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -3963,6 +3964,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
return true;
if (f2fs_is_multi_device(sbi))
return true;
+ if (f2fs_compressed_file(inode))
+ return true;
/*
* for blkzoned device, fallback direct IO to buffered IO, so
* all IOs can be serialized by log-structured write.
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f66c4cd067f5..cd84b3d9aa17 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1831,7 +1831,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
return -EINVAL;

set_compress_context(inode);
- stat_inc_compr_inode(inode);
}
}
if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index cf3a286106ed..2aa035422c0f 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -348,7 +348,6 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
goto out;
f2fs_unlock_op(sbi);

- stat_inc_compr_inode(inode);
f2fs_alloc_nid_done(sbi, ino);

d_instantiate_new(dentry, inode);
--
2.24.0.525.g8f36a354ae-goog

2020-01-03 01:22:04

by Chao Yu

Subject: Re: [RFC PATCH v5] f2fs: support data compression

On 2020/1/3 2:18, Jaegeuk Kim wrote:
> On 12/31, Chao Yu wrote:
>> On 2019/12/31 8:46, Jaegeuk Kim wrote:
>>> On 12/23, Chao Yu wrote:
>>>> Hi Jaegeuk,
>>>>
>>>> Sorry for the delay.
>>>>
>>>> On 2019/12/19 5:46, Jaegeuk Kim wrote:
>>>>> Hi Chao,
>>>>>
>>>>> I still see some diffs from my latest testing version, so please check anything
>>>>> that you made additionally from here.
>>>>>
>>>>> https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/commit/?h=dev&id=25d18e19a91e60837d36368ee939db13fd16dc64
>>>>
>>>> I've checked the diff and picked up valid parts, could you please check and
>>>> comment on it?
>>>>
>>>> ---
>>>> fs/f2fs/compress.c | 8 ++++----
>>>> fs/f2fs/data.c | 18 +++++++++++++++---
>>>> fs/f2fs/f2fs.h | 3 +++
>>>> fs/f2fs/file.c | 1 -
>>>> 4 files changed, 22 insertions(+), 8 deletions(-)
>>>>
>>>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>>>> index af23ed6deffd..1bc86a54ad71 100644
>>>> --- a/fs/f2fs/compress.c
>>>> +++ b/fs/f2fs/compress.c
>>>> @@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>>> fgp_flag, GFP_NOFS);
>>>> if (!page) {
>>>> ret = -ENOMEM;
>>>> - goto unlock_pages;
>>>> + goto release_pages;
>>>> }
>>>>
>>>> if (PageUptodate(page))
>>>> @@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>>> ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
>>>> &last_block_in_bio, false);
>>>> if (ret)
>>>> - goto release_pages;
>>>> + goto unlock_pages;
>>>> if (bio)
>>>> f2fs_submit_bio(sbi, bio, DATA);
>>>>
>>>> ret = f2fs_init_compress_ctx(cc);
>>>> if (ret)
>>>> - goto release_pages;
>>>> + goto unlock_pages;
>>>> }
>>>>
>>>> for (i = 0; i < cc->cluster_size; i++) {
>>>> @@ -762,7 +762,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>>>> if (err)
>>>> goto out_unlock_op;
>>>>
>>>> - psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>>>> + psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>>>>
>>>> err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
>>>> if (err)
>>>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
>>>> index 19cd03450066..f1f5c701228d 100644
>>>> --- a/fs/f2fs/data.c
>>>> +++ b/fs/f2fs/data.c
>>>> @@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
>>>> }
>>>>
>>>> #ifdef CONFIG_F2FS_FS_COMPRESSION
>>>> +void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
>>>> +{
>>>> + f2fs_decompress_end_io(rpages, cluster_size, false, true);
>>>> +}
>>>> +
>>>> static void f2fs_verify_bio(struct bio *bio)
>>>> {
>>>> struct page *page = bio_first_page_all(bio);
>>>> struct decompress_io_ctx *dic =
>>>> (struct decompress_io_ctx *)page_private(page);
>>>>
>>>> - f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
>>>> + f2fs_verify_pages(dic->rpages, dic->cluster_size);
>>>> f2fs_free_dic(dic);
>>>> }
>>>> #endif
>>>> @@ -507,10 +512,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
>>>> bio_for_each_segment_all(bvec, bio, iter_all) {
>>>> struct page *target = bvec->bv_page;
>>>>
>>>> - if (fscrypt_is_bounce_page(target))
>>>> + if (fscrypt_is_bounce_page(target)) {
>>>> target = fscrypt_pagecache_page(target);
>>>> - if (f2fs_is_compressed_page(target))
>>>> + if (IS_ERR(target))
>>>> + continue;
>>>> + }
>>>> + if (f2fs_is_compressed_page(target)) {
>>>> target = f2fs_compress_control_page(target);
>>>> + if (IS_ERR(target))
>>>> + continue;
>>>> + }
>>>>
>>>> if (inode && inode == target->mapping->host)
>>>> return true;
>>>> @@ -2039,6 +2050,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>> if (ret)
>>>> goto out;
>>>>
>>>> + /* cluster was overwritten as normal cluster */
>>>> if (dn.data_blkaddr != COMPRESS_ADDR)
>>>> goto out;
>>>>
>>>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>>>> index 5d55cef66410..17d2af4eeafb 100644
>>>> --- a/fs/f2fs/f2fs.h
>>>> +++ b/fs/f2fs/f2fs.h
>>>> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
>>>> 1 << F2FS_I(inode)->i_log_cluster_size;
>>>> F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
>>>> set_inode_flag(inode, FI_COMPRESSED_FILE);
>>>> + stat_inc_compr_inode(inode);
>>>> }
>>>>
>>>> static inline unsigned int addrs_per_inode(struct inode *inode)
>>>> @@ -3961,6 +3962,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
>>>> return true;
>>>> if (f2fs_is_multi_device(sbi))
>>>> return true;
>>>> + if (f2fs_compressed_file(inode))
>>>> + return true;
>>>> /*
>>>> * for blkzoned device, fallback direct IO to buffered IO, so
>>>> * all IOs can be serialized by log-structured write.
>>>> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
>>>> index bde5612f37f5..9aeadf14413c 100644
>>>> --- a/fs/f2fs/file.c
>>>> +++ b/fs/f2fs/file.c
>>>> @@ -1828,7 +1828,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
>>>> return -EINVAL;
>>>>
>>>> set_compress_context(inode);
>>>> - stat_inc_compr_inode(inode);
>>>
>>> As this breaks the count, I'll keep as is.
>>
>> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
>> 1 << F2FS_I(inode)->i_log_cluster_size;
>> F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
>> set_inode_flag(inode, FI_COMPRESSED_FILE);
>> + stat_inc_compr_inode(inode);
>>
>> If I'm not missing anything, stat_inc_compr_inode() should be called inside
>> set_compress_context() in where we convert normal inode to compress one,
>> right?
>
> I don't care much whether that's right or not. If we want to do that, I found
> another line to remove in f2fs_create(). Let me give it a try.

Correct, I forgot to remove that line.

Thanks,

>
> Thanks,
>
>>
>> Thanks,
>>
>>>
>>> Thanks,
>>>
>>>> }
>>>> }
>>>> if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
>>>> --
>>>> 2.18.0.rc1
>>>>
>>>> Thanks,
>>> .
>>>
> .
>

2020-01-03 06:51:56

by Chao Yu

Subject: Re: [RFC PATCH v5] f2fs: support data compression

On 2020/1/3 3:00, Jaegeuk Kim wrote:
> On 01/02, Jaegeuk Kim wrote:
>> On 12/31, Chao Yu wrote:
>>> On 2019/12/31 8:46, Jaegeuk Kim wrote:
>>>> On 12/23, Chao Yu wrote:
>>>>> Hi Jaegeuk,
>>>>>
>>>>> Sorry for the delay.
>>>>>
>>>>> On 2019/12/19 5:46, Jaegeuk Kim wrote:
>>>>>> Hi Chao,
>>>>>>
>>>>>> I still see some diffs from my latest testing version, so please check anything
>>>>>> that you made additionally from here.
>>>>>>
>>>>>> https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/commit/?h=dev&id=25d18e19a91e60837d36368ee939db13fd16dc64
>>>>>
>>>>> I've checked the diff and picked up valid parts, could you please check and
>>>>> comment on it?
>>>>>
>>>>> ---
>>>>> fs/f2fs/compress.c | 8 ++++----
>>>>> fs/f2fs/data.c | 18 +++++++++++++++---
>>>>> fs/f2fs/f2fs.h | 3 +++
>>>>> fs/f2fs/file.c | 1 -
>>>>> 4 files changed, 22 insertions(+), 8 deletions(-)
>>>>>
>>>>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>>>>> index af23ed6deffd..1bc86a54ad71 100644
>>>>> --- a/fs/f2fs/compress.c
>>>>> +++ b/fs/f2fs/compress.c
>>>>> @@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>>>> fgp_flag, GFP_NOFS);
>>>>> if (!page) {
>>>>> ret = -ENOMEM;
>>>>> - goto unlock_pages;
>>>>> + goto release_pages;
>>>>> }
>>>>>
>>>>> if (PageUptodate(page))
>>>>> @@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>>>> ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
>>>>> &last_block_in_bio, false);
>>>>> if (ret)
>>>>> - goto release_pages;
>>>>> + goto unlock_pages;
>>>>> if (bio)
>>>>> f2fs_submit_bio(sbi, bio, DATA);
>>>>>
>>>>> ret = f2fs_init_compress_ctx(cc);
>>>>> if (ret)
>>>>> - goto release_pages;
>>>>> + goto unlock_pages;
>>>>> }
>>>>>
>>>>> for (i = 0; i < cc->cluster_size; i++) {
>>>>> @@ -762,7 +762,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>>>>> if (err)
>>>>> goto out_unlock_op;
>>>>>
>>>>> - psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>>>>> + psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>>>>>
>>>>> err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
>>>>> if (err)
>>>>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
>>>>> index 19cd03450066..f1f5c701228d 100644
>>>>> --- a/fs/f2fs/data.c
>>>>> +++ b/fs/f2fs/data.c
>>>>> @@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
>>>>> }
>>>>>
>>>>> #ifdef CONFIG_F2FS_FS_COMPRESSION
>>>>> +void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
>>>>> +{
>>>>> + f2fs_decompress_end_io(rpages, cluster_size, false, true);
>>>>> +}
>>>>> +
>>>>> static void f2fs_verify_bio(struct bio *bio)
>>>>> {
>>>>> struct page *page = bio_first_page_all(bio);
>>>>> struct decompress_io_ctx *dic =
>>>>> (struct decompress_io_ctx *)page_private(page);
>>>>>
>>>>> - f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
>>>>> + f2fs_verify_pages(dic->rpages, dic->cluster_size);
>>>>> f2fs_free_dic(dic);
>>>>> }
>>>>> #endif
>>>>> @@ -507,10 +512,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
>>>>> bio_for_each_segment_all(bvec, bio, iter_all) {
>>>>> struct page *target = bvec->bv_page;
>>>>>
>>>>> - if (fscrypt_is_bounce_page(target))
>>>>> + if (fscrypt_is_bounce_page(target)) {
>>>>> target = fscrypt_pagecache_page(target);
>>>>> - if (f2fs_is_compressed_page(target))
>>>>> + if (IS_ERR(target))
>>>>> + continue;
>>>>> + }
>>>>> + if (f2fs_is_compressed_page(target)) {
>>>>> target = f2fs_compress_control_page(target);
>>>>> + if (IS_ERR(target))
>>>>> + continue;
>>>>> + }
>>>>>
>>>>> if (inode && inode == target->mapping->host)
>>>>> return true;
>>>>> @@ -2039,6 +2050,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
>>>>> if (ret)
>>>>> goto out;
>>>>>
>>>>> + /* cluster was overwritten as normal cluster */
>>>>> if (dn.data_blkaddr != COMPRESS_ADDR)
>>>>> goto out;
>>>>>
>>>>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>>>>> index 5d55cef66410..17d2af4eeafb 100644
>>>>> --- a/fs/f2fs/f2fs.h
>>>>> +++ b/fs/f2fs/f2fs.h
>>>>> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
>>>>> 1 << F2FS_I(inode)->i_log_cluster_size;
>>>>> F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
>>>>> set_inode_flag(inode, FI_COMPRESSED_FILE);
>>>>> + stat_inc_compr_inode(inode);
>>>>> }
>>>>>
>>>>> static inline unsigned int addrs_per_inode(struct inode *inode)
>>>>> @@ -3961,6 +3962,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
>>>>> return true;
>>>>> if (f2fs_is_multi_device(sbi))
>>>>> return true;
>>>>> + if (f2fs_compressed_file(inode))
>>>>> + return true;
>>>>> /*
>>>>> * for blkzoned device, fallback direct IO to buffered IO, so
>>>>> * all IOs can be serialized by log-structured write.
>>>>> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
>>>>> index bde5612f37f5..9aeadf14413c 100644
>>>>> --- a/fs/f2fs/file.c
>>>>> +++ b/fs/f2fs/file.c
>>>>> @@ -1828,7 +1828,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
>>>>> return -EINVAL;
>>>>>
>>>>> set_compress_context(inode);
>>>>> - stat_inc_compr_inode(inode);
>>>>
>>>> As this breaks the count, I'll keep as is.
>>>
>>> @@ -2719,6 +2719,7 @@ static inline void set_compress_context(struct inode *inode)
>>> 1 << F2FS_I(inode)->i_log_cluster_size;
>>> F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
>>> set_inode_flag(inode, FI_COMPRESSED_FILE);
>>> + stat_inc_compr_inode(inode);
>>>
>>> If I'm not missing anything, stat_inc_compr_inode() should be called inside
>>> set_compress_context() in where we convert normal inode to compress one,
>>> right?
>>
>> I don't care much whether that's right or not. If we want to do that, I found
>> another line to remove in f2fs_create(). Let me give it a try.
>>
>> Thanks,
>>
>
> This works to me. Could you run fsstress tests on compressed root directory?
> It seems still there are some bugs.

I applied the below diff and reverted ("f2fs: cover f2fs_lock_op in expand_inode_data case"),
then started running some tests on it.

Thanks,

>
> ---
> fs/f2fs/compress.c | 14 ++++++++++----
> fs/f2fs/data.c | 25 ++++++++++++++++++++++---
> fs/f2fs/f2fs.h | 31 +++++++++++++++++--------------
> fs/f2fs/file.c | 1 -
> fs/f2fs/namei.c | 1 -
> 5 files changed, 49 insertions(+), 23 deletions(-)
>
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index af23ed6deffd..fa67ffd9d79d 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -593,7 +593,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> fgp_flag, GFP_NOFS);
> if (!page) {
> ret = -ENOMEM;
> - goto unlock_pages;
> + goto release_pages;
> }
>
> if (PageUptodate(page))
> @@ -608,13 +608,13 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
> &last_block_in_bio, false);
> if (ret)
> - goto release_pages;
> + goto unlock_pages;
> if (bio)
> f2fs_submit_bio(sbi, bio, DATA);
>
> ret = f2fs_init_compress_ctx(cc);
> if (ret)
> - goto release_pages;
> + goto unlock_pages;
> }
>
> for (i = 0; i < cc->cluster_size; i++) {
> @@ -762,7 +762,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> if (err)
> goto out_unlock_op;
>
> - psize = (cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
> + for (i = 0; i < cc->cluster_size; i++) {
> + if (datablock_addr(dn.inode, dn.node_page,
> + dn.ofs_in_node + i) == NULL_ADDR)
> + goto out_put_dnode;
> + }
> +
> + psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
>
> err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
> if (err)
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 356642e8c3b3..5476d33f2d76 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -184,13 +184,18 @@ static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
> }
>
> #ifdef CONFIG_F2FS_FS_COMPRESSION
> +void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
> +{
> + f2fs_decompress_end_io(rpages, cluster_size, false, true);
> +}
> +
> static void f2fs_verify_bio(struct bio *bio)
> {
> struct page *page = bio_first_page_all(bio);
> struct decompress_io_ctx *dic =
> (struct decompress_io_ctx *)page_private(page);
>
> - f2fs_decompress_end_io(dic->rpages, dic->cluster_size, false, true);
> + f2fs_verify_pages(dic->rpages, dic->cluster_size);
> f2fs_free_dic(dic);
> }
> #endif
> @@ -520,10 +525,16 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
> bio_for_each_segment_all(bvec, bio, iter_all) {
> struct page *target = bvec->bv_page;
>
> - if (fscrypt_is_bounce_page(target))
> + if (fscrypt_is_bounce_page(target)) {
> target = fscrypt_pagecache_page(target);
> - if (f2fs_is_compressed_page(target))
> + if (IS_ERR(target))
> + continue;
> + }
> + if (f2fs_is_compressed_page(target)) {
> target = f2fs_compress_control_page(target);
> + if (IS_ERR(target))
> + continue;
> + }
>
> if (inode && inode == target->mapping->host)
> return true;
> @@ -2049,6 +2060,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
> if (ret)
> goto out;
>
> + /* cluster was overwritten as normal cluster */
> if (dn.data_blkaddr != COMPRESS_ADDR)
> goto out;
>
> @@ -2694,12 +2706,16 @@ static int f2fs_write_data_page(struct page *page,
> #ifdef CONFIG_F2FS_FS_COMPRESSION
> struct inode *inode = page->mapping->host;
>
> + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
> + goto out;
> +
> if (f2fs_compressed_file(inode)) {
> if (f2fs_is_compressed_cluster(inode, page->index)) {
> redirty_page_for_writepage(wbc, page);
> return AOP_WRITEPAGE_ACTIVATE;
> }
> }
> +out:
> #endif
>
> return f2fs_write_single_data_page(page, NULL, NULL, NULL,
> @@ -2809,6 +2825,9 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
> goto result;
> }
>
> + if (unlikely(f2fs_cp_error(sbi)))
> + goto lock_page;
> +
> if (f2fs_cluster_is_empty(&cc)) {
> void *fsdata = NULL;
> struct page *pagep;
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index de494fc9d596..a95369e32876 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -2707,20 +2707,6 @@ static inline int f2fs_compressed_file(struct inode *inode)
> is_inode_flag_set(inode, FI_COMPRESSED_FILE);
> }
>
> -static inline void set_compress_context(struct inode *inode)
> -{
> - struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> -
> - F2FS_I(inode)->i_compress_algorithm =
> - F2FS_OPTION(sbi).compress_algorithm;
> - F2FS_I(inode)->i_log_cluster_size =
> - F2FS_OPTION(sbi).compress_log_size;
> - F2FS_I(inode)->i_cluster_size =
> - 1 << F2FS_I(inode)->i_log_cluster_size;
> - F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
> - set_inode_flag(inode, FI_COMPRESSED_FILE);
> -}
> -
> static inline unsigned int addrs_per_inode(struct inode *inode)
> {
> unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
> @@ -3808,6 +3794,21 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
> }
> #endif
>
> +static inline void set_compress_context(struct inode *inode)
> +{
> + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> +
> + F2FS_I(inode)->i_compress_algorithm =
> + F2FS_OPTION(sbi).compress_algorithm;
> + F2FS_I(inode)->i_log_cluster_size =
> + F2FS_OPTION(sbi).compress_log_size;
> + F2FS_I(inode)->i_cluster_size =
> + 1 << F2FS_I(inode)->i_log_cluster_size;
> + F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
> + set_inode_flag(inode, FI_COMPRESSED_FILE);
> + stat_inc_compr_inode(inode);
> +}
> +
> static inline u64 f2fs_disable_compressed_file(struct inode *inode)
> {
> struct f2fs_inode_info *fi = F2FS_I(inode);
> @@ -3963,6 +3964,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
> return true;
> if (f2fs_is_multi_device(sbi))
> return true;
> + if (f2fs_compressed_file(inode))
> + return true;
> /*
> * for blkzoned device, fallback direct IO to buffered IO, so
> * all IOs can be serialized by log-structured write.
> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> index f66c4cd067f5..cd84b3d9aa17 100644
> --- a/fs/f2fs/file.c
> +++ b/fs/f2fs/file.c
> @@ -1831,7 +1831,6 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
> return -EINVAL;
>
> set_compress_context(inode);
> - stat_inc_compr_inode(inode);
> }
> }
> if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
> diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
> index cf3a286106ed..2aa035422c0f 100644
> --- a/fs/f2fs/namei.c
> +++ b/fs/f2fs/namei.c
> @@ -348,7 +348,6 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
> goto out;
> f2fs_unlock_op(sbi);
>
> - stat_inc_compr_inode(inode);
> f2fs_alloc_nid_done(sbi, ino);
>
> d_instantiate_new(dentry, inode);
>
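
For reference, a minimal sketch of the truncation that the (loff_t) cast in the
psize calculation above avoids. It assumes a 32-bit build where pgoff_t and
unsigned long are 32 bits wide; the helper name is illustrative and not taken
from the patch.

#include <linux/pagemap.h>

/*
 * Illustrative only: without widening to loff_t before the shift,
 * (last_index + 1) << PAGE_SHIFT is evaluated in pgoff_t (unsigned long),
 * which wraps on 32-bit kernels once the last page sits at or beyond 4GB.
 */
static loff_t last_page_end_pos(pgoff_t last_index)
{
	return (loff_t)(last_index + 1) << PAGE_SHIFT;
}

Since psize is an loff_t, doing the widening before the shift keeps the full
64-bit byte offset.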

2020-01-06 09:02:35

by Chao Yu

[permalink] [raw]
Subject: Re: [f2fs-dev] [RFC PATCH v5] f2fs: support data compression

On 2020/1/3 14:50, Chao Yu wrote:
> This works to me. Could you run fsstress tests on compressed root directory?
> It seems still there are some bugs.

Jaegeuk,

Did you mean running the por_fsstress testcase?

For now, at least, I haven't hit any problem with the normal fsstress case.

Thanks,

2020-01-06 18:18:38

by Jaegeuk Kim

[permalink] [raw]
Subject: Re: [f2fs-dev] [RFC PATCH v5] f2fs: support data compression

On 01/06, Chao Yu wrote:
> On 2020/1/3 14:50, Chao Yu wrote:
> > This works to me. Could you run fsstress tests on compressed root directory?
> > It seems still there are some bugs.
>
> Jaegeuk,
>
> Did you mean running por_fsstress testcase?
>
> Now, at least I didn't hit any problem for normal fsstress case.

Yup. por_fsstress

>
> Thanks,

2020-01-10 23:54:28

by Jaegeuk Kim

[permalink] [raw]
Subject: Re: [f2fs-dev] [RFC PATCH v5] f2fs: support data compression

On 01/06, Jaegeuk Kim wrote:
> On 01/06, Chao Yu wrote:
> > On 2020/1/3 14:50, Chao Yu wrote:
> > > This works to me. Could you run fsstress tests on compressed root directory?
> > > It seems still there are some bugs.
> >
> > Jaegeuk,
> >
> > Did you mean running por_fsstress testcase?
> >
> > Now, at least I didn't hit any problem for normal fsstress case.
>
> Yup. por_fsstress

Please check https://github.com/jaegeuk/f2fs/commits/g-dev-test.
I've fixed
- truncation offset
- i_compressed_blocks and its lock coverage
- error handling
- etc

There's another fix in f2fs-tools as well:
https://github.com/jaegeuk/f2fs-tools

>
> >
> > Thanks,

2020-01-11 09:11:18

by Chao Yu

[permalink] [raw]
Subject: Re: [f2fs-dev] [RFC PATCH v5] f2fs: support data compression

On 2020/1/11 7:52, Jaegeuk Kim wrote:
> On 01/06, Jaegeuk Kim wrote:
>> On 01/06, Chao Yu wrote:
>>> On 2020/1/3 14:50, Chao Yu wrote:
>>>> This works to me. Could you run fsstress tests on compressed root directory?
>>>> It seems still there are some bugs.
>>>
>>> Jaegeuk,
>>>
>>> Did you mean running por_fsstress testcase?
>>>
>>> Now, at least I didn't hit any problem for normal fsstress case.
>>
>> Yup. por_fsstress
>
> Please check https://github.com/jaegeuk/f2fs/commits/g-dev-test.
> I've fixed
> - truncation offset
> - i_compressed_blocks and its lock coverage
> - error handling
> - etc

I changed it as below, and por_fsstress no longer panics the system.

Could you merge all these fixes into the original patch?

From bb17d7d77fe0b8a3e3632a7026550800ab9609e9 Mon Sep 17 00:00:00 2001
From: Chao Yu <[email protected]>
Date: Sat, 11 Jan 2020 16:58:20 +0800
Subject: [PATCH] f2fs: compress: fix f2fs_put_rpages_mapping()

Signed-off-by: Chao Yu <[email protected]>
---
fs/f2fs/compress.c | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 502cd0ddc2a7..5c6a31d84ce4 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -74,18 +74,10 @@ static void f2fs_put_compressed_page(struct page *page)
}

static void f2fs_drop_rpages(struct compress_ctx *cc,
- struct address_space *mapping, int len, bool unlock)
+ int len, bool unlock)
{
unsigned int i;
for (i = 0; i < len; i++) {
- if (mapping) {
- pgoff_t start = start_idx_of_cluster(cc);
- struct page *page = find_get_page(mapping, start + i);
-
- put_page(page);
- put_page(page);
- cc->rpages[i] = NULL;
- }
if (!cc->rpages[i])
continue;
if (unlock)
@@ -97,18 +89,25 @@ static void f2fs_drop_rpages(struct compress_ctx *cc,

static void f2fs_put_rpages(struct compress_ctx *cc)
{
- f2fs_drop_rpages(cc, NULL, cc->cluster_size, false);
+ f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
- f2fs_drop_rpages(cc, NULL, len, true);
+ f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
- struct address_space *mapping, int len)
+ struct address_space *mapping,
+ pgoff_t start, int len)
{
- f2fs_drop_rpages(cc, mapping, len, false);
+ int i;
+ for (i = 0; i < len; i++) {
+ struct page *page = find_get_page(mapping, start + i);
+
+ put_page(page);
+ put_page(page);
+ }
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
@@ -680,7 +679,8 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,

if (!PageUptodate(page)) {
f2fs_unlock_rpages(cc, i + 1);
- f2fs_put_rpages_mapping(cc, mapping, cc->cluster_size);
+ f2fs_put_rpages_mapping(cc, mapping, start_idx,
+ cc->cluster_size);
f2fs_destroy_compress_ctx(cc);
goto retry;
}
@@ -714,7 +714,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
unlock_pages:
f2fs_unlock_rpages(cc, i);
release_pages:
- f2fs_put_rpages_mapping(cc, mapping, i);
+ f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
f2fs_destroy_compress_ctx(cc);
return ret;
}
--
2.18.0.rc1
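
As a side note on the reworked f2fs_put_rpages_mapping() above, here is my
reading of the two put_page() calls, written as a standalone sketch with
simplified names. It assumes each cluster page was grabbed earlier with an
extra reference (presumably via f2fs_pagecache_get_page() in
prepare_compress_overwrite()); that reading is mine, not stated in the patch.

#include <linux/pagemap.h>

/* Sketch only, not the f2fs code: release cluster pages that were grabbed
 * with an extra reference earlier. */
static void drop_cluster_page_refs(struct address_space *mapping,
				   pgoff_t start, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		struct page *page = find_get_page(mapping, start + i);

		if (!page)
			continue;	/* defensive; the cluster pages should still exist */
		put_page(page);		/* drop the reference find_get_page() just took */
		put_page(page);		/* drop the reference taken when the page was grabbed */
	}
}

Splitting this out of f2fs_drop_rpages() also keeps the page-cache reference
drop independent of cc->rpages[] state, which appears to be the point of the
change.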



>
> One another fix in f2fs-tools as well.
> https://github.com/jaegeuk/f2fs-tools
>
>>
>>>
>>> Thanks,
> .
>

2020-01-11 18:03:18

by Jaegeuk Kim

[permalink] [raw]
Subject: Re: [f2fs-dev] [RFC PATCH v5] f2fs: support data compression

On 01/11, Chao Yu wrote:
> On 2020/1/11 7:52, Jaegeuk Kim wrote:
> > On 01/06, Jaegeuk Kim wrote:
> >> On 01/06, Chao Yu wrote:
> >>> On 2020/1/3 14:50, Chao Yu wrote:
> >>>> This works to me. Could you run fsstress tests on compressed root directory?
> >>>> It seems still there are some bugs.
> >>>
> >>> Jaegeuk,
> >>>
> >>> Did you mean running por_fsstress testcase?
> >>>
> >>> Now, at least I didn't hit any problem for normal fsstress case.
> >>
> >> Yup. por_fsstress
> >
> > Please check https://github.com/jaegeuk/f2fs/commits/g-dev-test.
> > I've fixed
> > - truncation offset
> > - i_compressed_blocks and its lock coverage
> > - error handling
> > - etc
>
> I changed as below, and por_fsstress stops panic the system.
>
> Could you merge all these fixes into original patch?

Yup, let me roll up some early patches first once the test results look good.

>
> >From bb17d7d77fe0b8a3e3632a7026550800ab9609e9 Mon Sep 17 00:00:00 2001
> From: Chao Yu <[email protected]>
> Date: Sat, 11 Jan 2020 16:58:20 +0800
> Subject: [PATCH] f2fs: compress: fix f2fs_put_rpages_mapping()
>
> Signed-off-by: Chao Yu <[email protected]>
> ---
> fs/f2fs/compress.c | 30 +++++++++++++++---------------
> 1 file changed, 15 insertions(+), 15 deletions(-)
>
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 502cd0ddc2a7..5c6a31d84ce4 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -74,18 +74,10 @@ static void f2fs_put_compressed_page(struct page *page)
> }
>
> static void f2fs_drop_rpages(struct compress_ctx *cc,
> - struct address_space *mapping, int len, bool unlock)
> + int len, bool unlock)
> {
> unsigned int i;
> for (i = 0; i < len; i++) {
> - if (mapping) {
> - pgoff_t start = start_idx_of_cluster(cc);
> - struct page *page = find_get_page(mapping, start + i);
> -
> - put_page(page);
> - put_page(page);
> - cc->rpages[i] = NULL;
> - }
> if (!cc->rpages[i])
> continue;
> if (unlock)
> @@ -97,18 +89,25 @@ static void f2fs_drop_rpages(struct compress_ctx *cc,
>
> static void f2fs_put_rpages(struct compress_ctx *cc)
> {
> - f2fs_drop_rpages(cc, NULL, cc->cluster_size, false);
> + f2fs_drop_rpages(cc, cc->cluster_size, false);
> }
>
> static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
> {
> - f2fs_drop_rpages(cc, NULL, len, true);
> + f2fs_drop_rpages(cc, len, true);
> }
>
> static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
> - struct address_space *mapping, int len)
> + struct address_space *mapping,
> + pgoff_t start, int len)
> {
> - f2fs_drop_rpages(cc, mapping, len, false);
> + int i;
> + for (i = 0; i < len; i++) {
> + struct page *page = find_get_page(mapping, start + i);
> +
> + put_page(page);
> + put_page(page);
> + }
> }
>
> static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
> @@ -680,7 +679,8 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>
> if (!PageUptodate(page)) {
> f2fs_unlock_rpages(cc, i + 1);
> - f2fs_put_rpages_mapping(cc, mapping, cc->cluster_size);
> + f2fs_put_rpages_mapping(cc, mapping, start_idx,
> + cc->cluster_size);
> f2fs_destroy_compress_ctx(cc);
> goto retry;
> }
> @@ -714,7 +714,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> unlock_pages:
> f2fs_unlock_rpages(cc, i);
> release_pages:
> - f2fs_put_rpages_mapping(cc, mapping, i);
> + f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
> f2fs_destroy_compress_ctx(cc);
> return ret;
> }
> --
> 2.18.0.rc1
>
>
>
> >
> > One another fix in f2fs-tools as well.
> > https://github.com/jaegeuk/f2fs-tools
> >
> >>
> >>>
> >>> Thanks,
> > .
> >

2020-01-13 08:57:20

by Chao Yu

[permalink] [raw]
Subject: Re: [f2fs-dev] [RFC PATCH v5] f2fs: support data compression

On 2020/1/12 2:02, Jaegeuk Kim wrote:
> On 01/11, Chao Yu wrote:
>> On 2020/1/11 7:52, Jaegeuk Kim wrote:
>>> On 01/06, Jaegeuk Kim wrote:
>>>> On 01/06, Chao Yu wrote:
>>>>> On 2020/1/3 14:50, Chao Yu wrote:
>>>>>> This works to me. Could you run fsstress tests on compressed root directory?
>>>>>> It seems still there are some bugs.
>>>>>
>>>>> Jaegeuk,
>>>>>
>>>>> Did you mean running por_fsstress testcase?
>>>>>
>>>>> Now, at least I didn't hit any problem for normal fsstress case.
>>>>
>>>> Yup. por_fsstress
>>>
>>> Please check https://github.com/jaegeuk/f2fs/commits/g-dev-test.
>>> I've fixed
>>> - truncation offset
>>> - i_compressed_blocks and its lock coverage
>>> - error handling
>>> - etc
>>
>> I changed as below, and por_fsstress stops panic the system.
>>
>> Could you merge all these fixes into original patch?
>
> Yup, let m roll up some early patches first once test results become good.

I haven't encountered any issues since then; how does the por_fsstress test
look in your environment? If you hit anything, please share the call stack with me.

Thanks,

>
>>
>> >From bb17d7d77fe0b8a3e3632a7026550800ab9609e9 Mon Sep 17 00:00:00 2001
>> From: Chao Yu <[email protected]>
>> Date: Sat, 11 Jan 2020 16:58:20 +0800
>> Subject: [PATCH] f2fs: compress: fix f2fs_put_rpages_mapping()
>>
>> Signed-off-by: Chao Yu <[email protected]>
>> ---
>> fs/f2fs/compress.c | 30 +++++++++++++++---------------
>> 1 file changed, 15 insertions(+), 15 deletions(-)
>>
>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>> index 502cd0ddc2a7..5c6a31d84ce4 100644
>> --- a/fs/f2fs/compress.c
>> +++ b/fs/f2fs/compress.c
>> @@ -74,18 +74,10 @@ static void f2fs_put_compressed_page(struct page *page)
>> }
>>
>> static void f2fs_drop_rpages(struct compress_ctx *cc,
>> - struct address_space *mapping, int len, bool unlock)
>> + int len, bool unlock)
>> {
>> unsigned int i;
>> for (i = 0; i < len; i++) {
>> - if (mapping) {
>> - pgoff_t start = start_idx_of_cluster(cc);
>> - struct page *page = find_get_page(mapping, start + i);
>> -
>> - put_page(page);
>> - put_page(page);
>> - cc->rpages[i] = NULL;
>> - }
>> if (!cc->rpages[i])
>> continue;
>> if (unlock)
>> @@ -97,18 +89,25 @@ static void f2fs_drop_rpages(struct compress_ctx *cc,
>>
>> static void f2fs_put_rpages(struct compress_ctx *cc)
>> {
>> - f2fs_drop_rpages(cc, NULL, cc->cluster_size, false);
>> + f2fs_drop_rpages(cc, cc->cluster_size, false);
>> }
>>
>> static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
>> {
>> - f2fs_drop_rpages(cc, NULL, len, true);
>> + f2fs_drop_rpages(cc, len, true);
>> }
>>
>> static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
>> - struct address_space *mapping, int len)
>> + struct address_space *mapping,
>> + pgoff_t start, int len)
>> {
>> - f2fs_drop_rpages(cc, mapping, len, false);
>> + int i;
>> + for (i = 0; i < len; i++) {
>> + struct page *page = find_get_page(mapping, start + i);
>> +
>> + put_page(page);
>> + put_page(page);
>> + }
>> }
>>
>> static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
>> @@ -680,7 +679,8 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>
>> if (!PageUptodate(page)) {
>> f2fs_unlock_rpages(cc, i + 1);
>> - f2fs_put_rpages_mapping(cc, mapping, cc->cluster_size);
>> + f2fs_put_rpages_mapping(cc, mapping, start_idx,
>> + cc->cluster_size);
>> f2fs_destroy_compress_ctx(cc);
>> goto retry;
>> }
>> @@ -714,7 +714,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>> unlock_pages:
>> f2fs_unlock_rpages(cc, i);
>> release_pages:
>> - f2fs_put_rpages_mapping(cc, mapping, i);
>> + f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
>> f2fs_destroy_compress_ctx(cc);
>> return ret;
>> }
>> --
>> 2.18.0.rc1
>>
>>
>>
>>>
>>> One another fix in f2fs-tools as well.
>>> https://github.com/jaegeuk/f2fs-tools
>>>
>>>>
>>>>>
>>>>> Thanks,
>>> .
>>>
> .
>

2020-01-13 16:14:17

by Jaegeuk Kim

[permalink] [raw]
Subject: Re: [f2fs-dev] [RFC PATCH v5] f2fs: support data compression

On 01/13, Chao Yu wrote:
> On 2020/1/12 2:02, Jaegeuk Kim wrote:
> > On 01/11, Chao Yu wrote:
> >> On 2020/1/11 7:52, Jaegeuk Kim wrote:
> >>> On 01/06, Jaegeuk Kim wrote:
> >>>> On 01/06, Chao Yu wrote:
> >>>>> On 2020/1/3 14:50, Chao Yu wrote:
> >>>>>> This works to me. Could you run fsstress tests on compressed root directory?
> >>>>>> It seems still there are some bugs.
> >>>>>
> >>>>> Jaegeuk,
> >>>>>
> >>>>> Did you mean running por_fsstress testcase?
> >>>>>
> >>>>> Now, at least I didn't hit any problem for normal fsstress case.
> >>>>
> >>>> Yup. por_fsstress
> >>>
> >>> Please check https://github.com/jaegeuk/f2fs/commits/g-dev-test.
> >>> I've fixed
> >>> - truncation offset
> >>> - i_compressed_blocks and its lock coverage
> >>> - error handling
> >>> - etc
> >>
> >> I changed as below, and por_fsstress stops panic the system.
> >>
> >> Could you merge all these fixes into original patch?
> >
> > Yup, let m roll up some early patches first once test results become good.
>
> I didn't encounter issue any more, how about por_fsstress test result in your
> enviornment? If there is, please share the call stack with me.

Sure, will do once I hit an issue. BTW, I'm hitting another unreachable nat
entry issue during por_fsstress without compression. :(

Thanks,

>
> Thanks,
>
> >
> >>
> >> >From bb17d7d77fe0b8a3e3632a7026550800ab9609e9 Mon Sep 17 00:00:00 2001
> >> From: Chao Yu <[email protected]>
> >> Date: Sat, 11 Jan 2020 16:58:20 +0800
> >> Subject: [PATCH] f2fs: compress: fix f2fs_put_rpages_mapping()
> >>
> >> Signed-off-by: Chao Yu <[email protected]>
> >> ---
> >> fs/f2fs/compress.c | 30 +++++++++++++++---------------
> >> 1 file changed, 15 insertions(+), 15 deletions(-)
> >>
> >> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> >> index 502cd0ddc2a7..5c6a31d84ce4 100644
> >> --- a/fs/f2fs/compress.c
> >> +++ b/fs/f2fs/compress.c
> >> @@ -74,18 +74,10 @@ static void f2fs_put_compressed_page(struct page *page)
> >> }
> >>
> >> static void f2fs_drop_rpages(struct compress_ctx *cc,
> >> - struct address_space *mapping, int len, bool unlock)
> >> + int len, bool unlock)
> >> {
> >> unsigned int i;
> >> for (i = 0; i < len; i++) {
> >> - if (mapping) {
> >> - pgoff_t start = start_idx_of_cluster(cc);
> >> - struct page *page = find_get_page(mapping, start + i);
> >> -
> >> - put_page(page);
> >> - put_page(page);
> >> - cc->rpages[i] = NULL;
> >> - }
> >> if (!cc->rpages[i])
> >> continue;
> >> if (unlock)
> >> @@ -97,18 +89,25 @@ static void f2fs_drop_rpages(struct compress_ctx *cc,
> >>
> >> static void f2fs_put_rpages(struct compress_ctx *cc)
> >> {
> >> - f2fs_drop_rpages(cc, NULL, cc->cluster_size, false);
> >> + f2fs_drop_rpages(cc, cc->cluster_size, false);
> >> }
> >>
> >> static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
> >> {
> >> - f2fs_drop_rpages(cc, NULL, len, true);
> >> + f2fs_drop_rpages(cc, len, true);
> >> }
> >>
> >> static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
> >> - struct address_space *mapping, int len)
> >> + struct address_space *mapping,
> >> + pgoff_t start, int len)
> >> {
> >> - f2fs_drop_rpages(cc, mapping, len, false);
> >> + int i;
> >> + for (i = 0; i < len; i++) {
> >> + struct page *page = find_get_page(mapping, start + i);
> >> +
> >> + put_page(page);
> >> + put_page(page);
> >> + }
> >> }
> >>
> >> static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
> >> @@ -680,7 +679,8 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> >>
> >> if (!PageUptodate(page)) {
> >> f2fs_unlock_rpages(cc, i + 1);
> >> - f2fs_put_rpages_mapping(cc, mapping, cc->cluster_size);
> >> + f2fs_put_rpages_mapping(cc, mapping, start_idx,
> >> + cc->cluster_size);
> >> f2fs_destroy_compress_ctx(cc);
> >> goto retry;
> >> }
> >> @@ -714,7 +714,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> >> unlock_pages:
> >> f2fs_unlock_rpages(cc, i);
> >> release_pages:
> >> - f2fs_put_rpages_mapping(cc, mapping, i);
> >> + f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
> >> f2fs_destroy_compress_ctx(cc);
> >> return ret;
> >> }
> >> --
> >> 2.18.0.rc1
> >>
> >>
> >>
> >>>
> >>> One another fix in f2fs-tools as well.
> >>> https://github.com/jaegeuk/f2fs-tools
> >>>
> >>>>
> >>>>>
> >>>>> Thanks,
> >>> .
> >>>
> > .
> >

2020-01-14 02:19:40

by Chao Yu

[permalink] [raw]
Subject: Re: [f2fs-dev] [RFC PATCH v5] f2fs: support data compression

On 2020/1/14 0:11, Jaegeuk Kim wrote:
> On 01/13, Chao Yu wrote:
>> On 2020/1/12 2:02, Jaegeuk Kim wrote:
>>> On 01/11, Chao Yu wrote:
>>>> On 2020/1/11 7:52, Jaegeuk Kim wrote:
>>>>> On 01/06, Jaegeuk Kim wrote:
>>>>>> On 01/06, Chao Yu wrote:
>>>>>>> On 2020/1/3 14:50, Chao Yu wrote:
>>>>>>>> This works to me. Could you run fsstress tests on compressed root directory?
>>>>>>>> It seems still there are some bugs.
>>>>>>>
>>>>>>> Jaegeuk,
>>>>>>>
>>>>>>> Did you mean running por_fsstress testcase?
>>>>>>>
>>>>>>> Now, at least I didn't hit any problem for normal fsstress case.
>>>>>>
>>>>>> Yup. por_fsstress
>>>>>
>>>>> Please check https://github.com/jaegeuk/f2fs/commits/g-dev-test.
>>>>> I've fixed
>>>>> - truncation offset
>>>>> - i_compressed_blocks and its lock coverage
>>>>> - error handling
>>>>> - etc
>>>>
>>>> I changed as below, and por_fsstress stops panic the system.
>>>>
>>>> Could you merge all these fixes into original patch?
>>>
>>> Yup, let m roll up some early patches first once test results become good.
>>
>> I didn't encounter issue any more, how about por_fsstress test result in your
>> enviornment? If there is, please share the call stack with me.
>
> Sure, will do, once I hit an issue. BTW, I'm hitting another unreacheable nat
> entry issue during por_stress without compression. :(

Did you enable any features during the por_fsstress test?

I only hit the warning below during the por_fsstress test, on an image w/o compression.

------------[ cut here ]------------
WARNING: CPU: 10 PID: 33483 at fs/fs-writeback.c:1448 __writeback_single_inode+0x28c/0x340
Call Trace:
writeback_single_inode+0xad/0x120
sync_inode_metadata+0x3d/0x60
f2fs_sync_inode_meta+0x90/0xe0 [f2fs]
block_operations+0x17c/0x360 [f2fs]
f2fs_write_checkpoint+0x101/0xff0 [f2fs]
f2fs_sync_fs+0xa8/0x130 [f2fs]
f2fs_do_sync_file+0x19c/0x880 [f2fs]
do_fsync+0x38/0x60
__x64_sys_fsync+0x10/0x20
do_syscall_64+0x5f/0x220
entry_SYSCALL_64_after_hwframe+0x44/0xa9

Thanks,

>
> Thanks,
>
>>
>> Thanks,
>>
>>>
>>>>
>>>> >From bb17d7d77fe0b8a3e3632a7026550800ab9609e9 Mon Sep 17 00:00:00 2001
>>>> From: Chao Yu <[email protected]>
>>>> Date: Sat, 11 Jan 2020 16:58:20 +0800
>>>> Subject: [PATCH] f2fs: compress: fix f2fs_put_rpages_mapping()
>>>>
>>>> Signed-off-by: Chao Yu <[email protected]>
>>>> ---
>>>> fs/f2fs/compress.c | 30 +++++++++++++++---------------
>>>> 1 file changed, 15 insertions(+), 15 deletions(-)
>>>>
>>>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>>>> index 502cd0ddc2a7..5c6a31d84ce4 100644
>>>> --- a/fs/f2fs/compress.c
>>>> +++ b/fs/f2fs/compress.c
>>>> @@ -74,18 +74,10 @@ static void f2fs_put_compressed_page(struct page *page)
>>>> }
>>>>
>>>> static void f2fs_drop_rpages(struct compress_ctx *cc,
>>>> - struct address_space *mapping, int len, bool unlock)
>>>> + int len, bool unlock)
>>>> {
>>>> unsigned int i;
>>>> for (i = 0; i < len; i++) {
>>>> - if (mapping) {
>>>> - pgoff_t start = start_idx_of_cluster(cc);
>>>> - struct page *page = find_get_page(mapping, start + i);
>>>> -
>>>> - put_page(page);
>>>> - put_page(page);
>>>> - cc->rpages[i] = NULL;
>>>> - }
>>>> if (!cc->rpages[i])
>>>> continue;
>>>> if (unlock)
>>>> @@ -97,18 +89,25 @@ static void f2fs_drop_rpages(struct compress_ctx *cc,
>>>>
>>>> static void f2fs_put_rpages(struct compress_ctx *cc)
>>>> {
>>>> - f2fs_drop_rpages(cc, NULL, cc->cluster_size, false);
>>>> + f2fs_drop_rpages(cc, cc->cluster_size, false);
>>>> }
>>>>
>>>> static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
>>>> {
>>>> - f2fs_drop_rpages(cc, NULL, len, true);
>>>> + f2fs_drop_rpages(cc, len, true);
>>>> }
>>>>
>>>> static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
>>>> - struct address_space *mapping, int len)
>>>> + struct address_space *mapping,
>>>> + pgoff_t start, int len)
>>>> {
>>>> - f2fs_drop_rpages(cc, mapping, len, false);
>>>> + int i;
>>>> + for (i = 0; i < len; i++) {
>>>> + struct page *page = find_get_page(mapping, start + i);
>>>> +
>>>> + put_page(page);
>>>> + put_page(page);
>>>> + }
>>>> }
>>>>
>>>> static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
>>>> @@ -680,7 +679,8 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>>>
>>>> if (!PageUptodate(page)) {
>>>> f2fs_unlock_rpages(cc, i + 1);
>>>> - f2fs_put_rpages_mapping(cc, mapping, cc->cluster_size);
>>>> + f2fs_put_rpages_mapping(cc, mapping, start_idx,
>>>> + cc->cluster_size);
>>>> f2fs_destroy_compress_ctx(cc);
>>>> goto retry;
>>>> }
>>>> @@ -714,7 +714,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>>> unlock_pages:
>>>> f2fs_unlock_rpages(cc, i);
>>>> release_pages:
>>>> - f2fs_put_rpages_mapping(cc, mapping, i);
>>>> + f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
>>>> f2fs_destroy_compress_ctx(cc);
>>>> return ret;
>>>> }
>>>> --
>>>> 2.18.0.rc1
>>>>
>>>>
>>>>
>>>>>
>>>>> One another fix in f2fs-tools as well.
>>>>> https://github.com/jaegeuk/f2fs-tools
>>>>>
>>>>>>
>>>>>>>
>>>>>>> Thanks,
>>>>> .
>>>>>
>>> .
>>>
> .
>

2020-01-14 22:50:44

by Jaegeuk Kim

[permalink] [raw]
Subject: Re: [f2fs-dev] [RFC PATCH v5] f2fs: support data compression

On 01/14, Chao Yu wrote:
> On 2020/1/14 0:11, Jaegeuk Kim wrote:
> > On 01/13, Chao Yu wrote:
> >> On 2020/1/12 2:02, Jaegeuk Kim wrote:
> >>> On 01/11, Chao Yu wrote:
> >>>> On 2020/1/11 7:52, Jaegeuk Kim wrote:
> >>>>> On 01/06, Jaegeuk Kim wrote:
> >>>>>> On 01/06, Chao Yu wrote:
> >>>>>>> On 2020/1/3 14:50, Chao Yu wrote:
> >>>>>>>> This works to me. Could you run fsstress tests on compressed root directory?
> >>>>>>>> It seems still there are some bugs.
> >>>>>>>
> >>>>>>> Jaegeuk,
> >>>>>>>
> >>>>>>> Did you mean running por_fsstress testcase?
> >>>>>>>
> >>>>>>> Now, at least I didn't hit any problem for normal fsstress case.
> >>>>>>
> >>>>>> Yup. por_fsstress
> >>>>>
> >>>>> Please check https://github.com/jaegeuk/f2fs/commits/g-dev-test.
> >>>>> I've fixed
> >>>>> - truncation offset
> >>>>> - i_compressed_blocks and its lock coverage
> >>>>> - error handling
> >>>>> - etc
> >>>>
> >>>> I changed as below, and por_fsstress stops panic the system.
> >>>>
> >>>> Could you merge all these fixes into original patch?
> >>>
> >>> Yup, let m roll up some early patches first once test results become good.
> >>
> >> I didn't encounter issue any more, how about por_fsstress test result in your
> >> enviornment? If there is, please share the call stack with me.
> >
> > Sure, will do, once I hit an issue. BTW, I'm hitting another unreacheable nat
> > entry issue during por_stress without compression. :(
>
> Did you enable any features during por_fsstress test?
>
> I only hit below warning during por_fsstress test on image w/o compression.
>
> ------------[ cut here ]------------
> WARNING: CPU: 10 PID: 33483 at fs/fs-writeback.c:1448 __writeback_single_inode+0x28c/0x340
> Call Trace:
> writeback_single_inode+0xad/0x120
> sync_inode_metadata+0x3d/0x60
> f2fs_sync_inode_meta+0x90/0xe0 [f2fs]
> block_operations+0x17c/0x360 [f2fs]
> f2fs_write_checkpoint+0x101/0xff0 [f2fs]
> f2fs_sync_fs+0xa8/0x130 [f2fs]
> f2fs_do_sync_file+0x19c/0x880 [f2fs]
> do_fsync+0x38/0x60
> __x64_sys_fsync+0x10/0x20
> do_syscall_64+0x5f/0x220
> entry_SYSCALL_64_after_hwframe+0x44/0xa9

Does the gc_mutex patch fix this?

>
> Thanks,
>
> >
> > Thanks,
> >
> >>
> >> Thanks,
> >>
> >>>
> >>>>
> >>>> >From bb17d7d77fe0b8a3e3632a7026550800ab9609e9 Mon Sep 17 00:00:00 2001
> >>>> From: Chao Yu <[email protected]>
> >>>> Date: Sat, 11 Jan 2020 16:58:20 +0800
> >>>> Subject: [PATCH] f2fs: compress: fix f2fs_put_rpages_mapping()
> >>>>
> >>>> Signed-off-by: Chao Yu <[email protected]>
> >>>> ---
> >>>> fs/f2fs/compress.c | 30 +++++++++++++++---------------
> >>>> 1 file changed, 15 insertions(+), 15 deletions(-)
> >>>>
> >>>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> >>>> index 502cd0ddc2a7..5c6a31d84ce4 100644
> >>>> --- a/fs/f2fs/compress.c
> >>>> +++ b/fs/f2fs/compress.c
> >>>> @@ -74,18 +74,10 @@ static void f2fs_put_compressed_page(struct page *page)
> >>>> }
> >>>>
> >>>> static void f2fs_drop_rpages(struct compress_ctx *cc,
> >>>> - struct address_space *mapping, int len, bool unlock)
> >>>> + int len, bool unlock)
> >>>> {
> >>>> unsigned int i;
> >>>> for (i = 0; i < len; i++) {
> >>>> - if (mapping) {
> >>>> - pgoff_t start = start_idx_of_cluster(cc);
> >>>> - struct page *page = find_get_page(mapping, start + i);
> >>>> -
> >>>> - put_page(page);
> >>>> - put_page(page);
> >>>> - cc->rpages[i] = NULL;
> >>>> - }
> >>>> if (!cc->rpages[i])
> >>>> continue;
> >>>> if (unlock)
> >>>> @@ -97,18 +89,25 @@ static void f2fs_drop_rpages(struct compress_ctx *cc,
> >>>>
> >>>> static void f2fs_put_rpages(struct compress_ctx *cc)
> >>>> {
> >>>> - f2fs_drop_rpages(cc, NULL, cc->cluster_size, false);
> >>>> + f2fs_drop_rpages(cc, cc->cluster_size, false);
> >>>> }
> >>>>
> >>>> static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
> >>>> {
> >>>> - f2fs_drop_rpages(cc, NULL, len, true);
> >>>> + f2fs_drop_rpages(cc, len, true);
> >>>> }
> >>>>
> >>>> static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
> >>>> - struct address_space *mapping, int len)
> >>>> + struct address_space *mapping,
> >>>> + pgoff_t start, int len)
> >>>> {
> >>>> - f2fs_drop_rpages(cc, mapping, len, false);
> >>>> + int i;
> >>>> + for (i = 0; i < len; i++) {
> >>>> + struct page *page = find_get_page(mapping, start + i);
> >>>> +
> >>>> + put_page(page);
> >>>> + put_page(page);
> >>>> + }
> >>>> }
> >>>>
> >>>> static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
> >>>> @@ -680,7 +679,8 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> >>>>
> >>>> if (!PageUptodate(page)) {
> >>>> f2fs_unlock_rpages(cc, i + 1);
> >>>> - f2fs_put_rpages_mapping(cc, mapping, cc->cluster_size);
> >>>> + f2fs_put_rpages_mapping(cc, mapping, start_idx,
> >>>> + cc->cluster_size);
> >>>> f2fs_destroy_compress_ctx(cc);
> >>>> goto retry;
> >>>> }
> >>>> @@ -714,7 +714,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> >>>> unlock_pages:
> >>>> f2fs_unlock_rpages(cc, i);
> >>>> release_pages:
> >>>> - f2fs_put_rpages_mapping(cc, mapping, i);
> >>>> + f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
> >>>> f2fs_destroy_compress_ctx(cc);
> >>>> return ret;
> >>>> }
> >>>> --
> >>>> 2.18.0.rc1
> >>>>
> >>>>
> >>>>
> >>>>>
> >>>>> One another fix in f2fs-tools as well.
> >>>>> https://github.com/jaegeuk/f2fs-tools
> >>>>>
> >>>>>>
> >>>>>>>
> >>>>>>> Thanks,
> >>>>> .
> >>>>>
> >>> .
> >>>
> > .
> >

2020-01-15 10:13:49

by Chao Yu

[permalink] [raw]
Subject: Re: [f2fs-dev] [RFC PATCH v5] f2fs: support data compression

On 2020/1/15 6:48, Jaegeuk Kim wrote:
> On 01/14, Chao Yu wrote:
>> On 2020/1/14 0:11, Jaegeuk Kim wrote:
>>> On 01/13, Chao Yu wrote:
>>>> On 2020/1/12 2:02, Jaegeuk Kim wrote:
>>>>> On 01/11, Chao Yu wrote:
>>>>>> On 2020/1/11 7:52, Jaegeuk Kim wrote:
>>>>>>> On 01/06, Jaegeuk Kim wrote:
>>>>>>>> On 01/06, Chao Yu wrote:
>>>>>>>>> On 2020/1/3 14:50, Chao Yu wrote:
>>>>>>>>>> This works to me. Could you run fsstress tests on compressed root directory?
>>>>>>>>>> It seems still there are some bugs.
>>>>>>>>>
>>>>>>>>> Jaegeuk,
>>>>>>>>>
>>>>>>>>> Did you mean running por_fsstress testcase?
>>>>>>>>>
>>>>>>>>> Now, at least I didn't hit any problem for normal fsstress case.
>>>>>>>>
>>>>>>>> Yup. por_fsstress
>>>>>>>
>>>>>>> Please check https://github.com/jaegeuk/f2fs/commits/g-dev-test.
>>>>>>> I've fixed
>>>>>>> - truncation offset
>>>>>>> - i_compressed_blocks and its lock coverage
>>>>>>> - error handling
>>>>>>> - etc
>>>>>>
>>>>>> I changed as below, and por_fsstress stops panic the system.
>>>>>>
>>>>>> Could you merge all these fixes into original patch?
>>>>>
>>>>> Yup, let m roll up some early patches first once test results become good.
>>>>
>>>> I didn't encounter issue any more, how about por_fsstress test result in your
>>>> enviornment? If there is, please share the call stack with me.
>>>
>>> Sure, will do, once I hit an issue. BTW, I'm hitting another unreacheable nat
>>> entry issue during por_stress without compression. :(
>>
>> Did you enable any features during por_fsstress test?
>>
>> I only hit below warning during por_fsstress test on image w/o compression.
>>
>> ------------[ cut here ]------------
>> WARNING: CPU: 10 PID: 33483 at fs/fs-writeback.c:1448 __writeback_single_inode+0x28c/0x340
>> Call Trace:
>> writeback_single_inode+0xad/0x120
>> sync_inode_metadata+0x3d/0x60
>> f2fs_sync_inode_meta+0x90/0xe0 [f2fs]
>> block_operations+0x17c/0x360 [f2fs]
>> f2fs_write_checkpoint+0x101/0xff0 [f2fs]
>> f2fs_sync_fs+0xa8/0x130 [f2fs]
>> f2fs_do_sync_file+0x19c/0x880 [f2fs]
>> do_fsync+0x38/0x60
>> __x64_sys_fsync+0x10/0x20
>> do_syscall_64+0x5f/0x220
>> entry_SYSCALL_64_after_hwframe+0x44/0xa9
>
> Does gc_mutex patch fix this?

No, the gc_mutex patch fixes another problem.

BTW, it looks like a VFS bug.

Thanks,

>
>>
>> Thanks,
>>
>>>
>>> Thanks,
>>>
>>>>
>>>> Thanks,
>>>>
>>>>>
>>>>>>
>>>>>> >From bb17d7d77fe0b8a3e3632a7026550800ab9609e9 Mon Sep 17 00:00:00 2001
>>>>>> From: Chao Yu <[email protected]>
>>>>>> Date: Sat, 11 Jan 2020 16:58:20 +0800
>>>>>> Subject: [PATCH] f2fs: compress: fix f2fs_put_rpages_mapping()
>>>>>>
>>>>>> Signed-off-by: Chao Yu <[email protected]>
>>>>>> ---
>>>>>> fs/f2fs/compress.c | 30 +++++++++++++++---------------
>>>>>> 1 file changed, 15 insertions(+), 15 deletions(-)
>>>>>>
>>>>>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>>>>>> index 502cd0ddc2a7..5c6a31d84ce4 100644
>>>>>> --- a/fs/f2fs/compress.c
>>>>>> +++ b/fs/f2fs/compress.c
>>>>>> @@ -74,18 +74,10 @@ static void f2fs_put_compressed_page(struct page *page)
>>>>>> }
>>>>>>
>>>>>> static void f2fs_drop_rpages(struct compress_ctx *cc,
>>>>>> - struct address_space *mapping, int len, bool unlock)
>>>>>> + int len, bool unlock)
>>>>>> {
>>>>>> unsigned int i;
>>>>>> for (i = 0; i < len; i++) {
>>>>>> - if (mapping) {
>>>>>> - pgoff_t start = start_idx_of_cluster(cc);
>>>>>> - struct page *page = find_get_page(mapping, start + i);
>>>>>> -
>>>>>> - put_page(page);
>>>>>> - put_page(page);
>>>>>> - cc->rpages[i] = NULL;
>>>>>> - }
>>>>>> if (!cc->rpages[i])
>>>>>> continue;
>>>>>> if (unlock)
>>>>>> @@ -97,18 +89,25 @@ static void f2fs_drop_rpages(struct compress_ctx *cc,
>>>>>>
>>>>>> static void f2fs_put_rpages(struct compress_ctx *cc)
>>>>>> {
>>>>>> - f2fs_drop_rpages(cc, NULL, cc->cluster_size, false);
>>>>>> + f2fs_drop_rpages(cc, cc->cluster_size, false);
>>>>>> }
>>>>>>
>>>>>> static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
>>>>>> {
>>>>>> - f2fs_drop_rpages(cc, NULL, len, true);
>>>>>> + f2fs_drop_rpages(cc, len, true);
>>>>>> }
>>>>>>
>>>>>> static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
>>>>>> - struct address_space *mapping, int len)
>>>>>> + struct address_space *mapping,
>>>>>> + pgoff_t start, int len)
>>>>>> {
>>>>>> - f2fs_drop_rpages(cc, mapping, len, false);
>>>>>> + int i;
>>>>>> + for (i = 0; i < len; i++) {
>>>>>> + struct page *page = find_get_page(mapping, start + i);
>>>>>> +
>>>>>> + put_page(page);
>>>>>> + put_page(page);
>>>>>> + }
>>>>>> }
>>>>>>
>>>>>> static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
>>>>>> @@ -680,7 +679,8 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>>>>>
>>>>>> if (!PageUptodate(page)) {
>>>>>> f2fs_unlock_rpages(cc, i + 1);
>>>>>> - f2fs_put_rpages_mapping(cc, mapping, cc->cluster_size);
>>>>>> + f2fs_put_rpages_mapping(cc, mapping, start_idx,
>>>>>> + cc->cluster_size);
>>>>>> f2fs_destroy_compress_ctx(cc);
>>>>>> goto retry;
>>>>>> }
>>>>>> @@ -714,7 +714,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>>>>>> unlock_pages:
>>>>>> f2fs_unlock_rpages(cc, i);
>>>>>> release_pages:
>>>>>> - f2fs_put_rpages_mapping(cc, mapping, i);
>>>>>> + f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
>>>>>> f2fs_destroy_compress_ctx(cc);
>>>>>> return ret;
>>>>>> }
>>>>>> --
>>>>>> 2.18.0.rc1
>>>>>>
>>>>>>
>>>>>>
>>>>>>>
>>>>>>> One another fix in f2fs-tools as well.
>>>>>>> https://github.com/jaegeuk/f2fs-tools
>>>>>>>
>>>>>>>>
>>>>>>>>>
>>>>>>>>> Thanks,
>>>>>>> .
>>>>>>>
>>>>> .
>>>>>
>>> .
>>>
> .
>

2020-01-15 22:04:36

by Jaegeuk Kim

[permalink] [raw]
Subject: Re: [f2fs-dev] [RFC PATCH v5] f2fs: support data compression

On 01/15, Chao Yu wrote:
> On 2020/1/15 6:48, Jaegeuk Kim wrote:
> > On 01/14, Chao Yu wrote:
> >> On 2020/1/14 0:11, Jaegeuk Kim wrote:
> >>> On 01/13, Chao Yu wrote:
> >>>> On 2020/1/12 2:02, Jaegeuk Kim wrote:
> >>>>> On 01/11, Chao Yu wrote:
> >>>>>> On 2020/1/11 7:52, Jaegeuk Kim wrote:
> >>>>>>> On 01/06, Jaegeuk Kim wrote:
> >>>>>>>> On 01/06, Chao Yu wrote:
> >>>>>>>>> On 2020/1/3 14:50, Chao Yu wrote:
> >>>>>>>>>> This works to me. Could you run fsstress tests on compressed root directory?
> >>>>>>>>>> It seems still there are some bugs.
> >>>>>>>>>
> >>>>>>>>> Jaegeuk,
> >>>>>>>>>
> >>>>>>>>> Did you mean running por_fsstress testcase?
> >>>>>>>>>
> >>>>>>>>> Now, at least I didn't hit any problem for normal fsstress case.
> >>>>>>>>
> >>>>>>>> Yup. por_fsstress
> >>>>>>>
> >>>>>>> Please check https://github.com/jaegeuk/f2fs/commits/g-dev-test.
> >>>>>>> I've fixed
> >>>>>>> - truncation offset
> >>>>>>> - i_compressed_blocks and its lock coverage
> >>>>>>> - error handling
> >>>>>>> - etc
> >>>>>>
> >>>>>> I changed as below, and por_fsstress stops panic the system.
> >>>>>>
> >>>>>> Could you merge all these fixes into original patch?
> >>>>>
> >>>>> Yup, let m roll up some early patches first once test results become good.
> >>>>
> >>>> I didn't encounter issue any more, how about por_fsstress test result in your
> >>>> enviornment? If there is, please share the call stack with me.
> >>>
> >>> Sure, will do, once I hit an issue. BTW, I'm hitting another unreacheable nat
> >>> entry issue during por_stress without compression. :(
> >>
> >> Did you enable any features during por_fsstress test?
> >>
> >> I only hit below warning during por_fsstress test on image w/o compression.
> >>
> >> ------------[ cut here ]------------
> >> WARNING: CPU: 10 PID: 33483 at fs/fs-writeback.c:1448 __writeback_single_inode+0x28c/0x340
> >> Call Trace:
> >> writeback_single_inode+0xad/0x120
> >> sync_inode_metadata+0x3d/0x60
> >> f2fs_sync_inode_meta+0x90/0xe0 [f2fs]
> >> block_operations+0x17c/0x360 [f2fs]
> >> f2fs_write_checkpoint+0x101/0xff0 [f2fs]
> >> f2fs_sync_fs+0xa8/0x130 [f2fs]
> >> f2fs_do_sync_file+0x19c/0x880 [f2fs]
> >> do_fsync+0x38/0x60
> >> __x64_sys_fsync+0x10/0x20
> >> do_syscall_64+0x5f/0x220
> >> entry_SYSCALL_64_after_hwframe+0x44/0xa9
> >
> > Does gc_mutex patch fix this?
>
> No, gc_mutex patch fixes another problem.
>
> BTW, it looks like a bug of VFS.

One more fix below; rerunning the tests now.

---
fs/f2fs/compress.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 2480bff1e115..45a6f20ceb3e 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -556,10 +556,8 @@ static int f2fs_compressed_blocks(struct compress_ctx *cc)

blkaddr = datablock_addr(dn.inode,
dn.node_page, dn.ofs_in_node + i);
- if (blkaddr != NULL_ADDR) {
+ if (blkaddr != NULL_ADDR)
ret++;
- break;
- }
}
}
fail:
@@ -955,10 +953,14 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
enum iostat_type io_type)
{
struct address_space *mapping = cc->inode->i_mapping;
- int i, _submitted, compr_blocks, ret;
- int err = 0;
+ int _submitted, compr_blocks, ret;
+ int i = -1, err = 0;

compr_blocks = f2fs_compressed_blocks(cc);
+ if (compr_blocks < 0) {
+ err = compr_blocks;
+ goto out_err;
+ }

for (i = 0; i < cc->cluster_size; i++) {
if (!cc->rpages[i])
@@ -997,7 +999,7 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
out_fail:
/* TODO: revoke partially updated block addresses */
BUG_ON(compr_blocks);
-
+out_err:
for (++i; i < cc->cluster_size; i++) {
if (!cc->rpages[i])
continue;
@@ -1020,9 +1022,9 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
*submitted = 0;
if (cluster_may_compress(cc)) {
err = f2fs_compress_pages(cc);
- if (err == -EAGAIN)
+ if (err == -EAGAIN) {
goto write;
- else if (err) {
+ } else if (err) {
f2fs_put_rpages_wbc(cc, wbc, true, 1);
goto destroy_out;
}
--
2.24.0.525.g8f36a354ae-goog
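
For clarity, a hedged sketch of the error-path pattern the "int i = -1"
initialization above enables; the helpers and names are hypothetical stand-ins,
not the f2fs functions. If the up-front block count fails, jumping to the
cleanup label with i == -1 makes the "for (++i; ...)" loop start at 0, so every
page in the cluster gets unlocked instead of being leaked.

#include <linux/errno.h>
#include <linux/pagemap.h>

/* Hypothetical stand-in for f2fs_compressed_blocks(): returns a block count
 * or a negative errno. */
static int count_compressed_blocks_sketch(void)
{
	return -EIO;	/* pretend the metadata lookup failed */
}

/* Sketch only: write the raw pages of one cluster, unlocking whatever was
 * not handled if anything goes wrong. */
static int write_raw_pages_sketch(struct page **rpages, int nr)
{
	int i = -1, err = 0;
	int compr_blocks;

	compr_blocks = count_compressed_blocks_sketch();
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < nr; i++) {
		if (!rpages[i])
			continue;
		/* ...submit rpages[i]; on failure set err and goto out_err... */
	}
	return 0;

out_err:
	/* With i == -1 the ++i makes this cover every page; after a mid-loop
	 * failure it covers only the pages not yet handled. */
	for (++i; i < nr; i++) {
		if (!rpages[i])
			continue;
		unlock_page(rpages[i]);
	}
	return err;
}

The real f2fs_write_raw_pages() also redirties the pages and updates wbc
accounting on this path; the sketch leaves that out. The other hunk above
removes the early break, so f2fs_compressed_blocks() counts every allocated
block in the cluster instead of stopping at the first one.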