2020-01-06 08:03:14

by Chao Yu

Subject: [PATCH 3/4] f2fs: compress: fix error path in prepare_compress_overwrite()

- fix to release cluster pages in retry flow
- fix to call f2fs_put_dnode() & __do_map_lock() in error path

Signed-off-by: Chao Yu <[email protected]>
---
fs/f2fs/compress.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index fc4510729654..3390351d2e39 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -626,20 +626,26 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
}

for (i = 0; i < cc->cluster_size; i++) {
+ f2fs_bug_on(sbi, cc->rpages[i]);
+
page = find_lock_page(mapping, start_idx + i);
f2fs_bug_on(sbi, !page);

f2fs_wait_on_page_writeback(page, DATA, true, true);

- cc->rpages[i] = page;
+ f2fs_compress_ctx_add_page(cc, page);
f2fs_put_page(page, 0);

if (!PageUptodate(page)) {
- for (idx = 0; idx < cc->cluster_size; idx++) {
- f2fs_put_page(cc->rpages[idx],
- (idx <= i) ? 1 : 0);
+ for (idx = 0; idx <= i; idx++) {
+ unlock_page(cc->rpages[idx]);
cc->rpages[idx] = NULL;
}
+ for (idx = 0; idx < cc->cluster_size; idx++) {
+ page = find_lock_page(mapping, start_idx + idx);
+ f2fs_put_page(page, 1);
+ f2fs_put_page(page, 0);
+ }
kvfree(cc->rpages);
cc->nr_rpages = 0;
goto retry;
@@ -654,16 +660,20 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
for (i = cc->cluster_size - 1; i > 0; i--) {
ret = f2fs_get_block(&dn, start_idx + i);
if (ret) {
- /* TODO: release preallocate blocks */
i = cc->cluster_size;
- goto unlock_pages;
+ break;
}

if (dn.data_blkaddr != NEW_ADDR)
break;
}

+ f2fs_put_dnode(&dn);
+
__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+
+ if (ret)
+ goto unlock_pages;
}

*fsdata = cc->rpages;
--
2.18.0.rc1


2020-01-06 19:10:19

by Jaegeuk Kim

Subject: Re: [PATCH 3/4] f2fs: compress: fix error path in prepare_compress_overwrite()

On 01/06, Chao Yu wrote:
> - fix to release cluster pages in retry flow
> - fix to call f2fs_put_dnode() & __do_map_lock() in error path
>
> Signed-off-by: Chao Yu <[email protected]>
> ---
> fs/f2fs/compress.c | 22 ++++++++++++++++------
> 1 file changed, 16 insertions(+), 6 deletions(-)
>
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index fc4510729654..3390351d2e39 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -626,20 +626,26 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> }
>
> for (i = 0; i < cc->cluster_size; i++) {
> + f2fs_bug_on(sbi, cc->rpages[i]);
> +
> page = find_lock_page(mapping, start_idx + i);
> f2fs_bug_on(sbi, !page);
>
> f2fs_wait_on_page_writeback(page, DATA, true, true);
>
> - cc->rpages[i] = page;
> + f2fs_compress_ctx_add_page(cc, page);
> f2fs_put_page(page, 0);
>
> if (!PageUptodate(page)) {
> - for (idx = 0; idx < cc->cluster_size; idx++) {
> - f2fs_put_page(cc->rpages[idx],
> - (idx <= i) ? 1 : 0);
> + for (idx = 0; idx <= i; idx++) {
> + unlock_page(cc->rpages[idx]);
> cc->rpages[idx] = NULL;
> }
> + for (idx = 0; idx < cc->cluster_size; idx++) {
> + page = find_lock_page(mapping, start_idx + idx);

Why do we need to lock the pages again?

> + f2fs_put_page(page, 1);
> + f2fs_put_page(page, 0);
> + }
> kvfree(cc->rpages);
> cc->nr_rpages = 0;
> goto retry;
> @@ -654,16 +660,20 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> for (i = cc->cluster_size - 1; i > 0; i--) {
> ret = f2fs_get_block(&dn, start_idx + i);
> if (ret) {
> - /* TODO: release preallocate blocks */
> i = cc->cluster_size;
> - goto unlock_pages;
> + break;
> }
>
> if (dn.data_blkaddr != NEW_ADDR)
> break;
> }
>
> + f2fs_put_dnode(&dn);

We don't need this, since f2fs_reserve_block() puts the dnode.
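
For reference, this is roughly what f2fs_reserve_block() looks like in
fs/f2fs/data.c (paraphrased from memory, so double-check against the tree):
it puts the dnode itself on error, and also on success whenever it had to
grab the dnode page on its own.

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	/* release the dnode unless the caller passed in its own inode page */
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}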

> +
> __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
> +
> + if (ret)
> + goto unlock_pages;
> }
>
> *fsdata = cc->rpages;
> --
> 2.18.0.rc1

2020-01-07 01:37:12

by Chao Yu

Subject: Re: [PATCH 3/4] f2fs: compress: fix error path in prepare_compress_overwrite()

On 2020/1/7 3:08, Jaegeuk Kim wrote:
> On 01/06, Chao Yu wrote:
>> - fix to release cluster pages in retry flow
>> - fix to call f2fs_put_dnode() & __do_map_lock() in error path
>>
>> Signed-off-by: Chao Yu <[email protected]>
>> ---
>> fs/f2fs/compress.c | 22 ++++++++++++++++------
>> 1 file changed, 16 insertions(+), 6 deletions(-)
>>
>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>> index fc4510729654..3390351d2e39 100644
>> --- a/fs/f2fs/compress.c
>> +++ b/fs/f2fs/compress.c
>> @@ -626,20 +626,26 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>> }
>>
>> for (i = 0; i < cc->cluster_size; i++) {
>> + f2fs_bug_on(sbi, cc->rpages[i]);
>> +
>> page = find_lock_page(mapping, start_idx + i);
>> f2fs_bug_on(sbi, !page);
>>
>> f2fs_wait_on_page_writeback(page, DATA, true, true);
>>
>> - cc->rpages[i] = page;
>> + f2fs_compress_ctx_add_page(cc, page);
>> f2fs_put_page(page, 0);
>>
>> if (!PageUptodate(page)) {
>> - for (idx = 0; idx < cc->cluster_size; idx++) {
>> - f2fs_put_page(cc->rpages[idx],
>> - (idx <= i) ? 1 : 0);
>> + for (idx = 0; idx <= i; idx++) {
>> + unlock_page(cc->rpages[idx]);
>> cc->rpages[idx] = NULL;
>> }
>> + for (idx = 0; idx < cc->cluster_size; idx++) {
>> + page = find_lock_page(mapping, start_idx + idx);
>
> Why do we need to lock the pages again?

Here, every page in the cluster holds one extra reference, so we need to
look up all of the pages and release those references.

cc->rpages may not record every page's pointer, so we cannot use

	f2fs_put_page(cc->rpages[idx], (idx <= i) ? 1 : 0);

to release all of the references.

BTW, find_get_page() should be fine to use instead of find_lock_page().
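
Something like below, e.g. (just an untested sketch on top of the hunk
above, reusing the same local variables):

	/*
	 * Every page in the cluster still holds the extra reference we
	 * took earlier; cc->rpages[] may not track all of them, so look
	 * each page up again and drop both references.
	 */
	for (idx = 0; idx < cc->cluster_size; idx++) {
		page = find_get_page(mapping, start_idx + idx);
		/* one put for find_get_page(), one for the extra reference */
		f2fs_put_page(page, 0);
		f2fs_put_page(page, 0);
	}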

>
>> + f2fs_put_page(page, 1);
>> + f2fs_put_page(page, 0);
>> + }
>> kvfree(cc->rpages);
>> cc->nr_rpages = 0;
>> goto retry;
>> @@ -654,16 +660,20 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
>> for (i = cc->cluster_size - 1; i > 0; i--) {
>> ret = f2fs_get_block(&dn, start_idx + i);
>> if (ret) {
>> - /* TODO: release preallocate blocks */
>> i = cc->cluster_size;
>> - goto unlock_pages;
>> + break;
>> }
>>
>> if (dn.data_blkaddr != NEW_ADDR)
>> break;
>> }
>>
>> + f2fs_put_dnode(&dn);
>
> We don't need this, since f2fs_reserve_block() puts the dnode.

Correct.

Thanks,

>
>> +
>> __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
>> +
>> + if (ret)
>> + goto unlock_pages;
>> }
>>
>> *fsdata = cc->rpages;
>> --
>> 2.18.0.rc1

2020-01-08 20:51:00

by Jaegeuk Kim

Subject: Re: [PATCH 3/4] f2fs: compress: fix error path in prepare_compress_overwrite()

On 01/07, Chao Yu wrote:
> On 2020/1/7 3:08, Jaegeuk Kim wrote:
> > On 01/06, Chao Yu wrote:
> >> - fix to release cluster pages in retry flow
> >> - fix to call f2fs_put_dnode() & __do_map_lock() in error path
> >>
> >> Signed-off-by: Chao Yu <[email protected]>
> >> ---
> >> fs/f2fs/compress.c | 22 ++++++++++++++++------
> >> 1 file changed, 16 insertions(+), 6 deletions(-)
> >>
> >> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> >> index fc4510729654..3390351d2e39 100644
> >> --- a/fs/f2fs/compress.c
> >> +++ b/fs/f2fs/compress.c
> >> @@ -626,20 +626,26 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> >> }
> >>
> >> for (i = 0; i < cc->cluster_size; i++) {
> >> + f2fs_bug_on(sbi, cc->rpages[i]);
> >> +
> >> page = find_lock_page(mapping, start_idx + i);
> >> f2fs_bug_on(sbi, !page);
> >>
> >> f2fs_wait_on_page_writeback(page, DATA, true, true);
> >>
> >> - cc->rpages[i] = page;
> >> + f2fs_compress_ctx_add_page(cc, page);
> >> f2fs_put_page(page, 0);
> >>
> >> if (!PageUptodate(page)) {
> >> - for (idx = 0; idx < cc->cluster_size; idx++) {
> >> - f2fs_put_page(cc->rpages[idx],
> >> - (idx <= i) ? 1 : 0);
> >> + for (idx = 0; idx <= i; idx++) {
> >> + unlock_page(cc->rpages[idx]);
> >> cc->rpages[idx] = NULL;
> >> }
> >> + for (idx = 0; idx < cc->cluster_size; idx++) {
> >> + page = find_lock_page(mapping, start_idx + idx);
> >
> > Why do we need to lock the pages again?
>
> Here, every page in the cluster holds one extra reference, so we need to
> look up all of the pages and release those references.
>
> cc->rpages may not record every page's pointer, so we cannot use
>
> 	f2fs_put_page(cc->rpages[idx], (idx <= i) ? 1 : 0);
>
> to release all of the references.
>
> BTW, find_get_page() should be fine to use instead of find_lock_page().

Could you take a look at this?

https://github.com/jaegeuk/f2fs/commit/2e4ea726633dd2666f57ae88dfec5d97694d6495


Thanks,

>
> >
> >> + f2fs_put_page(page, 1);
> >> + f2fs_put_page(page, 0);
> >> + }
> >> kvfree(cc->rpages);
> >> cc->nr_rpages = 0;
> >> goto retry;
> >> @@ -654,16 +660,20 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
> >> for (i = cc->cluster_size - 1; i > 0; i--) {
> >> ret = f2fs_get_block(&dn, start_idx + i);
> >> if (ret) {
> >> - /* TODO: release preallocate blocks */
> >> i = cc->cluster_size;
> >> - goto unlock_pages;
> >> + break;
> >> }
> >>
> >> if (dn.data_blkaddr != NEW_ADDR)
> >> break;
> >> }
> >>
> >> + f2fs_put_dnode(&dn);
> >
> > We don't need this, since f2fs_reserve_block() puts the dnode.
>
> Correct.
>
> Thanks,
>
> >
> >> +
> >> __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
> >> +
> >> + if (ret)
> >> + goto unlock_pages;
> >> }
> >>
> >> *fsdata = cc->rpages;
> >> --
> >> 2.18.0.rc1