This fixes skipping GC when a segment is full in a large section.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/gc.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 53312d7bc78b..65c0687ee2bb 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1018,8 +1018,8 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
* race condition along with SSR block allocation.
*/
if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
- get_valid_blocks(sbi, segno, false) ==
- sbi->blocks_per_seg)
+ get_valid_blocks(sbi, segno, true) ==
+ BLKS_PER_SEC(sbi))
return submitted;
if (check_valid_map(sbi, segno, off) == 0)
--
2.25.0.265.gbab2e86ba0-goog
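For context on the hunk above: the third argument of get_valid_blocks() selects
segment vs. section granularity. Roughly, from fs/f2fs/segment.h of this era (a
paraphrased sketch from memory, not a verbatim quote):

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	/*
	 * To read the number of valid blocks in a section instantly,
	 * f2fs keeps a per-section counter next to the per-segment one.
	 */
	if (use_section && __is_large_section(sbi))
		return get_sec_entry(sbi, segno)->valid_blocks;
	return get_seg_entry(sbi, segno)->valid_blocks;
}

So passing true together with BLKS_PER_SEC(sbi) makes the skip condition test
"the whole section is full" instead of "this one segment is full".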
FG_GC needs to move an entire section more quickly.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/gc.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index bbf4db3f6bb4..1676eebc8c8b 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1203,7 +1203,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
if (get_valid_blocks(sbi, segno, false) == 0)
goto freed;
- if (__is_large_section(sbi) &&
+ if (gc_type == BG_GC && __is_large_section(sbi) &&
migrated >= sbi->migration_granularity)
goto skip;
if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
--
2.25.0.265.gbab2e86ba0-goog
If the first segment is empty and migration_granularity is 1, we can't move
the section at all.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/gc.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 65c0687ee2bb..bbf4db3f6bb4 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1233,12 +1233,12 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
segno, gc_type);
stat_inc_seg_count(sbi, type, gc_type);
+ migrated++;
freed:
if (gc_type == FG_GC &&
get_valid_blocks(sbi, segno, false) == 0)
seg_freed++;
- migrated++;
if (__is_large_section(sbi) && segno + 1 < end_segno)
sbi->next_victim_seg[gc_type] = segno + 1;
--
2.25.0.265.gbab2e86ba0-goog
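Taking the three fixes together, the per-segment loop in do_garbage_collect()
ends up looking roughly like this (a condensed sketch pieced together from the
hunks above, not the verbatim function body):

	for (segno = start_segno; segno < end_segno; segno++) {
		/* ... look up the summary page for this segment ... */

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;	/* nothing left to migrate */
		/* only BG_GC honors the migration granularity now */
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		/* ... migrate this segment's node or data blocks ... */

		/* count only segments whose blocks were actually migrated */
		migrated++;
freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi) && segno + 1 < end_segno)
			sbi->next_victim_seg[gc_type] = segno + 1;
skip:
		f2fs_put_page(sum_page, 0);
	}

With this, an empty segment no longer consumes migration budget, FG_GC is never
throttled by migration_granularity, and a segment is only skipped as "full" when
its whole section is full.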
On 2020/2/15 2:58, Jaegeuk Kim wrote:
> If the first segment is empty and migration_granularity is 1, we can't move
> the section at all.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
Reviewed-by: Chao Yu <[email protected]>
Thanks,
On 2020/2/15 2:58, Jaegeuk Kim wrote:
> This fixes skipping GC when a segment is full in a large section.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
> ---
> fs/f2fs/gc.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> index 53312d7bc78b..65c0687ee2bb 100644
> --- a/fs/f2fs/gc.c
> +++ b/fs/f2fs/gc.c
> @@ -1018,8 +1018,8 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
> * race condition along with SSR block allocation.
> */
> if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
> - get_valid_blocks(sbi, segno, false) ==
> - sbi->blocks_per_seg)
> + get_valid_blocks(sbi, segno, true) ==
> + BLKS_PER_SEC(sbi))
Then, in a large section, if the current segment is fully valid, we won't skip
scanning it anymore, so do we need to change it like this:
if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
		get_valid_blocks(sbi, segno, false) == sbi->blocks_per_seg ||
		get_valid_blocks(sbi, segno, true) == BLKS_PER_SEC(sbi))
	return submitted;
> return submitted;
>
> if (check_valid_map(sbi, segno, off) == 0)
>
BTW, regarding the patch subject:
f2fs: add migration count iff migration happens
there is a typo: "iff".
On 2020/2/15 2:58, Jaegeuk Kim wrote:
> If the first segment is empty and migration_granularity is 1, we can't move
> the section at all.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
> ---
> fs/f2fs/gc.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> index 65c0687ee2bb..bbf4db3f6bb4 100644
> --- a/fs/f2fs/gc.c
> +++ b/fs/f2fs/gc.c
> @@ -1233,12 +1233,12 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
> segno, gc_type);
>
> stat_inc_seg_count(sbi, type, gc_type);
> + migrated++;
>
> freed:
> if (gc_type == FG_GC &&
> get_valid_blocks(sbi, segno, false) == 0)
> seg_freed++;
> - migrated++;
>
> if (__is_large_section(sbi) && segno + 1 < end_segno)
> sbi->next_victim_seg[gc_type] = segno + 1;
>
On 2020/2/15 2:58, Jaegeuk Kim wrote:
> FG_GC needs to move an entire section more quickly.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
> ---
> fs/f2fs/gc.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> index bbf4db3f6bb4..1676eebc8c8b 100644
> --- a/fs/f2fs/gc.c
> +++ b/fs/f2fs/gc.c
> @@ -1203,7 +1203,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
>
> if (get_valid_blocks(sbi, segno, false) == 0)
> goto freed;
> - if (__is_large_section(sbi) &&
> + if (gc_type == BG_GC && __is_large_section(sbi) &&
> migrated >= sbi->migration_granularity)
I know migrating one large section is more efficient, but this can
occasionally increase the long-tail latency of f2fs_balance_fs(), especially
in extremely fragmented space.
Thanks,
> goto skip;
> if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
>
On 02/17, Chao Yu wrote:
> On 2020/2/15 2:58, Jaegeuk Kim wrote:
> > FG_GC needs to move an entire section more quickly.
> >
> > Signed-off-by: Jaegeuk Kim <[email protected]>
> > ---
> > fs/f2fs/gc.c | 2 +-
> > 1 file changed, 1 insertion(+), 1 deletion(-)
> >
> > diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> > index bbf4db3f6bb4..1676eebc8c8b 100644
> > --- a/fs/f2fs/gc.c
> > +++ b/fs/f2fs/gc.c
> > @@ -1203,7 +1203,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
> >
> > if (get_valid_blocks(sbi, segno, false) == 0)
> > goto freed;
> > - if (__is_large_section(sbi) &&
> > + if (gc_type == BG_GC && __is_large_section(sbi) &&
> > migrated >= sbi->migration_granularity)
>
> I know migrating one large section is more efficient, but this can
> occasionally increase the long-tail latency of f2fs_balance_fs(), especially
> in extremely fragmented space.
FG_GC has to wait for the whole section migration, which exposes the entire
latency.
>
> Thanks,
>
> > goto skip;
> > if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
> >
On 2020/2/19 7:27, Jaegeuk Kim wrote:
> On 02/17, Chao Yu wrote:
>> On 2020/2/15 2:58, Jaegeuk Kim wrote:
>>> FG_GC needs to move an entire section more quickly.
>>>
>>> Signed-off-by: Jaegeuk Kim <[email protected]>
>>> ---
>>> fs/f2fs/gc.c | 2 +-
>>> 1 file changed, 1 insertion(+), 1 deletion(-)
>>>
>>> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
>>> index bbf4db3f6bb4..1676eebc8c8b 100644
>>> --- a/fs/f2fs/gc.c
>>> +++ b/fs/f2fs/gc.c
>>> @@ -1203,7 +1203,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
>>>
>>> if (get_valid_blocks(sbi, segno, false) == 0)
>>> goto freed;
>>> - if (__is_large_section(sbi) &&
>>> + if (gc_type == BG_GC && __is_large_section(sbi) &&
>>> migrated >= sbi->migration_granularity)
>>
>> I know migrating one large section is more efficient, but this can
>> occasionally increase the long-tail latency of f2fs_balance_fs(), especially
>> in extremely fragmented space.
>
> FG_GC has to wait for the whole section migration, which exposes the entire
> latency.
That will cause long-tail latency for a single f2fs_balance_fs() procedure,
which looks like a very long hang when userspace calls an f2fs syscall. So why
not split the total elapsed time across several f2fs_balance_fs() calls to avoid that?
Thanks,
>
>>
>> Thanks,
>>
>>> goto skip;
>>> if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
>>>
> .
>
On 02/19, Chao Yu wrote:
> On 2020/2/19 7:27, Jaegeuk Kim wrote:
> > On 02/17, Chao Yu wrote:
> >> On 2020/2/15 2:58, Jaegeuk Kim wrote:
> >>> FG_GC needs to move an entire section more quickly.
> >>>
> >>> Signed-off-by: Jaegeuk Kim <[email protected]>
> >>> ---
> >>> fs/f2fs/gc.c | 2 +-
> >>> 1 file changed, 1 insertion(+), 1 deletion(-)
> >>>
> >>> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> >>> index bbf4db3f6bb4..1676eebc8c8b 100644
> >>> --- a/fs/f2fs/gc.c
> >>> +++ b/fs/f2fs/gc.c
> >>> @@ -1203,7 +1203,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
> >>>
> >>> if (get_valid_blocks(sbi, segno, false) == 0)
> >>> goto freed;
> >>> - if (__is_large_section(sbi) &&
> >>> + if (gc_type == BG_GC && __is_large_section(sbi) &&
> >>> migrated >= sbi->migration_granularity)
> >>
> >> I know migrating one large section is more efficient, but this can
> >> occasionally increase the long-tail latency of f2fs_balance_fs(), especially
> >> in extremely fragmented space.
> >
> > FG_GC has to wait for the whole section migration, which exposes the entire
> > latency.
>
> That will cause long-tail latency for a single f2fs_balance_fs() procedure,
> which looks like a very long hang when userspace calls an f2fs syscall. So why
> not split the total elapsed time across several f2fs_balance_fs() calls to avoid that?
Then, other ops can easily make more dirty segments. The intention of FG_GC is
to block everything and secure the minimum number of free segments as a best shot.
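For context, this is the blocking path in question: when free sections run low,
writers end up waiting on a foreground GC pass. A paraphrased sketch of
f2fs_balance_fs() from fs/f2fs/segment.c around this time (earlier checks elided):

void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
	/*
	 * We should do GC or end up with checkpoint, if there are so many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi, false, false, NULL_SEGNO);	/* caller blocks on FG_GC */
	}
}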
>
> Thanks,
>
> >
> >>
> >> Thanks,
> >>
> >>> goto skip;
> >>> if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
> >>>
> > .
> >
On 2020/2/19 11:04, Jaegeuk Kim wrote:
> On 02/19, Chao Yu wrote:
>> On 2020/2/19 7:27, Jaegeuk Kim wrote:
>>> On 02/17, Chao Yu wrote:
>>>> On 2020/2/15 2:58, Jaegeuk Kim wrote:
>>>>> FG_GC needs to move an entire section more quickly.
>>>>>
>>>>> Signed-off-by: Jaegeuk Kim <[email protected]>
>>>>> ---
>>>>> fs/f2fs/gc.c | 2 +-
>>>>> 1 file changed, 1 insertion(+), 1 deletion(-)
>>>>>
>>>>> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
>>>>> index bbf4db3f6bb4..1676eebc8c8b 100644
>>>>> --- a/fs/f2fs/gc.c
>>>>> +++ b/fs/f2fs/gc.c
>>>>> @@ -1203,7 +1203,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
>>>>>
>>>>> if (get_valid_blocks(sbi, segno, false) == 0)
>>>>> goto freed;
>>>>> - if (__is_large_section(sbi) &&
>>>>> + if (gc_type == BG_GC && __is_large_section(sbi) &&
>>>>> migrated >= sbi->migration_granularity)
>>>>
>>>> I know migrating one large section is more efficient, but this can
>>>> occasionally increase the long-tail latency of f2fs_balance_fs(), especially
>>>> in extremely fragmented space.
>>>
>>> FG_GC has to wait for the whole section migration, which exposes the entire
>>> latency.
>>
>> That will cause long-tail latency for a single f2fs_balance_fs() procedure,
>> which looks like a very long hang when userspace calls an f2fs syscall. So why
>> not split the total elapsed time across several f2fs_balance_fs() calls to avoid that?
>
> Then, other ops can easily make more dirty segments. The intention of FG_GC is
Yup, that's a problem: if more dirty data is produced in the meantime, reserved
segments may run out during FG_GC.
> to block everything and secure the minimum number of free segments as a best shot.
I'm just trying to simulate the write GC logic in FTLs to mitigate a single op's max
latency; otherwise the benchmark looks hung during FG_GC (with a 500MB+ section).
Thanks,
>
>>
>> Thanks,
>>
>>>
>>>>
>>>> Thanks,
>>>>
>>>>> goto skip;
>>>>> if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
>>>>>
>>> .
>>>
> .
>
On 02/19, Chao Yu wrote:
> On 2020/2/19 11:04, Jaegeuk Kim wrote:
> > On 02/19, Chao Yu wrote:
> >> On 2020/2/19 7:27, Jaegeuk Kim wrote:
> >>> On 02/17, Chao Yu wrote:
> >>>> On 2020/2/15 2:58, Jaegeuk Kim wrote:
> >>>>> FG_GC needs to move an entire section more quickly.
> >>>>>
> >>>>> Signed-off-by: Jaegeuk Kim <[email protected]>
> >>>>> ---
> >>>>> fs/f2fs/gc.c | 2 +-
> >>>>> 1 file changed, 1 insertion(+), 1 deletion(-)
> >>>>>
> >>>>> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> >>>>> index bbf4db3f6bb4..1676eebc8c8b 100644
> >>>>> --- a/fs/f2fs/gc.c
> >>>>> +++ b/fs/f2fs/gc.c
> >>>>> @@ -1203,7 +1203,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
> >>>>>
> >>>>> if (get_valid_blocks(sbi, segno, false) == 0)
> >>>>> goto freed;
> >>>>> - if (__is_large_section(sbi) &&
> >>>>> + if (gc_type == BG_GC && __is_large_section(sbi) &&
> >>>>> migrated >= sbi->migration_granularity)
> >>>>
> >>>> I know migrating one large section is more efficient, but this can
> >>>> occasionally increase the long-tail latency of f2fs_balance_fs(), especially
> >>>> in extremely fragmented space.
> >>>
> >>> FG_GC has to wait for the whole section migration, which exposes the entire
> >>> latency.
> >>
> >> That will cause long-tail latency for a single f2fs_balance_fs() procedure,
> >> which looks like a very long hang when userspace calls an f2fs syscall. So why
> >> not split the total elapsed time across several f2fs_balance_fs() calls to avoid that?
> >
> > Then, other ops can easily make more dirty segments. The intention of FG_GC is
>
> Yup, that's a problem: if more dirty data is produced in the meantime, reserved
> segments may run out during FG_GC.
>
> > to block everything and secure the minimum number of free segments as a best shot.
>
> I'm just trying to simulate the write GC logic in FTLs to mitigate a single op's max
> latency; otherwise the benchmark looks hung during FG_GC (with a 500MB+ section).
Hmm, I think we may need to think of another way, like doing BG_GC more aggressively.
>
> Thanks,
>
> >
> >>
> >> Thanks,
> >>
> >>>
> >>>>
> >>>> Thanks,
> >>>>
> >>>>> goto skip;
> >>>>> if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
> >>>>>
> >>> .
> >>>
> > .
> >
On 2020/2/25 5:53, Jaegeuk Kim wrote:
> On 02/19, Chao Yu wrote:
>> On 2020/2/19 11:04, Jaegeuk Kim wrote:
>>> On 02/19, Chao Yu wrote:
>>>> On 2020/2/19 7:27, Jaegeuk Kim wrote:
>>>>> On 02/17, Chao Yu wrote:
>>>>>> On 2020/2/15 2:58, Jaegeuk Kim wrote:
>>>>>>> FG_GC needs to move an entire section more quickly.
>>>>>>>
>>>>>>> Signed-off-by: Jaegeuk Kim <[email protected]>
>>>>>>> ---
>>>>>>> fs/f2fs/gc.c | 2 +-
>>>>>>> 1 file changed, 1 insertion(+), 1 deletion(-)
>>>>>>>
>>>>>>> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
>>>>>>> index bbf4db3f6bb4..1676eebc8c8b 100644
>>>>>>> --- a/fs/f2fs/gc.c
>>>>>>> +++ b/fs/f2fs/gc.c
>>>>>>> @@ -1203,7 +1203,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
>>>>>>>
>>>>>>> if (get_valid_blocks(sbi, segno, false) == 0)
>>>>>>> goto freed;
>>>>>>> - if (__is_large_section(sbi) &&
>>>>>>> + if (gc_type == BG_GC && __is_large_section(sbi) &&
>>>>>>> migrated >= sbi->migration_granularity)
>>>>>>
>>>>>> I know migrating one large section is more efficient, but this can
>>>>>> occasionally increase the long-tail latency of f2fs_balance_fs(), especially
>>>>>> in extremely fragmented space.
>>>>>
>>>>> FG_GC has to wait for the whole section migration, which exposes the entire
>>>>> latency.
>>>>
>>>> That will cause long-tail latency for a single f2fs_balance_fs() procedure,
>>>> which looks like a very long hang when userspace calls an f2fs syscall. So why
>>>> not split the total elapsed time across several f2fs_balance_fs() calls to avoid that?
>>>
>>> Then, other ops can easily make more dirty segments. The intention of FG_GC is
>>
>> Yup, that's a problem: if more dirty data is produced in the meantime, reserved
>> segments may run out during FG_GC.
>>
>>> to block everything and secure the minimum number of free segments as a best shot.
>>
>> I'm just trying to simulate the write GC logic in FTLs to mitigate a single op's max
>> latency; otherwise the benchmark looks hung during FG_GC (with a 500MB+ section).
Oh, if we want FG_GC to migrate only one segment at a time, it needs to change
the has_not_enough_free_secs() condition as well, so the previous logic would be broken.
Nvm, please add:
Reviewed-by: Chao Yu <[email protected]>
>
> Hmm, I think we may need to think of another way, like doing BG_GC more aggressively.
Agreed. I guess the SMR scenario may need such a policy: trigger BG_GC more
frequently as free space decreases, but migrate fewer blocks in each BG_GC cycle.
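Purely as a hypothetical illustration of that policy (adaptive_sleep_time() and
its scaling rule are made up here; only the f2fs_gc_kthread sleep-time fields
are existing f2fs state):

static unsigned int adaptive_sleep_time(struct f2fs_gc_kthread *gc_th,
				unsigned int free_secs, unsigned int total_secs)
{
	/*
	 * Scale the BG_GC kthread's sleep interval by free-space pressure:
	 * fewer free sections -> shorter sleep -> more frequent BG_GC cycles,
	 * while each cycle still migrates only a small number of blocks.
	 */
	unsigned int span = gc_th->max_sleep_time - gc_th->min_sleep_time;

	return gc_th->min_sleep_time + span * free_secs / total_secs;
}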
Thanks,
>
>>
>> Thanks,
>>
>>>
>>>>
>>>> Thanks,
>>>>
>>>>>
>>>>>>
>>>>>> Thanks,
>>>>>>
>>>>>>> goto skip;
>>>>>>> if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
>>>>>>>
>>>>> .
>>>>>
>>> .
>>>
> .
>