In f2fs_try_to_free_nids(), the .nid_list_lock spinlock critical region grows
as the expected shrink count increases. To avoid spinning other CPUs for a
long time, it's better to implement this like the extent cache and nats
shrinkers.
Signed-off-by: Chao Yu <[email protected]>
---
v2:
- fix unlocking the wrong spinlock.
fs/f2fs/node.c | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 4da0d8713df5..ad0b14f4dab8 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -2488,7 +2488,6 @@ void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct free_nid *i, *next;
int nr = nr_shrink;
if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
@@ -2498,14 +2497,22 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
return 0;
spin_lock(&nm_i->nid_list_lock);
- list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
- if (nr_shrink <= 0 ||
- nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
+ while (nr_shrink) {
+ struct free_nid *i;
+
+ if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
break;
+ i = list_first_entry(&nm_i->free_nid_list,
+ struct free_nid, list);
+ list_del(&i->list);
+ spin_unlock(&nm_i->nid_list_lock);
+
__remove_free_nid(sbi, i, FREE_NID);
kmem_cache_free(free_nid_slab, i);
nr_shrink--;
+
+ spin_lock(&nm_i->nid_list_lock);
}
spin_unlock(&nm_i->nid_list_lock);
mutex_unlock(&nm_i->build_lock);
--
2.18.0.rc1
On 05/06, Chao Yu wrote:
> In f2fs_try_to_free_nids(), the .nid_list_lock spinlock critical region grows
> as the expected shrink count increases. To avoid spinning other CPUs for a
> long time, it's better to implement this like the extent cache and nats
> shrinkers.
>
> Signed-off-by: Chao Yu <[email protected]>
> ---
> v2:
> - fix unlocking the wrong spinlock.
> fs/f2fs/node.c | 15 +++++++++++----
> 1 file changed, 11 insertions(+), 4 deletions(-)
>
> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> index 4da0d8713df5..ad0b14f4dab8 100644
> --- a/fs/f2fs/node.c
> +++ b/fs/f2fs/node.c
> @@ -2488,7 +2488,6 @@ void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
> int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
> {
> struct f2fs_nm_info *nm_i = NM_I(sbi);
> - struct free_nid *i, *next;
> int nr = nr_shrink;
>
> if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
> @@ -2498,14 +2497,22 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
> return 0;
>
> spin_lock(&nm_i->nid_list_lock);
> - list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
> - if (nr_shrink <= 0 ||
> - nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
> + while (nr_shrink) {
> + struct free_nid *i;
> +
> + if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
> break;
>
> + i = list_first_entry(&nm_i->free_nid_list,
> + struct free_nid, list);
> + list_del(&i->list);
> + spin_unlock(&nm_i->nid_list_lock);
> +
> __remove_free_nid(sbi, i, FREE_NID);
__remove_free_nid() will do list_del again. btw, how about just splitting out
given nr_shrink into multiple trials?
> kmem_cache_free(free_nid_slab, i);
> nr_shrink--;
> +
> + spin_lock(&nm_i->nid_list_lock);
> }
> spin_unlock(&nm_i->nid_list_lock);
> mutex_unlock(&nm_i->build_lock);
> --
> 2.18.0.rc1
On 2020/5/6 23:05, Jaegeuk Kim wrote:
> On 05/06, Chao Yu wrote:
>> In f2fs_try_to_free_nids(), the .nid_list_lock spinlock critical region grows
>> as the expected shrink count increases. To avoid spinning other CPUs for a
>> long time, it's better to implement this like the extent cache and nats
>> shrinkers.
>>
>> Signed-off-by: Chao Yu <[email protected]>
>> ---
>> v2:
>> - fix unlocking the wrong spinlock.
>> fs/f2fs/node.c | 15 +++++++++++----
>> 1 file changed, 11 insertions(+), 4 deletions(-)
>>
>> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
>> index 4da0d8713df5..ad0b14f4dab8 100644
>> --- a/fs/f2fs/node.c
>> +++ b/fs/f2fs/node.c
>> @@ -2488,7 +2488,6 @@ void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
>> int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
>> {
>> struct f2fs_nm_info *nm_i = NM_I(sbi);
>> - struct free_nid *i, *next;
>> int nr = nr_shrink;
>>
>> if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
>> @@ -2498,14 +2497,22 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
>> return 0;
>>
>> spin_lock(&nm_i->nid_list_lock);
>> - list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
>> - if (nr_shrink <= 0 ||
>> - nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
>> + while (nr_shrink) {
>> + struct free_nid *i;
>> +
>> + if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
>> break;
>>
>> + i = list_first_entry(&nm_i->free_nid_list,
>> + struct free_nid, list);
>> + list_del(&i->list);
>> + spin_unlock(&nm_i->nid_list_lock);
>> +
>> __remove_free_nid(sbi, i, FREE_NID);
>
> __remove_free_nid() will do list_del again. btw, how about just splitting out
Oh, my bad.
How about moving __remove_free_nid into .nid_list_lock coverage?
> given nr_shrink into multiple trials?
Like this?
while (shrink) {
	batch = DEFAULT_BATCH_NUMBER; // 16
	spin_lock();
	list_for_each_entry_safe() {
		if (!shrink || !batch)
			break;
		remove_item_from_list;
		shrink--;
		batch--;
	}
	spin_unlock();
}
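
To make the idea concrete, below is a rough sketch of how such a batched loop
could fold back into f2fs_try_to_free_nids(), keeping __remove_free_nid()
under .nid_list_lock so each entry is only unlinked once. This is only an
illustration, not an actual follow-up patch: FREE_NID_BATCH is a made-up
constant for this sketch, and the trylock/return logic around the loop is
assumed to stay as in the current code.

/* Sketch only; FREE_NID_BATCH is a hypothetical constant for illustration. */
#define FREE_NID_BATCH	16

int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
		return 0;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
		struct free_nid *i, *next;
		int batch = FREE_NID_BATCH;

		spin_lock(&nm_i->nid_list_lock);
		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
			if (!nr_shrink || !batch ||
			    nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
				break;
			/* unlink and free while still holding nid_list_lock */
			__remove_free_nid(sbi, i, FREE_NID);
			kmem_cache_free(free_nid_slab, i);
			nr_shrink--;
			batch--;
		}
		spin_unlock(&nm_i->nid_list_lock);
	}
	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}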
Thanks,
>
>> kmem_cache_free(free_nid_slab, i);
>> nr_shrink--;
>> +
>> + spin_lock(&nm_i->nid_list_lock);
>> }
>> spin_unlock(&nm_i->nid_list_lock);
>> mutex_unlock(&nm_i->build_lock);
>> --
>> 2.18.0.rc1
> .
>
On 05/07, Chao Yu wrote:
> On 2020/5/6 23:05, Jaegeuk Kim wrote:
> > On 05/06, Chao Yu wrote:
> >> In f2fs_try_to_free_nids(), the .nid_list_lock spinlock critical region grows
> >> as the expected shrink count increases. To avoid spinning other CPUs for a
> >> long time, it's better to implement this like the extent cache and nats
> >> shrinkers.
> >>
> >> Signed-off-by: Chao Yu <[email protected]>
> >> ---
> >> v2:
> >> - fix unlocking the wrong spinlock.
> >> fs/f2fs/node.c | 15 +++++++++++----
> >> 1 file changed, 11 insertions(+), 4 deletions(-)
> >>
> >> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> >> index 4da0d8713df5..ad0b14f4dab8 100644
> >> --- a/fs/f2fs/node.c
> >> +++ b/fs/f2fs/node.c
> >> @@ -2488,7 +2488,6 @@ void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
> >> int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
> >> {
> >> struct f2fs_nm_info *nm_i = NM_I(sbi);
> >> - struct free_nid *i, *next;
> >> int nr = nr_shrink;
> >>
> >> if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
> >> @@ -2498,14 +2497,22 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
> >> return 0;
> >>
> >> spin_lock(&nm_i->nid_list_lock);
> >> - list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
> >> - if (nr_shrink <= 0 ||
> >> - nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
> >> + while (nr_shrink) {
> >> + struct free_nid *i;
> >> +
> >> + if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
> >> break;
> >>
> >> + i = list_first_entry(&nm_i->free_nid_list,
> >> + struct free_nid, list);
> >> + list_del(&i->list);
> >> + spin_unlock(&nm_i->nid_list_lock);
> >> +
> >> __remove_free_nid(sbi, i, FREE_NID);
> >
> > __remove_free_nid() will do list_del again. btw, how about just splitting out
>
> Oh, my bad.
>
> How about moving __remove_free_nid into .nid_list_lock coverage?
>
> > given nr_shrink into multiple trials?
>
> Like this?
Yes.
>
> while (shrink) {
> 	batch = DEFAULT_BATCH_NUMBER; // 16
> 	spin_lock();
> 	list_for_each_entry_safe() {
> 		if (!shrink || !batch)
> 			break;
> 		remove_item_from_list;
> 		shrink--;
> 		batch--;
> 	}
> 	spin_unlock();
> }
>
> Thanks,
>
> >
> >> kmem_cache_free(free_nid_slab, i);
> >> nr_shrink--;
> >> +
> >> + spin_lock(&nm_i->nid_list_lock);
> >> }
> >> spin_unlock(&nm_i->nid_list_lock);
> >> mutex_unlock(&nm_i->build_lock);
> >> --
> >> 2.18.0.rc1
> > .
> >