This patch revisits the retry paths in f2fs.
The basic idea is to use cond_resched instead of retrying from the very early
stage.
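For example, grab_nat_entry() below no longer needs to free the new entry and
bail out when the radix tree insertion fails; both the allocation and the
insertion now retry internally with cond_resched() (before/after sketch taken
from the node.c hunk below):

	/* before: the caller handled both failure cases and retried from scratch */
	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}

	/* after: both helpers loop with cond_resched() until they succeed */
	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);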
Suggested-by: Gu Zheng <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/f2fs.h | 7 +++++++
fs/f2fs/gc.c | 5 ++---
fs/f2fs/node.c | 41 +++++++++--------------------------------
fs/f2fs/segment.c | 5 ++---
4 files changed, 20 insertions(+), 38 deletions(-)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c873140..c787fe3 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1021,6 +1021,13 @@ retry:
return entry;
}
+static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
+ unsigned long index, void *item)
+{
+ while (radix_tree_insert(root, index, item))
+ cond_resched();
+}
+
#define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
static inline bool IS_INODE(struct page *page)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 2c58c58..eec0933 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -356,12 +356,11 @@ static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
iput(inode);
return;
}
-retry:
new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
new_ie->inode = inode;
-
+retry:
if (radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie)) {
- kmem_cache_free(winode_slab, new_ie);
+ cond_resched();
goto retry;
}
list_add_tail(&new_ie->list, &gc_list->ilist);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 8de4f55..f83326c 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -147,7 +147,7 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
if (get_nat_flag(ne, IS_DIRTY))
return;
-retry:
+
head = radix_tree_lookup(&nm_i->nat_set_root, set);
if (!head) {
head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);
@@ -156,11 +156,7 @@ retry:
INIT_LIST_HEAD(&head->set_list);
head->set = set;
head->entry_cnt = 0;
-
- if (radix_tree_insert(&nm_i->nat_set_root, set, head)) {
- kmem_cache_free(nat_entry_set_slab, head);
- goto retry;
- }
+ f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
}
list_move_tail(&ne->list, &head->entry_list);
nm_i->dirty_nat_cnt++;
@@ -238,13 +234,8 @@ static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
struct nat_entry *new;
- new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
- if (!new)
- return NULL;
- if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
- kmem_cache_free(nat_entry_slab, new);
- return NULL;
- }
+ new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
+ f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
memset(new, 0, sizeof(struct nat_entry));
nat_set_nid(new, nid);
nat_reset_flag(new);
@@ -257,15 +248,11 @@ static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
struct f2fs_nat_entry *ne)
{
struct nat_entry *e;
-retry:
+
down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (!e) {
e = grab_nat_entry(nm_i, nid);
- if (!e) {
- up_write(&nm_i->nat_tree_lock);
- goto retry;
- }
node_info_from_raw_nat(&e->ni, ne);
}
up_write(&nm_i->nat_tree_lock);
@@ -276,15 +263,11 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
-retry:
+
down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ni->nid);
if (!e) {
e = grab_nat_entry(nm_i, ni->nid);
- if (!e) {
- up_write(&nm_i->nat_tree_lock);
- goto retry;
- }
e->ni = *ni;
f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
} else if (new_blkaddr == NEW_ADDR) {
@@ -1833,19 +1816,13 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
raw_ne = nat_in_journal(sum, i);
-retry:
+
down_write(&nm_i->nat_tree_lock);
ne = __lookup_nat_cache(nm_i, nid);
- if (ne)
- goto found;
-
- ne = grab_nat_entry(nm_i, nid);
if (!ne) {
- up_write(&nm_i->nat_tree_lock);
- goto retry;
+ ne = grab_nat_entry(nm_i, nid);
+ node_info_from_raw_nat(&ne->ni, &raw_ne);
}
- node_info_from_raw_nat(&ne->ni, &raw_ne);
-found:
__set_nat_cache_dirty(nm_i, ne);
up_write(&nm_i->nat_tree_lock);
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 9a33e34..c79d67e 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -179,13 +179,13 @@ void register_inmem_page(struct inode *inode, struct page *page)
struct f2fs_inode_info *fi = F2FS_I(inode);
struct inmem_pages *new;
int err;
-retry:
+
new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
/* add atomic page indices to the list */
new->page = page;
INIT_LIST_HEAD(&new->list);
-
+retry:
/* increase reference count with clean state */
mutex_lock(&fi->inmem_lock);
err = radix_tree_insert(&fi->inmem_root, page->index, new);
@@ -195,7 +195,6 @@ retry:
return;
} else if (err) {
mutex_unlock(&fi->inmem_lock);
- kmem_cache_free(inmem_entry_slab, new);
goto retry;
}
get_page(page);
--
2.1.1
The inmemory pages should be handled by invalidate_page, since they need to be
released in the truncation path.
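With PagePrivate set on these pages, truncation will invoke the ->invalidatepage
hook, which lets f2fs drop the corresponding inmem entry. A minimal sketch of
that side (the exact handler in data.c is not shown here, so treat the dispatch
below as an assumption; invalidate_inmem_page() already exists in segment.c):

	/* sketch: from f2fs's ->invalidatepage path during truncation */
	if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
		invalidate_inmem_page(inode, page);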
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/segment.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index c79d67e..3ebcf96 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -180,6 +180,8 @@ void register_inmem_page(struct inode *inode, struct page *page)
struct inmem_pages *new;
int err;
+ SetPagePrivate(page);
+
new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
/* add atomic page indices to the list */
--
2.1.1
In do_read_inode, if we fail in __recover_inline_status, the inode keeps its
inline flag without the corresponding count being increased.
Later, f2fs_evict_inode will decrease the count, which results in -1.
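The failing sequence looks roughly like this (call-flow sketch, reconstructed
from the description above):

	do_read_inode()                  /* inline flag is set on the inode      */
	  __recover_inline_status()      /* fails, so f2fs_iget() bails out      */
	                                 /* before stat_inc_inline_inode() runs  */
	...
	f2fs_evict_inode()
	  stat_dec_inline_inode()        /* counter is decreased, ending at -1   */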
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/inode.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 9fe110e..196cc78 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -148,6 +148,10 @@ static int do_read_inode(struct inode *inode)
__get_inode_rdev(inode, ri);
f2fs_put_page(node_page, 1);
+
+ stat_inc_inline_inode(inode);
+ stat_inc_inline_dir(inode);
+
return err;
}
@@ -199,8 +203,6 @@ make_now:
goto bad_inode;
}
unlock_new_inode(inode);
- stat_inc_inline_inode(inode);
- stat_inc_inline_dir(inode);
trace_f2fs_iget(inode);
return inode;
--
2.1.1
This patch adds a counter for the number of inmemory pages in the page cache.
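The counter reuses the existing per-sb page counters, so only the new enum
entry plus inc/dec calls are needed. Simplified sketch of the helpers already
in f2fs.h (the real definitions may carry extra bookkeeping):

	/* sketch: per-type page counters kept in struct f2fs_sb_info */
	static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
	{
		atomic_inc(&sbi->nr_pages[count_type]);
	}

	static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
	{
		atomic_dec(&sbi->nr_pages[count_type]);
	}

	static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
	{
		return atomic_read(&sbi->nr_pages[count_type]);
	}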
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/debug.c | 3 +++
fs/f2fs/f2fs.h | 3 ++-
fs/f2fs/segment.c | 3 +++
3 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 40b679c..4e2e39c 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -39,6 +39,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
si->ndirty_dirs = sbi->n_dirty_dirs;
si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
+ si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi);
@@ -249,6 +250,8 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, "\nExtent Hit Ratio: %d / %d\n",
si->hit_ext, si->total_ext);
seq_puts(s, "\nBalancing F2FS Async:\n");
+ seq_printf(s, " - inmem: %4d\n",
+ si->inmem_pages);
seq_printf(s, " - nodes: %4d in %4d\n",
si->ndirty_node, si->node_pages);
seq_printf(s, " - dents: %4d in dirs:%4d\n",
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c787fe3..f60b817 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -462,6 +462,7 @@ enum count_type {
F2FS_DIRTY_DENTS,
F2FS_DIRTY_NODES,
F2FS_DIRTY_META,
+ F2FS_INMEM_PAGES,
NR_COUNT_TYPE,
};
@@ -1498,7 +1499,7 @@ struct f2fs_stat_info {
int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
int nats, sits, fnids;
int total_count, utilization;
- int bg_gc, inline_inode, inline_dir;
+ int bg_gc, inline_inode, inline_dir, inmem_pages;
unsigned int valid_count, valid_node_count, valid_inode_count;
unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 3ebcf96..42607a6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -201,6 +201,7 @@ retry:
}
get_page(page);
list_add_tail(&new->list, &fi->inmem_pages);
+ inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
mutex_unlock(&fi->inmem_lock);
}
@@ -216,6 +217,7 @@ void invalidate_inmem_page(struct inode *inode, struct page *page)
f2fs_put_page(cur->page, 0);
list_del(&cur->list);
kmem_cache_free(inmem_entry_slab, cur);
+ dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
}
mutex_unlock(&fi->inmem_lock);
}
@@ -257,6 +259,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
f2fs_put_page(cur->page, 1);
list_del(&cur->list);
kmem_cache_free(inmem_entry_slab, cur);
+ dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
}
if (submit_bio)
f2fs_submit_merged_bio(sbi, DATA, WRITE);
--
2.1.1
If a file is closed, let's drop its inmemory pages.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/file.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 146e58a..b6f3fbf 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -901,6 +901,14 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
return f2fs_convert_inline_inode(inode);
}
+static int f2fs_release_file(struct inode *inode, struct file *filp)
+{
+ /* some remaining atomic pages should be discarded */
+ if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
+ commit_inmem_pages(inode, true);
+ return 0;
+}
+
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
struct inode *inode = file_inode(filp);
@@ -1010,6 +1018,7 @@ const struct file_operations f2fs_file_operations = {
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.open = generic_file_open,
+ .release = f2fs_release_file,
.mmap = f2fs_file_mmap,
.fsync = f2fs_sync_file,
.fallocate = f2fs_fallocate,
--
2.1.1
> -----Original Message-----
> From: Jaegeuk Kim [mailto:[email protected]]
> Sent: Sunday, December 07, 2014 3:56 AM
> To: [email protected]; [email protected];
> [email protected]
> Cc: Jaegeuk Kim
> Subject: [f2fs-dev] [PATCH 1/5] f2fs: do retry operations with cond_resched
>
> This patch revisits the retry paths in f2fs.
> The basic idea is to use cond_resched instead of retrying from the very early
> stage.
>
> Suggested-by: Gu Zheng <[email protected]>
> Signed-off-by: Jaegeuk Kim <[email protected]>
Reviewed-by: Chao Yu <[email protected]>
> ---
> fs/f2fs/f2fs.h | 7 +++++++
> fs/f2fs/gc.c | 5 ++---
> fs/f2fs/node.c | 41 +++++++++--------------------------------
> fs/f2fs/segment.c | 5 ++---
> 4 files changed, 20 insertions(+), 38 deletions(-)
>
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index c873140..c787fe3 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -1021,6 +1021,13 @@ retry:
> return entry;
> }
>
> +static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
> + unsigned long index, void *item)
> +{
> + while (radix_tree_insert(root, index, item))
> + cond_resched();
> +}
> +
> #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
>
> static inline bool IS_INODE(struct page *page)
> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> index 2c58c58..eec0933 100644
> --- a/fs/f2fs/gc.c
> +++ b/fs/f2fs/gc.c
> @@ -356,12 +356,11 @@ static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
> iput(inode);
> return;
> }
> -retry:
> new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
> new_ie->inode = inode;
> -
> +retry:
> if (radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie)) {
> - kmem_cache_free(winode_slab, new_ie);
> + cond_resched();
> goto retry;
> }
use f2fs_radix_tree_insert instead?
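i.e. something like this (just an untested sketch), so the gc path matches the
other callers converted in this patch:

	new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
	new_ie->inode = inode;
	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);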
Thanks,
Yu