A macro should not use duplicate parameter names.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/segment.h | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index e399bd4..c0d7740 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -23,13 +23,13 @@
((t == CURSEG_HOT_NODE) || (t == CURSEG_COLD_NODE) || \
(t == CURSEG_WARM_NODE))
-#define IS_CURSEG(sbi, segno) \
- ((segno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
- (segno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
- (segno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
- (segno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
- (segno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
- (segno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
+#define IS_CURSEG(sbi, seg) \
+ ((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
+ (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
+ (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
+ (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
+ (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
+ (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
#define IS_CURSEC(sbi, secno) \
((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
--
1.8.1.3.566.gaa39828
Foreground GCs are triggered when there are not enough free sections.
So, we should not skip moving valid blocks in the victim segments.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/gc.c | 46 ++++++++++++++++++++++++++++++++++------------
1 file changed, 34 insertions(+), 12 deletions(-)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 01a1d60..54aceb2 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -131,7 +131,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- if (p->alloc_mode) {
+ if (p->alloc_mode == SSR) {
p->gc_mode = GC_GREEDY;
p->dirty_segmap = dirty_i->dirty_segmap[type];
p->ofs_unit = 1;
@@ -403,8 +403,14 @@ next_step:
continue;
/* set page dirty and write it */
- if (!PageWriteback(node_page))
+ if (gc_type == FG_GC) {
+ f2fs_submit_bio(sbi, NODE, true);
+ wait_on_page_writeback(node_page);
set_page_dirty(node_page);
+ } else {
+ if (!PageWriteback(node_page))
+ set_page_dirty(node_page);
+ }
f2fs_put_page(node_page, 1);
stat_inc_node_blk_count(sbi, 1);
}
@@ -420,6 +426,13 @@ next_step:
.for_reclaim = 0,
};
sync_node_pages(sbi, 0, &wbc);
+
+ /*
+ * In the case of FG_GC, it'd be better to reclaim this victim
+ * completely.
+ */
+ if (get_valid_blocks(sbi, segno, 1) != 0)
+ goto next_step;
}
}
@@ -483,20 +496,19 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
- if (page->mapping != inode->i_mapping)
- goto out;
-
- if (inode != page->mapping->host)
- goto out;
-
- if (PageWriteback(page))
- goto out;
-
if (gc_type == BG_GC) {
+ if (PageWriteback(page))
+ goto out;
set_page_dirty(page);
set_cold_data(page);
} else {
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+
+ if (PageWriteback(page)) {
+ f2fs_submit_bio(sbi, DATA, true);
+ wait_on_page_writeback(page);
+ }
+
mutex_lock_op(sbi, DATA_WRITE);
if (clear_page_dirty_for_io(page) &&
S_ISDIR(inode->i_mode)) {
@@ -593,8 +605,18 @@ next_iput:
if (++phase < 4)
goto next_step;
- if (gc_type == FG_GC)
+ if (gc_type == FG_GC) {
f2fs_submit_bio(sbi, DATA, true);
+
+ /*
+ * In the case of FG_GC, it'd be better to reclaim this victim
+ * completely.
+ */
+ if (get_valid_blocks(sbi, segno, 1) != 0) {
+ phase = 2;
+ goto next_step;
+ }
+ }
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
--
1.8.1.3.566.gaa39828
Let's use a macro to get the total number of sections.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/debug.c | 7 +++----
fs/f2fs/segment.c | 19 +++++++++----------
fs/f2fs/segment.h | 1 +
3 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 025b9e2..20b8794 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -106,7 +106,7 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
}
}
mutex_unlock(&sit_i->sentry_lock);
- dist = sbi->total_sections * hblks_per_sec * hblks_per_sec / 100;
+ dist = TOTAL_SECS(sbi) * hblks_per_sec * hblks_per_sec / 100;
si->bimodal = bimodal / dist;
if (si->dirty_count)
si->avg_vblocks = total_vblocks / ndirty;
@@ -138,14 +138,13 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += f2fs_bitmap_size(TOTAL_SEGS(sbi));
si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * TOTAL_SEGS(sbi);
if (sbi->segs_per_sec > 1)
- si->base_mem += sbi->total_sections *
- sizeof(struct sec_entry);
+ si->base_mem += TOTAL_SECS(sbi) * sizeof(struct sec_entry);
si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
/* build free segmap */
si->base_mem += sizeof(struct free_segmap_info);
si->base_mem += f2fs_bitmap_size(TOTAL_SEGS(sbi));
- si->base_mem += f2fs_bitmap_size(sbi->total_sections);
+ si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi));
/* build curseg */
si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 1758149..179a13e 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -348,9 +348,8 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
unsigned int *newseg, bool new_sec, int dir)
{
struct free_segmap_info *free_i = FREE_I(sbi);
- unsigned int total_secs = sbi->total_sections;
unsigned int segno, secno, zoneno;
- unsigned int total_zones = sbi->total_sections / sbi->secs_per_zone;
+ unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
unsigned int hint = *newseg / sbi->segs_per_sec;
unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
unsigned int left_start = hint;
@@ -367,12 +366,12 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
goto got_it;
}
find_other_zone:
- secno = find_next_zero_bit(free_i->free_secmap, total_secs, hint);
- if (secno >= total_secs) {
+ secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
+ if (secno >= TOTAL_SECS(sbi)) {
if (dir == ALLOC_RIGHT) {
secno = find_next_zero_bit(free_i->free_secmap,
- total_secs, 0);
- BUG_ON(secno >= total_secs);
+ TOTAL_SECS(sbi), 0);
+ BUG_ON(secno >= TOTAL_SECS(sbi));
} else {
go_left = 1;
left_start = hint - 1;
@@ -387,8 +386,8 @@ find_other_zone:
continue;
}
left_start = find_next_zero_bit(free_i->free_secmap,
- total_secs, 0);
- BUG_ON(left_start >= total_secs);
+ TOTAL_SECS(sbi), 0);
+ BUG_ON(left_start >= TOTAL_SECS(sbi));
break;
}
secno = left_start;
@@ -1390,7 +1389,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
}
if (sbi->segs_per_sec > 1) {
- sit_i->sec_entries = vzalloc(sbi->total_sections *
+ sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
sizeof(struct sec_entry));
if (!sit_i->sec_entries)
return -ENOMEM;
@@ -1441,7 +1440,7 @@ static int build_free_segmap(struct f2fs_sb_info *sbi)
if (!free_i->free_segmap)
return -ENOMEM;
- sec_bitmap_size = f2fs_bitmap_size(sbi->total_sections);
+ sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
if (!free_i->free_secmap)
return -ENOMEM;
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index c0d7740..fea9245 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -81,6 +81,7 @@
#define f2fs_bitmap_size(nr) \
(BITS_TO_LONGS(nr) * sizeof(unsigned long))
#define TOTAL_SEGS(sbi) (SM_I(sbi)->main_segments)
+#define TOTAL_SECS(sbi) (sbi->total_sections)
#define SECTOR_FROM_BLOCK(sbi, blk_addr) \
(blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
--
1.8.1.3.566.gaa39828
Like below, there are 8 segment bitmaps for SSR victim candidates.
enum dirty_type {
DIRTY_HOT_DATA, /* dirty segments assigned as hot data logs */
DIRTY_WARM_DATA, /* dirty segments assigned as warm data logs */
DIRTY_COLD_DATA, /* dirty segments assigned as cold data logs */
DIRTY_HOT_NODE, /* dirty segments assigned as hot node logs */
DIRTY_WARM_NODE, /* dirty segments assigned as warm node logs */
DIRTY_COLD_NODE, /* dirty segments assigned as cold node logs */
DIRTY, /* to count # of dirty segments */
PRE, /* to count # of entirely obsolete segments */
NR_DIRTY_TYPE
};
The upper 6 bitmaps indicate segments dirtied by active log areas respectively.
And, the DIRTY bitmap integrates all the 6 bitmaps.
For example,
o DIRTY_HOT_DATA : 1010000
o DIRTY_WARM_DATA: 0100000
o DIRTY_COLD_DATA: 0001000
o DIRTY_HOT_NODE : 0000010
o DIRTY_WARM_NODE: 0000001
o DIRTY_COLD_NODE: 0000000
In this case,
o DIRTY : 1111011,
which means that we should guarantee the consistency between DIRTY and the other
bitmaps concretely.
However, the SSR mode selects victims freely from any log types, which can set
multiple bits across the various bitmap types.
So, this patch eliminates this inconsistency.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/segment.c | 23 ++++++++++++++++++-----
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index fe520d3..7c67ec2 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -49,9 +49,20 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
if (dirty_type == DIRTY) {
struct seg_entry *sentry = get_seg_entry(sbi, segno);
+ enum dirty_type t = DIRTY_HOT_DATA;
+
dirty_type = sentry->type;
+
if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
dirty_i->nr_dirty[dirty_type]++;
+
+ /* Only one bitmap should be set */
+ for (; t <= DIRTY_COLD_NODE; t++) {
+ if (t == dirty_type)
+ continue;
+ if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
+ dirty_i->nr_dirty[t]--;
+ }
}
}
@@ -64,11 +75,13 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
dirty_i->nr_dirty[dirty_type]--;
if (dirty_type == DIRTY) {
- struct seg_entry *sentry = get_seg_entry(sbi, segno);
- dirty_type = sentry->type;
- if (test_and_clear_bit(segno,
- dirty_i->dirty_segmap[dirty_type]))
- dirty_i->nr_dirty[dirty_type]--;
+ enum dirty_type t = DIRTY_HOT_DATA;
+
+ /* clear all the bitmaps */
+ for (; t <= DIRTY_COLD_NODE; t++)
+ if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
+ dirty_i->nr_dirty[t]--;
+
if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
clear_bit(GET_SECNO(sbi, segno),
dirty_i->victim_secmap);
--
1.8.1.3.566.gaa39828
In order to do GC more reliably, I'd like to lock the victim summary page
until its GC is completed, and also prevent any checkpoint process.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/gc.c | 8 +-------
fs/f2fs/node.c | 2 +-
fs/f2fs/super.c | 7 +++++--
3 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 54aceb2..54ac13d 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -641,12 +641,6 @@ static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
if (IS_ERR(sum_page))
return;
- /*
- * CP needs to lock sum_page. In this time, we don't need
- * to lock this page, because this summary page is not gone anywhere.
- * Also, this page is not gonna be updated before GC is done.
- */
- unlock_page(sum_page);
sum = page_address(sum_page);
switch (GET_SUM_TYPE((&sum->footer))) {
@@ -660,7 +654,7 @@ static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
stat_inc_call_count(sbi->stat_info);
- f2fs_put_page(sum_page, 0);
+ f2fs_put_page(sum_page, 1);
}
int f2fs_gc(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index c5360b7..7555fb7 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1148,7 +1148,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
/* First check balancing cached NAT entries */
if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
- write_checkpoint(sbi, false);
+ f2fs_sync_fs(sbi->sb, true);
return 0;
}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index ea325c8..161e6c6 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -137,10 +137,13 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
return 0;
- if (sync)
+ if (sync) {
+ mutex_lock(&sbi->gc_mutex);
write_checkpoint(sbi, false);
- else
+ mutex_unlock(&sbi->gc_mutex);
+ } else {
f2fs_balance_fs(sbi);
+ }
return 0;
}
--
1.8.1.3.566.gaa39828
This patch adds a new condition that allocates free segments in the current
active section even if SSR is needed.
Otherwise, f2fs cannot allocate remained free segments in the section since
SSR finds dirty segments only.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/segment.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index d5244f6..fe520d3 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -347,6 +347,17 @@ next:
return NULL_SEGNO;
}
+static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ unsigned int segno = curseg->segno;
+ struct free_segmap_info *free_i = FREE_I(sbi);
+
+ if (segno + 1 < TOTAL_SEGS(sbi) && (segno + 1) % sbi->segs_per_sec)
+ return !test_bit(segno + 1, free_i->free_segmap);
+ return 0;
+}
+
/*
* Find a new segment from the free segments bitmap to right order
* This function should be returned with success, otherwise BUG
@@ -580,6 +591,8 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
change_curseg(sbi, type, false);
else if (type == CURSEG_WARM_NODE)
new_curseg(sbi, type, false);
+ else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
+ new_curseg(sbi, type, false);
else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
change_curseg(sbi, type, true);
else
--
1.8.1.3.566.gaa39828
When allocating a new segment under the LFS mode, we should keep the section
boundary.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/segment.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 179a13e..b3486f3 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -362,7 +362,8 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
segno = find_next_zero_bit(free_i->free_segmap,
TOTAL_SEGS(sbi), *newseg + 1);
- if (segno < TOTAL_SEGS(sbi))
+ if (segno - *newseg < sbi->segs_per_sec -
+ (*newseg % sbi->segs_per_sec))
goto got_it;
}
find_other_zone:
--
1.8.1.3.566.gaa39828
This patch removes a bitmap for victim segments selected by foreground GC, and
modifies the other bitmap for victim segments selected by background GC.
1) foreground GC bitmap
: We don't need to manage this, since we keep just one previous victim section
number instead of the whole victim history.
The f2fs uses the victim section number in order not to allocate currently
GC'ed section to current active logs.
2) background GC bitmap
: This bitmap is used to avoid selecting victims repeatedly by background GCs.
In addition, the victims are able to be selected by foreground GCs, since
there is no need to read victim blocks during foreground GCs.
By the fact that the foreground GC reclaims segments in a section unit, it'd
be better to manage this bitmap based on the section granularity.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/checkpoint.c | 2 --
fs/f2fs/debug.c | 2 +-
fs/f2fs/f2fs.h | 2 +-
fs/f2fs/gc.c | 42 ++++++++++++++++++---------------
fs/f2fs/segment.c | 66 ++++++++++++++++++++++++----------------------------
fs/f2fs/segment.h | 11 ++++++++-
fs/f2fs/super.c | 2 ++
7 files changed, 68 insertions(+), 59 deletions(-)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index d947e66..93fd57d 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -748,8 +748,6 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
flush_nat_entries(sbi);
flush_sit_entries(sbi);
- reset_victim_segmap(sbi);
-
/* unlock all the fs_lock[] in do_checkpoint() */
do_checkpoint(sbi, is_umount);
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 20b8794..c3bf343 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -153,7 +153,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
/* build dirty segmap */
si->base_mem += sizeof(struct dirty_seglist_info);
si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(TOTAL_SEGS(sbi));
- si->base_mem += 2 * f2fs_bitmap_size(TOTAL_SEGS(sbi));
+ si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi));
/* buld nm */
si->base_mem += sizeof(struct f2fs_nm_info);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 77e2eb0..71eacd3 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -410,6 +410,7 @@ struct f2fs_sb_info {
/* for cleaning operations */
struct mutex gc_mutex; /* mutex for GC */
struct f2fs_gc_kthread *gc_thread; /* GC thread */
+ unsigned int cur_victim_sec; /* current victim section num */
/*
* for stat information.
@@ -979,7 +980,6 @@ int lookup_journal_in_cursum(struct f2fs_summary_block *,
int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
-void reset_victim_segmap(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);
/*
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 2e3eb2d..01a1d60 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -160,18 +160,21 @@ static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- unsigned int segno;
+ unsigned int hint = 0;
+ unsigned int secno;
/*
* If the gc_type is FG_GC, we can select victim segments
* selected by background GC before.
* Those segments guarantee they have small valid blocks.
*/
- segno = find_next_bit(dirty_i->victim_segmap[BG_GC],
- TOTAL_SEGS(sbi), 0);
- if (segno < TOTAL_SEGS(sbi)) {
- clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
- return segno;
+next:
+ secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
+ if (secno < TOTAL_SECS(sbi)) {
+ if (sec_usage_check(sbi, secno))
+ goto next;
+ clear_bit(secno, dirty_i->victim_secmap);
+ return secno * sbi->segs_per_sec;
}
return NULL_SEGNO;
}
@@ -234,7 +237,6 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct victim_sel_policy p;
- unsigned int segno;
int nsearched = 0;
p.alloc_mode = alloc_mode;
@@ -253,6 +255,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
while (1) {
unsigned long cost;
+ unsigned int segno, secno;
segno = find_next_bit(p.dirty_segmap,
TOTAL_SEGS(sbi), p.offset);
@@ -265,13 +268,11 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
break;
}
p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
+ secno = GET_SECNO(sbi, segno);
- if (test_bit(segno, dirty_i->victim_segmap[FG_GC]))
+ if (sec_usage_check(sbi, secno))
continue;
- if (gc_type == BG_GC &&
- test_bit(segno, dirty_i->victim_segmap[BG_GC]))
- continue;
- if (IS_CURSEC(sbi, GET_SECNO(sbi, segno)))
+ if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
continue;
cost = get_gc_cost(sbi, segno, &p);
@@ -291,13 +292,14 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
}
got_it:
if (p.min_segno != NULL_SEGNO) {
- *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
if (p.alloc_mode == LFS) {
- int i;
- for (i = 0; i < p.ofs_unit; i++)
- set_bit(*result + i,
- dirty_i->victim_segmap[gc_type]);
+ unsigned int secno = GET_SECNO(sbi, p.min_segno);
+ if (gc_type == FG_GC)
+ sbi->cur_victim_sec = secno;
+ else
+ set_bit(secno, dirty_i->victim_secmap);
}
+ *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
}
mutex_unlock(&dirty_i->seglist_lock);
@@ -662,9 +664,11 @@ gc_more:
for (i = 0; i < sbi->segs_per_sec; i++)
do_garbage_collect(sbi, segno + i, &ilist, gc_type);
- if (gc_type == FG_GC &&
- get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
+ if (gc_type == FG_GC) {
+ sbi->cur_victim_sec = NULL_SEGNO;
nfree++;
+ WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
+ }
if (has_not_enough_free_secs(sbi, nfree))
goto gc_more;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index b3486f3..d5244f6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -69,8 +69,9 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
if (test_and_clear_bit(segno,
dirty_i->dirty_segmap[dirty_type]))
dirty_i->nr_dirty[dirty_type]--;
- clear_bit(segno, dirty_i->victim_segmap[FG_GC]);
- clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
+ if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
+ clear_bit(GET_SECNO(sbi, segno),
+ dirty_i->victim_secmap);
}
}
@@ -296,13 +297,12 @@ static void write_sum_page(struct f2fs_sb_info *sbi,
f2fs_put_page(page, 1);
}
-static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi,
- int ofs_unit, int type)
+static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int type)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
- unsigned int segno, next_segno, i;
- int ofs = 0;
+ unsigned int segno;
+ unsigned int ofs = 0;
/*
* If there is not enough reserved sections,
@@ -318,23 +318,30 @@ static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi,
if (IS_NODESEG(type))
return NULL_SEGNO;
next:
- segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs++);
- ofs = ((segno / ofs_unit) * ofs_unit) + ofs_unit;
+ segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs);
+ ofs += sbi->segs_per_sec;
+
if (segno < TOTAL_SEGS(sbi)) {
+ int i;
+
/* skip intermediate segments in a section */
- if (segno % ofs_unit)
+ if (segno % sbi->segs_per_sec)
goto next;
- /* skip if whole section is not prefree */
- next_segno = find_next_zero_bit(prefree_segmap,
- TOTAL_SEGS(sbi), segno + 1);
- if (next_segno - segno < ofs_unit)
+ /* skip if the section is currently used */
+ if (sec_usage_check(sbi, GET_SECNO(sbi, segno)))
goto next;
+ /* skip if whole section is not prefree */
+ for (i = 1; i < sbi->segs_per_sec; i++)
+ if (!test_bit(segno + i, prefree_segmap))
+ goto next;
+
/* skip if whole section was not free at the last checkpoint */
- for (i = 0; i < ofs_unit; i++)
- if (get_seg_entry(sbi, segno)->ckpt_valid_blocks)
+ for (i = 0; i < sbi->segs_per_sec; i++)
+ if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks)
goto next;
+
return segno;
}
return NULL_SEGNO;
@@ -561,15 +568,13 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
int type, bool force)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
- unsigned int ofs_unit;
if (force) {
new_curseg(sbi, type, true);
goto out;
}
- ofs_unit = need_SSR(sbi) ? 1 : sbi->segs_per_sec;
- curseg->next_segno = check_prefree_segments(sbi, ofs_unit, type);
+ curseg->next_segno = check_prefree_segments(sbi, type);
if (curseg->next_segno != NULL_SEGNO)
change_curseg(sbi, type, false);
@@ -1558,14 +1563,13 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
}
}
-static int init_victim_segmap(struct f2fs_sb_info *sbi)
+static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
+ unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
- dirty_i->victim_segmap[FG_GC] = kzalloc(bitmap_size, GFP_KERNEL);
- dirty_i->victim_segmap[BG_GC] = kzalloc(bitmap_size, GFP_KERNEL);
- if (!dirty_i->victim_segmap[FG_GC] || !dirty_i->victim_segmap[BG_GC])
+ dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!dirty_i->victim_secmap)
return -ENOMEM;
return 0;
}
@@ -1592,7 +1596,7 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
}
init_dirty_segmap(sbi);
- return init_victim_segmap(sbi);
+ return init_victim_secmap(sbi);
}
/*
@@ -1679,18 +1683,10 @@ static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
mutex_unlock(&dirty_i->seglist_lock);
}
-void reset_victim_segmap(struct f2fs_sb_info *sbi)
-{
- unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
- memset(DIRTY_I(sbi)->victim_segmap[FG_GC], 0, bitmap_size);
-}
-
-static void destroy_victim_segmap(struct f2fs_sb_info *sbi)
+static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-
- kfree(dirty_i->victim_segmap[FG_GC]);
- kfree(dirty_i->victim_segmap[BG_GC]);
+ kfree(dirty_i->victim_secmap);
}
static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
@@ -1705,7 +1701,7 @@ static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
for (i = 0; i < NR_DIRTY_TYPE; i++)
discard_dirty_segmap(sbi, i);
- destroy_victim_segmap(sbi);
+ destroy_victim_secmap(sbi);
SM_I(sbi)->dirty_info = NULL;
kfree(dirty_i);
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index fea9245..4c2cd9e 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -214,7 +214,7 @@ struct dirty_seglist_info {
unsigned long *dirty_segmap[NR_DIRTY_TYPE];
struct mutex seglist_lock; /* lock for segment bitmaps */
int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */
- unsigned long *victim_segmap[2]; /* BG_GC, FG_GC */
+ unsigned long *victim_secmap; /* background GC victims */
};
/* victim selection function for cleaning and SSR */
@@ -616,3 +616,12 @@ static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
- (base + 1) + type;
}
+
+static inline int sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
+{
+ if (IS_CURSEC(sbi, secno))
+ return -1;
+ if (sbi->cur_victim_sec == secno)
+ return -1;
+ return 0;
+}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 252890e..ea325c8 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -26,6 +26,7 @@
#include "f2fs.h"
#include "node.h"
+#include "segment.h"
#include "xattr.h"
static struct kmem_cache *f2fs_inode_cachep;
@@ -458,6 +459,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
+ sbi->cur_victim_sec = NULL_SEGNO;
for (i = 0; i < NR_COUNT_TYPE; i++)
atomic_set(&sbi->nr_pages[i], 0);
--
1.8.1.3.566.gaa39828
In get_node_page, we do not need to call lock_page all the time.
If the node page is cached as uptodate,
1. grab_cache_page locks the page,
2. read_node_page unlocks the page, and
3. lock_page is called for further process.
Let's avoid this.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/node.c | 63 +++++++++++++++++++++++++++++++++-------------------------
1 file changed, 36 insertions(+), 27 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 10cbee9..c5360b7 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -847,32 +847,13 @@ fail:
return ERR_PTR(err);
}
-static int read_node_page(struct page *page, int type)
-{
- struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
- struct node_info ni;
-
- get_node_info(sbi, page->index, &ni);
-
- if (ni.blk_addr == NULL_ADDR) {
- f2fs_put_page(page, 1);
- return -ENOENT;
- }
-
- if (PageUptodate(page)) {
- unlock_page(page);
- return 0;
- }
-
- return f2fs_readpage(sbi, page, ni.blk_addr, type);
-}
-
/*
* Readahead a node page
*/
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
struct address_space *mapping = sbi->node_inode->i_mapping;
+ struct node_info ni;
struct page *apage;
apage = find_get_page(mapping, nid);
@@ -886,22 +867,40 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
if (!apage)
return;
- if (read_node_page(apage, READA) == 0)
+ get_node_info(sbi, nid, &ni);
+
+ if (ni.blk_addr == NULL_ADDR || PageUptodate(apage)) {
+ f2fs_put_page(apage, 1);
+ return;
+ }
+
+ if (f2fs_readpage(sbi, apage, ni.blk_addr, READA) == 0)
f2fs_put_page(apage, 0);
return;
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
- int err;
- struct page *page;
struct address_space *mapping = sbi->node_inode->i_mapping;
+ struct page *page;
+ struct node_info ni;
+ int err;
page = grab_cache_page(mapping, nid);
if (!page)
return ERR_PTR(-ENOMEM);
- err = read_node_page(page, READ_SYNC);
+ get_node_info(sbi, nid, &ni);
+
+ if (ni.blk_addr == NULL_ADDR) {
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-ENOENT);
+ }
+
+ if (PageUptodate(page))
+ goto got_it;
+
+ err = f2fs_readpage(sbi, page, ni.blk_addr, READ_SYNC);
if (err)
return ERR_PTR(err);
@@ -910,6 +909,7 @@ struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
+got_it:
BUG_ON(nid != nid_of_node(page));
mark_page_accessed(page);
return page;
@@ -923,6 +923,7 @@ struct page *get_node_page_ra(struct page *parent, int start)
{
struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
struct address_space *mapping = sbi->node_inode->i_mapping;
+ struct node_info ni;
int i, end;
int err = 0;
nid_t nid;
@@ -936,10 +937,18 @@ struct page *get_node_page_ra(struct page *parent, int start)
page = grab_cache_page(mapping, nid);
if (!page)
return ERR_PTR(-ENOMEM);
- else if (PageUptodate(page))
+
+ get_node_info(sbi, nid, &ni);
+
+ if (ni.blk_addr == NULL_ADDR) {
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-ENOENT);
+ }
+
+ if (PageUptodate(page))
goto page_hit;
- err = read_node_page(page, READ_SYNC);
+ err = f2fs_readpage(sbi, page, ni.blk_addr, READ_SYNC);
if (err)
return ERR_PTR(err);
@@ -956,7 +965,7 @@ struct page *get_node_page_ra(struct page *parent, int start)
lock_page(page);
page_hit:
- if (PageError(page)) {
+ if (!PageUptodate(page)) {
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
--
1.8.1.3.566.gaa39828
2013/4/1, Jaegeuk Kim <[email protected]>:
> Like below, there are 8 segment bitmaps for SSR victim candidates.
>
> enum dirty_type {
> DIRTY_HOT_DATA, /* dirty segments assigned as hot data logs */
> DIRTY_WARM_DATA, /* dirty segments assigned as warm data logs */
> DIRTY_COLD_DATA, /* dirty segments assigned as cold data logs */
> DIRTY_HOT_NODE, /* dirty segments assigned as hot node logs */
> DIRTY_WARM_NODE, /* dirty segments assigned as warm node logs */
> DIRTY_COLD_NODE, /* dirty segments assigned as cold node logs */
> DIRTY, /* to count # of dirty segments */
> PRE, /* to count # of entirely obsolete segments */
> NR_DIRTY_TYPE
> };
>
> The upper 6 bitmaps indicates segments dirtied by active log areas
> respectively.
> And, the DIRTY bitmap integrates all the 6 bitmaps.
>
> For example,
> o DIRTY_HOT_DATA : 1010000
> o DIRTY_WARM_DATA: 0100000
> o DIRTY_COLD_DATA: 0001000
> o DIRTY_HOT_NODE : 0000010
> o DIRTY_WARM_NODE: 0000001
> o DIRTY_COLD_NODE: 0000000
> In this case,
> o DIRTY : 1111011,
>
> which means that we should guarantee the consistency between DIRTY and
> other
> bitmaps concretely.
>
> However, the SSR mode selects victims freely from any log types, which can
> set
> multiple bits across the various bitmap types.
>
> So, this patch eliminates this inconsistency.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
Looks good to me~
Reviewed-by: Namjae Jeon <[email protected]>
Thanks.
2013/4/1, Jaegeuk Kim <[email protected]>:
> Let's use a macro to get the total number of sections.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
Looks reasonable to me~
Reviewed-by: Namjae Jeon <[email protected]>
Thanks.
> ---
2013/4/1, Jaegeuk Kim <[email protected]>:
> A macro should not use duplicate parameter names.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
Looks reasonable to me ~
Reviewed-by: Namjae Jeon <[email protected]>
Thanks.
> ---
2013/4/1, Jaegeuk Kim <[email protected]>:
> This patch removes a bitmap for victim segments selected by foreground GC,
> and
> modifies the other bitmap for victim segments selected by background GC.
>
> 1) foreground GC bitmap
> : We don't need to manage this, since we just only one previous victim
> section
> number instead of the whole victim history.
> The f2fs uses the victim section number in order not to allocate
> currently
> GC'ed section to current active logs.
>
> 2) background GC bitmap
> : This bitmap is used to avoid selecting victims repeatedly by background
> GCs.
> In addition, the victims are able to be selected by foreground GCs,
> since
> there is no need to read victim blocks during foreground GCs.
>
> By the fact that the foreground GC reclaims segments in a section unit,
> it'd
> be better to manage this bitmap based on the section granularity.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
> ---
> fs/f2fs/checkpoint.c | 2 --
> fs/f2fs/debug.c | 2 +-
> fs/f2fs/f2fs.h | 2 +-
> fs/f2fs/gc.c | 42 ++++++++++++++++++---------------
> fs/f2fs/segment.c | 66
> ++++++++++++++++++++++++----------------------------
> fs/f2fs/segment.h | 11 ++++++++-
> fs/f2fs/super.c | 2 ++
> 7 files changed, 68 insertions(+), 59 deletions(-)
>
> diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
> index d947e66..93fd57d 100644
> --- a/fs/f2fs/checkpoint.c
> +++ b/fs/f2fs/checkpoint.c
> @@ -748,8 +748,6 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool
> is_umount)
> flush_nat_entries(sbi);
> flush_sit_entries(sbi);
>
> - reset_victim_segmap(sbi);
> -
> /* unlock all the fs_lock[] in do_checkpoint() */
> do_checkpoint(sbi, is_umount);
>
> diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
> index 20b8794..c3bf343 100644
> --- a/fs/f2fs/debug.c
> +++ b/fs/f2fs/debug.c
> @@ -153,7 +153,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
> /* build dirty segmap */
> si->base_mem += sizeof(struct dirty_seglist_info);
> si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(TOTAL_SEGS(sbi));
> - si->base_mem += 2 * f2fs_bitmap_size(TOTAL_SEGS(sbi));
> + si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi));
>
> /* buld nm */
> si->base_mem += sizeof(struct f2fs_nm_info);
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 77e2eb0..71eacd3 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -410,6 +410,7 @@ struct f2fs_sb_info {
> /* for cleaning operations */
> struct mutex gc_mutex; /* mutex for GC */
> struct f2fs_gc_kthread *gc_thread; /* GC thread */
> + unsigned int cur_victim_sec; /* current victim section num */
>
> /*
> * for stat information.
> @@ -979,7 +980,6 @@ int lookup_journal_in_cursum(struct f2fs_summary_block
> *,
> int, unsigned int, int);
> void flush_sit_entries(struct f2fs_sb_info *);
> int build_segment_manager(struct f2fs_sb_info *);
> -void reset_victim_segmap(struct f2fs_sb_info *);
> void destroy_segment_manager(struct f2fs_sb_info *);
>
> /*
> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> index 2e3eb2d..01a1d60 100644
> --- a/fs/f2fs/gc.c
> +++ b/fs/f2fs/gc.c
> @@ -160,18 +160,21 @@ static unsigned int get_max_cost(struct f2fs_sb_info
> *sbi,
> static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
> {
> struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
> - unsigned int segno;
> + unsigned int hint = 0;
> + unsigned int secno;
>
> /*
> * If the gc_type is FG_GC, we can select victim segments
> * selected by background GC before.
> * Those segments guarantee they have small valid blocks.
> */
> - segno = find_next_bit(dirty_i->victim_segmap[BG_GC],
> - TOTAL_SEGS(sbi), 0);
> - if (segno < TOTAL_SEGS(sbi)) {
> - clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
> - return segno;
> +next:
> + secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
> + if (secno < TOTAL_SECS(sbi)) {
> + if (sec_usage_check(sbi, secno))
> + goto next;
> + clear_bit(secno, dirty_i->victim_secmap);
> + return secno * sbi->segs_per_sec;
> }
> return NULL_SEGNO;
> }
> @@ -234,7 +237,6 @@ static int get_victim_by_default(struct f2fs_sb_info
> *sbi,
> {
> struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
> struct victim_sel_policy p;
> - unsigned int segno;
> int nsearched = 0;
>
> p.alloc_mode = alloc_mode;
> @@ -253,6 +255,7 @@ static int get_victim_by_default(struct f2fs_sb_info
> *sbi,
>
> while (1) {
> unsigned long cost;
> + unsigned int segno, secno;
The "secno" variable is also used outside of the while loop. So, instead
of limiting its scope to the while loop, we can define it at the start
of the function and use the same variable throughout the function.
>
> segno = find_next_bit(p.dirty_segmap,
> TOTAL_SEGS(sbi), p.offset);
> @@ -265,13 +268,11 @@ static int get_victim_by_default(struct f2fs_sb_info
> *sbi,
> break;
> }
> p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
> + secno = GET_SECNO(sbi, segno);
>
> - if (test_bit(segno, dirty_i->victim_segmap[FG_GC]))
> + if (sec_usage_check(sbi, secno))
> continue;
> - if (gc_type == BG_GC &&
> - test_bit(segno, dirty_i->victim_segmap[BG_GC]))
> - continue;
> - if (IS_CURSEC(sbi, GET_SECNO(sbi, segno)))
> + if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
> continue;
>
> cost = get_gc_cost(sbi, segno, &p);
> @@ -291,13 +292,14 @@ static int get_victim_by_default(struct f2fs_sb_info
> *sbi,
> }
> got_it:
> if (p.min_segno != NULL_SEGNO) {
> - *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
> if (p.alloc_mode == LFS) {
> - int i;
> - for (i = 0; i < p.ofs_unit; i++)
> - set_bit(*result + i,
> - dirty_i->victim_segmap[gc_type]);
> + unsigned int secno = GET_SECNO(sbi, p.min_segno);
> + if (gc_type == FG_GC)
> + sbi->cur_victim_sec = secno;
> + else
> + set_bit(secno, dirty_i->victim_secmap);
> }
> + *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
> }
> mutex_unlock(&dirty_i->seglist_lock);
>
> @@ -662,9 +664,11 @@ gc_more:
> for (i = 0; i < sbi->segs_per_sec; i++)
> do_garbage_collect(sbi, segno + i, &ilist, gc_type);
>
> - if (gc_type == FG_GC &&
> - get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
> + if (gc_type == FG_GC) {
> + sbi->cur_victim_sec = NULL_SEGNO;
> nfree++;
> + WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
> + }
>
> if (has_not_enough_free_secs(sbi, nfree))
> goto gc_more;
> diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
> index b3486f3..d5244f6 100644
> --- a/fs/f2fs/segment.c
> +++ b/fs/f2fs/segment.c
> @@ -69,8 +69,9 @@ static void __remove_dirty_segment(struct f2fs_sb_info
> *sbi, unsigned int segno,
> if (test_and_clear_bit(segno,
> dirty_i->dirty_segmap[dirty_type]))
> dirty_i->nr_dirty[dirty_type]--;
> - clear_bit(segno, dirty_i->victim_segmap[FG_GC]);
> - clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
> + if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
> + clear_bit(GET_SECNO(sbi, segno),
> + dirty_i->victim_secmap);
> }
> }
>
> @@ -296,13 +297,12 @@ static void write_sum_page(struct f2fs_sb_info *sbi,
> f2fs_put_page(page, 1);
> }
>
> -static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi,
> - int ofs_unit, int type)
> +static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int
> type)
> {
> struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
> unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
> - unsigned int segno, next_segno, i;
> - int ofs = 0;
> + unsigned int segno;
> + unsigned int ofs = 0;
>
> /*
> * If there is not enough reserved sections,
> @@ -318,23 +318,30 @@ static unsigned int check_prefree_segments(struct
> f2fs_sb_info *sbi,
> if (IS_NODESEG(type))
> return NULL_SEGNO;
> next:
> - segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs++);
> - ofs = ((segno / ofs_unit) * ofs_unit) + ofs_unit;
> + segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs);
> + ofs += sbi->segs_per_sec;
> +
> if (segno < TOTAL_SEGS(sbi)) {
> + int i;
> +
> /* skip intermediate segments in a section */
> - if (segno % ofs_unit)
> + if (segno % sbi->segs_per_sec)
> goto next;
>
> - /* skip if whole section is not prefree */
> - next_segno = find_next_zero_bit(prefree_segmap,
> - TOTAL_SEGS(sbi), segno + 1);
> - if (next_segno - segno < ofs_unit)
> + /* skip if the section is currently used */
> + if (sec_usage_check(sbi, GET_SECNO(sbi, segno)))
> goto next;
>
> + /* skip if whole section is not prefree */
> + for (i = 1; i < sbi->segs_per_sec; i++)
> + if (!test_bit(segno + i, prefree_segmap))
> + goto next;
> +
> /* skip if whole section was not free at the last checkpoint */
> - for (i = 0; i < ofs_unit; i++)
> - if (get_seg_entry(sbi, segno)->ckpt_valid_blocks)
> + for (i = 0; i < sbi->segs_per_sec; i++)
> + if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks)
> goto next;
> +
> return segno;
> }
> return NULL_SEGNO;
> @@ -561,15 +568,13 @@ static void allocate_segment_by_default(struct
> f2fs_sb_info *sbi,
> int type, bool force)
> {
> struct curseg_info *curseg = CURSEG_I(sbi, type);
> - unsigned int ofs_unit;
>
> if (force) {
> new_curseg(sbi, type, true);
> goto out;
> }
>
> - ofs_unit = need_SSR(sbi) ? 1 : sbi->segs_per_sec;
> - curseg->next_segno = check_prefree_segments(sbi, ofs_unit, type);
> + curseg->next_segno = check_prefree_segments(sbi, type);
>
> if (curseg->next_segno != NULL_SEGNO)
> change_curseg(sbi, type, false);
> @@ -1558,14 +1563,13 @@ static void init_dirty_segmap(struct f2fs_sb_info
> *sbi)
> }
> }
>
> -static int init_victim_segmap(struct f2fs_sb_info *sbi)
> +static int init_victim_secmap(struct f2fs_sb_info *sbi)
> {
> struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
> - unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
> + unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
>
> - dirty_i->victim_segmap[FG_GC] = kzalloc(bitmap_size, GFP_KERNEL);
> - dirty_i->victim_segmap[BG_GC] = kzalloc(bitmap_size, GFP_KERNEL);
> - if (!dirty_i->victim_segmap[FG_GC] || !dirty_i->victim_segmap[BG_GC])
> + dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
> + if (!dirty_i->victim_secmap)
> return -ENOMEM;
> return 0;
> }
> @@ -1592,7 +1596,7 @@ static int build_dirty_segmap(struct f2fs_sb_info
> *sbi)
> }
>
> init_dirty_segmap(sbi);
> - return init_victim_segmap(sbi);
> + return init_victim_secmap(sbi);
> }
>
> /*
> @@ -1679,18 +1683,10 @@ static void discard_dirty_segmap(struct f2fs_sb_info
> *sbi,
> mutex_unlock(&dirty_i->seglist_lock);
> }
>
> -void reset_victim_segmap(struct f2fs_sb_info *sbi)
> -{
> - unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
> - memset(DIRTY_I(sbi)->victim_segmap[FG_GC], 0, bitmap_size);
> -}
> -
> -static void destroy_victim_segmap(struct f2fs_sb_info *sbi)
> +static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
> {
> struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
> -
> - kfree(dirty_i->victim_segmap[FG_GC]);
> - kfree(dirty_i->victim_segmap[BG_GC]);
> + kfree(dirty_i->victim_secmap);
> }
>
> static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
> @@ -1705,7 +1701,7 @@ static void destroy_dirty_segmap(struct f2fs_sb_info
> *sbi)
> for (i = 0; i < NR_DIRTY_TYPE; i++)
> discard_dirty_segmap(sbi, i);
>
> - destroy_victim_segmap(sbi);
> + destroy_victim_secmap(sbi);
> SM_I(sbi)->dirty_info = NULL;
> kfree(dirty_i);
> }
> diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
> index fea9245..4c2cd9e 100644
> --- a/fs/f2fs/segment.h
> +++ b/fs/f2fs/segment.h
> @@ -214,7 +214,7 @@ struct dirty_seglist_info {
> unsigned long *dirty_segmap[NR_DIRTY_TYPE];
> struct mutex seglist_lock; /* lock for segment bitmaps */
> int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */
> - unsigned long *victim_segmap[2]; /* BG_GC, FG_GC */
> + unsigned long *victim_secmap; /* background GC victims */
> };
>
> /* victim selection function for cleaning and SSR */
> @@ -616,3 +616,12 @@ static inline block_t sum_blk_addr(struct f2fs_sb_info
> *sbi, int base, int type)
> le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
> - (base + 1) + type;
> }
> +
> +static inline int sec_usage_check(struct f2fs_sb_info *sbi, unsigned int
> secno)
> +{
> + if (IS_CURSEC(sbi, secno))
> + return -1;
> + if (sbi->cur_victim_sec == secno)
> + return -1;
> + return 0;
> +}
We can combine the two if conditions and use a bool return value.
How about changing it like this?
static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned
int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}
> diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
> index 252890e..ea325c8 100644
> --- a/fs/f2fs/super.c
> +++ b/fs/f2fs/super.c
> @@ -26,6 +26,7 @@
>
> #include "f2fs.h"
> #include "node.h"
> +#include "segment.h"
> #include "xattr.h"
>
> static struct kmem_cache *f2fs_inode_cachep;
> @@ -458,6 +459,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
> sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
> sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
> sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
> + sbi->cur_victim_sec = NULL_SEGNO;
Instead of NULL_SEGNO, could we use NULL_SECNO after declaring it? Even
though NULL_SECNO is not yet defined, it should be defined to maintain
code readability and avoid confusion between sections and segments.
Thanks.
>
> for (i = 0; i < NR_COUNT_TYPE; i++)
> atomic_set(&sbi->nr_pages[i], 0);
> --
> 1.8.1.3.566.gaa39828
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
>
2013/4/1, Jaegeuk Kim <[email protected]>:
> In order to do GC more reliably, I'd like to lock the vicitm summary page
> until its GC is completed, and also prevent any checkpoint process.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
> ---
> fs/f2fs/gc.c | 8 +-------
> fs/f2fs/node.c | 2 +-
> fs/f2fs/super.c | 7 +++++--
> 3 files changed, 7 insertions(+), 10 deletions(-)
>
> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> index 54aceb2..54ac13d 100644
> --- a/fs/f2fs/gc.c
> +++ b/fs/f2fs/gc.c
> @@ -641,12 +641,6 @@ static void do_garbage_collect(struct f2fs_sb_info
> *sbi, unsigned int segno,
> if (IS_ERR(sum_page))
> return;
>
> - /*
> - * CP needs to lock sum_page. In this time, we don't need
> - * to lock this page, because this summary page is not gone anywhere.
> - * Also, this page is not gonna be updated before GC is done.
> - */
> - unlock_page(sum_page);
> sum = page_address(sum_page);
>
> switch (GET_SUM_TYPE((&sum->footer))) {
> @@ -660,7 +654,7 @@ static void do_garbage_collect(struct f2fs_sb_info *sbi,
> unsigned int segno,
> stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
> stat_inc_call_count(sbi->stat_info);
>
> - f2fs_put_page(sum_page, 0);
> + f2fs_put_page(sum_page, 1);
> }
>
> int f2fs_gc(struct f2fs_sb_info *sbi)
> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> index c5360b7..7555fb7 100644
> --- a/fs/f2fs/node.c
> +++ b/fs/f2fs/node.c
> @@ -1148,7 +1148,7 @@ static int f2fs_write_node_pages(struct address_space
> *mapping,
>
> /* First check balancing cached NAT entries */
> if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
> - write_checkpoint(sbi, false);
> + f2fs_sync_fs(sbi->sb, true);
> return 0;
> }
>
> diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
> index ea325c8..161e6c6 100644
> --- a/fs/f2fs/super.c
> +++ b/fs/f2fs/super.c
> @@ -137,10 +137,13 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
> if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
> return 0;
>
> - if (sync)
> + if (sync) {
> + mutex_lock(&sbi->gc_mutex);
> write_checkpoint(sbi, false);
> - else
> + mutex_unlock(&sbi->gc_mutex);
> + } else {
> f2fs_balance_fs(sbi);
> + }
We can remove the { } braces of the else branch because it contains only a single line of code.
Reviewed-by: Namjae Jeon <[email protected]>
Thanks.
>
> return 0;
> }
> --
> 1.8.1.3.566.gaa39828
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
>
2013/4/1, Jaegeuk Kim <[email protected]>:
> The foreground GCs are triggered under not enough free sections.
> So, we should not skip moving valid blocks in the victim segments.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
Looks good to me~
Reviewed-by: Namjae Jeon <[email protected]>
Thanks.
2013/4/1, Jaegeuk Kim <[email protected]>:
> In get_node_page, we do not need to call lock_page all the time.
>
> If the node page is cached as uptodate,
>
> 1. grab_cache_page locks the page,
> 2. read_node_page unlocks the page, and
> 3. lock_page is called for further process.
>
> Let's avoid this.
Instead of removing the function "read_node_page" completely to avoid
the redundant locking, we can simply remove the "unlock" part from
read_node_page and use the same code in ra_node_page, get_node_page,
and get_node_page_ra.
With the current patch, the same code gets repeated in several places,
and IMHO that is not good for modularity.
Please share your opinion.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
> ---
2013/4/1, Jaegeuk Kim <[email protected]>:
> This patch adds a new condition that allocates free segments in the current
> active section even if SSR is needed.
> Otherwise, f2fs cannot allocate remained free segments in the section since
> SSR finds dirty segments only.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
Looks good!
Reviewed-by: Namjae Jeon <[email protected]>
Thanks.
2013/4/1, Jaegeuk Kim <[email protected]>:
> When allocating a new segment under the LFS mode, we should keep the
> section
> boundary.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
You can add
Reviewed-by: Namjae Jeon <[email protected]>
Thanks!
> ---
Hi,
Agreed, and resolved all the issues like below.
Thanks,
change log from v1:
o change local variable position
o change function shape
o add NULL_SECNO
From f1802031a467751df6475bd3f56300137fd2ac34 Mon Sep 17 00:00:00 2001
From: Jaegeuk Kim <[email protected]>
Date: Sun, 31 Mar 2013 13:26:03 +0900
Subject: [PATCH] f2fs: change GC bitmaps to apply the section
granularity
Cc: [email protected], [email protected],
[email protected]
This patch removes a bitmap for victim segments selected by foreground
GC, and
modifies the other bitmap for victim segments selected by background GC.
1) foreground GC bitmap
: We don't need to manage this, since we keep only one previous victim
section
number instead of the whole victim history.
The f2fs uses the victim section number in order not to allocate
currently
GC'ed section to current active logs.
2) background GC bitmap
: This bitmap is used to avoid selecting victims repeatedly by
background GCs.
In addition, the victims are able to be selected by foreground GCs,
since
there is no need to read victim blocks during foreground GCs.
By the fact that the foreground GC reclaims segments in a section
unit, it'd
be better to manage this bitmap based on the section granularity.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/checkpoint.c | 2 --
fs/f2fs/debug.c | 2 +-
fs/f2fs/f2fs.h | 2 +-
fs/f2fs/gc.c | 43 +++++++++++++++++++---------------
fs/f2fs/segment.c | 66
++++++++++++++++++++++++----------------------------
fs/f2fs/segment.h | 10 +++++++-
fs/f2fs/super.c | 2 ++
7 files changed, 68 insertions(+), 59 deletions(-)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index d947e66..93fd57d 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -748,8 +748,6 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool
is_umount)
flush_nat_entries(sbi);
flush_sit_entries(sbi);
- reset_victim_segmap(sbi);
-
/* unlock all the fs_lock[] in do_checkpoint() */
do_checkpoint(sbi, is_umount);
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 20b8794..c3bf343 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -153,7 +153,7 @@ static void update_mem_info(struct f2fs_sb_info
*sbi)
/* build dirty segmap */
si->base_mem += sizeof(struct dirty_seglist_info);
si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(TOTAL_SEGS(sbi));
- si->base_mem += 2 * f2fs_bitmap_size(TOTAL_SEGS(sbi));
+ si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi));
/* buld nm */
si->base_mem += sizeof(struct f2fs_nm_info);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 77e2eb0..71eacd3 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -410,6 +410,7 @@ struct f2fs_sb_info {
/* for cleaning operations */
struct mutex gc_mutex; /* mutex for GC */
struct f2fs_gc_kthread *gc_thread; /* GC thread */
+ unsigned int cur_victim_sec; /* current victim section num */
/*
* for stat information.
@@ -979,7 +980,6 @@ int lookup_journal_in_cursum(struct
f2fs_summary_block *,
int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
-void reset_victim_segmap(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);
/*
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 2e3eb2d..09b8a90 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -160,18 +160,21 @@ static unsigned int get_max_cost(struct
f2fs_sb_info *sbi,
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- unsigned int segno;
+ unsigned int hint = 0;
+ unsigned int secno;
/*
* If the gc_type is FG_GC, we can select victim segments
* selected by background GC before.
* Those segments guarantee they have small valid blocks.
*/
- segno = find_next_bit(dirty_i->victim_segmap[BG_GC],
- TOTAL_SEGS(sbi), 0);
- if (segno < TOTAL_SEGS(sbi)) {
- clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
- return segno;
+next:
+ secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint
++);
+ if (secno < TOTAL_SECS(sbi)) {
+ if (sec_usage_check(sbi, secno))
+ goto next;
+ clear_bit(secno, dirty_i->victim_secmap);
+ return secno * sbi->segs_per_sec;
}
return NULL_SEGNO;
}
@@ -234,7 +237,7 @@ static int get_victim_by_default(struct f2fs_sb_info
*sbi,
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct victim_sel_policy p;
- unsigned int segno;
+ unsigned int secno;
int nsearched = 0;
p.alloc_mode = alloc_mode;
@@ -253,6 +256,7 @@ static int get_victim_by_default(struct f2fs_sb_info
*sbi,
while (1) {
unsigned long cost;
+ unsigned int segno;
segno = find_next_bit(p.dirty_segmap,
TOTAL_SEGS(sbi), p.offset);
@@ -265,13 +269,11 @@ static int get_victim_by_default(struct
f2fs_sb_info *sbi,
break;
}
p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
+ secno = GET_SECNO(sbi, segno);
- if (test_bit(segno, dirty_i->victim_segmap[FG_GC]))
+ if (sec_usage_check(sbi, secno))
continue;
- if (gc_type == BG_GC &&
- test_bit(segno, dirty_i->victim_segmap[BG_GC]))
- continue;
- if (IS_CURSEC(sbi, GET_SECNO(sbi, segno)))
+ if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
continue;
cost = get_gc_cost(sbi, segno, &p);
@@ -291,13 +293,14 @@ static int get_victim_by_default(struct
f2fs_sb_info *sbi,
}
got_it:
if (p.min_segno != NULL_SEGNO) {
- *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
if (p.alloc_mode == LFS) {
- int i;
- for (i = 0; i < p.ofs_unit; i++)
- set_bit(*result + i,
- dirty_i->victim_segmap[gc_type]);
+ secno = GET_SECNO(sbi, p.min_segno);
+ if (gc_type == FG_GC)
+ sbi->cur_victim_sec = secno;
+ else
+ set_bit(secno, dirty_i->victim_secmap);
}
+ *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
}
mutex_unlock(&dirty_i->seglist_lock);
@@ -662,9 +665,11 @@ gc_more:
for (i = 0; i < sbi->segs_per_sec; i++)
do_garbage_collect(sbi, segno + i, &ilist, gc_type);
- if (gc_type == FG_GC &&
- get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
+ if (gc_type == FG_GC) {
+ sbi->cur_victim_sec = NULL_SEGNO;
nfree++;
+ WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
+ }
if (has_not_enough_free_secs(sbi, nfree))
goto gc_more;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index b3486f3..d5244f6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -69,8 +69,9 @@ static void __remove_dirty_segment(struct f2fs_sb_info
*sbi, unsigned int segno,
if (test_and_clear_bit(segno,
dirty_i->dirty_segmap[dirty_type]))
dirty_i->nr_dirty[dirty_type]--;
- clear_bit(segno, dirty_i->victim_segmap[FG_GC]);
- clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
+ if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
+ clear_bit(GET_SECNO(sbi, segno),
+ dirty_i->victim_secmap);
}
}
@@ -296,13 +297,12 @@ static void write_sum_page(struct f2fs_sb_info
*sbi,
f2fs_put_page(page, 1);
}
-static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi,
- int ofs_unit, int type)
+static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi,
int type)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
- unsigned int segno, next_segno, i;
- int ofs = 0;
+ unsigned int segno;
+ unsigned int ofs = 0;
/*
* If there is not enough reserved sections,
@@ -318,23 +318,30 @@ static unsigned int check_prefree_segments(struct
f2fs_sb_info *sbi,
if (IS_NODESEG(type))
return NULL_SEGNO;
next:
- segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs++);
- ofs = ((segno / ofs_unit) * ofs_unit) + ofs_unit;
+ segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs);
+ ofs += sbi->segs_per_sec;
+
if (segno < TOTAL_SEGS(sbi)) {
+ int i;
+
/* skip intermediate segments in a section */
- if (segno % ofs_unit)
+ if (segno % sbi->segs_per_sec)
goto next;
- /* skip if whole section is not prefree */
- next_segno = find_next_zero_bit(prefree_segmap,
- TOTAL_SEGS(sbi), segno + 1);
- if (next_segno - segno < ofs_unit)
+ /* skip if the section is currently used */
+ if (sec_usage_check(sbi, GET_SECNO(sbi, segno)))
goto next;
+ /* skip if whole section is not prefree */
+ for (i = 1; i < sbi->segs_per_sec; i++)
+ if (!test_bit(segno + i, prefree_segmap))
+ goto next;
+
/* skip if whole section was not free at the last checkpoint */
- for (i = 0; i < ofs_unit; i++)
- if (get_seg_entry(sbi, segno)->ckpt_valid_blocks)
+ for (i = 0; i < sbi->segs_per_sec; i++)
+ if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks)
goto next;
+
return segno;
}
return NULL_SEGNO;
@@ -561,15 +568,13 @@ static void allocate_segment_by_default(struct
f2fs_sb_info *sbi,
int type, bool force)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
- unsigned int ofs_unit;
if (force) {
new_curseg(sbi, type, true);
goto out;
}
- ofs_unit = need_SSR(sbi) ? 1 : sbi->segs_per_sec;
- curseg->next_segno = check_prefree_segments(sbi, ofs_unit, type);
+ curseg->next_segno = check_prefree_segments(sbi, type);
if (curseg->next_segno != NULL_SEGNO)
change_curseg(sbi, type, false);
@@ -1558,14 +1563,13 @@ static void init_dirty_segmap(struct
f2fs_sb_info *sbi)
}
}
-static int init_victim_segmap(struct f2fs_sb_info *sbi)
+static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
+ unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
- dirty_i->victim_segmap[FG_GC] = kzalloc(bitmap_size, GFP_KERNEL);
- dirty_i->victim_segmap[BG_GC] = kzalloc(bitmap_size, GFP_KERNEL);
- if (!dirty_i->victim_segmap[FG_GC] || !dirty_i->victim_segmap[BG_GC])
+ dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!dirty_i->victim_secmap)
return -ENOMEM;
return 0;
}
@@ -1592,7 +1596,7 @@ static int build_dirty_segmap(struct f2fs_sb_info
*sbi)
}
init_dirty_segmap(sbi);
- return init_victim_segmap(sbi);
+ return init_victim_secmap(sbi);
}
/*
@@ -1679,18 +1683,10 @@ static void discard_dirty_segmap(struct
f2fs_sb_info *sbi,
mutex_unlock(&dirty_i->seglist_lock);
}
-void reset_victim_segmap(struct f2fs_sb_info *sbi)
-{
- unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
- memset(DIRTY_I(sbi)->victim_segmap[FG_GC], 0, bitmap_size);
-}
-
-static void destroy_victim_segmap(struct f2fs_sb_info *sbi)
+static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-
- kfree(dirty_i->victim_segmap[FG_GC]);
- kfree(dirty_i->victim_segmap[BG_GC]);
+ kfree(dirty_i->victim_secmap);
}
static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
@@ -1705,7 +1701,7 @@ static void destroy_dirty_segmap(struct
f2fs_sb_info *sbi)
for (i = 0; i < NR_DIRTY_TYPE; i++)
discard_dirty_segmap(sbi, i);
- destroy_victim_segmap(sbi);
+ destroy_victim_secmap(sbi);
SM_I(sbi)->dirty_info = NULL;
kfree(dirty_i);
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index fea9245..c1782bd 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -10,6 +10,7 @@
*/
/* constant macro */
#define NULL_SEGNO ((unsigned int)(~0))
+#define NULL_SECNO ((unsigned int)(~0))
/* V: Logical segment # in volume, R: Relative segment # in main area
*/
#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
@@ -214,7 +215,7 @@ struct dirty_seglist_info {
unsigned long *dirty_segmap[NR_DIRTY_TYPE];
struct mutex seglist_lock; /* lock for segment bitmaps */
int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */
- unsigned long *victim_segmap[2]; /* BG_GC, FG_GC */
+ unsigned long *victim_secmap; /* background GC victims */
};
/* victim selection function for cleaning and SSR */
@@ -616,3 +617,10 @@ static inline block_t sum_blk_addr(struct
f2fs_sb_info *sbi, int base, int type)
le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
- (base + 1) + type;
}
+
+static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned
int secno)
+{
+ if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
+ return true;
+ return false;
+}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 252890e..728c20a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -26,6 +26,7 @@
#include "f2fs.h"
#include "node.h"
+#include "segment.h"
#include "xattr.h"
static struct kmem_cache *f2fs_inode_cachep;
@@ -458,6 +459,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
+ sbi->cur_victim_sec = NULL_SECNO;
for (i = 0; i < NR_COUNT_TYPE; i++)
atomic_set(&sbi->nr_pages[i], 0);
--
1.8.1.3.566.gaa39828
--
Jaegeuk Kim
Samsung
Hi,
Agreed, and send v2.
Change log from v1:
o remain read_node_page and remove the lock part
From 04006aecac2882c574fe8a7de926bc52c73a8ad1 Mon Sep 17 00:00:00 2001
From: Jaegeuk Kim <[email protected]>
Date: Sun, 31 Mar 2013 12:47:20 +0900
Subject: [PATCH] f2fs: remove redundant lock_page calls
Cc: [email protected], [email protected],
[email protected]
In get_node_page, we do not need to call lock_page all the time.
If the node page is cached as uptodate,
1. grab_cache_page locks the page,
2. read_node_page unlocks the page, and
3. lock_page is called for further process.
Let's avoid this.
Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/node.c | 44 +++++++++++++++++++++++++++-----------------
fs/f2fs/node.h | 3 +++
2 files changed, 30 insertions(+), 17 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 10cbee9..e3484b5 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -847,6 +847,12 @@ fail:
return ERR_PTR(err);
}
+/*
+ * Caller should do after getting the following values.
+ * 0: f2fs_put_page(page, 0)
+ * LOCKED_PAGE: f2fs_put_page(page, 1)
+ * error: nothing
+ */
static int read_node_page(struct page *page, int type)
{
struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
@@ -859,10 +865,8 @@ static int read_node_page(struct page *page, int
type)
return -ENOENT;
}
- if (PageUptodate(page)) {
- unlock_page(page);
- return 0;
- }
+ if (PageUptodate(page))
+ return LOCKED_PAGE;
return f2fs_readpage(sbi, page, ni.blk_addr, type);
}
@@ -874,6 +878,7 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t
nid)
{
struct address_space *mapping = sbi->node_inode->i_mapping;
struct page *apage;
+ int err;
apage = find_get_page(mapping, nid);
if (apage && PageUptodate(apage)) {
@@ -886,30 +891,36 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t
nid)
if (!apage)
return;
- if (read_node_page(apage, READA) == 0)
+ err = read_node_page(apage, READA);
+ if (err == 0)
f2fs_put_page(apage, 0);
+ else if (err == LOCKED_PAGE)
+ f2fs_put_page(apage, 1);
return;
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
- int err;
- struct page *page;
struct address_space *mapping = sbi->node_inode->i_mapping;
+ struct page *page;
+ int err;
page = grab_cache_page(mapping, nid);
if (!page)
return ERR_PTR(-ENOMEM);
err = read_node_page(page, READ_SYNC);
- if (err)
- return ERR_PTR(err);
+ if (err < 0)
+ return ERR_PTR(err);
+ else if (err == LOCKED_PAGE)
+ goto got_it;
lock_page(page);
if (!PageUptodate(page)) {
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
+got_it:
BUG_ON(nid != nid_of_node(page));
mark_page_accessed(page);
return page;
@@ -923,10 +934,9 @@ struct page *get_node_page_ra(struct page *parent,
int start)
{
struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
struct address_space *mapping = sbi->node_inode->i_mapping;
- int i, end;
- int err = 0;
- nid_t nid;
struct page *page;
+ int err, i, end;
+ nid_t nid;
/* First, try getting the desired direct node. */
nid = get_nid(parent, start, false);
@@ -936,12 +946,12 @@ struct page *get_node_page_ra(struct page *parent,
int start)
page = grab_cache_page(mapping, nid);
if (!page)
return ERR_PTR(-ENOMEM);
- else if (PageUptodate(page))
- goto page_hit;
err = read_node_page(page, READ_SYNC);
- if (err)
- return ERR_PTR(err);
+ if (err < 0)
+ return ERR_PTR(err);
+ else if (err == LOCKED_PAGE)
+ goto page_hit;
/* Then, try readahead for siblings of the desired node */
end = start + MAX_RA_NODE;
@@ -956,7 +966,7 @@ struct page *get_node_page_ra(struct page *parent,
int start)
lock_page(page);
page_hit:
- if (PageError(page)) {
+ if (!PageUptodate(page)) {
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index d009cdf..271a61c 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -29,6 +29,9 @@
/* vector size for gang look-up from nat cache that consists of radix
tree */
#define NATVEC_SIZE 64
+/* return value for read_node_page */
+#define LOCKED_PAGE 1
+
/*
* For node information
*/
--
1.8.1.3.566.gaa39828
--
Jaegeuk Kim
Samsung
2013/4/3, Jaegeuk Kim <[email protected]>:
> Hi,
> Agreed, and resolved all the issues like below.
> Thanks,
>
> change log from v1:
> o change local variable position
> o change function shape
> o add NULL_SECNO
>
> From f1802031a467751df6475bd3f56300137fd2ac34 Mon Sep 17 00:00:00 2001
> From: Jaegeuk Kim <[email protected]>
> Date: Sun, 31 Mar 2013 13:26:03 +0900
> Subject: [PATCH] f2fs: change GC bitmaps to apply the section
> granularity
> Cc: [email protected], [email protected],
> [email protected]
>
> This patch removes a bitmap for victim segments selected by foreground
> GC, and
> modifies the other bitmap for victim segments selected by background GC.
>
> 1) foreground GC bitmap
> : We don't need to manage this, since we just only one previous victim
> section
> number instead of the whole victim history.
> The f2fs uses the victim section number in order not to allocate
> currently
> GC'ed section to current active logs.
>
> 2) background GC bitmap
> : This bitmap is used to avoid selecting victims repeatedly by
> background GCs.
> In addition, the victims are able to be selected by foreground GCs,
> since
> there is no need to read victim blocks during foreground GCs.
>
> By the fact that the foreground GC reclaims segments in a section
> unit, it'd
> be better to manage this bitmap based on the section granularity.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
Looks good to me!
Reviewed-by: Namjae Jeon <[email protected]>
Thanks.
> ---
2013/4/3, Jaegeuk Kim <[email protected]>:
Hi. Jaegeuk.
> Hi,
>
> Agreed, and send v2.
>
> Change log from v1:
> o remain read_node_page and remove the lock part
>
> From 04006aecac2882c574fe8a7de926bc52c73a8ad1 Mon Sep 17 00:00:00 2001
> From: Jaegeuk Kim <[email protected]>
> Date: Sun, 31 Mar 2013 12:47:20 +0900
> Subject: [PATCH] f2fs: remove redundant lock_page calls
> Cc: [email protected], [email protected],
> [email protected]
>
> In get_node_page, we do not need to call lock_page all the time.
>
> If the node page is cached as uptodate,
>
> 1. grab_cache_page locks the page,
> 2. read_node_page unlocks the page, and
> 3. lock_page is called for further process.
>
> Let's avoid this.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
Yes, Looks good!
Reviewed-by: Namjae Jeon <[email protected]>
Thanks.
> ---