2022-10-14 20:40:24

by Ojaswin Mujoo

Subject: [PATCH v2 3/8] ext4: Refactor code in ext4_mb_normalize_request() and ext4_mb_use_preallocated()

In ext4_mb_normalize_request() and ext4_mb_use_preallocated(), rename the
preallocation list iterator from "pa" to "tmp_pa" for consistency, and cache
each PA's logical start and end in new tmp_pa_start/tmp_pa_end locals so they
are computed once per iteration and the overlap checks are easier to read.

There are no functional changes in this patch.

Signed-off-by: Ojaswin Mujoo <[email protected]>
Reviewed-by: Ritesh Harjani (IBM) <[email protected]>
Reviewed-by: Jan Kara <[email protected]>
---
fs/ext4/mballoc.c | 97 ++++++++++++++++++++++++-----------------------
1 file changed, 49 insertions(+), 48 deletions(-)
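
[Editor's note, not part of the patch: below is a minimal userspace sketch of
the pattern the refactor applies -- compute a PA's logical start and end once
per loop iteration into named locals and use those in the overlap checks. The
struct pa here and the lack of a cluster-to-block conversion are simplified
stand-ins, not the real ext4 definitions; the actual code uses
struct ext4_prealloc_space and EXT4_C2B().]

#include <stdio.h>

/* Simplified stand-in for struct ext4_prealloc_space (hypothetical fields) */
struct pa {
	unsigned int pa_lstart;	/* logical start of the preallocated range */
	unsigned int pa_len;	/* length of the range, in blocks here */
};

int main(void)
{
	struct pa pas[] = { { 10, 4 }, { 32, 8 }, { 70, 5 } };
	unsigned int start = 0, end = 64;	/* normalized request window */
	size_t i;

	for (i = 0; i < sizeof(pas) / sizeof(pas[0]); i++) {
		/* computed once per iteration and reused below -- mirrors the
		 * tmp_pa_start/tmp_pa_end locals introduced by this patch */
		unsigned int tmp_pa_start = pas[i].pa_lstart;
		unsigned int tmp_pa_end = pas[i].pa_lstart + pas[i].pa_len;

		/* skip PAs the [start, end) window does not overlap with */
		if (tmp_pa_start >= end || tmp_pa_end <= start)
			continue;

		printf("window [%u, %u) overlaps PA [%u, %u)\n",
		       start, end, tmp_pa_start, tmp_pa_end);
	}
	return 0;
}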

diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 45cb92510e5c..89dcb72869cd 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3999,7 +3999,8 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
loff_t orig_size __maybe_unused;
ext4_lblk_t start;
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
- struct ext4_prealloc_space *pa;
+ struct ext4_prealloc_space *tmp_pa;
+ ext4_lblk_t tmp_pa_start, tmp_pa_end;

/* do normalize only data requests, metadata requests
do not need preallocation */
@@ -4102,56 +4103,53 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,

/* check we don't cross already preallocated blocks */
rcu_read_lock();
- list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
- ext4_lblk_t pa_end;
-
- if (pa->pa_deleted)
+ list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
+ if (tmp_pa->pa_deleted)
continue;
- spin_lock(&pa->pa_lock);
- if (pa->pa_deleted) {
- spin_unlock(&pa->pa_lock);
+ spin_lock(&tmp_pa->pa_lock);
+ if (tmp_pa->pa_deleted) {
+ spin_unlock(&tmp_pa->pa_lock);
continue;
}

- pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
- pa->pa_len);
+ tmp_pa_start = tmp_pa->pa_lstart;
+ tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);

/* PA must not overlap original request */
- BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
- ac->ac_o_ex.fe_logical < pa->pa_lstart));
+ BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
+ ac->ac_o_ex.fe_logical < tmp_pa_start));

/* skip PAs this normalized request doesn't overlap with */
- if (pa->pa_lstart >= end || pa_end <= start) {
- spin_unlock(&pa->pa_lock);
+ if (tmp_pa_start >= end || tmp_pa_end <= start) {
+ spin_unlock(&tmp_pa->pa_lock);
continue;
}
- BUG_ON(pa->pa_lstart <= start && pa_end >= end);
+ BUG_ON(tmp_pa_start <= start && tmp_pa_end >= end);

/* adjust start or end to be adjacent to this pa */
- if (pa_end <= ac->ac_o_ex.fe_logical) {
- BUG_ON(pa_end < start);
- start = pa_end;
- } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
- BUG_ON(pa->pa_lstart > end);
- end = pa->pa_lstart;
+ if (tmp_pa_end <= ac->ac_o_ex.fe_logical) {
+ BUG_ON(tmp_pa_end < start);
+ start = tmp_pa_end;
+ } else if (tmp_pa_start > ac->ac_o_ex.fe_logical) {
+ BUG_ON(tmp_pa_start > end);
+ end = tmp_pa_start;
}
- spin_unlock(&pa->pa_lock);
+ spin_unlock(&tmp_pa->pa_lock);
}
rcu_read_unlock();
size = end - start;

/* XXX: extra loop to check we really don't overlap preallocations */
rcu_read_lock();
- list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
- ext4_lblk_t pa_end;
+ list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
+ spin_lock(&tmp_pa->pa_lock);
+ if (tmp_pa->pa_deleted == 0) {
+ tmp_pa_start = tmp_pa->pa_lstart;
+ tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);

- spin_lock(&pa->pa_lock);
- if (pa->pa_deleted == 0) {
- pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
- pa->pa_len);
- BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
+ BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
}
- spin_unlock(&pa->pa_lock);
+ spin_unlock(&tmp_pa->pa_lock);
}
rcu_read_unlock();

@@ -4361,7 +4359,8 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
int order, i;
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_locality_group *lg;
- struct ext4_prealloc_space *pa, *cpa = NULL;
+ struct ext4_prealloc_space *tmp_pa, *cpa = NULL;
+ ext4_lblk_t tmp_pa_start, tmp_pa_end;
ext4_fsblk_t goal_block;

/* only data can be preallocated */
@@ -4370,18 +4369,20 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)

/* first, try per-file preallocation */
rcu_read_lock();
- list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
+ list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {

/* all fields in this condition don't change,
* so we can skip locking for them */
- if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
- ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
- EXT4_C2B(sbi, pa->pa_len)))
+ tmp_pa_start = tmp_pa->pa_lstart;
+ tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+
+ if (ac->ac_o_ex.fe_logical < tmp_pa_start ||
+ ac->ac_o_ex.fe_logical >= tmp_pa_end)
continue;

/* non-extent files can't have physical blocks past 2^32 */
if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
- (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
+ (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
EXT4_MAX_BLOCK_FILE_PHYS)) {
/*
* Since PAs don't overlap, we won't find any
@@ -4391,16 +4392,16 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
}

/* found preallocated blocks, use them */
- spin_lock(&pa->pa_lock);
- if (pa->pa_deleted == 0 && pa->pa_free) {
- atomic_inc(&pa->pa_count);
- ext4_mb_use_inode_pa(ac, pa);
- spin_unlock(&pa->pa_lock);
+ spin_lock(&tmp_pa->pa_lock);
+ if (tmp_pa->pa_deleted == 0 && tmp_pa->pa_free) {
+ atomic_inc(&tmp_pa->pa_count);
+ ext4_mb_use_inode_pa(ac, tmp_pa);
+ spin_unlock(&tmp_pa->pa_lock);
ac->ac_criteria = 10;
rcu_read_unlock();
return true;
}
- spin_unlock(&pa->pa_lock);
+ spin_unlock(&tmp_pa->pa_lock);
}
rcu_read_unlock();

@@ -4424,16 +4425,16 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
*/
for (i = order; i < PREALLOC_TB_SIZE; i++) {
rcu_read_lock();
- list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
+ list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
pa_inode_list) {
- spin_lock(&pa->pa_lock);
- if (pa->pa_deleted == 0 &&
- pa->pa_free >= ac->ac_o_ex.fe_len) {
+ spin_lock(&tmp_pa->pa_lock);
+ if (tmp_pa->pa_deleted == 0 &&
+ tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {

cpa = ext4_mb_check_group_pa(goal_block,
- pa, cpa);
+ tmp_pa, cpa);
}
- spin_unlock(&pa->pa_lock);
+ spin_unlock(&tmp_pa->pa_lock);
}
rcu_read_unlock();
}
--
2.31.1