2023-08-26 14:07:58

by Kemeng Shi

Subject: [PATCH v6 01/11] ext4: factor out code to update block bitmap and group descriptor on disk from ext4_mb_mark_bb

There are several reasons to add a general function to update the block
bitmap and group descriptor on disk:
1. Pair the behavior of allocating and freeing bits. For example,
ext4_mb_new_blocks_simple updates free_clusters in struct flex_groups
via ext4_mb_mark_bb, while ext4_free_blocks_simple forgets to do so.
2. Remove repeated code that reads from disk, updates and writes back to disk.
3. Reduce the future unit-test mocks needed to catch real IO that updates
structures on disk.

Signed-off-by: Kemeng Shi <[email protected]>
Reviewed-by: Ojaswin Mujoo <[email protected]>
---
fs/ext4/mballoc.c | 169 +++++++++++++++++++++++++++-------------------
1 file changed, 99 insertions(+), 70 deletions(-)

diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c91db9f57524..e2be572deb75 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3952,6 +3952,100 @@ void ext4_exit_mballoc(void)
ext4_groupinfo_destroy_slabs();
}

+/*
+ * Collect global setting to reduce the number of variable passing to
+ * ext4_mb_mark_context. Pass target group blocks range directly to
+ * reuse the prepared global setting for multiple block ranges and
+ * to show clearly the specific block range will be marked.
+ */
+struct ext4_mark_context {
+ struct super_block *sb;
+ int state;
+};
+
+static inline void ext4_mb_prepare_mark_context(struct ext4_mark_context *mc,
+ struct super_block *sb,
+ int state)
+{
+ mc->sb = sb;
+ mc->state = state;
+}
+
+static int
+ext4_mb_mark_context(struct ext4_mark_context *mc, ext4_group_t group,
+ ext4_grpblk_t blkoff, ext4_grpblk_t len)
+{
+ struct super_block *sb = mc->sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct buffer_head *bitmap_bh = NULL;
+ struct ext4_group_desc *gdp;
+ struct buffer_head *gdp_bh;
+ int err;
+ unsigned int i, already, changed;
+
+ bitmap_bh = ext4_read_block_bitmap(sb, group);
+ if (IS_ERR(bitmap_bh))
+ return PTR_ERR(bitmap_bh);
+
+ err = -EIO;
+ gdp = ext4_get_group_desc(sb, group, &gdp_bh);
+ if (!gdp)
+ goto out_err;
+
+ ext4_lock_group(sb, group);
+ if (ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
+ gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
+ ext4_free_group_clusters_set(sb, gdp,
+ ext4_free_clusters_after_init(sb, group, gdp));
+ }
+
+ already = 0;
+ for (i = 0; i < len; i++)
+ if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
+ mc->state)
+ already++;
+ changed = len - already;
+
+ if (mc->state) {
+ mb_set_bits(bitmap_bh->b_data, blkoff, len);
+ ext4_free_group_clusters_set(sb, gdp,
+ ext4_free_group_clusters(sb, gdp) - changed);
+ } else {
+ mb_clear_bits(bitmap_bh->b_data, blkoff, len);
+ ext4_free_group_clusters_set(sb, gdp,
+ ext4_free_group_clusters(sb, gdp) + changed);
+ }
+
+ ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
+ ext4_group_desc_csum_set(sb, group, gdp);
+ ext4_unlock_group(sb, group);
+
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi, group);
+ struct flex_groups *fg = sbi_array_rcu_deref(sbi,
+ s_flex_groups, flex_group);
+
+ if (mc->state)
+ atomic64_sub(changed, &fg->free_clusters);
+ else
+ atomic64_add(changed, &fg->free_clusters);
+ }
+
+ err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
+ if (err)
+ goto out_err;
+ err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
+ if (err)
+ goto out_err;
+
+ sync_dirty_buffer(bitmap_bh);
+ sync_dirty_buffer(gdp_bh);
+
+out_err:
+ brelse(bitmap_bh);
+ return err;
+}

/*
* Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
@@ -4078,16 +4172,14 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
int len, int state)
{
- struct buffer_head *bitmap_bh = NULL;
- struct ext4_group_desc *gdp;
- struct buffer_head *gdp_bh;
+ struct ext4_mark_context mc;
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_group_t group;
ext4_grpblk_t blkoff;
- int i, err = 0;
- int already;
- unsigned int clen, clen_changed, thisgrp_len;
+ int err = 0;
+ unsigned int clen, thisgrp_len;

+ ext4_mb_prepare_mark_context(&mc, sb, state);
while (len > 0) {
ext4_get_group_no_and_offset(sb, block, &group, &blkoff);

@@ -4107,80 +4199,17 @@ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
ext4_error(sb, "Marking blocks in system zone - "
"Block = %llu, len = %u",
block, thisgrp_len);
- bitmap_bh = NULL;
- break;
- }
-
- bitmap_bh = ext4_read_block_bitmap(sb, group);
- if (IS_ERR(bitmap_bh)) {
- err = PTR_ERR(bitmap_bh);
- bitmap_bh = NULL;
break;
}

- err = -EIO;
- gdp = ext4_get_group_desc(sb, group, &gdp_bh);
- if (!gdp)
- break;
-
- ext4_lock_group(sb, group);
- already = 0;
- for (i = 0; i < clen; i++)
- if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
- !state)
- already++;
-
- clen_changed = clen - already;
- if (state)
- mb_set_bits(bitmap_bh->b_data, blkoff, clen);
- else
- mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
- if (ext4_has_group_desc_csum(sb) &&
- (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
- gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
- ext4_free_group_clusters_set(sb, gdp,
- ext4_free_clusters_after_init(sb, group, gdp));
- }
- if (state)
- clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
- else
- clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
-
- ext4_free_group_clusters_set(sb, gdp, clen);
- ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
- ext4_group_desc_csum_set(sb, group, gdp);
-
- ext4_unlock_group(sb, group);
-
- if (sbi->s_log_groups_per_flex) {
- ext4_group_t flex_group = ext4_flex_group(sbi, group);
- struct flex_groups *fg = sbi_array_rcu_deref(sbi,
- s_flex_groups, flex_group);
-
- if (state)
- atomic64_sub(clen_changed, &fg->free_clusters);
- else
- atomic64_add(clen_changed, &fg->free_clusters);
-
- }
-
- err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
- if (err)
- break;
- sync_dirty_buffer(bitmap_bh);
- err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
- sync_dirty_buffer(gdp_bh);
+ err = ext4_mb_mark_context(&mc, group, blkoff, clen);
if (err)
break;

block += thisgrp_len;
len -= thisgrp_len;
- brelse(bitmap_bh);
BUG_ON(len < 0);
}
-
- if (err)
- brelse(bitmap_bh);
}

/*
--
2.30.0



2023-08-31 23:16:38

by Kemeng Shi

Subject: Re: [PATCH v6 01/11] ext4: factor out code to update block bitmap and group descriptor on disk from ext4_mb_mark_bb



on 8/31/2023 8:33 PM, Ritesh Harjani wrote:
> Kemeng Shi <[email protected]> writes:
>
> Hello Kemeng,
>
>> There are several reasons to add a general function to update the block
>> bitmap and group descriptor on disk:
>
> ... named ext4_mb_mark_context(<params>)
>
>> 1. Pair the behavior of allocating and freeing bits. For example,
>> ext4_mb_new_blocks_simple updates free_clusters in struct flex_groups
>> via ext4_mb_mark_bb, while ext4_free_blocks_simple forgets to do so.
>> 2. Remove repeated code that reads from disk, updates and writes back to disk.
>> 3. Reduce the future unit-test mocks needed to catch real IO that updates
>> structures on disk.
>>
>> Signed-off-by: Kemeng Shi <[email protected]>
>> Reviewed-by: Ojaswin Mujoo <[email protected]>
>> ---
>> fs/ext4/mballoc.c | 169 +++++++++++++++++++++++++++-------------------
>> 1 file changed, 99 insertions(+), 70 deletions(-)
>>
>> diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
>> index c91db9f57524..e2be572deb75 100644
>> --- a/fs/ext4/mballoc.c
>> +++ b/fs/ext4/mballoc.c
>> @@ -3952,6 +3952,100 @@ void ext4_exit_mballoc(void)
>> ext4_groupinfo_destroy_slabs();
>> }
>>
>> +/*
>> + * Collect global setting to reduce the number of variable passing to
>> + * ext4_mb_mark_context. Pass target group blocks range directly to
>> + * reuse the prepared global setting for multiple block ranges and
>> + * to show clearly the specific block range will be marked.
>> + */
>> +struct ext4_mark_context {
>> + struct super_block *sb;
>> + int state;
>> +};
>
> This structure definition does not reflect its name.
> Why can't we also add cblk, clen & flags to it?
>
> I think the idea of defining a new function named
> ext4_mb_prepare_mark_context() was that we can prepare "struct ext4_mark_context"
> with different cblk, clen & flags arguments for cases where we might
> have to call ext4_mb_mark_context() more than once in the same function
> or call ext4_mb_mark_context() anywhere but at the start of the function.
>
> As I see it, in the current series we call
> ext4_mb_prepare_mark_context() at the start of every function. Just for
> that purpose we don't need an extra function, right? We can do the
> initialization directly when declaring the structure variable itself
> (like you did in the previous version).
>
Hi Ritesh, thanks for the reply. The ext4_mark_context structure aims to
reduce the number of variables passed to ext4_mb_mark_context. If we have
to prepare many members in ext4_mb_prepare_mark_context, the
too-many-variables problem just moves into ext4_mb_prepare_mark_context.
The name ext4_mark_context may not be ideal. What I actually want is a
structure that collects the information which is not strongly relevant to
marking block bits. That way we can initialize it at the beginning of the
function and then neither pay attention to those values nor pass them
individually in each call to ext4_mb_mark_context. Instead, each call to
ext4_mb_mark_context only names the information that is useful for that
call.
This design also achieves the goal of setting up ext4_mark_context once
for multiple uses in the same function, since ext4_mark_context is
unlikely to change after it is initialized at the beginning.
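
To illustrate the intended pattern, here is a minimal sketch built from the
helpers added in this patch; the second-range variables (group2, blkoff2,
clen2) are placeholders for illustration, not code from the series:

	struct ext4_mark_context mc;
	int err;

	/* collect the settings shared by every range in this function */
	ext4_mb_prepare_mark_context(&mc, sb, state);

	/* each call only passes the range that actually varies */
	err = ext4_mb_mark_context(&mc, group, blkoff, clen);
	if (!err)
		err = ext4_mb_mark_context(&mc, group2, blkoff2, clen2);
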
> What do you think of the approach where we add cblk, clen & flags
> members to struct ext4_mark_context? Do you see any problems/difficulties
> with that design?
>
The proposed design looks good to me. Please let me know if you still
prefer it and I will do it in the next version. Thanks!

>> +
>> +static inline void ext4_mb_prepare_mark_context(struct ext4_mark_context *mc,
>> + struct super_block *sb,
>> + int state)
>> +{
>> + mc->sb = sb;
>> + mc->state = state;
>> +}
>> +
>> +static int
>> +ext4_mb_mark_context(struct ext4_mark_context *mc, ext4_group_t group,
>> + ext4_grpblk_t blkoff, ext4_grpblk_t len)
>> +{
>> + struct super_block *sb = mc->sb;
>> + struct ext4_sb_info *sbi = EXT4_SB(sb);
>> + struct buffer_head *bitmap_bh = NULL;
>> + struct ext4_group_desc *gdp;
>> + struct buffer_head *gdp_bh;
>> + int err;
>> + unsigned int i, already, changed;
>> +
>> + bitmap_bh = ext4_read_block_bitmap(sb, group);
>> + if (IS_ERR(bitmap_bh))
>> + return PTR_ERR(bitmap_bh);
>> +
>> + err = -EIO;
>> + gdp = ext4_get_group_desc(sb, group, &gdp_bh);
>> + if (!gdp)
>> + goto out_err;
>> +
>> + ext4_lock_group(sb, group);
>> + if (ext4_has_group_desc_csum(sb) &&
>> + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
>> + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
>> + ext4_free_group_clusters_set(sb, gdp,
>> + ext4_free_clusters_after_init(sb, group, gdp));
>> + }
>> +
>> + already = 0;
>> + for (i = 0; i < len; i++)
>> + if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
>> + mc->state)
>> + already++;
>> + changed = len - already;
>> +
>> + if (mc->state) {
>> + mb_set_bits(bitmap_bh->b_data, blkoff, len);
>> + ext4_free_group_clusters_set(sb, gdp,
>> + ext4_free_group_clusters(sb, gdp) - changed);
>> + } else {
>> + mb_clear_bits(bitmap_bh->b_data, blkoff, len);
>> + ext4_free_group_clusters_set(sb, gdp,
>> + ext4_free_group_clusters(sb, gdp) + changed);
>> + }
>> +
>> + ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
>> + ext4_group_desc_csum_set(sb, group, gdp);
>> + ext4_unlock_group(sb, group);
>> +
>> + if (sbi->s_log_groups_per_flex) {
>> + ext4_group_t flex_group = ext4_flex_group(sbi, group);
>> + struct flex_groups *fg = sbi_array_rcu_deref(sbi,
>> + s_flex_groups, flex_group);
>> +
>> + if (mc->state)
>> + atomic64_sub(changed, &fg->free_clusters);
>> + else
>> + atomic64_add(changed, &fg->free_clusters);
>> + }
>> +
>> + err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
>> + if (err)
>> + goto out_err;
>> + err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
>> + if (err)
>> + goto out_err;
>> +
>> + sync_dirty_buffer(bitmap_bh);
>> + sync_dirty_buffer(gdp_bh);
>> +
>> +out_err:
>> + brelse(bitmap_bh);
>> + return err;
>> +}
>>
>> /*
>> * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
>> @@ -4078,16 +4172,14 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
>> void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
>> int len, int state)
>> {
>> - struct buffer_head *bitmap_bh = NULL;
>> - struct ext4_group_desc *gdp;
>> - struct buffer_head *gdp_bh;
>> + struct ext4_mark_context mc;
>> struct ext4_sb_info *sbi = EXT4_SB(sb);
>> ext4_group_t group;
>> ext4_grpblk_t blkoff;
>> - int i, err = 0;
>> - int already;
>> - unsigned int clen, clen_changed, thisgrp_len;
>> + int err = 0;
>> + unsigned int clen, thisgrp_len;
>>
>> + ext4_mb_prepare_mark_context(&mc, sb, state);
>> while (len > 0) {
>> ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
>>
>> @@ -4107,80 +4199,17 @@ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
>> ext4_error(sb, "Marking blocks in system zone - "
>> "Block = %llu, len = %u",
>> block, thisgrp_len);
>> - bitmap_bh = NULL;
>> - break;
>> - }
>> -
>> - bitmap_bh = ext4_read_block_bitmap(sb, group);
>> - if (IS_ERR(bitmap_bh)) {
>> - err = PTR_ERR(bitmap_bh);
>> - bitmap_bh = NULL;
>> break;
>> }
>>
>> - err = -EIO;
>> - gdp = ext4_get_group_desc(sb, group, &gdp_bh);
>> - if (!gdp)
>> - break;
>> -
>> - ext4_lock_group(sb, group);
>> - already = 0;
>> - for (i = 0; i < clen; i++)
>> - if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
>> - !state)
>> - already++;
>> -
>> - clen_changed = clen - already;
>> - if (state)
>> - mb_set_bits(bitmap_bh->b_data, blkoff, clen);
>> - else
>> - mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
>> - if (ext4_has_group_desc_csum(sb) &&
>> - (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
>> - gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
>> - ext4_free_group_clusters_set(sb, gdp,
>> - ext4_free_clusters_after_init(sb, group, gdp));
>> - }
>> - if (state)
>> - clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
>> - else
>> - clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
>> -
>> - ext4_free_group_clusters_set(sb, gdp, clen);
>> - ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
>> - ext4_group_desc_csum_set(sb, group, gdp);
>> -
>> - ext4_unlock_group(sb, group);
>> -
>> - if (sbi->s_log_groups_per_flex) {
>> - ext4_group_t flex_group = ext4_flex_group(sbi, group);
>> - struct flex_groups *fg = sbi_array_rcu_deref(sbi,
>> - s_flex_groups, flex_group);
>> -
>> - if (state)
>> - atomic64_sub(clen_changed, &fg->free_clusters);
>> - else
>> - atomic64_add(clen_changed, &fg->free_clusters);
>> -
>> - }
>> -
>> - err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
>> - if (err)
>> - break;
>> - sync_dirty_buffer(bitmap_bh);
>> - err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
>> - sync_dirty_buffer(gdp_bh);
>> + err = ext4_mb_mark_context(&mc, group, blkoff, clen);
>> if (err)
>> break;
>>
>> block += thisgrp_len;
>> len -= thisgrp_len;
>> - brelse(bitmap_bh);
>> BUG_ON(len < 0);
>> }
>> -
>> - if (err)
>> - brelse(bitmap_bh);
>> }
>>
>> /*
>> --
>> 2.30.0
>
>
> -ritesh
>


2023-09-01 15:42:34

by Ritesh Harjani

Subject: Re: [PATCH v6 01/11] ext4: factor out code to update block bitmap and group descriptor on disk from ext4_mb_mark_bb

Kemeng Shi <[email protected]> writes:

Hello Kemeng,

> There are several reasons to add a general function to update the block
> bitmap and group descriptor on disk:

... named ext4_mb_mark_context(<params>)

> 1. Pair the behavior of allocating and freeing bits. For example,
> ext4_mb_new_blocks_simple updates free_clusters in struct flex_groups
> via ext4_mb_mark_bb, while ext4_free_blocks_simple forgets to do so.
> 2. Remove repeated code that reads from disk, updates and writes back to disk.
> 3. Reduce the future unit-test mocks needed to catch real IO that updates
> structures on disk.
>
> Signed-off-by: Kemeng Shi <[email protected]>
> Reviewed-by: Ojaswin Mujoo <[email protected]>
> ---
> fs/ext4/mballoc.c | 169 +++++++++++++++++++++++++++-------------------
> 1 file changed, 99 insertions(+), 70 deletions(-)
>
> diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
> index c91db9f57524..e2be572deb75 100644
> --- a/fs/ext4/mballoc.c
> +++ b/fs/ext4/mballoc.c
> @@ -3952,6 +3952,100 @@ void ext4_exit_mballoc(void)
> ext4_groupinfo_destroy_slabs();
> }
>
> +/*
> + * Collect global setting to reduce the number of variable passing to
> + * ext4_mb_mark_context. Pass target group blocks range directly to
> + * reuse the prepared global setting for multiple block ranges and
> + * to show clearly the specific block range will be marked.
> + */
> +struct ext4_mark_context {
> + struct super_block *sb;
> + int state;
> +};

This structure definition does not reflect its name.
Why can't we also add cblk, clen & flags to it?

I think the idea of defining a new function named
ext4_mb_prepare_mark_context() was that we can prepare "struct ext4_mark_context"
with different cblk, clen & flags arguments for cases where we might
have to call ext4_mb_mark_context() more than once in the same function
or call ext4_mb_mark_context() anywhere but at the start of the function.

As I see it, in the current series we call
ext4_mb_prepare_mark_context() at the start of every function. Just for
that purpose we don't need an extra function, right? We can do the
initialization directly when declaring the structure variable itself
(like you did in the previous version).
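
i.e. something like the following minimal sketch (just to illustrate the
point, not code from the series):

	struct ext4_mark_context mc = {
		.sb	= sb,
		.state	= state,
	};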

What do you think of the approach where we add cblk, clen & flags
members to struct ext4_mark_context? Do you see any problems/difficulties
with that design?
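
For illustration, a rough sketch of what that structure might then look
like; the new member names and types below are only guesses based on the
names mentioned above, not something from the posted series:

	struct ext4_mark_context {
		struct super_block	*sb;
		ext4_fsblk_t		cblk;	/* e.g. first block of the range to mark */
		ext4_grpblk_t		clen;	/* length of the range, in clusters */
		int			state;	/* set (1) or clear (0) the bits */
		int			flags;	/* per-call behavior flags */
	};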

> +
> +static inline void ext4_mb_prepare_mark_context(struct ext4_mark_context *mc,
> + struct super_block *sb,
> + int state)
> +{
> + mc->sb = sb;
> + mc->state = state;
> +}
> +
> +static int
> +ext4_mb_mark_context(struct ext4_mark_context *mc, ext4_group_t group,
> + ext4_grpblk_t blkoff, ext4_grpblk_t len)
> +{
> + struct super_block *sb = mc->sb;
> + struct ext4_sb_info *sbi = EXT4_SB(sb);
> + struct buffer_head *bitmap_bh = NULL;
> + struct ext4_group_desc *gdp;
> + struct buffer_head *gdp_bh;
> + int err;
> + unsigned int i, already, changed;
> +
> + bitmap_bh = ext4_read_block_bitmap(sb, group);
> + if (IS_ERR(bitmap_bh))
> + return PTR_ERR(bitmap_bh);
> +
> + err = -EIO;
> + gdp = ext4_get_group_desc(sb, group, &gdp_bh);
> + if (!gdp)
> + goto out_err;
> +
> + ext4_lock_group(sb, group);
> + if (ext4_has_group_desc_csum(sb) &&
> + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
> + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
> + ext4_free_group_clusters_set(sb, gdp,
> + ext4_free_clusters_after_init(sb, group, gdp));
> + }
> +
> + already = 0;
> + for (i = 0; i < len; i++)
> + if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
> + mc->state)
> + already++;
> + changed = len - already;
> +
> + if (mc->state) {
> + mb_set_bits(bitmap_bh->b_data, blkoff, len);
> + ext4_free_group_clusters_set(sb, gdp,
> + ext4_free_group_clusters(sb, gdp) - changed);
> + } else {
> + mb_clear_bits(bitmap_bh->b_data, blkoff, len);
> + ext4_free_group_clusters_set(sb, gdp,
> + ext4_free_group_clusters(sb, gdp) + changed);
> + }
> +
> + ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
> + ext4_group_desc_csum_set(sb, group, gdp);
> + ext4_unlock_group(sb, group);
> +
> + if (sbi->s_log_groups_per_flex) {
> + ext4_group_t flex_group = ext4_flex_group(sbi, group);
> + struct flex_groups *fg = sbi_array_rcu_deref(sbi,
> + s_flex_groups, flex_group);
> +
> + if (mc->state)
> + atomic64_sub(changed, &fg->free_clusters);
> + else
> + atomic64_add(changed, &fg->free_clusters);
> + }
> +
> + err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
> + if (err)
> + goto out_err;
> + err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
> + if (err)
> + goto out_err;
> +
> + sync_dirty_buffer(bitmap_bh);
> + sync_dirty_buffer(gdp_bh);
> +
> +out_err:
> + brelse(bitmap_bh);
> + return err;
> +}
>
> /*
> * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
> @@ -4078,16 +4172,14 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
> void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
> int len, int state)
> {
> - struct buffer_head *bitmap_bh = NULL;
> - struct ext4_group_desc *gdp;
> - struct buffer_head *gdp_bh;
> + struct ext4_mark_context mc;
> struct ext4_sb_info *sbi = EXT4_SB(sb);
> ext4_group_t group;
> ext4_grpblk_t blkoff;
> - int i, err = 0;
> - int already;
> - unsigned int clen, clen_changed, thisgrp_len;
> + int err = 0;
> + unsigned int clen, thisgrp_len;
>
> + ext4_mb_prepare_mark_context(&mc, sb, state);
> while (len > 0) {
> ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
>
> @@ -4107,80 +4199,17 @@ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
> ext4_error(sb, "Marking blocks in system zone - "
> "Block = %llu, len = %u",
> block, thisgrp_len);
> - bitmap_bh = NULL;
> - break;
> - }
> -
> - bitmap_bh = ext4_read_block_bitmap(sb, group);
> - if (IS_ERR(bitmap_bh)) {
> - err = PTR_ERR(bitmap_bh);
> - bitmap_bh = NULL;
> break;
> }
>
> - err = -EIO;
> - gdp = ext4_get_group_desc(sb, group, &gdp_bh);
> - if (!gdp)
> - break;
> -
> - ext4_lock_group(sb, group);
> - already = 0;
> - for (i = 0; i < clen; i++)
> - if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
> - !state)
> - already++;
> -
> - clen_changed = clen - already;
> - if (state)
> - mb_set_bits(bitmap_bh->b_data, blkoff, clen);
> - else
> - mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
> - if (ext4_has_group_desc_csum(sb) &&
> - (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
> - gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
> - ext4_free_group_clusters_set(sb, gdp,
> - ext4_free_clusters_after_init(sb, group, gdp));
> - }
> - if (state)
> - clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
> - else
> - clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
> -
> - ext4_free_group_clusters_set(sb, gdp, clen);
> - ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
> - ext4_group_desc_csum_set(sb, group, gdp);
> -
> - ext4_unlock_group(sb, group);
> -
> - if (sbi->s_log_groups_per_flex) {
> - ext4_group_t flex_group = ext4_flex_group(sbi, group);
> - struct flex_groups *fg = sbi_array_rcu_deref(sbi,
> - s_flex_groups, flex_group);
> -
> - if (state)
> - atomic64_sub(clen_changed, &fg->free_clusters);
> - else
> - atomic64_add(clen_changed, &fg->free_clusters);
> -
> - }
> -
> - err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
> - if (err)
> - break;
> - sync_dirty_buffer(bitmap_bh);
> - err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
> - sync_dirty_buffer(gdp_bh);
> + err = ext4_mb_mark_context(&mc, group, blkoff, clen);
> if (err)
> break;
>
> block += thisgrp_len;
> len -= thisgrp_len;
> - brelse(bitmap_bh);
> BUG_ON(len < 0);
> }
> -
> - if (err)
> - brelse(bitmap_bh);
> }
>
> /*
> --
> 2.30.0


-ritesh