Refactor the raid5_get_active_stripe() to read more linearly in
the order it's typically executed.
init_stripe() is now called only when a free stripe is found, and the
function exits early in that case, which removes a lot of if (sh)
checks and unindents the following code.
Remove the while loop in favour of the 'goto retry' pattern, which
reduces indentation further. And use a 'goto wait_for_stripe' instead
of an additional indent seeing it is the unusual path and this makes
the code easier to read.
No functional changes intended. Will make subsequent changes
in patches easier to understand.
Signed-off-by: Logan Gunthorpe <[email protected]>
---
drivers/md/raid5.c | 67 +++++++++++++++++++++++++---------------------
1 file changed, 36 insertions(+), 31 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3b7887428cd0..b1cb0be8fa67 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -766,41 +766,46 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
spin_lock_irq(conf->hash_locks + hash);
- do {
- wait_event_lock_irq(conf->wait_for_quiescent,
- conf->quiesce == 0 || noquiesce,
- *(conf->hash_locks + hash));
- sh = find_get_stripe(conf, sector, conf->generation - previous,
- hash);
- if (sh)
- break;
+retry:
+ wait_event_lock_irq(conf->wait_for_quiescent,
+ conf->quiesce == 0 || noquiesce,
+ *(conf->hash_locks + hash));
+ sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
+ if (sh)
+ goto out;
- if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
- sh = get_free_stripe(conf, hash);
- if (!sh && !test_bit(R5_DID_ALLOC, &conf->cache_state))
- set_bit(R5_ALLOC_MORE, &conf->cache_state);
- }
- if (noblock && !sh)
- break;
+ if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
+ goto wait_for_stripe;
+ sh = get_free_stripe(conf, hash);
+ if (sh) {
r5c_check_stripe_cache_usage(conf);
- if (!sh) {
- set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
- r5l_wake_reclaim(conf->log, 0);
- wait_event_lock_irq(conf->wait_for_stripe,
- !list_empty(conf->inactive_list + hash) &&
- (atomic_read(&conf->active_stripes)
- < (conf->max_nr_stripes * 3 / 4)
- || !test_bit(R5_INACTIVE_BLOCKED,
- &conf->cache_state)),
- *(conf->hash_locks + hash));
- clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
- } else {
- init_stripe(sh, sector, previous);
- atomic_inc(&sh->count);
- }
- } while (sh == NULL);
+ init_stripe(sh, sector, previous);
+ atomic_inc(&sh->count);
+ goto out;
+ }
+ if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
+ set_bit(R5_ALLOC_MORE, &conf->cache_state);
+
+wait_for_stripe:
+ if (noblock)
+ goto out;
+
+ r5c_check_stripe_cache_usage(conf);
+ set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
+ wait_event_lock_irq(conf->wait_for_stripe,
+ !list_empty(conf->inactive_list + hash) &&
+ (atomic_read(&conf->active_stripes)
+ < (conf->max_nr_stripes * 3 / 4)
+ || !test_bit(R5_INACTIVE_BLOCKED,
+ &conf->cache_state)),
+ *(conf->hash_locks + hash));
+ clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ goto retry;
+
+out:
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
--
2.30.2
On Wed, Jul 27, 2022 at 03:05:56PM -0600, Logan Gunthorpe wrote:
> Refactor the raid5_get_active_stripe() to read more linearly in
> the order it's typically executed.
>
> The init_stripe() call is called if a free stripe is found and the
> function is exited early which removes a lot of if (sh) checks and
> unindents the following code.
>
> Remove the while loop in favour of the 'goto retry' pattern, which
> reduces indentation further. And use a 'goto wait_for_stripe' instead
> of an additional indent seeing it is the unusual path and this makes
> the code easier to read.
>
> No functional changes intended. Will make subsequent changes
> in patches easier to understand.
I find the new loop even more confusing than the old one. I'd go
with something like the version below (on top of the whole md-next tree
that pulled this in way too fast..)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4456ac51f7c53..cd8ec4995a49b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -811,54 +811,54 @@ static struct stripe_head *__raid5_get_active_stripe(struct r5conf *conf,
spin_lock_irq(conf->hash_locks + hash);
-retry:
- if (!noquiesce && conf->quiesce) {
- /*
- * Must release the reference to batch_last before waiting,
- * on quiesce, otherwise the batch_last will hold a reference
- * to a stripe and raid5_quiesce() will deadlock waiting for
- * active_stripes to go to zero.
- */
- if (ctx && ctx->batch_last) {
- raid5_release_stripe(ctx->batch_last);
- ctx->batch_last = NULL;
- }
-
- wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
- *(conf->hash_locks + hash));
- }
+ for (;;) {
+ if (!noquiesce && conf->quiesce) {
+ /*
+ * Must release the reference to batch_last before
+ * waiting on quiesce, otherwise the batch_last will
+ * hold a reference to a stripe and raid5_quiesce()
+ * will deadlock waiting for active_stripes to go to
+ * zero.
+ */
+ if (ctx && ctx->batch_last) {
+ raid5_release_stripe(ctx->batch_last);
+ ctx->batch_last = NULL;
+ }
- sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
- if (sh)
- goto out;
+ wait_event_lock_irq(conf->wait_for_quiescent,
+ !conf->quiesce,
+ *(conf->hash_locks + hash));
+ }
- if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
- goto wait_for_stripe;
+ sh = find_get_stripe(conf, sector, conf->generation - previous,
+ hash);
+ if (sh)
+ break;
- sh = get_free_stripe(conf, hash);
- if (sh) {
- r5c_check_stripe_cache_usage(conf);
- init_stripe(sh, sector, previous);
- atomic_inc(&sh->count);
- goto out;
- }
+ if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
+ sh = get_free_stripe(conf, hash);
+ if (sh) {
+ r5c_check_stripe_cache_usage(conf);
+ init_stripe(sh, sector, previous);
+ atomic_inc(&sh->count);
+ break;
+ }
- if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
- set_bit(R5_ALLOC_MORE, &conf->cache_state);
+ if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
+ set_bit(R5_ALLOC_MORE, &conf->cache_state);
+ }
-wait_for_stripe:
- if (noblock)
- goto out;
+ if (noblock)
+ break;
- set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
- r5l_wake_reclaim(conf->log, 0);
- wait_event_lock_irq(conf->wait_for_stripe,
- is_inactive_blocked(conf, hash),
- *(conf->hash_locks + hash));
- clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
- goto retry;
+ set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
+ wait_event_lock_irq(conf->wait_for_stripe,
+ is_inactive_blocked(conf, hash),
+ *(conf->hash_locks + hash));
+ clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ }
-out:
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
On Thu, Jul 28, 2022 at 7:13 AM Christoph Hellwig <[email protected]> wrote:
>
> On Wed, Jul 27, 2022 at 03:05:56PM -0600, Logan Gunthorpe wrote:
> > Refactor the raid5_get_active_stripe() to read more linearly in
> > the order it's typically executed.
> >
> > The init_stripe() call is called if a free stripe is found and the
> > function is exited early which removes a lot of if (sh) checks and
> > unindents the following code.
> >
> > Remove the while loop in favour of the 'goto retry' pattern, which
> > reduces indentation further. And use a 'goto wait_for_stripe' instead
> > of an additional indent seeing it is the unusual path and this makes
> > the code easier to read.
> >
> > No functional changes intended. Will make subsequent changes
> > in patches easier to understand.
>
> I find the new loop even more confusing than the old one. I'd go
> with something like the version below (on top of the whol md-next tree
> that pulled this in way too fast..)
This looks good to me. Christoph, would you mind send official patch
for this?
Thanks,
Song
>
> diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
> index 4456ac51f7c53..cd8ec4995a49b 100644
> --- a/drivers/md/raid5.c
> +++ b/drivers/md/raid5.c
> @@ -811,54 +811,54 @@ static struct stripe_head *__raid5_get_active_stripe(struct r5conf *conf,
>
> spin_lock_irq(conf->hash_locks + hash);
>
> -retry:
> - if (!noquiesce && conf->quiesce) {
> - /*
> - * Must release the reference to batch_last before waiting,
> - * on quiesce, otherwise the batch_last will hold a reference
> - * to a stripe and raid5_quiesce() will deadlock waiting for
> - * active_stripes to go to zero.
> - */
> - if (ctx && ctx->batch_last) {
> - raid5_release_stripe(ctx->batch_last);
> - ctx->batch_last = NULL;
> - }
> -
> - wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
> - *(conf->hash_locks + hash));
> - }
> + for (;;) {
> + if (!noquiesce && conf->quiesce) {
> + /*
> + * Must release the reference to batch_last before
> + * waiting on quiesce, otherwise the batch_last will
> + * hold a reference to a stripe and raid5_quiesce()
> + * will deadlock waiting for active_stripes to go to
> + * zero.
> + */
> + if (ctx && ctx->batch_last) {
> + raid5_release_stripe(ctx->batch_last);
> + ctx->batch_last = NULL;
> + }
>
> - sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
> - if (sh)
> - goto out;
> + wait_event_lock_irq(conf->wait_for_quiescent,
> + !conf->quiesce,
> + *(conf->hash_locks + hash));
> + }
>
> - if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
> - goto wait_for_stripe;
> + sh = find_get_stripe(conf, sector, conf->generation - previous,
> + hash);
> + if (sh)
> + break;
>
> - sh = get_free_stripe(conf, hash);
> - if (sh) {
> - r5c_check_stripe_cache_usage(conf);
> - init_stripe(sh, sector, previous);
> - atomic_inc(&sh->count);
> - goto out;
> - }
> + if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
> + sh = get_free_stripe(conf, hash);
> + if (sh) {
> + r5c_check_stripe_cache_usage(conf);
> + init_stripe(sh, sector, previous);
> + atomic_inc(&sh->count);
> + break;
> + }
>
> - if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
> - set_bit(R5_ALLOC_MORE, &conf->cache_state);
> + if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
> + set_bit(R5_ALLOC_MORE, &conf->cache_state);
> + }
>
> -wait_for_stripe:
> - if (noblock)
> - goto out;
> + if (noblock)
> + break;
>
> - set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
> - r5l_wake_reclaim(conf->log, 0);
> - wait_event_lock_irq(conf->wait_for_stripe,
> - is_inactive_blocked(conf, hash),
> - *(conf->hash_locks + hash));
> - clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
> - goto retry;
> + set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
> + r5l_wake_reclaim(conf->log, 0);
> + wait_event_lock_irq(conf->wait_for_stripe,
> + is_inactive_blocked(conf, hash),
> + *(conf->hash_locks + hash));
> + clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
> + }
>
> -out:
> spin_unlock_irq(conf->hash_locks + hash);
> return sh;
> }
On July 29, 2022 7:48:48 PM ADT, Song Liu <[email protected]> wrote:
>On Thu, Jul 28, 2022 at 7:13 AM Christoph Hellwig <[email protected]> wrote:
>>
>> On Wed, Jul 27, 2022 at 03:05:56PM -0600, Logan Gunthorpe wrote:
>> > Refactor the raid5_get_active_stripe() to read more linearly in
>> > the order it's typically executed.
>> >
>> > The init_stripe() call is called if a free stripe is found and the
>> > function is exited early which removes a lot of if (sh) checks and
>> > unindents the following code.
>> >
>> > Remove the while loop in favour of the 'goto retry' pattern, which
>> > reduces indentation further. And use a 'goto wait_for_stripe' instead
>> > of an additional indent seeing it is the unusual path and this makes
>> > the code easier to read.
>> >
>> > No functional changes intended. Will make subsequent changes
>> > in patches easier to understand.
>>
>> I find the new loop even more confusing than the old one. I'd go
>> with something like the version below (on top of the whol md-next tree
>> that pulled this in way too fast..)
>
>This looks good to me. Christoph, would you mind send official patch
>for this?
>
>Thanks,
>Song
I'm on vacation this week, but I'd be happy to send patches addressing Christoph's feedback when I'm back next week.
Thanks,
Logan
On Mon, Aug 1, 2022 at 4:48 AM Logan Gunthorpe <[email protected]> wrote:
>
>
>
> On July 29, 2022 7:48:48 PM ADT, Song Liu <[email protected]> wrote:
> >On Thu, Jul 28, 2022 at 7:13 AM Christoph Hellwig <[email protected]> wrote:
> >>
> >> On Wed, Jul 27, 2022 at 03:05:56PM -0600, Logan Gunthorpe wrote:
> >> > Refactor the raid5_get_active_stripe() to read more linearly in
> >> > the order it's typically executed.
> >> >
> >> > The init_stripe() call is called if a free stripe is found and the
> >> > function is exited early which removes a lot of if (sh) checks and
> >> > unindents the following code.
> >> >
> >> > Remove the while loop in favour of the 'goto retry' pattern, which
> >> > reduces indentation further. And use a 'goto wait_for_stripe' instead
> >> > of an additional indent seeing it is the unusual path and this makes
> >> > the code easier to read.
> >> >
> >> > No functional changes intended. Will make subsequent changes
> >> > in patches easier to understand.
> >>
> >> I find the new loop even more confusing than the old one. I'd go
> >> with something like the version below (on top of the whol md-next tree
> >> that pulled this in way too fast..)
> >
> >This looks good to me. Christoph, would you mind send official patch
> >for this?
> >
> >Thanks,
> >Song
>
> I'm on vacation this week, but I'd be happy to send patches addressing Christoph's feedback when I'm back next week.
We are in the merge window right now. So the timing is a little tricky. I will
try to send pull requests with this set as-is. Then we can do follow-ups.
Thanks,
Song
On Mon, Aug 01, 2022 at 09:49:38AM -0700, Song Liu wrote:
> We are in the merge window right now. So the timing is a little tricky. I will
> try to send pull requests with this set as-is. Then we can do follow-ups.
I can send the patch. I don't think it's anywhere near critical enough to
rush it into the current merge window, though.
On Mon, Aug 1, 2022 at 10:15 AM Christoph Hellwig <[email protected]> wrote:
>
> On Mon, Aug 01, 2022 at 09:49:38AM -0700, Song Liu wrote:
> > We are in the merge window right now. So the timing is a little tricky. I will
> > try to send pull requests with this set as-is. Then we can do follow-ups.
>
> I can send the patch. I don't think it's anywhere near critical enought to
> rush it into the current merge window, though.
Agreed. Let's improve it in 6.0.
Thanks!
Song