Hey,
We hit another bug in my recent batching patch. In this case the
bug has never been hit with the current md/md-next branch, but
some other patches we were working on changed the timing such
that we hit it. It is theoretically possible to hit in the
md/md-next branch as well, so this patchset contains a fix.
The fix is the last commit. The first four commits are some
basic refactoring that makes the final commit a bit easier to follow.
A git repo is here and is based on current md/md-next (7a6f9e9cf1):
https://github.com/sbates130272/linux-p2pmem raid5_batch_quiesce
Thanks,
Logan
--
Logan Gunthorpe (5):
md/raid5: Refactor raid5_get_active_stripe()
md/raid5: Make is_inactive_blocked() helper
md/raid5: Drop unnecessary call to r5c_check_stripe_cache_usage()
md/raid5: Move stripe_request_ctx up
md/raid5: Ensure batch_last is released before sleeping for quiesce
drivers/md/raid5.c | 162 ++++++++++++++++++++++++++++-----------------
drivers/md/raid5.h | 2 +-
2 files changed, 101 insertions(+), 63 deletions(-)
base-commit: 7a6f9e9cf1befa0a1578501966d3c9b0cae46727
--
2.30.2
Move the stripe_request_ctx and stripe_result definitions up in the
file. No functional changes intended.
This will be necessary in the next patch, which must release the
batch_last reference stored in the context before sleeping.
Signed-off-by: Logan Gunthorpe <[email protected]>
---
drivers/md/raid5.c | 54 +++++++++++++++++++++++-----------------------
1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e09fa55960cc..0a8687fd1748 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -755,6 +755,33 @@ static bool has_failed(struct r5conf *conf)
return degraded > conf->max_degraded;
}
+enum stripe_result {
+ STRIPE_SUCCESS = 0,
+ STRIPE_RETRY,
+ STRIPE_SCHEDULE_AND_RETRY,
+ STRIPE_FAIL,
+};
+
+struct stripe_request_ctx {
+ /* a reference to the last stripe_head for batching */
+ struct stripe_head *batch_last;
+
+ /* first sector in the request */
+ sector_t first_sector;
+
+ /* last sector in the request */
+ sector_t last_sector;
+
+ /*
+ * bitmap to track stripe sectors that have been added to stripes
+ * add one to account for unaligned requests
+ */
+ DECLARE_BITMAP(sectors_to_do, RAID5_MAX_REQ_STRIPES + 1);
+
+ /* the request had REQ_PREFLUSH, cleared after the first stripe_head */
+ bool do_flush;
+};
+
/*
* Block until another thread clears R5_INACTIVE_BLOCKED or
* there are fewer than 3/4 the maximum number of active stripes
@@ -5874,33 +5901,6 @@ static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
return ret;
}
-enum stripe_result {
- STRIPE_SUCCESS = 0,
- STRIPE_RETRY,
- STRIPE_SCHEDULE_AND_RETRY,
- STRIPE_FAIL,
-};
-
-struct stripe_request_ctx {
- /* a reference to the last stripe_head for batching */
- struct stripe_head *batch_last;
-
- /* first sector in the request */
- sector_t first_sector;
-
- /* last sector in the request */
- sector_t last_sector;
-
- /*
- * bitmap to track stripe sectors that have been added to stripes
- * add one to account for unaligned requests
- */
- DECLARE_BITMAP(sectors_to_do, RAID5_MAX_REQ_STRIPES + 1);
-
- /* the request had REQ_PREFLUSH, cleared after the first stripe_head */
- bool do_flush;
-};
-
static int add_all_stripe_bios(struct r5conf *conf,
struct stripe_request_ctx *ctx, struct stripe_head *sh,
struct bio *bi, int forwrite, int previous)
--
2.30.2
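(Not part of the patch, for context only: the stripe_request_ctx moved
above is consumed by the submission loop in raid5_make_request()
roughly as in the condensed sketch below. The loop body is simplified
and not verbatim from raid5.c.)

	struct stripe_request_ctx ctx = {};
	enum stripe_result res;
	sector_t logical_sector;
	int s;

	/* one bit per stripe the bio touches (+1 for unaligned requests) */
	bitmap_set(ctx.sectors_to_do, 0, stripe_cnt);

	while ((s = find_first_bit(ctx.sectors_to_do, stripe_cnt)) <
	       stripe_cnt) {
		/* translate the bit index back into a logical sector */
		logical_sector = ctx.first_sector +
			((sector_t)s << RAID5_STRIPE_SHIFT(conf));

		res = make_stripe_request(mddev, conf, &ctx,
					  logical_sector, bi);
		if (res == STRIPE_FAIL)
			break;
		/*
		 * On STRIPE_SUCCESS the stripe's bit was cleared inside
		 * make_stripe_request(); on the retry results the loop
		 * simply comes around again.
		 */
	}

	/* the batching reference must be dropped once the bio is handled */
	if (ctx.batch_last)
		raid5_release_stripe(ctx.batch_last);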
Now that raid5_get_active_stripe() has been refactored, it is
apparent that r5c_check_stripe_cache_usage() doesn't need to be
called in the wait_for_stripe branch.
r5c_check_stripe_cache_usage() will only conditionally call
r5l_wake_reclaim(), but that function is called unconditionally
two lines later anyway.
Drop the redundant call as a cleanup.
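For reference, r5c_check_stripe_cache_usage() boils down to roughly
the following (a condensed paraphrase of the raid5-cache.c code, not
verbatim):

	void r5c_check_stripe_cache_usage(struct r5conf *conf)
	{
		int total_cached;

		if (!r5c_is_writeback(conf->log))
			return;

		total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
			atomic_read(&conf->r5c_cached_full_stripes);

		/*
		 * The only side effect is this conditional wake of
		 * reclaim, which the wait_for_stripe branch performs
		 * unconditionally two lines later anyway.
		 */
		if (total_cached > conf->min_nr_stripes * 1 / 2 ||
		    atomic_read(&conf->empty_inactive_list_nr) > 0)
			r5l_wake_reclaim(conf->log, 0);
	}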
Reported-by: Martin Oliveira <[email protected]>
Signed-off-by: Logan Gunthorpe <[email protected]>
---
drivers/md/raid5.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e7e02a979670..e09fa55960cc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -810,7 +810,6 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
if (noblock)
goto out;
- r5c_check_stripe_cache_usage(conf);
set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
r5l_wake_reclaim(conf->log, 0);
wait_event_lock_irq(conf->wait_for_stripe,
--
2.30.2
A race condition exists where, if raid5_quiesce() is called in the
middle of a request that has set batch_last, a deadlock results.
batch_last will hold a reference to a stripe when raid5_quiesce() is
called, so the next raid5_get_active_stripe() call will sleep waiting
for the quiesce to finish. Meanwhile, the raid5_quiesce() thread waits
for active_stripes to go to zero, which will never happen because the
request thread still holds a stripe reference through batch_last while
it waits for the quiesce to stop.
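As an illustration, the interleaving that deadlocks looks roughly like
this (a sketch of the ordering described above, not an actual trace):

    request thread                      raid5_quiesce() thread
    --------------                      ----------------------
    ctx->batch_last = sh
      (holds a stripe reference)
                                        conf->quiesce = 1
                                        wait for active_stripes == 0
    raid5_get_active_stripe()
      sleeps until conf->quiesce == 0
                                        never wakes up: batch_last
                                        still pins one active stripe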
Fix this by creating a special __raid5_get_active_stripe() function
which takes the request context and releases the batch_last reference
before sleeping.
While we're at it, change the arguments of raid5_get_active_stripe()
to bools.
Fixes: 4fcbd9abb6f2 ("md/raid5: Keep a reference to last stripe_head for batch")
Reported-by: David Sloan <[email protected]>
Signed-off-by: Logan Gunthorpe <[email protected]>
---
drivers/md/raid5.c | 36 ++++++++++++++++++++++++++++--------
drivers/md/raid5.h | 2 +-
2 files changed, 29 insertions(+), 9 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0a8687fd1748..421bac221a74 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -800,9 +800,9 @@ static bool is_inactive_blocked(struct r5conf *conf, int hash)
return active < (conf->max_nr_stripes * 3 / 4);
}
-struct stripe_head *
-raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
- int previous, int noblock, int noquiesce)
+static struct stripe_head *__raid5_get_active_stripe(struct r5conf *conf,
+ struct stripe_request_ctx *ctx, sector_t sector,
+ bool previous, bool noblock, bool noquiesce)
{
struct stripe_head *sh;
int hash = stripe_hash_locks_hash(conf, sector);
@@ -812,9 +812,22 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
spin_lock_irq(conf->hash_locks + hash);
retry:
- wait_event_lock_irq(conf->wait_for_quiescent,
- conf->quiesce == 0 || noquiesce,
- *(conf->hash_locks + hash));
+ if (!noquiesce && conf->quiesce) {
+ /*
+ * Must release the reference to batch_last before waiting
+ * on quiesce; otherwise batch_last will hold a reference
+ * to a stripe and raid5_quiesce() will deadlock waiting for
+ * active_stripes to go to zero.
+ */
+ if (ctx && ctx->batch_last) {
+ raid5_release_stripe(ctx->batch_last);
+ ctx->batch_last = NULL;
+ }
+
+ wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
+ *(conf->hash_locks + hash));
+ }
+
sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
if (sh)
goto out;
@@ -850,6 +863,13 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
return sh;
}
+struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
+ sector_t sector, bool previous, bool noblock, bool noquiesce)
+{
+ return __raid5_get_active_stripe(conf, NULL, sector, previous, noblock,
+ noquiesce);
+}
+
static bool is_full_stripe_write(struct stripe_head *sh)
{
BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
@@ -5992,8 +6012,8 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
pr_debug("raid456: %s, sector %llu logical %llu\n", __func__,
new_sector, logical_sector);
- sh = raid5_get_active_stripe(conf, new_sector, previous,
- (bi->bi_opf & REQ_RAHEAD), 0);
+ sh = __raid5_get_active_stripe(conf, ctx, new_sector, previous,
+ (bi->bi_opf & REQ_RAHEAD), 0);
if (unlikely(!sh)) {
/* cannot get stripe, just give-up */
bi->bi_status = BLK_STS_IOERR;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 638d29863503..a5082bed83c8 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -812,7 +812,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
struct stripe_head *sh);
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
- int previous, int noblock, int noquiesce);
+ bool previous, bool noblock, bool noquiesce);
extern int raid5_calc_degraded(struct r5conf *conf);
extern int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
#endif
--
2.30.2
On Wed, Jul 27, 2022 at 2:06 PM Logan Gunthorpe <[email protected]> wrote:
>
> Hey,
>
> We hit another bug in my recent batching patch. In this case the
> bug has never been hit with the current md/md-next branch, but
> some other patches we were working on changed the timing such
> that we hit it. It is theoretically possible to hit in the
> md/md-next branch as well, so this patchset contains a fix.
>
> The fix is the last commit. The first four commits are some
> basic refactoring that makes the final commit a bit easier to follow.
>
> A git repo is here and is based on current md/md-next (7a6f9e9cf1):
>
> https://github.com/sbates130272/linux-p2pmem raid5_batch_quiesce
Applied to md-next. Thanks!
Song
On Wed, Jul 27, 2022 at 03:06:00PM -0600, Logan Gunthorpe wrote:
> +static struct stripe_head *__raid5_get_active_stripe(struct r5conf *conf,
> + struct stripe_request_ctx *ctx, sector_t sector,
> + bool previous, bool noblock, bool noquiesce)
Passing three different bool arguments right after one another is a
really confusing calling convention; at some point this should become
a set of flags. I'd also drop the __raid5_get_active_stripe vs
raid5_get_active_stripe distinction.
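A flags-based interface could look something like the sketch below
(purely illustrative; the flag names here are invented, not taken from
this series):

	enum {
		R5_GAS_PREVIOUS		= (1 << 0), /* use the previous generation */
		R5_GAS_NOBLOCK		= (1 << 1), /* don't block on a full stripe cache */
		R5_GAS_NOQUIESCE	= (1 << 2), /* ignore conf->quiesce */
	};

	struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
			struct stripe_request_ctx *ctx, sector_t sector,
			unsigned int flags);

	/* e.g. the call in make_stripe_request() would become: */
	sh = raid5_get_active_stripe(conf, ctx, new_sector,
				     (previous ? R5_GAS_PREVIOUS : 0) |
				     (bi->bi_opf & REQ_RAHEAD ?
				      R5_GAS_NOBLOCK : 0));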