Subject: [PATCH v3 0/3] blk-mq: Don't complete in IRQ, use llist_head

Patches 2+3 were applied and then dropped by Jens due to a NOHZ+softirq
related warning [0]. It turns out that a successful wakeup via
set_nr_if_polling() will not process any softirqs, so the CPU may go
back to idle. This is addressed by patch #1.

smpcfd_dying_cpu() will also invoke SMP function calls via
flush_smp_call_function_queue(), but the block layer shouldn't queue
anything because the CPU is no longer online.
The two callers of flush_smp_call_function_from_idle() look fine with
interrupts being enabled from within do_softirq().

[0] https://lkml.kernel.org/r/[email protected]

Sebastian
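
Patch #1 itself is not included below. A minimal sketch of its idea,
assuming the upstream structure of flush_smp_call_function_from_idle()
(approximate, not the verbatim patch): after flushing the SMP-function-call
queue from the idle path, process any softirqs raised by those callbacks
before the CPU can go back to idle, since a set_nr_if_polling() wakeup
never passes through irq_exit():

/* Sketch only: run softirqs raised by the flushed callbacks (such as
 * BLOCK_SOFTIRQ) before returning to the idle loop. */
void flush_smp_call_function_from_idle(void)
{
        unsigned long flags;

        if (llist_empty(this_cpu_ptr(&call_single_queue)))
                return;

        local_irq_save(flags);
        flush_smp_call_function_queue(true);
        if (local_softirq_pending())
                do_softirq();
        local_irq_restore(flags);
}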



Subject: [PATCH 2/3] blk-mq: Always complete remote completions requests in softirq

Controllers with multiple queues have their IRQ handlers pinned to a
CPU. The core shouldn't need to complete the request on a remote CPU.

Remove this case and always raise the softirq to complete the request.

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
block/blk-mq.c | 14 +-------------
1 file changed, 1 insertion(+), 13 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index f285a9123a8b0..90348ae518461 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -628,19 +628,7 @@ static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;

- /*
- * For most of single queue controllers, there is only one irq vector
- * for handling I/O completion, and the only irq's affinity is set
- * to all possible CPUs. On most of ARCHs, this affinity means the irq
- * is handled on one specific CPU.
- *
- * So complete I/O requests in softirq context in case of single queue
- * devices to avoid degrading I/O performance due to irqsoff latency.
- */
- if (rq->q->nr_hw_queues == 1)
- blk_mq_trigger_softirq(rq);
- else
- rq->q->mq_ops->complete(rq);
+ blk_mq_trigger_softirq(rq);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
--
2.30.0

Subject: [PATCH 3/3] blk-mq: Use llist_head for blk_cpu_done

With llist_head it is possible to avoid the locking (the irq-off region)
when items are added. This makes it possible to add items on a remote
CPU without additional locking.
llist_add() returns true if the list was previously empty. This can be
used to invoke the SMP function call / raise the softirq only if the first
item was added (otherwise it is already pending).
This simplifies the code a little and reduces the IRQ-off regions.

blk_mq_raise_softirq() needs a preempt-disable section to ensure the
request is enqueued on the same CPU on which the softirq is raised.
Some callers (USB-storage) invoke this path in preemptible context.

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
block/blk-mq.c | 97 ++++++++++++++++++------------------------
include/linux/blkdev.h | 2 +-
2 files changed, 42 insertions(+), 57 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 90348ae518461..463de2981df8a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -41,7 +41,7 @@
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -567,68 +567,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
}
EXPORT_SYMBOL(blk_mq_end_request);

-/*
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static void blk_complete_reqs(struct llist_head *list)
{
- struct list_head *cpu_list, local_list;
+ struct llist_node *entry = llist_reverse_order(llist_del_all(list));
+ struct request *rq, *next;

- local_irq_disable();
- cpu_list = this_cpu_ptr(&blk_cpu_done);
- list_replace_init(cpu_list, &local_list);
- local_irq_enable();
-
- while (!list_empty(&local_list)) {
- struct request *rq;
-
- rq = list_entry(local_list.next, struct request, ipi_list);
- list_del_init(&rq->ipi_list);
+ llist_for_each_entry_safe(rq, next, entry, ipi_list)
rq->q->mq_ops->complete(rq);
- }
}

-static void blk_mq_trigger_softirq(struct request *rq)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
- struct list_head *list;
- unsigned long flags;
-
- local_irq_save(flags);
- list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&rq->ipi_list, list);
-
- /*
- * If the list only contains our just added request, signal a raise of
- * the softirq. If there are already entries there, someone already
- * raised the irq but it hasn't run yet.
- */
- if (list->next == &rq->ipi_list)
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
- local_irq_restore(flags);
+ blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
- /*
- * If a CPU goes away, splice its entries to the current CPU
- * and trigger a run of the softirq
- */
- local_irq_disable();
- list_splice_init(&per_cpu(blk_cpu_done, cpu),
- this_cpu_ptr(&blk_cpu_done));
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
- local_irq_enable();
-
+ blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
return 0;
}

-
static void __blk_mq_complete_request_remote(void *data)
{
- struct request *rq = data;
-
- blk_mq_trigger_softirq(rq);
+ __raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
@@ -657,6 +618,30 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
return cpu_online(rq->mq_ctx->cpu);
}

+static void blk_mq_complete_send_ipi(struct request *rq)
+{
+ struct llist_head *list;
+ unsigned int cpu;
+
+ cpu = rq->mq_ctx->cpu;
+ list = &per_cpu(blk_cpu_done, cpu);
+ if (llist_add(&rq->ipi_list, list)) {
+ INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
+ smp_call_function_single_async(cpu, &rq->csd);
+ }
+}
+
+static void blk_mq_raise_softirq(struct request *rq)
+{
+ struct llist_head *list;
+
+ preempt_disable();
+ list = this_cpu_ptr(&blk_cpu_done);
+ if (llist_add(&rq->ipi_list, list))
+ raise_softirq(BLOCK_SOFTIRQ);
+ preempt_enable();
+}
+
bool blk_mq_complete_request_remote(struct request *rq)
{
WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
@@ -669,15 +654,15 @@ bool blk_mq_complete_request_remote(struct request *rq)
return false;

if (blk_mq_complete_need_ipi(rq)) {
- INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
- smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
- } else {
- if (rq->q->nr_hw_queues > 1)
- return false;
- blk_mq_trigger_softirq(rq);
+ blk_mq_complete_send_ipi(rq);
+ return true;
}

- return true;
+ if (rq->q->nr_hw_queues == 1) {
+ blk_mq_raise_softirq(rq);
+ return true;
+ }
+ return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

@@ -3892,7 +3877,7 @@ static int __init blk_mq_init(void)
int i;

for_each_possible_cpu(i)
- INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+ init_llist_head(&per_cpu(blk_cpu_done, i));
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);

cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f94ee3089e015..89a444c5a5833 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -153,7 +153,7 @@ struct request {
*/
union {
struct hlist_node hash; /* merge hash */
- struct list_head ipi_list;
+ struct llist_node ipi_list;
};

/*
--
2.30.0
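
As background for the llist_add() checks above: only the add that finds
the list empty returns true, so only that caller needs to raise the
softirq or send the IPI; every later add knows the drain is already
pending. A minimal userspace model of that pattern, with C11 atomics
standing in for the kernel's llist and all names illustrative, would be:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
        struct node *next;
        int id;
};

static _Atomic(struct node *) head;

/* Lock-free push; returns true if the list was previously empty,
 * mirroring llist_add()'s return value. */
static bool push(struct node *n)
{
        struct node *old = atomic_load(&head);

        do {
                n->next = old;
        } while (!atomic_compare_exchange_weak(&head, &old, n));

        return old == NULL;
}

int main(void)
{
        struct node a = { .id = 1 }, b = { .id = 2 };

        /* Only the first add on an empty list reports true, so only it
         * would raise the softirq / send the IPI. */
        printf("a: %d\n", push(&a));    /* prints 1 */
        printf("b: %d\n", push(&b));    /* prints 0 */
        return 0;
}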

From: Jens Axboe
Date: 2021-01-25 04:31:03
Subject: Re: [PATCH v3 0/3] blk-mq: Don't complete in IRQ, use llist_head

On 1/23/21 1:10 PM, Sebastian Andrzej Siewior wrote:
> Patches 2+3 were applied and then dropped by Jens due to a NOHZ+softirq
> related warning [0]. It turns out that a successful wakeup via
> set_nr_if_polling() will not process any softirqs, so the CPU may go
> back to idle. This is addressed by patch #1.
>
> smpcfd_dying_cpu() will also invoke SMP function calls via
> flush_smp_call_function_queue(), but the block layer shouldn't queue
> anything because the CPU is no longer online.
> The two callers of flush_smp_call_function_from_idle() look fine with
> interrupts being enabled from within do_softirq().
>
> [0] https://lkml.kernel.org/r/[email protected]

I can queue up the block side once the IPI fix is in some stable branch
that I can pull in.

--
Jens Axboe

From: Hannes Reinecke
Date: 2021-01-25 08:49:11
Subject: Re: [PATCH 2/3] blk-mq: Always complete remote completions requests in softirq

On 1/23/21 9:10 PM, Sebastian Andrzej Siewior wrote:
> Controllers with multiple queues have their IRQ handlers pinned to a
> CPU. The core shouldn't need to complete the request on a remote CPU.
>
> Remove this case and always raise the softirq to complete the request.
>
> Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
> ---
> block/blk-mq.c | 14 +-------------
> 1 file changed, 1 insertion(+), 13 deletions(-)
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index f285a9123a8b0..90348ae518461 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -628,19 +628,7 @@ static void __blk_mq_complete_request_remote(void *data)
> {
> struct request *rq = data;
>
> - /*
> - * For most of single queue controllers, there is only one irq vector
> - * for handling I/O completion, and the only irq's affinity is set
> - * to all possible CPUs. On most of ARCHs, this affinity means the irq
> - * is handled on one specific CPU.
> - *
> - * So complete I/O requests in softirq context in case of single queue
> - * devices to avoid degrading I/O performance due to irqsoff latency.
> - */
> - if (rq->q->nr_hw_queues == 1)
> - blk_mq_trigger_softirq(rq);
> - else
> - rq->q->mq_ops->complete(rq);
> + blk_mq_trigger_softirq(rq);
> }
>
> static inline bool blk_mq_complete_need_ipi(struct request *rq)
>
I don't get this.
This code is about _avoiding_ having to raise a softirq if the driver
exports more than one hardware queue.
So where exactly does the remote CPU case come in here?

Cheers,

Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
[email protected] +49 911 74053 688
SUSE Software Solutions GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), Geschäftsführer: Felix Imendörffer

From: Christoph Hellwig
Date: 2021-01-25 09:10:21
Subject: Re: [PATCH 3/3] blk-mq: Use llist_head for blk_cpu_done

On Mon, Jan 25, 2021 at 09:32:04AM +0100, Sebastian Andrzej Siewior wrote:
> On 2021-01-25 08:30:12 [+0000], Christoph Hellwig wrote:
> > > +static void blk_mq_complete_send_ipi(struct request *rq)
> > > +{
> > > + struct llist_head *list;
> > > + unsigned int cpu;
> > > +
> > > + cpu = rq->mq_ctx->cpu;
> > > + list = &per_cpu(blk_cpu_done, cpu);
> > > + if (llist_add(&rq->ipi_list, list)) {
> > > + INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
> > > + smp_call_function_single_async(cpu, &rq->csd);
> > > + }
> > > +}
> >
> > Nit: it would be nice to initialize cpu and list in the declaration
> > lines.
>
> Why? They get initialized later.

Because:

unsigned int cpu = rq->mq_ctx->cpu;
struct llist_head *list = &per_cpu(blk_cpu_done, cpu);

is a lot easier to follow than:

struct llist_head *list;
unsigned int cpu;

cpu = rq->mq_ctx->cpu;
list = &per_cpu(blk_cpu_done, cpu);

From: Christoph Hellwig
Date: 2021-01-25 09:10:25
Subject: Re: [PATCH 3/3] blk-mq: Use llist_head for blk_cpu_done

> +static void blk_mq_complete_send_ipi(struct request *rq)
> +{
> + struct llist_head *list;
> + unsigned int cpu;
> +
> + cpu = rq->mq_ctx->cpu;
> + list = &per_cpu(blk_cpu_done, cpu);
> + if (llist_add(&rq->ipi_list, list)) {
> + INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
> + smp_call_function_single_async(cpu, &rq->csd);
> + }
> +}

Nit: it would be nice to initialize cpu and list in the declaration
lines.

Otherwise looks good:

Reviewed-by: Christoph Hellwig <[email protected]>

Subject: Re: [PATCH 3/3] blk-mq: Use llist_head for blk_cpu_done

On 2021-01-25 08:30:12 [+0000], Christoph Hellwig wrote:
> > +static void blk_mq_complete_send_ipi(struct request *rq)
> > +{
> > + struct llist_head *list;
> > + unsigned int cpu;
> > +
> > + cpu = rq->mq_ctx->cpu;
> > + list = &per_cpu(blk_cpu_done, cpu);
> > + if (llist_add(&rq->ipi_list, list)) {
> > + INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
> > + smp_call_function_single_async(cpu, &rq->csd);
> > + }
> > +}
>
> Nit: it would be nice to initialize cpu and list in the declaration
> lines.

Why? They get initialized later.

> Otherwise looks good:
>
> Reviewed-by: Christoph Hellwig <[email protected]>

Sebastian

Subject: [PATCH 3/3 v2] blk-mq: Use llist_head for blk_cpu_done

With llist_head it is possible to avoid the locking (the irq-off region)
when items are added. This makes it possible to add items on a remote
CPU without additional locking.
llist_add() returns true if the list was previously empty. This can be
used to invoke the SMP function call / raise the softirq only if the first
item was added (otherwise it is already pending).
This simplifies the code a little and reduces the IRQ-off regions.

blk_mq_raise_softirq() needs a preempt-disable section to ensure the
request is enqueued on the same CPU on which the softirq is raised.
Some callers (USB-storage) invoke this path in preemptible context.

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
v1…v2: Move var initialisation to declaration in
blk_mq_complete_send_ipi(). Suggested by hch.

block/blk-mq.c | 95 +++++++++++++++++-------------------------
include/linux/blkdev.h | 2 +-
2 files changed, 40 insertions(+), 57 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 90348ae518461..8429be0d9b8dd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -41,7 +41,7 @@
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -567,68 +567,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
}
EXPORT_SYMBOL(blk_mq_end_request);

-/*
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static void blk_complete_reqs(struct llist_head *list)
{
- struct list_head *cpu_list, local_list;
+ struct llist_node *entry = llist_reverse_order(llist_del_all(list));
+ struct request *rq, *next;

- local_irq_disable();
- cpu_list = this_cpu_ptr(&blk_cpu_done);
- list_replace_init(cpu_list, &local_list);
- local_irq_enable();
-
- while (!list_empty(&local_list)) {
- struct request *rq;
-
- rq = list_entry(local_list.next, struct request, ipi_list);
- list_del_init(&rq->ipi_list);
+ llist_for_each_entry_safe(rq, next, entry, ipi_list)
rq->q->mq_ops->complete(rq);
- }
}

-static void blk_mq_trigger_softirq(struct request *rq)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
- struct list_head *list;
- unsigned long flags;
-
- local_irq_save(flags);
- list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&rq->ipi_list, list);
-
- /*
- * If the list only contains our just added request, signal a raise of
- * the softirq. If there are already entries there, someone already
- * raised the irq but it hasn't run yet.
- */
- if (list->next == &rq->ipi_list)
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
- local_irq_restore(flags);
+ blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
- /*
- * If a CPU goes away, splice its entries to the current CPU
- * and trigger a run of the softirq
- */
- local_irq_disable();
- list_splice_init(&per_cpu(blk_cpu_done, cpu),
- this_cpu_ptr(&blk_cpu_done));
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
- local_irq_enable();
-
+ blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
return 0;
}

-
static void __blk_mq_complete_request_remote(void *data)
{
- struct request *rq = data;
-
- blk_mq_trigger_softirq(rq);
+ __raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
@@ -657,6 +618,28 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
return cpu_online(rq->mq_ctx->cpu);
}

+static void blk_mq_complete_send_ipi(struct request *rq)
+{
+ unsigned int cpu = rq->mq_ctx->cpu;
+ struct llist_head *list = &per_cpu(blk_cpu_done, cpu);
+
+ if (llist_add(&rq->ipi_list, list)) {
+ INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
+ smp_call_function_single_async(cpu, &rq->csd);
+ }
+}
+
+static void blk_mq_raise_softirq(struct request *rq)
+{
+ struct llist_head *list;
+
+ preempt_disable();
+ list = this_cpu_ptr(&blk_cpu_done);
+ if (llist_add(&rq->ipi_list, list))
+ raise_softirq(BLOCK_SOFTIRQ);
+ preempt_enable();
+}
+
bool blk_mq_complete_request_remote(struct request *rq)
{
WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
@@ -669,15 +652,15 @@ bool blk_mq_complete_request_remote(struct request *rq)
return false;

if (blk_mq_complete_need_ipi(rq)) {
- INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
- smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
- } else {
- if (rq->q->nr_hw_queues > 1)
- return false;
- blk_mq_trigger_softirq(rq);
+ blk_mq_complete_send_ipi(rq);
+ return true;
}

- return true;
+ if (rq->q->nr_hw_queues == 1) {
+ blk_mq_raise_softirq(rq);
+ return true;
+ }
+ return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

@@ -3892,7 +3875,7 @@ static int __init blk_mq_init(void)
int i;

for_each_possible_cpu(i)
- INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+ init_llist_head(&per_cpu(blk_cpu_done, i));
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);

cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f94ee3089e015..89a444c5a5833 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -153,7 +153,7 @@ struct request {
*/
union {
struct hlist_node hash; /* merge hash */
- struct list_head ipi_list;
+ struct llist_node ipi_list;
};

/*
--
2.30.0
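
For context, the caller side that consumes the false return looks
roughly like the upstream blk_mq_complete_request(); quoted from memory,
so treat it as a sketch rather than part of this series:

/* If the request was neither completed via IPI nor via softirq,
 * blk_mq_complete_request_remote() returns false and the completion
 * runs directly in the caller's context. */
void blk_mq_complete_request(struct request *rq)
{
        if (!blk_mq_complete_request_remote(rq))
                rq->q->mq_ops->complete(rq);
}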

From: Christoph Hellwig
Date: 2021-01-26 05:09:23
Subject: Re: [PATCH 2/3] blk-mq: Always complete remote completions requests in softirq

On Mon, Jan 25, 2021 at 08:23:03AM +0000, Christoph Hellwig wrote:
> On Sat, Jan 23, 2021 at 09:10:26PM +0100, Sebastian Andrzej Siewior wrote:
> > Controllers with multiple queues have their IRQ handlers pinned to a
> > CPU. The core shouldn't need to complete the request on a remote CPU.
> >
> > Remove this case and always raise the softirq to complete the request.
>
> What about changing blk_mq_trigger_softirq to take a void * argument
> and thus removing __blk_mq_complete_request_remote entirely?

I'll take this back - that change is in the way of what you do in patch
3. So this looks good as-is:

Reviewed-by: Christoph Hellwig <[email protected]>

From: Christoph Hellwig
Date: 2021-01-26 17:31:07
Subject: Re: [PATCH 2/3] blk-mq: Always complete remote completions requests in softirq

On Sat, Jan 23, 2021 at 09:10:26PM +0100, Sebastian Andrzej Siewior wrote:
> Controllers with multiple queues have their IRQ handlers pinned to a
> CPU. The core shouldn't need to complete the request on a remote CPU.
>
> Remove this case and always raise the softirq to complete the request.

What about changing blk_mq_trigger_softirq to take a void * argument
and thus removing __blk_mq_complete_request_remote entirely?

From: Christoph Hellwig
Date: 2021-01-26 22:21:27
Subject: Re: [PATCH 2/3] blk-mq: Always complete remote completions requests in softirq

On Mon, Jan 25, 2021 at 09:30:29AM +0100, Sebastian Andrzej Siewior wrote:
> On 2021-01-25 08:25:42 [+0000], Christoph Hellwig wrote:
> > On Mon, Jan 25, 2021 at 08:10:16AM +0100, Hannes Reinecke wrote:
> > > I don't get this.
> > > This code is about _avoiding_ having to raise a softirq if the driver
> > > exports more than one hardware queue.
> > > So where exactly does the remote CPU case come in here?
> >
> > __blk_mq_complete_request_remote is only called for the case where we
> > do not complete locally. The case that "degrades" here is where
> > the device supports multiple queues, but fewer than the number of CPUs,
> > and we bounce the completion to another CPU.
>
> Does it really "degrade" or just use the softirq more often? The usual
> case is to run the softirqs in irq_exit(), which is just after the IPI.

Well, I put it in quotes because I'm not sure what the exact effect
is. But we do delay these completions to the softirq now instead of
hardirq context, which at least in theory increases latency. OTOH it
might even have positive effects on the rest of the system.

Subject: Re: [PATCH 2/3] blk-mq: Always complete remote completions requests in softirq

On 2021-01-25 08:25:42 [+0000], Christoph Hellwig wrote:
> On Mon, Jan 25, 2021 at 08:10:16AM +0100, Hannes Reinecke wrote:
> > I don't get this.
> > This code is about _avoiding_ having to raise a softirq if the driver
> > exports more than one hardware queue.
> > So where exactly does the remote CPU case come in here?
>
> __blk_mq_complete_request_remote is only called for the case where we
> do not complete locally. The case that "degrades" here is where
> the device supports multiple queues, but fewer than the number of CPUs,
> and we bounce the completion to another CPU.

Does it really "degrade" or just use the softirq more often? The usual
case is to run the softirqs in irq_exit(), which is just after the IPI.

Sebastian

From: Christoph Hellwig
Date: 2021-01-26 22:40:47
Subject: Re: [PATCH 2/3] blk-mq: Always complete remote completions requests in softirq

On Mon, Jan 25, 2021 at 08:10:16AM +0100, Hannes Reinecke wrote:
> I don't get this.
> This code is about _avoiding_ having to raise a softirq if the driver
> exports more than one hardware queue.
> So where exactly does the remote CPU case come in here?

__blk_mq_complete_request_remote is only called for the case where we
do not complete locally. The case that "degrades" here is where
the device supports multiple queues, but fewer than the number of CPUs,
and we bounce the completion to another CPU.

Subject: Re: [PATCH 2/3] blk-mq: Always complete remote completions requests in softirq

On 2021-01-25 08:32:48 [+0000], Christoph Hellwig wrote:
> Well, I put it in quotes because I'm not sure what the exact effect
> is. But we do delay these completions to the softirq now instead of
> hardirq context, which at least in theory increases latency. OTOH it
> might even have positive effects on the rest of the system.

The last part is/was my motivation ;)

Sebastian

From: Christoph Hellwig
Date: 2021-01-27 03:31:14
Subject: Re: [PATCH 3/3 v2] blk-mq: Use llist_head for blk_cpu_done

Looks good,

Reviewed-by: Christoph Hellwig <[email protected]>

From: Daniel Wagner
Date: 2021-01-27 23:50:51
Subject: Re: [PATCH 3/3 v2] blk-mq: Use llist_head for blk_cpu_done

On Mon, Jan 25, 2021 at 10:54:12AM +0100, Sebastian Andrzej Siewior wrote:
> With llist_head it is possible to avoid the locking (the irq-off region)
> when items are added. This makes it possible to add items on a remote
> CPU without additional locking.
> llist_add() returns true if the list was previously empty. This can be
> used to invoke the SMP function call / raise the softirq only if the first
> item was added (otherwise it is already pending).
> This simplifies the code a little and reduces the IRQ-off regions.
>
> blk_mq_raise_softirq() needs a preempt-disable section to ensure the
> request is enqueued on the same CPU on which the softirq is raised.
> Some callers (USB-storage) invoke this path in preemptible context.
>
> Signed-off-by: Sebastian Andrzej Siewior <[email protected]>

I did a quick test run with the whole series. Looks good.

Reviewed-by: Daniel Wagner <[email protected]>

From: Daniel Wagner
Date: 2021-01-27 23:52:26
Subject: Re: [PATCH 2/3] blk-mq: Always complete remote completions requests in softirq

On Sat, Jan 23, 2021 at 09:10:26PM +0100, Sebastian Andrzej Siewior wrote:
> Controllers with multiple queues have their IRQ handlers pinned to a
> CPU. The core shouldn't need to complete the request on a remote CPU.
>
> Remove this case and always raise the softirq to complete the request.
>
> Signed-off-by: Sebastian Andrzej Siewior <[email protected]>

Reviewed-by: Daniel Wagner <[email protected]>

From: Jens Axboe
Date: 2021-02-10 14:47:16
Subject: Re: [PATCH v3 0/3] blk-mq: Don't complete in IRQ, use llist_head

On 1/23/21 1:10 PM, Sebastian Andrzej Siewior wrote:
> Patches 2+3 were applied and then dropped by Jens due to a NOHZ+softirq
> related warning [0]. It turns out that a successful wakeup via
> set_nr_if_polling() will not process any softirqs, so the CPU may go
> back to idle. This is addressed by patch #1.
>
> smpcfd_dying_cpu() will also invoke SMP function calls via
> flush_smp_call_function_queue(), but the block layer shouldn't queue
> anything because the CPU is no longer online.
> The two callers of flush_smp_call_function_from_idle() look fine with
> interrupts being enabled from within do_softirq().

Applied, thanks.

--
Jens Axboe