2022-07-01 20:42:47

by Samuel Holland

Subject: [PATCH v3 0/2] irqchip: RISC-V PLIC cleanup and optimization

This series removes the spinlocks and cpumask operations from the PLIC
driver's hot path. As far as I know, using the priority to mask
interrupts is an intended usage and will work on all existing
implementations. See [1] for more discussion.
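
Concretely, patch 2 turns each mask/unmask from a cross-CPU, locked
read-modify-write of the packed enable bits into a single MMIO write of
the per-IRQ priority register. Condensed from the diff in patch 2 (no
new code), the mask operation changes roughly like this:

/* before: walk the effective affinity mask, write the priority, and
 * toggle the packed enable bits under a per-context raw spinlock */
static void plic_irq_mask(struct irq_data *d)
{
        plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}

/* after: one write to the IRQ's own priority register */
static void plic_irq_mask(struct irq_data *d)
{
        struct plic_priv *priv = irq_data_get_irq_chip_data(d);

        writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}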

This series depends on my other series[2] making the IRQ affinity mask
behavior more consistent between uniprocessor and SMP configurations.
(The Allwinner D1 is a uniprocessor SoC containing a PLIC.)

A further optimization is to take advantage of the fact that multiple
IRQs can be claimed at once. This allows removing the mask operations
for oneshot IRQs -- i.e. combining IRQCHIP_ONESHOT_SAFE and
IRQCHIP_EOI_THREADED, which is not currently supported. I will send
that optimization as a separate series, since it makes more invasive
changes to the generic IRQ code.
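
The underlying reason this is possible: each read of the PLIC claim
register hands out another pending, enabled source, and a source that
has been claimed but not yet completed is not forwarded again, so a
oneshot IRQ does not strictly need to be masked while its handler
thread runs. As a rough sketch, condensed from the driver's existing
chained handler plic_handle_irq() (error handling omitted; identifiers
not shown in the diff below follow the current driver):

        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
        irq_hw_number_t hwirq;

        /* Each readl() of the claim register returns the next pending,
         * enabled hwirq, or zero once nothing is left to claim. */
        while ((hwirq = readl(handler->hart_base + CONTEXT_CLAIM)))
                generic_handle_domain_irq(handler->priv->irqdomain, hwirq);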

[1]: https://lore.kernel.org/lkml/[email protected]/
[2]: https://lore.kernel.org/lkml/[email protected]/

Changes in v3:
- Rebased on top of irqchip-next
- Split affinity series and PLIC series

Samuel Holland (2):
irqchip/sifive-plic: Make better use of the effective affinity mask
irqchip/sifive-plic: Separate the enable and mask operations

drivers/irqchip/Kconfig | 1 +
drivers/irqchip/irq-sifive-plic.c | 64 ++++++++++++++++---------------
2 files changed, 35 insertions(+), 30 deletions(-)

--
2.35.1


2022-07-01 20:59:53

by Samuel Holland

Subject: [PATCH v3 2/2] irqchip/sifive-plic: Separate the enable and mask operations

The PLIC has two per-IRQ checks before sending an IRQ to a hart context.
First, it checks that the IRQ's priority is nonzero. Then, it checks
that the enable bit is set for that combination of IRQ and context.

Currently, the PLIC driver sets both the priority value and the enable
bit in its (un)mask operations. However, modifying the enable bit is
problematic for two reasons:
1) The enable bits are packed, so changes are not atomic and require
taking a spinlock.
2) The following requirement from the PLIC spec, which explains the
racy (un)mask operations in plic_irq_eoi():

If the completion ID does not match an interrupt source
that is currently enabled for the target, the completion
is silently ignored.

Both of these problems are solved by using the priority value to mask
IRQs. Each IRQ has a separate priority register, so writing the priority
value is atomic. And since the enable bit remains set while an IRQ is
masked, the EOI operation works normally. The enable bits are still used
to control the IRQ's affinity.
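
For context, the locked read-modify-write behind point 1 is
plic_toggle(), which this patch keeps but moves off the mask/unmask
path; it now only runs from the enable/disable and init paths. Roughly,
as in the existing driver (condensed, not part of this diff):

static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
        /* 32 enable bits are packed into each 32-bit register */
        u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
        u32 hwirq_mask = 1 << (hwirq % 32);

        if (enable)
                writel(readl(reg) | hwirq_mask, reg);
        else
                writel(readl(reg) & ~hwirq_mask, reg);
}

static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
        /* the read-modify-write above must be serialized per context */
        raw_spin_lock(&handler->enable_lock);
        __plic_toggle(handler->enable_base, hwirq, enable);
        raw_spin_unlock(&handler->enable_lock);
}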

Signed-off-by: Samuel Holland <[email protected]>
---

Changes in v3:
- Rebased on top of irqchip-next

drivers/irqchip/irq-sifive-plic.c | 55 +++++++++++++++++++------------
1 file changed, 34 insertions(+), 21 deletions(-)

diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 46595e607b0e..ba4938188570 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -108,9 +108,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
struct irq_data *d, int enable)
{
int cpu;
- struct plic_priv *priv = irq_data_get_irq_chip_data(d);

- writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
for_each_cpu(cpu, mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

@@ -118,16 +116,37 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
}
}

-static void plic_irq_unmask(struct irq_data *d)
+static void plic_irq_enable(struct irq_data *d)
{
plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
}

-static void plic_irq_mask(struct irq_data *d)
+static void plic_irq_disable(struct irq_data *d)
{
plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}

+static void plic_irq_unmask(struct irq_data *d)
+{
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+}
+
+static void plic_irq_mask(struct irq_data *d)
+{
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+}
+
+static void plic_irq_eoi(struct irq_data *d)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+}
+
#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
@@ -146,32 +165,21 @@ static int plic_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;

- plic_irq_mask(d);
+ plic_irq_disable(d);

irq_data_update_effective_affinity(d, cpumask_of(cpu));

- if (!irqd_irq_masked(d))
- plic_irq_unmask(d);
+ if (!irqd_irq_disabled(d))
+ plic_irq_enable(d);

return IRQ_SET_MASK_OK_DONE;
}
#endif

-static void plic_irq_eoi(struct irq_data *d)
-{
- struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-
- if (irqd_irq_masked(d)) {
- plic_irq_unmask(d);
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
- plic_irq_mask(d);
- } else {
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
- }
-}
-
static struct irq_chip plic_edge_chip = {
.name = "SiFive PLIC",
+ .irq_enable = plic_irq_enable,
+ .irq_disable = plic_irq_disable,
.irq_ack = plic_irq_eoi,
.irq_mask = plic_irq_mask,
.irq_unmask = plic_irq_unmask,
@@ -184,6 +192,8 @@ static struct irq_chip plic_edge_chip = {

static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
+ .irq_enable = plic_irq_enable,
+ .irq_disable = plic_irq_disable,
.irq_mask = plic_irq_mask,
.irq_unmask = plic_irq_unmask,
.irq_eoi = plic_irq_eoi,
@@ -429,8 +439,11 @@ static int __init __plic_init(struct device_node *node,
i * CONTEXT_ENABLE_SIZE;
handler->priv = priv;
done:
- for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+ for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
plic_toggle(handler, hwirq, 0);
+ writel(1, priv->regs + PRIORITY_BASE +
+ hwirq * PRIORITY_PER_ID);
+ }
nr_handlers++;
}

--
2.35.1

Subject: [irqchip: irq/irqchip-next] irqchip/sifive-plic: Separate the enable and mask operations

The following commit has been merged into the irq/irqchip-next branch of irqchip:

Commit-ID: b5121acddbfc5be09947b95991cae8c645eabcb5
Gitweb: https://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms/b5121acddbfc5be09947b95991cae8c645eabcb5
Author: Samuel Holland <[email protected]>
AuthorDate: Fri, 01 Jul 2022 15:24:40 -05:00
Committer: Marc Zyngier <[email protected]>
CommitterDate: Thu, 07 Jul 2022 14:19:48 +01:00

irqchip/sifive-plic: Separate the enable and mask operations

The PLIC has two per-IRQ checks before sending an IRQ to a hart context.
First, it checks that the IRQ's priority is nonzero. Then, it checks
that the enable bit is set for that combination of IRQ and context.

Currently, the PLIC driver sets both the priority value and the enable
bit in its (un)mask operations. However, modifying the enable bit is
problematic for two reasons:
1) The enable bits are packed, so changes are not atomic and require
taking a spinlock.
2) The following requirement from the PLIC spec, which explains the
racy (un)mask operations in plic_irq_eoi():

If the completion ID does not match an interrupt source
that is currently enabled for the target, the completion
is silently ignored.

Both of these problems are solved by using the priority value to mask
IRQs. Each IRQ has a separate priority register, so writing the priority
value is atomic. And since the enable bit remains set while an IRQ is
masked, the EOI operation works normally. The enable bits are still used
to control the IRQ's affinity.

Signed-off-by: Samuel Holland <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
---
drivers/irqchip/irq-sifive-plic.c | 55 ++++++++++++++++++------------
1 file changed, 34 insertions(+), 21 deletions(-)

diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 46595e6..ba49381 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -108,9 +108,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
struct irq_data *d, int enable)
{
int cpu;
- struct plic_priv *priv = irq_data_get_irq_chip_data(d);

- writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
for_each_cpu(cpu, mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

@@ -118,16 +116,37 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
}
}

-static void plic_irq_unmask(struct irq_data *d)
+static void plic_irq_enable(struct irq_data *d)
{
plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
}

-static void plic_irq_mask(struct irq_data *d)
+static void plic_irq_disable(struct irq_data *d)
{
plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}

+static void plic_irq_unmask(struct irq_data *d)
+{
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+}
+
+static void plic_irq_mask(struct irq_data *d)
+{
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+}
+
+static void plic_irq_eoi(struct irq_data *d)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+}
+
#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
@@ -146,32 +165,21 @@ static int plic_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;

- plic_irq_mask(d);
+ plic_irq_disable(d);

irq_data_update_effective_affinity(d, cpumask_of(cpu));

- if (!irqd_irq_masked(d))
- plic_irq_unmask(d);
+ if (!irqd_irq_disabled(d))
+ plic_irq_enable(d);

return IRQ_SET_MASK_OK_DONE;
}
#endif

-static void plic_irq_eoi(struct irq_data *d)
-{
- struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-
- if (irqd_irq_masked(d)) {
- plic_irq_unmask(d);
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
- plic_irq_mask(d);
- } else {
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
- }
-}
-
static struct irq_chip plic_edge_chip = {
.name = "SiFive PLIC",
+ .irq_enable = plic_irq_enable,
+ .irq_disable = plic_irq_disable,
.irq_ack = plic_irq_eoi,
.irq_mask = plic_irq_mask,
.irq_unmask = plic_irq_unmask,
@@ -184,6 +192,8 @@ static struct irq_chip plic_edge_chip = {

static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
+ .irq_enable = plic_irq_enable,
+ .irq_disable = plic_irq_disable,
.irq_mask = plic_irq_mask,
.irq_unmask = plic_irq_unmask,
.irq_eoi = plic_irq_eoi,
@@ -429,8 +439,11 @@ static int __init __plic_init(struct device_node *node,
i * CONTEXT_ENABLE_SIZE;
handler->priv = priv;
done:
- for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+ for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
plic_toggle(handler, hwirq, 0);
+ writel(1, priv->regs + PRIORITY_BASE +
+ hwirq * PRIORITY_PER_ID);
+ }
nr_handlers++;
}

Subject: [irqchip: irq/irqchip-next] irqchip/sifive-plic: Separate the enable and mask operations

The following commit has been merged into the irq/irqchip-next branch of irqchip:

Commit-ID: a1706a1c5062e0908528170f853601ed53f428c8
Gitweb: https://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms/a1706a1c5062e0908528170f853601ed53f428c8
Author: Samuel Holland <[email protected]>
AuthorDate: Fri, 01 Jul 2022 15:24:40 -05:00
Committer: Marc Zyngier <[email protected]>
CommitterDate: Sun, 10 Jul 2022 09:50:04 +01:00

irqchip/sifive-plic: Separate the enable and mask operations

The PLIC has two per-IRQ checks before sending an IRQ to a hart context.
First, it checks that the IRQ's priority is nonzero. Then, it checks
that the enable bit is set for that combination of IRQ and context.

Currently, the PLIC driver sets both the priority value and the enable
bit in its (un)mask operations. However, modifying the enable bit is
problematic for two reasons:
1) The enable bits are packed, so changes are not atomic and require
taking a spinlock.
2) The following requirement from the PLIC spec, which explains the
racy (un)mask operations in plic_irq_eoi():

If the completion ID does not match an interrupt source
that is currently enabled for the target, the completion
is silently ignored.

Both of these problems are solved by using the priority value to mask
IRQs. Each IRQ has a separate priority register, so writing the priority
value is atomic. And since the enable bit remains set while an IRQ is
masked, the EOI operation works normally. The enable bits are still used
to control the IRQ's affinity.

Signed-off-by: Samuel Holland <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
---
drivers/irqchip/irq-sifive-plic.c | 55 ++++++++++++++++++------------
1 file changed, 34 insertions(+), 21 deletions(-)

diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 46595e6..ba49381 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -108,9 +108,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
struct irq_data *d, int enable)
{
int cpu;
- struct plic_priv *priv = irq_data_get_irq_chip_data(d);

- writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
for_each_cpu(cpu, mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

@@ -118,16 +116,37 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
}
}

-static void plic_irq_unmask(struct irq_data *d)
+static void plic_irq_enable(struct irq_data *d)
{
plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
}

-static void plic_irq_mask(struct irq_data *d)
+static void plic_irq_disable(struct irq_data *d)
{
plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}

+static void plic_irq_unmask(struct irq_data *d)
+{
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+}
+
+static void plic_irq_mask(struct irq_data *d)
+{
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+}
+
+static void plic_irq_eoi(struct irq_data *d)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+}
+
#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
@@ -146,32 +165,21 @@ static int plic_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;

- plic_irq_mask(d);
+ plic_irq_disable(d);

irq_data_update_effective_affinity(d, cpumask_of(cpu));

- if (!irqd_irq_masked(d))
- plic_irq_unmask(d);
+ if (!irqd_irq_disabled(d))
+ plic_irq_enable(d);

return IRQ_SET_MASK_OK_DONE;
}
#endif

-static void plic_irq_eoi(struct irq_data *d)
-{
- struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-
- if (irqd_irq_masked(d)) {
- plic_irq_unmask(d);
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
- plic_irq_mask(d);
- } else {
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
- }
-}
-
static struct irq_chip plic_edge_chip = {
.name = "SiFive PLIC",
+ .irq_enable = plic_irq_enable,
+ .irq_disable = plic_irq_disable,
.irq_ack = plic_irq_eoi,
.irq_mask = plic_irq_mask,
.irq_unmask = plic_irq_unmask,
@@ -184,6 +192,8 @@ static struct irq_chip plic_edge_chip = {

static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
+ .irq_enable = plic_irq_enable,
+ .irq_disable = plic_irq_disable,
.irq_mask = plic_irq_mask,
.irq_unmask = plic_irq_unmask,
.irq_eoi = plic_irq_eoi,
@@ -429,8 +439,11 @@ static int __init __plic_init(struct device_node *node,
i * CONTEXT_ENABLE_SIZE;
handler->priv = priv;
done:
- for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+ for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
plic_toggle(handler, hwirq, 0);
+ writel(1, priv->regs + PRIORITY_BASE +
+ hwirq * PRIORITY_PER_ID);
+ }
nr_handlers++;
}