This patchset primarily adds IRQ affinity support to the PLIC driver,
along with other improvements.
It provides a mechanism to explicitly route external interrupts to particular
CPUs using the smp_affinity attribute of each Linux IRQ. Also, we can now
use an IRQ balancer from kernel space or user space.
The patchset has been tested on the QEMU virt machine. It is based on
Linux-4.20 and can be found in the riscv_plic_irq_affinity_v4 branch of:
https://github.com/avpatel/linux.git
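For illustration, here is a minimal, hypothetical sketch (not part of this
series) of how a driver could pin its interrupt to a preferred CPU from
kernel space using the generic irq_set_affinity_hint() helper; the chosen
CPU below is made up, and the user-space path via /proc/irq/<N>/smp_affinity
is demonstrated in the last patch.

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/*
 * Hypothetical example only: steer a Linux IRQ towards CPU2 from kernel
 * space. With this series applied, the PLIC irq_set_affinity() callback
 * enables the interrupt only on the selected CPU.
 */
static void example_pin_irq_to_cpu(unsigned int irq)
{
        int err;

        err = irq_set_affinity_hint(irq, cpumask_of(2));
        if (err)
                pr_warn("IRQ%u: failed to set affinity hint (%d)\n", irq, err);
}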
Changes since v3:
- Dropped PATCH2
- Added PATCH to not inline plic_toggle() and plic_irq_toggle()
- Moved PATCH3 changes to PATCH6
- Used WARN_ON_ONCE() instead of WARN_ON() in PATCH5
Changes since v2:
- Fixed incorrect address of enable registers using sizeof(u32) in PATCH1
- Retained comment about need for locking in PATCH1
- Split PATCH2 into two patches
- Split PATCH3 into two patches
- Minor fix in commit description of PATCH4
Changes since v1:
- Removed a few whitespace changes from PATCH1
- Kept the use of DEFINE_PER_CPU() as-is
Anup Patel (5):
irqchip: sifive-plic: Pre-compute context hart base and enable base
irqchip: sifive-plic: Don't inline plic_toggle() and plic_irq_toggle()
irqchip: sifive-plic: Add warning in plic_init() if handler already
present
irqchip: sifive-plic: Differentiate between PLIC handler and context
irqchip: sifive-plic: Implement irq_set_affinity() for SMP host
drivers/irqchip/irq-sifive-plic.c | 110 +++++++++++++++++++-----------
1 file changed, 71 insertions(+), 39 deletions(-)
--
2.17.1
This patch makes the following optimizations:
1. Pre-compute the hart base for each context handler
2. Pre-compute the enable base for each context handler
3. Use a per-context-handler enable lock instead of the
global plic_toggle_lock
Signed-off-by: Anup Patel <[email protected]>
---
drivers/irqchip/irq-sifive-plic.c | 47 ++++++++++++++-----------------
1 file changed, 21 insertions(+), 26 deletions(-)
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 357e9daf94ae..c23a293a2aae 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -59,37 +59,28 @@ static void __iomem *plic_regs;
struct plic_handler {
bool present;
- int ctxid;
+ void __iomem *hart_base;
+ /*
+ * Protect mask operations on the registers given that we can't
+ * assume atomic memory operations work on them.
+ */
+ raw_spinlock_t enable_lock;
+ void __iomem *enable_base;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
-static inline void __iomem *plic_hart_offset(int ctxid)
-{
- return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
-}
-
-static inline u32 __iomem *plic_enable_base(int ctxid)
-{
- return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
-}
-
-/*
- * Protect mask operations on the registers given that we can't assume that
- * atomic memory operations work on them.
- */
-static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
-
-static inline void plic_toggle(int ctxid, int hwirq, int enable)
+static inline void plic_toggle(struct plic_handler *handler,
+ int hwirq, int enable)
{
- u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+ u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
u32 hwirq_mask = 1 << (hwirq % 32);
- raw_spin_lock(&plic_toggle_lock);
+ raw_spin_lock(&handler->enable_lock);
if (enable)
writel(readl(reg) | hwirq_mask, reg);
else
writel(readl(reg) & ~hwirq_mask, reg);
- raw_spin_unlock(&plic_toggle_lock);
+ raw_spin_unlock(&handler->enable_lock);
}
static inline void plic_irq_toggle(struct irq_data *d, int enable)
@@ -101,7 +92,7 @@ static inline void plic_irq_toggle(struct irq_data *d, int enable)
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present)
- plic_toggle(handler->ctxid, d->hwirq, enable);
+ plic_toggle(handler, d->hwirq, enable);
}
}
@@ -150,7 +141,7 @@ static struct irq_domain *plic_irqdomain;
static void plic_handle_irq(struct pt_regs *regs)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+ void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
irq_hw_number_t hwirq;
WARN_ON_ONCE(!handler->present);
@@ -239,12 +230,16 @@ static int __init plic_init(struct device_node *node,
cpu = riscv_hartid_to_cpuid(hartid);
handler = per_cpu_ptr(&plic_handlers, cpu);
handler->present = true;
- handler->ctxid = i;
+ handler->hart_base =
+ plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+ raw_spin_lock_init(&handler->enable_lock);
+ handler->enable_base =
+ plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
/* priority must be > threshold to trigger an interrupt */
- writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+ writel(0, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
- plic_toggle(i, hwirq, 0);
+ plic_toggle(handler, hwirq, 0);
nr_mapped++;
}
--
2.17.1
plic_toggle() uses raw_spin_lock() and plic_irq_toggle() has a for loop,
so neither function is a good candidate for inlining; hence this patch
removes the inline keyword.
Signed-off-by: Anup Patel <[email protected]>
---
drivers/irqchip/irq-sifive-plic.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index c23a293a2aae..01bbbbffbcae 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -69,8 +69,8 @@ struct plic_handler {
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
-static inline void plic_toggle(struct plic_handler *handler,
- int hwirq, int enable)
+static void plic_toggle(struct plic_handler *handler,
+ int hwirq, int enable)
{
u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
u32 hwirq_mask = 1 << (hwirq % 32);
@@ -83,7 +83,7 @@ static inline void plic_toggle(struct plic_handler *handler,
raw_spin_unlock(&handler->enable_lock);
}
-static inline void plic_irq_toggle(struct irq_data *d, int enable)
+static void plic_irq_toggle(struct irq_data *d, int enable)
{
int cpu;
--
2.17.1
We have two entries (one for M-mode and another for S-mode) in the
interrupts-extended DT property of the PLIC DT node for each HART. It is
expected that the firmware/bootloader will set the M-mode HWIRQ line of
each HART to 0xffffffff (i.e. -1) in the interrupts-extended DT property
because Linux runs in S-mode only.
If the firmware/bootloader is buggy, it will not correctly update the
interrupts-extended DT property, which might result in a plic_handler
being configured twice. This patch adds a warning in plic_init() if a
plic_handler is already marked present. The warning provides a hint
about an incorrectly updated interrupts-extended DT property.
Signed-off-by: Anup Patel <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
---
drivers/irqchip/irq-sifive-plic.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 01bbbbffbcae..b9a0bcefe426 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -229,6 +229,11 @@ static int __init plic_init(struct device_node *node,
cpu = riscv_hartid_to_cpuid(hartid);
handler = per_cpu_ptr(&plic_handlers, cpu);
+ if (handler->present) {
+ pr_warn("handler already present for context %d.\n", i);
+ continue;
+ }
+
handler->present = true;
handler->hart_base =
plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
--
2.17.1
Currently, on an SMP host, all CPUs take external interrupts routed via
the PLIC. All CPUs try to claim a given external interrupt, but only one
of them succeeds while the other CPUs simply resume whatever they were
doing before. This means that with N CPUs, N-1 CPUs will always fail to
claim each external interrupt and waste CPU time.
Instead, each external interrupt should be taken by only one CPU, and
there should be a provision to explicitly specify IRQ affinity from
kernel space or user space.
This patch provides an irq_set_affinity() implementation for the PLIC
driver. It also updates irq_enable() such that a PLIC interrupt is only
enabled on one of the CPUs specified in the IRQ affinity mask.
With this patch in place, we can change IRQ affinity at any time from
user space using procfs.
Example:
/ # cat /proc/interrupts
            CPU0       CPU1       CPU2       CPU3
   8:         44          0          0          0  SiFive PLIC   8  virtio0
  10:         48          0          0          0  SiFive PLIC  10  ttyS0
IPI0:         55        663         58        363  Rescheduling interrupts
IPI1:          0          1          3         16  Function call interrupts
/ #
/ #
/ # echo 4 > /proc/irq/10/smp_affinity
/ #
/ # cat /proc/interrupts
            CPU0       CPU1       CPU2       CPU3
   8:         45          0          0          0  SiFive PLIC   8  virtio0
  10:        160          0         17          0  SiFive PLIC  10  ttyS0
IPI0:         68        693         77        410  Rescheduling interrupts
IPI1:          0          2          3         16  Function call interrupts
Signed-off-by: Anup Patel <[email protected]>
---
drivers/irqchip/irq-sifive-plic.c | 44 ++++++++++++++++++++++++++-----
1 file changed, 38 insertions(+), 6 deletions(-)
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 24c906f4be93..47da70795145 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -83,29 +83,58 @@ static void plic_toggle(struct plic_handler *handler,
raw_spin_unlock(&handler->enable_lock);
}
-static void plic_irq_toggle(struct irq_data *d, int enable)
+static void plic_irq_toggle(const struct cpumask *mask, int hwirq, int enable)
{
int cpu;
- writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
- for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+ writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+ for_each_cpu(cpu, mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present)
- plic_toggle(handler, d->hwirq, enable);
+ plic_toggle(handler, hwirq, enable);
}
}
static void plic_irq_enable(struct irq_data *d)
{
- plic_irq_toggle(d, 1);
+ unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+ cpu_online_mask);
+ if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+ return;
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}
static void plic_irq_disable(struct irq_data *d)
{
- plic_irq_toggle(d, 0);
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
+{
+ unsigned int cpu;
+
+ if (!force)
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask_val);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (!irqd_irq_disabled(d)) {
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+ }
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
/*
@@ -114,6 +143,9 @@ static struct irq_chip plic_chip = {
*/
.irq_enable = plic_irq_enable,
.irq_disable = plic_irq_disable,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = plic_set_affinity,
+#endif
};
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
--
2.17.1
We explicitly differentiate between a PLIC handler and a PLIC context
because a PLIC context corresponds to a given mode of a HART, whereas a
PLIC handler is a per-CPU software construct meant for handling
interrupts from a particular PLIC context.
To achieve this differentiation, we rename "nr_handlers" to "nr_contexts"
and "nr_mapped" to "nr_handlers" in plic_init().
Signed-off-by: Anup Patel <[email protected]>
---
drivers/irqchip/irq-sifive-plic.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index b9a0bcefe426..24c906f4be93 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -177,7 +177,7 @@ static int plic_find_hart_id(struct device_node *node)
static int __init plic_init(struct device_node *node,
struct device_node *parent)
{
- int error = 0, nr_handlers, nr_mapped = 0, i;
+ int error = 0, nr_contexts, nr_handlers = 0, i;
u32 nr_irqs;
if (plic_regs) {
@@ -194,10 +194,10 @@ static int __init plic_init(struct device_node *node,
if (WARN_ON(!nr_irqs))
goto out_iounmap;
- nr_handlers = of_irq_count(node);
- if (WARN_ON(!nr_handlers))
+ nr_contexts = of_irq_count(node);
+ if (WARN_ON(!nr_contexts))
goto out_iounmap;
- if (WARN_ON(nr_handlers < num_possible_cpus()))
+ if (WARN_ON(nr_contexts < num_possible_cpus()))
goto out_iounmap;
error = -ENOMEM;
@@ -206,7 +206,7 @@ static int __init plic_init(struct device_node *node,
if (WARN_ON(!plic_irqdomain))
goto out_iounmap;
- for (i = 0; i < nr_handlers; i++) {
+ for (i = 0; i < nr_contexts; i++) {
struct of_phandle_args parent;
struct plic_handler *handler;
irq_hw_number_t hwirq;
@@ -245,11 +245,11 @@ static int __init plic_init(struct device_node *node,
writel(0, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
plic_toggle(handler, hwirq, 0);
- nr_mapped++;
+ nr_handlers++;
}
- pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
- nr_irqs, nr_mapped, nr_handlers);
+ pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
+ nr_irqs, nr_handlers, nr_contexts);
set_handle_irq(plic_handle_irq);
return 0;
--
2.17.1
On Thu, Dec 27, 2018 at 4:48 PM Anup Patel <[email protected]> wrote:
>
> This patchset primarily adds IRQ affinity support to the PLIC driver,
> along with other improvements.
>
> It provides a mechanism to explicitly route external interrupts to particular
> CPUs using the smp_affinity attribute of each Linux IRQ. Also, we can now
> use an IRQ balancer from kernel space or user space.
>
> The patchset has been tested on the QEMU virt machine. It is based on
> Linux-4.20 and can be found in the riscv_plic_irq_affinity_v4 branch of:
> https://github.com/avpatel/linux.git
>
> Changes since v3:
> - Dropped PATCH2
> - Added PATCH to not inline plic_toggle() and plic_irq_toggle()
> - Moved PATCH3 changes to PATCH6
> - Used WARN_ON_ONCE() instead of WARN_ON() in PATCH5
>
> Changes since v2:
> - Fixed incorrect address of enable registers using sizeof(u32) in PATCH1
> - Retained comment about need for locking in PATCH1
> - Split PATCH2 into two patches
> - Split PATCH3 into two patches
> - Minor fix in commit description of PATCH4
>
> Changes since v1:
> - Removed a few whitespace changes from PATCH1
> - Kept the use of DEFINE_PER_CPU() as-is
>
> Anup Patel (5):
> irqchip: sifive-plic: Pre-compute context hart base and enable base
> irqchip: sifive-plic: Don't inline plic_toggle() and plic_irq_toggle()
> irqchip: sifive-plic: Add warning in plic_init() if handler already
> present
> irqchip: sifive-plic: Differentiate between PLIC handler and context
> irqchip: sifive-plic: Implement irq_set_affinity() for SMP host
>
> drivers/irqchip/irq-sifive-plic.c | 110 +++++++++++++++++++-----------
> 1 file changed, 71 insertions(+), 39 deletions(-)
>
> --
> 2.17.1
>
Hi All,
Any comments on this series??
Regards,
Anup
On Thu, Dec 27, 2018 at 04:48:19PM +0530, Anup Patel wrote:
> We have two entries (one for M-mode and another for S-mode) in the
> interrupts-extended DT property of the PLIC DT node for each HART. It is
> expected that the firmware/bootloader will set the M-mode HWIRQ line of
> each HART to 0xffffffff (i.e. -1) in the interrupts-extended DT property
> because Linux runs in S-mode only.
>
> If the firmware/bootloader is buggy, it will not correctly update the
> interrupts-extended DT property, which might result in a plic_handler
> being configured twice. This patch adds a warning in plic_init() if a
> plic_handler is already marked present. The warning provides a hint
> about an incorrectly updated interrupts-extended DT property.
>
> Signed-off-by: Anup Patel <[email protected]>
> Reviewed-by: Christoph Hellwig <[email protected]>
> ---
> drivers/irqchip/irq-sifive-plic.c | 5 +++++
> 1 file changed, 5 insertions(+)
>
> diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
> index 01bbbbffbcae..b9a0bcefe426 100644
> --- a/drivers/irqchip/irq-sifive-plic.c
> +++ b/drivers/irqchip/irq-sifive-plic.c
> @@ -229,6 +229,11 @@ static int __init plic_init(struct device_node *node,
>
> cpu = riscv_hartid_to_cpuid(hartid);
> handler = per_cpu_ptr(&plic_handlers, cpu);
> + if (handler->present) {
> + pr_warn("handler already present for context %d.\n", i);
> + continue;
> + }
> +
Just use WARN_ON_ONCE?
On Thu, Dec 27, 2018 at 04:48:18PM +0530, Anup Patel wrote:
> plic_toggle() uses raw_spin_lock() and plic_irq_toggle() has a for loop,
> so neither function is a good candidate for inlining; hence this patch
> removes the inline keyword.
That is a weird argument for a function which has by design exactly
two callers and is in the hot path. The alternative to the inline
here would be to duplicate the code.
> + if (!force)
> + cpu = cpumask_any_and(mask_val, cpu_online_mask);
> + else
> + cpu = cpumask_first(mask_val);
Any reason for the inverted test?
Otherwise this looks fine to me:
Reviewed-by: Christoph Hellwig <[email protected]>
On Thu, Dec 27, 2018 at 04:48:20PM +0530, Anup Patel wrote:
> We explicitly differentiate between a PLIC handler and a PLIC context
> because a PLIC context corresponds to a given mode of a HART, whereas a
> PLIC handler is a per-CPU software construct meant for handling
> interrupts from a particular PLIC context.
>
> To achieve this differentiation, we rename "nr_handlers" to "nr_contexts"
> and "nr_mapped" to "nr_handlers" in plic_init().
Not sure what this differentiates that we previously didn't. As far
as I can tell it is just a slight naming cleanup.
On Tue, Jan 15, 2019 at 9:24 PM Christoph Hellwig <[email protected]> wrote:
>
> On Thu, Dec 27, 2018 at 04:48:19PM +0530, Anup Patel wrote:
> > We have two entries (one for M-mode and another for S-mode) in the
> > interrupts-extended DT property of the PLIC DT node for each HART. It is
> > expected that the firmware/bootloader will set the M-mode HWIRQ line of
> > each HART to 0xffffffff (i.e. -1) in the interrupts-extended DT property
> > because Linux runs in S-mode only.
> >
> > If the firmware/bootloader is buggy, it will not correctly update the
> > interrupts-extended DT property, which might result in a plic_handler
> > being configured twice. This patch adds a warning in plic_init() if a
> > plic_handler is already marked present. The warning provides a hint
> > about an incorrectly updated interrupts-extended DT property.
> >
> > Signed-off-by: Anup Patel <[email protected]>
> > Reviewed-by: Christoph Hellwig <[email protected]>
> > ---
> > drivers/irqchip/irq-sifive-plic.c | 5 +++++
> > 1 file changed, 5 insertions(+)
> >
> > diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
> > index 01bbbbffbcae..b9a0bcefe426 100644
> > --- a/drivers/irqchip/irq-sifive-plic.c
> > +++ b/drivers/irqchip/irq-sifive-plic.c
> > @@ -229,6 +229,11 @@ static int __init plic_init(struct device_node *node,
> >
> > cpu = riscv_hartid_to_cpuid(hartid);
> > handler = per_cpu_ptr(&plic_handlers, cpu);
> > + if (handler->present) {
> > + pr_warn("handler already present for context %d.\n", i);
> > + continue;
> > + }
> > +
>
> Just use WARN_ON_ONCE?
WARN_ON_ONCE() is not suitable here because we want to know all the
context IDs for which a handler is already present.
Regards,
Anup
On Tue, Jan 15, 2019 at 9:24 PM Christoph Hellwig <[email protected]> wrote:
>
> On Thu, Dec 27, 2018 at 04:48:18PM +0530, Anup Patel wrote:
> > plic_toggle() uses raw_spin_lock() and plic_irq_toggle() has a for loop,
> > so neither function is a good candidate for inlining; hence this patch
> > removes the inline keyword.
>
> That is a weird argument for a function which has by design exactly
> two callers and is in the hot path. The alternative to the inline
> here would be to duplicate the code.
It's strange that you see it as a weird argument. Both plic_toggle()
and plic_irq_toggle() are 5+ line functions with loops. The loop is
explicit in plic_irq_toggle(), whereas raw_spin_lock() in plic_toggle()
expands into an inline-assembly spin loop because raw_spin_lock() is a
macro (not a function).
Further, looking at the disassembly of both functions, each is 55+
instructions. I think we should let GCC decide whether these functions
are inlined rather than explicitly marking them inline.
Regards,
Anup
On Tue, Jan 15, 2019 at 9:27 PM Christoph Hellwig <[email protected]> wrote:
>
> > + if (!force)
> > + cpu = cpumask_any_and(mask_val, cpu_online_mask);
> > + else
> > + cpu = cpumask_first(mask_val);
>
> Any reason for the inverted test?
Okay, I will not use the inverted test here.
>
> Otherwise this looks fine to me:
>
> Reviewed-by: Christoph Hellwig <[email protected]>
Thanks,
Anup
On Tue, Jan 15, 2019 at 9:26 PM Christoph Hellwig <[email protected]> wrote:
>
> On Thu, Dec 27, 2018 at 04:48:20PM +0530, Anup Patel wrote:
> > We explicitly differentiate between a PLIC handler and a PLIC context
> > because a PLIC context corresponds to a given mode of a HART, whereas a
> > PLIC handler is a per-CPU software construct meant for handling
> > interrupts from a particular PLIC context.
> >
> > To achieve this differentiation, we rename "nr_handlers" to "nr_contexts"
> > and "nr_mapped" to "nr_handlers" in plic_init().
>
> Not sure what this differentiates that we previously didn't. As far
> as I can tell it is just a slight naming cleanup.
There is a disconnect between the PLIC documentation and the PLIC driver.
If we have 8 HARTs then we can have 16 PLIC contexts, whereas the number
of PLIC handlers (a SW construct) always equals the number of HARTs. In
other words, there is a separate PLIC context for the M-mode and the
S-mode of each HART.
Linux runs in S-mode, so we can only use the PLIC contexts meant for
S-mode. This means the per-CPU handlers in the PLIC driver only handle
the PLIC S-mode context of a HART.
This patch makes the PLIC driver more readable and better matched to the
PLIC HW documentation by differentiating between PLIC handlers and PLIC
contexts.
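To make the arithmetic above concrete, here is a rough, hypothetical C
sketch (not the driver code itself), assuming one M-mode and one S-mode
context per HART with the M-mode entries marked as -1 in
interrupts-extended:

/*
 * Hypothetical illustration only: with 8 HARTs and one M-mode plus one
 * S-mode context per HART, the PLIC exposes 16 contexts, while Linux
 * (running in S-mode) ends up with at most 8 per-CPU handlers.
 */
#define NR_HARTS        8
#define NR_CONTEXTS     (2 * NR_HARTS)  /* M-mode + S-mode per HART */

static int example_count_handlers(const int context_hwirq[NR_CONTEXTS])
{
        int i, nr_handlers = 0;

        for (i = 0; i < NR_CONTEXTS; i++) {
                /* firmware marks the M-mode entries as -1 (0xffffffff) */
                if (context_hwirq[i] == -1)
                        continue;
                /* one per-CPU handler per usable S-mode context */
                nr_handlers++;
        }

        return nr_handlers;     /* <= NR_HARTS, while NR_CONTEXTS == 16 */
}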
Regards,
Anup
On Tue, 08 Jan 2019 04:14:05 PST (-0800), [email protected] wrote:
> On Thu, Dec 27, 2018 at 4:48 PM Anup Patel <[email protected]> wrote:
>>
>> This patchset primarily adds IRQ affinity support to the PLIC driver,
>> along with other improvements.
>>
>> It provides a mechanism to explicitly route external interrupts to particular
>> CPUs using the smp_affinity attribute of each Linux IRQ. Also, we can now
>> use an IRQ balancer from kernel space or user space.
>>
>> The patchset has been tested on the QEMU virt machine. It is based on
>> Linux-4.20 and can be found in the riscv_plic_irq_affinity_v4 branch of:
>> https://github.com/avpatel/linux.git
>>
>> Changes since v3:
>> - Dropped PATCH2
>> - Added PATCH to not inline plic_toggle() and plic_irq_toggle()
>> - Moved PATCH3 changes to PATCH6
>> - Used WARN_ON_ONCE() instead of WARN_ON() in PATCH5
>>
>> Changes since v2:
>> - Fixed incorrect address of enable registers using sizeof(u32) in PATCH1
>> - Retained comment about need for locking in PATCH1
>> - Split PATCH2 into two patches
>> - Split PATCH3 into two patches
>> - Minor fix in commit description of PATCH4
>>
>> Changes since v1:
>> - Removed a few whitespace changes from PATCH1
>> - Kept the use of DEFINE_PER_CPU() as-is
>>
>> Anup Patel (5):
>> irqchip: sifive-plic: Pre-compute context hart base and enable base
>> irqchip: sifive-plic: Don't inline plic_toggle() and plic_irq_toggle()
>> irqchip: sifive-plic: Add warning in plic_init() if handler already
>> present
>> irqchip: sifive-plic: Differentiate between PLIC handler and context
>> irqchip: sifive-plic: Implement irq_set_affinity() for SMP host
>>
>> drivers/irqchip/irq-sifive-plic.c | 110 +++++++++++++++++++-----------
>> 1 file changed, 71 insertions(+), 39 deletions(-)
>>
>> --
>> 2.17.1
>>
>
> Any comments on this series??
Sorry, I haven't had a chance to take a look -- I'm still a bit behind on
getting my act together on our fixes and haven't had time to look at
things for the next merge window.
things for the next merge window.