2018-10-22 12:19:36

by Anup Patel

Subject: [PATCH 0/4] IRQ affinity support in PLIC driver

This patchset primarily adds IRQ affinity support to the PLIC driver
and makes a few related improvements.

The patchset provides a mechanism for explicitly routing external
interrupts to particular CPUs using the smp_affinity attribute of each
Linux IRQ. It also allows an IRQ balancer to be used from kernel space
or user space.
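
For example, once this is in place a driver can pin its interrupt from
kernel space. A minimal sketch (editorial, not part of the series; the
function name is hypothetical and the IRQ number would normally come
from platform_get_irq() or similar):

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Hypothetical helper: route a previously requested IRQ to CPU 2.
 * With the PLIC irq_set_affinity() added by this series, the external
 * interrupt is then enabled for (and taken by) that CPU alone.
 */
static void example_pin_irq(unsigned int irq)
{
	irq_set_affinity_hint(irq, cpumask_of(2));
}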

The patchset is tested on the QEMU virt machine. It is based on
Linux-4.19-rc2 and can be found in the riscv_plic_irq_affinity_v1 branch of:
https://github.com/avpatel/linux.git

Anup Patel (4):
irqchip: sifive-plic: Pre-compute context hart base and enable base
irqchip: sifive-plic: More flexible plic_irq_toggle()
irqchip: sifive-plic: Differentiate between PLIC handler and context
irqchip: sifive-plic: Implement irq_set_affinity() for SMP host

drivers/irqchip/irq-sifive-plic.c | 164 ++++++++++++++++++------------
1 file changed, 99 insertions(+), 65 deletions(-)

--
2.17.1



2018-10-22 12:20:44

by Anup Patel

Subject: [PATCH 1/4] irqchip: sifive-plic: Pre-compute context hart base and enable base

This patch makes the following optimizations:
1. Pre-compute the hart base for each context handler
2. Pre-compute the enable base for each context handler
3. Use a per-context-handler enable lock instead of the
global plic_toggle_lock

Signed-off-by: Anup Patel <[email protected]>
---
drivers/irqchip/irq-sifive-plic.c | 51 +++++++++++++------------------
1 file changed, 22 insertions(+), 29 deletions(-)

diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 357e9daf94ae..f93ec83eaff4 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -36,14 +36,14 @@
* We always hardwire it to one in Linux.
*/
#define PRIORITY_BASE 0
-#define PRIORITY_PER_ID 4
+#define PRIORITY_PER_ID 4

/*
* Each hart context has a vector of interrupt enable bits associated with it.
* There's one bit for each interrupt source.
*/
#define ENABLE_BASE 0x2000
-#define ENABLE_PER_HART 0x80
+#define ENABLE_PER_HART 0x80

/*
* Each hart context has a set of control registers associated with it. Right
@@ -51,45 +51,33 @@
* take an interrupt, and a register to claim interrupts.
*/
#define CONTEXT_BASE 0x200000
-#define CONTEXT_PER_HART 0x1000
-#define CONTEXT_THRESHOLD 0x00
-#define CONTEXT_CLAIM 0x04
+#define CONTEXT_PER_HART 0x1000
+#define CONTEXT_THRESHOLD 0x00
+#define CONTEXT_CLAIM 0x04

static void __iomem *plic_regs;

struct plic_handler {
bool present;
int ctxid;
+ void __iomem *hart_base;
+ raw_spinlock_t enable_lock;
+ void __iomem *enable_base;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

-static inline void __iomem *plic_hart_offset(int ctxid)
+static inline void plic_toggle(struct plic_handler *handler,
+ int hwirq, int enable)
{
- return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
-}
-
-static inline u32 __iomem *plic_enable_base(int ctxid)
-{
- return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
-}
-
-/*
- * Protect mask operations on the registers given that we can't assume that
- * atomic memory operations work on them.
- */
-static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
-
-static inline void plic_toggle(int ctxid, int hwirq, int enable)
-{
- u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+ u32 __iomem *reg = handler->enable_base + (hwirq / 32);
u32 hwirq_mask = 1 << (hwirq % 32);

- raw_spin_lock(&plic_toggle_lock);
+ raw_spin_lock(&handler->enable_lock);
if (enable)
writel(readl(reg) | hwirq_mask, reg);
else
writel(readl(reg) & ~hwirq_mask, reg);
- raw_spin_unlock(&plic_toggle_lock);
+ raw_spin_unlock(&handler->enable_lock);
}

static inline void plic_irq_toggle(struct irq_data *d, int enable)
@@ -101,7 +89,7 @@ static inline void plic_irq_toggle(struct irq_data *d, int enable)
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

if (handler->present)
- plic_toggle(handler->ctxid, d->hwirq, enable);
+ plic_toggle(handler, d->hwirq, enable);
}
}

@@ -150,7 +138,7 @@ static struct irq_domain *plic_irqdomain;
static void plic_handle_irq(struct pt_regs *regs)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+ void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
irq_hw_number_t hwirq;

WARN_ON_ONCE(!handler->present);
@@ -240,11 +228,16 @@ static int __init plic_init(struct device_node *node,
handler = per_cpu_ptr(&plic_handlers, cpu);
handler->present = true;
handler->ctxid = i;
+ handler->hart_base =
+ plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+ raw_spin_lock_init(&handler->enable_lock);
+ handler->enable_base =
+ plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;

/* priority must be > threshold to trigger an interrupt */
- writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+ writel(0, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
- plic_toggle(i, hwirq, 0);
+ plic_toggle(handler, hwirq, 0);
nr_mapped++;
}

--
2.17.1


2018-10-22 12:22:39

by Anup Patel

Subject: [PATCH 3/4] irqchip: sifive-plic: Differentiate between PLIC handler and context

We explicitly differentiate between a PLIC handler and a PLIC context
because a PLIC context corresponds to a given privilege mode of a hart,
whereas a PLIC handler is a per-CPU software construct meant for
handling interrupts from a particular PLIC context.
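
To illustrate the distinction (an editorial sketch, not part of the
patch): contexts are the (hart, privilege mode) interrupt targets
enumerated from the interrupts-extended DT property, while a handler is
the per-CPU state for the context Linux services on that CPU:

#include <linux/io.h>
#include <linux/spinlock.h>

/* A context is a hardware notion: one interrupt target, i.e. one
 * (hart, privilege mode) pair; on QEMU's virt machine, context 0 is
 * hart 0 M-mode and context 1 is hart 0 S-mode.
 *
 * A handler is the per-CPU software state for the S-mode context
 * that Linux services on that CPU:
 */
struct plic_handler {
	bool present;			/* a usable context targets this CPU */
	void __iomem *hart_base;	/* this context's threshold/claim regs */
	raw_spinlock_t enable_lock;	/* serializes enable-bit read-modify-write */
	void __iomem *enable_base;	/* this context's enable bits */
};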

Signed-off-by: Anup Patel <[email protected]>
---
drivers/irqchip/irq-sifive-plic.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 869355d2a713..eb9e8aee1a1a 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -64,8 +64,8 @@ struct plic_handler {

struct plic_hw {
u32 nr_irqs;
+ u32 nr_contexts;
u32 nr_handlers;
- u32 nr_mapped;
void __iomem *regs;
struct plic_handler *handlers;
struct irq_domain *irqdomain;
@@ -190,10 +190,10 @@ static int __init plic_init(struct device_node *node,
if (WARN_ON(!plic.nr_irqs))
goto out_iounmap;

- plic.nr_handlers = of_irq_count(node);
- if (WARN_ON(!plic.nr_handlers))
+ plic.nr_contexts = of_irq_count(node);
+ if (WARN_ON(!plic.nr_contexts))
goto out_iounmap;
- if (WARN_ON(plic.nr_handlers < num_possible_cpus()))
+ if (WARN_ON(plic.nr_contexts < num_possible_cpus()))
goto out_iounmap;

error = -ENOMEM;
@@ -206,7 +206,7 @@ static int __init plic_init(struct device_node *node,
if (WARN_ON(!plic.irqdomain))
goto out_free_handlers;

- for (i = 0; i < plic.nr_handlers; i++) {
+ for (i = 0; i < plic.nr_contexts; i++) {
struct of_phandle_args parent;
struct plic_handler *handler;
irq_hw_number_t hwirq;
@@ -229,6 +229,11 @@ static int __init plic_init(struct device_node *node,

cpu = riscv_hartid_to_cpuid(hartid);
handler = per_cpu_ptr(plic.handlers, cpu);
+ if (handler->present) {
+ pr_warn("handler not available for context %d.\n", i);
+ continue;
+ }
+
handler->present = true;
handler->hart_base =
plic.regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
@@ -241,11 +246,11 @@ static int __init plic_init(struct device_node *node,
for (hwirq = 1; hwirq <= plic.nr_irqs; hwirq++)
plic_toggle(handler, hwirq, 0);

- plic.nr_mapped++;
+ plic.nr_handlers++;
}

- pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
- plic.nr_irqs, plic.nr_mapped, plic.nr_handlers);
+ pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
+ plic.nr_irqs, plic.nr_handlers, plic.nr_contexts);
set_handle_irq(plic_handle_irq);
return 0;

--
2.17.1


2018-10-22 12:23:43

by Anup Patel

Subject: [PATCH 4/4] irqchip: sifive-plic: Implement irq_set_affinity() for SMP host

Currently on an SMP host, all CPUs take external interrupts routed via
the PLIC. All CPUs try to claim a given external interrupt, but only
one of them succeeds while the other CPUs simply resume whatever they
were doing before. This means that if we have N CPUs, then for every
external interrupt N-1 CPUs always fail to claim it and waste their
CPU time.
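
The waste happens in the claim step; here is a minimal editorial
sketch of the first half of plic_handle_irq() (names simplified):

#include <linux/io.h>
#include <linux/irqdomain.h>

/* Every hart with the hwirq enabled traps into the PLIC handler, but
 * the claim register hands the hwirq to exactly one reader; the other
 * N-1 harts read 0 and return having done nothing useful.
 */
static void plic_claim_sketch(void __iomem *claim)
{
	irq_hw_number_t hwirq = readl(claim);

	if (!hwirq)
		return;	/* lost the claim race: a wasted trap */

	/* ... map hwirq to a Linux IRQ, handle it, then complete it ... */
}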

Instead, an external interrupt should be taken by only one CPU, and we
should have a provision to explicitly specify IRQ affinity from
kernel space or user space.

This patch provides an irq_set_affinity() implementation for the PLIC
driver. It also updates irq_enable() such that a PLIC interrupt is only
enabled for one of the CPUs specified in the IRQ affinity mask.

With this patch in place, we can change the IRQ affinity at any time
from user space using procfs.

Example:

/ # cat /proc/interrupts
           CPU0       CPU1       CPU2       CPU3
  8:         44          0          0          0  SiFive PLIC   8  virtio0
 10:         48          0          0          0  SiFive PLIC  10  ttyS0
IPI0:        55        663         58        363  Rescheduling interrupts
IPI1:         0          1          3         16  Function call interrupts
/ #
/ #
/ # echo 4 > /proc/irq/10/smp_affinity
/ #
/ # cat /proc/interrupts
           CPU0       CPU1       CPU2       CPU3
  8:         45          0          0          0  SiFive PLIC   8  virtio0
 10:        160          0         17          0  SiFive PLIC  10  ttyS0
IPI0:        68        693         77        410  Rescheduling interrupts
IPI1:         0          2          3         16  Function call interrupts

Signed-off-by: Anup Patel <[email protected]>
---
drivers/irqchip/irq-sifive-plic.c | 35 +++++++++++++++++++++++++++++--
1 file changed, 33 insertions(+), 2 deletions(-)

diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index eb9e8aee1a1a..5a53ffb3c413 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -97,14 +97,42 @@ static void plic_irq_toggle(const struct cpumask *mask, int hwirq, int enable)

static void plic_irq_enable(struct irq_data *d)
{
- plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 1);
+ unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+ cpu_online_mask);
+ WARN_ON(cpu >= nr_cpu_ids);
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}

static void plic_irq_disable(struct irq_data *d)
{
- plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 0);
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}

+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ unsigned int cpu;
+
+ if (!force)
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask_val);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (!irqd_irq_disabled(d)) {
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+ }
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
/*
@@ -113,6 +141,9 @@ static struct irq_chip plic_chip = {
*/
.irq_enable = plic_irq_enable,
.irq_disable = plic_irq_disable,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = plic_set_affinity,
+#endif
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
--
2.17.1


2018-10-22 13:15:58

by Anup Patel

Subject: [PATCH 2/4] irqchip: sifive-plic: More flexible plic_irq_toggle()

We make plic_irq_toggle() more generic so that we can enable/disable
a given hwirq for a given cpumask. This generic plic_irq_toggle() will
eventually be used to implement set_affinity for the PLIC driver.
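
With the new signature, callers pass an explicit cpumask and hwirq.
For example, the eventual set_affinity path (PATCH 4 in this series)
disables the hwirq everywhere and re-enables it on a single target CPU:

	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);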

Signed-off-by: Anup Patel <[email protected]>
---
drivers/irqchip/irq-sifive-plic.c | 89 ++++++++++++++++---------------
1 file changed, 47 insertions(+), 42 deletions(-)

diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index f93ec83eaff4..869355d2a713 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -55,19 +55,25 @@
#define CONTEXT_THRESHOLD 0x00
#define CONTEXT_CLAIM 0x04

-static void __iomem *plic_regs;
-
struct plic_handler {
bool present;
- int ctxid;
void __iomem *hart_base;
raw_spinlock_t enable_lock;
void __iomem *enable_base;
};
-static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

-static inline void plic_toggle(struct plic_handler *handler,
- int hwirq, int enable)
+struct plic_hw {
+ u32 nr_irqs;
+ u32 nr_handlers;
+ u32 nr_mapped;
+ void __iomem *regs;
+ struct plic_handler *handlers;
+ struct irq_domain *irqdomain;
+};
+
+static struct plic_hw plic;
+
+static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
u32 __iomem *reg = handler->enable_base + (hwirq / 32);
u32 hwirq_mask = 1 << (hwirq % 32);
@@ -80,27 +86,23 @@ static inline void plic_toggle(struct plic_handler *handler,
raw_spin_unlock(&handler->enable_lock);
}

-static inline void plic_irq_toggle(struct irq_data *d, int enable)
+static void plic_irq_toggle(const struct cpumask *mask, int hwirq, int enable)
{
int cpu;

- writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
- for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
- struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
-
- if (handler->present)
- plic_toggle(handler, d->hwirq, enable);
- }
+ writel(enable, plic.regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+ for_each_cpu(cpu, mask)
+ plic_toggle(per_cpu_ptr(plic.handlers, cpu), hwirq, enable);
}

static void plic_irq_enable(struct irq_data *d)
{
- plic_irq_toggle(d, 1);
+ plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 1);
}

static void plic_irq_disable(struct irq_data *d)
{
- plic_irq_toggle(d, 0);
+ plic_irq_toggle(irq_data_get_affinity_mask(d), d->hwirq, 0);
}

static struct irq_chip plic_chip = {
@@ -127,8 +129,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
.xlate = irq_domain_xlate_onecell,
};

-static struct irq_domain *plic_irqdomain;
-
/*
* Handling an interrupt is a two-step process: first you claim the interrupt
* by reading the claim register, then you complete the interrupt by writing
@@ -137,7 +137,7 @@ static struct irq_domain *plic_irqdomain;
*/
static void plic_handle_irq(struct pt_regs *regs)
{
- struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+ struct plic_handler *handler = this_cpu_ptr(plic.handlers);
void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
irq_hw_number_t hwirq;

@@ -145,7 +145,7 @@ static void plic_handle_irq(struct pt_regs *regs)

csr_clear(sie, SIE_SEIE);
while ((hwirq = readl(claim))) {
- int irq = irq_find_mapping(plic_irqdomain, hwirq);
+ int irq = irq_find_mapping(plic.irqdomain, hwirq);

if (unlikely(irq <= 0))
pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
@@ -174,36 +174,39 @@ static int plic_find_hart_id(struct device_node *node)
static int __init plic_init(struct device_node *node,
struct device_node *parent)
{
- int error = 0, nr_handlers, nr_mapped = 0, i;
- u32 nr_irqs;
+ int error = 0, i;

- if (plic_regs) {
+ if (plic.regs) {
pr_warn("PLIC already present.\n");
return -ENXIO;
}

- plic_regs = of_iomap(node, 0);
- if (WARN_ON(!plic_regs))
+ plic.regs = of_iomap(node, 0);
+ if (WARN_ON(!plic.regs))
return -EIO;

error = -EINVAL;
- of_property_read_u32(node, "riscv,ndev", &nr_irqs);
- if (WARN_ON(!nr_irqs))
+ of_property_read_u32(node, "riscv,ndev", &plic.nr_irqs);
+ if (WARN_ON(!plic.nr_irqs))
goto out_iounmap;

- nr_handlers = of_irq_count(node);
- if (WARN_ON(!nr_handlers))
+ plic.nr_handlers = of_irq_count(node);
+ if (WARN_ON(!plic.nr_handlers))
goto out_iounmap;
- if (WARN_ON(nr_handlers < num_possible_cpus()))
+ if (WARN_ON(plic.nr_handlers < num_possible_cpus()))
goto out_iounmap;

error = -ENOMEM;
- plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
- &plic_irqdomain_ops, NULL);
- if (WARN_ON(!plic_irqdomain))
+ plic.handlers = alloc_percpu(struct plic_handler);
+ if (!plic.handlers)
goto out_iounmap;

- for (i = 0; i < nr_handlers; i++) {
+ plic.irqdomain = irq_domain_add_linear(node, plic.nr_irqs + 1,
+ &plic_irqdomain_ops, NULL);
+ if (WARN_ON(!plic.irqdomain))
+ goto out_free_handlers;
+
+ for (i = 0; i < plic.nr_handlers; i++) {
struct of_phandle_args parent;
struct plic_handler *handler;
irq_hw_number_t hwirq;
@@ -225,29 +228,31 @@ static int __init plic_init(struct device_node *node,
}

cpu = riscv_hartid_to_cpuid(hartid);
- handler = per_cpu_ptr(&plic_handlers, cpu);
+ handler = per_cpu_ptr(plic.handlers, cpu);
handler->present = true;
- handler->ctxid = i;
handler->hart_base =
- plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+ plic.regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
raw_spin_lock_init(&handler->enable_lock);
handler->enable_base =
- plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
+ plic.regs + ENABLE_BASE + i * ENABLE_PER_HART;

/* priority must be > threshold to trigger an interrupt */
writel(0, handler->hart_base + CONTEXT_THRESHOLD);
- for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+ for (hwirq = 1; hwirq <= plic.nr_irqs; hwirq++)
plic_toggle(handler, hwirq, 0);
- nr_mapped++;
+
+ plic.nr_mapped++;
}

pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
- nr_irqs, nr_mapped, nr_handlers);
+ plic.nr_irqs, plic.nr_mapped, plic.nr_handlers);
set_handle_irq(plic_handle_irq);
return 0;

+out_free_handlers:
+ free_percpu(plic.handlers);
out_iounmap:
- iounmap(plic_regs);
+ iounmap(plic.regs);
return error;
}

--
2.17.1


2018-11-09 08:44:08

by Christoph Hellwig

Subject: Re: [PATCH 1/4] irqchip: sifive-plic: Pre-compute context hart base and enable base

On Mon, Oct 22, 2018 at 05:15:14PM +0530, Anup Patel wrote:
> This patch makes the following optimizations:
> 1. Pre-compute the hart base for each context handler
> 2. Pre-compute the enable base for each context handler

Why?

> 3. Use a per-context-handler enable lock instead of the
> global plic_toggle_lock

Why? Also even if you want this it should be a separate patch.

> #define PRIORITY_BASE 0
> -#define PRIORITY_PER_ID 4
> +#define PRIORITY_PER_ID 4

Also please drop the random whitespace changes.

2018-11-09 08:44:41

by Christoph Hellwig

Subject: Re: [PATCH 2/4] irqchip: sifive-plic: More flexible plic_irq_toggle()

> -
> struct plic_handler {
> bool present;
> - int ctxid;
> void __iomem *hart_base;
> raw_spinlock_t enable_lock;
> void __iomem *enable_base;
> };
> -static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

This does not match the changelog at all.

2018-11-12 04:27:53

by Anup Patel

Subject: Re: [PATCH 1/4] irqchip: sifive-plic: Pre-compute context hart base and enable base

On Fri, Nov 9, 2018 at 2:12 PM Christoph Hellwig <[email protected]> wrote:
>
> On Mon, Oct 22, 2018 at 05:15:14PM +0530, Anup Patel wrote:
> > This patch makes the following optimizations:
> > 1. Pre-compute the hart base for each context handler
> > 2. Pre-compute the enable base for each context handler
>
> Why?

These are micro-optimizations: we don't need to re-compute the
hart base and the enable base every time.

>
> > 3. Use a per-context-handler enable lock instead of the
> > global plic_toggle_lock
>
> Why? Also even if you want this it should be a separate patch.

Well, the PLIC register space is a bit strange.

Most PLIC context-specific registers live in one place, except the
context IRQ enable registers, which are part of the global registers.

To handle this, we had a global plic_toggle_lock which was taken
whenever the PLIC driver touched a context's IRQ enable registers.
Instead of this, we can have a per-context IRQ enable lock for more
granular locking.
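
Concretely, using the offsets already #defined in the driver (an
editorial summary, not part of the patch):

/* Per-hwirq priority registers (global):
 *   plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID      (0x0)
 * Per-context IRQ enable bits, in the global register block:
 *   plic_regs + ENABLE_BASE + context * ENABLE_PER_HART      (0x2000)
 * Per-context threshold/claim registers, in their own block:
 *   plic_regs + CONTEXT_BASE + context * CONTEXT_PER_HART    (0x200000)
 */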

Later, when we implement IRQ set_affinity, we touch the IRQ enable
registers of each context whenever the IRQ affinity changes. This
fine-grained IRQ enable locking helps when an IRQ load balancer is
changing the affinity of different IRQs in parallel on separate
cores. Again, this is a micro-optimization.

>
> > #define PRIORITY_BASE 0
> > -#define PRIORITY_PER_ID 4
> > +#define PRIORITY_PER_ID 4
>
> Also please drop the random whitespace changes.

Instead of dropping them, I will make them a separate patch, because
we are replacing the "\t" between #define and the define name with a
space.

Regards,
Anup

2018-11-12 12:34:04

by Anup Patel

Subject: Re: [PATCH 2/4] irqchip: sifive-plic: More flexible plic_irq_toggle()

On Fri, Nov 9, 2018 at 2:13 PM Christoph Hellwig <[email protected]> wrote:
>
> > -
> > struct plic_handler {
> > bool present;
> > - int ctxid;
> > void __iomem *hart_base;
> > raw_spinlock_t enable_lock;
> > void __iomem *enable_base;
> > };
> > -static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
>
> This does not match the changelog at all.

My bad. I should have put this change into a separate patch.

I will refactor this patch.

Regards,
Anup