2020-02-21 23:23:49

by Atish Patra

Subject: [v2 PATCH] irqchip/sifive-plic: Add support for multiple PLICs

Currently, the PLIC driver supports only one PLIC on the board. However,
multiple PLICs can be present on two-socket RISC-V systems.

Modify the driver so that each PLIC handler has information about its
own PLIC registers and the irqdomain associated with its PLIC instance.

Tested on a two-socket RISC-V system based on the VCU118 FPGA, connected
via the OmniXtend protocol.

Signed-off-by: Atish Patra <[email protected]>
---
This patch is rebased on top of 5.6-rc2 and the following PLIC fix from
the hotplug series.

https://lkml.org/lkml/2020/2/20/1220

Changes from v1->v2:
1. Use irq_get_chip_data() to retrieve the host_data
2. Renamed plic_hw to plic_node_ctx
---
drivers/irqchip/irq-sifive-plic.c | 82 ++++++++++++++++++++-----------
1 file changed, 52 insertions(+), 30 deletions(-)

diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 7c7f37393f99..9b9b6f4def4f 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -59,7 +59,11 @@
#define PLIC_DISABLE_THRESHOLD 0xf
#define PLIC_ENABLE_THRESHOLD 0

-static void __iomem *plic_regs;
+struct plic_node_ctx {
+ struct cpumask lmask;
+ struct irq_domain *irqdomain;
+ void __iomem *regs;
+};

struct plic_handler {
bool present;
@@ -70,6 +74,7 @@ struct plic_handler {
*/
raw_spinlock_t enable_lock;
void __iomem *enable_base;
+ struct plic_node_ctx *node_ctx;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

@@ -88,31 +93,41 @@ static inline void plic_toggle(struct plic_handler *handler,
}

static inline void plic_irq_toggle(const struct cpumask *mask,
- int hwirq, int enable)
+ struct irq_data *d, int enable)
{
int cpu;
+ struct plic_node_ctx *node_ctx = irq_get_chip_data(d->irq);

- writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+ writel(enable,
+ node_ctx->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
for_each_cpu(cpu, mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

- if (handler->present)
- plic_toggle(handler, hwirq, enable);
+ if (handler->present &&
+ cpumask_test_cpu(cpu, &handler->node_ctx->lmask))
+ plic_toggle(handler, d->hwirq, enable);
}
}

static void plic_irq_unmask(struct irq_data *d)
{
- unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
- cpu_online_mask);
+ struct cpumask amask;
+ unsigned int cpu;
+ struct plic_node_ctx *node_ctx = irq_get_chip_data(d->irq);
+
+ cpumask_and(&amask, &node_ctx->lmask, cpu_online_mask);
+ cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+ &amask);
if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
return;
- plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+ plic_irq_toggle(cpumask_of(cpu), d, 1);
}

static void plic_irq_mask(struct irq_data *d)
{
- plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+ struct plic_node_ctx *node_ctx = irq_get_chip_data(d->irq);
+
+ plic_irq_toggle(&node_ctx->lmask, d, 0);
}

#ifdef CONFIG_SMP
@@ -120,17 +135,21 @@ static int plic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
unsigned int cpu;
+ struct cpumask amask;
+ struct plic_node_ctx *node_ctx = irq_get_chip_data(d->irq);
+
+ cpumask_and(&amask, &node_ctx->lmask, mask_val);

if (force)
- cpu = cpumask_first(mask_val);
+ cpu = cpumask_first(&amask);
else
- cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ cpu = cpumask_any_and(&amask, cpu_online_mask);

if (cpu >= nr_cpu_ids)
return -EINVAL;

- plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
- plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+ plic_irq_toggle(&node_ctx->lmask, d, 0);
+ plic_irq_toggle(cpumask_of(cpu), d, 1);

irq_data_update_effective_affinity(d, cpumask_of(cpu));

@@ -191,8 +210,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
.free = irq_domain_free_irqs_top,
};

-static struct irq_domain *plic_irqdomain;
-
/*
* Handling an interrupt is a two-step process: first you claim the interrupt
* by reading the claim register, then you complete the interrupt by writing
@@ -209,7 +226,7 @@ static void plic_handle_irq(struct pt_regs *regs)

csr_clear(CSR_IE, IE_EIE);
while ((hwirq = readl(claim))) {
- int irq = irq_find_mapping(plic_irqdomain, hwirq);
+ int irq = irq_find_mapping(handler->node_ctx->irqdomain, hwirq);

if (unlikely(irq <= 0))
pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
@@ -265,15 +282,17 @@ static int __init plic_init(struct device_node *node,
{
int error = 0, nr_contexts, nr_handlers = 0, i;
u32 nr_irqs;
+ struct plic_node_ctx *node_ctx;

- if (plic_regs) {
- pr_warn("PLIC already present.\n");
- return -ENXIO;
- }
+ node_ctx = kzalloc(sizeof(*node_ctx), GFP_KERNEL);
+ if (!node_ctx)
+ return -ENOMEM;

- plic_regs = of_iomap(node, 0);
- if (WARN_ON(!plic_regs))
- return -EIO;
+ node_ctx->regs = of_iomap(node, 0);
+ if (WARN_ON(!node_ctx->regs)) {
+ error = -EIO;
+ goto out_free_nctx;
+ }

error = -EINVAL;
of_property_read_u32(node, "riscv,ndev", &nr_irqs);
@@ -287,9 +306,9 @@ static int __init plic_init(struct device_node *node,
goto out_iounmap;

error = -ENOMEM;
- plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
- &plic_irqdomain_ops, NULL);
- if (WARN_ON(!plic_irqdomain))
+ node_ctx->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
+ &plic_irqdomain_ops, node_ctx);
+ if (WARN_ON(!node_ctx->irqdomain))
goto out_iounmap;

for (i = 0; i < nr_contexts; i++) {
@@ -334,13 +353,14 @@ static int __init plic_init(struct device_node *node,
goto done;
}

+ cpumask_set_cpu(cpu, &node_ctx->lmask);
handler->present = true;
handler->hart_base =
- plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+ node_ctx->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
raw_spin_lock_init(&handler->enable_lock);
handler->enable_base =
- plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
-
+ node_ctx->regs + ENABLE_BASE + i * ENABLE_PER_HART;
+ handler->node_ctx = node_ctx;
done:
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
plic_toggle(handler, hwirq, 0);
@@ -356,7 +376,9 @@ static int __init plic_init(struct device_node *node,
return 0;

out_iounmap:
- iounmap(plic_regs);
+ iounmap(node_ctx->regs);
+out_free_nctx:
+ kfree(node_ctx);
return error;
}

--
2.25.0


2020-02-28 04:07:59

by Anup Patel

Subject: Re: [v2 PATCH] irqchip/sifive-plic: Add support for multiple PLICs

On Sat, Feb 22, 2020 at 4:53 AM Atish Patra <[email protected]> wrote:
>
> Currently, the PLIC driver supports only one PLIC on the board. However,
> multiple PLICs can be present on two-socket RISC-V systems.
>
> Modify the driver so that each PLIC handler has information about its
> own PLIC registers and the irqdomain associated with its PLIC instance.
>
> Tested on a two-socket RISC-V system based on the VCU118 FPGA, connected
> via the OmniXtend protocol.
>
> Signed-off-by: Atish Patra <[email protected]>
> ---
> This patch is rebased on top of 5.6-rc2 and the following PLIC fix from
> the hotplug series.
>
> https://lkml.org/lkml/2020/2/20/1220
>
> Changes from v1->v2:
> 1. Use irq_get_chip_data() to retrieve the host_data
> 2. Renamed plic_hw to plic_node_ctx
> ---
> drivers/irqchip/irq-sifive-plic.c | 82 ++++++++++++++++++++-----------
> 1 file changed, 52 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
> index 7c7f37393f99..9b9b6f4def4f 100644
> --- a/drivers/irqchip/irq-sifive-plic.c
> +++ b/drivers/irqchip/irq-sifive-plic.c
> @@ -59,7 +59,11 @@
> #define PLIC_DISABLE_THRESHOLD 0xf
> #define PLIC_ENABLE_THRESHOLD 0
>
> -static void __iomem *plic_regs;
> +struct plic_node_ctx {

I think "plic_node_ctx" is a non-trivial name. I guess much
simpler and cleaner name will be "plic_priv" because this
structure represents private data for each PLIC instance.

> + struct cpumask lmask;
> + struct irq_domain *irqdomain;
> + void __iomem *regs;
> +};
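
For illustration, the rename would look something like this (just a
sketch of the suggested name, keeping the same members as in the patch):

struct plic_priv {
	struct cpumask lmask;		/* harts wired to this PLIC instance */
	struct irq_domain *irqdomain;	/* irqdomain of this PLIC instance */
	void __iomem *regs;		/* MMIO base of this PLIC instance */
};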
> [...]

Apart from above nit, looks good to me.

Reviewed-by: Anup Patel <[email protected]>

Regards,
Anup

2020-02-28 18:26:35

by Atish Patra

Subject: Re: [v2 PATCH] irqchip/sifive-plic: Add support for multiple PLICs

On Fri, 2020-02-28 at 09:36 +0530, Anup Patel wrote:
> On Sat, Feb 22, 2020 at 4:53 AM Atish Patra <[email protected]> wrote:
> > Currently, the PLIC driver supports only one PLIC on the board. However,
> > multiple PLICs can be present on two-socket RISC-V systems.
> >
> > Modify the driver so that each PLIC handler has information about its
> > own PLIC registers and the irqdomain associated with its PLIC instance.
> >
> > Tested on a two-socket RISC-V system based on the VCU118 FPGA, connected
> > via the OmniXtend protocol.
> >
> > Signed-off-by: Atish Patra <[email protected]>
> > ---
> > This patch is rebased on top of 5.6-rc2 and the following PLIC fix from
> > the hotplug series.
> >
> > https://lkml.org/lkml/2020/2/20/1220
> >
> > Changes from v1->v2:
> > 1. Use irq_get_chip_data() to retrieve the host_data
> > 2. Renamed plic_hw to plic_node_ctx
> > ---
> > drivers/irqchip/irq-sifive-plic.c | 82 ++++++++++++++++++++-----------
> > 1 file changed, 52 insertions(+), 30 deletions(-)
> >
> > diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
> > index 7c7f37393f99..9b9b6f4def4f 100644
> > --- a/drivers/irqchip/irq-sifive-plic.c
> > +++ b/drivers/irqchip/irq-sifive-plic.c
> > @@ -59,7 +59,11 @@
> > #define PLIC_DISABLE_THRESHOLD 0xf
> > #define PLIC_ENABLE_THRESHOLD 0
> >
> > -static void __iomem *plic_regs;
> > +struct plic_node_ctx {
>
> I think "plic_node_ctx" is a non-trivial name. I guess much
> simpler and cleaner name will be "plic_priv" because this
> structure represents private data for each PLIC instance.
>

ok. Sure. I will update the patch.
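
For example, with the rename, the irqchip callbacks would fetch the
per-instance data like this (a sketch only, using the same
irq_get_chip_data() plumbing as in this v2, just with the new name):

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	plic_irq_toggle(&priv->lmask, d, 0);
}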

> > [...]
>
> Apart from above nit, looks good to me.
>
> Reviewed-by: Anup Patel <[email protected]>
>

Thanks.

> Regards,
> Anup

--
Regards,
Atish

2020-02-28 18:54:26

by Marc Zyngier

Subject: Re: [v2 PATCH] irqchip/sifive-plic: Add support for multiple PLICs

On 2020-02-21 23:22, Atish Patra wrote:
> Currently, the PLIC driver supports only one PLIC on the board. However,
> multiple PLICs can be present on two-socket RISC-V systems.
>
> Modify the driver so that each PLIC handler has information about its
> own PLIC registers and the irqdomain associated with its PLIC instance.
>
> Tested on a two-socket RISC-V system based on the VCU118 FPGA, connected
> via the OmniXtend protocol.
>
> Signed-off-by: Atish Patra <[email protected]>
> ---
> This patch is rebased on top of 5.6-rc2 and the following PLIC fix from
> the hotplug series.
>
> https://lkml.org/lkml/2020/2/20/1220

How do you want this to be merged? I haven't really followed the hotplug
series, but given that this is a pretty simple patch, I'd rather have
things based the other way around so that it can be merged independently.

Thanks,

M.
--
Jazz is not dead. It just smells funny...

2020-02-28 19:03:26

by Atish Patra

Subject: Re: [v2 PATCH] irqchip/sifive-plic: Add support for multiple PLICs

On Fri, 2020-02-28 at 18:53 +0000, Marc Zyngier wrote:
> On 2020-02-21 23:22, Atish Patra wrote:
> > Currently, the PLIC driver supports only one PLIC on the board. However,
> > multiple PLICs can be present on two-socket RISC-V systems.
> >
> > Modify the driver so that each PLIC handler has information about its
> > own PLIC registers and the irqdomain associated with its PLIC instance.
> >
> > Tested on a two-socket RISC-V system based on the VCU118 FPGA, connected
> > via the OmniXtend protocol.
> >
> > Signed-off-by: Atish Patra <[email protected]>
> > ---
> > This patch is rebased on top of 5.6-rc2 and the following PLIC fix from
> > the hotplug series.
> >
> > https://lkml.org/lkml/2020/2/20/1220
>
> How do you want this to be merged? I haven't really followed the hotplug
> series, but given that this is a pretty simple patch, I'd rather have
> things based the other way around so that it can be merged independently.
>
I am fine with that, or I can remove the PLIC patch from the hotplug
series and include it in this series, as that patch is not really
dependent on the hotplug code.

https://patchwork.kernel.org/patch/11407379/

Let me know which you prefer.

> Thanks,
>
> M.

--
Regards,
Atish

2020-02-28 19:57:25

by Marc Zyngier

Subject: Re: [v2 PATCH] irqchip/sifive-plic: Add support for multiple PLICs

On 2020-02-28 19:03, Atish Patra wrote:
> On Fri, 2020-02-28 at 18:53 +0000, Marc Zyngier wrote:
>> On 2020-02-21 23:22, Atish Patra wrote:
>> > Currently, the PLIC driver supports only one PLIC on the board. However,
>> > multiple PLICs can be present on two-socket RISC-V systems.
>> >
>> > Modify the driver so that each PLIC handler has information about its
>> > own PLIC registers and the irqdomain associated with its PLIC instance.
>> >
>> > Tested on a two-socket RISC-V system based on the VCU118 FPGA, connected
>> > via the OmniXtend protocol.
>> >
>> > Signed-off-by: Atish Patra <[email protected]>
>> > ---
>> > This patch is rebased on top of 5.6-rc2 and the following PLIC fix from
>> > the hotplug series.
>> >
>> > https://lkml.org/lkml/2020/2/20/1220
>>
>> How do you want this to be merged? I haven't really followed the hotplug
>> series, but given that this is a pretty simple patch, I'd rather have
>> things based the other way around so that it can be merged independently.
>>
> I am fine with that, or I can remove the PLIC patch from the hotplug
> series and include it in this series, as that patch is not really
> dependent on the hotplug code.
>
> https://patchwork.kernel.org/patch/11407379/
>
> Let me know which you prefer.

I'd rather have a standalone PLIC series that I can take into 5.7
independently of the rest of the hotplug series. That will make things
simpler for everyone.

Thanks,

M.
--
Jazz is not dead. It just smells funny...