2022-05-09 02:33:15

by Pali Rohár

[permalink] [raw]
Subject: [PATCH 2/6] irqchip/armada-370-xp: Implement SoC Error interrupts

MPIC IRQ 4 is used as SoC Error Summary interrupt and provides access to
another hierarchy of SoC Error interrupts. Implement a new IRQ chip and
domain for accessing this IRQ hierarchy.

Signed-off-by: Pali Rohár <[email protected]>
---
drivers/irqchip/irq-armada-370-xp.c | 213 +++++++++++++++++++++++++++-
1 file changed, 210 insertions(+), 3 deletions(-)

diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index ebd76ea1c69b..71578b65f5c8 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -117,6 +117,8 @@
/* Registers relative to main_int_base */
#define ARMADA_370_XP_INT_CONTROL (0x00)
#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x04)
+#define ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS (0x20)
+#define ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS (0x24)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
@@ -130,6 +132,8 @@
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
+#define ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF (0x50)
+#define ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF (0x54)
#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS (0x54)
#define ARMADA_370_XP_INT_CAUSE_PERF(cpu) (1 << cpu)

@@ -146,6 +150,8 @@
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
+static struct irq_domain *armada_370_xp_soc_err_domain;
+static unsigned int soc_err_irq_num_regs;
static u32 doorbell_mask_reg;
static int parent_irq;
#ifdef CONFIG_PCI_MSI
@@ -156,6 +162,8 @@ static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
#endif

+static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
+
static inline bool is_percpu_irq(irq_hw_number_t irq)
{
if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
@@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
armada_370_xp_irq_unmask(data);
}

+ /* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
+ for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
+ struct irq_data *data;
+ int virq;
+
+ virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
+ if (virq == 0)
+ continue;
+
+ data = irq_get_irq_data(virq);
+
+ if (!irq_percpu_is_enabled(virq))
+ continue;
+
+ armada_370_xp_soc_err_irq_unmask(data);
+ }
+
+ /* Unmask summary SoC Error Interrupt */
+ if (soc_err_irq_num_regs > 0)
+ writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
ipi_resume();
}

@@ -546,8 +575,8 @@ static struct irq_chip armada_370_xp_irq_chip = {
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
unsigned int virq, irq_hw_number_t hw)
{
- /* IRQs 0 and 1 cannot be mapped, they are handled internally */
- if (hw <= 1)
+ /* IRQs 0, 1 and 4 cannot be mapped, they are handled internally */
+ if (hw <= 1 || hw == 4)
return -EINVAL;

armada_370_xp_irq_mask(irq_get_irq_data(virq));
@@ -577,6 +606,99 @@ static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
.xlate = irq_domain_xlate_onecell,
};

+static DEFINE_RAW_SPINLOCK(armada_370_xp_soc_err_lock);
+
+static void armada_370_xp_soc_err_irq_mask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 reg, mask;
+
+ reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
+ : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
+
+ raw_spin_lock(&armada_370_xp_soc_err_lock);
+ mask = readl(per_cpu_int_base + reg);
+ mask &= ~BIT(hwirq % 32);
+ writel(mask, per_cpu_int_base + reg);
+ raw_spin_unlock(&armada_370_xp_soc_err_lock);
+}
+
+static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 reg, mask;
+
+ reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
+ : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
+
+ raw_spin_lock(&armada_370_xp_soc_err_lock);
+ mask = readl(per_cpu_int_base + reg);
+ mask |= BIT(hwirq % 32);
+ writel(mask, per_cpu_int_base + reg);
+ raw_spin_unlock(&armada_370_xp_soc_err_lock);
+}
+
+static int armada_370_xp_soc_err_irq_mask_on_cpu(void *par)
+{
+ struct irq_data *d = par;
+ armada_370_xp_soc_err_irq_mask(d);
+ return 0;
+}
+
+static int armada_370_xp_soc_err_irq_unmask_on_cpu(void *par)
+{
+ struct irq_data *d = par;
+ armada_370_xp_soc_err_irq_unmask(d);
+ return 0;
+}
+
+static int armada_xp_soc_err_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *mask,
+ bool force)
+{
+ unsigned int cpu;
+
+ cpus_read_lock();
+
+ /* First disable IRQ on all cores */
+ for_each_online_cpu(cpu)
+ smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_mask_on_cpu, d, true);
+
+ /* Select a single core from the affinity mask which is online */
+ cpu = cpumask_any_and(mask, cpu_online_mask);
+ smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_unmask_on_cpu, d, true);
+
+ cpus_read_unlock();
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip armada_370_xp_soc_err_irq_chip = {
+ .name = "MPIC SOC",
+ .irq_mask = armada_370_xp_soc_err_irq_mask,
+ .irq_unmask = armada_370_xp_soc_err_irq_unmask,
+ .irq_set_affinity = armada_xp_soc_err_irq_set_affinity,
+};
+
+static int armada_370_xp_soc_err_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ armada_370_xp_soc_err_irq_mask(irq_get_irq_data(virq));
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_percpu_devid(virq);
+ irq_set_chip_and_handler(virq, &armada_370_xp_soc_err_irq_chip,
+ handle_percpu_devid_irq);
+ irq_set_probe(virq);
+ return 0;
+}
+
+static const struct irq_domain_ops armada_370_xp_soc_err_irq_ops = {
+ .map = armada_370_xp_soc_err_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
#ifdef CONFIG_PCI_MSI
static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
{
@@ -605,6 +727,32 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
#endif

+static void armada_370_xp_handle_soc_err_irq(void)
+{
+ unsigned long status, bit;
+ u32 mask, cause;
+
+ if (soc_err_irq_num_regs < 1)
+ return;
+
+ mask = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF);
+ cause = readl(main_int_base + ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS);
+ status = cause & mask;
+
+ for_each_set_bit(bit, &status, 32)
+ generic_handle_domain_irq(armada_370_xp_soc_err_domain, bit);
+
+ if (soc_err_irq_num_regs < 2)
+ return;
+
+ mask = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF);
+ cause = readl(main_int_base + ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS);
+ status = cause & mask;
+
+ for_each_set_bit(bit, &status, 32)
+ generic_handle_domain_irq(armada_370_xp_soc_err_domain, bit + 32);
+}
+
static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -630,6 +778,11 @@ static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
continue;
}

+ if (irqn == 4) {
+ armada_370_xp_handle_soc_err_irq();
+ continue;
+ }
+
generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn);
}

@@ -649,7 +802,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
if (irqnr > 1022)
break;

- if (irqnr > 1) {
+ if (irqnr > 1 && irqnr != 4) {
generic_handle_domain_irq(armada_370_xp_mpic_domain,
irqnr);
continue;
@@ -659,6 +812,10 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
if (irqnr == 1)
armada_370_xp_handle_msi_irq(regs, false);

+ /* SoC Error handling */
+ if (irqnr == 4)
+ armada_370_xp_handle_soc_err_irq();
+
#ifdef CONFIG_SMP
/* IPI Handling */
if (irqnr == 0) {
@@ -722,6 +879,26 @@ static void armada_370_xp_mpic_resume(void)
}
}

+ /* Re-enable per-CPU SoC Error interrupts */
+ for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
+ struct irq_data *data;
+ int virq;
+
+ virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
+ if (virq == 0)
+ continue;
+
+ data = irq_get_irq_data(virq);
+
+ /*
+ * Re-enable on the current CPU,
+ * armada_xp_mpic_reenable_percpu() will take
+ * care of secondary CPUs when they come up.
+ */
+ if (irq_percpu_is_enabled(virq))
+ armada_370_xp_soc_err_irq_unmask(data);
+ }
+
/* Reconfigure doorbells for IPIs and MSIs */
writel(doorbell_mask_reg,
per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
@@ -730,6 +907,10 @@ static void armada_370_xp_mpic_resume(void)
if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);

+ /* Unmask summary SoC Error Interrupt */
+ if (soc_err_irq_num_regs > 0)
+ writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
ipi_resume();
}

@@ -742,6 +923,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
struct device_node *parent)
{
struct resource main_int_res, per_cpu_int_res;
+ struct device_node *soc_err_node;
int nr_irqs, i;
u32 control;

@@ -775,12 +957,37 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
BUG_ON(!armada_370_xp_mpic_domain);
irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);

+ soc_err_node = of_get_next_child(node, NULL);
+ if (!soc_err_node) {
+ pr_warn("Missing SoC Error Interrupt Controller node\n");
+ pr_warn("Extended interrupts are not supported\n");
+ } else {
+ pr_info("Registering MPIC SoC Error Interrupt Controller\n");
+ /*
+ * Armada 370 and XP have only 32 SoC Error IRQs in one register
+ * and other Armada platforms have 64 IRQs in two registers.
+ */
+ soc_err_irq_num_regs =
+ of_machine_is_compatible("marvell,armada-370-xp") ? 1 : 2;
+ armada_370_xp_soc_err_domain =
+ irq_domain_add_hierarchy(armada_370_xp_mpic_domain, 0,
+ soc_err_irq_num_regs * 32,
+ soc_err_node,
+ &armada_370_xp_soc_err_irq_ops,
+ NULL);
+ BUG_ON(!armada_370_xp_soc_err_domain);
+ }
+
/* Setup for the boot CPU */
armada_xp_mpic_perf_init();
armada_xp_mpic_smp_cpu_init();

armada_370_xp_msi_init(node, main_int_res.start);

+ /* Unmask summary SoC Error Interrupt */
+ if (soc_err_irq_num_regs > 0)
+ writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
parent_irq = irq_of_parse_and_map(node, 0);
if (parent_irq <= 0) {
irq_set_default_host(armada_370_xp_mpic_domain);
--
2.20.1



2022-05-09 04:59:49

by Pali Rohár

[permalink] [raw]
Subject: Re: [PATCH 2/6] irqchip/armada-370-xp: Implement SoC Error interrupts

On Saturday 07 May 2022 10:01:52 Marc Zyngier wrote:
> On Fri, 06 May 2022 19:55:46 +0100,
> Pali Rohár <[email protected]> wrote:
> >
> > On Friday 06 May 2022 19:47:25 Marc Zyngier wrote:
> > > On Fri, 06 May 2022 19:30:51 +0100,
> > > Pali Rohár <[email protected]> wrote:
> > > >
> > > > On Friday 06 May 2022 19:19:46 Marc Zyngier wrote:
> > > > > On Fri, 06 May 2022 14:40:25 +0100,
> > > > > Pali Rohár <[email protected]> wrote:
> > > > > >
> > > > > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
> > > > > > +
> > > > > > static inline bool is_percpu_irq(irq_hw_number_t irq)
> > > > > > {
> > > > > > if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
> > > > > > @@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
> > > > > > armada_370_xp_irq_unmask(data);
> > > > > > }
> > > > > >
> > > > > > + /* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
> > > > > > + for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> > > > > > + struct irq_data *data;
> > > > > > + int virq;
> > > > > > +
> > > > > > + virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> > > > > > + if (virq == 0)
> > > > > > + continue;
> > > > > > +
> > > > > > + data = irq_get_irq_data(virq);
> > > > > > +
> > > > > > + if (!irq_percpu_is_enabled(virq))
> > > > > > + continue;
> > > > > > +
> > > > > > + armada_370_xp_soc_err_irq_unmask(data);
> > > > > > + }
> > > > >
> > > > > So you do this loop and all these lookups, both here and in the resume
> > > > > function (duplicated code!) just to be able to call the unmask
> > > > > function? This would be better served by two straight writes of the
> > > > > mask register, which you'd conveniently save on suspend.
> > > > >
> > > > > Yes, you have only duplicated the existing logic. But surely there is
> > > > > something better to do.
> > > >
> > > > Yes, I just used existing logic.
> > > >
> > > > I'm not rewriting driver or doing big refactor of it, as this is not in
> > > > the scope of the PCIe AER interrupt support.
> > >
> > > Fair enough. By the same logic, I'm not taking any change to the
> > > driver until it is put in a better shape. Your call.
> >
> > If you are maintainer of this code then it is expected from _you_ to
> > move the current code into _better shape_ as you wrote and expect. And
> > then show us exactly, how new changes in this driver should look like,
> > in examples.
>
> Sorry, but that's not how this works. You are the one willing to
> change a sub-par piece of code, you get to make it better. You
> obviously have the means (the HW) and the incentive (these patches).
> But you don't get to make something even more unmaintainable because
> you're unwilling to do some extra work.
>
> If you're unhappy with my position, that's fine. I suggest you take it
> with Thomas, and maybe even Linus. As I suggested before, you can also
> post a patch removing me as the irqchip maintainer. I'm sure that will
> spark an interesting discussion.

You have already suggested it in email [1] but apparently you are _not_
maintainer of mvebu pci controller. get_maintainer.pl for part about
which you have talked in [1] says:

$ ./scripts/get_maintainer.pl -f drivers/pci/controller/pci-aardvark.c
Thomas Petazzoni <[email protected]> (maintainer:PCI DRIVER FOR AARDVARK (Marvell Armada 3700))
"Pali Rohár" <[email protected]> (maintainer:PCI DRIVER FOR AARDVARK (Marvell Armada 3700))
Lorenzo Pieralisi <[email protected]> (supporter:PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS)
Rob Herring <[email protected]> (reviewer:PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS)
"Krzysztof Wilczyński" <[email protected]> (reviewer:PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS)
Bjorn Helgaas <[email protected]> (supporter:PCI SUBSYSTEM)
[email protected] (open list:PCI DRIVER FOR AARDVARK (Marvell Armada 3700))
[email protected] (moderated list:PCI DRIVER FOR AARDVARK (Marvell Armada 3700))
[email protected] (open list)

So I do not have to remove anything, you are _not_ on that list.
On the other hand, Thomas Petazzoni is on this list...

> > > > > > +static int armada_xp_soc_err_irq_set_affinity(struct irq_data *d,
> > > > > > + const struct cpumask *mask,
> > > > > > + bool force)
> > > > > > +{
> > > > > > + unsigned int cpu;
> > > > > > +
> > > > > > + cpus_read_lock();
> > > > > > +
> > > > > > + /* First disable IRQ on all cores */
> > > > > > + for_each_online_cpu(cpu)
> > > > > > + smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_mask_on_cpu, d, true);
> > > > > > +
> > > > > > + /* Select a single core from the affinity mask which is online */
> > > > > > + cpu = cpumask_any_and(mask, cpu_online_mask);
> > > > > > + smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_unmask_on_cpu, d, true);
> > > > > > +
> > > > > > + cpus_read_unlock();
> > > > > > +
> > > > > > + irq_data_update_effective_affinity(d, cpumask_of(cpu));
> > > > > > +
> > > > > > + return IRQ_SET_MASK_OK;
> > > > > > +}
> > > > >
> > > > > Aren't these per-CPU interrupts anyway? What does it mean to set their
> > > > > affinity? /me rolls eyes...
> > > >
> > > > Yes, they are per-CPU interrupts. But to mask or unmask particular
> > > > interrupt for specific CPU is possible only from that CPU. CPU 0 just
> > > > cannot move interrupt from CPU 0 to CPU 1. CPU 0 can only mask that
> > > > interrupt and CPU 1 has to unmask it.
> > >
> > > And that's no different form other per-CPU interrupts that have the
> > > exact same requirements. NAK to this sort of hacks.
> >
> > You forgot to mention in your previous email how to do it, right? So we
> > are waiting...
>
> I didn't forget. I explained that it should be handled just like any
> other per-CPU interrupt. There is plenty of example of how to do that
> in the tree (timers, for example), and if you had even looked at it,
> you'd have seen that your approach most probably results in an
> arbitrary pointer dereference on anything but CPU0 because the
> requesting driver knows nothing about per-CPU interrupts.
>
> But you're obviously trying to make a very different point here. I'll
> let you play that game for as long as you want, no skin off my nose.
> Maybe in the future, you'll be more interested in actively
> collaborating on the kernel code instead of throwing your toys out of
> the pram.
>
> Thanks,

The only _toy_ here is your broken mvebu board, which your ego was unable
to fix, so you put it into the recycling pile [2], and since then, for
months, you have been trying to reject every change or improvement in
mvebu drivers and trying to find a way to remove all mvebu code — as if,
because you were not able to fix your toy, you would break it for all
other people too. You have already expressed this, but I'm not going to
search through emails to find those statements of yours.

Sorry, I'm stopping here. This is just proof that you are not
qualified to review mvebu code.

[1] - https://lore.kernel.org/linux-pci/[email protected]/
[2] - https://lore.kernel.org/linux-pci/[email protected]/

2022-05-10 00:18:56

by Rob Herring (Arm)

[permalink] [raw]
Subject: Re: [PATCH 2/6] irqchip/armada-370-xp: Implement SoC Error interrupts

On Sat, May 07, 2022 at 11:20:54AM +0200, Pali Rohár wrote:
> On Saturday 07 May 2022 10:01:52 Marc Zyngier wrote:
> > On Fri, 06 May 2022 19:55:46 +0100,
> > Pali Rohár <[email protected]> wrote:
> > >
> > > On Friday 06 May 2022 19:47:25 Marc Zyngier wrote:
> > > > On Fri, 06 May 2022 19:30:51 +0100,
> > > > Pali Rohár <[email protected]> wrote:
> > > > >
> > > > > On Friday 06 May 2022 19:19:46 Marc Zyngier wrote:
> > > > > > On Fri, 06 May 2022 14:40:25 +0100,
> > > > > > Pali Rohár <[email protected]> wrote:
> > > > > > >
> > > > > > > +static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
> > > > > > > +
> > > > > > > static inline bool is_percpu_irq(irq_hw_number_t irq)
> > > > > > > {
> > > > > > > if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
> > > > > > > @@ -509,6 +517,27 @@ static void armada_xp_mpic_reenable_percpu(void)
> > > > > > > armada_370_xp_irq_unmask(data);
> > > > > > > }
> > > > > > >
> > > > > > > + /* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
> > > > > > > + for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
> > > > > > > + struct irq_data *data;
> > > > > > > + int virq;
> > > > > > > +
> > > > > > > + virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
> > > > > > > + if (virq == 0)
> > > > > > > + continue;
> > > > > > > +
> > > > > > > + data = irq_get_irq_data(virq);
> > > > > > > +
> > > > > > > + if (!irq_percpu_is_enabled(virq))
> > > > > > > + continue;
> > > > > > > +
> > > > > > > + armada_370_xp_soc_err_irq_unmask(data);
> > > > > > > + }
> > > > > >
> > > > > > So you do this loop and all these lookups, both here and in the resume
> > > > > > function (duplicated code!) just to be able to call the unmask
> > > > > > function? This would be better served by two straight writes of the
> > > > > > mask register, which you'd conveniently save on suspend.
> > > > > >
> > > > > > Yes, you have only duplicated the existing logic. But surely there is
> > > > > > something better to do.
> > > > >
> > > > > Yes, I just used existing logic.
> > > > >
> > > > > I'm not rewriting driver or doing big refactor of it, as this is not in
> > > > > the scope of the PCIe AER interrupt support.
> > > >
> > > > Fair enough. By the same logic, I'm not taking any change to the
> > > > driver until it is put in a better shape. Your call.
> > >
> > > If you are maintainer of this code then it is expected from _you_ to
> > > move the current code into _better shape_ as you wrote and expect. And
> > > then show us exactly, how new changes in this driver should look like,
> > > in examples.
> >
> > Sorry, but that's not how this works. You are the one willing to
> > change a sub-par piece of code, you get to make it better. You
> > obviously have the means (the HW) and the incentive (these patches).
> > But you don't get to make something even more unmaintainable because
> > you're unwilling to do some extra work.
> >
> > If you're unhappy with my position, that's fine. I suggest you take it
> > with Thomas, and maybe even Linus. As I suggested before, you can also
> > post a patch removing me as the irqchip maintainer. I'm sure that will
> > spark an interesting discussion.
>
> You have already suggested it in email [1] but apparently you are _not_
> maintainer of mvebu pci controller. get_maintainer.pl for part about
> which you have talked in [1] says:
>
> $ ./scripts/get_maintainer.pl -f drivers/pci/controller/pci-aardvark.c
> Thomas Petazzoni <[email protected]> (maintainer:PCI DRIVER FOR AARDVARK (Marvell Armada 3700))
> "Pali Rohár" <[email protected]> (maintainer:PCI DRIVER FOR AARDVARK (Marvell Armada 3700))
> Lorenzo Pieralisi <[email protected]> (supporter:PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS)
> Rob Herring <[email protected]> (reviewer:PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS)

Please just refactor the code in question. You've wasted more time
arguing about it than it would take to do. Having done a bit of PCI
refactoring, I can tell you hardly anyone else does. I can barely even
get comments/acks on refactoring until I break platforms (which happens
a lot). Maintainers have no other leverage other than what Marc pointed
out.

In any case, I think there's no way the PCI maintainers will take this
as-is at this point.

Rob