From: Julien Thierry <[email protected]>
Currently the PMU interrupt can either be a normal irq or a percpu irq. Supporting NMIs will turn each of those cases into two, and managing the interrupt would become a mess of 'if's.

Define sets of callbacks for the operations commonly done on the interrupt. The appropriate set of callbacks is selected at interrupt request time, which simplifies enabling/disabling and freeing the interrupt.
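For illustration only (this sketch is not part of the patch; pmu_has_nmi is a made-up predicate, while the NMI helpers are the existing genirq ones), enabling the interrupt would otherwise end up open-coding four cases at every call site:

	/* Hypothetical: what each call site would turn into without the ops. */
	if (irq_is_percpu_devid(irq)) {
		if (pmu_has_nmi)
			enable_percpu_nmi(irq, IRQ_TYPE_NONE);
		else
			enable_percpu_irq(irq, IRQ_TYPE_NONE);
	} else {
		if (pmu_has_nmi)
			enable_nmi(irq);
		else
			enable_irq(irq);
	}

With a set of callbacks chosen once at request time, every such site collapses to a single indirect call.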
Cc: Julien Thierry <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Mark Rutland <[email protected]>
Signed-off-by: Julien Thierry <[email protected]>
Signed-off-by: Alexandru Elisei <[email protected]>
---
drivers/perf/arm_pmu.c | 86 ++++++++++++++++++++++++++++++++++--------
1 file changed, 70 insertions(+), 16 deletions(-)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index df352b334ea7..17e5952d21e4 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -26,8 +26,46 @@

#include <asm/irq_regs.h>

+static int armpmu_count_irq_users(const int irq);
+
+struct pmu_irq_ops {
+ void (*enable_pmuirq)(unsigned int irq);
+ void (*disable_pmuirq)(unsigned int irq);
+ void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
+};
+
+static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
+{
+ free_irq(irq, per_cpu_ptr(devid, cpu));
+}
+
+static const struct pmu_irq_ops pmuirq_ops = {
+ .enable_pmuirq = enable_irq,
+ .disable_pmuirq = disable_irq_nosync,
+ .free_pmuirq = armpmu_free_pmuirq
+};
+
+static void armpmu_enable_percpu_pmuirq(unsigned int irq)
+{
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
+ void __percpu *devid)
+{
+ if (armpmu_count_irq_users(irq) == 1)
+ free_percpu_irq(irq, devid);
+}
+
+static const struct pmu_irq_ops percpu_pmuirq_ops = {
+ .enable_pmuirq = armpmu_enable_percpu_pmuirq,
+ .disable_pmuirq = disable_percpu_irq,
+ .free_pmuirq = armpmu_free_percpu_pmuirq
+};
+
static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
+static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);

static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
@@ -544,6 +582,19 @@ static int armpmu_count_irq_users(const int irq)
return count;
}

+static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(cpu_irq, cpu) == irq
+ && per_cpu(cpu_irq_ops, cpu))
+ return per_cpu(cpu_irq_ops, cpu);
+ }
+
+ return NULL;
+}
+
void armpmu_free_irq(int irq, int cpu)
{
if (per_cpu(cpu_irq, cpu) == 0)
@@ -551,18 +602,18 @@ void armpmu_free_irq(int irq, int cpu)
if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
return;
- if (!irq_is_percpu_devid(irq))
- free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
- else if (armpmu_count_irq_users(irq) == 1)
- free_percpu_irq(irq, &cpu_armpmu);
+ per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
per_cpu(cpu_irq, cpu) = 0;
+ per_cpu(cpu_irq_ops, cpu) = NULL;
}
int armpmu_request_irq(int irq, int cpu)
{
int err = 0;
const irq_handler_t handler = armpmu_dispatch_irq;
+ const struct pmu_irq_ops *irq_ops;
+
if (!irq)
return 0;
@@ -584,15 +635,26 @@ int armpmu_request_irq(int irq, int cpu)
irq_set_status_flags(irq, IRQ_NOAUTOEN);
err = request_irq(irq, handler, irq_flags, "arm-pmu",
per_cpu_ptr(&cpu_armpmu, cpu));
+
+ irq_ops = &pmuirq_ops;
} else if (armpmu_count_irq_users(irq) == 0) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&cpu_armpmu);
+
+ irq_ops = &percpu_pmuirq_ops;
+ } else {
+ /* The percpu_devid irq was already requested by another CPU */
+ irq_ops = armpmu_find_irq_ops(irq);
+
+ if (WARN_ON(!irq_ops))
+ err = -EINVAL;
}
if (err)
goto err_out;
per_cpu(cpu_irq, cpu) = irq;
+ per_cpu(cpu_irq_ops, cpu) = irq_ops;
return 0;
err_out:
@@ -625,12 +687,8 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
per_cpu(cpu_armpmu, cpu) = pmu;
irq = armpmu_get_cpu_irq(pmu, cpu);
- if (irq) {
- if (irq_is_percpu_devid(irq))
- enable_percpu_irq(irq, IRQ_TYPE_NONE);
- else
- enable_irq(irq);
- }
+ if (irq)
+ per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
return 0;
}
@@ -644,12 +702,8 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
irq = armpmu_get_cpu_irq(pmu, cpu);
- if (irq) {
- if (irq_is_percpu_devid(irq))
- disable_percpu_irq(irq);
- else
- disable_irq_nosync(irq);
- }
+ if (irq)
+ per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
per_cpu(cpu_armpmu, cpu) = NULL;
--
2.28.0
On Wed, Aug 19, 2020 at 02:34:18PM +0100, Alexandru Elisei wrote:
> From: Julien Thierry <[email protected]>
>
> [...]
>
> static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
> static DEFINE_PER_CPU(int, cpu_irq);
> +static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
Would it make sense to put this in a structure alongside the irq?
>
> [...]
>
> +static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
> +{
> + int cpu;
> +
> + for_each_possible_cpu(cpu) {
> + if (per_cpu(cpu_irq, cpu) == irq
> + && per_cpu(cpu_irq_ops, cpu))
> + return per_cpu(cpu_irq_ops, cpu);
> + }
nit, but you could make this a bit more readable:

	const struct pmu_irq_ops *ops = NULL;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) != irq)
			continue;

		ops = per_cpu(cpu_irq_ops, cpu);
		if (ops)
			break;
	}

	return ops;
Will
Hi Will,
On 9/21/20 2:55 PM, Will Deacon wrote:
> On Wed, Aug 19, 2020 at 02:34:18PM +0100, Alexandru Elisei wrote:
>> From: Julien Thierry <[email protected]>
>>
>> [...]
>>
>> static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
>> static DEFINE_PER_CPU(int, cpu_irq);
>> +static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
> Would it make sense to put this in a structure alongside the irq?
It doesn't really work, because we need the irq number to be percpu for armpmu_free_irq() to work correctly. If we have a percpu pointer to a shared struct, the first CPU that frees the irq will set the irq number to 0, and all subsequent CPUs that share the same struct will read 0 as the irq number, trigger the WARN_ON and return early.
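To make that concrete, here is a hypothetical sketch (struct pmu_irq and cpu_pmu_irq below do not exist in the patch) of what sharing one object between CPUs would look like:

	/* Hypothetical: one object shared by all CPUs using the same irq. */
	struct pmu_irq {
		int irq;
		const struct pmu_irq_ops *ops;
	};

	static DEFINE_PER_CPU(struct pmu_irq *, cpu_pmu_irq);

	void armpmu_free_irq(int irq, int cpu)
	{
		struct pmu_irq *pi = per_cpu(cpu_pmu_irq, cpu);

		/* The second CPU to get here reads pi->irq == 0 and bails out. */
		if (WARN_ON(irq != pi->irq))
			return;

		pi->ops->free_pmuirq(irq, cpu, &cpu_armpmu);

		/* Clobbers the irq number for every CPU sharing 'pi'. */
		pi->irq = 0;
	}

Keeping the irq number in its own percpu variable avoids this, at the cost of the extra percpu cpu_irq_ops pointer.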
>
>> [...]
>>
>> +static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
>> +{
>> + int cpu;
>> +
>> + for_each_possible_cpu(cpu) {
>> + if (per_cpu(cpu_irq, cpu) == irq
>> + && per_cpu(cpu_irq_ops, cpu))
>> + return per_cpu(cpu_irq_ops, cpu);
>> + }
> nit, but you could make this a bit more readable:
>
> 	const struct pmu_irq_ops *ops = NULL;
>
> 	for_each_possible_cpu(cpu) {
> 		if (per_cpu(cpu_irq, cpu) != irq)
> 			continue;
>
> 		ops = per_cpu(cpu_irq_ops, cpu);
> 		if (ops)
> 			break;
> 	}
>
> 	return ops;
That looks better, I will change it.
Thanks,
Alex