Enable ARM performance monitoring units on ACPI/arm64 machines.
This patch set expands and reworks the patches published by Mark Salter,
addressing a few of the previous review comments and adding support for
the Cortex-A72 and big.LITTLE configurations.
I've been testing this set in concert with an assortment of ACPI
patches that enable things like PCIe. It has been tested on Juno, Seattle,
and some X-Gene systems.
Thanks,
Jeremy Linton (1):
arm64: pmu: add A72 cpu type, support multiple PMU types
Mark Salter (3):
arm: pmu: Fix non-devicetree probing
arm64: pmu: add fallback probe table
arm64: pmu: Add support for probing with ACPI
arch/arm64/include/asm/cputype.h | 1 +
arch/arm64/kernel/perf_event.c | 11 +-
arch/arm64/kernel/smp.c | 5 +
drivers/perf/Kconfig | 4 +
drivers/perf/Makefile | 1 +
drivers/perf/arm_pmu.c | 54 ++++++++--
drivers/perf/arm_pmu_acpi.c | 212 +++++++++++++++++++++++++++++++++++++++
include/linux/perf/arm_pmu.h | 10 ++
8 files changed, 287 insertions(+), 11 deletions(-)
create mode 100644 drivers/perf/arm_pmu_acpi.c
--
2.4.3
From: Mark Salter <[email protected]>
In preparation for ACPI support, add a pmu_probe_info table to
the arm_pmu_device_probe() call. This table gets used when
probing in the absence of a devicetree node for PMU.
Signed-off-by: Mark Salter <[email protected]>
Signed-off-by: Jeremy Linton <[email protected]>
---
arch/arm64/kernel/perf_event.c | 10 +++++++++-
include/linux/perf/arm_pmu.h | 3 +++
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index f419a7c..8f12eac 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -867,9 +867,17 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
{},
};
+static const struct pmu_probe_info armv8_pmu_probe_table[] = {
+ ARMV8_PMU_PART_PROBE(ARM_CPU_PART_CORTEX_A53, armv8_a53_pmu_init),
+ ARMV8_PMU_PART_PROBE(ARM_CPU_PART_CORTEX_A57, armv8_a57_pmu_init),
+ PMU_PROBE(0, 0, armv8_pmuv3_init), /* if all else fails... */
+ { /* sentinel value */ }
+};
+
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
- return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
+ return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
+ armv8_pmu_probe_table);
}
static struct platform_driver armv8_pmu_driver = {
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 4196c90..495332f 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -145,6 +145,9 @@ struct pmu_probe_info {
#define XSCALE_PMU_PROBE(_version, _fn) \
PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
+#define ARMV8_PMU_PART_PROBE(_part, _fn) \
+ PMU_PROBE((_part) << MIDR_PARTNUM_SHIFT, MIDR_PARTNUM_MASK, _fn)
+
int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table);
--
2.4.3
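For reference, a minimal standalone sketch of how an ARMV8_PMU_PART_PROBE()
entry matches a MIDR value via the part-number mask. The shift/mask values
mirror arm64's cputype.h, the init callbacks are reduced to plain strings, and
the matching loop follows probe_current_pmu(); this is an illustration only,
not part of the patch:

/*
 * Standalone userspace sketch (not part of the patch) of MIDR part-number
 * matching against a pmu_probe_info style table.
 */
#include <stdio.h>

#define MIDR_PARTNUM_SHIFT	4
#define MIDR_PARTNUM_MASK	(0xfff << MIDR_PARTNUM_SHIFT)
#define ARM_CPU_PART_CORTEX_A57	0xD07
#define ARM_CPU_PART_CORTEX_A53	0xD03

struct pmu_probe_info {
	unsigned int cpuid;
	unsigned int mask;
	const char *init;		/* stand-in for the init callback */
};

#define PMU_PROBE(_cpuid, _mask, _fn)	{ (_cpuid), (_mask), (_fn) }
#define ARMV8_PMU_PART_PROBE(_part, _fn) \
	PMU_PROBE((_part) << MIDR_PARTNUM_SHIFT, MIDR_PARTNUM_MASK, _fn)

static const struct pmu_probe_info table[] = {
	ARMV8_PMU_PART_PROBE(ARM_CPU_PART_CORTEX_A53, "armv8_a53_pmu_init"),
	ARMV8_PMU_PART_PROBE(ARM_CPU_PART_CORTEX_A57, "armv8_a57_pmu_init"),
	PMU_PROBE(0, 0, "armv8_pmuv3_init"),	/* if all else fails... */
	{ 0, 0, NULL }				/* sentinel */
};

int main(void)
{
	unsigned int midr = 0x411fd070;		/* made-up Cortex-A57 MIDR */
	const struct pmu_probe_info *info;

	for (info = table; info->init; info++) {
		if ((midr & info->mask) == info->cpuid) {
			printf("MIDR %#x -> %s\n", midr, info->init);
			break;
		}
	}
	return 0;
}

Running it prints the A57 init function, since only the part-number field of
the MIDR participates in the comparison.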
ARM big.LITTLE machines can have PMUs whose counters differ between
clusters. ACPI systems should be able to support this as well. Also add
support for the Cortex-A72 PMU.
Signed-off-by: Jeremy Linton <[email protected]>
---
arch/arm64/include/asm/cputype.h | 1 +
arch/arm64/kernel/perf_event.c | 1 +
drivers/perf/arm_pmu.c | 54 +++++++--
drivers/perf/arm_pmu_acpi.c | 229 +++++++++++++++++++++++++++------------
4 files changed, 204 insertions(+), 81 deletions(-)
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 87e1985..1e40799 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -74,6 +74,7 @@
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
+#define ARM_CPU_PART_CORTEX_A72 0xD08
#define ARM_CPU_PART_CORTEX_A57 0xD07
#define ARM_CPU_PART_CORTEX_A53 0xD03
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 8f12eac..1893f77 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -870,6 +870,7 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
static const struct pmu_probe_info armv8_pmu_probe_table[] = {
ARMV8_PMU_PART_PROBE(ARM_CPU_PART_CORTEX_A53, armv8_a53_pmu_init),
ARMV8_PMU_PART_PROBE(ARM_CPU_PART_CORTEX_A57, armv8_a57_pmu_init),
+ ARMV8_PMU_PART_PROBE(ARM_CPU_PART_CORTEX_A72, armv8_a72_pmu_init),
PMU_PROBE(0, 0, armv8_pmuv3_init), /* if all else fails... */
{ /* sentinel value */ }
};
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 49fa845..ffca517 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -11,6 +11,7 @@
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
+#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
@@ -24,6 +25,7 @@
#include <linux/irq.h>
#include <linux/irqdesc.h>
+#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
@@ -853,25 +855,51 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
}
/*
- * CPU PMU identification and probing.
+ * CPU PMU identification and probing. It's possible to have
+ * multiple CPU types in an ARM machine. Ensure that we are
+ * picking the right PMU type based on the CPU in question.
*/
-static int probe_current_pmu(struct arm_pmu *pmu,
- const struct pmu_probe_info *info)
+static int probe_plat_pmu(struct arm_pmu *pmu,
+ const struct pmu_probe_info *info,
+ unsigned int pmuid)
{
- int cpu = get_cpu();
- unsigned int cpuid = read_cpuid_id();
int ret = -ENODEV;
+ int cpu;
+ int aff_ctr = 0;
+ struct platform_device *pdev = pmu->plat_device;
+ int irq = platform_get_irq(pdev, 0);
- pr_info("probing PMU on CPU %d\n", cpu);
+ if (irq >= 0 && !irq_is_percpu(irq)) {
+ pmu->irq_affinity = kcalloc(pdev->num_resources, sizeof(int),
+ GFP_KERNEL);
+ if (!pmu->irq_affinity)
+ return -ENOMEM;
+ }
+ for_each_possible_cpu(cpu) {
+ struct cpuinfo_arm64 *cinfo = per_cpu_ptr(&cpu_data, cpu);
+ unsigned int cpuid = cinfo->reg_midr;
+
+ if (cpuid == pmuid) {
+ cpumask_set_cpu(cpu, &pmu->supported_cpus);
+ pr_devel("enable pmu on cpu %d\n", cpu);
+ if (pmu->irq_affinity) {
+ pmu->irq_affinity[aff_ctr] = cpu;
+ aff_ctr++;
+ }
+ }
+ }
+
+ pr_debug("probing PMU %X\n", pmuid);
+ /* find the type of PMU given the CPU */
for (; info->init != NULL; info++) {
- if ((cpuid & info->mask) != info->cpuid)
+ if ((pmuid & info->mask) != info->cpuid)
continue;
+ pr_devel("Found PMU\n");
ret = info->init(pmu);
break;
}
- put_cpu();
return ret;
}
@@ -997,8 +1025,14 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (!ret)
ret = init_fn(pmu);
} else {
- cpumask_setall(&pmu->supported_cpus);
- ret = probe_current_pmu(pmu, probe_table);
+ if (acpi_disabled) {
+ /* use the boot cpu. */
+ struct cpuinfo_arm64 *cinfo = per_cpu_ptr(&cpu_data, 0);
+ unsigned int cpuid = cinfo->reg_midr;
+
+ ret = probe_plat_pmu(pmu, probe_table, cpuid);
+ } else
+ ret = probe_plat_pmu(pmu, probe_table, pdev->id);
}
if (ret) {
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 722f4ca..793092c 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -2,6 +2,7 @@
* PMU support
*
* Copyright (C) 2015 Red Hat Inc.
+ * Copyright (C) 2016 ARM Ltd.
* Author: Mark Salter <[email protected]>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
@@ -9,21 +10,35 @@
*
*/
+#define pr_fmt(fmt) "ACPI-PMU: " fmt
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
+#include <asm/cpu.h>
+
#define PMU_PDEV_NAME "armv8-pmu"
struct pmu_irq {
- int gsi;
- int trigger;
+ int gsi;
+ int trigger;
+ bool registered;
+};
+
+struct pmu_types {
+ int cpu_type;
+ int cpu_count;
};
static struct pmu_irq pmu_irqs[NR_CPUS] __initdata;
+/*
+ * Called from acpi_map_gic_cpu_interface()'s MADT parsing callback during
+ * boot. This routine saves off the GSIs and their trigger state for use
+ * when we are ready to build the PMU platform device.
+ */
void __init arm_pmu_parse_acpi(int cpu, struct acpi_madt_generic_interrupt *gic)
{
pmu_irqs[cpu].gsi = gic->performance_interrupt;
@@ -31,95 +46,167 @@ void __init arm_pmu_parse_acpi(int cpu, struct acpi_madt_generic_interrupt *gic)
pmu_irqs[cpu].trigger = ACPI_EDGE_SENSITIVE;
else
pmu_irqs[cpu].trigger = ACPI_LEVEL_SENSITIVE;
+ pr_info("Assign CPU %d girq %d level %d\n", cpu, pmu_irqs[cpu].gsi,
+ pmu_irqs[cpu].trigger);
}
-#ifndef CONFIG_SMP
-/*
- * In !SMP case, we parse for boot CPU IRQ here.
- */
-static int __init acpi_parse_pmu_irqs(struct acpi_subtable_header *header,
- const unsigned long end)
-{
- struct acpi_madt_generic_interrupt *gic;
-
- gic = (struct acpi_madt_generic_interrupt *)header;
-
- if (cpu_logical_map(0) == (gic->arm_mpidr & MPIDR_HWID_BITMASK))
- arm_pmu_parse_acpi(0, gic);
-
- return 0;
-}
-
-static void __init acpi_parse_boot_cpu(void)
+/* count the number and type of CPUs in the system */
+static void __init arm_pmu_acpi_determine_cpu_types(struct pmu_types *pmus)
{
- count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
- acpi_parse_pmu_irqs, 0);
+ int i, j;
+
+ for_each_possible_cpu(i) {
+ struct cpuinfo_arm64 *cinfo = per_cpu_ptr(&cpu_data, i);
+
+ pr_devel("Present CPU %d is a %X\n", i,
+ MIDR_PARTNUM(cinfo->reg_midr));
+ for (j = 0; j < NR_CPUS; j++) {
+ if (pmus[j].cpu_type == MIDR_PARTNUM(cinfo->reg_midr)) {
+ pmus[j].cpu_count++;
+ break;
+ }
+ if (pmus[j].cpu_count == 0) {
+ pmus[j].cpu_type = MIDR_PARTNUM(cinfo->reg_midr);
+ pmus[j].cpu_count++;
+ break;
+ }
+ }
+ }
}
-#else
-#define acpi_parse_boot_cpu() do {} while (0)
-#endif
-static int __init pmu_acpi_init(void)
+static int __init arm_pmu_acpi_register_pmu(int count, struct resource *res,
+ int last_cpu_id)
{
- struct platform_device *pdev;
- struct pmu_irq *pirq = pmu_irqs;
- struct resource *res, *r;
+ int i;
int err = -ENOMEM;
- int i, count, irq;
+ bool free_gsi = false;
+ struct platform_device *pdev;
- if (acpi_disabled)
- return 0;
+ if (count) {
+ pdev = platform_device_alloc(PMU_PDEV_NAME, last_cpu_id);
+
+ if (pdev) {
+ err = platform_device_add_resources(pdev,
+ res, count);
+ if (!err) {
+ err = platform_device_add(pdev);
+ if (err) {
+ pr_warn("Unable to register PMU device\n");
+ free_gsi = true;
+ }
+ } else {
+ pr_warn("Unable to add resources to device\n");
+ free_gsi = true;
+ platform_device_put(pdev);
+ }
+ } else {
+ pr_warn("Unable to allocate platform device\n");
+ free_gsi = true;
+ }
+ }
- acpi_parse_boot_cpu();
+ /* unmark (and possibly unregister) registered GSIs */
+ for_each_possible_cpu(i) {
+ if (pmu_irqs[i].registered) {
+ if (free_gsi)
+ acpi_unregister_gsi(pmu_irqs[i].gsi);
+ pmu_irqs[i].registered = false;
+ }
+ }
- /* Must have irq for boot boot cpu, at least */
- if (pirq->gsi == 0)
- return -EINVAL;
+ return err;
+}
- irq = acpi_register_gsi(NULL, pirq->gsi, pirq->trigger,
- ACPI_ACTIVE_HIGH);
+/*
+ * For the given CPU/PMU type, walk all known GSIs, register them, and add
+ * them to the resource structure. Return the number of GSIs contained
+ * in the res structure, and the id of the last CPU/PMU we added.
+ */
+static int __init arm_pmu_acpi_gsi_res(struct pmu_types *pmus,
+ struct resource *res, int *last_cpu_id)
+{
+ int i, count;
+ int irq;
+
+ pr_info("Setting up %d PMUs for CPU type %X\n", pmus->cpu_count,
+ pmus->cpu_type);
+ /* let's group all the PMUs from similar CPUs together */
+ count = 0;
+ for_each_possible_cpu(i) {
+ struct cpuinfo_arm64 *cinfo = per_cpu_ptr(&cpu_data, i);
+
+ if (pmus->cpu_type == MIDR_PARTNUM(cinfo->reg_midr)) {
+ pr_devel("Setting up CPU %d\n", i);
+ if (pmu_irqs[i].gsi == 0)
+ continue;
+
+ irq = acpi_register_gsi(NULL, pmu_irqs[i].gsi,
+ pmu_irqs[i].trigger,
+ ACPI_ACTIVE_HIGH);
- if (irq_is_percpu(irq))
- count = 1;
- else
- for (i = 1, count = 1; i < NR_CPUS; i++)
- if (pmu_irqs[i].gsi)
- ++count;
+ res[count].start = res[count].end = irq;
+ res[count].flags = IORESOURCE_IRQ;
- pdev = platform_device_alloc(PMU_PDEV_NAME, -1);
- if (!pdev)
- goto err_free_gsi;
+ if (pmu_irqs[i].trigger == ACPI_EDGE_SENSITIVE)
+ res[count].flags |= IORESOURCE_IRQ_HIGHEDGE;
+ else
+ res[count].flags |= IORESOURCE_IRQ_HIGHLEVEL;
- res = kcalloc(count, sizeof(*res), GFP_KERNEL);
- if (!res)
- goto err_free_device;
+ pmu_irqs[i].registered = true;
+ count++;
+ (*last_cpu_id) = cinfo->reg_midr;
- for (i = 0, r = res; i < count; i++, pirq++, r++) {
- if (i)
- irq = acpi_register_gsi(NULL, pirq->gsi, pirq->trigger,
- ACPI_ACTIVE_HIGH);
- r->start = r->end = irq;
- r->flags = IORESOURCE_IRQ;
- if (pirq->trigger == ACPI_EDGE_SENSITIVE)
- r->flags |= IORESOURCE_IRQ_HIGHEDGE;
- else
- r->flags |= IORESOURCE_IRQ_HIGHLEVEL;
+ if (irq_is_percpu(irq))
+ pr_debug("PPI detected\n");
+ }
}
+ return count;
+}
- err = platform_device_add_resources(pdev, res, count);
- if (!err)
- err = platform_device_add(pdev);
- kfree(res);
- if (!err)
- return 0;
+static int __init pmu_acpi_init(void)
+{
+ struct resource *res;
+ int err = -ENOMEM;
+ int count;
+ int j, last_cpu_id;
+ struct pmu_types *pmus;
-err_free_device:
- platform_device_put(pdev);
+ pr_debug("Prepare registration\n");
+ if (acpi_disabled)
+ return 0;
-err_free_gsi:
- for (i = 0; i < count; i++)
- acpi_unregister_gsi(pmu_irqs[i].gsi);
+ pmus = kcalloc(NR_CPUS, sizeof(struct pmu_types), GFP_KERNEL);
+
+ if (pmus) {
+ arm_pmu_acpi_determine_cpu_types(pmus);
+
+ for (j = 0; pmus[j].cpu_count; j++) {
+ pr_devel("CPU type %d, count %d\n", pmus[j].cpu_type,
+ pmus[j].cpu_count);
+ res = kcalloc(pmus[j].cpu_count,
+ sizeof(struct resource), GFP_KERNEL);
+
+ /* for a given PMU type collect all the GSIs. */
+ if (res) {
+ count = arm_pmu_acpi_gsi_res(&pmus[j], res,
+ &last_cpu_id);
+ /*
+ * register this set of interrupts
+ * with a new PMU device
+ */
+ err = arm_pmu_acpi_register_pmu(count,
+ res,
+ last_cpu_id);
+ kfree(res);
+ } else
+ pr_warn("PMU unable to allocate interrupt resource space\n");
+ }
+
+ kfree(pmus);
+ } else
+ pr_warn("PMU: Unable to allocate pmu count structures\n");
return err;
}
+
arch_initcall(pmu_acpi_init);
--
2.4.3
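For illustration only (not part of the patch), a standalone sketch of the
per-CPU-type bucketing that arm_pmu_acpi_determine_cpu_types() performs; the
CPU layout and MIDR part numbers are made up for a 4x Cortex-A53 +
2x Cortex-A57 style system:

#include <stdio.h>

#define NR_CPUS 6

struct pmu_types {
	int cpu_type;
	int cpu_count;
};

int main(void)
{
	/* MIDR part number per CPU: 0xD03 = Cortex-A53, 0xD07 = Cortex-A57 */
	int partnum[NR_CPUS] = { 0xD03, 0xD03, 0xD03, 0xD03, 0xD07, 0xD07 };
	struct pmu_types pmus[NR_CPUS] = { { 0, 0 } };
	int i, j;

	for (i = 0; i < NR_CPUS; i++) {
		for (j = 0; j < NR_CPUS; j++) {
			if (pmus[j].cpu_count &&
			    pmus[j].cpu_type == partnum[i]) {
				pmus[j].cpu_count++;	/* existing bucket */
				break;
			}
			if (pmus[j].cpu_count == 0) {
				pmus[j].cpu_type = partnum[i];	/* new bucket */
				pmus[j].cpu_count++;
				break;
			}
		}
	}

	/* one platform device (and one resource array) per bucket */
	for (j = 0; pmus[j].cpu_count; j++)
		printf("PMU for part %#x: %d CPUs/IRQs\n",
		       pmus[j].cpu_type, pmus[j].cpu_count);
	return 0;
}

With that grouping, pmu_acpi_init() registers one "armv8-pmu" platform device
per CPU type (its id carrying a MIDR value), and arm_pmu_device_probe() later
matches that id against armv8_pmu_probe_table to select the right init
function.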
From: Mark Salter <[email protected]>
In the case of ACPI, the PMU IRQ information is contained in the
MADT. Also, since the PMU does not appear as a device in the ACPI
DSDT, it is necessary to create a platform device so that the
appropriate driver probing is triggered.
Signed-off-by: Mark Salter <[email protected]>
Signed-off-by: Jeremy Linton <[email protected]>
---
arch/arm64/kernel/smp.c | 5 ++
drivers/perf/Kconfig | 4 ++
drivers/perf/Makefile | 1 +
drivers/perf/arm_pmu_acpi.c | 125 +++++++++++++++++++++++++++++++++++++++++++
include/linux/perf/arm_pmu.h | 7 +++
5 files changed, 142 insertions(+)
create mode 100644 drivers/perf/arm_pmu_acpi.c
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b2d5f4e..c6f2c53 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -37,6 +37,7 @@
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
+#include <linux/perf/arm_pmu.h>
#include <asm/alternative.h>
#include <asm/atomic.h>
@@ -502,6 +503,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
return;
}
bootcpu_valid = true;
+ arm_pmu_parse_acpi(0, processor);
return;
}
@@ -522,6 +524,9 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
*/
acpi_set_mailbox_entry(cpu_count, processor);
+ /* get PMU irq info */
+ arm_pmu_parse_acpi(cpu_count, processor);
+
cpu_count++;
}
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 04e2653..818fa3b 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -12,4 +12,8 @@ config ARM_PMU
Say y if you want to use CPU performance monitors on ARM-based
systems.
+config ARM_PMU_ACPI
+ def_bool y
+ depends on ARM_PMU && ACPI
+
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index acd2397..fd8090d 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
new file mode 100644
index 0000000..722f4ca
--- /dev/null
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -0,0 +1,125 @@
+/*
+ * PMU support
+ *
+ * Copyright (C) 2015 Red Hat Inc.
+ * Author: Mark Salter <[email protected]>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+
+#define PMU_PDEV_NAME "armv8-pmu"
+
+struct pmu_irq {
+ int gsi;
+ int trigger;
+};
+
+static struct pmu_irq pmu_irqs[NR_CPUS] __initdata;
+
+void __init arm_pmu_parse_acpi(int cpu, struct acpi_madt_generic_interrupt *gic)
+{
+ pmu_irqs[cpu].gsi = gic->performance_interrupt;
+ if (gic->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
+ pmu_irqs[cpu].trigger = ACPI_EDGE_SENSITIVE;
+ else
+ pmu_irqs[cpu].trigger = ACPI_LEVEL_SENSITIVE;
+}
+
+#ifndef CONFIG_SMP
+/*
+ * In !SMP case, we parse for boot CPU IRQ here.
+ */
+static int __init acpi_parse_pmu_irqs(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gic;
+
+ gic = (struct acpi_madt_generic_interrupt *)header;
+
+ if (cpu_logical_map(0) == (gic->arm_mpidr & MPIDR_HWID_BITMASK))
+ arm_pmu_parse_acpi(0, gic);
+
+ return 0;
+}
+
+static void __init acpi_parse_boot_cpu(void)
+{
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ acpi_parse_pmu_irqs, 0);
+}
+#else
+#define acpi_parse_boot_cpu() do {} while (0)
+#endif
+
+static int __init pmu_acpi_init(void)
+{
+ struct platform_device *pdev;
+ struct pmu_irq *pirq = pmu_irqs;
+ struct resource *res, *r;
+ int err = -ENOMEM;
+ int i, count, irq;
+
+ if (acpi_disabled)
+ return 0;
+
+ acpi_parse_boot_cpu();
+
+ /* Must have irq for boot boot cpu, at least */
+ if (pirq->gsi == 0)
+ return -EINVAL;
+
+ irq = acpi_register_gsi(NULL, pirq->gsi, pirq->trigger,
+ ACPI_ACTIVE_HIGH);
+
+ if (irq_is_percpu(irq))
+ count = 1;
+ else
+ for (i = 1, count = 1; i < NR_CPUS; i++)
+ if (pmu_irqs[i].gsi)
+ ++count;
+
+ pdev = platform_device_alloc(PMU_PDEV_NAME, -1);
+ if (!pdev)
+ goto err_free_gsi;
+
+ res = kcalloc(count, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ goto err_free_device;
+
+ for (i = 0, r = res; i < count; i++, pirq++, r++) {
+ if (i)
+ irq = acpi_register_gsi(NULL, pirq->gsi, pirq->trigger,
+ ACPI_ACTIVE_HIGH);
+ r->start = r->end = irq;
+ r->flags = IORESOURCE_IRQ;
+ if (pirq->trigger == ACPI_EDGE_SENSITIVE)
+ r->flags |= IORESOURCE_IRQ_HIGHEDGE;
+ else
+ r->flags |= IORESOURCE_IRQ_HIGHLEVEL;
+ }
+
+ err = platform_device_add_resources(pdev, res, count);
+ if (!err)
+ err = platform_device_add(pdev);
+ kfree(res);
+ if (!err)
+ return 0;
+
+err_free_device:
+ platform_device_put(pdev);
+
+err_free_gsi:
+ for (i = 0; i < count; i++)
+ acpi_unregister_gsi(pmu_irqs[i].gsi);
+
+ return err;
+}
+arch_initcall(pmu_acpi_init);
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 495332f..d002269 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -154,4 +154,11 @@ int arm_pmu_device_probe(struct platform_device *pdev,
#endif /* CONFIG_ARM_PMU */
+#ifdef CONFIG_ARM_PMU_ACPI
+struct acpi_madt_generic_interrupt;
+void arm_pmu_parse_acpi(int cpu, struct acpi_madt_generic_interrupt *gic);
+#else
+#define arm_pmu_parse_acpi(a, b) do { } while (0)
+#endif /* CONFIG_ARM_PMU_ACPI */
+
#endif /* __ARM_PMU_H__ */
--
2.4.3
From: Mark Salter <[email protected]>
There is a problem in the non-devicetree PMU probing where some
probe functions may get the number of supported events through
smp_call_function_any() using the arm_pmu supported_cpus mask.
But at the time the probe function is called, the supported_cpus
mask is empty so the call fails. This patch makes sure the mask
is set before calling the init function rather than after.
Signed-off-by: Mark Salter <[email protected]>
Signed-off-by: Jeremy Linton <[email protected]>
---
drivers/perf/arm_pmu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 32346b5..49fa845 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -997,8 +997,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (!ret)
ret = init_fn(pmu);
} else {
- ret = probe_current_pmu(pmu, probe_table);
cpumask_setall(&pmu->supported_cpus);
+ ret = probe_current_pmu(pmu, probe_table);
}
if (ret) {
--
2.4.3
Hi Jeremy,
[auto build test ERROR on arm64/for-next/core]
[also build test ERROR on v4.6-rc2 next-20160408]
[if your patch is applied to the wrong git tree, please drop us a note to help improving the system]
url: https://github.com/0day-ci/linux/commits/Jeremy-Linton/arm-pmu-Fix-non-devicetree-probing/20160409-060104
base: https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux for-next/core
config: arm-multi_v5_defconfig (attached as .config)
reproduce:
wget https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
make.cross ARCH=arm
All error/warnings (new ones prefixed by >>):
In file included from include/asm-generic/percpu.h:6:0,
from arch/arm/include/asm/percpu.h:50,
from include/linux/percpu.h:12,
from include/linux/topology.h:34,
from include/linux/gfp.h:8,
from include/linux/slab.h:14,
from include/linux/resource_ext.h:19,
from include/linux/acpi.h:26,
from drivers/perf/arm_pmu.c:14:
drivers/perf/arm_pmu.c: In function 'probe_plat_pmu':
>> include/linux/percpu-defs.h:250:31: warning: initialization from incompatible pointer type
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
^
>> drivers/perf/arm_pmu.c:880:33: note: in expansion of macro 'per_cpu_ptr'
struct cpuinfo_arm64 *cinfo = per_cpu_ptr(&cpu_data, cpu);
^
>> drivers/perf/arm_pmu.c:881:29: error: dereferencing pointer to incomplete type
unsigned int cpuid = cinfo->reg_midr;
^
In file included from include/asm-generic/percpu.h:6:0,
from arch/arm/include/asm/percpu.h:50,
from include/linux/percpu.h:12,
from include/linux/topology.h:34,
from include/linux/gfp.h:8,
from include/linux/slab.h:14,
from include/linux/resource_ext.h:19,
from include/linux/acpi.h:26,
from drivers/perf/arm_pmu.c:14:
drivers/perf/arm_pmu.c: In function 'arm_pmu_device_probe':
>> include/linux/percpu-defs.h:250:31: warning: initialization from incompatible pointer type
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
^
drivers/perf/arm_pmu.c:1030:34: note: in expansion of macro 'per_cpu_ptr'
struct cpuinfo_arm64 *cinfo = per_cpu_ptr(&cpu_data, 0);
^
drivers/perf/arm_pmu.c:1031:30: error: dereferencing pointer to incomplete type
unsigned int cpuid = cinfo->reg_midr;
^
vim +881 drivers/perf/arm_pmu.c
874 GFP_KERNEL);
875 if (!pmu->irq_affinity)
876 return -ENOMEM;
877 }
878
879 for_each_possible_cpu(cpu) {
> 880 struct cpuinfo_arm64 *cinfo = per_cpu_ptr(&cpu_data, cpu);
> 881 unsigned int cpuid = cinfo->reg_midr;
882
883 if (cpuid == pmuid) {
884 cpumask_set_cpu(cpu, &pmu->supported_cpus);
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
On Fri, Apr 08, 2016 at 04:57:05PM -0500, Jeremy Linton wrote:
> From: Mark Salter <[email protected]>
>
> In preparation for ACPI support, add a pmu_probe_info table to
> the arm_pmu_device_probe() call. This table gets used when
> probing in the absence of a devicetree node for PMU.
>
> Signed-off-by: Mark Salter <[email protected]>
> Signed-off-by: Jeremy Linton <[email protected]>
> ---
> arch/arm64/kernel/perf_event.c | 10 +++++++++-
> include/linux/perf/arm_pmu.h | 3 +++
> 2 files changed, 12 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
> index f419a7c..8f12eac 100644
> --- a/arch/arm64/kernel/perf_event.c
> +++ b/arch/arm64/kernel/perf_event.c
> @@ -867,9 +867,17 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
> {},
> };
>
> +static const struct pmu_probe_info armv8_pmu_probe_table[] = {
> + ARMV8_PMU_PART_PROBE(ARM_CPU_PART_CORTEX_A53, armv8_a53_pmu_init),
> + ARMV8_PMU_PART_PROBE(ARM_CPU_PART_CORTEX_A57, armv8_a57_pmu_init),
> + PMU_PROBE(0, 0, armv8_pmuv3_init), /* if all else fails... */
> + { /* sentinel value */ }
> +};
> +
Hi Jeremy,
with 4.6, ThunderX PMU support was added, so I think the above table is
missing a line like:
ARMV8_PMU_PART_PROBE(CAVIUM_CPU_PART_THUNDERX, armv8_thunder_pmu_init)
Thanks,
Jan
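For reference, with the Cortex-A72 entry added elsewhere in this series plus
the line Jan suggests, the table would look roughly as follows; the ThunderX
part macro and init function names are taken from Jan's note and have not
been verified against the 4.6 tree:

static const struct pmu_probe_info armv8_pmu_probe_table[] = {
	ARMV8_PMU_PART_PROBE(ARM_CPU_PART_CORTEX_A53, armv8_a53_pmu_init),
	ARMV8_PMU_PART_PROBE(ARM_CPU_PART_CORTEX_A57, armv8_a57_pmu_init),
	ARMV8_PMU_PART_PROBE(ARM_CPU_PART_CORTEX_A72, armv8_a72_pmu_init),
	ARMV8_PMU_PART_PROBE(CAVIUM_CPU_PART_THUNDERX, armv8_thunder_pmu_init),
	PMU_PROBE(0, 0, armv8_pmuv3_init), /* if all else fails... */
	{ /* sentinel value */ }
};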