Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1752542AbbHCHgh (ORCPT ); Mon, 3 Aug 2015 03:36:37 -0400 Received: from e28smtp05.in.ibm.com ([122.248.162.5]:60340 "EHLO e28smtp05.in.ibm.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751933AbbHCHge (ORCPT ); Mon, 3 Aug 2015 03:36:34 -0400 X-Helo: d28dlp03.in.ibm.com X-MailFrom: maddy@linux.vnet.ibm.com X-RcptTo: linux-kernel@vger.kernel.org From: Madhavan Srinivasan To: linuxppc-dev@lists.ozlabs.org, linux-kernel@vger.kernel.org Cc: Madhavan Srinivasan , Michael Ellerman , Benjamin Herrenschmidt , Paul Mackerras , Anton Blanchard , Sukadev Bhattiprolu , Daniel Axtens , Stephane Eranian , Preeti U Murthy , Ingo Molnar , Peter Zijlstra Subject: [PATCH v7 7/7] powerpc/powernv: nest pmu cpumask and cpu hotplug support Date: Mon, 3 Aug 2015 13:05:59 +0530 Message-Id: <1438587359-6165-8-git-send-email-maddy@linux.vnet.ibm.com> X-Mailer: git-send-email 1.9.1 In-Reply-To: <1438587359-6165-1-git-send-email-maddy@linux.vnet.ibm.com> References: <1438587359-6165-1-git-send-email-maddy@linux.vnet.ibm.com> X-TM-AS-MML: disable X-Content-Scanned: Fidelis XPS MAILER x-cbid: 15080307-0017-0000-0000-0000067F337D Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 6333 Lines: 236 Adds cpumask attribute to be used by each nest pmu since nest units are per-chip. Only one cpu (first online cpu) from each chip is designated to read counters. On cpu hotplug, dying cpu is checked to see whether it is one of the designated cpus, if yes, next online cpu from the same chip is designated as new cpu to read counters. 
Cc: Michael Ellerman
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Cc: Anton Blanchard
Cc: Sukadev Bhattiprolu
Cc: Daniel Axtens
Cc: Stephane Eranian
Cc: Preeti U Murthy
Cc: Ingo Molnar
Cc: Peter Zijlstra
Signed-off-by: Madhavan Srinivasan
---
 arch/powerpc/perf/nest-pmu.c | 177 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 177 insertions(+)

diff --git a/arch/powerpc/perf/nest-pmu.c b/arch/powerpc/perf/nest-pmu.c
index f653a7ab6ed7..ec173b7c36e5 100644
--- a/arch/powerpc/perf/nest-pmu.c
+++ b/arch/powerpc/perf/nest-pmu.c
@@ -12,6 +12,7 @@
 static struct perchip_nest_info p8_nest_perchip_info[P8_NEST_MAX_CHIPS];
 static struct nest_pmu *per_nest_pmu_arr[P8_NEST_MAX_PMUS];
+static cpumask_t nest_pmu_cpu_mask;
 
 PMU_FORMAT_ATTR(event, "config:0-20");
 static struct attribute *p8_nest_format_attrs[] = {
@@ -24,6 +25,177 @@ static struct attribute_group p8_nest_format_group = {
 	.attrs = p8_nest_format_attrs,
 };
 
+/* Sysfs "cpumask" attribute: shows the designated counter-reader cpus. */
+static ssize_t nest_pmu_cpumask_get_attr(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return cpumap_print_to_pagebuf(true, buf, &nest_pmu_cpu_mask);
+}
+
+static DEVICE_ATTR(cpumask, S_IRUGO, nest_pmu_cpumask_get_attr, NULL);
+
+static struct attribute *nest_pmu_cpumask_attrs[] = {
+	&dev_attr_cpumask.attr,
+	NULL,
+};
+
+static struct attribute_group nest_pmu_cpumask_attr_group = {
+	.attrs = nest_pmu_cpumask_attrs,
+};
+
+/* Start the nest engine via OPAL; record failure per-cpu in loc[]. */
+static void nest_init(int *loc)
+{
+	int rc;
+
+	rc = opal_nest_ima_control(
+		P8_NEST_MODE_PRODUCTION, P8_NEST_ENGINE_START);
+	if (rc)
+		loc[smp_processor_id()] = 1;
+}
+
+static void nest_change_cpu_context(int old_cpu, int new_cpu)
+{
+	int i;
+
+	/* Bounds check must come first to avoid reading past the array */
+	for (i = 0;
+		(i < P8_NEST_MAX_PMUS) && (per_nest_pmu_arr[i] != NULL); i++)
+		perf_pmu_migrate_context(&per_nest_pmu_arr[i]->pmu,
+							old_cpu, new_cpu);
+}
+
+static void nest_exit_cpu(int cpu)
+{
+	int nid, target = -1;
+	const struct cpumask *l_cpumask;
+
+	/*
+	 * Check in the designated list for this cpu. Don't bother
+	 * if not one of them.
+	 */
+	if (!cpumask_test_and_clear_cpu(cpu, &nest_pmu_cpu_mask))
+		return;
+
+	/*
+	 * Now that this cpu is one of the designated,
+	 * find a next cpu a) which is online and b) in same chip.
+	 */
+	nid = cpu_to_node(cpu);
+	l_cpumask = cpumask_of_node(nid);
+	target = cpumask_next(cpu, l_cpumask);
+
+	/*
+	 * Update the cpumask with the target cpu and
+	 * migrate the context if needed.
+	 * cpumask_next() returns nr_cpu_ids when no cpu is left,
+	 * which is not a valid cpu number.
+	 */
+	if (target >= 0 && target < nr_cpu_ids) {
+		cpumask_set_cpu(target, &nest_pmu_cpu_mask);
+		nest_change_cpu_context(cpu, target);
+	}
+}
+
+static void nest_init_cpu(int cpu)
+{
+	int nid, fcpu, ncpu;
+	const struct cpumask *l_cpumask;
+	struct cpumask tmp_mask;
+
+	nid = cpu_to_node(cpu);
+	l_cpumask = cpumask_of_node(nid);
+
+	/*
+	 * if empty cpumask, just add incoming cpu and move on.
+	 */
+	if (!cpumask_and(&tmp_mask, l_cpumask, &nest_pmu_cpu_mask)) {
+		cpumask_set_cpu(cpu, &nest_pmu_cpu_mask);
+		return;
+	}
+
+	/*
+	 * Always have the first online cpu of a chip as designated one.
+	 * Guard ncpu: cpumask_next() may return nr_cpu_ids.
+	 */
+	fcpu = cpumask_first(l_cpumask);
+	ncpu = cpumask_next(cpu, l_cpumask);
+	if (cpu == fcpu && ncpu < nr_cpu_ids) {
+		if (cpumask_test_and_clear_cpu(ncpu, &nest_pmu_cpu_mask)) {
+			cpumask_set_cpu(cpu, &nest_pmu_cpu_mask);
+			nest_change_cpu_context(ncpu, cpu);
+		}
+	}
+}
+
+static int nest_pmu_cpu_notifier(struct notifier_block *self,
+				unsigned long action, void *hcpu)
+{
+	long cpu = (long)hcpu;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+		nest_init_cpu(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+		nest_exit_cpu(cpu);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block nest_pmu_cpu_nb = {
+	.notifier_call	= nest_pmu_cpu_notifier,
+	.priority	= CPU_PRI_PERF + 1,
+};
+
+static int nest_pmu_cpumask_init(void)
+{
+	const struct cpumask *l_cpumask;
+	int cpu, nid;
+	int *cpus_opal_rc;
+
+	cpu_notifier_register_begin();
+
+	/*
+	 * Nest PMUs are per-chip counters. So designate a cpu
+	 * from each chip for counter collection.
+	 */
+	for_each_online_node(nid) {
+		l_cpumask = cpumask_of_node(nid);
+
+		/* designate first online cpu in this node */
+		cpu = cpumask_first(l_cpumask);
+		cpumask_set_cpu(cpu, &nest_pmu_cpu_mask);
+	}
+
+	/*
+	 * Memory for OPAL call return value.
+	 */
+	cpus_opal_rc = kzalloc((sizeof(int) * nr_cpu_ids), GFP_KERNEL);
+	if (!cpus_opal_rc)
+		goto fail;
+
+	/* Initialize Nest PMUs in each node using designated cpus */
+	on_each_cpu_mask(&nest_pmu_cpu_mask, (smp_call_func_t)nest_init,
+						(void *)cpus_opal_rc, 1);
+
+	/* Check return value array for any OPAL call failure */
+	for_each_cpu(cpu, &nest_pmu_cpu_mask) {
+		if (cpus_opal_rc[cpu])
+			goto fail_free;
+	}
+	kfree(cpus_opal_rc);
+
+	__register_cpu_notifier(&nest_pmu_cpu_nb);
+
+	cpu_notifier_register_done();
+	return 0;
+
+fail_free:
+	kfree(cpus_opal_rc);
+fail:
+	cpu_notifier_register_done();
+	return -ENODEV;
+}
+
 static int p8_nest_event_init(struct perf_event *event)
 {
 	int chip_id;
@@ -255,6 +427,7 @@ static int nest_pmu_create(struct device_node *dev, int pmu_index)
 		sprintf(buf, "Nest_%s", (char *)pp->value);
 		pmu_ptr->pmu.name = (char *)buf;
 		pmu_ptr->attr_groups[1] = &p8_nest_format_group;
+		pmu_ptr->attr_groups[2] = &nest_pmu_cpumask_attr_group;
 		continue;
 	}
 
@@ -359,6 +532,10 @@ static int __init nest_pmu_init(void)
 	if (!of_find_node_with_property(NULL, "ibm,ima-chip"))
 		return ret;
 
+	/* Add cpumask and register for hotplug notification */
+	if (nest_pmu_cpumask_init())
+		return ret;
+
 	/*
 	 * Parse device-tree for Nest PMU information
 	 */
-- 
1.9.1
-- 
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/