Return-Path: <linux-kernel-owner@vger.kernel.org>
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1756133Ab3JGQJ4 (ORCPT ); Mon, 7 Oct 2013 12:09:56 -0400
Received: from mail-we0-f172.google.com ([74.125.82.172]:55666 "EHLO mail-we0-f172.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1753823Ab3JGQJs (ORCPT ); Mon, 7 Oct 2013 12:09:48 -0400
From: Stephane Eranian <eranian@google.com>
To: linux-kernel@vger.kernel.org
Cc: peterz@infradead.org, mingo@elte.hu, ak@linux.intel.com, acme@redhat.com, jolsa@redhat.com, zheng.z.yan@intel.com
Subject: [PATCH v1 1/2] perf,x86: add Intel RAPL PMU support
Date: Mon, 7 Oct 2013 18:09:16 +0200
Message-Id: <1381162158-24329-2-git-send-email-eranian@google.com>
X-Mailer: git-send-email 1.7.9.5
In-Reply-To: <1381162158-24329-1-git-send-email-eranian@google.com>
References: <1381162158-24329-1-git-send-email-eranian@google.com>
Sender: linux-kernel-owner@vger.kernel.org
List-ID: <linux-kernel.vger.kernel.org>
X-Mailing-List: linux-kernel@vger.kernel.org
Content-Length: 17923
Lines: 671

This patch adds a new uncore PMU to expose the Intel RAPL energy
consumption counters. Up to 3 counters, each counting a particular
RAPL event, are exposed.

The RAPL counters are available on Intel SandyBridge, IvyBridge,
Haswell. The server skus add a 3rd counter.

The following events are available and exposed in sysfs:

- rapl-energy-cores: power consumption of all cores on socket
- rapl-energy-pkg: power consumption of all cores + LLC cache
- rapl-energy-dram: power consumption of DRAM

The RAPL PMU is uncore by nature and is implemented such that it
only works in system-wide mode. Measuring only one CPU per socket
is sufficient. The /sys/devices/rapl/cpumask is exported and can
be used by tools to figure out which CPU to monitor by default.
For instance, on a 2-socket system, 2 CPUs (one on each socket)
will be shown.

The counters all count in the same unit. The perf_events API
exposes all RAPL counters as 64-bit integers counting in unit
of 1/2^32 Joules (or 0.23 nJ).
User level tools must convert the counts by multiplying them by
0.23 and dividing by 10^9 to obtain Joules. The reason for this is
that the kernel avoids doing floating point math whenever possible
because it is expensive (user floating-point state must be saved).
The method used avoids kernel floating-point and minimizes the
loss of precision (bits). Thanks to PeterZ for suggesting this
approach.

To convert the raw count in Watt: W = C * 0.23 / (1e9 * time)

RAPL PMU is a new standalone PMU which registers with the
perf_event core subsystem. The PMU type (attr->type) is
dynamically allocated and is available from /sys/device/rapl/type.

Sampling is not supported by the RAPL PMU. There is no privilege
level filtering either.

We artificially limit the number of simultaneous RAPL events to a
max of 1 instance of each (so up to 3). That helps track events
and is sufficient given that RAPL events do not support any
filters, i.e., no gain in measuring the same event twice in an
event group.

Signed-off-by: Stephane Eranian <eranian@google.com>
---
 arch/x86/kernel/cpu/Makefile                |    2 +-
 arch/x86/kernel/cpu/perf_event_intel_rapl.c |  580 +++++++++++++++++++++++++++
 tools/perf/util/evsel.c                     |    1 -
 3 files changed, 581 insertions(+), 2 deletions(-)
 create mode 100644 arch/x86/kernel/cpu/perf_event_intel_rapl.c

diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 47b56a7..6359506 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -36,7 +36,7 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
 endif
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_rapl.o
 endif

diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
new file mode 100644
index
0000000..f59dbd4 --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c @@ -0,0 +1,580 @@ +#include +#include +#include +#include +#include "perf_event.h" +/* + * RAPL energy status counters + */ +#define RAPL_IDX_PP0_NRG_STAT 0 /* all cores */ +#define INTEL_RAPL_PP0 0x1 /* pseudo-encoding */ +#define RAPL_IDX_PKG_NRG_STAT 1 /* entire package */ +#define INTEL_RAPL_PKG 0x2 /* pseudo-encoding */ +#define RAPL_IDX_RAM_NRG_STAT 2 /* DRAM */ +#define INTEL_RAPL_RAM 0x3 /* pseudo-encoding */ + +#define RAPL_IDX_MAX 4 /* Max number of RAPL counters */ + +/* Desktops have PP0, PKG */ +#define RAPL_IDX_CLN (1<config + * any other bit is reserved + */ +#define RAPL_EVENT_MASK 0xFFULL + +#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \ +static ssize_t __rapl_##_var##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct kobj_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __rapl_##_var##_show, NULL) + +#define RAPL_EVENT_DESC(_name, _config) \ +{ \ + .attr = __ATTR(_name, 0444, rapl_event_show, NULL), \ + .config = _config, \ +} + +#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */ + +struct rapl_pmu { + atomic_t refcnt; + int hw_unit; /* 1/2^hw_unit Joule */ + int phys_id; + int n_active; /* number of active events */ + unsigned long active_mask[BITS_TO_LONGS(RAPL_IDX_MAX)]; + struct perf_event *events[RAPL_IDX_MAX]; +}; + +static struct pmu rapl_pmu_class; +static cpumask_t rapl_cpu_mask; +static int rapl_cntr_mask; + +static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu); +static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu_kfree); + +static inline u64 rapl_read_counter(struct perf_event *event) +{ + u64 raw; + rdmsrl(event->hw.event_base, raw); + return raw; +} + +static inline u64 rapl_scale(u64 v) +{ + /* + * scale delta to smallest unit (1/2^32) + * users must then scale back: count * 1/(1e9*2^32) to get Joules 
+ * Watts = Joules/Time delta + */ + return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit); +} + +static u64 rapl_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 prev_raw_count, new_raw_count; + s64 delta, sdelta; + int shift = RAPL_CNTR_WIDTH; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + rdmsrl(event->hw.event_base, new_raw_count); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + /* + * Now we have the new raw value and have updated the prev + * timestamp already. We can now calculate the elapsed delta + * (event-)time and add that to the generic event. + * + * Careful, not all hw sign-extends above the physical width + * of the count. + */ + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + sdelta = rapl_scale(delta); + + local64_add(sdelta, &event->count); + + return new_raw_count; +} + +static void rapl_pmu_event_start(struct perf_event *event, int flags) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + event->hw.state = 0; + + local64_set(&event->hw.prev_count, rapl_read_counter(event)); + + pmu->n_active++; +} + +static int rapl_pmu_event_add(struct perf_event *event, int flags) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = event->hw.idx; + + /* counter already in use */ + if (__test_and_set_bit(idx, pmu->active_mask)) + return -EAGAIN; + + pmu->events[idx] = event; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + rapl_pmu_event_start(event, 0); + + return 0; +} + +static void rapl_pmu_event_stop(struct perf_event *event, int flags) +{ + struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); + struct hw_perf_event *hwc = &event->hw; + + /* mark event as deactivated and stopped */ + if (__test_and_clear_bit(hwc->idx, pmu->active_mask)) { 
+ WARN_ON_ONCE(pmu->n_active <= 0); + pmu->n_active--; + pmu->events[hwc->idx] = NULL; + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + } + + /* check if update of sw counter is necessary */ + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + /* + * Drain the remaining delta count out of a event + * that we are disabling: + */ + rapl_event_update(event); + hwc->state |= PERF_HES_UPTODATE; + } +} + +static void rapl_pmu_event_del(struct perf_event *event, int flags) +{ + rapl_pmu_event_stop(event, PERF_EF_UPDATE); +} + +static int rapl_validate_group(struct perf_event *event) +{ + struct perf_event *leader = event->group_leader; + unsigned long active_mask[BITS_TO_LONGS(RAPL_IDX_MAX)]; + struct perf_event *e; + + /* + * group can only have RAPL PMU events + * just need to verify they don't use the same + * counter + * Although the RAPL counters are read-only and + * we could have as many RAPL events as we would + * like, we artificially limit to a maximum of + * one event of each kind. That helps us track + * events better especially when we want to avoid + * missing counter overflows. Furthermore, the + * counters have no filters, thus adding 2 instances + * of an event does not buy anything. 
+ */ + bitmap_zero(active_mask, RAPL_IDX_MAX); + + /* + * event is not yet connected with siblings + */ + __set_bit(event->hw.idx, active_mask); + + /* + * add leader too + */ + if (__test_and_set_bit(leader->hw.idx, active_mask)) + return -EINVAL; + + /* + * now check existing siblings + */ + list_for_each_entry(e, &leader->sibling_list, group_entry) { + if (__test_and_set_bit(e->hw.idx, active_mask)) + return -EINVAL; + } + return 0; +} + +static int rapl_pmu_event_init(struct perf_event *event) +{ + u64 cfg = event->attr.config & RAPL_EVENT_MASK; + int bit, msr, ret = 0; + + /* only look at RAPL events */ + if (event->attr.type != rapl_pmu_class.type) + return -ENOENT; + + /* check only supported bits are set */ + if (event->attr.config & ~RAPL_EVENT_MASK) + return -EINVAL; + + /* + * check event is known (determines counter) + */ + switch (cfg) { + case INTEL_RAPL_PP0: + bit = RAPL_IDX_PP0_NRG_STAT; + msr = MSR_PP0_ENERGY_STATUS; + break; + case INTEL_RAPL_PKG: + bit = RAPL_IDX_PKG_NRG_STAT; + msr = MSR_PKG_ENERGY_STATUS; + break; + case INTEL_RAPL_RAM: + bit = RAPL_IDX_RAM_NRG_STAT; + msr = MSR_DRAM_ENERGY_STATUS; + break; + default: + return -EINVAL; + } + /* check event supported */ + if (!(rapl_cntr_mask & (1 << bit))) + return -EINVAL; + + /* unsupported modes and filters */ + if (event->attr.exclude_user || + event->attr.exclude_kernel || + event->attr.exclude_hv || + event->attr.exclude_idle || + event->attr.exclude_host || + event->attr.exclude_guest || + event->attr.sample_period) /* no sampling */ + return -EINVAL; + + /* must be done before validate_group */ + event->hw.event_base = msr; + event->hw.idx = bit; + + if (event->group_leader != event) + ret = rapl_validate_group(event); + + return ret; +} + +static void rapl_pmu_event_read(struct perf_event *event) +{ + rapl_event_update(event); +} + +static ssize_t rapl_get_attr_cpumask(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, 
&rapl_cpu_mask); + + buf[n++] = '\n'; + buf[n] = '\0'; + return n; +} + +static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL); + +static struct attribute *rapl_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group rapl_pmu_attr_group = { + .attrs = rapl_pmu_attrs, +}; + +EVENT_ATTR_STR(rapl-energy-cores, rapl_pp0, "event=0x01"); +EVENT_ATTR_STR(rapl-energy-pkg , rapl_pkg, "event=0x02"); +EVENT_ATTR_STR(rapl-energy-ram , rapl_ram, "event=0x03"); + +static struct attribute *rapl_events_srv_attr[] = { + EVENT_PTR(rapl_pp0), + EVENT_PTR(rapl_pkg), + EVENT_PTR(rapl_ram), + NULL, +}; + +static struct attribute *rapl_events_cln_attr[] = { + EVENT_PTR(rapl_pp0), + EVENT_PTR(rapl_pkg), + NULL, +}; + +static struct attribute_group rapl_pmu_events_group = { + .name = "events", + .attrs = NULL, /* patched at runtime */ +}; + +DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7"); +static struct attribute *rapl_formats_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group rapl_pmu_format_group = { + .name = "format", + .attrs = rapl_formats_attr, +}; + +const struct attribute_group *rapl_attr_groups[] = { + &rapl_pmu_attr_group, + &rapl_pmu_format_group, + &rapl_pmu_events_group, + NULL, +}; + +static struct pmu rapl_pmu_class = { + .attr_groups = rapl_attr_groups, + .task_ctx_nr = perf_invalid_context, /* system-wide only */ + .event_init = rapl_pmu_event_init, + .add = rapl_pmu_event_add, /* must have */ + .del = rapl_pmu_event_del, /* must have */ + .start = rapl_pmu_event_start, + .stop = rapl_pmu_event_stop, + .read = rapl_pmu_event_read, +}; + +static void rapl_exit_cpu(int cpu) +{ + int i, phys_id = topology_physical_package_id(cpu); + + /* if CPU not in RAPL mask, nothing to do */ + if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask)) + return; + + /* find a new cpu on same package */ + for_each_online_cpu(i) { + if (i == cpu || i == 0) + continue; + if (phys_id == topology_physical_package_id(i)) 
{ + cpumask_set_cpu(i, &rapl_cpu_mask); + break; + } + } + + WARN_ON(cpumask_empty(&rapl_cpu_mask)); +} + +static void rapl_init_cpu(int cpu) +{ + int i, phys_id = topology_physical_package_id(cpu); + + /* check if phys_is is already covered */ + for_each_cpu(i, &rapl_cpu_mask) { + if (i == 0) + continue; + if (phys_id == topology_physical_package_id(i)) + return; + } + /* was not found, so add it */ + cpumask_set_cpu(cpu, &rapl_cpu_mask); +} + +static int rapl_cpu_prepare(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); + int phys_id = topology_physical_package_id(cpu); + + if (pmu) + return 0; + + if (phys_id < 0) + return -1; + + pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); + if (!pmu) + return -1; + + atomic_set(&pmu->refcnt, 1); + pmu->phys_id = phys_id; + /* + * grab power unit as: 1/2^unit Joules + * + * we cache in local PMU instance + */ + rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit); + pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL; + + /* set RAPL pmu for this cpu for now */ + per_cpu(rapl_pmu_kfree, cpu) = NULL; + per_cpu(rapl_pmu, cpu) = pmu; + + return 0; +} + +static int rapl_cpu_starting(int cpu) +{ + struct rapl_pmu *pmu2; + struct rapl_pmu *pmu1 = per_cpu(rapl_pmu, cpu); + int i, phys_id = topology_physical_package_id(cpu); + + if (pmu1) + return 0; + + for_each_online_cpu(i) { + pmu2 = per_cpu(rapl_pmu, i); + + if (!pmu2 || i == cpu) + continue; + + if (pmu2->phys_id == phys_id) { + per_cpu(rapl_pmu, cpu) = pmu2; + per_cpu(rapl_pmu_kfree, cpu) = pmu1; + atomic_inc(&pmu2->refcnt); + break; + } + } + return 0; +} + +static int rapl_cpu_dying(int cpu) +{ + struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); + int i; + + if (!pmu) + return 0; + /* + * stop all syswide RAPL events on that CPU + * as a consequence also stops the hrtimer + */ + for (i = 0; i < RAPL_IDX_MAX; i++) { + if (pmu->events[i]) + rapl_pmu_event_stop(pmu->events[i], PERF_EF_UPDATE); + } + per_cpu(rapl_pmu, cpu) = NULL; + + if 
(atomic_dec_and_test(&pmu->refcnt)) + kfree(pmu); + + return 0; +} + +static int rapl_cpu_notifier(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + /* allocate/free data structure for uncore box */ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + rapl_cpu_prepare(cpu); + break; + case CPU_STARTING: + rapl_cpu_starting(cpu); + break; + case CPU_UP_CANCELED: + case CPU_DYING: + rapl_cpu_dying(cpu); + break; + case CPU_ONLINE: + kfree(per_cpu(rapl_pmu_kfree, cpu)); + per_cpu(rapl_pmu_kfree, cpu) = NULL; + break; + case CPU_DEAD: + per_cpu(rapl_pmu, cpu) = NULL; + break; + default: + break; + } + + /* select the cpu that collects uncore events */ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DOWN_FAILED: + case CPU_STARTING: + rapl_init_cpu(cpu); + break; + case CPU_DOWN_PREPARE: + rapl_exit_cpu(cpu); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static int __init rapl_pmu_init(void) +{ + struct rapl_pmu *pmu; + int i, cpu, ret; + + /* check supported CPU */ + switch (boot_cpu_data.x86_model) { + case 42: /* Sandy Bridge */ + case 58: /* Ivy Bridge */ + case 60: /* Haswell */ + rapl_cntr_mask = RAPL_IDX_CLN; + rapl_pmu_events_group.attrs = rapl_events_cln_attr; + break; + case 45: /* Sandy Bridge-EP */ + case 62: /* IvyTown */ + rapl_cntr_mask = RAPL_IDX_SRV; + rapl_pmu_events_group.attrs = rapl_events_srv_attr; + break; + + default: + /* unsupported */ + return 0; + } + get_online_cpus(); + + for_each_online_cpu(cpu) { + int phys_id = topology_physical_package_id(cpu); + + /* save on prepare by only calling prepare for new phys_id */ + for_each_cpu(i, &rapl_cpu_mask) { + if (phys_id == topology_physical_package_id(i)) { + phys_id = -1; + break; + } + } + if (phys_id < 0) { + pmu = per_cpu(rapl_pmu, i); + if (pmu) { + per_cpu(rapl_pmu, cpu) = pmu; + atomic_inc(&pmu->refcnt); + } + continue; + } + rapl_cpu_prepare(cpu); + cpumask_set_cpu(cpu, &rapl_cpu_mask); + } + + 
perf_cpu_notifier(rapl_cpu_notifier); + + ret = perf_pmu_register(&rapl_pmu_class, "rapl", -1); + WARN_ON(ret); + + pmu = __get_cpu_var(rapl_pmu); + pr_info("RAPL PMU detected, hw unit 2^-%d Joules, " + " API unit is 2^-32 Joules," + " %d fixed counters\n", + pmu->hw_unit, + hweight32(rapl_cntr_mask)); + + put_online_cpus(); + + return 0; +} +device_initcall(rapl_pmu_init); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index abe69af..12bfd7d 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -895,7 +895,6 @@ int __perf_evsel__read(struct perf_evsel *evsel, if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) return -errno; - aggr->val += count.val; if (scale) { aggr->ena += count.ena; -- 1.7.9.5 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/