From: kan.liang@intel.com
To: a.p.zijlstra@chello.nl, acme@kernel.org
Cc: luto@kernel.org, mingo@redhat.com, eranian@google.com, ak@linux.intel.com,
    mark.rutland@arm.com, adrian.hunter@intel.com, jolsa@kernel.org,
    namhyung@kernel.org, linux-kernel@vger.kernel.org,
    Kan Liang <kan.liang@intel.com>
Subject: [PATCH 5/5] perf,tools: Show freq/CPU%/CORE_BUSY% in perf report --stdio
Date: Thu, 23 Jul 2015 07:49:43 -0400
Message-Id: <1437652183-62080-6-git-send-email-kan.liang@intel.com>
In-Reply-To: <1437652183-62080-1-git-send-email-kan.liang@intel.com>
References: <1437652183-62080-1-git-send-email-kan.liang@intel.com>

From: Kan Liang <kan.liang@intel.com>

Show the frequency, CPU utilization and percent performance for each
symbol in perf report --stdio with the new --show-freq-perf option.

In a sampling group, only the group leader does sampling, so with
--group only the group leader's frequency needs to be printed.

Here is an example.

$ perf report --stdio --group --show-freq-perf

                                 Overhead   FREQ MHz   CPU%  CORE_BUSY%  Command      Shared Object     Symbol
 ........................................  .........  .....  ..........  ...........  ................  ......................

    99.54%  99.54%  99.53%  99.53%  99.53%       2301     96          99  tchain_edit  tchain_edit       [.] f3
     0.20%   0.20%   0.20%   0.20%   0.20%       2301     98          99  tchain_edit  tchain_edit       [.] f2
     0.05%   0.05%   0.05%   0.05%   0.05%       2300     98          99  tchain_edit  [kernel.vmlinux]  [k] read_tsc

Signed-off-by: Kan Liang <kan.liang@intel.com>
---
 tools/perf/Documentation/perf-report.txt | 12 ++++++
 tools/perf/builtin-report.c              | 19 +++++++++
 tools/perf/perf.h                        |  1 +
 tools/perf/ui/hist.c                     | 71 +++++++++++++++++++++++++++++---
 tools/perf/util/hist.h                   |  3 ++
 tools/perf/util/session.c                |  2 +-
 tools/perf/util/sort.c                   |  3 ++
 tools/perf/util/symbol.h                 |  3 +-
 tools/perf/util/util.c                   |  2 +
 9 files changed, 109 insertions(+), 7 deletions(-)

diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index c33b69f..faa8825 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -303,6 +303,18 @@ OPTIONS
         special event -e cpu/mem-loads/ or -e cpu/mem-stores/. See
         'perf mem' for simpler access.
 
+--show-freq-perf::
+        Show CPU frequency and performance results from sample read.
+        To generate the frequency and performance output, the perf.data file
+        must have been obtained by group read and using the special events
+        cycles, ref-cycles, msr/tsc/, msr/aperf/ or msr/mperf/.
+        Freq MHz: the frequency during the sample interval. Needs the cycles
+        and ref-cycles events.
+        CPU%: CPU utilization during the sample interval. Needs the ref-cycles
+        and msr/tsc/ events.
+        CORE_BUSY%: actual percent performance (APERF/MPERF%) during the
+        sample interval. Needs the msr/aperf/ and msr/mperf/ events.
+
 --percent-limit::
         Do not show entries which have an overhead under that percent.
         (Default: 0).
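For reviewers, a rough standalone sketch of how the three columns are
assumed to be derived from the grouped counter deltas. The exact
computation is introduced by earlier patches in this series; the struct,
function and sample values below are illustrative only and are not part
of this patch.

/*
 * Assumed derivation, matching the documented event requirements:
 *   FREQ MHz   = cycles / ref-cycles * max core frequency (MHz)
 *   CPU%       = ref-cycles / tsc * 100
 *   CORE_BUSY% = aperf / mperf * 100
 * (names below are hypothetical, not taken from this patch set)
 */
#include <stdio.h>

struct freq_perf_counters {
        unsigned long long cycles;      /* cycles */
        unsigned long long ref;         /* ref-cycles */
        unsigned long long tsc;         /* msr/tsc/ */
        unsigned long long aperf;       /* msr/aperf/ */
        unsigned long long mperf;       /* msr/mperf/ */
};

static void compute_freq_perf(const struct freq_perf_counters *c,
                              unsigned long long max_freq_mhz,
                              double *freq_mhz, double *cpu_u,
                              double *core_busy)
{
        /* Average core frequency over the sample interval. */
        *freq_mhz = c->ref ? (double)c->cycles / c->ref * max_freq_mhz : 0.0;
        /* Share of TSC ticks during which the core was unhalted. */
        *cpu_u = c->tsc ? (double)c->ref / c->tsc * 100.0 : 0.0;
        /* Actual percent performance, i.e. APERF/MPERF%. */
        *core_busy = c->mperf ? (double)c->aperf / c->mperf * 100.0 : 0.0;
}

int main(void)
{
        /* Made-up counter deltas, roughly reproducing the example above. */
        struct freq_perf_counters c = {
                .cycles = 2301000, .ref = 2300000,
                .tsc = 2350000, .aperf = 2280000, .mperf = 2300000,
        };
        double freq, cpu, busy;

        compute_freq_perf(&c, 2300, &freq, &cpu, &busy);
        printf("FREQ %.0f MHz  CPU %.0f%%  CORE_BUSY %.0f%%\n",
               freq, cpu, busy);
        return 0;
}

The perf.data for such a report would presumably be recorded with a
sample-read group along the lines of
'perf record -e "{cycles,ref-cycles,msr/tsc/,msr/aperf/,msr/mperf/}:S"',
with the exact event set depending on which columns are wanted.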
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 0cd0573..961b848 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -166,6 +166,8 @@ static int process_sample_event(struct perf_tool *tool,
                iter.ops = &hist_iter_mem;
        else if (symbol_conf.cumulate_callchain)
                iter.ops = &hist_iter_cumulative;
+       else if (symbol_conf.show_freq_perf)
+               iter.ops = &hist_iter_freq_perf;
        else
                iter.ops = &hist_iter_normal;
 
@@ -723,6 +725,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
                    "Enable kernel symbol demangling"),
        OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
+       OPT_BOOLEAN(0, "show-freq-perf", &symbol_conf.show_freq_perf,
+                   "show CPU frequency and performance info"),
        OPT_CALLBACK(0, "percent-limit", &report, "percent",
                     "Don't show entries under that percent", parse_percent_limit),
        OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
@@ -735,7 +739,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
        struct perf_data_file file = {
                .mode  = PERF_DATA_MODE_READ,
        };
+       struct perf_evsel *pos;
        int ret = hists__init();
+       bool freq_perf_info[FREQ_PERF_MAX] = {0};
 
        if (ret < 0)
                return ret;
@@ -823,6 +829,19 @@ repeat:
        msr_pmu = perf_pmu__find("msr");
        cpu_max_freq = get_cpu_max_freq() / 1000;
 
+       if (symbol_conf.show_freq_perf) {
+               perf_freq = perf_cpu_u = perf_core_busy = false;
+               evlist__for_each(session->evlist, pos) {
+                       SET_FREQ_PERF_VALUE(pos, freq_perf_info, true);
+               }
+               if (HAS_FREQ(freq_perf_info))
+                       perf_freq = true;
+               if (HAS_CPU_U(freq_perf_info))
+                       perf_cpu_u = true;
+               if (HAS_CORE_BUSY(freq_perf_info))
+                       perf_core_busy = true;
+       }
+
        if (setup_sorting() < 0) {
                if (sort_order)
                        parse_options_usage(report_usage, options, "s", 1);
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 937b16a..87daab8 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -33,6 +33,7 @@ static inline unsigned long long rdclock(void)
 
 extern const char *input_name;
 extern bool perf_host, perf_guest;
+extern bool perf_freq, perf_cpu_u, perf_core_busy;
 extern const char perf_version_string[];
 
 void pthread__unblock_sigwinch(void);
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 25d6083..949bbf2 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -17,7 +17,7 @@
 
 static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
                      hpp_field_fn get_field, const char *fmt, int len,
-                     hpp_snprint_fn print_fn, bool fmt_percent)
+                     hpp_snprint_fn print_fn, bool fmt_percent, bool single)
 {
        int ret;
        struct hists *hists = he->hists;
@@ -36,7 +36,7 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
        } else
                ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));
 
-       if (perf_evsel__is_group_event(evsel)) {
+       if (perf_evsel__is_group_event(evsel) && !single) {
                int prev_idx, idx_delta;
                struct hist_entry *pair;
                int nr_members = evsel->nr_members;
@@ -109,10 +109,17 @@ int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
             const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
 {
        int len = fmt->user_len ?: fmt->len;
+       bool single = false;
+
+       if (symbol_conf.show_freq_perf &&
+           ((fmt == &perf_hpp__format[PERF_HPP__FREQ]) ||
+            (fmt == &perf_hpp__format[PERF_HPP__CPU_U]) ||
+            (fmt == &perf_hpp__format[PERF_HPP__CORE_BUSY])))
+               single = true;
 
        if (symbol_conf.field_sep) {
                return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
-                                 print_fn, fmt_percent);
+                                 print_fn, fmt_percent, single);
        }
 
        if (fmt_percent)
@@ -120,7 +127,7 @@ int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
        else
                len -= 1;
 
-       return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
+       return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent, single);
 }
 
 int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
@@ -234,6 +241,30 @@ static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
        return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
 }
 
+static int hpp__single_width_fn(struct perf_hpp_fmt *fmt,
+                               struct perf_hpp *hpp __maybe_unused,
+                               struct perf_evsel *evsel)
+{
+       int len = fmt->user_len ?: fmt->len;
+
+       if (symbol_conf.event_group && !symbol_conf.show_freq_perf)
+               len = max(len, evsel->nr_members * fmt->len);
+
+       if (len < (int)strlen(fmt->name))
+               len = strlen(fmt->name);
+
+       return len;
+}
+
+static int hpp__single_header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+                                struct perf_evsel *evsel)
+{
+       int len = hpp__single_width_fn(fmt, hpp, evsel);
+
+       return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
+}
+
+
 static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
 {
        va_list args;
@@ -363,6 +394,9 @@
 HPP_PERCENT_ACC_FNS(overhead_acc, period)
 HPP_RAW_FNS(samples, nr_events)
 HPP_RAW_FNS(period, period)
+HPP_RAW_FNS(freq, freq)
+HPP_RAW_FNS(cpu_u, cpu_u)
+HPP_RAW_FNS(core_busy, core_busy)
 
 static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
                            struct hist_entry *a __maybe_unused,
@@ -395,6 +429,17 @@ static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
                .sort   = hpp__sort_ ## _fn,            \
        }
 
+#define HPP__SINGLE_PRINT_FNS(_name, _fn)              \
+       {                                               \
+               .name   = _name,                        \
+               .header = hpp__single_header_fn,        \
+               .width  = hpp__single_width_fn,         \
+               .entry  = hpp__entry_ ## _fn,           \
+               .cmp    = hpp__nop_cmp,                 \
+               .collapse = hpp__nop_cmp,               \
+               .sort   = hpp__sort_ ## _fn,            \
+       }
+
 #define HPP__PRINT_FNS(_name, _fn)                     \
        {                                               \
                .name   = _name,                        \
@@ -414,7 +459,10 @@ struct perf_hpp_fmt perf_hpp__format[] = {
        HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us),
        HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc),
        HPP__PRINT_FNS("Samples", samples),
-       HPP__PRINT_FNS("Period", period)
+       HPP__PRINT_FNS("Period", period),
+       HPP__SINGLE_PRINT_FNS("FREQ MHz", freq),
+       HPP__SINGLE_PRINT_FNS("CPU%", cpu_u),
+       HPP__SINGLE_PRINT_FNS("CORE_BUSY%", core_busy)
 };
 
 LIST_HEAD(perf_hpp__list);
@@ -485,6 +533,14 @@ void perf_hpp__init(void)
        if (symbol_conf.show_total_period)
                perf_hpp__column_enable(PERF_HPP__PERIOD);
 
+       if (symbol_conf.show_freq_perf) {
+               if (perf_freq)
+                       perf_hpp__column_enable(PERF_HPP__FREQ);
+               if (perf_cpu_u)
+                       perf_hpp__column_enable(PERF_HPP__CPU_U);
+               if (perf_core_busy)
+                       perf_hpp__column_enable(PERF_HPP__CORE_BUSY);
+       }
        /* prepend overhead field for backward compatiblity.  */
        list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list;
        if (list_empty(list))
@@ -652,6 +708,9 @@ void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
                return;
 
        switch (idx) {
+       case PERF_HPP__CPU_U:
+               fmt->len = 5;
+               break;
        case PERF_HPP__OVERHEAD:
        case PERF_HPP__OVERHEAD_SYS:
        case PERF_HPP__OVERHEAD_US:
@@ -661,6 +720,8 @@ void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
 
        case PERF_HPP__OVERHEAD_GUEST_SYS:
        case PERF_HPP__OVERHEAD_GUEST_US:
+       case PERF_HPP__FREQ:
+       case PERF_HPP__CORE_BUSY:
                fmt->len = 9;
                break;
 
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 70bd557..ec64234 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -237,6 +237,9 @@ enum {
        PERF_HPP__OVERHEAD_ACC,
        PERF_HPP__SAMPLES,
        PERF_HPP__PERIOD,
+       PERF_HPP__FREQ,
+       PERF_HPP__CPU_U,
+       PERF_HPP__CORE_BUSY,
 
        PERF_HPP__MAX_INDEX
 };
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 939dfed..43551f7 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1013,7 +1013,7 @@ static int deliver_sample_value(struct perf_evlist *evlist,
                return 0;
        }
 
-       if (perf_evsel__is_group_leader(sid->evsel)) {
+       if (symbol_conf.show_freq_perf && perf_evsel__is_group_leader(sid->evsel)) {
                evsel = sid->evsel;
                SET_FREQ_PERF_VALUE(evsel, sample->freq_perf_data,
                                    sample->read.group.values[nr].value);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 4c65a14..690e173 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1225,6 +1225,9 @@ static struct hpp_dimension hpp_sort_dimensions[] = {
        DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
        DIM(PERF_HPP__SAMPLES, "sample"),
        DIM(PERF_HPP__PERIOD, "period"),
+       DIM(PERF_HPP__FREQ, "freq"),
+       DIM(PERF_HPP__CPU_U, "cpu_u"),
+       DIM(PERF_HPP__CORE_BUSY, "core_busy"),
 };
 
 #undef DIM
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index fa0ccf3..7d70c89 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -106,7 +106,8 @@ struct symbol_conf {
                        filter_relative,
                        show_hist_headers,
                        branch_callstack,
-                       has_filter;
+                       has_filter,
+                       show_freq_perf;
        const char      *vmlinux_name,
                        *kallsyms_name,
                        *source_prefix,
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index edc2d63..648b307 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -34,6 +34,8 @@ bool test_attr__enabled;
 bool perf_host  = true;
 bool perf_guest = false;
 
+bool perf_freq, perf_cpu_u, perf_core_busy;
+
 char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
 
 void event_attr_init(struct perf_event_attr *attr)
-- 
1.8.3.1