From: Namhyung Kim <namhyung@kernel.org>
To: Arnaldo Carvalho de Melo
Cc: Ingo Molnar, Peter Zijlstra, Jiri Olsa, LKML, David Ahern,
    Frederic Weisbecker, Andi Kleen, Stephane Eranian, Adrian Hunter
Subject: [PATCH/RFC 15/16] perf top: Add --num-thread option
Date: Thu, 10 Dec 2015 16:53:34 +0900
Message-Id: <1449734015-9148-16-git-send-email-namhyung@kernel.org>
X-Mailer: git-send-email 2.6.2
In-Reply-To: <1449734015-9148-1-git-send-email-namhyung@kernel.org>
References: <1449734015-9148-1-git-send-email-namhyung@kernel.org>

The --num-thread option sets the number of reader threads.  The default
value is 0, which is converted to 1/4 of the number of mmaps (at least
one, and capped at the number of mmaps).

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
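A quick standalone sketch of the mmap partitioning this patch does in
__cmd_top() (illustration only, not part of the patch; the nr_mmaps
value below is made up): each reader thread handles a contiguous range
of mmap indexes, and the remainder is spread over the first readers.

/*
 * Illustration only: mirrors the nr_threads default and the
 * idx/nr_idx split added by this patch, outside of perf.
 */
#include <stdio.h>

int main(void)
{
	int nr_mmaps = 10;		/* made-up value, e.g. one mmap per CPU */
	unsigned int nr_threads = 0;	/* 0 = use the default */
	unsigned int i, idx;
	int nr_idx, rem;

	if (nr_threads == 0)
		nr_threads = nr_mmaps / 4 ?: 1;	/* 1/4 of the mmaps, at least 1 */
	if ((int)nr_threads > nr_mmaps)
		nr_threads = nr_mmaps;

	nr_idx = nr_mmaps / nr_threads;
	rem = nr_mmaps % nr_threads;

	for (i = 0, idx = 0; i < nr_threads; i++) {
		int n = nr_idx;

		if (rem-- > 0)
			n++;
		printf("reader %u: mmaps %u..%u\n", i, idx, idx + n - 1);
		idx += n;
	}
	return 0;
}

With the made-up 10 mmaps above this picks 2 readers and prints
"reader 0: mmaps 0..4" and "reader 1: mmaps 5..9".
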
 tools/perf/builtin-top.c | 49 +++++++++++++++++++++++++++++++++++-------------
 tools/perf/util/top.h    |  1 +
 2 files changed, 37 insertions(+), 13 deletions(-)

diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index f3ab46b234b6..fc9715b046b3 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -838,10 +838,10 @@ struct collector_arg {
 
 static void collect_hists(struct perf_top *top, struct hists *hists)
 {
-	int i, k;
+	unsigned int i, k;
 	struct perf_evsel *evsel;
 
-	for (i = 0, k = 0; i < top->evlist->nr_mmaps; i++) {
+	for (i = 0, k = 0; i < top->nr_threads; i++) {
 		evlist__for_each(top->evlist, evsel) {
 			struct hists *src_hists = &hists[k++];
 			struct hists *dst_hists = evsel__hists(evsel);
@@ -900,6 +900,7 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
 
 struct reader_arg {
 	int			idx;
+	int			nr_idx;
 	struct perf_top		*top;
 	struct hists		*hists;
 	struct perf_top_stats	stats;
@@ -993,7 +994,7 @@ static void perf_event__process_sample(struct reader_arg *rarg,
 	addr_location__put(&al);
 }
 
-static void perf_top__mmap_read(struct reader_arg *rarg)
+static void perf_top__mmap_read_idx(struct reader_arg *rarg, int idx)
 {
 	struct perf_sample sample;
 	struct perf_evsel *evsel;
@@ -1001,7 +1002,6 @@ static void perf_top__mmap_read(struct reader_arg *rarg)
 	struct perf_session *session = top->session;
 	union perf_event *event;
 	struct machine *machine;
-	int idx = rarg->idx;
 	u8 origin;
 	int ret;
 
@@ -1067,6 +1067,14 @@ static void perf_top__mmap_read(struct reader_arg *rarg)
 	}
 }
 
+static void perf_top__mmap_read(struct reader_arg *rarg)
+{
+	int i;
+
+	for (i = 0; i < rarg->nr_idx; i++)
+		perf_top__mmap_read_idx(rarg, rarg->idx + i);
+}
+
 static void *mmap_read_worker(void *arg)
 {
 	struct reader_arg *rarg = arg;
@@ -1160,7 +1168,8 @@ static int __cmd_top(struct perf_top *top)
 	struct reader_arg *rargs = NULL;
 	struct collector_arg carg;
 	int ret;
-	int i;
+	unsigned int i;
+	int idx, nr_idx, rem;
 
 	top->session = perf_session__new(NULL, false, NULL);
 	if (top->session == NULL)
@@ -1211,34 +1220,47 @@ static int __cmd_top(struct perf_top *top)
 	/* Wait for a minimal set of events before starting the snapshot */
 	perf_evlist__poll(top->evlist, 100);
 
+	if (top->nr_threads == 0)
+		top->nr_threads = top->evlist->nr_mmaps / 4 ?: 1;
+	if ((int)top->nr_threads > top->evlist->nr_mmaps)
+		top->nr_threads = top->evlist->nr_mmaps;
+
+	nr_idx = top->evlist->nr_mmaps / top->nr_threads;
+	rem = top->evlist->nr_mmaps % top->nr_threads;
+
 	ret = -1;
-	readers = calloc(sizeof(pthread_t), top->evlist->nr_mmaps);
+	readers = calloc(sizeof(pthread_t), top->nr_threads);
 	if (readers == NULL)
 		goto out_delete;
 
-	rargs = calloc(sizeof(*rargs), top->evlist->nr_mmaps);
+	rargs = calloc(sizeof(*rargs), top->nr_threads);
 	if (rargs == NULL)
 		goto out_free;
 
-	hists = calloc(sizeof(*hists), top->evlist->nr_mmaps * top->evlist->nr_entries);
+	hists = calloc(sizeof(*hists), top->nr_threads * top->evlist->nr_entries);
 	if (hists == NULL)
 		goto out_free;
 
-	for (i = 0; i < top->evlist->nr_mmaps * top->evlist->nr_entries; i++)
+	for (i = 0; i < top->nr_threads * top->evlist->nr_entries; i++)
 		__hists__init(&hists[i]);
 
-	for (i = 0; i < top->evlist->nr_mmaps; i++) {
+	for (i = 0, idx = 0; i < top->nr_threads; i++) {
 		struct reader_arg *rarg = &rargs[i];
 
-		rarg->idx = i;
 		rarg->top = top;
 		rarg->hists = &hists[i * top->evlist->nr_entries];
 
+		rarg->idx = idx;
+		rarg->nr_idx = nr_idx;
+		if (rem-- > 0)
+			rarg->nr_idx++;
+		idx += rarg->nr_idx;
+
 		perf_top__mmap_read(rarg);
 	}
 	collect_hists(top, hists);
 
-	for (i = 0; i < top->evlist->nr_mmaps; i++) {
+	for (i = 0; i < top->nr_threads; i++) {
 		if (pthread_create(&readers[i], NULL, mmap_read_worker, &rargs[i]))
 			goto out_join;
 	}
@@ -1259,7 +1281,7 @@ static int __cmd_top(struct perf_top *top)
 out_join:
 	pthread_join(ui_thread, NULL);
 	pthread_join(collector, NULL);
-	for (i = 0; i < top->evlist->nr_mmaps; i++) {
+	for (i = 0; i < top->nr_threads; i++) {
 		pthread_join(readers[i], NULL);
 	}
 
@@ -1458,6 +1480,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
 	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
 		     "branch filter mask", "branch stack filter modes",
 		     parse_branch_stack),
+	OPT_UINTEGER(0, "num-thread", &top.nr_threads, "number of threads to run"),
 	OPT_END()
 	};
 	const char * const top_usage[] = {
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 55eb5aebae59..916ba36b0ac0 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -43,6 +43,7 @@ struct perf_top {
 	int		   sym_pcnt_filter;
 	const char	   *sym_filter;
 	float		   min_percent;
+	unsigned int	   nr_threads;
 };
 
 #define CONSOLE_CLEAR ""
-- 
2.6.2