From: Wang Nan
Cc: Wang Nan, Arnaldo Carvalho de Melo, Jiri Olsa
Subject: [PATCH 3/4] perf bpf: Clone bpf stdout events in multiple bpf scripts
Date: Fri, 8 Apr 2016 15:07:24 +0000
Message-ID: <1460128045-97310-4-git-send-email-wangnan0@huawei.com>
In-Reply-To: <1460128045-97310-1-git-send-email-wangnan0@huawei.com>
References: <1460128045-97310-1-git-send-email-wangnan0@huawei.com>

This patch allows cloning the bpf-output event configuration across
multiple BPF scripts. If a map named '__bpf_stdout__' exists but has not
been configured through 'map:__bpf_stdout__.event=', this patch clones
the configuration from another, already configured '__bpf_stdout__' map.

For example, the following command:

 # perf trace --ev bpf-output/no-inherit,name=evt/ \
              --ev ./test_bpf_trace.c/map:__bpf_stdout__.event=evt/ \
              --ev ./test_bpf_trace2.c usleep 100000

is equivalent to:

 # perf trace --ev bpf-output/no-inherit,name=evt/ \
              --ev ./test_bpf_trace.c/map:__bpf_stdout__.event=evt/ \
              --ev ./test_bpf_trace2.c/map:__bpf_stdout__.event=evt/ \
              usleep 100000

Signed-off-by: Wang Nan
Cc: Arnaldo Carvalho de Melo
Cc: Jiri Olsa
Cc: Li Zefan
Cc: pi3orama@163.com
---
 tools/perf/builtin-record.c  |   8 +++
 tools/perf/builtin-trace.c   |  10 ++++
 tools/perf/util/bpf-loader.c | 124 +++++++++++++++++++++++++++++++++++++++++++
 tools/perf/util/bpf-loader.h |  19 +++++++
 4 files changed, 161 insertions(+)

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 410035c..e64bd1e 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1276,6 +1276,14 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
 	if (err)
 		return err;
 
+	err = bpf__setup_stdout(rec->evlist);
+	if (err) {
+		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
+		pr_err("ERROR: Setup BPF stdout failed: %s\n",
+		       errbuf);
+		return err;
+	}
+
 	err = -ENOMEM;
 	symbol__init(NULL);
 
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index fb8257f..9d47bba 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -3289,6 +3289,16 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
 	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
 					trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
 
+	err = bpf__setup_stdout(trace.evlist);
+	if (err) {
+		char errbuf[BUFSIZ];
+
+		bpf__strerror_setup_stdout(trace.evlist, err, errbuf, sizeof(errbuf));
+		pr_err("ERROR: Setup BPF stdout failed: %s\n",
+		       errbuf);
+		goto out;
+	}
+
 	if (validate_evlist(trace.evlist, &has_bpf_output)) {
 		pr_err("Only support tracepoint and bpf-output events!\n");
 		return -EINVAL;
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 0967ce6..a0d2802 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -842,6 +842,58 @@ bpf_map_op__new(struct parse_events_term *term)
 	return op;
 }
 
+static struct bpf_map_op *
+bpf_map_op__clone(struct bpf_map_op *op)
+{
+	struct bpf_map_op *newop;
+
+	newop = memdup(op, sizeof(*op));
+	if (!newop) {
+		pr_debug("Failed to alloc bpf_map_op\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&newop->list);
+	if (op->key_type == BPF_MAP_KEY_RANGES) {
+		size_t memsz = op->k.array.nr_ranges *
+			       sizeof(op->k.array.ranges[0]);
+
+		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
+		if (!newop->k.array.ranges) {
+			pr_debug("Failed to alloc indices for map\n");
+			free(newop);
+			return NULL;
+		}
+	}
+
+	return newop;
+}
+
+static struct bpf_map_priv *
+bpf_map_priv__clone(struct bpf_map_priv *priv)
+{
+	struct bpf_map_priv *newpriv;
+	struct bpf_map_op *pos, *newop;
+
+	newpriv = zalloc(sizeof(*newpriv));
+	if (!newpriv) {
+		pr_debug("Not enough memory to alloc map private\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&newpriv->ops_list);
+
+	list_for_each_entry(pos, &priv->ops_list, list) {
+		newop = bpf_map_op__clone(pos);
+		if (!newop) {
+			bpf_map_priv__purge(newpriv);
+			return NULL;
+		}
+		list_add_tail(&newop->list, &newpriv->ops_list);
+	}
+
+	return newpriv;
+}
+
 static int
 bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
 {
@@ -1417,6 +1469,70 @@ int bpf__apply_obj_config(void)
 	return 0;
 }
 
+#define bpf__for_each_map(pos, obj, objtmp)	\
+	bpf_object__for_each_safe(obj, objtmp)	\
+		bpf_map__for_each(pos, obj)
+
+#define bpf__for_each_stdout_map(pos, obj, objtmp)	\
+	bpf__for_each_map(pos, obj, objtmp)		\
+		if (bpf_map__get_name(pos) &&		\
+		    (strcmp("__bpf_stdout__",		\
+			    bpf_map__get_name(pos)) == 0))
+
+int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
+{
+	struct bpf_map_priv *tmpl_priv = NULL;
+	struct bpf_object *obj, *tmp;
+	struct bpf_map *map;
+	int err;
+	bool need_init = false;
+
+	bpf__for_each_stdout_map(map, obj, tmp) {
+		struct bpf_map_priv *priv;
+
+		err = bpf_map__get_private(map, (void **)&priv);
+		if (err)
+			return -BPF_LOADER_ERRNO__INTERNAL;
+
+		/*
+		 * No need to check map type: type should have been
+		 * verified by kernel.
+		 */
+		if (!need_init && !priv)
+			need_init = !priv;
+		if (!tmpl_priv && priv)
+			tmpl_priv = priv;
+	}
+
+	if (!need_init)
+		return 0;
+
+	if (!tmpl_priv)
+		return 0;
+
+	bpf__for_each_stdout_map(map, obj, tmp) {
+		struct bpf_map_priv *priv;
+
+		err = bpf_map__get_private(map, (void **)&priv);
+		if (err)
+			return -BPF_LOADER_ERRNO__INTERNAL;
+		if (priv)
+			continue;
+
+		priv = bpf_map_priv__clone(tmpl_priv);
+		if (!priv)
+			return -ENOMEM;
+
+		err = bpf_map__set_private(map, priv, bpf_map_priv__clear);
+		if (err) {
+			bpf_map_priv__clear(map, priv);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 #define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
 #define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
 #define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
@@ -1590,3 +1706,11 @@ int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
 	bpf__strerror_end(buf, size);
 	return 0;
 }
+
+int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
+			       int err, char *buf, size_t size)
+{
+	bpf__strerror_head(err, buf, size);
+	bpf__strerror_end(buf, size);
+	return 0;
+}
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
index be43119..941e172 100644
--- a/tools/perf/util/bpf-loader.h
+++ b/tools/perf/util/bpf-loader.h
@@ -79,6 +79,11 @@ int bpf__strerror_config_obj(struct bpf_object *obj,
 			     size_t size);
 int bpf__apply_obj_config(void);
 int bpf__strerror_apply_obj_config(int err, char *buf, size_t size);
+
+int bpf__setup_stdout(struct perf_evlist *evlist);
+int bpf__strerror_setup_stdout(struct perf_evlist *evlist, int err,
+			       char *buf, size_t size);
+
 #else
 static inline struct bpf_object *
 bpf__prepare_load(const char *filename __maybe_unused,
@@ -125,6 +130,12 @@ bpf__apply_obj_config(void)
 }
 
 static inline int
+bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
+{
+	return 0;
+}
+
+static inline int
 __bpf_strerror(char *buf, size_t size)
 {
 	if (!size)
@@ -177,5 +188,13 @@ bpf__strerror_apply_obj_config(int err __maybe_unused,
 {
 	return __bpf_strerror(buf, size);
 }
+
+static inline int
+bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
+			   int err __maybe_unused, char *buf,
+			   size_t size)
+{
+	return __bpf_strerror(buf, size);
+}
 #endif
 #endif
-- 
1.8.3.4
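
Editor's note, not part of the patch: the core of bpf__setup_stdout() above
is a two-pass scan over all '__bpf_stdout__' maps. Pass one looks for a map
that already carries an event configuration and remembers it as a template,
while noting whether any map still lacks one; pass two clones that template
into every unconfigured map. The sketch below shows the same control flow
with hypothetical, simplified stand-ins (struct stdout_map, struct cfg,
clone_cfg, setup_stdout are illustration-only names, not perf or libbpf
APIs); it compiles and runs on its own.

/*
 * Illustration only -- NOT part of the patch. 'struct stdout_map' and
 * 'struct cfg' are hypothetical stand-ins for bpf_map/bpf_map_priv,
 * simplified so this file builds and runs on its own.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cfg {
	char event_name[32];	/* the bpf-output event bound to the map */
};

struct stdout_map {
	const char *script;	/* which BPF script the map came from */
	struct cfg *cfg;	/* NULL means "not configured yet" */
};

static struct cfg *clone_cfg(const struct cfg *tmpl)
{
	struct cfg *c = malloc(sizeof(*c));

	if (c)
		memcpy(c, tmpl, sizeof(*c));
	return c;
}

/*
 * Two passes, mirroring bpf__setup_stdout():
 *  1. find a configured map to use as template and note whether any
 *     map still lacks a configuration;
 *  2. clone the template into every unconfigured map.
 */
static int setup_stdout(struct stdout_map *maps, int nr)
{
	struct cfg *tmpl = NULL;
	int need_init = 0, i;

	for (i = 0; i < nr; i++) {
		if (!maps[i].cfg)
			need_init = 1;
		else if (!tmpl)
			tmpl = maps[i].cfg;
	}

	/* nothing to initialize, or no template to clone from */
	if (!need_init || !tmpl)
		return 0;

	for (i = 0; i < nr; i++) {
		if (maps[i].cfg)
			continue;
		maps[i].cfg = clone_cfg(tmpl);
		if (!maps[i].cfg)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct cfg evt = { .event_name = "evt" };
	struct stdout_map maps[] = {
		/* configured via map:__bpf_stdout__.event=evt */
		{ .script = "test_bpf_trace.c",  .cfg = &evt },
		/* unconfigured: receives a clone of the template */
		{ .script = "test_bpf_trace2.c", .cfg = NULL },
	};
	int i;

	if (setup_stdout(maps, 2))
		return 1;

	for (i = 0; i < 2; i++)
		printf("%s -> %s\n", maps[i].script, maps[i].cfg->event_name);
	return 0;
}

Deep-copying the private data (rather than sharing one bpf_map_priv
pointer) presumably keeps ownership simple: each map's private data is
released independently through bpf_map_priv__clear when the map is torn
down, so no reference counting is needed.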