Hello,
This series adds support for unprivileged BPF filters when profiling per-task events.
Until now, only root (or a user with CAP_BPF) could use the filter, and we
cannot add new unprivileged BPF program types. After talking with the BPF
folks at LSF/MM/BPF 2024, I was told that this (using pinned BPF objects) is
the way to go, and I finally managed to make it work. :)
But it requires a one-time setup (by root) before use, like below:
# perf record --setup-filter pin
This will load the BPF program and maps and pin them in the BPF-fs. After
that, normal users can use the filter:
$ perf record -o- -e cycles:u --filter 'period < 10000' perf test -w noploop | perf script -i-
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.011 MB - ]
perf 759982 448227.214189: 1 cycles:u: 7f153719f4d0 _start+0x0 (/usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2)
perf 759982 448227.214195: 1 cycles:u: 7f153719f4d0 _start+0x0 (/usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2)
perf 759982 448227.214196: 7 cycles:u: 7f153719f4d0 _start+0x0 (/usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2)
perf 759982 448227.214196: 223 cycles:u: 7f153719f4d0 _start+0x0 (/usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2)
perf 759982 448227.214198: 9475 cycles:u: ffffffff8ee012a0 [unknown] ([unknown])
perf 759982 448227.548608: 1 cycles:u: 559a9f03c81c noploop+0x5c (/home/namhyung/linux/tools/perf/perf)
perf 759982 448227.548611: 1 cycles:u: 559a9f03c81c noploop+0x5c (/home/namhyung/linux/tools/perf/perf)
perf 759982 448227.548612: 12 cycles:u: 559a9f03c81c noploop+0x5c (/home/namhyung/linux/tools/perf/perf)
perf 759982 448227.548613: 466 cycles:u: 559a9f03c81c noploop+0x5c (/home/namhyung/linux/tools/perf/perf)
It's also possible to unpin (and thus unload) the objects using this command:
# perf record --setup-filter unpin
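To verify the setup, the pinned objects should show up under the BPF-fs
(assuming the usual /sys/fs/bpf mount point), something like:
# ls /sys/fs/bpf/perf_filter
filters  perf_sample_filter  pid_hash
(libbpf may also pin internal maps for the object's global data.)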
The code is available in the 'perf/pinned-filter-v1' branch at
git://git.kernel.org/pub/scm/linux/kernel/git/namhyung/linux-perf.git
Thanks,
Namhyung
Namhyung Kim (6):
perf bpf-filter: Make filters map a single entry hashmap
perf bpf-filter: Pass 'target' to perf_bpf_filter__prepare()
perf bpf-filter: Split per-task filter use case
perf bpf-filter: Support pin/unpin BPF object
perf record: Fix a potential error handling issue
perf record: Add --setup-filter option
tools/perf/Documentation/perf-record.txt | 5 +
tools/perf/builtin-record.c | 23 +-
tools/perf/builtin-stat.c | 2 +-
tools/perf/builtin-trace.c | 2 +-
tools/perf/util/bpf-filter.c | 369 +++++++++++++++++--
tools/perf/util/bpf-filter.h | 19 +-
tools/perf/util/bpf_skel/sample-filter.h | 4 +-
tools/perf/util/bpf_skel/sample_filter.bpf.c | 58 ++-
tools/perf/util/evlist.c | 5 +-
tools/perf/util/evlist.h | 4 +-
tools/perf/util/python.c | 3 +-
11 files changed, 428 insertions(+), 66 deletions(-)
base-commit: ea558c86248b4955e5c5f3c0c921df450880605e
--
2.45.1.288.g0e0cd299f1-goog
Also, the value is now an array. This is to support multiple filter
entries in the map later.
No functional changes intended.
Signed-off-by: Namhyung Kim <[email protected]>
---
tools/perf/util/bpf-filter.c | 81 ++++++++++++++------
tools/perf/util/bpf_skel/sample-filter.h | 3 +-
tools/perf/util/bpf_skel/sample_filter.bpf.c | 37 +++++----
3 files changed, 78 insertions(+), 43 deletions(-)
diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
index b51544996046..12e9c7dbb4dd 100644
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -87,71 +87,102 @@ static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *
int perf_bpf_filter__prepare(struct evsel *evsel)
{
- int i, x, y, fd;
+ int i, x, y, fd, ret;
struct sample_filter_bpf *skel;
struct bpf_program *prog;
struct bpf_link *link;
struct perf_bpf_filter_expr *expr;
+ struct perf_bpf_filter_entry *entry;
+
+ entry = calloc(MAX_FILTERS, sizeof(*entry));
+ if (entry == NULL)
+ return -1;
skel = sample_filter_bpf__open_and_load();
if (!skel) {
pr_err("Failed to load perf sample-filter BPF skeleton\n");
- return -1;
+ ret = -EPERM;
+ goto err;
}
i = 0;
fd = bpf_map__fd(skel->maps.filters);
list_for_each_entry(expr, &evsel->bpf_filters, list) {
- struct perf_bpf_filter_entry entry = {
- .op = expr->op,
- .part = expr->part,
- .flags = expr->sample_flags,
- .value = expr->val,
- };
+ if (check_sample_flags(evsel, expr) < 0) {
+ ret = -EINVAL;
+ goto err;
+ }
- if (check_sample_flags(evsel, expr) < 0)
- return -1;
+ if (i == MAX_FILTERS) {
+ ret = -E2BIG;
+ goto err;
+ }
- bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
+ entry[i].op = expr->op;
+ entry[i].part = expr->part;
+ entry[i].flags = expr->sample_flags;
+ entry[i].value = expr->val;
i++;
if (expr->op == PBF_OP_GROUP_BEGIN) {
struct perf_bpf_filter_expr *group;
list_for_each_entry(group, &expr->groups, list) {
- struct perf_bpf_filter_entry group_entry = {
- .op = group->op,
- .part = group->part,
- .flags = group->sample_flags,
- .value = group->val,
- };
- bpf_map_update_elem(fd, &i, &group_entry, BPF_ANY);
+ if (i == MAX_FILTERS) {
+ ret = -E2BIG;
+ goto err;
+ }
+
+ entry[i].op = group->op;
+ entry[i].part = group->part;
+ entry[i].flags = group->sample_flags;
+ entry[i].value = group->val;
i++;
}
- memset(&entry, 0, sizeof(entry));
- entry.op = PBF_OP_GROUP_END;
- bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
+ if (i == MAX_FILTERS) {
+ ret = -E2BIG;
+ goto err;
+ }
+
+ entry[i].op = PBF_OP_GROUP_END;
i++;
}
}
- if (i > MAX_FILTERS) {
- pr_err("Too many filters: %d (max = %d)\n", i, MAX_FILTERS);
- return -1;
+ if (i < MAX_FILTERS) {
+ /* to terminate the loop early */
+ entry[i].op = PBF_OP_DONE;
+ i++;
+ }
+
+ /* The filters map has only one entry for now */
+ i = 0;
+ if (bpf_map_update_elem(fd, &i, entry, BPF_ANY) < 0) {
+ ret = -errno;
+ pr_err("Failed to update the filter map\n");
+ goto err;
}
+
prog = skel->progs.perf_sample_filter;
for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
link = bpf_program__attach_perf_event(prog, FD(evsel, x, y));
if (IS_ERR(link)) {
pr_err("Failed to attach perf sample-filter program\n");
- return PTR_ERR(link);
+ ret = PTR_ERR(link);
+ goto err;
}
}
}
+ free(entry);
evsel->bpf_skel = skel;
return 0;
+
+err:
+ free(entry);
+ sample_filter_bpf__destroy(skel);
+ return ret;
}
int perf_bpf_filter__destroy(struct evsel *evsel)
diff --git a/tools/perf/util/bpf_skel/sample-filter.h b/tools/perf/util/bpf_skel/sample-filter.h
index 2e96e1ab084a..cf18f570eef4 100644
--- a/tools/perf/util/bpf_skel/sample-filter.h
+++ b/tools/perf/util/bpf_skel/sample-filter.h
@@ -14,6 +14,7 @@ enum perf_bpf_filter_op {
PBF_OP_AND,
PBF_OP_GROUP_BEGIN,
PBF_OP_GROUP_END,
+ PBF_OP_DONE,
};
/* BPF map entry for filtering */
@@ -24,4 +25,4 @@ struct perf_bpf_filter_entry {
__u64 value;
};
-#endif /* PERF_UTIL_BPF_SKEL_SAMPLE_FILTER_H */
\ No newline at end of file
+#endif /* PERF_UTIL_BPF_SKEL_SAMPLE_FILTER_H */
diff --git a/tools/perf/util/bpf_skel/sample_filter.bpf.c b/tools/perf/util/bpf_skel/sample_filter.bpf.c
index fb94f5280626..5f17cd6458b7 100644
--- a/tools/perf/util/bpf_skel/sample_filter.bpf.c
+++ b/tools/perf/util/bpf_skel/sample_filter.bpf.c
@@ -9,10 +9,10 @@
/* BPF map that will be filled by user space */
struct filters {
- __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(type, BPF_MAP_TYPE_HASH);
__type(key, int);
- __type(value, struct perf_bpf_filter_entry);
- __uint(max_entries, MAX_FILTERS);
+ __type(value, struct perf_bpf_filter_entry[MAX_FILTERS]);
+ __uint(max_entries, 1);
} filters SEC(".maps");
int dropped;
@@ -144,35 +144,35 @@ int perf_sample_filter(void *ctx)
kctx = bpf_cast_to_kern_ctx(ctx);
- for (i = 0; i < MAX_FILTERS; i++) {
- int key = i; /* needed for verifier :( */
+ i = 0;
+ entry = bpf_map_lookup_elem(&filters, &i);
+ if (entry == NULL)
+ goto drop;
- entry = bpf_map_lookup_elem(&filters, &key);
- if (entry == NULL)
- break;
- sample_data = perf_get_sample(kctx, entry);
+ for (i = 0; i < MAX_FILTERS; i++) {
+ sample_data = perf_get_sample(kctx, &entry[i]);
- switch (entry->op) {
+ switch (entry[i].op) {
case PBF_OP_EQ:
- CHECK_RESULT(sample_data, ==, entry->value)
+ CHECK_RESULT(sample_data, ==, entry[i].value)
break;
case PBF_OP_NEQ:
- CHECK_RESULT(sample_data, !=, entry->value)
+ CHECK_RESULT(sample_data, !=, entry[i].value)
break;
case PBF_OP_GT:
- CHECK_RESULT(sample_data, >, entry->value)
+ CHECK_RESULT(sample_data, >, entry[i].value)
break;
case PBF_OP_GE:
- CHECK_RESULT(sample_data, >=, entry->value)
+ CHECK_RESULT(sample_data, >=, entry[i].value)
break;
case PBF_OP_LT:
- CHECK_RESULT(sample_data, <, entry->value)
+ CHECK_RESULT(sample_data, <, entry[i].value)
break;
case PBF_OP_LE:
- CHECK_RESULT(sample_data, <=, entry->value)
+ CHECK_RESULT(sample_data, <=, entry[i].value)
break;
case PBF_OP_AND:
- CHECK_RESULT(sample_data, &, entry->value)
+ CHECK_RESULT(sample_data, &, entry[i].value)
break;
case PBF_OP_GROUP_BEGIN:
in_group = 1;
@@ -183,6 +183,9 @@ int perf_sample_filter(void *ctx)
goto drop;
in_group = 0;
break;
+ case PBF_OP_DONE:
+ /* no failures so far, accept it */
+ return 1;
}
}
/* generate sample data */
--
2.45.1.288.g0e0cd299f1-goog
This is needed to prepare target-specific handling in a later patch.
We want to reuse the pinned BPF program and map so that regular users
can profile their own processes.
Signed-off-by: Namhyung Kim <[email protected]>
---
tools/perf/builtin-record.c | 2 +-
tools/perf/builtin-stat.c | 2 +-
tools/perf/builtin-trace.c | 2 +-
tools/perf/util/bpf-filter.c | 2 +-
tools/perf/util/bpf-filter.h | 6 ++++--
tools/perf/util/evlist.c | 5 +++--
tools/perf/util/evlist.h | 4 +++-
tools/perf/util/python.c | 3 ++-
8 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 66a3de8ac661..8ec0b1607603 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1389,7 +1389,7 @@ static int record__open(struct record *rec)
"even with a suitable vmlinux or kallsyms file.\n\n");
}
- if (evlist__apply_filters(evlist, &pos)) {
+ if (evlist__apply_filters(evlist, &pos, &opts->target)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
pos->filter ?: "BPF", evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 35f79b48e8dc..23ba1e54291b 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -813,7 +813,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
return -1;
}
- if (evlist__apply_filters(evsel_list, &counter)) {
+ if (evlist__apply_filters(evsel_list, &counter, &target)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
counter->filter, evsel__name(counter), errno,
str_error_r(errno, msg, sizeof(msg)));
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 51eca671c797..fd005ebdec04 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -3974,7 +3974,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
err = trace__expand_filters(trace, &evsel);
if (err)
goto out_delete_evlist;
- err = evlist__apply_filters(evlist, &evsel);
+ err = evlist__apply_filters(evlist, &evsel, &trace->opts.target);
if (err < 0)
goto out_error_apply_filters;
diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
index 12e9c7dbb4dd..f43b9e61bf42 100644
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -85,7 +85,7 @@ static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *
return -1;
}
-int perf_bpf_filter__prepare(struct evsel *evsel)
+int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target __maybe_unused)
{
int i, x, y, fd, ret;
struct sample_filter_bpf *skel;
diff --git a/tools/perf/util/bpf-filter.h b/tools/perf/util/bpf-filter.h
index 7afd159411b8..955ef4e3a931 100644
--- a/tools/perf/util/bpf-filter.h
+++ b/tools/perf/util/bpf-filter.h
@@ -16,13 +16,14 @@ struct perf_bpf_filter_expr {
};
struct evsel;
+struct target;
#ifdef HAVE_BPF_SKEL
struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(unsigned long sample_flags, int part,
enum perf_bpf_filter_op op,
unsigned long val);
int perf_bpf_filter__parse(struct list_head *expr_head, const char *str);
-int perf_bpf_filter__prepare(struct evsel *evsel);
+int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target);
int perf_bpf_filter__destroy(struct evsel *evsel);
u64 perf_bpf_filter__lost_count(struct evsel *evsel);
@@ -33,7 +34,8 @@ static inline int perf_bpf_filter__parse(struct list_head *expr_head __maybe_unu
{
return -EOPNOTSUPP;
}
-static inline int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
+static inline int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused,
+ struct target *target __maybe_unused)
{
return -EOPNOTSUPP;
}
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 3a719edafc7a..1417f9a23083 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1086,7 +1086,8 @@ int evlist__create_maps(struct evlist *evlist, struct target *target)
return -1;
}
-int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
+int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel,
+ struct target *target)
{
struct evsel *evsel;
int err = 0;
@@ -1108,7 +1109,7 @@ int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
* non-tracepoint events can have BPF filters.
*/
if (!list_empty(&evsel->bpf_filters)) {
- err = perf_bpf_filter__prepare(evsel);
+ err = perf_bpf_filter__prepare(evsel, target);
if (err) {
*err_evsel = evsel;
break;
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index cb91dc9117a2..cccc34da5a02 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -20,6 +20,7 @@ struct pollfd;
struct thread_map;
struct perf_cpu_map;
struct record_opts;
+struct target;
/*
* State machine of bkw_mmap_state:
@@ -212,7 +213,8 @@ void evlist__enable_non_dummy(struct evlist *evlist);
void evlist__set_selected(struct evlist *evlist, struct evsel *evsel);
int evlist__create_maps(struct evlist *evlist, struct target *target);
-int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel);
+int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel,
+ struct target *target);
u64 __evlist__combined_sample_type(struct evlist *evlist);
u64 evlist__combined_sample_type(struct evlist *evlist);
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 0aeb97c11c03..5e015d0c0df5 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -175,7 +175,8 @@ int bpf_counter__disable(struct evsel *evsel __maybe_unused)
// not to drag util/bpf-filter.c
#ifdef HAVE_BPF_SKEL
-int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
+int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused,
+ struct target *target __maybe_unused)
{
return 0;
}
--
2.45.1.288.g0e0cd299f1-goog
If the target is a list of tasks, it can use a shared hash map for the
filter expressions. The key of the filter map is an integer index, like
in an array. A separate pid_hash map is added to look up the index into
the filter map by tgid.
System-wide mode, including per-cpu and per-user targets, is handled by
the single-entry map as before.
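Conceptually, the lookup in the BPF program becomes a two-step process
(a simplified sketch of the sample_filter.bpf.c change below):
	int tgid = bpf_get_current_pid_tgid() >> 32;
	int *idx = bpf_map_lookup_elem(&pid_hash, &tgid); /* tgid -> index */
	if (idx == NULL)
		goto drop;	/* task is not in the target list */
	entry = bpf_map_lookup_elem(&filters, idx);	/* index -> filter entries */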
Signed-off-by: Namhyung Kim <[email protected]>
---
tools/perf/util/bpf-filter.c | 186 +++++++++++++++----
tools/perf/util/bpf_skel/sample-filter.h | 1 +
tools/perf/util/bpf_skel/sample_filter.bpf.c | 23 ++-
3 files changed, 168 insertions(+), 42 deletions(-)
diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
index f43b9e61bf42..2187975189c9 100644
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -3,10 +3,13 @@
#include <bpf/bpf.h>
#include <linux/err.h>
+#include <api/fs/fs.h>
#include <internal/xyarray.h>
+#include <perf/threadmap.h>
#include "util/debug.h"
#include "util/evsel.h"
+#include "util/target.h"
#include "util/bpf-filter.h"
#include <util/bpf-filter-flex.h>
@@ -85,38 +88,17 @@ static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *
return -1;
}
-int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target __maybe_unused)
+static int get_filter_entries(struct evsel *evsel, struct perf_bpf_filter_entry *entry)
{
- int i, x, y, fd, ret;
- struct sample_filter_bpf *skel;
- struct bpf_program *prog;
- struct bpf_link *link;
+ int i = 0;
struct perf_bpf_filter_expr *expr;
- struct perf_bpf_filter_entry *entry;
-
- entry = calloc(MAX_FILTERS, sizeof(*entry));
- if (entry == NULL)
- return -1;
-
- skel = sample_filter_bpf__open_and_load();
- if (!skel) {
- pr_err("Failed to load perf sample-filter BPF skeleton\n");
- ret = -EPERM;
- goto err;
- }
- i = 0;
- fd = bpf_map__fd(skel->maps.filters);
list_for_each_entry(expr, &evsel->bpf_filters, list) {
- if (check_sample_flags(evsel, expr) < 0) {
- ret = -EINVAL;
- goto err;
- }
+ if (check_sample_flags(evsel, expr) < 0)
+ return -EINVAL;
- if (i == MAX_FILTERS) {
- ret = -E2BIG;
- goto err;
- }
+ if (i == MAX_FILTERS)
+ return -E2BIG;
entry[i].op = expr->op;
entry[i].part = expr->part;
@@ -128,10 +110,8 @@ int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target __maybe_
struct perf_bpf_filter_expr *group;
list_for_each_entry(group, &expr->groups, list) {
- if (i == MAX_FILTERS) {
- ret = -E2BIG;
- goto err;
- }
+ if (i == MAX_FILTERS)
+ return -E2BIG;
entry[i].op = group->op;
entry[i].part = group->part;
@@ -140,10 +120,8 @@ int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target __maybe_
i++;
}
- if (i == MAX_FILTERS) {
- ret = -E2BIG;
- goto err;
- }
+ if (i == MAX_FILTERS)
+ return -E2BIG;
entry[i].op = PBF_OP_GROUP_END;
i++;
@@ -155,15 +133,143 @@ int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target __maybe_
entry[i].op = PBF_OP_DONE;
i++;
}
+ return 0;
+}
+
+static int convert_to_tgid(int tid)
+{
+ char path[128];
+ char *buf, *p, *q;
+ int tgid;
+ size_t len;
+
+ scnprintf(path, sizeof(path), "%d/status", tid);
+ if (procfs__read_str(path, &buf, &len) < 0)
+ return -1;
- /* The filters map has only one entry for now */
- i = 0;
- if (bpf_map_update_elem(fd, &i, entry, BPF_ANY) < 0) {
- ret = -errno;
- pr_err("Failed to update the filter map\n");
+ p = strstr(buf, "Tgid:");
+ if (p == NULL) {
+ free(buf);
+ return -1;
+ }
+
+ tgid = strtol(p + 6, &q, 0);
+ free(buf);
+ if (*q != '\n')
+ return -1;
+
+ return tgid;
+}
+
+static int update_pid_hash(struct sample_filter_bpf *skel, struct evsel *evsel,
+ struct perf_bpf_filter_entry *entry)
+{
+ int filter_idx;
+ int nr, last;
+ int fd = bpf_map__fd(skel->maps.filters);
+ struct perf_thread_map *threads;
+
+ /* Find the first available entry in the filters map */
+ for (filter_idx = 0; filter_idx < MAX_FILTERS; filter_idx++) {
+ if (bpf_map_update_elem(fd, &filter_idx, entry, BPF_NOEXIST) == 0)
+ break;
+ }
+
+ if (filter_idx == MAX_FILTERS) {
+ pr_err("Too many users for the filter map\n");
+ return -EBUSY;
+ }
+
+ threads = perf_evsel__threads(&evsel->core);
+ if (threads == NULL) {
+ pr_err("Cannot get the thread list of the event\n");
+ return -EINVAL;
+ }
+
+ /* save the index to a hash map */
+ fd = bpf_map__fd(skel->maps.pid_hash);
+
+ last = -1;
+ nr = perf_thread_map__nr(threads);
+ for (int i = 0; i < nr; i++) {
+ int pid = perf_thread_map__pid(threads, i);
+ int tgid;
+
+ /* it actually needs tgid, let's get tgid from /proc. */
+ tgid = convert_to_tgid(pid);
+ if (tgid < 0) {
+ /* the thread may be dead, ignore. */
+ continue;
+ }
+
+ if (tgid == last)
+ continue;
+ last = tgid;
+
+ if (bpf_map_update_elem(fd, &tgid, &filter_idx, BPF_ANY) < 0) {
+ pr_err("Failed to update the pid hash\n");
+ return -errno;
+ }
+ pr_debug("pid hash: %d -> %d\n", tgid, filter_idx);
+ }
+ return 0;
+}
+
+int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target)
+{
+ int i, x, y, fd, ret;
+ struct sample_filter_bpf *skel = NULL;
+ struct bpf_program *prog;
+ struct bpf_link *link;
+ struct perf_bpf_filter_entry *entry;
+ bool needs_pid_hash = !target__has_cpu(target) && !target->uid_str;
+
+ entry = calloc(MAX_FILTERS, sizeof(*entry));
+ if (entry == NULL)
+ return -1;
+
+ ret = get_filter_entries(evsel, entry);
+ if (ret < 0) {
+ pr_err("Failed to process filter entries\n");
+ goto err;
+ }
+
+ skel = sample_filter_bpf__open();
+ if (!skel) {
+ pr_err("Failed to open perf sample-filter BPF skeleton\n");
+ ret = -EPERM;
goto err;
}
+ if (needs_pid_hash) {
+ bpf_map__set_max_entries(skel->maps.filters, MAX_FILTERS);
+ bpf_map__set_max_entries(skel->maps.pid_hash, MAX_PIDS);
+ skel->rodata->use_pid_hash = 1;
+ }
+
+ if (sample_filter_bpf__load(skel) < 0) {
+ pr_err("Failed to load perf sample-filter BPF skeleton\n");
+ ret = -EPERM;
+ goto err;
+ }
+
+ if (needs_pid_hash) {
+ /* The filters map is shared among other processes */
+ ret = update_pid_hash(skel, evsel, entry);
+ if (ret < 0)
+ goto err;
+ } else {
+ i = 0;
+ fd = bpf_map__fd(skel->maps.filters);
+
+ /* The filters map has only one entry in this case */
+ if (bpf_map_update_elem(fd, &i, entry, BPF_ANY) < 0) {
+ ret = -errno;
+ pr_err("Failed to update the filter map\n");
+ goto err;
+ }
+ }
+
prog = skel->progs.perf_sample_filter;
for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
diff --git a/tools/perf/util/bpf_skel/sample-filter.h b/tools/perf/util/bpf_skel/sample-filter.h
index cf18f570eef4..375e27206640 100644
--- a/tools/perf/util/bpf_skel/sample-filter.h
+++ b/tools/perf/util/bpf_skel/sample-filter.h
@@ -2,6 +2,7 @@
#define PERF_UTIL_BPF_SKEL_SAMPLE_FILTER_H
#define MAX_FILTERS 64
+#define MAX_PIDS (16 * 1024)
/* supported filter operations */
enum perf_bpf_filter_op {
diff --git a/tools/perf/util/bpf_skel/sample_filter.bpf.c b/tools/perf/util/bpf_skel/sample_filter.bpf.c
index 5f17cd6458b7..1ccb0e8be73b 100644
--- a/tools/perf/util/bpf_skel/sample_filter.bpf.c
+++ b/tools/perf/util/bpf_skel/sample_filter.bpf.c
@@ -15,7 +15,16 @@ struct filters {
__uint(max_entries, 1);
} filters SEC(".maps");
+/* tgid to filter index */
+struct pid_hash {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+} pid_hash SEC(".maps");
+
int dropped;
+volatile const int use_pid_hash;
void *bpf_cast_to_kern_ctx(void *) __ksym;
@@ -140,11 +149,21 @@ int perf_sample_filter(void *ctx)
__u64 sample_data;
int in_group = 0;
int group_result = 0;
- int i;
+ int i = 0;
kctx = bpf_cast_to_kern_ctx(ctx);
- i = 0;
+ if (use_pid_hash) {
+ int tgid = bpf_get_current_pid_tgid() >> 32;
+ int *idx;
+
+ idx = bpf_map_lookup_elem(&pid_hash, &tgid);
+ if (idx)
+ i = *idx;
+ else
+ goto drop;
+ }
+
entry = bpf_map_lookup_elem(&filters, &i);
if (entry == NULL)
goto drop;
--
2.45.1.288.g0e0cd299f1-goog
Use the pinned objects so that unprivileged users can profile their own
tasks. The BPF objects need to be pinned in the BPF-fs by root first;
that will be handled in a later patch.
Signed-off-by: Namhyung Kim <[email protected]>
---
tools/perf/util/bpf-filter.c | 230 +++++++++++++++++++++++++++++------
tools/perf/util/bpf-filter.h | 13 ++
2 files changed, 209 insertions(+), 34 deletions(-)
diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
index 2187975189c9..ac84260082a7 100644
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <stdlib.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
#include <bpf/bpf.h>
#include <linux/err.h>
@@ -23,6 +26,9 @@
#define __PERF_SAMPLE_TYPE(st, opt) { st, #st, opt }
#define PERF_SAMPLE_TYPE(_st, opt) __PERF_SAMPLE_TYPE(PERF_SAMPLE_##_st, opt)
+/* Index in the pinned 'filters' map. Should be released after use. */
+static int pinned_filter_idx = -1;
+
static const struct perf_sample_info {
u64 type;
const char *name;
@@ -47,6 +53,8 @@ static const struct perf_sample_info {
PERF_SAMPLE_TYPE(DATA_PAGE_SIZE, "--data-page-size"),
};
+static int get_pinned_fd(const char *name);
+
static const struct perf_sample_info *get_sample_info(u64 flags)
{
size_t i;
@@ -161,19 +169,26 @@ static int convert_to_tgid(int tid)
return tgid;
}
-static int update_pid_hash(struct sample_filter_bpf *skel, struct evsel *evsel,
- struct perf_bpf_filter_entry *entry)
+static int update_pid_hash(struct evsel *evsel, struct perf_bpf_filter_entry *entry)
{
int filter_idx;
- int nr, last;
- int fd = bpf_map__fd(skel->maps.filters);
+ int fd, nr, last;
struct perf_thread_map *threads;
+ fd = get_pinned_fd("filters");
+ if (fd < 0) {
+ pr_debug("cannot get fd for 'filters' map\n");
+ return fd;
+ }
+
/* Find the first available entry in the filters map */
for (filter_idx = 0; filter_idx < MAX_FILTERS; filter_idx++) {
- if (bpf_map_update_elem(fd, &filter_idx, entry, BPF_NOEXIST) == 0)
+ if (bpf_map_update_elem(fd, &filter_idx, entry, BPF_NOEXIST) == 0) {
+ pinned_filter_idx = filter_idx;
break;
+ }
}
+ close(fd);
if (filter_idx == MAX_FILTERS) {
pr_err("Too many users for the filter map\n");
@@ -187,7 +202,9 @@ static int update_pid_hash(struct sample_filter_bpf *skel, struct evsel *evsel,
}
/* save the index to a hash map */
- fd = bpf_map__fd(skel->maps.pid_hash);
+ fd = get_pinned_fd("pid_hash");
+ if (fd < 0)
+ return fd;
last = -1;
nr = perf_thread_map__nr(threads);
@@ -208,10 +225,12 @@ static int update_pid_hash(struct sample_filter_bpf *skel, struct evsel *evsel,
if (bpf_map_update_elem(fd, &tgid, &filter_idx, BPF_ANY) < 0) {
pr_err("Failed to update the pid hash\n");
- return -errno;
+ close(fd);
+ return -1;
}
pr_debug("pid hash: %d -> %d\n", tgid, filter_idx);
}
+ close(fd);
return 0;
}
@@ -234,40 +253,47 @@ int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target)
goto err;
}
- skel = sample_filter_bpf__open();
- if (!skel) {
- pr_err("Failed to open perf sample-filter BPF skeleton\n");
- ret = -EPERM;
- goto err;
- }
+ if (needs_pid_hash && geteuid() != 0) {
+ /* The filters map is shared among other processes */
+ ret = update_pid_hash(evsel, entry);
+ if (ret < 0)
+ goto err;
- if (needs_pid_hash) {
- bpf_map__set_max_entries(skel->maps.filters, MAX_FILTERS);
- bpf_map__set_max_entries(skel->maps.pid_hash, MAX_PIDS);
- skel->rodata->use_pid_hash = 1;
+ fd = get_pinned_fd("perf_sample_filter");
+ if (fd < 0) {
+ ret = fd;
+ goto err;
+ }
+
+ for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
+ for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
+ ret = ioctl(FD(evsel, x, y), PERF_EVENT_IOC_SET_BPF, fd);
+ if (ret < 0) {
+ pr_err("Failed to attach perf sample-filter\n");
+ goto err;
+ }
+ }
+ }
+
+ free(entry);
+ return 0;
}
- if (sample_filter_bpf__load(skel) < 0) {
+ skel = sample_filter_bpf__open_and_load();
+ if (!skel) {
+ ret = -errno;
pr_err("Failed to load perf sample-filter BPF skeleton\n");
- ret = -EPERM;
goto err;
}
- if (needs_pid_hash) {
- /* The filters map is shared among other processes */
- ret = update_pid_hash(skel, evsel, entry);
- if (ret < 0)
- goto err;
- } else {
- i = 0;
- fd = bpf_map__fd(skel->maps.filters);
-
- /* The filters map has only one entry in this case */
- if (bpf_map_update_elem(fd, &i, entry, BPF_ANY) < 0) {
- ret = -errno;
- pr_err("Failed to update the filter map\n");
- goto err;
- }
+ i = 0;
+ fd = bpf_map__fd(skel->maps.filters);
+
+ /* The filters map has only one entry in this case */
+ if (bpf_map_update_elem(fd, &i, entry, BPF_ANY) < 0) {
+ ret = -errno;
+ pr_err("Failed to update the filter map\n");
+ goto err;
}
prog = skel->progs.perf_sample_filter;
@@ -300,6 +326,15 @@ int perf_bpf_filter__destroy(struct evsel *evsel)
free(expr);
}
sample_filter_bpf__destroy(evsel->bpf_skel);
+
+ if (pinned_filter_idx >= 0) {
+ int fd = get_pinned_fd("filters");
+
+ bpf_map_delete_elem(fd, &pinned_filter_idx);
+ pinned_filter_idx = -1;
+ close(fd);
+ }
+
return 0;
}
@@ -342,3 +377,130 @@ int perf_bpf_filter__parse(struct list_head *expr_head, const char *str)
return ret;
}
+
+int perf_bpf_filter__pin(void)
+{
+ struct sample_filter_bpf *skel;
+ char *path = NULL;
+ int dir_fd, ret = -1;
+
+ skel = sample_filter_bpf__open();
+ if (!skel) {
+ ret = -errno;
+ pr_err("Failed to open perf sample-filter BPF skeleton\n");
+ goto err;
+ }
+
+ /* pinned program will use pid-hash */
+ bpf_map__set_max_entries(skel->maps.filters, MAX_FILTERS);
+ bpf_map__set_max_entries(skel->maps.pid_hash, MAX_PIDS);
+ skel->rodata->use_pid_hash = 1;
+
+ if (sample_filter_bpf__load(skel) < 0) {
+ ret = -errno;
+ pr_err("Failed to load perf sample-filter BPF skeleton\n");
+ goto err;
+ }
+
+ if (asprintf(&path, "%s/fs/bpf/%s", sysfs__mountpoint(),
+ PERF_BPF_FILTER_PIN_PATH) < 0) {
+ ret = -errno;
+ pr_err("Failed to allocate pathname in the BPF-fs\n");
+ goto err;
+ }
+
+ ret = bpf_object__pin(skel->obj, path);
+ if (ret < 0) {
+ pr_err("Failed to pin BPF filter objects\n");
+ goto err;
+ }
+
+ /* setup access permissions for the pinned objects */
+ dir_fd = open(path, O_PATH);
+ if (dir_fd < 0) {
+ bpf_object__unpin(skel->obj, path);
+ ret = dir_fd;
+ goto err;
+ }
+
+ /* BPF-fs root has the sticky bit */
+ if (fchmodat(dir_fd, "..", 01755, 0) < 0) {
+ pr_debug("chmod for BPF-fs failed\n");
+ ret = -errno;
+ }
+
+ /* perf_filter directory */
+ if (fchmod(dir_fd, 0755) < 0) {
+ pr_debug("chmod for perf_filter directory failed?\n");
+ /*
+ * On my machine, the operation succeeds but returns failure.
+ * Don't know why, let's ignore the failure for now. If it's
+ * a real error, the following fchmodat() should fail too.
+ */
+ }
+
+ /* programs need write permission for some reason */
+ if (fchmodat(dir_fd, "perf_sample_filter", 0777, 0) < 0) {
+ pr_debug("chmod for perf_sample_filter failed\n");
+ ret = -errno;
+ }
+ /* maps */
+ if (fchmodat(dir_fd, "filters", 0666, 0) < 0) {
+ pr_debug("chmod for filters failed\n");
+ ret = -errno;
+ }
+ if (fchmodat(dir_fd, "pid_hash", 0666, 0) < 0) {
+ pr_debug("chmod for pid_hash failed\n");
+ ret = -errno;
+ }
+
+ close(dir_fd);
+
+err:
+ free(path);
+ sample_filter_bpf__destroy(skel);
+ return ret;
+}
+
+int perf_bpf_filter__unpin(void)
+{
+ struct sample_filter_bpf *skel;
+ char *path = NULL;
+ int ret = -1;
+
+ skel = sample_filter_bpf__open_and_load();
+ if (!skel) {
+ ret = -errno;
+ pr_err("Failed to open perf sample-filter BPF skeleton\n");
+ goto err;
+ }
+
+ if (asprintf(&path, "%s/fs/bpf/%s", sysfs__mountpoint(),
+ PERF_BPF_FILTER_PIN_PATH) < 0) {
+ ret = -errno;
+ pr_err("Failed to allocate pathname in the BPF-fs\n");
+ goto err;
+ }
+
+ ret = bpf_object__unpin(skel->obj, path);
+
+err:
+ free(path);
+ sample_filter_bpf__destroy(skel);
+ return ret;
+}
+
+static int get_pinned_fd(const char *name)
+{
+ char *path = NULL;
+ int fd;
+
+ if (asprintf(&path, "%s/fs/bpf/%s/%s", sysfs__mountpoint(),
+ PERF_BPF_FILTER_PIN_PATH, name) < 0)
+ return -1;
+
+ fd = bpf_obj_get(path);
+
+ free(path);
+ return fd;
+}
diff --git a/tools/perf/util/bpf-filter.h b/tools/perf/util/bpf-filter.h
index 955ef4e3a931..32ff8299a5f3 100644
--- a/tools/perf/util/bpf-filter.h
+++ b/tools/perf/util/bpf-filter.h
@@ -18,6 +18,9 @@ struct perf_bpf_filter_expr {
struct evsel;
struct target;
+/* path in BPF-fs for the pinned program and maps */
+#define PERF_BPF_FILTER_PIN_PATH "perf_filter"
+
#ifdef HAVE_BPF_SKEL
struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(unsigned long sample_flags, int part,
enum perf_bpf_filter_op op,
@@ -26,6 +29,8 @@ int perf_bpf_filter__parse(struct list_head *expr_head, const char *str);
int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target);
int perf_bpf_filter__destroy(struct evsel *evsel);
u64 perf_bpf_filter__lost_count(struct evsel *evsel);
+int perf_bpf_filter__pin(void);
+int perf_bpf_filter__unpin(void);
#else /* !HAVE_BPF_SKEL */
@@ -47,5 +52,13 @@ static inline u64 perf_bpf_filter__lost_count(struct evsel *evsel __maybe_unused
{
return 0;
}
+static inline int perf_bpf_filter__pin(void)
+{
+ return -EOPNOTSUPP;
+}
+static inline int perf_bpf_filter__unpin(void)
+{
+ return -EOPNOTSUPP;
+}
#endif /* HAVE_BPF_SKEL*/
#endif /* PERF_UTIL_BPF_FILTER_H */
--
2.45.1.288.g0e0cd299f1-goog
The evlist is allocated at the beginning of cmd_record(). Also, freeing
the thread masks should be paired with record__init_thread_masks(),
which is called right before __cmd_record().
Let's change the order of these cleanups so the resources are released
correctly on error paths. This may be fine since the process is exiting
anyway, but it could become a problem if the code manages system-wide
resources that outlive the process.
Signed-off-by: Namhyung Kim <[email protected]>
---
tools/perf/builtin-record.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 8ec0b1607603..3a5a24dec356 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -4258,13 +4258,13 @@ int cmd_record(int argc, const char **argv)
err = __cmd_record(&record, argc, argv);
out:
- evlist__delete(rec->evlist);
+ record__free_thread_masks(rec, rec->nr_threads);
+ rec->nr_threads = 0;
symbol__exit();
auxtrace_record__free(rec->itr);
out_opts:
- record__free_thread_masks(rec, rec->nr_threads);
- rec->nr_threads = 0;
evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
+ evlist__delete(rec->evlist);
return err;
}
--
2.45.1.288.g0e0cd299f1-goog
To allow BPF filters for unprivileged users, the BPF objects need to be
pinned in the BPF-fs first. Let's add a new option to pin and unpin the
objects easily.
$ sudo perf record --setup-filter pin
The above command pins the BPF program and maps for the filter, provided
the system has a BPF-fs (usually mounted at /sys/fs/bpf/). To unpin the
objects, run the following command (as root):
$ sudo perf record --setup-filter unpin
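Once pinned, a regular (non-root) user can use the filter on their own
tasks, for example:
$ perf record -e cycles:u --filter 'period < 10000' perf test -w noploop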
Signed-off-by: Namhyung Kim <[email protected]>
---
tools/perf/Documentation/perf-record.txt | 5 +++++
tools/perf/builtin-record.c | 15 +++++++++++++++
2 files changed, 20 insertions(+)
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 6015fdd08fb6..e51a492dc8e0 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -828,6 +828,11 @@ filtered through the mask provided by -C option.
only, as of now. So the applications built without the frame
pointer might see bogus addresses.
+--setup-filter=<action>::
+ Prepare BPF filter to be used by regular users. The action should be
+ either "pin" or "unpin". The filter can be used after it's pinned.
+
+
include::intel-hybrid.txt[]
SEE ALSO
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3a5a24dec356..4dababd0d338 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -171,6 +171,7 @@ struct record {
bool timestamp_filename;
bool timestamp_boundary;
bool off_cpu;
+ const char *filter_action;
struct switch_output switch_output;
unsigned long long samples;
unsigned long output_max_size; /* = 0: unlimited */
@@ -3573,6 +3574,8 @@ static struct option __record_options[] = {
"write collected trace data into several data files using parallel threads",
record__parse_threads),
OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
+ OPT_STRING(0, "setup-filter", &record.filter_action, "pin|unpin",
+ "BPF filter action"),
OPT_END()
};
@@ -4102,6 +4105,18 @@ int cmd_record(int argc, const char **argv)
pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n");
}
+ if (rec->filter_action) {
+ if (!strcmp(rec->filter_action, "pin"))
+ err = perf_bpf_filter__pin();
+ else if (!strcmp(rec->filter_action, "unpin"))
+ err = perf_bpf_filter__unpin();
+ else {
+ pr_warning("Unknown BPF filter action: %s\n", rec->filter_action);
+ err = -EINVAL;
+ }
+ goto out_opts;
+ }
+
/*
* Allow aliases to facilitate the lookup of symbols for address
* filters. Refer to auxtrace_parse_filters().
--
2.45.1.288.g0e0cd299f1-goog