2022-02-03 14:33:52

by Jiri Olsa

Subject: [PATCH 6/8] libbpf: Add bpf_program__attach_kprobe_opts for multi kprobes

Adding support to bpf_program__attach_kprobe_opts to attach kprobes
to multiple functions.

If the kprobe program has BPF_TRACE_FPROBE as its expected_attach_type,
the new fprobe link is used to attach the program. In this case
'func_name' is used as a pattern matching the functions to attach to.

Also adding support for the '*' wildcard in the 'kprobe/kretprobe'
section name given to the SEC macro, like:

SEC("kprobe/bpf_fentry_test*")
SEC("kretprobe/bpf_fentry_test*")

This sets the kprobe's expected_attach_type to BPF_TRACE_FPROBE
and attaches it to the provided function pattern.
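
For illustration, the same can be done through the opts API; a minimal
sketch (the skeleton and program names below are made up):

  DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
  struct bpf_link *link;

  /* either annotate the program with SEC("kprobe/bpf_fentry_test*"),
   * or switch the attach type by hand before loading:
   */
  bpf_program__set_expected_attach_type(skel->progs.test_kprobe,
                                        BPF_TRACE_FPROBE);
  ...
  /* 'func_name' is now treated as a pattern, not a single symbol */
  link = bpf_program__attach_kprobe_opts(skel->progs.test_kprobe,
                                         "bpf_fentry_test*", &opts);
  if (libbpf_get_error(link))
          /* handle the attach error */;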

Signed-off-by: Jiri Olsa <[email protected]>
---
tools/lib/bpf/libbpf.c | 136 ++++++++++++++++++++++++++++++++++++++++-
1 file changed, 133 insertions(+), 3 deletions(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 7d595cfd03bc..6b343ef77ed8 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -8607,13 +8607,15 @@ static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie
static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie);
static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie);

+static int init_kprobe(struct bpf_program *prog, long cookie);
+
static const struct bpf_sec_def section_defs[] = {
SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX),
SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("kprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
+ SEC_DEF("kprobe/", KPROBE, 0, SEC_NONE, attach_kprobe, .init_fn = init_kprobe),
SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE),
- SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
+ SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe, .init_fn = init_kprobe),
SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE),
SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE),
SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED),
@@ -10031,6 +10033,123 @@ static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
return pfd;
}

+struct fprobe_resolve {
+ const char *name;
+ __u64 *addrs;
+ __u32 alloc;
+ __u32 cnt;
+};
+
+static bool glob_matches(const char *glob, const char *s)
+{
+ int n = strlen(glob);
+
+ if (n == 1 && glob[0] == '*')
+ return true;
+
+ if (glob[0] == '*' && glob[n - 1] == '*') {
+ const char *subs;
+ /* substring match */
+
+ /* this is hacky, but we don't want to allocate
+ * for no good reason
+ */
+ ((char *)glob)[n - 1] = '\0';
+ subs = strstr(s, glob + 1);
+ ((char *)glob)[n - 1] = '*';
+
+ return subs != NULL;
+ } else if (glob[0] == '*') {
+ size_t nn = strlen(s);
+ /* suffix match */
+
+ /* too short for a given suffix */
+ if (nn < n - 1)
+ return false;
+ return strcmp(s + nn - (n - 1), glob + 1) == 0;
+ } else if (glob[n - 1] == '*') {
+ /* prefix match */
+ return strncmp(s, glob, n - 1) == 0;
+ } else {
+ /* exact match */
+ return strcmp(glob, s) == 0;
+ }
+}
+
+static int resolve_fprobe_cb(void *arg, unsigned long long sym_addr,
+ char sym_type, const char *sym_name)
+{
+ struct fprobe_resolve *res = arg;
+ __u64 *p;
+
+ if (!glob_matches(res->name, sym_name))
+ return 0;
+
+ if (res->cnt == res->alloc) {
+ res->alloc = max((__u32) 16, res->alloc * 3 / 2);
+ p = libbpf_reallocarray(res->addrs, res->alloc, sizeof(*res->addrs));
+ if (!p)
+ return -ENOMEM;
+ res->addrs = p;
+ }
+ res->addrs[res->cnt++] = sym_addr;
+ return 0;
+}
+
+static struct bpf_link *
+attach_fprobe_opts(const struct bpf_program *prog,
+ const char *func_name,
+ const struct bpf_kprobe_opts *kopts)
+{
+ DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
+ struct fprobe_resolve res = {
+ .name = func_name,
+ };
+ struct bpf_link *link = NULL;
+ char errmsg[STRERR_BUFSIZE];
+ int err, link_fd, prog_fd;
+ bool retprobe;
+
+ err = libbpf__kallsyms_parse(&res, resolve_fprobe_cb);
+ if (err)
+ goto error;
+ if (!res.cnt) {
+ err = -ENOENT;
+ goto error;
+ }
+
+ retprobe = OPTS_GET(kopts, retprobe, false);
+
+ opts.fprobe.addrs = (__u64) res.addrs;
+ opts.fprobe.cnt = res.cnt;
+ opts.flags = retprobe ? BPF_F_FPROBE_RETURN : 0;
+
+ link = calloc(1, sizeof(*link));
+ if (!link) {
+ err = -ENOMEM;
+ goto error;
+ }
+ link->detach = &bpf_link__detach_fd;
+
+ prog_fd = bpf_program__fd(prog);
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FPROBE, &opts);
+ if (link_fd < 0) {
+ err = -errno;
+ pr_warn("prog '%s': failed to attach to %s: %s\n",
+ prog->name, res.name,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto error;
+ }
+ link->fd = link_fd;
+ free(res.addrs);
+ return link;
+
+error:
+ free(link);
+ free(res.addrs);
+ return libbpf_err_ptr(err);
+}
+
struct bpf_link *
bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
const char *func_name,
@@ -10047,6 +10166,9 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
if (!OPTS_VALID(opts, bpf_kprobe_opts))
return libbpf_err_ptr(-EINVAL);

+ if (prog->expected_attach_type == BPF_TRACE_FPROBE)
+ return attach_fprobe_opts(prog, func_name, opts);
+
retprobe = OPTS_GET(opts, retprobe, false);
offset = OPTS_GET(opts, offset, 0);
pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
@@ -10112,6 +10234,14 @@ struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
}

+static int init_kprobe(struct bpf_program *prog, long cookie)
+{
+ /* If we have wildcard, switch to fprobe link. */
+ if (strchr(prog->sec_name, '*'))
+ bpf_program__set_expected_attach_type(prog, BPF_TRACE_FPROBE);
+ return 0;
+}
+
static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie)
{
DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
@@ -10127,7 +10257,7 @@ static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cooki
else
func_name = prog->sec_name + sizeof("kprobe/") - 1;

- n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
+ n = sscanf(func_name, "%m[a-zA-Z0-9_.*]+%li", &func, &offset);
if (n < 1) {
err = -EINVAL;
pr_warn("kprobe name is invalid: %s\n", func_name);
--
2.34.1


2022-02-09 04:15:37

by Andrii Nakryiko

Subject: Re: [PATCH 6/8] libbpf: Add bpf_program__attach_kprobe_opts for multi kprobes

On Wed, Feb 2, 2022 at 5:54 AM Jiri Olsa <[email protected]> wrote:
>
> Adding support to bpf_program__attach_kprobe_opts to attach kprobes
> to multiple functions.
>
> If the kprobe program has BPF_TRACE_FPROBE as its expected_attach_type,
> the new fprobe link is used to attach the program. In this case
> 'func_name' is used as a pattern matching the functions to attach to.
>
> Also adding support for the '*' wildcard in the 'kprobe/kretprobe'
> section name given to the SEC macro, like:
>
> SEC("kprobe/bpf_fentry_test*")
> SEC("kretprobe/bpf_fentry_test*")
>
> This sets the kprobe's expected_attach_type to BPF_TRACE_FPROBE
> and attaches it to the provided function pattern.
>
> Signed-off-by: Jiri Olsa <[email protected]>
> ---
> tools/lib/bpf/libbpf.c | 136 ++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 133 insertions(+), 3 deletions(-)
>
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 7d595cfd03bc..6b343ef77ed8 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -8607,13 +8607,15 @@ static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie
> static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie);
> static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie);
>
> +static int init_kprobe(struct bpf_program *prog, long cookie);
> +
> static const struct bpf_sec_def section_defs[] = {
> SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX),
> SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
> SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
> - SEC_DEF("kprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
> + SEC_DEF("kprobe/", KPROBE, 0, SEC_NONE, attach_kprobe, .init_fn = init_kprobe),
> SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE),
> - SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
> + SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe, .init_fn = init_kprobe),
> SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE),
> SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE),
> SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED),
> @@ -10031,6 +10033,123 @@ static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
> return pfd;
> }
>
> +struct fprobe_resolve {
> + const char *name;
> + __u64 *addrs;
> + __u32 alloc;
> + __u32 cnt;
> +};
> +
> +static bool glob_matches(const char *glob, const char *s)

we've since added a more generic glob_match() implementation (see
test_progs.c), let's copy/paste that one (it's actually shorter and
doesn't modify its input args in a hacky way). Let's maybe also add '?'
handling (it's trivial). Both the original code in perf and the one in
test_progs.c are GPL-2.0-only, so let's also get acks from the original
authors.
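
Roughly something like this, with '?' added (just a sketch, not the
verbatim test_progs.c code):

  static bool glob_match(const char *str, const char *pat)
  {
          while (*str && *pat && *pat != '*') {
                  if (*pat != '?' && *str != *pat)
                          return false;
                  str++;
                  pat++;
          }
          /* check wildcard */
          if (*pat == '*') {
                  while (*pat == '*')
                          pat++;
                  if (!*pat) /* trailing '*' matches the rest */
                          return true;
                  /* try the rest of the pattern at each position */
                  while (*str) {
                          if (glob_match(str, pat))
                                  return true;
                          str++;
                  }
          }
          return !*str && !*pat;
  }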

> +{
> + int n = strlen(glob);
> +
> + if (n == 1 && glob[0] == '*')
> + return true;
> +
> + if (glob[0] == '*' && glob[n - 1] == '*') {
> + const char *subs;
> + /* substring match */
> +
> + /* this is hacky, but we don't want to allocate
> + * for no good reason
> + */
> + ((char *)glob)[n - 1] = '\0';
> + subs = strstr(s, glob + 1);
> + ((char *)glob)[n - 1] = '*';
> +
> + return subs != NULL;
> + } else if (glob[0] == '*') {
> + size_t nn = strlen(s);
> + /* suffix match */
> +
> + /* too short for a given suffix */
> + if (nn < n - 1)
> + return false;
> + return strcmp(s + nn - (n - 1), glob + 1) == 0;
> + } else if (glob[n - 1] == '*') {
> + /* prefix match */
> + return strncmp(s, glob, n - 1) == 0;
> + } else {
> + /* exact match */
> + return strcmp(glob, s) == 0;
> + }
> +}
> +
> +static int resolve_fprobe_cb(void *arg, unsigned long long sym_addr,
> + char sym_type, const char *sym_name)
> +{
> + struct fprobe_resolve *res = arg;
> + __u64 *p;
> +
> + if (!glob_matches(res->name, sym_name))
> + return 0;
> +
> + if (res->cnt == res->alloc) {
> + res->alloc = max((__u32) 16, res->alloc * 3 / 2);
> > + p = libbpf_reallocarray(res->addrs, res->alloc, sizeof(*res->addrs));
> + if (!p)
> + return -ENOMEM;
> + res->addrs = p;
> + }

please use libbpf_ensure_mem() instead
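
i.e. something along these lines (untested; 'alloc' would also need to
become size_t to match the helper's signature):

  err = libbpf_ensure_mem((void **)&res->addrs, &res->alloc,
                          sizeof(*res->addrs), res->cnt + 1);
  if (err)
          return err;
  res->addrs[res->cnt++] = sym_addr;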


> + res->addrs[res->cnt++] = sym_addr;
> + return 0;
> +}
> +
> +static struct bpf_link *
> +attach_fprobe_opts(const struct bpf_program *prog,
> + const char *func_name,

func_glob or func_pattern?

> + const struct bpf_kprobe_opts *kopts)
> +{
> + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
> + struct fprobe_resolve res = {
> + .name = func_name,
> + };
> + struct bpf_link *link = NULL;
> + char errmsg[STRERR_BUFSIZE];
> + int err, link_fd, prog_fd;
> + bool retprobe;
> +
> + err = libbpf__kallsyms_parse(&res, resolve_fprobe_cb);
> + if (err)
> + goto error;
> + if (!res.cnt) {
> + err = -ENOENT;
> + goto error;
> + }
> +
> + retprobe = OPTS_GET(kopts, retprobe, false);
> +
> + opts.fprobe.addrs = (__u64) res.addrs;

ptr_to_u64()
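
i.e. opts.fprobe.addrs = ptr_to_u64(res.addrs); there's already a
static helper for that in libbpf.c, and it avoids the pointer-size cast
warnings on 32-bit.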

> + opts.fprobe.cnt = res.cnt;
> + opts.flags = retprobe ? BPF_F_FPROBE_RETURN : 0;
> +
> + link = calloc(1, sizeof(*link));
> + if (!link) {
> + err = -ENOMEM;
> + goto error;
> + }
> + link->detach = &bpf_link__detach_fd;
> +
> + prog_fd = bpf_program__fd(prog);
> + link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FPROBE, &opts);
> + if (link_fd < 0) {
> + err = -errno;
> + pr_warn("prog '%s': failed to attach to %s: %s\n",
> + prog->name, res.name,
> + libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
> + goto error;
> + }
> + link->fd = link_fd;
> + free(res.addrs);
> + return link;
> +
> +error:
> + free(link);
> + free(res.addrs);
> + return libbpf_err_ptr(err);
> +}
> +
> struct bpf_link *
> bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
> const char *func_name,
> @@ -10047,6 +10166,9 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
> if (!OPTS_VALID(opts, bpf_kprobe_opts))
> return libbpf_err_ptr(-EINVAL);
>
> + if (prog->expected_attach_type == BPF_TRACE_FPROBE)
> + return attach_fprobe_opts(prog, func_name, opts);
> +
> retprobe = OPTS_GET(opts, retprobe, false);
> offset = OPTS_GET(opts, offset, 0);
> pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
> @@ -10112,6 +10234,14 @@ struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
> return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
> }
>
> +static int init_kprobe(struct bpf_program *prog, long cookie)
> +{
> + /* If we have wildcard, switch to fprobe link. */
> + if (strchr(prog->sec_name, '*'))

ugh... :( maybe let's have a separate SEC("kprobe.multi/<glob>") and
same for kretprobe?
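
e.g. a rough sketch (section and handler names are not final):

  SEC_DEF("kprobe.multi/", KPROBE, BPF_TRACE_FPROBE, SEC_NONE, attach_kprobe_multi),
  SEC_DEF("kretprobe.multi/", KPROBE, BPF_TRACE_FPROBE, SEC_NONE, attach_kprobe_multi),

and on the BPF side SEC("kprobe.multi/bpf_fentry_test*"), so we wouldn't
need the wildcard check in init_kprobe or the sscanf change at all.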


> + bpf_program__set_expected_attach_type(prog, BPF_TRACE_FPROBE);
> + return 0;
> +}
> +
> static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie)
> {
> DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
> @@ -10127,7 +10257,7 @@ static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cooki
> else
> func_name = prog->sec_name + sizeof("kprobe/") - 1;
>
> - n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
> + n = sscanf(func_name, "%m[a-zA-Z0-9_.*]+%li", &func, &offset);
> if (n < 1) {
> err = -EINVAL;
> pr_warn("kprobe name is invalid: %s\n", func_name);
> --
> 2.34.1
>

2022-02-09 10:25:37

by Jiri Olsa

Subject: Re: [PATCH 6/8] libbpf: Add bpf_program__attach_kprobe_opts for multi kprobes

On Mon, Feb 07, 2022 at 10:59:29AM -0800, Andrii Nakryiko wrote:

SNIP

> > +struct fprobe_resolve {
> > + const char *name;
> > + __u64 *addrs;
> > + __u32 alloc;
> > + __u32 cnt;
> > +};
> > +
> > +static bool glob_matches(const char *glob, const char *s)
>
> we've since added a more generic glob_match() implementation (see
> test_progs.c), let's copy/paste that one (it's actually shorter and
> doesn't modify its input args in a hacky way). Let's maybe also add '?'
> handling (it's trivial). Both the original code in perf and the one in
> test_progs.c are GPL-2.0-only, so let's also get acks from the original
> authors.

ok, will check

>
> > +{
> > + int n = strlen(glob);
> > +
> > + if (n == 1 && glob[0] == '*')
> > + return true;
> > +
> > + if (glob[0] == '*' && glob[n - 1] == '*') {
> > + const char *subs;
> > + /* substring match */
> > +
> > + /* this is hacky, but we don't want to allocate
> > + * for no good reason
> > + */
> > + ((char *)glob)[n - 1] = '\0';
> > + subs = strstr(s, glob + 1);
> > + ((char *)glob)[n - 1] = '*';
> > +
> > + return subs != NULL;
> > + } else if (glob[0] == '*') {
> > + size_t nn = strlen(s);
> > + /* suffix match */
> > +
> > + /* too short for a given suffix */
> > + if (nn < n - 1)
> > + return false;
> > + return strcmp(s + nn - (n - 1), glob + 1) == 0;
> > + } else if (glob[n - 1] == '*') {
> > + /* prefix match */
> > + return strncmp(s, glob, n - 1) == 0;
> > + } else {
> > + /* exact match */
> > + return strcmp(glob, s) == 0;
> > + }
> > +}
> > +
> > +static int resolve_fprobe_cb(void *arg, unsigned long long sym_addr,
> > + char sym_type, const char *sym_name)
> > +{
> > + struct fprobe_resolve *res = arg;
> > + __u64 *p;
> > +
> > + if (!glob_matches(res->name, sym_name))
> > + return 0;
> > +
> > + if (res->cnt == res->alloc) {
> > + res->alloc = max((__u32) 16, res->alloc * 3 / 2);
> > + p = libbpf_reallocarray(res->addrs, res->alloc, sizeof(*res->addrs));
> > + if (!p)
> > + return -ENOMEM;
> > + res->addrs = p;
> > + }
>
> please use libbpf_ensure_mem() instead

ok

>
>
> > + res->addrs[res->cnt++] = sym_addr;
> > + return 0;
> > +}
> > +
> > +static struct bpf_link *
> > +attach_fprobe_opts(const struct bpf_program *prog,
> > + const char *func_name,
>
> func_glob or func_pattern?

ok

>
> > + const struct bpf_kprobe_opts *kopts)
> > +{
> > + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
> > + struct fprobe_resolve res = {
> > + .name = func_name,
> > + };
> > + struct bpf_link *link = NULL;
> > + char errmsg[STRERR_BUFSIZE];
> > + int err, link_fd, prog_fd;
> > + bool retprobe;
> > +
> > + err = libbpf__kallsyms_parse(&res, resolve_fprobe_cb);
> > + if (err)
> > + goto error;
> > + if (!res.cnt) {
> > + err = -ENOENT;
> > + goto error;
> > + }
> > +
> > + retprobe = OPTS_GET(kopts, retprobe, false);
> > +
> > + opts.fprobe.addrs = (__u64) res.addrs;
>
> ptr_to_u64()

ok

>
> > + opts.fprobe.cnt = res.cnt;
> > + opts.flags = retprobe ? BPF_F_FPROBE_RETURN : 0;
> > +
> > + link = calloc(1, sizeof(*link));
> > + if (!link) {
> > + err = -ENOMEM;
> > + goto error;
> > + }
> > + link->detach = &bpf_link__detach_fd;
> > +
> > + prog_fd = bpf_program__fd(prog);
> > + link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FPROBE, &opts);
> > + if (link_fd < 0) {
> > + err = -errno;
> > + pr_warn("prog '%s': failed to attach to %s: %s\n",
> > + prog->name, res.name,
> > + libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
> > + goto error;
> > + }
> > + link->fd = link_fd;
> > + free(res.addrs);
> > + return link;
> > +
> > +error:
> > + free(link);
> > + free(res.addrs);
> > + return libbpf_err_ptr(err);
> > +}
> > +
> > struct bpf_link *
> > bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
> > const char *func_name,
> > @@ -10047,6 +10166,9 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
> > if (!OPTS_VALID(opts, bpf_kprobe_opts))
> > return libbpf_err_ptr(-EINVAL);
> >
> > + if (prog->expected_attach_type == BPF_TRACE_FPROBE)
> > + return attach_fprobe_opts(prog, func_name, opts);
> > +
> > retprobe = OPTS_GET(opts, retprobe, false);
> > offset = OPTS_GET(opts, offset, 0);
> > pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
> > @@ -10112,6 +10234,14 @@ struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
> > return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
> > }
> >
> > +static int init_kprobe(struct bpf_program *prog, long cookie)
> > +{
> > + /* If we have wildcard, switch to fprobe link. */
> > + if (strchr(prog->sec_name, '*'))
>
> ugh... :( maybe let's have a separate SEC("kprobe.multi/<glob>") and
> same for kretprobe?

I agree, a new SEC type is clearer ;-) ok

thanks,
jirka