2022-03-17 04:19:52

by Jiri Olsa

Subject: [PATCHv3 bpf-next 09/13] libbpf: Add bpf_program__attach_kprobe_multi_opts function

Adding a bpf_program__attach_kprobe_multi_opts function for attaching
a kprobe program to multiple functions:

  struct bpf_link *
  bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
                                        const char *pattern,
                                        const struct bpf_kprobe_multi_opts *opts);

The user can specify the functions to attach either with the 'pattern'
argument, which allows wildcards ('*' and '?' are supported), or by
providing symbols or addresses directly through the opts argument.
These three options are mutually exclusive.

When using symbols or addresses, the user can also provide a cookie
value for each symbol/address, which can be retrieved later in the bpf
program with the bpf_get_attach_cookie helper.

  struct bpf_kprobe_multi_opts {
	size_t sz;
	const char **syms;
	const unsigned long *addrs;
	const __u64 *cookies;
	size_t cnt;
	bool retprobe;
	size_t :0;
  };

Symbols, addresses and cookies are provided through the opts object
(syms/addrs/cookies) as array pointers with a specified count (cnt).

Each cookie value is paired with the function address or symbol at the
same array index.

The program can also be attached as a return probe if 'retprobe' is set.
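
To make the syms/cookies mode concrete, here is a minimal userspace
sketch (the symbol names and cookie values are made up; error handling
is elided):

/* Attach 'prog' (from an already loaded bpf_object) to two symbols,
 * pairing each with a cookie that the bpf program can read back via
 * bpf_get_attach_cookie().
 */
#include <bpf/libbpf.h>

static struct bpf_link *attach_two_syms(struct bpf_program *prog)
{
	const char *syms[] = { "ksys_read", "ksys_write" };
	const __u64 cookies[] = { 0xdead, 0xbeef };
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.syms = syms,
		.cookies = cookies,
		.cnt = 2,
	);

	/* 'pattern' must be NULL when syms/addrs are used */
	return bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
}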

For quick usage with a NULL opts argument, like:

  bpf_program__attach_kprobe_multi_opts(prog, "ksys_*", NULL)

'prog' will be attached as a kprobe to all functions matching 'ksys_*'.

Also adding new program sections for automatic attachment:

kprobe.multi/<symbol_pattern>
kretprobe.multi/<symbol_pattern>

The symbol_pattern is used as the 'pattern' argument of the
bpf_program__attach_kprobe_multi_opts function.
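
And a minimal bpf-side sketch of a program using the new section (maps
and real logic omitted; the handler name is made up):

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

/* Auto-attached to all functions matching ksys_* on skeleton attach.
 * bpf_get_attach_cookie() returns the paired cookie when the program
 * was attached through syms/addrs + cookies, and 0 otherwise.
 */
SEC("kprobe.multi/ksys_*")
int handle_ksys(struct pt_regs *ctx)
{
	__u64 cookie = bpf_get_attach_cookie(ctx);

	bpf_printk("ksys entry, cookie %llu", cookie);
	return 0;
}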

Signed-off-by: Jiri Olsa <[email protected]>
---
tools/lib/bpf/libbpf.c | 160 +++++++++++++++++++++++++++++++++++++++
tools/lib/bpf/libbpf.h | 23 ++++++
tools/lib/bpf/libbpf.map | 1 +
3 files changed, 184 insertions(+)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 1ca520a29fdb..f3a31478e23b 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -8610,6 +8610,7 @@ static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf
static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);

@@ -8621,6 +8622,8 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE),
SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE),
+ SEC_DEF("kprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
+ SEC_DEF("kretprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE),
SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED),
SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX),
@@ -10224,6 +10227,139 @@ struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
}

+/* Adapted from perf/util/string.c */
+static bool glob_match(const char *str, const char *pat)
+{
+	while (*str && *pat && *pat != '*') {
+		if (*pat == '?') { /* Matches any single character */
+			str++;
+			pat++;
+			continue;
+		}
+		if (*str != *pat)
+			return false;
+		str++;
+		pat++;
+	}
+	/* Check wild card */
+	if (*pat == '*') {
+		while (*pat == '*')
+			pat++;
+		if (!*pat) /* Tail wild card matches all */
+			return true;
+		while (*str)
+			if (glob_match(str++, pat))
+				return true;
+	}
+	return !*str && !*pat;
+}
+
+struct kprobe_multi_resolve {
+	const char *pattern;
+	unsigned long *addrs;
+	size_t cap;
+	size_t cnt;
+};
+
+static int
+resolve_kprobe_multi_cb(unsigned long long sym_addr, char sym_type,
+			const char *sym_name, void *ctx)
+{
+	struct kprobe_multi_resolve *res = ctx;
+	int err;
+
+	if (!glob_match(sym_name, res->pattern))
+		return 0;
+
+	err = libbpf_ensure_mem((void **) &res->addrs, &res->cap, sizeof(unsigned long),
+				res->cnt + 1);
+	if (err)
+		return err;
+
+	res->addrs[res->cnt++] = (unsigned long) sym_addr;
+	return 0;
+}
+
+struct bpf_link *
+bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
+				      const char *pattern,
+				      const struct bpf_kprobe_multi_opts *opts)
+{
+	LIBBPF_OPTS(bpf_link_create_opts, lopts);
+	struct kprobe_multi_resolve res = {
+		.pattern = pattern,
+	};
+	struct bpf_link *link = NULL;
+	char errmsg[STRERR_BUFSIZE];
+	const unsigned long *addrs;
+	int err, link_fd, prog_fd;
+	const __u64 *cookies;
+	const char **syms;
+	bool retprobe;
+	size_t cnt;
+
+	if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
+		return libbpf_err_ptr(-EINVAL);
+
+	syms = OPTS_GET(opts, syms, false);
+	addrs = OPTS_GET(opts, addrs, false);
+	cnt = OPTS_GET(opts, cnt, false);
+	cookies = OPTS_GET(opts, cookies, false);
+
+	if (!pattern && !addrs && !syms)
+		return libbpf_err_ptr(-EINVAL);
+	if (pattern && (addrs || syms || cookies || cnt))
+		return libbpf_err_ptr(-EINVAL);
+	if (!pattern && !cnt)
+		return libbpf_err_ptr(-EINVAL);
+	if (addrs && syms)
+		return libbpf_err_ptr(-EINVAL);
+
+	if (pattern) {
+		err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res);
+		if (err)
+			goto error;
+		if (!res.cnt) {
+			err = -ENOENT;
+			goto error;
+		}
+		addrs = res.addrs;
+		cnt = res.cnt;
+	}
+
+	retprobe = OPTS_GET(opts, retprobe, false);
+
+	lopts.kprobe_multi.syms = syms;
+	lopts.kprobe_multi.addrs = addrs;
+	lopts.kprobe_multi.cookies = cookies;
+	lopts.kprobe_multi.cnt = cnt;
+	lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;
+
+	link = calloc(1, sizeof(*link));
+	if (!link) {
+		err = -ENOMEM;
+		goto error;
+	}
+	link->detach = &bpf_link__detach_fd;
+
+	prog_fd = bpf_program__fd(prog);
+	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
+	if (link_fd < 0) {
+		err = -errno;
+		pr_warn("prog '%s': failed to attach: %s\n",
+			prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+		goto error;
+	}
+	link->fd = link_fd;
+	free(res.addrs);
+	return link;
+
+error:
+	free(link);
+	free(res.addrs);
+	return libbpf_err_ptr(err);
+}
+
static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
@@ -10255,6 +10391,30 @@ static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf
return libbpf_get_error(*link);
}

+static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+	const char *spec;
+	char *pattern;
+	int n;
+
+	opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
+	if (opts.retprobe)
+		spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
+	else
+		spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
+
+	n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
+	if (n < 1) {
+		pr_warn("kprobe multi pattern is invalid: %s\n", spec);
+		return -EINVAL;
+	}
+
+	*link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
+	free(pattern);
+	return libbpf_get_error(*link);
+}
+
static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
const char *binary_path, uint64_t offset)
{
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c1b0c2ef14d8..d5239fb4abdc 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -425,6 +425,29 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
				const char *func_name,
				const struct bpf_kprobe_opts *opts);

+struct bpf_kprobe_multi_opts {
+	/* size of this struct, for forward/backward compatibility */
+	size_t sz;
+	/* array of function symbols to attach */
+	const char **syms;
+	/* array of function addresses to attach */
+	const unsigned long *addrs;
+	/* array of user-provided values fetchable through bpf_get_attach_cookie */
+	const __u64 *cookies;
+	/* number of elements in syms/addrs/cookies arrays */
+	size_t cnt;
+	/* create return kprobes */
+	bool retprobe;
+	size_t :0;
+};
+
+#define bpf_kprobe_multi_opts__last_field retprobe
+
+LIBBPF_API struct bpf_link *
+bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
+				      const char *pattern,
+				      const struct bpf_kprobe_multi_opts *opts);
+
struct bpf_uprobe_opts {
/* size of this struct, for forward/backward compatiblity */
size_t sz;
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index df1b947792c8..554c56e6e5d3 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -444,4 +444,5 @@ LIBBPF_0.8.0 {
	global:
		libbpf_register_prog_handler;
		libbpf_unregister_prog_handler;
+		bpf_program__attach_kprobe_multi_opts;
} LIBBPF_0.7.0;
--
2.35.1


2022-03-18 07:45:06

by Alexei Starovoitov

Subject: Re: [PATCHv3 bpf-next 09/13] libbpf: Add bpf_program__attach_kprobe_multi_opts function

On Wed, Mar 16, 2022 at 5:26 AM Jiri Olsa <[email protected]> wrote:
> +
> +struct bpf_link *
> +bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
> + const char *pattern,
> + const struct bpf_kprobe_multi_opts *opts)
> +{
> + LIBBPF_OPTS(bpf_link_create_opts, lopts);
> + struct kprobe_multi_resolve res = {
> + .pattern = pattern,
> + };
> + struct bpf_link *link = NULL;
> + char errmsg[STRERR_BUFSIZE];
> + const unsigned long *addrs;
> + int err, link_fd, prog_fd;
> + const __u64 *cookies;
> + const char **syms;
> + bool retprobe;
> + size_t cnt;
> +
> + if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
> + return libbpf_err_ptr(-EINVAL);
> +
> + syms = OPTS_GET(opts, syms, false);
> + addrs = OPTS_GET(opts, addrs, false);
> + cnt = OPTS_GET(opts, cnt, false);
> + cookies = OPTS_GET(opts, cookies, false);
> +
> + if (!pattern && !addrs && !syms)
> + return libbpf_err_ptr(-EINVAL);
> + if (pattern && (addrs || syms || cookies || cnt))
> + return libbpf_err_ptr(-EINVAL);
> + if (!pattern && !cnt)
> + return libbpf_err_ptr(-EINVAL);
> + if (addrs && syms)
> + return libbpf_err_ptr(-EINVAL);
> +
> + if (pattern) {
> + err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res);
> + if (err)
> + goto error;
> + if (!res.cnt) {
> + err = -ENOENT;
> + goto error;
> + }
> + addrs = res.addrs;
> + cnt = res.cnt;
> + }

Thanks Jiri.
Great stuff and a major milestone!
I've applied Masami's and your patches to bpf-next.

But the above needs more work.
Currently test_progs -t kprobe_multi
takes 4 seconds on lockdep+debug kernel.
Mainly because of the above loop.

  18.05%  test_progs  [kernel.kallsyms]  [k] kallsyms_expand_symbol.constprop.4
  12.53%  test_progs  libc-2.28.so       [.] _IO_vfscanf
   6.31%  test_progs  [kernel.kallsyms]  [k] number
   4.66%  test_progs  [kernel.kallsyms]  [k] format_decode
   4.65%  test_progs  [kernel.kallsyms]  [k] string_nocheck

Single test_skel_api() subtest takes almost a second.

A cache inside libbpf probably won't help.
Maybe introduce a bpf iterator for kallsyms?

On the kernel side kprobe_multi_resolve_syms() looks similarly inefficient.
I'm not sure whether it would be a bottleneck though.

Orthogonal to this issue, please add a new stress test
to selftests/bpf that attaches to a lot of functions.
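
For reference, a rough shape such a stress test could take (a sketch
only; the pattern and timing harness below are made up, not the
selftest that was eventually added):

#include <stdio.h>
#include <time.h>
#include <bpf/libbpf.h>

/* Attach one kprobe.multi program to a broad pattern and report how
 * long the attach itself took.
 */
static void bench_attach(struct bpf_program *prog)
{
	struct timespec t0, t1;
	struct bpf_link *link;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	link = bpf_program__attach_kprobe_multi_opts(prog, "kmem_*", NULL);
	clock_gettime(CLOCK_MONOTONIC, &t1);
	if (libbpf_get_error(link))
		return;

	fprintf(stderr, "attach took %.3f ms\n",
		(t1.tv_sec - t0.tv_sec) * 1e3 +
		(t1.tv_nsec - t0.tv_nsec) / 1e6);
	bpf_link__destroy(link);
}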

2022-03-18 14:13:22

by Jiri Olsa

Subject: Re: [PATCHv3 bpf-next 09/13] libbpf: Add bpf_program__attach_kprobe_multi_opts function

On Thu, Mar 17, 2022 at 10:14:28PM -0700, Andrii Nakryiko wrote:

SNIP

> > But the above needs more work.
> > Currently test_progs -t kprobe_multi
> > takes 4 seconds on lockdep+debug kernel.
> > Mainly because of the above loop.
> >
> >   18.05%  test_progs  [kernel.kallsyms]  [k] kallsyms_expand_symbol.constprop.4
> >   12.53%  test_progs  libc-2.28.so       [.] _IO_vfscanf
> >    6.31%  test_progs  [kernel.kallsyms]  [k] number
> >    4.66%  test_progs  [kernel.kallsyms]  [k] format_decode
> >    4.65%  test_progs  [kernel.kallsyms]  [k] string_nocheck
> >
> > Single test_skel_api() subtest takes almost a second.
> >
> > A cache inside libbpf probably won't help.
> > Maybe introduce a bpf iterator for kallsyms?
>
> BPF iterator for kallsyms is a great idea! So many benefits:

> - it should be significantly more efficient *and* simpler than
> parsing /proc/kallsyms;
> - there were some upstream patches recording ksym length (i.e.,
> function size); I don't remember if that ever landed, but beyond
> that, the main complication in exposing it to user space was the
> concern about the /proc/kallsyms format being an ABI. With the BPF
> iterator we can easily provide that symbol size without any breakage.
> This would be great!

yes, great idea.. I was cc-ed on patches adding extra stuff to kallsyms:
https://lore.kernel.org/lkml/[email protected]/

this could be a way out ;-) cc-ing Nick

> - we can allow parameterizing the iterator with options like: skip or
> include module symbols, specify a set of symbol types (function,
> variable, etc.), and so on. This would speed everything up in common
> cases by not even decompressing irrelevant names.
>
> In short, a kallsyms iterator would be immensely useful for any sort
> of tracing tool that deals with kernel stack traces or kallsyms in
> general.

I wonder whether we could make some use of it in perf as well; there's
some guessing wrt symbol sizes when we parse kallsyms that we could get
rid of. I will work on that and try to add this

>
> But in this particular case, kprobe_multi_resolve_syms()
> implementation is extremely suboptimal. I didn't realize during review
> that kallsyms_lookup_name() is a linear scan... If that's not going to
> be changed to O(log(N)) some time soon, we need to reimplement
> kprobe_multi_resolve_syms(), probably.
>
> One way would be to sort user strings lexicographically and then do a
> linear scan over all kallsyms, for each symbol perform binary search
> over a sorted array of user strings. Stop once all the positions were
> "filled in" (we'd need to keep a bitmap or bool[], probably). This way
> it's going to be O(M log N) instead of O(M*N) as it is right now.

ok, I did something similar in the multi-trampoline patchset you
suggested; I think that will work here as well

>
> BTW, Jiri, libbpf.map is supposed to have an alphabetically ordered
> list of functions; it would be good to move
> bpf_program__attach_kprobe_multi_opts a bit higher, before the
> libbpf_* functions.

ah right, sorry.. I'll send a fix with the follow-up changes

thanks,
jirka

2022-03-18 15:33:41

by Andrii Nakryiko

Subject: Re: [PATCHv3 bpf-next 09/13] libbpf: Add bpf_program__attach_kprobe_multi_opts function

On Thu, Mar 17, 2022 at 8:53 PM Alexei Starovoitov
<[email protected]> wrote:
>
> On Wed, Mar 16, 2022 at 5:26 AM Jiri Olsa <[email protected]> wrote:
> > +
> > +struct bpf_link *
> > +bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
> > + const char *pattern,
> > + const struct bpf_kprobe_multi_opts *opts)
> > +{
> > + LIBBPF_OPTS(bpf_link_create_opts, lopts);
> > + struct kprobe_multi_resolve res = {
> > + .pattern = pattern,
> > + };
> > + struct bpf_link *link = NULL;
> > + char errmsg[STRERR_BUFSIZE];
> > + const unsigned long *addrs;
> > + int err, link_fd, prog_fd;
> > + const __u64 *cookies;
> > + const char **syms;
> > + bool retprobe;
> > + size_t cnt;
> > +
> > + if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
> > + return libbpf_err_ptr(-EINVAL);
> > +
> > + syms = OPTS_GET(opts, syms, false);
> > + addrs = OPTS_GET(opts, addrs, false);
> > + cnt = OPTS_GET(opts, cnt, false);
> > + cookies = OPTS_GET(opts, cookies, false);
> > +
> > + if (!pattern && !addrs && !syms)
> > + return libbpf_err_ptr(-EINVAL);
> > + if (pattern && (addrs || syms || cookies || cnt))
> > + return libbpf_err_ptr(-EINVAL);
> > + if (!pattern && !cnt)
> > + return libbpf_err_ptr(-EINVAL);
> > + if (addrs && syms)
> > + return libbpf_err_ptr(-EINVAL);
> > +
> > + if (pattern) {
> > + err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res);
> > + if (err)
> > + goto error;
> > + if (!res.cnt) {
> > + err = -ENOENT;
> > + goto error;
> > + }
> > + addrs = res.addrs;
> > + cnt = res.cnt;
> > + }
>
> Thanks Jiri.
> Great stuff and a major milestone!
> I've applied Masami's and your patches to bpf-next.
>
> But the above needs more work.
> Currently test_progs -t kprobe_multi
> takes 4 seconds on lockdep+debug kernel.
> Mainly because of the above loop.
>
>   18.05%  test_progs  [kernel.kallsyms]  [k] kallsyms_expand_symbol.constprop.4
>   12.53%  test_progs  libc-2.28.so       [.] _IO_vfscanf
>    6.31%  test_progs  [kernel.kallsyms]  [k] number
>    4.66%  test_progs  [kernel.kallsyms]  [k] format_decode
>    4.65%  test_progs  [kernel.kallsyms]  [k] string_nocheck
>
> Single test_skel_api() subtest takes almost a second.
>
> A cache inside libbpf probably won't help.
> Maybe introduce a bpf iterator for kallsyms?

BPF iterator for kallsyms is a great idea! So many benefits:
- it should be significantly more efficient *and* simpler than
parsing /proc/kallsyms;
- there were some upstream patches recording ksym length (i.e.,
function size); I don't remember if that ever landed, but beyond
that, the main complication in exposing it to user space was the
concern about the /proc/kallsyms format being an ABI. With the BPF
iterator we can easily provide that symbol size without any breakage.
This would be great!
- we can allow parameterizing the iterator with options like: skip or
include module symbols, specify a set of symbol types (function,
variable, etc.), and so on. This would speed everything up in common
cases by not even decompressing irrelevant names.

In short, a kallsyms iterator would be immensely useful for any sort
of tracing tool that deals with kernel stack traces or kallsyms in
general.
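
To sketch the idea (purely illustrative: no such iterator existed at
the time of this thread, so the context type and field names below are
assumptions, not a real API):

/* Hypothetical kallsyms iterator program; bpf_iter__ksym and the
 * kallsym_iter fields are assumed, not an existing interface.
 */
SEC("iter/ksym")
int dump_ksym(struct bpf_iter__ksym *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct kallsym_iter *iter = ctx->ksym;

	if (!iter)
		return 0;

	/* emit address and name without the /proc/kallsyms text format */
	BPF_SEQ_PRINTF(seq, "0x%lx %s\n", iter->value, iter->name);
	return 0;
}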

But in this particular case, kprobe_multi_resolve_syms()
implementation is extremely suboptimal. I didn't realize during review
that kallsyms_lookup_name() is a linear scan... If that's not going to
be changed to O(log(N)) some time soon, we need to reimplement
kprobe_multi_resolve_syms(), probably.

One way would be to sort user strings lexicographically and then do a
linear scan over all kallsyms, for each symbol perform binary search
over a sorted array of user strings. Stop once all the positions were
"filled in" (we'd need to keep a bitmap or bool[], probably). This way
it's going to be O(M log N) instead of O(M*N) as it is right now.
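
A compact sketch of that scheme (plain C for illustration only, not
the kernel implementation): the M user strings are qsort()ed up front,
then a single pass over kallsyms calls this once per symbol, doing an
O(log M) binary search instead of M strcmp()s:

#include <stdbool.h>
#include <string.h>

static void on_kallsym(const char *name, unsigned long addr,
		       const char **sorted_syms, unsigned long *addrs,
		       bool *filled, size_t cnt, size_t *remaining)
{
	size_t lo = 0, hi = cnt;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		int c = strcmp(name, sorted_syms[mid]);

		if (!c) {
			if (!filled[mid]) {
				addrs[mid] = addr;
				filled[mid] = true;
				(*remaining)--;	/* caller stops the scan at 0 */
			}
			return;
		}
		if (c < 0)
			hi = mid;
		else
			lo = mid + 1;
	}
}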

BTW, Jiri, libbpf.map is supposed to have an alphabetically ordered
list of functions; it would be good to move
bpf_program__attach_kprobe_multi_opts a bit higher, before the
libbpf_* functions.



>
> On the kernel side kprobe_multi_resolve_syms() looks similarly inefficient.
> I'm not sure whether it would be a bottleneck though.
>
> Orthogonal to this issue, please add a new stress test
> to selftests/bpf that attaches to a lot of functions.

2022-03-18 18:21:44

by Jiri Olsa

Subject: Re: [PATCHv3 bpf-next 09/13] libbpf: Add bpf_program__attach_kprobe_multi_opts function

On Thu, Mar 17, 2022 at 08:53:15PM -0700, Alexei Starovoitov wrote:
> On Wed, Mar 16, 2022 at 5:26 AM Jiri Olsa <[email protected]> wrote:
> > +
> > +struct bpf_link *
> > +bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
> > + const char *pattern,
> > + const struct bpf_kprobe_multi_opts *opts)
> > +{
> > + LIBBPF_OPTS(bpf_link_create_opts, lopts);
> > + struct kprobe_multi_resolve res = {
> > + .pattern = pattern,
> > + };
> > + struct bpf_link *link = NULL;
> > + char errmsg[STRERR_BUFSIZE];
> > + const unsigned long *addrs;
> > + int err, link_fd, prog_fd;
> > + const __u64 *cookies;
> > + const char **syms;
> > + bool retprobe;
> > + size_t cnt;
> > +
> > + if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
> > + return libbpf_err_ptr(-EINVAL);
> > +
> > + syms = OPTS_GET(opts, syms, false);
> > + addrs = OPTS_GET(opts, addrs, false);
> > + cnt = OPTS_GET(opts, cnt, false);
> > + cookies = OPTS_GET(opts, cookies, false);
> > +
> > + if (!pattern && !addrs && !syms)
> > + return libbpf_err_ptr(-EINVAL);
> > + if (pattern && (addrs || syms || cookies || cnt))
> > + return libbpf_err_ptr(-EINVAL);
> > + if (!pattern && !cnt)
> > + return libbpf_err_ptr(-EINVAL);
> > + if (addrs && syms)
> > + return libbpf_err_ptr(-EINVAL);
> > +
> > + if (pattern) {
> > + err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res);
> > + if (err)
> > + goto error;
> > + if (!res.cnt) {
> > + err = -ENOENT;
> > + goto error;
> > + }
> > + addrs = res.addrs;
> > + cnt = res.cnt;
> > + }
>
> Thanks Jiri.
> Great stuff and a major milestone!
> I've applied Masami's and your patches to bpf-next.

great, thanks

>
> But the above needs more work.
> Currently test_progs -t kprobe_multi
> takes 4 seconds on lockdep+debug kernel.
> Mainly because of the above loop.
>
>   18.05%  test_progs  [kernel.kallsyms]  [k] kallsyms_expand_symbol.constprop.4
>   12.53%  test_progs  libc-2.28.so       [.] _IO_vfscanf
>    6.31%  test_progs  [kernel.kallsyms]  [k] number
>    4.66%  test_progs  [kernel.kallsyms]  [k] format_decode
>    4.65%  test_progs  [kernel.kallsyms]  [k] string_nocheck
>
> Single test_skel_api() subtest takes almost a second.

hm, I'll check on a lockdep+debug kernel.. I think this test
should be going through kallsyms just once, will check

anyway, libbpf_kallsyms_parse could use ksym_get_addr, which is
now cached

>
> A cache inside libbpf probably won't help.
> Maybe introduce a bpf iterator for kallsyms?
>
> On the kernel side kprobe_multi_resolve_syms() looks similarly inefficient.
> I'm not sure whether it would be a bottleneck though.
>
> Orthogonal to this issue, please add a new stress test
> to selftests/bpf that attaches to a lot of functions.

ok, will add that

thanks,
jirka