2023-08-11 02:22:59

by Rong Tao

Subject: [PATCH bpf-next v2] selftests/bpf: trace_helpers.c: optimize kallsyms cache

From: Rong Tao <[email protected]>

The static ksyms array often runs out of room because the number of
symbols in /proc/kallsyms exceeds the MAX_SYMS limit. Bumping MAX_SYMS
from 300000 to 400000 in commit e76a014334a6 ("selftests/bpf: Bump and
validate MAX_SYMS") mitigated the problem, but it is not a complete
fix.

This commit switches to dynamic memory allocation, which removes the
limit on the number of kallsyms entirely.
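
With an initial capacity of 1024 entries and a 4/3 growth factor,
reaching the ~400000 symbols that previously required MAX_SYMS takes
roughly 21 reallocations, so the amortized cost of growing stays small.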

Signed-off-by: Rong Tao <[email protected]>
---
v2: Use the usual len/capacity scheme to amortize the cost of realloc,
and don't free symbols on refresh.
v1: https://lore.kernel.org/lkml/[email protected]/
---
tools/testing/selftests/bpf/trace_helpers.c | 73 ++++++++++++++-------
1 file changed, 48 insertions(+), 25 deletions(-)

diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index f83d9f65c65b..cda5a2328450 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -18,9 +18,37 @@
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"

-#define MAX_SYMS 400000
-static struct ksym syms[MAX_SYMS];
-static int sym_cnt;
+static struct {
+ struct ksym *syms;
+ unsigned int sym_cap;
+ unsigned int sym_cnt;
+} ksyms = {
+ .syms = NULL,
+ .sym_cap = 1024,
+ .sym_cnt = 0,
+};
+
+static int ksyms__add_symbol(const char *name, unsigned long addr)
+{
+ void *tmp;
+ unsigned int new_cap;
+
+ if (ksyms.sym_cnt + 1 > ksyms.sym_cap) {
+ new_cap = ksyms.sym_cap * 4 / 3;
+ tmp = realloc(ksyms.syms, sizeof(struct ksym) * new_cap);
+ if (!tmp)
+ return -ENOMEM;
+ ksyms.syms = tmp;
+ ksyms.sym_cap = new_cap;
+ }
+
+ ksyms.syms[ksyms.sym_cnt].addr = addr;
+ ksyms.syms[ksyms.sym_cnt].name = strdup(name);
+
+ ksyms.sym_cnt++;
+
+ return 0;
+}

static int ksym_cmp(const void *p1, const void *p2)
{
@@ -33,9 +61,10 @@ int load_kallsyms_refresh(void)
char func[256], buf[256];
char symbol;
void *addr;
- int i = 0;

- sym_cnt = 0;
+ ksyms.syms = malloc(sizeof(struct ksym) * ksyms.sym_cap);
+ if (!ksyms.syms)
+ return -ENOMEM;

f = fopen("/proc/kallsyms", "r");
if (!f)
@@ -46,16 +75,10 @@ int load_kallsyms_refresh(void)
break;
if (!addr)
continue;
- if (i >= MAX_SYMS)
- return -EFBIG;
-
- syms[i].addr = (long) addr;
- syms[i].name = strdup(func);
- i++;
+ ksyms__add_symbol(func, (unsigned long)addr);
}
fclose(f);
- sym_cnt = i;
- qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
+ qsort(ksyms.syms, ksyms.sym_cnt, sizeof(struct ksym), ksym_cmp);
return 0;
}

@@ -65,48 +88,48 @@ int load_kallsyms(void)
* This is called/used from multiplace places,
* load symbols just once.
*/
- if (sym_cnt)
+ if (ksyms.sym_cnt)
return 0;
return load_kallsyms_refresh();
}

struct ksym *ksym_search(long key)
{
- int start = 0, end = sym_cnt;
+ int start = 0, end = ksyms.sym_cnt;
int result;

/* kallsyms not loaded. return NULL */
- if (sym_cnt <= 0)
+ if (ksyms.sym_cnt <= 0)
return NULL;

while (start < end) {
size_t mid = start + (end - start) / 2;

- result = key - syms[mid].addr;
+ result = key - ksyms.syms[mid].addr;
if (result < 0)
end = mid;
else if (result > 0)
start = mid + 1;
else
- return &syms[mid];
+ return &ksyms.syms[mid];
}

- if (start >= 1 && syms[start - 1].addr < key &&
- key < syms[start].addr)
+ if (start >= 1 && ksyms.syms[start - 1].addr < key &&
+ key < ksyms.syms[start].addr)
/* valid ksym */
- return &syms[start - 1];
+ return &ksyms.syms[start - 1];

/* out of range. return _stext */
- return &syms[0];
+ return &ksyms.syms[0];
}

long ksym_get_addr(const char *name)
{
int i;

- for (i = 0; i < sym_cnt; i++) {
- if (strcmp(syms[i].name, name) == 0)
- return syms[i].addr;
+ for (i = 0; i < ksyms.sym_cnt; i++) {
+ if (strcmp(ksyms.syms[i].name, name) == 0)
+ return ksyms.syms[i].addr;
}

return 0;
--
2.39.3



2023-08-11 18:52:03

by Stanislav Fomichev

Subject: Re: [PATCH bpf-next v2] selftests/bpf: trace_helpers.c: optimize kallsyms cache

On 08/11, Rong Tao wrote:
> From: Rong Tao <[email protected]>
>
> The static ksyms array often runs out of room because the number of
> symbols in /proc/kallsyms exceeds the MAX_SYMS limit. Bumping MAX_SYMS
> from 300000 to 400000 in commit e76a014334a6 ("selftests/bpf: Bump and
> validate MAX_SYMS") mitigated the problem, but it is not a complete
> fix.
>
> This commit switches to dynamic memory allocation, which removes the
> limit on the number of kallsyms entirely.
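>
> With an initial capacity of 1024 entries and a 4/3 growth factor,
> reaching the ~400000 symbols that previously required MAX_SYMS takes
> roughly 21 reallocations, so the amortized cost of growing stays small.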
>
> Signed-off-by: Rong Tao <[email protected]>
> ---
> v2: Use the usual len/capacity scheme to amortize the cost of realloc,
> and don't free symbols on refresh.
> v1: https://lore.kernel.org/lkml/[email protected]/
> ---
> tools/testing/selftests/bpf/trace_helpers.c | 73 ++++++++++++++-------
> 1 file changed, 48 insertions(+), 25 deletions(-)
>
> diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
> index f83d9f65c65b..cda5a2328450 100644
> --- a/tools/testing/selftests/bpf/trace_helpers.c
> +++ b/tools/testing/selftests/bpf/trace_helpers.c
> @@ -18,9 +18,37 @@
> #define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
> #define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"
>
> -#define MAX_SYMS 400000
> -static struct ksym syms[MAX_SYMS];
> -static int sym_cnt;
> +static struct {
> + struct ksym *syms;
> + unsigned int sym_cap;
> + unsigned int sym_cnt;
> +} ksyms = {
> + .syms = NULL,
> + .sym_cap = 1024,
> + .sym_cnt = 0,
> +};

Not sure what the struct buys you here (besides grouping everything
nicely); maybe do the following?

static struct ksym *syms;
static int sym_cnt;
static int sym_cap = 1024;

That will reduce the churn elsewhere.
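
For example, the add path could then look something like this (untested
sketch, keeping your function name, just to illustrate):

static int ksyms__add_symbol(const char *name, unsigned long addr)
{
	void *tmp;

	if (sym_cnt + 1 > sym_cap) {
		unsigned int new_cap = sym_cap * 4 / 3;

		/* grow by 4/3 once the array is full */
		tmp = realloc(syms, sizeof(struct ksym) * new_cap);
		if (!tmp)
			return -ENOMEM;
		syms = tmp;
		sym_cap = new_cap;
	}

	syms[sym_cnt].addr = addr;
	syms[sym_cnt].name = strdup(name);
	/* strdup() can fail as well */
	if (!syms[sym_cnt].name)
		return -ENOMEM;
	sym_cnt++;

	return 0;
}

ksym_search() and ksym_get_addr() would keep using syms/sym_cnt
unchanged.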

> +static int ksyms__add_symbol(const char *name, unsigned long addr)
> +{
> + void *tmp;
> + unsigned int new_cap;
> +
> + if (ksyms.sym_cnt + 1 > ksyms.sym_cap) {
> + new_cap = ksyms.sym_cap * 4 / 3;
> + tmp = realloc(ksyms.syms, sizeof(struct ksym) * new_cap);
> + if (!tmp)
> + return -ENOMEM;
> + ksyms.syms = tmp;
> + ksyms.sym_cap = new_cap;
> + }
> +
> + ksyms.syms[ksyms.sym_cnt].addr = addr;
> + ksyms.syms[ksyms.sym_cnt].name = strdup(name);
> +
> + ksyms.sym_cnt++;
> +
> + return 0;
> +}
>
> static int ksym_cmp(const void *p1, const void *p2)
> {
> @@ -33,9 +61,10 @@ int load_kallsyms_refresh(void)
> char func[256], buf[256];
> char symbol;
> void *addr;
> - int i = 0;
>
> - sym_cnt = 0;
> + ksyms.syms = malloc(sizeof(struct ksym) * ksyms.sym_cap);
> + if (!ksyms.syms)
> + return -ENOMEM;
>
> f = fopen("/proc/kallsyms", "r");
> if (!f)
> @@ -46,16 +75,10 @@ int load_kallsyms_refresh(void)
> break;
> if (!addr)
> continue;
> - if (i >= MAX_SYMS)
> - return -EFBIG;
> -
> - syms[i].addr = (long) addr;
> - syms[i].name = strdup(func);
> - i++;
> + ksyms__add_symbol(func, (unsigned long)addr);

Need to check the return of ksyms__add_symbol here?
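
I.e. something along these lines (sketch only):

		err = ksyms__add_symbol(func, (unsigned long)addr);
		if (err) {
			/* avoid leaking the FILE on error */
			fclose(f);
			return err;
		}

with an int err declared next to the other locals.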

2023-08-12 06:23:40

by Rong Tao

Subject: Re: [PATCH bpf-next v2] selftests/bpf: trace_helpers.c: optimize kallsyms cache

I just submitted v3. Sorry, I sent the same patch [0][1] twice by
mistake; please pick one of them for review.

[0] https://lore.kernel.org/lkml/[email protected]/
[1] https://lore.kernel.org/lkml/[email protected]/

Good day,
Rong Tao