Including libbpf header files should be guarded by
HAVE_LIBBPF_SUPPORT. In bpf_counter.h, move the skeleton utilities
under HAVE_BPF_SKEL.
Fixes: d6a735ef3277 ("perf bpf_counter: Move common functions to bpf_counter.h")
Reported-by: Mike Leach <[email protected]>
Signed-off-by: Ian Rogers <[email protected]>
---
tools/perf/builtin-trace.c | 2 +
tools/perf/util/bpf_counter.h | 85 ++++++++++++++++++-----------------
2 files changed, 46 insertions(+), 41 deletions(-)
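Note for reviewers: the helpers moved in this patch are thin wrappers around
libbpf calls (bpf_obj_get_info_by_fd(), bpf_prog_test_run_opts()), so they are
only usable when building with BPF skeleton support, hence HAVE_BPF_SKEL. A
minimal caller sketch follows; the function and parameter names are
illustrative, not taken from perf's sources, and it only compiles with
HAVE_BPF_SKEL defined:

#include "util/bpf_counter.h"

/*
 * Illustrative only: test-run a bperf-style leader program on each CPU so
 * that its per-CPU counts are refreshed before the counting map is read.
 */
static int trigger_leader_on_all_cpus(int leader_prog_fd, int nr_cpus)
{
	int cpu, err;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = bperf_trigger_reading(leader_prog_fd, cpu);
		if (err)
			return err;
	}
	return 0;
}
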
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 86e06f136f40..d21fe0f32a6d 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -16,7 +16,9 @@
#include "util/record.h"
#include <api/fs/tracing_path.h>
+#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
+#endif
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
diff --git a/tools/perf/util/bpf_counter.h b/tools/perf/util/bpf_counter.h
index 4dbf26408b69..9113c8bf5cb0 100644
--- a/tools/perf/util/bpf_counter.h
+++ b/tools/perf/util/bpf_counter.h
@@ -4,9 +4,12 @@
#include <linux/list.h>
#include <sys/resource.h>
+
+#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
+#endif
struct evsel;
struct target;
@@ -42,6 +45,47 @@ int bpf_counter__read(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd);
+static inline __u32 bpf_link_get_id(int fd)
+{
+ struct bpf_link_info link_info = { .id = 0, };
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.id;
+}
+
+static inline __u32 bpf_link_get_prog_id(int fd)
+{
+ struct bpf_link_info link_info = { .id = 0, };
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.prog_id;
+}
+
+static inline __u32 bpf_map_get_id(int fd)
+{
+ struct bpf_map_info map_info = { .id = 0, };
+ __u32 map_info_len = sizeof(map_info);
+
+ bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
+ return map_info.id;
+}
+
+/* trigger the leader program on a cpu */
+static inline int bperf_trigger_reading(int prog_fd, int cpu)
+{
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = NULL,
+ .ctx_size_in = 0,
+ .flags = BPF_F_TEST_RUN_ON_CPU,
+ .cpu = cpu,
+ .retval = 0,
+ );
+
+ return bpf_prog_test_run_opts(prog_fd, &opts);
+}
+
#else /* HAVE_BPF_SKEL */
#include <linux/err.h>
@@ -87,45 +131,4 @@ static inline void set_max_rlimit(void)
setrlimit(RLIMIT_MEMLOCK, &rinf);
}
-static inline __u32 bpf_link_get_id(int fd)
-{
- struct bpf_link_info link_info = { .id = 0, };
- __u32 link_info_len = sizeof(link_info);
-
- bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
- return link_info.id;
-}
-
-static inline __u32 bpf_link_get_prog_id(int fd)
-{
- struct bpf_link_info link_info = { .id = 0, };
- __u32 link_info_len = sizeof(link_info);
-
- bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
- return link_info.prog_id;
-}
-
-static inline __u32 bpf_map_get_id(int fd)
-{
- struct bpf_map_info map_info = { .id = 0, };
- __u32 map_info_len = sizeof(map_info);
-
- bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
- return map_info.id;
-}
-
-/* trigger the leader program on a cpu */
-static inline int bperf_trigger_reading(int prog_fd, int cpu)
-{
- DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
- .ctx_in = NULL,
- .ctx_size_in = 0,
- .flags = BPF_F_TEST_RUN_ON_CPU,
- .cpu = cpu,
- .retval = 0,
- );
-
- return bpf_prog_test_run_opts(prog_fd, &opts);
-}
-
#endif /* __PERF_BPF_COUNTER_H */
--
2.39.0.314.g84b9a713c41-goog
On Fri, Jan 06, 2023 at 06:25:36AM -0800, Ian Rogers wrote:
> Including libbpf header files should be guarded by
> HAVE_LIBBPF_SUPPORT. In bpf_counter.h, move the skeleton utilities
> under HAVE_BPF_SKEL.
>
> Fixes: d6a735ef3277 ("perf bpf_counter: Move common functions to bpf_counter.h")
> Reported-by: Mike Leach <[email protected]>
> Signed-off-by: Ian Rogers <[email protected]>
Can this be done in a way that reduces patch size?
- Arnaldo
On Fri, 6 Jan 2023 at 14:55, Arnaldo Carvalho de Melo <[email protected]> wrote:
>
> On Fri, Jan 06, 2023 at 06:25:36AM -0800, Ian Rogers wrote:
> > Including libbpf header files should be guarded by
> > HAVE_LIBBPF_SUPPORT. In bpf_counter.h, move the skeleton utilities
> > under HAVE_BPF_SKEL.
> >
> > Fixes: d6a735ef3277 ("perf bpf_counter: Move common functions to bpf_counter.h")
> > Reported-by: Mike Leach <[email protected]>
> > Signed-off-by: Ian Rogers <[email protected]>
>
> Can this be done in a way that reduces patch size?
>
> - Arnaldo
>
Tested-by: Mike Leach <[email protected]>
--
Mike Leach
Principal Engineer, ARM Ltd.
Manchester Design Centre. UK
On Fri, Jan 6, 2023 at 7:12 AM Mike Leach <[email protected]> wrote:
>
> On Fri, 6 Jan 2023 at 14:55, Arnaldo Carvalho de Melo <[email protected]> wrote:
> >
> > On Fri, Jan 06, 2023 at 06:25:36AM -0800, Ian Rogers wrote:
> > > Including libbpf header files should be guarded by
> > > HAVE_LIBBPF_SUPPORT. In bpf_counter.h, move the skeleton utilities
> > > under HAVE_BPF_SKEL.
> > >
> > > Fixes: d6a735ef3277 ("perf bpf_counter: Move common functions to bpf_counter.h")
> > > Reported-by: Mike Leach <[email protected]>
> > > Signed-off-by: Ian Rogers <[email protected]>
> >
> > Can this be done in a way that reduces patch size?
> >
> > - Arnaldo
Done in v3. Thanks,
Ian