From: "Steven Rostedt (Google)" <[email protected]>

The bpf events are created by the same macro magic as the tracefs trace
events are. But to hook into bpf, it has its own code that duplicates
many of the same macros as the tracefs macros. This is an issue because
it misses bug fixes as well as any new enhancements that come with the
other trace macros.

As the trace macros have been put into their own staging files, have bpf
take advantage of this and use the tracefs stage 6 macros that the "fast
assign" portion of the trace event macro uses.

Link: https://lore.kernel.org/lkml/[email protected]/

Cc: [email protected]
Cc: Alexei Starovoitov <[email protected]>
Cc: Daniel Borkmann <[email protected]>
Reported-by: Linyu Yuan <[email protected]>
Signed-off-by: Steven Rostedt (Google) <[email protected]>
---
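For reference, the stage 6 header pulled in below is expected to already
provide the same accessor macros that bpf_probe.h open-coded, along with
the __assign_*() helpers used by the "fast assign" stage, so the include
should be a drop-in replacement. A rough sketch of the definitions in
question, matching the lines removed here (see
include/trace/stages/stage6_event_callback.h for the authoritative
copies):

  #undef __entry
  #define __entry entry

  /* Resolve a __data_loc field: the offset lives in the low 16 bits */
  #undef __get_dynamic_array
  #define __get_dynamic_array(field) \
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

  /* The length of a dynamic array is kept in the high 16 bits */
  #undef __get_dynamic_array_len
  #define __get_dynamic_array_len(field) \
		((__entry->__data_loc_##field >> 16) & 0xffff)

  /* Strings are just dynamic arrays of char */
  #undef __get_str
  #define __get_str(field) ((char *)__get_dynamic_array(field))

With the shared header, future fixes to the __data_loc/__rel_loc handling
only have to land in one place.
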
include/trace/bpf_probe.h | 45 +--------------------------------------
1 file changed, 1 insertion(+), 44 deletions(-)

diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index 155c495b89ea..1f7fc1fc590c 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -4,50 +4,7 @@
 
 #ifdef CONFIG_BPF_EVENTS
 
-#undef __entry
-#define __entry entry
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) ((char *)__get_dynamic_array(field))
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __get_cpumask
-#define __get_cpumask(field) (char *)__get_dynamic_array(field)
-
-#undef __get_sockaddr
-#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
-
-#undef __get_rel_dynamic_array
-#define __get_rel_dynamic_array(field) \
- ((void *)(&__entry->__rel_loc_##field) + \
- sizeof(__entry->__rel_loc_##field) + \
- (__entry->__rel_loc_##field & 0xffff))
-
-#undef __get_rel_dynamic_array_len
-#define __get_rel_dynamic_array_len(field) \
- ((__entry->__rel_loc_##field >> 16) & 0xffff)
-
-#undef __get_rel_str
-#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
-
-#undef __get_rel_bitmask
-#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __get_rel_cpumask
-#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __get_rel_sockaddr
-#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+#include "stages/stage6_event_callback.h"
 
 #undef __perf_count
 #define __perf_count(c) (c)
-- 
2.39.0

On Tue, Jan 24, 2023 at 12:25 PM Steven Rostedt <[email protected]> wrote:
>
> From: "Steven Rostedt (Google)" <[email protected]>
>
> The bpf events are created by the same macro magic as the tracefs trace
> events are. But to hook into bpf, it has its own code that duplicates
> many of the same macros as the tracefs macros. This is an issue because
> it misses bug fixes as well as any new enhancements that come with the
> other trace macros.
>
> As the trace macros have been put into their own staging files, have bpf
> take advantage of this and use the tracefs stage 6 macros that the "fast
> assign" portion of the trace event macro uses.
>
> Link: https://lore.kernel.org/lkml/[email protected]/
>
> Cc: [email protected]
> Cc: Alexei Starovoitov <[email protected]>
> Cc: Daniel Borkmann <[email protected]>
> Reported-by: Linyu Yuan <[email protected]>
> Signed-off-by: Steven Rostedt (Google) <[email protected]>

Nice cleanup.
Acked-by: Alexei Starovoitov <[email protected]>