The series adds cross compiler support for BPF samples and fixes issues
building for arm64.
Tested on my arm64 platform with good results; for x86 I have only build-tested
it. There are no errors when building, but there is a build warning for x86
that I haven't yet gotten rid of (I believe the warning itself is of no
consequence and is rather a result of using the preprocessor in this way). I
would appreciate any help testing on x86, and I look forward to any feedback on
the patches, thanks!
v2 just adds Juri's correct email address as he's interested in this work.
Sorry about the noise, thanks!
Joel Fernandes (5):
samples/bpf: Use getppid instead of getpgrp for array map stress
samples/bpf: Enable cross compiler support
samples/bpf: Fix inline asm issues building samples on arm64
samples/bpf: Fix pt_regs issues when cross-compiling
samples/bpf: Add documentation on cross compilation
samples/bpf/Makefile | 46 +++++++++++++++++++++++++-----
samples/bpf/README.rst | 10 +++++++
samples/bpf/arm64_asmstubs.h | 3 ++
samples/bpf/bpf_helpers.h | 61 ++++++++++++++++++++++++++++++++--------
samples/bpf/generic_asmstubs.h | 4 +++
samples/bpf/map_perf_test_kern.c | 2 +-
samples/bpf/map_perf_test_user.c | 2 +-
7 files changed, 107 insertions(+), 21 deletions(-)
create mode 100644 samples/bpf/arm64_asmstubs.h
create mode 100644 samples/bpf/generic_asmstubs.h
CC: Alexei Starovoitov <[email protected]>
CC: Daniel Borkmann <[email protected]>
--
2.14.0.rc1.383.gd1ce394fe2-goog
When cross compiling, BPF samples are built with HOSTCC; however, what we really
want is to use the cross compiler to build for the cross target, since that is
what will run the BPF sample code. Detect this case and also pass -static in
HOSTLDFLAGS, since we often have no control over which C library the cross
target is running and it is unwise to rely on it.
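For reference, the intended usage is the same as documented in the README
update later in this series; assuming an aarch64 GNU toolchain is installed,
something like:
  export ARCH=arm64
  export CROSS_COMPILE="aarch64-linux-gnu-"
  make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang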
Signed-off-by: Joel Fernandes <[email protected]>
---
samples/bpf/Makefile | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 6c7468eb3684..e5642c8c144d 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -152,6 +152,12 @@ HOSTLOADLIBES_test_map_in_map += -lelf
LLC ?= llc
CLANG ?= clang
+# Detect that we're cross compiling and use the right compilers and flags
+ifdef CROSS_COMPILE
+HOSTCC = $(CROSS_COMPILE)gcc
+HOSTLDFLAGS += -static
+endif
+
# Trick to allow make to be run from this directory
all:
$(MAKE) -C ../../ $(CURDIR)/
--
2.14.0.rc1.383.gd1ce394fe2-goog
BPF samples fail to build when cross-compiling for arm64 because of incorrect
pt_regs parameter selection. This happens because clang defines __x86_64__ and
bpf_helpers.h then assumes we are building for x86. Since clang is building for
the BPF target, it shouldn't make assumptions about which architecture the BPF
program is going to run on. To fix this, let's pass ARCH so the header knows
which architecture the BPF program is being compiled for and can use the
correct pt_regs definitions.
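To illustrate why this matters, here is a minimal kprobe program sketch in the
style of the existing samples (illustrative only, not part of this patch); it
only reads the right register if PT_REGS_PARM1() expands to the layout of the
architecture the program actually runs on:

#include <linux/version.h>
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

SEC("kprobe/sys_write")
int bpf_prog1(struct pt_regs *ctx)
{
	/* first syscall argument: ->regs[0] on arm64, ->di on x86 */
	unsigned long fd = PT_REGS_PARM1(ctx);
	char fmt[] = "write on fd %lu\n";

	bpf_trace_printk(fmt, sizeof(fmt), fd);
	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;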
Signed-off-by: Joel Fernandes <[email protected]>
---
samples/bpf/Makefile | 2 +-
samples/bpf/bpf_helpers.h | 49 +++++++++++++++++++++++++++++++++++++++++------
2 files changed, 44 insertions(+), 7 deletions(-)
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 7591cdd7fe69..8cbcaffe4001 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -199,7 +199,7 @@ ASM_STUBS := ${ARCH_ASM_STUBS} -include $(src)/generic_asmstubs.h
CLANG_ARGS = $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
- $(ASM_STUBS) \
+ -D__TARGET_ARCH_$(ARCH) $(ASM_STUBS) \
-Wno-compare-distinct-pointer-types \
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 67c9c4438e4b..199d2e32703a 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -96,7 +96,42 @@ static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
static int (*bpf_skb_change_head)(void *, int len, int flags) =
(void *) BPF_FUNC_skb_change_head;
+/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
+#if defined(__TARGET_ARCH_x86)
+ #define bpf_target_x86
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_s390x)
+ #define bpf_target_s390x
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_arm64)
+ #define bpf_target_arm64
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_powerpc)
+ #define bpf_target_powerpc
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_sparc)
+ #define bpf_target_sparc
+ #define bpf_target_defined
+#else
+ #undef bpf_target_defined
+#endif
+
+/* Fall back to what the compiler says */
+#ifndef bpf_target_defined
#if defined(__x86_64__)
+ #define bpf_target_x86
+#elif defined(__s390x__)
+ #define bpf_target_s390x
+#elif defined(__aarch64__)
+ #define bpf_target_arm64
+#elif defined(__powerpc__)
+ #define bpf_target_powerpc
+#elif defined(__sparc__)
+ #define bpf_target_sparc
+#endif
+#endif
+
+#if defined(bpf_target_x86)
#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
@@ -109,7 +144,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)
-#elif defined(__s390x__)
+#elif defined(bpf_target_s390x)
#define PT_REGS_PARM1(x) ((x)->gprs[2])
#define PT_REGS_PARM2(x) ((x)->gprs[3])
@@ -122,7 +157,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
#define PT_REGS_SP(x) ((x)->gprs[15])
#define PT_REGS_IP(x) ((x)->psw.addr)
-#elif defined(__aarch64__)
+#elif defined(bpf_target_arm64)
#define PT_REGS_PARM1(x) ((x)->regs[0])
#define PT_REGS_PARM2(x) ((x)->regs[1])
@@ -135,7 +170,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->pc)
-#elif defined(__powerpc__)
+#elif defined(bpf_target_powerpc)
#define PT_REGS_PARM1(x) ((x)->gpr[3])
#define PT_REGS_PARM2(x) ((x)->gpr[4])
@@ -146,7 +181,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->nip)
-#elif defined(__sparc__)
+#elif defined(bpf_target_sparc)
#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
@@ -156,6 +191,8 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
+
+/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
#define PT_REGS_IP(x) ((x)->tpc)
#else
@@ -164,10 +201,10 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
#endif
-#ifdef __powerpc__
+#ifdef bpf_target_powerpc
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
-#elif defined(__sparc__)
+#elif defined(bpf_target_sparc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#else
--
2.14.0.rc1.383.gd1ce394fe2-goog
Signed-off-by: Joel Fernandes <[email protected]>
---
samples/bpf/README.rst | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/samples/bpf/README.rst b/samples/bpf/README.rst
index 79f9a58f1872..2b906127ef54 100644
--- a/samples/bpf/README.rst
+++ b/samples/bpf/README.rst
@@ -64,3 +64,13 @@ It is also possible to point make to the newly compiled 'llc' or
'clang' command via redefining LLC or CLANG on the make command line::
make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
+
+Cross compiling samples
+-----------------------
+In order to cross-compile, say for arm64 targets, export the CROSS_COMPILE and
+ARCH environment variables before calling make. This will direct make to build
+the samples for the cross target::
+
+export ARCH=arm64
+export CROSS_COMPILE="aarch64-linux-gnu-"
+make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
--
2.14.0.rc1.383.gd1ce394fe2-goog
Inline assembly has haunted building the samples on arm64 for quite some time.
This patch uses the preprocessor to no-op all occurrences of inline asm when
compiling the BPF sample for the BPF target.
This patch reintroduces the inclusion of asm/sysreg.h, which now needs to be
included to avoid compiler errors, see [1]. Previously a hack prevented this
inclusion [2] (to avoid the exact problem this patch fixes - skipping the
inline assembler), but the hack causes other errors now and no longer works.
By using the preprocessor to no-op the inline asm occurrences, we also avoid
any future unstable hackery (such as hacks that skip asm headers) and retain
access to whatever definitions the asm headers provide, which the inline asm
issues previously kept us from using. This is the least messy of all the
hacks, in my opinion.
[1] https://lkml.org/lkml/2017/8/5/143
[2] https://lists.linaro.org/pipermail/linaro-kernel/2015-November/024036.html
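As a rough illustration of the approach (not part of the patch, and using the
same defines as generic_asmstubs.h below), host inline asm collapses to an
empty statement at preprocessing time, so a fragment like this compiles and
runs cleanly:

/* sketch: what the stubs do to a user of host inline asm */
#define bpf_noop_stub
#define asm(...) bpf_noop_stub
#define __asm__(...) bpf_noop_stub

static inline void irq_disable_example(void)
{
	/* expands to "bpf_noop_stub;", i.e. an empty statement */
	asm("msr daifset, #2" : : : "memory");
}

/* "asm volatile" does not match the function-like asm() macro, which is
 * why the perl step first rewrites it to asmvolatile() so the
 * asmvolatile(...) stub can catch it.
 */
int main(void)
{
	irq_disable_example();
	return 0;
}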
Signed-off-by: Joel Fernandes <[email protected]>
---
samples/bpf/Makefile | 40 +++++++++++++++++++++++++++++++++-------
samples/bpf/arm64_asmstubs.h | 3 +++
samples/bpf/bpf_helpers.h | 12 ++++++------
samples/bpf/generic_asmstubs.h | 4 ++++
4 files changed, 46 insertions(+), 13 deletions(-)
create mode 100644 samples/bpf/arm64_asmstubs.h
create mode 100644 samples/bpf/generic_asmstubs.h
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index e5642c8c144d..7591cdd7fe69 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -151,6 +151,8 @@ HOSTLOADLIBES_test_map_in_map += -lelf
# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
LLC ?= llc
CLANG ?= clang
+PERL ?= perl
+RM ?= rm
# Detect that we're cross compiling and use the right compilers and flags
ifdef CROSS_COMPILE
@@ -186,14 +188,38 @@ verify_target_bpf: verify_cmds
$(src)/*.c: verify_target_bpf
-# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
-# But, there is no easy way to fix it, so just exclude it since it is
-# useless for BPF samples.
-$(obj)/%.o: $(src)/%.c
- $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
- -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
+curdir := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+ifeq ($(wildcard $(curdir)/${ARCH}_asmstubs.h),)
+ ARCH_ASM_STUBS :=
+else
+ ARCH_ASM_STUBS := -include $(src)/${ARCH}_asmstubs.h
+endif
+
+ASM_STUBS := ${ARCH_ASM_STUBS} -include $(src)/generic_asmstubs.h
+
+CLANG_ARGS = $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
+ -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
+ $(ASM_STUBS) \
-Wno-compare-distinct-pointer-types \
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option \
- -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
+ -O2 -emit-llvm
+
+$(obj)/%.o: $(src)/%.c
+ # Steps to compile BPF sample while getting rid of inline asm
+ # This has the advantage of not having to skip important asm headers
+ # Step 1. Use clang preprocessor to stub out asm() calls
+ # Step 2. Replace all "asm volatile" with single keyword "asmvolatile"
+ # Step 3. Use clang preprocessor to noop all asm volatile() calls
+ # and restore asm_bpf to asm for BPF's asm directives
+ # Step 4. Compile and link
+
+ $(CLANG) -E $(CLANG_ARGS) -c $< -o - | \
+ $(PERL) -pe "s/[_\s]*asm[_\s]*volatile[_\s]*/asmvolatile/g" | \
+ $(CLANG) -E $(ASM_STUBS) - -o - | \
+ $(CLANG) -E -Dasm_bpf=asm - -o [email protected]
+
+ $(CLANG) $(CLANG_ARGS) -c [email protected] \
+ -o - | $(LLC) -march=bpf -filetype=obj -o $@
+ $(RM) [email protected]
diff --git a/samples/bpf/arm64_asmstubs.h b/samples/bpf/arm64_asmstubs.h
new file mode 100644
index 000000000000..23d47dbe61b1
--- /dev/null
+++ b/samples/bpf/arm64_asmstubs.h
@@ -0,0 +1,3 @@
+/* Special handling for current_stack_pointer */
+#define __ASM_STACK_POINTER_H
+#define current_stack_pointer 0
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 9a9c95f2c9fb..67c9c4438e4b 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -64,12 +64,12 @@ static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
* emit BPF_LD_ABS and BPF_LD_IND instructions
*/
struct sk_buff;
-unsigned long long load_byte(void *skb,
- unsigned long long off) asm("llvm.bpf.load.byte");
-unsigned long long load_half(void *skb,
- unsigned long long off) asm("llvm.bpf.load.half");
-unsigned long long load_word(void *skb,
- unsigned long long off) asm("llvm.bpf.load.word");
+unsigned long long load_byte(void *skb, unsigned long long off)
+ asm_bpf("llvm.bpf.load.byte");
+unsigned long long load_half(void *skb, unsigned long long off)
+ asm_bpf("llvm.bpf.load.half");
+unsigned long long load_word(void *skb, unsigned long long off)
+ asm_bpf("llvm.bpf.load.word");
/* a helper structure used by eBPF C program
* to describe map attributes to elf_bpf loader
diff --git a/samples/bpf/generic_asmstubs.h b/samples/bpf/generic_asmstubs.h
new file mode 100644
index 000000000000..1b9e9f5094d8
--- /dev/null
+++ b/samples/bpf/generic_asmstubs.h
@@ -0,0 +1,4 @@
+#define bpf_noop_stub
+#define asm(...) bpf_noop_stub
+#define __asm__(...) bpf_noop_stub
+#define asmvolatile(...) bpf_noop_stub
--
2.14.0.rc1.383.gd1ce394fe2-goog
When cross-compiling the BPF sample map_perf_test for aarch64, I found that
__NR_getpgrp is undefined, which causes build errors. This syscall is
deprecated and is only exposed when __ARCH_WANT_SYSCALL_DEPRECATED is defined.
To avoid having to define that, just use a different syscall (getppid) for the
array map stress test.
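For reference, a small user-space sketch (illustrative only, not part of this
patch) showing the difference; on aarch64 the asm-generic syscall table only
exposes __NR_getpgrp when __ARCH_WANT_SYSCALL_DEPRECATED is set, while
__NR_getppid is always available:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
#ifdef __NR_getpgrp
	printf("getpgrp: %ld\n", syscall(__NR_getpgrp));
#else
	/* e.g. aarch64: the deprecated syscall number is not exported */
	printf("__NR_getpgrp is not available on this ABI\n");
#endif
	/* part of the generic syscall ABI, safe to use everywhere */
	printf("getppid: %ld\n", syscall(__NR_getppid));
	return 0;
}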
Signed-off-by: Joel Fernandes <[email protected]>
---
samples/bpf/map_perf_test_kern.c | 2 +-
samples/bpf/map_perf_test_user.c | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index 245165817fbe..038ffec295cf 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -232,7 +232,7 @@ int stress_hash_map_lookup(struct pt_regs *ctx)
return 0;
}
-SEC("kprobe/sys_getpgrp")
+SEC("kprobe/sys_getppid")
int stress_array_map_lookup(struct pt_regs *ctx)
{
u32 key = 1, i;
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index 1a8894b5ac51..1e9e68942197 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -232,7 +232,7 @@ static void test_array_lookup(int cpu)
start_time = time_get_ns();
for (i = 0; i < max_cnt; i++)
- syscall(__NR_getpgrp, 0);
+ syscall(__NR_getppid, 0);
printf("%d:array_lookup %lld lookups per sec\n",
cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
}
--
2.14.0.rc1.383.gd1ce394fe2-goog
On Mon, Aug 7, 2017 at 6:06 AM, Joel Fernandes <[email protected]> wrote:
> Inline assembly has haunted building the samples on arm64 for quite some time.
> This patch uses the preprocessor to no-op all occurrences of inline asm when
> compiling the BPF sample for the BPF target.
>
> This patch reintroduces the inclusion of asm/sysreg.h, which now needs to be
> included to avoid compiler errors, see [1]. Previously a hack prevented this
> inclusion [2] (to avoid the exact problem this patch fixes - skipping the
> inline assembler), but the hack causes other errors now and no longer works.
>
> By using the preprocessor to no-op the inline asm occurrences, we also avoid
> any future unstable hackery (such as hacks that skip asm headers) and retain
> access to whatever definitions the asm headers provide, which the inline asm
> issues previously kept us from using. This is the least messy of all the
> hacks, in my opinion.
>
> [1] https://lkml.org/lkml/2017/8/5/143
> [2] https://lists.linaro.org/pipermail/linaro-kernel/2015-November/024036.html
>
> Signed-off-by: Joel Fernandes <[email protected]>
> ---
> samples/bpf/Makefile | 40 +++++++++++++++++++++++++++++++++-------
> samples/bpf/arm64_asmstubs.h | 3 +++
> samples/bpf/bpf_helpers.h | 12 ++++++------
> samples/bpf/generic_asmstubs.h | 4 ++++
> 4 files changed, 46 insertions(+), 13 deletions(-)
> create mode 100644 samples/bpf/arm64_asmstubs.h
> create mode 100644 samples/bpf/generic_asmstubs.h
>
> diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
> index e5642c8c144d..7591cdd7fe69 100644
> --- a/samples/bpf/Makefile
> +++ b/samples/bpf/Makefile
> @@ -151,6 +151,8 @@ HOSTLOADLIBES_test_map_in_map += -lelf
> # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
> LLC ?= llc
> CLANG ?= clang
> +PERL ?= perl
> +RM ?= rm
>
> # Detect that we're cross compiling and use the right compilers and flags
> ifdef CROSS_COMPILE
> @@ -186,14 +188,38 @@ verify_target_bpf: verify_cmds
>
> $(src)/*.c: verify_target_bpf
>
> -# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
> -# But, there is no easy way to fix it, so just exclude it since it is
> -# useless for BPF samples.
> -$(obj)/%.o: $(src)/%.c
> - $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
> - -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
> +curdir := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
> +ifeq ($(wildcard $(curdir)/${ARCH}_asmstubs.h),)
> + ARCH_ASM_STUBS :=
> +else
> + ARCH_ASM_STUBS := -include $(src)/${ARCH}_asmstubs.h
> +endif
> +
> +ASM_STUBS := ${ARCH_ASM_STUBS} -include $(src)/generic_asmstubs.h
> +
> +CLANG_ARGS = $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
> + -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
> + $(ASM_STUBS) \
> -Wno-compare-distinct-pointer-types \
> -Wno-gnu-variable-sized-type-not-at-end \
> -Wno-address-of-packed-member -Wno-tautological-compare \
> -Wno-unknown-warning-option \
> - -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
> + -O2 -emit-llvm
> +
> +$(obj)/%.o: $(src)/%.c
> + # Steps to compile BPF sample while getting rid of inline asm
> + # This has the advantage of not having to skip important asm headers
> + # Step 1. Use clang preprocessor to stub out asm() calls
> + # Step 2. Replace all "asm volatile" with single keyword "asmvolatile"
> + # Step 3. Use clang preprocessor to noop all asm volatile() calls
> + # and restore asm_bpf to asm for BPF's asm directives
> + # Step 4. Compile and link
> +
> + $(CLANG) -E $(CLANG_ARGS) -c $< -o - | \
> + $(PERL) -pe "s/[_\s]*asm[_\s]*volatile[_\s]*/asmvolatile/g" | \
> + $(CLANG) -E $(ASM_STUBS) - -o - | \
> + $(CLANG) -E -Dasm_bpf=asm - -o [email protected]
> +
> + $(CLANG) $(CLANG_ARGS) -c [email protected] \
> + -o - | $(LLC) -march=bpf -filetype=obj -o $@
I just found an issue here: asm_bpf ends up stubbed out as well, because the
final compile step passes CLANG_ARGS (which include the asm stubs) again. I
will fix it in the next revision. I'll also inspect the resulting object file
to make sure the asm directives in bpf_helpers.h are working.
thanks!
-Joel
> + $(RM) [email protected]
> diff --git a/samples/bpf/arm64_asmstubs.h b/samples/bpf/arm64_asmstubs.h
> new file mode 100644
> index 000000000000..23d47dbe61b1
> --- /dev/null
> +++ b/samples/bpf/arm64_asmstubs.h
> @@ -0,0 +1,3 @@
> +/* Special handling for current_stack_pointer */
> +#define __ASM_STACK_POINTER_H
> +#define current_stack_pointer 0
> diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
> index 9a9c95f2c9fb..67c9c4438e4b 100644
> --- a/samples/bpf/bpf_helpers.h
> +++ b/samples/bpf/bpf_helpers.h
> @@ -64,12 +64,12 @@ static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
> * emit BPF_LD_ABS and BPF_LD_IND instructions
> */
> struct sk_buff;
> -unsigned long long load_byte(void *skb,
> - unsigned long long off) asm("llvm.bpf.load.byte");
> -unsigned long long load_half(void *skb,
> - unsigned long long off) asm("llvm.bpf.load.half");
> -unsigned long long load_word(void *skb,
> - unsigned long long off) asm("llvm.bpf.load.word");
> +unsigned long long load_byte(void *skb, unsigned long long off)
> + asm_bpf("llvm.bpf.load.byte");
> +unsigned long long load_half(void *skb, unsigned long long off)
> + asm_bpf("llvm.bpf.load.half");
> +unsigned long long load_word(void *skb, unsigned long long off)
> + asm_bpf("llvm.bpf.load.word");
>
> /* a helper structure used by eBPF C program
> * to describe map attributes to elf_bpf loader
> diff --git a/samples/bpf/generic_asmstubs.h b/samples/bpf/generic_asmstubs.h
> new file mode 100644
> index 000000000000..1b9e9f5094d8
> --- /dev/null
> +++ b/samples/bpf/generic_asmstubs.h
> @@ -0,0 +1,4 @@
> +#define bpf_noop_stub
> +#define asm(...) bpf_noop_stub
> +#define __asm__(...) bpf_noop_stub
> +#define asmvolatile(...) bpf_noop_stub
> --
> 2.14.0.rc1.383.gd1ce394fe2-goog
>
Please, no.
The amount of hellish hacks we are adding to deal with this is getting
way out of control.
BPF programs MUST have their own set of asm headers; this is the
only way to get around this issue in the long term.
I am also strongly against adding -static to the build.
Hi Dave,
On Mon, Aug 7, 2017 at 11:28 AM, David Miller <[email protected]> wrote:
>
> Please, no.
Sorry you dislike it. I had intentionally marked it as RFC since it's an idea
I was just toying with, and I posted it early to get feedback.
>
> The amount of hellish hacks we are adding to deal with this is getting
> way out of control.
I agree with you that hellish hacks are being added, which is why it
keeps breaking. I think one of the things my series does is to add
back the inclusion of asm headers that were previously removed (that is
the worst hellish hack, in my opinion, that exists in mainline). So in
that respect my patch is an improvement and makes it possible to build
for arm64 platforms (which is currently broken in mainline).
>
> BPF programs MUST have their own set of asm headers; this is the
> only way to get around this issue in the long term.
Wouldn't that break scripts or BPF code that instruments/traces
arch-specific code?
>
> I am also strongly against adding -static to the build.
I can drop -static if you prefer; that's not an issue.
As I understand it, there are no cleaner alternatives, and this
patchset makes the samples work. I would even argue that it's more
functional than previous attempts and fixes something broken in
mainline in a more generic way. If you can provide an example of where
my patchset may not work, I would love to hear it. My whole idea was
to do it in a way that prevents future breakage. I don't think that
leaving things broken in this state for extended periods of time makes
sense, and IMHO it will slow adoption of the BPF samples on other
platforms.
thanks,
-Joel
From: Joel Fernandes <[email protected]>
Date: Mon, 7 Aug 2017 18:20:49 -0700
> On Mon, Aug 7, 2017 at 11:28 AM, David Miller <[email protected]> wrote:
>> The amount of hellish hacks we are adding to deal with this is getting
>> way out of control.
>
> I agree with you that hellish hacks are being added, which is why it
> keeps breaking. I think one of the things my series does is to add
> back the inclusion of asm headers that were previously removed (that is
> the worst hellish hack, in my opinion, that exists in mainline). So in
> that respect my patch is an improvement and makes it possible to build
> for arm64 platforms (which is currently broken in mainline).
Yeah that is a problem.
Perhaps another avenue of attack is to separate "type" header files from
stuff that has function declarations and inline assembler code.
On Tue, Aug 8, 2017 at 8:35 PM, David Miller <[email protected]> wrote:
> From: Joel Fernandes <[email protected]>
> Date: Mon, 7 Aug 2017 18:20:49 -0700
>
>> On Mon, Aug 7, 2017 at 11:28 AM, David Miller <[email protected]> wrote:
>>> The amount of hellish hacks we are adding to deal with this is getting
>>> way out of control.
>>
>> I agree with you that hellish hacks are being added, which is why it
>> keeps breaking. I think one of the things my series does is to add
>> back the inclusion of asm headers that were previously removed (that is
>> the worst hellish hack, in my opinion, that exists in mainline). So in
>> that respect my patch is an improvement and makes it possible to build
>> for arm64 platforms (which is currently broken in mainline).
>
> Yeah that is a problem.
>
> Perhaps another avenue of attack is to separate "type" header files from
> stuff that has function declarations and inline assembler code.
I was thinking that's probably a huge undertaking if you meant doing
the above for every arch?
Another approach could be to modify clang to ignore inline asm
directives during compilation. Do you have any comments on such an
approach?
thanks,
-Joel