The bpf_tail_call_static function is currently only defined when
building with clang >= 8.

To support bpf_tail_call_static on GAS we can also enable it when
__clang__ is not defined.

We additionally need a GAS assembly syntax check in the inline
assembly so that the assembler is provided GAS-compatible assembly.

We can use the .gasversion. symbol to provide a migration path to the
LLVM syntax for GAS once that syntax is natively supported.
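
For reference, .gasversion. is a symbol that GAS predefines to its
numeric version (e.g. 24000 for binutils 2.40), while clang's
integrated assembler leaves it undefined, so a plain .ifdef test
selects the right syntax under either assembler. A minimal sketch:

  .ifdef .gasversion.
  mov %r1,%r6        /* GAS "normal" syntax */
  .else
  r1 = r6            /* LLVM pseudo-C syntax */
  .endif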
Signed-off-by: James Hilliard <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
---
Changes v1 -> v2:
- use gasversion to detect assembly variant
---
tools/lib/bpf/bpf_helpers.h | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 7349b16b8e2f..5b98f5506798 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -131,7 +131,7 @@
/*
* Helper function to perform a tail call with a constant/immediate map slot.
*/
-#if __clang_major__ >= 8 && defined(__bpf__)
+#if (!defined(__clang__) || __clang_major__ >= 8) && defined(__bpf__)
static __always_inline void
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
{
@@ -139,8 +139,8 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
__bpf_unreachable();
/*
- * Provide a hard guarantee that LLVM won't optimize setting r2 (map
- * pointer) and r3 (constant map index) from _different paths_ ending
+ * Provide a hard guarantee that the compiler won't optimize setting r2
+ * (map pointer) and r3 (constant map index) from _different paths_ ending
* up at the _same_ call insn as otherwise we won't be able to use the
* jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
* given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
@@ -148,12 +148,18 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
*
* Note on clobber list: we need to stay in-line with BPF calling
* convention, so even if we don't end up using r0, r4, r5, we need
- * to mark them as clobber so that LLVM doesn't end up using them
- * before / after the call.
+ * to mark them as clobber so that the compiler doesn't end up using
+ * them before / after the call.
*/
- asm volatile("r1 = %[ctx]\n\t"
+ asm volatile(".ifdef .gasversion.\n\t"
+ "mov %%r1,%[ctx]\n\t"
+ "mov %%r2,%[map]\n\t"
+ "mov %%r3,%[slot]\n\t"
+ ".else\n\t"
+ "r1 = %[ctx]\n\t"
"r2 = %[map]\n\t"
"r3 = %[slot]\n\t"
+ ".endif\n\t"
"call 12"
:: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
: "r0", "r1", "r2", "r3", "r4", "r5");
--
2.34.1
On 9/12/22 7:35 AM, James Hilliard wrote:
> The bpf_tail_call_static function is currently only defined when
> building with clang >= 8.
>
> To support bpf_tail_call_static on GAS we can also enable it when
> __clang__ is not defined.
>
> We additionally need a GAS assembly syntax check in the inline
> assembly so that the assembler is provided GAS-compatible assembly.
>
> We can use the .gasversion. symbol to provide a migration path to the
> LLVM syntax for GAS once that syntax is natively supported.
I didn't see a gasversion comparison in the asm code.
Would it be possible to compare .gasversion. against a known
GAS version that supports the new syntax? If the version is
new enough, use the same syntax as LLVM; if it is not, emit
an illegal insn, and it would be even better if some error
information were printed out on the screen.
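
Roughly, just to sketch the idea (24000 standing in for whichever
GAS release first gains the new syntax):

  .if .gasversion. >= 24000
  r1 = r6            /* same syntax as llvm */
  .else
  .error "GAS too old for LLVM BPF syntax"
  .endif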
On Mon, Sep 12, 2022 at 11:29 AM Yonghong Song <[email protected]> wrote:
> On 9/12/22 7:35 AM, James Hilliard wrote:
> > The bpf_tail_call_static function is currently only defined when
> > building with clang >= 8.
> >
> > To support bpf_tail_call_static on GAS we can also enable it when
> > __clang__ is not defined.
> >
> > We additionally need a GAS assembly syntax check in the inline
> > assembly so that the assembler is provided GAS-compatible assembly.
> >
> > We can use the .gasversion. symbol to provide a migration path to the
> > LLVM syntax for GAS once that syntax is natively supported.
>
> I didn't see a gasversion comparison in the asm code.
> Would it be possible to compare .gasversion. against a known
> GAS version that supports the new syntax? If the version is
> new enough, use the same syntax as LLVM; if it is not, emit
> an illegal insn, and it would be even better if some error
> information were printed out on the screen.
Yeah, once the LLVM syntax is supported in GAS the check would simply
need to be changed to something like:

.if .gasversion. < 24000

At least this seems to me to be the best way to provide a migration
path, as we can't really check assembler versions from the compiler
like we can with LLVM.
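
i.e. nested inside the existing .ifdef, so that clang's integrated
assembler, which doesn't define .gasversion., keeps taking the LLVM
branch (24000 again standing in for whichever release gains the
syntax):

  .ifdef .gasversion.
  .if .gasversion. < 24000
  mov %r1,%r6        /* older GAS: normal syntax */
  .else
  r1 = r6            /* newer GAS: LLVM syntax */
  .endif
  .else
  r1 = r6            /* clang integrated assembler */
  .endif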