2024-05-08 00:05:37

by Maxwell Bland

Subject: [PATCH bpf-next v3 0/3] Support kCFI + BPF on arm64

In preparation for the BPF summit, I took a look back at the BPF-CFI
patches to check their status and found that there had been no updates
for around a month, so I went ahead and made the fixes suggested in v2.

This patchset handles emitting proper CFI hashes during JIT, the lack
of which can cause some of the selftests to fail, and removes the
__nocfi tag from bpf_dispatcher_*_func on arm64 so that Clang CFI
checks are generated:

0000000000fea1e8 <bpf_dispatcher_xdp_func>:
paciasp
stp x29, x30, [sp, #-0x10]!
mov x29, sp
+ ldur w16, [x2, #-0x4]
+ movk w17, #0x1881
+ movk w17, #0xd942, lsl #16
+ cmp w16, w17
+ b.eq 0xffff8000810016a0 <bpf_dispatcher_xdp_func+0x24>
+ brk #0x8222
blr x2
ldp x29, x30, [sp], #0x10
autiasp
ret

Where a leading '+' marks the added instructions.
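
For reference, a rough C-level sketch of what the added check does
before the indirect call (0xd9421881 is simply the hash encoded by the
two movk instructions above; the function name and prototype are made
up for illustration, not code from this series):

	static unsigned int cfi_checked_call(unsigned int (*fn)(const void *, const void *),
					     const void *ctx, const void *insn)
	{
		/* ldur w16, [x2, #-0x4]: load the hash stored before fn */
		unsigned int found = *(const unsigned int *)((const char *)fn - 4);

		if (found != 0xd9421881U)	/* cmp w16, w17; b.eq ... */
			__builtin_trap();	/* brk #0x8222 */

		return fn(ctx, insn);		/* blr x2 */
	}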

Credit for this goes entirely to Puranjay Mohan; I just made some
fixes. I hope that is OK.

Cc: [email protected]

Changes in v2->v3:
https://lore.kernel.org/all/[email protected]/
- Simplify cfi_get_func_hash to avoid needless failure case
- Use DEFINE_CFI_TYPE as suggested by Mark Rutland

Changes in v1->v2:
https://lore.kernel.org/bpf/[email protected]/
- Rebased on latest bpf-next/master

Mark Rutland (1):
cfi: add C CFI type macro

Maxwell Bland (1):
arm64/cfi,bpf: Use DEFINE_CFI_TYPE in arm64

Puranjay Mohan (1):
arm64/cfi,bpf: Support kCFI + BPF on arm64

arch/arm64/include/asm/cfi.h | 23 ++++++++++++++++++++++
arch/arm64/kernel/alternative.c | 18 +++++++++++++++++
arch/arm64/net/bpf_jit_comp.c | 18 +++++++++++++++--
arch/riscv/kernel/cfi.c | 34 ++------------------------------
arch/x86/kernel/alternative.c | 35 +++------------------------------
include/linux/cfi_types.h | 23 ++++++++++++++++++++++
6 files changed, 85 insertions(+), 66 deletions(-)
create mode 100644 arch/arm64/include/asm/cfi.h


base-commit: 329a6720a3ebbc041983b267981ab2cac102de93
--
2.34.1



2024-05-08 00:06:30

by Maxwell Bland

Subject: [PATCH bpf-next v3 1/3] cfi: add C CFI type macro

Currently x86 and riscv open-code 4 instances of the same logic to
define a u32 variable with the KCFI typeid of a given function.

Replace the duplicate logic with a common macro.
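
For illustration, each __ADDRESSABLE() reference plus open-coded asm
block then collapses to a single invocation of the macro, e.g. (taken
from the riscv/x86 hunks below; <linux/cfi_types.h> provides the
macro):

	struct bpf_insn;

	/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
	extern unsigned int __bpf_prog_runX(const void *ctx,
					    const struct bpf_insn *insn);

	DEFINE_CFI_TYPE(cfi_bpf_hash, __bpf_prog_runX);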

Signed-off-by: Mark Rutland <[email protected]>
---
arch/riscv/kernel/cfi.c | 34 ++--------------------------------
arch/x86/kernel/alternative.c | 35 +++--------------------------------
include/linux/cfi_types.h | 23 +++++++++++++++++++++++
3 files changed, 28 insertions(+), 64 deletions(-)

diff --git a/arch/riscv/kernel/cfi.c b/arch/riscv/kernel/cfi.c
index 64bdd3e1ab8c..b78a6f41df22 100644
--- a/arch/riscv/kernel/cfi.c
+++ b/arch/riscv/kernel/cfi.c
@@ -82,41 +82,11 @@ struct bpf_insn;
/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
extern unsigned int __bpf_prog_runX(const void *ctx,
const struct bpf_insn *insn);
-
-/*
- * Force a reference to the external symbol so the compiler generates
- * __kcfi_typid.
- */
-__ADDRESSABLE(__bpf_prog_runX);
-
-/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
-asm (
-" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
-" .type cfi_bpf_hash,@object \n"
-" .globl cfi_bpf_hash \n"
-" .p2align 2, 0x0 \n"
-"cfi_bpf_hash: \n"
-" .word __kcfi_typeid___bpf_prog_runX \n"
-" .size cfi_bpf_hash, 4 \n"
-" .popsection \n"
-);
+DEFINE_CFI_TYPE(cfi_bpf_hash, __bpf_prog_runX);

/* Must match bpf_callback_t */
extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
-
-__ADDRESSABLE(__bpf_callback_fn);
-
-/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
-asm (
-" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
-" .type cfi_bpf_subprog_hash,@object \n"
-" .globl cfi_bpf_subprog_hash \n"
-" .p2align 2, 0x0 \n"
-"cfi_bpf_subprog_hash: \n"
-" .word __kcfi_typeid___bpf_callback_fn \n"
-" .size cfi_bpf_subprog_hash, 4 \n"
-" .popsection \n"
-);
+DEFINE_CFI_TYPE(cfi_bpf_subprog_hash, __bpf_callback_fn);

u32 cfi_get_func_hash(void *func)
{
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 45a280f2161c..a822699a40dd 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

+#include <linux/cfi_types.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
@@ -918,41 +919,11 @@ struct bpf_insn;
/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
extern unsigned int __bpf_prog_runX(const void *ctx,
const struct bpf_insn *insn);
-
-/*
- * Force a reference to the external symbol so the compiler generates
- * __kcfi_typid.
- */
-__ADDRESSABLE(__bpf_prog_runX);
-
-/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
-asm (
-" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
-" .type cfi_bpf_hash,@object \n"
-" .globl cfi_bpf_hash \n"
-" .p2align 2, 0x0 \n"
-"cfi_bpf_hash: \n"
-" .long __kcfi_typeid___bpf_prog_runX \n"
-" .size cfi_bpf_hash, 4 \n"
-" .popsection \n"
-);
+DEFINE_CFI_TYPE(cfi_bpf_hash, __bpf_prog_runX);

/* Must match bpf_callback_t */
extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
-
-__ADDRESSABLE(__bpf_callback_fn);
-
-/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
-asm (
-" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
-" .type cfi_bpf_subprog_hash,@object \n"
-" .globl cfi_bpf_subprog_hash \n"
-" .p2align 2, 0x0 \n"
-"cfi_bpf_subprog_hash: \n"
-" .long __kcfi_typeid___bpf_callback_fn \n"
-" .size cfi_bpf_subprog_hash, 4 \n"
-" .popsection \n"
-);
+DEFINE_CFI_TYPE(cfi_bpf_subprog_hash, __bpf_callback_fn);

u32 cfi_get_func_hash(void *func)
{
diff --git a/include/linux/cfi_types.h b/include/linux/cfi_types.h
index 6b8713675765..f510e62ca8b1 100644
--- a/include/linux/cfi_types.h
+++ b/include/linux/cfi_types.h
@@ -41,5 +41,28 @@
SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#endif

+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_CFI_CLANG
+#define DEFINE_CFI_TYPE(name, func) \
+ /* \
+ * Force a reference to the function so the compiler generates \
+ * __kcfi_typeid_<func>. \
+ */ \
+ __ADDRESSABLE(func); \
+ /* u32 name = __kcfi_typeid_<func> */ \
+ extern u32 name; \
+ asm ( \
+ " .pushsection .data..ro_after_init,\"aw\",@progbits \n" \
+ " .type " #name ",@object \n" \
+ " .globl " #name " \n" \
+ " .p2align 2, 0x0 \n" \
+ #name ": \n" \
+ " .long __kcfi_typeid_" #func " \n" \
+ " .size " #name ", 4 \n" \
+ " .popsection \n" \
+ );
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_CFI_TYPES_H */
--
2.34.1



2024-05-08 00:06:59

by Maxwell Bland

Subject: [PATCH bpf-next v3 2/3] arm64/cfi,bpf: Support kCFI + BPF on arm64

Currently, bpf_dispatcher_*_func() is marked with `__nocfi`, so calling
BPF programs from this interface doesn't cause CFI warnings.

When BPF programs are called directly from C, e.g. from BPF helpers or
struct_ops, CFI warnings are generated.

Implement proper CFI prologues for the BPF programs and callbacks and
drop __nocfi for arm64. Fix the trampoline generation code to emit a
kCFI prologue when a struct_ops trampoline is being prepared.
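
As a rough sketch of the resulting layout: the JIT now stores the u32
type hash immediately before the program's entry point, so the hash can
be read back with something like the following (hypothetical helper,
not part of this series):

	#include <linux/filter.h>	/* struct bpf_prog */
	#include <asm/cfi.h>		/* cfi_get_offset() */

	static u32 bpf_prog_kcfi_hash(const struct bpf_prog *prog)
	{
		/*
		 * emit_kcfi() placed the hash cfi_get_offset() bytes
		 * before prog->bpf_func.
		 */
		return *(const u32 *)((const char *)prog->bpf_func -
				      cfi_get_offset());
	}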

Signed-off-by: Puranjay Mohan <[email protected]>
---
arch/arm64/include/asm/cfi.h | 23 ++++++++++++++
arch/arm64/kernel/alternative.c | 54 +++++++++++++++++++++++++++++++++
arch/arm64/net/bpf_jit_comp.c | 18 +++++++++--
3 files changed, 93 insertions(+), 2 deletions(-)
create mode 100644 arch/arm64/include/asm/cfi.h

diff --git a/arch/arm64/include/asm/cfi.h b/arch/arm64/include/asm/cfi.h
new file mode 100644
index 000000000000..670e191f8628
--- /dev/null
+++ b/arch/arm64/include/asm/cfi.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_CFI_H
+#define _ASM_ARM64_CFI_H
+
+#ifdef CONFIG_CFI_CLANG
+#define __bpfcall
+static inline int cfi_get_offset(void)
+{
+ return 4;
+}
+#define cfi_get_offset cfi_get_offset
+extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
+extern u32 cfi_get_func_hash(void *func);
+#else
+#define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
+static inline u32 cfi_get_func_hash(void *func)
+{
+ return 0;
+}
+#endif /* CONFIG_CFI_CLANG */
+#endif /* _ASM_ARM64_CFI_H */
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 8ff6610af496..1715da7df137 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -13,6 +13,7 @@
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
+#include <asm/cfi.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/module.h>
@@ -298,3 +299,56 @@ noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
}
EXPORT_SYMBOL(alt_cb_patch_nops);
+
+#ifdef CONFIG_CFI_CLANG
+struct bpf_insn;
+
+/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
+extern unsigned int __bpf_prog_runX(const void *ctx,
+ const struct bpf_insn *insn);
+
+/*
+ * Force a reference to the external symbol so the compiler generates
+ * __kcfi_typid.
+ */
+__ADDRESSABLE(__bpf_prog_runX);
+
+/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
+asm (
+" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
+" .type cfi_bpf_hash,@object \n"
+" .globl cfi_bpf_hash \n"
+" .p2align 2, 0x0 \n"
+"cfi_bpf_hash: \n"
+" .word __kcfi_typeid___bpf_prog_runX \n"
+" .size cfi_bpf_hash, 4 \n"
+" .popsection \n"
+);
+
+/* Must match bpf_callback_t */
+extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
+
+__ADDRESSABLE(__bpf_callback_fn);
+
+/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
+asm (
+" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
+" .type cfi_bpf_subprog_hash,@object \n"
+" .globl cfi_bpf_subprog_hash \n"
+" .p2align 2, 0x0 \n"
+"cfi_bpf_subprog_hash: \n"
+" .word __kcfi_typeid___bpf_callback_fn \n"
+" .size cfi_bpf_subprog_hash, 4 \n"
+" .popsection \n"
+);
+
+u32 cfi_get_func_hash(void *func)
+{
+ u32 hash;
+
+ if (get_kernel_nofault(hash, func - cfi_get_offset()))
+ return 0;
+
+ return hash;
+}
+#endif
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 76b91f36c729..703247457409 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -17,6 +17,7 @@
#include <asm/asm-extable.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
+#include <asm/cfi.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/patching.h>
@@ -162,6 +163,12 @@ static inline void emit_bti(u32 insn, struct jit_ctx *ctx)
emit(insn, ctx);
}

+static inline void emit_kcfi(u32 hash, struct jit_ctx *ctx)
+{
+ if (IS_ENABLED(CONFIG_CFI_CLANG))
+ emit(hash, ctx);
+}
+
/*
* Kernel addresses in the vmalloc space use at most 48 bits, and the
* remaining bits are guaranteed to be 0x1. So we can compose the address
@@ -337,6 +344,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
*
*/

+ emit_kcfi(is_main_prog ? cfi_bpf_hash : cfi_bpf_subprog_hash, ctx);
/* bpf function may be invoked by 3 instruction types:
* 1. bl, attached via freplace to bpf prog via short jump
* 2. br, attached via freplace to bpf prog via long jump
@@ -1806,9 +1814,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
jit_data->ro_header = ro_header;
}

- prog->bpf_func = (void *)ctx.ro_image;
+ prog->bpf_func = (void *)ctx.ro_image + cfi_get_offset();
prog->jited = 1;
- prog->jited_len = prog_size;
+ prog->jited_len = prog_size - cfi_get_offset();

if (!prog->is_func || extra_pass) {
int i;
@@ -2072,6 +2080,12 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
/* return address locates above FP */
retaddr_off = stack_size + 8;

+ if (flags & BPF_TRAMP_F_INDIRECT) {
+ /*
+ * Indirect call for bpf_struct_ops
+ */
+ emit_kcfi(cfi_get_func_hash(func_addr), ctx);
+ }
/* bpf trampoline may be invoked by 3 instruction types:
* 1. bl, attached to bpf prog or kernel function via short jump
* 2. br, attached to bpf prog or kernel function via long jump
--
2.34.1



2024-05-08 00:07:33

by Maxwell Bland

Subject: [PATCH bpf-next v3 3/3] arm64/cfi,bpf: Use DEFINE_CFI_TYPE in arm64

Correct Puranjay Mohan's commit to adopt Mark Rutland's suggestion of
using the C CFI type macro, DEFINE_CFI_TYPE, for kCFI+BPF on arm64.

Signed-off-by: Maxwell Bland <[email protected]>
---
arch/arm64/kernel/alternative.c | 46 ++++-----------------------------
1 file changed, 5 insertions(+), 41 deletions(-)

diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 1715da7df137..d7a58eca7665 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -8,6 +8,7 @@

#define pr_fmt(fmt) "alternatives: " fmt

+#include <linux/cfi_types.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elf.h>
@@ -302,53 +303,16 @@ EXPORT_SYMBOL(alt_cb_patch_nops);

#ifdef CONFIG_CFI_CLANG
struct bpf_insn;
-
/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
extern unsigned int __bpf_prog_runX(const void *ctx,
const struct bpf_insn *insn);
-
-/*
- * Force a reference to the external symbol so the compiler generates
- * __kcfi_typid.
- */
-__ADDRESSABLE(__bpf_prog_runX);
-
-/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
-asm (
-" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
-" .type cfi_bpf_hash,@object \n"
-" .globl cfi_bpf_hash \n"
-" .p2align 2, 0x0 \n"
-"cfi_bpf_hash: \n"
-" .word __kcfi_typeid___bpf_prog_runX \n"
-" .size cfi_bpf_hash, 4 \n"
-" .popsection \n"
-);
-
+DEFINE_CFI_TYPE(cfi_bpf_hash, __bpf_prog_runX);
/* Must match bpf_callback_t */
extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
-
-__ADDRESSABLE(__bpf_callback_fn);
-
-/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
-asm (
-" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
-" .type cfi_bpf_subprog_hash,@object \n"
-" .globl cfi_bpf_subprog_hash \n"
-" .p2align 2, 0x0 \n"
-"cfi_bpf_subprog_hash: \n"
-" .word __kcfi_typeid___bpf_callback_fn \n"
-" .size cfi_bpf_subprog_hash, 4 \n"
-" .popsection \n"
-);
-
+DEFINE_CFI_TYPE(cfi_bpf_subprog_hash, __bpf_callback_fn);
u32 cfi_get_func_hash(void *func)
{
- u32 hash;
-
- if (get_kernel_nofault(hash, func - cfi_get_offset()))
- return 0;
-
- return hash;
+ u32 *hashp = func - cfi_get_offset();
+ return READ_ONCE(*hashp);
}
#endif
--
2.34.1



2024-05-08 10:15:09

by Puranjay Mohan

Subject: Re: [PATCH bpf-next v3 0/3] Support kCFI + BPF on arm64

Maxwell Bland <[email protected]> writes:

Hi Maxwell,

> [...]
>
> Credit for this goes entirely to Puranjay Mohan; I just made some
> fixes. I hope that is OK.

Thanks for taking this effort forward.

checkpatch.pl complains about the patches with errors like the following:

ERROR: Missing Signed-off-by: line by nominal patch author 'Maxwell Bland <[email protected]>'

So, you can change the authorship of the patch like this:

git commit --amend --author "Puranjay Mohan <[email protected]>"

and similarly for the patch by Mark:

git commit --amend --author "Mark Rutland <[email protected]>"
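
For the whole series, one way to do it in a single pass is via an
interactive rebase (sketch; adjust the base ref and the author at each
stop accordingly):

	git rebase -i bpf-next/master
	# mark the two affected commits as 'edit', then at each stop:
	git commit --amend --no-edit --author="Puranjay Mohan <[email protected]>"
	git rebase --continue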

Thanks,
Puranjay

2024-05-08 16:27:30

by Maxwell Bland

Subject: Re: [PATCH bpf-next v3 0/3] Support kCFI + BPF on arm64

On Wed, May 08, 2024 at 10:05:30AM GMT, Puranjay Mohan wrote:
> checkpatch.pl complains about the patches like the following:
>
> ERROR: Missing Signed-off-by: line by nominal patch author 'Maxwell Bland <[email protected]>'
>
> So, you can change the authorship of the patch like:
>
> git commit --amend --author "Puranjay Mohan <[email protected]>"
>

Thanks for the heads-up! To avoid spamming the list, I will respin the
series with the appropriate attribution tags on Monday. Apologies for
the mistake.

Maxwell