2024-03-23 10:31:19

by Puranjay Mohan

Subject: [PATCH bpf-next v3 0/2] bpf,arm64: Add support for BPF Arena

Changes in V3
V2: https://lore.kernel.org/bpf/[email protected]/
- Optimize bpf_addr_space_cast as suggested by Xu Kuohai

Changes in V2
V1: https://lore.kernel.org/bpf/[email protected]/
- Fix build warnings by using 5 in place of 32 as the DONT_CLEAR marker.
  R5 is not mapped to any BPF register, so it can safely be used here.

This series adds support for the PROBE_MEM32 and bpf_addr_space_cast
instructions to the ARM64 BPF JIT. These two instructions enable BPF
Arena.
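
For context, these instructions are generated for C code that uses
arena pointers, roughly like the following sketch (patterned after the
arena selftests; the macro name and the helper are illustrative):

#define __arena __attribute__((address_space(1)))

/* Illustrative only: dereferences through an __arena pointer are
 * JITed as PROBE_MEM32 loads/stores, and casts between __arena and
 * normal pointers become bpf_addr_space_cast.
 */
static void bump(int __arena *counter)
{
	if (counter)
		(*counter)++;
}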

All arena-related selftests are passing:

[root@ip-172-31-6-62 bpf]# ./test_progs -a "*arena*"
#3/1 arena_htab/arena_htab_llvm:OK
#3/2 arena_htab/arena_htab_asm:OK
#3 arena_htab:OK
#4/1 arena_list/arena_list_1:OK
#4/2 arena_list/arena_list_1000:OK
#4 arena_list:OK
#434/1 verifier_arena/basic_alloc1:OK
#434/2 verifier_arena/basic_alloc2:OK
#434/3 verifier_arena/basic_alloc3:OK
#434/4 verifier_arena/iter_maps1:OK
#434/5 verifier_arena/iter_maps2:OK
#434/6 verifier_arena/iter_maps3:OK
#434 verifier_arena:OK
Summary: 3/10 PASSED, 0 SKIPPED, 0 FAILED

The verifier_arena selftest could fail in CI because the following
commit is missing from bpf-next:
https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git/commit/?id=fa3550dca8f02ec312727653a94115ef3ab68445

Puranjay Mohan (2):
bpf: Add arm64 JIT support for PROBE_MEM32 pseudo instructions.
bpf: Add arm64 JIT support for bpf_addr_space_cast instruction.

arch/arm64/net/bpf_jit_comp.c | 87 +++++++++++++++++---
tools/testing/selftests/bpf/DENYLIST.aarch64 | 2 -
2 files changed, 77 insertions(+), 12 deletions(-)

--
2.40.1



2024-03-23 10:31:44

by Puranjay Mohan

Subject: [PATCH bpf-next v3 1/2] bpf: Add arm64 JIT support for PROBE_MEM32 pseudo instructions.

Add support for [LDX | STX | ST], PROBE_MEM32, [B | H | W | DW]
instructions. They are similar to PROBE_MEM instructions with the
following differences:
- PROBE_MEM32 supports store.
- PROBE_MEM32 relies on the verifier to clear the upper 32 bits of the
  src/dst register.
- PROBE_MEM32 adds the 64-bit kern_vm_start address (which is stored in
  R28 in the prologue). Due to the bpf_arena construction, such
  R28 + reg + off16 accesses are guaranteed to be within the arena
  virtual range, so no address check is needed at run time.
- PROBE_MEM32 allows STX and ST. If they fault, the store is a nop. When
  LDX faults, the destination register is zeroed.

To support these on arm64, we compute tmp2 = R28 + src/dst reg and then
use tmp2 as the new src/dst register. This allows us to reuse most of
the code for the normal [LDX | STX | ST] cases (see the sketch below).
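
A minimal C model of the address computation and fault semantics, for
illustration only (the helper name is made up; the real logic is the
emitted arm64 code in the diff below):

#include <stdint.h>

/*
 * Models the address a PROBE_MEM32 access touches: the JIT emits
 * "tmp2 = R28 + reg" and reuses the ordinary LDX/STX/ST paths with
 * tmp2 as the base. The verifier has already cleared the upper 32
 * bits of reg, and the bpf_arena construction keeps the result inside
 * the arena, so no run-time check is emitted. A faulting store
 * becomes a nop; a faulting load zeroes dst (see ex_handler_bpf).
 */
static uint64_t probe_mem32_addr(uint64_t kern_vm_start, /* R28 */
				 uint32_t reg, int16_t off)
{
	return kern_vm_start + reg + off;
}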

Signed-off-by: Puranjay Mohan <[email protected]>
---
arch/arm64/net/bpf_jit_comp.c | 70 ++++++++++++++++++++++++++++++-----
1 file changed, 60 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index bc16eb694657..b9b5febe64f0 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -29,6 +29,7 @@
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
#define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
+#define PROBE_MEM32_BASE (MAX_BPF_JIT_REG + 5)

#define check_imm(bits, imm) do { \
if ((((imm) > 0) && ((imm) >> (bits))) || \
@@ -67,6 +68,8 @@ static const int bpf2a64[] = {
/* temporary register for blinding constants */
[BPF_REG_AX] = A64_R(9),
[FP_BOTTOM] = A64_R(27),
+ /* callee saved register for kern_vm_start address */
+ [PROBE_MEM32_BASE] = A64_R(28),
};

struct jit_ctx {
@@ -295,7 +298,7 @@ static bool is_lsi_offset(int offset, int scale)
#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)

static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
- bool is_exception_cb)
+ bool is_exception_cb, u64 arena_vm_start)
{
const struct bpf_prog *prog = ctx->prog;
const bool is_main_prog = !bpf_is_subprog(prog);
@@ -306,6 +309,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 tcc = bpf2a64[TCALL_CNT];
const u8 fpb = bpf2a64[FP_BOTTOM];
+ const u8 pb = bpf2a64[PROBE_MEM32_BASE];
const int idx0 = ctx->idx;
int cur_offset;

@@ -411,6 +415,10 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,

/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+
+ if (arena_vm_start)
+ emit_a64_mov_i64(pb, arena_vm_start, ctx);
+
return 0;
}

@@ -738,6 +746,7 @@ static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)

#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
+#define DONT_CLEAR 5 /* Unused ARM64 register from BPF's POV */

bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
@@ -745,7 +754,8 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);

- regs->regs[dst_reg] = 0;
+ if (dst_reg != DONT_CLEAR)
+ regs->regs[dst_reg] = 0;
regs->pc = (unsigned long)&ex->fixup - offset;
return true;
}
@@ -765,7 +775,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
return 0;

if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
- BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
+ BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEM32)
return 0;

if (!ctx->prog->aux->extable ||
@@ -810,6 +821,9 @@ static int add_exception_handler(const struct bpf_insn *insn,

ex->insn = ins_offset;

+ if (BPF_CLASS(insn->code) != BPF_LDX)
+ dst_reg = DONT_CLEAR;
+
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

@@ -829,12 +843,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
bool extra_pass)
{
const u8 code = insn->code;
- const u8 dst = bpf2a64[insn->dst_reg];
- const u8 src = bpf2a64[insn->src_reg];
+ u8 dst = bpf2a64[insn->dst_reg];
+ u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 fpb = bpf2a64[FP_BOTTOM];
+ const u8 pb = bpf2a64[PROBE_MEM32_BASE];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
@@ -1237,7 +1252,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
- if (ctx->fpb_offset > 0 && src == fp) {
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ emit(A64_ADD(1, tmp2, src, pb), ctx);
+ src = tmp2;
+ }
+ if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
src_adj = fpb;
off_adj = off + ctx->fpb_offset;
} else {
@@ -1322,7 +1345,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_DW:
- if (ctx->fpb_offset > 0 && dst == fp) {
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ emit(A64_ADD(1, tmp2, dst, pb), ctx);
+ dst = tmp2;
+ }
+ if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
dst_adj = fpb;
off_adj = off + ctx->fpb_offset;
} else {
@@ -1365,6 +1396,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
}
break;
}
+
+ ret = add_exception_handler(insn, ctx, dst);
+ if (ret)
+ return ret;
break;

/* STX: *(size *)(dst + off) = src */
@@ -1372,7 +1407,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_DW:
- if (ctx->fpb_offset > 0 && dst == fp) {
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ emit(A64_ADD(1, tmp2, dst, pb), ctx);
+ dst = tmp2;
+ }
+ if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
dst_adj = fpb;
off_adj = off + ctx->fpb_offset;
} else {
@@ -1413,6 +1456,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
}
break;
}
+
+ ret = add_exception_handler(insn, ctx, dst);
+ if (ret)
+ return ret;
break;

case BPF_STX | BPF_ATOMIC | BPF_W:
@@ -1594,6 +1641,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
+ u64 arena_vm_start;
u8 *image_ptr;
u8 *ro_image_ptr;

@@ -1611,6 +1659,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = tmp;
}

+ arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
jit_data = prog->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
@@ -1648,7 +1697,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
* BPF line info needs ctx->offset[i] to be the offset of
* instruction[i] in jited image, so build prologue first.
*/
- if (build_prologue(&ctx, was_classic, prog->aux->exception_cb)) {
+ if (build_prologue(&ctx, was_classic, prog->aux->exception_cb,
+ arena_vm_start)) {
prog = orig_prog;
goto out_off;
}
@@ -1696,7 +1746,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
ctx.idx = 0;
ctx.exentry_idx = 0;

- build_prologue(&ctx, was_classic, prog->aux->exception_cb);
+ build_prologue(&ctx, was_classic, prog->aux->exception_cb, arena_vm_start);

if (build_body(&ctx, extra_pass)) {
prog = orig_prog;
--
2.40.1


2024-03-23 10:31:49

by Puranjay Mohan

Subject: [PATCH bpf-next v3 2/2] bpf: Add arm64 JIT support for bpf_addr_space_cast instruction.

LLVM generates the bpf_addr_space_cast instruction when translating
pointers between the native (zero) address space and
__attribute__((address_space(N))). addr_space=1 is reserved as the
bpf_arena address space.

rY = addr_space_cast(rX, 0, 1) is processed by the verifier and
converted to a normal 32-bit move: wY = wX

rY = addr_space_cast(rX, 1, 0) has to be converted by the JIT:

In symbolic terms, here is what the JIT is supposed to do.
We have:
src = [src_upper32][src_lower32] // 64-bit src kernel pointer
uvm = [uvm_upper32][uvm_lower32] // 64-bit user_vm_start

The JIT has to set the dst reg as follows:
dst = [uvm_upper32][src_lower32] // if src_lower32 != 0
dst = [00000000000][00000000000] // if src_lower32 == 0
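
A minimal C sketch of these semantics (illustrative only; the helper
name is made up, and the real conversion is the arm64 sequence emitted
in the diff below):

#include <stdint.h>

/*
 * Models rY = addr_space_cast(rX, 1, 0): keep the low 32 bits of src,
 * take the high 32 bits from user_vm_start, and map a zero lower half
 * to an all-zero result (NULL stays NULL).
 */
static uint64_t cast_kern_to_user(uint64_t src, uint64_t user_vm_start)
{
	if (!(uint32_t)src)
		return 0;
	return (user_vm_start & 0xffffffff00000000ULL) | (uint32_t)src;
}

The emitted arm64 sequence below (32-bit mov of src into tmp to clear
the upper bits, build uvm_upper32 << 32 in dst, cbz to skip the orr
when tmp is zero, and a final mov of tmp into dst) implements exactly
this.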

Signed-off-by: Puranjay Mohan <[email protected]>
---
arch/arm64/net/bpf_jit_comp.c | 17 +++++++++++++++++
tools/testing/selftests/bpf/DENYLIST.aarch64 | 2 --
2 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index b9b5febe64f0..cd418c07f806 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -82,6 +82,7 @@ struct jit_ctx {
__le32 *ro_image;
u32 stack_size;
int fpb_offset;
+ u64 user_vm_start;
};

struct bpf_plt {
@@ -868,6 +869,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
+ if (insn->off == BPF_ADDR_SPACE_CAST && insn->imm == 1U << 16 &&
+ BPF_CLASS(code) == BPF_ALU64) {
+ emit(A64_MOV(0, tmp, src), ctx); // 32-bit mov clears the upper 32 bits
+ emit_a64_mov_i(0, dst, ctx->user_vm_start >> 32, ctx);
+ emit(A64_LSL(1, dst, dst, 32), ctx);
+ emit(A64_CBZ(1, tmp, 2), ctx);
+ emit(A64_ORR(1, tmp, dst, tmp), ctx);
+ emit(A64_MOV(1, dst, tmp), ctx);
+ break;
+ }
switch (insn->off) {
case 0:
emit(A64_MOV(is64, dst, src), ctx);
@@ -1690,6 +1701,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}

ctx.fpb_offset = find_fpb_offset(prog);
+ ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);

/*
* 1. Initial fake pass to compute ctx->idx and ctx->offset.
@@ -2511,6 +2523,11 @@ bool bpf_jit_supports_exceptions(void)
return true;
}

+bool bpf_jit_supports_arena(void)
+{
+ return true;
+}
+
void bpf_jit_free(struct bpf_prog *prog)
{
if (prog->jited) {
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index d8ade15e2789..0445ac38bc07 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -10,5 +10,3 @@ fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_mu
fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95
missed/kprobe_recursion # missed_kprobe_recursion__attach unexpected error: -95 (errno 95)
-verifier_arena # JIT does not support arena
-arena_htab # JIT does not support arena
--
2.40.1


2024-03-23 18:49:11

by Alexei Starovoitov

Subject: Re: [PATCH bpf-next v3 1/2] bpf: Add arm64 JIT support for PROBE_MEM32 pseudo instructions.

On Sat, Mar 23, 2024 at 3:31 AM Puranjay Mohan <[email protected]> wrote:
>
> +#define PROBE_MEM32_BASE (MAX_BPF_JIT_REG + 5)
>
> #define check_imm(bits, imm) do { \
> if ((((imm) > 0) && ((imm) >> (bits))) || \
> @@ -67,6 +68,8 @@ static const int bpf2a64[] = {
> /* temporary register for blinding constants */
> [BPF_REG_AX] = A64_R(9),
> [FP_BOTTOM] = A64_R(27),
> + /* callee saved register for kern_vm_start address */
> + [PROBE_MEM32_BASE] = A64_R(28),
> };
>
> struct jit_ctx {
> @@ -295,7 +298,7 @@ static bool is_lsi_offset(int offset, int scale)
> #define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
>
> static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
> - bool is_exception_cb)
> + bool is_exception_cb, u64 arena_vm_start)
> {
> const struct bpf_prog *prog = ctx->prog;
> const bool is_main_prog = !bpf_is_subprog(prog);
> @@ -306,6 +309,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
> const u8 fp = bpf2a64[BPF_REG_FP];
> const u8 tcc = bpf2a64[TCALL_CNT];
> const u8 fpb = bpf2a64[FP_BOTTOM];
> + const u8 pb = bpf2a64[PROBE_MEM32_BASE];

In addition to the riscv comments, please use a more verbose name here.
'pb' is too cryptic.
'mem32_base'?

I would also drop the PROBE prefix and use:
#define MEM32_BASE (MAX_BPF_JIT_REG + 5)

From the verifier's pov the ld/st mode is BPF_PROBE_MEM32, since it's
asking the JIT to emit code to probe reads/writes of such arena
addresses, but from the JIT's pov the base is a real base address that
it got from bpf_arena_get_kern_vm_start().
#define KERN_VM_START (MAX_BPF_JIT_REG + 5)
would be an alternative name that also fits, or:
#define ARENA_VM_START ...