2023-01-03 09:15:27

by Pu Lehui

Subject: [RFC PATCH bpf-next v2 0/4] Support bpf trampoline for RV64

BPF trampoline is a critical piece of infrastructure in the
bpf subsystem, acting as a mediator between kernel functions
and BPF programs. Numerous important features, such as
using eBPF programs for zero-overhead kernel introspection,
rely on this key component. We can't wait to support bpf
trampoline on RV64. The implementation closely follows x86
and arm64 to ease future development.
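
To illustrate what this enables, a minimal fentry program of the
kind the selftests below exercise might look like this (the traced
function do_unlinkat and the bpf_printk output are only an example,
not part of this series):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/* The trampoline built by this series is what dispatches the traced
 * function's entry to this program and hands over its arguments. */
SEC("fentry/do_unlinkat")
int BPF_PROG(trace_unlinkat, int dfd, struct filename *name)
{
	bpf_printk("do_unlinkat entered: dfd=%d", dfd);
	return 0;
}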

Since most RISC-V CPUs support unaligned memory accesses, we
temporarily use patch [1] to facilitate testing. The test
results are as follows, and test_verifier reports no new
failure cases.

- fexit_test:OK
- fentry_test:OK
- fentry_fexit:OK
- fexit_stress:OK
- fexit_bpf2bpf:OK
- xdp_bpf2bpf:OK
- dummy_st_ops:OK
- modify_return:OK
- trampoline_count:OK
- get_func_ip_test:OK
- get_func_args_test:OK

[1] https://lore.kernel.org/linux-riscv/[email protected]/

v2:
- Rebase onto the bpf-next tree.

Pu Lehui (4):
bpf: Rollback to text_poke when arch not supported ftrace direct call
riscv, bpf: Factor out emit_call for kernel and bpf context
riscv, bpf: Add bpf_arch_text_poke support for RV64
riscv, bpf: Add bpf trampoline support for RV64

arch/riscv/net/bpf_jit.h | 5 +
arch/riscv/net/bpf_jit_comp64.c | 483 ++++++++++++++++++++++++++++++--
kernel/bpf/trampoline.c | 8 +-
3 files changed, 471 insertions(+), 25 deletions(-)

--
2.25.1


2023-01-03 09:16:38

by Pu Lehui

Subject: [RFC PATCH bpf-next v2 3/4] riscv, bpf: Add bpf_arch_text_poke support for RV64

From: Pu Lehui <[email protected]>

Implement bpf_arch_text_poke for RV64. For the call scenario,
the ftrace framework reserves 4 nops at the entry of each RV64
kernel function, and auipc+jalr instructions are used to call
kernel or module functions. However, since patching the
auipc+jalr pair is not an atomic operation, we need to use
stop-machine to make sure the instructions are patched in an
atomic context. As for the jump scenario, since we only jump
inside the trampoline, a single jal instruction is sufficient.
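
As a rough illustration of the auipc+jalr split, the standalone
user-space sketch below (not part of the patch; the helper names
are invented) divides a pc-relative offset the same way
emit_jump_and_link does:

#include <stdint.h>
#include <stdio.h>

/* auipc rd, imm20: rd = pc + sign_extend(imm20 << 12) */
static uint32_t rv_auipc(uint8_t rd, uint32_t imm20)
{
	return (imm20 << 12) | ((uint32_t)rd << 7) | 0x17;
}

/* jalr rd, imm12(rs1): rd = pc + 4; pc = rs1 + sign_extend(imm12) */
static uint32_t rv_jalr(uint8_t rd, uint8_t rs1, uint32_t imm12)
{
	return (imm12 << 20) | ((uint32_t)rs1 << 15) | ((uint32_t)rd << 7) | 0x67;
}

int main(void)
{
	int64_t off = 0x12345678;                 /* hypothetical target - pc */
	uint32_t upper = (off + (1 << 11)) >> 12; /* rounded so lo12 sign-extends back */
	uint32_t lower = off & 0xfff;

	/* Rewriting this two-instruction pair cannot be done with a single
	 * atomic store, hence the stop_machine based patching in this patch. */
	printf("auipc ra, 0x%05x -> %08x\n", upper & 0xfffff, rv_auipc(1, upper & 0xfffff));
	printf("jalr  ra, 0x%03x(ra) -> %08x\n", lower, rv_jalr(1, 1, lower));
	return 0;
}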

Signed-off-by: Pu Lehui <[email protected]>
---
arch/riscv/net/bpf_jit.h | 5 ++
arch/riscv/net/bpf_jit_comp64.c | 131 +++++++++++++++++++++++++++++++-
2 files changed, 134 insertions(+), 2 deletions(-)

diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index d926e0f7ef57..bf9802a63061 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -573,6 +573,11 @@ static inline u32 rv_fence(u8 pred, u8 succ)
return rv_i_insn(imm11_0, 0, 0, 0, 0xf);
}

+static inline u32 rv_nop(void)
+{
+ return rv_i_insn(0, 0, 0, 0, 0x13);
+}
+
/* RVC instrutions. */

static inline u16 rvc_addi4spn(u8 rd, u32 imm10)
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 69ebab81d935..2e6f1f39831d 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -8,6 +8,8 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
+#include <linux/memory.h>
+#include <linux/stop_machine.h>
#include "bpf_jit.h"

#define RV_REG_TCC RV_REG_A6
@@ -238,7 +240,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
if (!is_tail_call)
emit_mv(RV_REG_A0, RV_REG_A5, ctx);
emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
- is_tail_call ? 4 : 0, /* skip TCC init */
+ is_tail_call ? 20 : 0, /* skip reserved nops and TCC init */
ctx);
}

@@ -615,6 +617,127 @@ static int add_exception_handler(const struct bpf_insn *insn,
return 0;
}

+struct text_poke_args {
+ void *addr;
+ const void *insns;
+ size_t len;
+ atomic_t cpu_count;
+};
+
+static int do_text_poke(void *data)
+{
+ int ret = 0;
+ struct text_poke_args *patch = data;
+
+ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
+ ret = patch_text_nosync(patch->addr, patch->insns, patch->len);
+ atomic_inc(&patch->cpu_count);
+ } else {
+ while (atomic_read(&patch->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ smp_mb();
+ }
+
+ return ret;
+}
+
+static int bpf_text_poke_stop_machine(void *addr, const void *insns, size_t len)
+{
+ struct text_poke_args patch = {
+ .addr = addr,
+ .insns = insns,
+ .len = len,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ return stop_machine(do_text_poke, &patch, cpu_online_mask);
+}
+
+static int gen_call_or_nops(void *target, void *ip, u32 *insns)
+{
+ int i, ret;
+ s64 rvoff;
+ struct rv_jit_context ctx;
+
+ ctx.ninsns = 0;
+ ctx.insns = (u16 *)insns;
+
+ if (!target) {
+ for (i = 0; i < 4; i++)
+ emit(rv_nop(), &ctx);
+ return 0;
+ }
+
+ rvoff = (s64)(target - ip);
+ emit(rv_sd(RV_REG_SP, -8, RV_REG_RA), &ctx);
+ ret = emit_jump_and_link(RV_REG_RA, rvoff, false, &ctx);
+ if (ret)
+ return ret;
+ emit(rv_ld(RV_REG_RA, -8, RV_REG_SP), &ctx);
+
+ return 0;
+
+}
+
+static int bpf_text_poke_call(void *ip, void *old_addr, void *new_addr)
+{
+ int ret;
+ u32 old_insns[4], new_insns[4];
+
+ ret = gen_call_or_nops(old_addr, ip + 4, old_insns);
+ if (ret)
+ return ret;
+
+ ret = gen_call_or_nops(new_addr, ip + 4, new_insns);
+ if (ret)
+ return ret;
+
+ mutex_lock(&text_mutex);
+ if (memcmp(ip, old_insns, sizeof(old_insns))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (memcmp(ip, new_insns, sizeof(new_insns)))
+ ret = bpf_text_poke_stop_machine(ip, new_insns, sizeof(new_insns));
+out:
+ mutex_unlock(&text_mutex);
+ return ret;
+}
+
+static int bpf_text_poke_jump(void *ip, void *old_addr, void *new_addr)
+{
+ int ret = 0;
+ u32 old_insn, new_insn;
+
+ old_insn = old_addr ? rv_jal(RV_REG_ZERO, (s64)(old_addr - ip) >> 1) : rv_nop();
+ new_insn = new_addr ? rv_jal(RV_REG_ZERO, (s64)(new_addr - ip) >> 1) : rv_nop();
+
+ mutex_lock(&text_mutex);
+ if (memcmp(ip, &old_insn, sizeof(old_insn))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (memcmp(ip, &new_insn, sizeof(new_insn)))
+ ret = patch_text_nosync(ip, &new_insn, sizeof(new_insn));
+out:
+ mutex_unlock(&text_mutex);
+ return ret;
+}
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
+ void *old_addr, void *new_addr)
+{
+ if (!is_kernel_text((unsigned long)ip) &&
+ !is_bpf_text_address((unsigned long)ip))
+ return -ENOTSUPP;
+
+ return poke_type == BPF_MOD_CALL ?
+ bpf_text_poke_call(ip, old_addr, new_addr) :
+ bpf_text_poke_jump(ip, old_addr, new_addr);
+}
+
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
bool extra_pass)
{
@@ -1266,7 +1389,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,

void bpf_jit_build_prologue(struct rv_jit_context *ctx)
{
- int stack_adjust = 0, store_offset, bpf_stack_adjust;
+ int i, stack_adjust = 0, store_offset, bpf_stack_adjust;

bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
if (bpf_stack_adjust)
@@ -1293,6 +1416,10 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx)

store_offset = stack_adjust - 8;

+ /* reserve 4 nop insns */
+ for (i = 0; i < 4; i++)
+ emit(rv_nop(), ctx);
+
/* First instruction is always setting the tail-call-counter
* (TCC) register. This instruction is skipped for tail calls.
* Force using a 4-byte (non-compressed) instruction.
--
2.25.1

2023-01-03 09:45:53

by Pu Lehui

Subject: [RFC PATCH bpf-next v2 1/4] bpf: Rollback to text_poke when arch not supported ftrace direct call

From: Pu Lehui <[email protected]>

The bpf trampoline currently attaches to kernel functions via the ftrace
direct call API, while text_poke is used for bpf2bpf attachment and tail
call optimization. For architectures that do not support ftrace direct
calls, text_poke is still able to attach a bpf trampoline to kernel
functions. Let's relax the restriction by falling back to text_poke when
the architecture does not support ftrace direct calls.
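
The resulting decision can be modelled by the following user-space
toy (a sketch only; the names are invented and the real logic lives
in register_fentry/modify_fentry):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS and for
 * ftrace_location(ip) returning a patchable ftrace site. */
static bool arch_has_direct_calls;
static bool ip_is_ftrace_location;

static const char *attach_backend(void)
{
	if (arch_has_direct_calls && ip_is_ftrace_location)
		return "ftrace direct call";
	return "bpf_arch_text_poke";	/* the fallback this patch allows */
}

int main(void)
{
	/* RV64 today: patchable function entry, but no direct call support. */
	arch_has_direct_calls = false;
	ip_is_ftrace_location = true;
	printf("attach via: %s\n", attach_backend());
	return 0;
}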

Signed-off-by: Pu Lehui <[email protected]>
---
kernel/bpf/trampoline.c | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index d6395215b849..386197a7952c 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -228,15 +228,11 @@ static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_ad
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
void *ip = tr->func.addr;
- unsigned long faddr;
int ret;

- faddr = ftrace_location((unsigned long)ip);
- if (faddr) {
- if (!tr->fops)
- return -ENOTSUPP;
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) &&
+ !!ftrace_location((unsigned long)ip))
tr->func.ftrace_managed = true;
- }

if (bpf_trampoline_module_get(tr))
return -ENOENT;
--
2.25.1

2023-01-03 09:48:24

by Pu Lehui

Subject: [RFC PATCH bpf-next v2 2/4] riscv, bpf: Factor out emit_call for kernel and bpf context

From: Pu Lehui <[email protected]>

The current emit_call function is not suitable for kernel
function calls because it stores the return value in bpf
register R0. Factor it out so it can be shared by kernel and
bpf contexts. Meanwhile, simplify the selection logic: a
fixed function address can use jal or auipc+jalr, while a
non-fixed address can only use auipc+jalr.
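
The range rule can be sketched standalone as follows (not the JIT
code itself; fits_jal and fits_auipc_jalr are made-up names that
mirror is_21b_int and in_auipc_jalr_range):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* jal takes a signed 21-bit, 2-byte-aligned offset: roughly +-1 MiB. */
static bool fits_jal(int64_t off)
{
	return off >= -(1LL << 20) && off < (1LL << 20);
}

/* auipc+jalr reaches roughly +-2 GiB around the call site. */
static bool fits_auipc_jalr(int64_t off)
{
	return off >= -(1LL << 31) - (1 << 11) && off < (1LL << 31) - (1 << 11);
}

static const char *pick_call_sequence(int64_t off, bool fixed_addr)
{
	if (fixed_addr && fits_jal(off))
		return "jal";			/* one instruction */
	if (fits_auipc_jalr(off))
		return "auipc+jalr";		/* two instructions */
	return "out of range";
}

int main(void)
{
	printf("%s\n", pick_call_sequence(0x000ffffe, true));	/* jal */
	printf("%s\n", pick_call_sequence(0x10000000, true));	/* auipc+jalr */
	printf("%s\n", pick_call_sequence(0x00001000, false));	/* auipc+jalr */
	return 0;
}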

Signed-off-by: Pu Lehui <[email protected]>
---
arch/riscv/net/bpf_jit_comp64.c | 30 +++++++++++++-----------------
1 file changed, 13 insertions(+), 17 deletions(-)

diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index f2417ac54edd..69ebab81d935 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -428,12 +428,12 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
*rd = RV_REG_T2;
}

-static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
+static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
struct rv_jit_context *ctx)
{
s64 upper, lower;

- if (rvoff && is_21b_int(rvoff) && !force_jalr) {
+ if (rvoff && fixed_addr && is_21b_int(rvoff)) {
emit(rv_jal(rd, rvoff >> 1), ctx);
return 0;
} else if (in_auipc_jalr_range(rvoff)) {
@@ -454,24 +454,17 @@ static bool is_signed_bpf_cond(u8 cond)
cond == BPF_JSGE || cond == BPF_JSLE;
}

-static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
+static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
{
s64 off = 0;
u64 ip;
- u8 rd;
- int ret;

if (addr && ctx->insns) {
ip = (u64)(long)(ctx->insns + ctx->ninsns);
off = addr - ip;
}

- ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
- if (ret)
- return ret;
- rd = bpf_to_rv_reg(BPF_REG_0, ctx);
- emit_mv(rd, RV_REG_A0, ctx);
- return 0;
+ return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
}

static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
@@ -913,7 +906,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* JUMP off */
case BPF_JMP | BPF_JA:
rvoff = rv_offset(i, off, ctx);
- ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+ ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
if (ret)
return ret;
break;
@@ -1032,17 +1025,20 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* function call */
case BPF_JMP | BPF_CALL:
{
- bool fixed;
+ bool fixed_addr;
u64 addr;

mark_call(ctx);
- ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
- &fixed);
+ ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
+ &addr, &fixed_addr);
if (ret < 0)
return ret;
- ret = emit_call(fixed, addr, ctx);
+
+ ret = emit_call(addr, fixed_addr, ctx);
if (ret)
return ret;
+
+ emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
break;
}
/* tail call */
@@ -1057,7 +1053,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
break;

rvoff = epilogue_offset(ctx);
- ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+ ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
if (ret)
return ret;
break;
--
2.25.1