Adding support to inline the bpf_get_func_ip helper call for kprobe
multi programs on x86, because it's a single load instruction from the
struct pt_regs context.
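
For reference, the patched sequence is equivalent to a single 8-byte
load of the ip member of the struct pt_regs context. A rough C-level
sketch of the behaviour (the function name below is made up for
illustration, it's not the emitted BPF bytecode):

  static __always_inline u64 inlined_get_func_ip(struct pt_regs *regs)
  {
          /* what the verifier-patched BPF_LDX_MEM amounts to */
          return regs->ip;
  }
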
Signed-off-by: Jiri Olsa <[email protected]>
---
kernel/bpf/verifier.c | 21 ++++++++++++++++++++-
kernel/trace/bpf_trace.c | 1 +
2 files changed, 21 insertions(+), 1 deletion(-)
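
For context (not part of the patch), a minimal kprobe multi program
that would hit the new inline path could look like the sketch below.
The attach pattern and program name are made up, and it assumes
libbpf's kprobe.multi section convention plus a generated vmlinux.h:

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char LICENSE[] SEC("license") = "GPL";

  SEC("kprobe.multi/ksys_*")
  int BPF_PROG(test_func_ip)
  {
          /* On x86 the verifier patches this call into a single load
           * of pt_regs::ip instead of an actual helper call.
           */
          __u64 ip = bpf_get_func_ip(ctx);

          bpf_printk("func ip: %llx", ip);
          return 0;
  }
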
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d7473fee247c..f125c33a37c9 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -13635,7 +13635,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			continue;
 		}
 
-		/* Implement bpf_get_func_ip inline. */
+		/* Implement tracing bpf_get_func_ip inline. */
 		if (prog_type == BPF_PROG_TYPE_TRACING &&
 		    insn->imm == BPF_FUNC_get_func_ip) {
 			/* Load IP address from ctx - 16 */
@@ -13650,6 +13650,25 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			continue;
 		}
 
+#ifdef CONFIG_X86
+		/* Implement kprobe_multi bpf_get_func_ip inline. */
+		if (prog_type == BPF_PROG_TYPE_KPROBE &&
+		    eatype == BPF_TRACE_KPROBE_MULTI &&
+		    insn->imm == BPF_FUNC_get_func_ip) {
+			/* Load IP address from ctx (struct pt_regs) ip */
+			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+						  offsetof(struct pt_regs, ip));
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
+			if (!new_prog)
+				return -ENOMEM;
+
+			env->prog = prog = new_prog;
+			insn = new_prog->insnsi + i + delta;
+			continue;
+		}
+#endif
+
 patch_call_imm:
 		fn = env->ops->get_func_proto(insn->imm, env->prog);
 		/* all functions that have prototype and verifier allowed
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 64891b7b0885..c1998b9d5531 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1039,6 +1039,7 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
 
 BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
 {
+	/* This helper call is inlined by verifier on x86. */
 	return instruction_pointer(regs);
 }
 
--
2.35.1