From: Xu Kuohai <[email protected]>
This series adds ftrace direct call support for arm64, which is required to
attach a bpf trampoline to fentry.

There is not yet agreement on how ftrace direct calls should be supported on
arm64, but no patch other than the one I posted in [1] has been proposed, so
this series continues that work and adds long jump support. Ftrace direct
calls now work regardless of the distance between the callsite and the custom
trampoline.
[1] https://lore.kernel.org/bpf/[email protected]/
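
For background, a custom trampoline is attached to a function's fentry site
with register_ftrace_direct(). A minimal sketch of a caller, modeled on
samples/ftrace/ftrace-direct.c (the traced symbol and the trampoline name are
illustrative; my_tramp would be a hand-written assembly stub that preserves
the argument registers and x30):

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sched.h>

extern void my_tramp(void);	/* hand-written asm stub (illustrative) */

static int __init ftrace_direct_init(void)
{
	/* Redirect the fentry callsite of wake_up_process to my_tramp */
	return register_ftrace_direct((unsigned long)wake_up_process,
				      (unsigned long)my_tramp);
}

static void __exit ftrace_direct_exit(void)
{
	unregister_ftrace_direct((unsigned long)wake_up_process,
				 (unsigned long)my_tramp);
}

module_init(ftrace_direct_init);
module_exit(ftrace_direct_exit);
MODULE_LICENSE("GPL");

With this series applied, the same registration works on arm64 even when
my_tramp is out of direct-branch range of the callsite.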
Xu Kuohai (4):
ftrace: Allow users to disable ftrace direct call
arm64: ftrace: Support long jump for ftrace direct call
arm64: ftrace: Add ftrace direct call support
ftrace: Fix dead loop caused by direct call in ftrace selftest
arch/arm64/Kconfig | 2 +
arch/arm64/Makefile | 4 +
arch/arm64/include/asm/ftrace.h | 35 ++++--
arch/arm64/include/asm/patching.h | 2 +
arch/arm64/include/asm/ptrace.h | 6 +-
arch/arm64/kernel/asm-offsets.c | 1 +
arch/arm64/kernel/entry-ftrace.S | 39 ++++--
arch/arm64/kernel/ftrace.c | 198 ++++++++++++++++++++++++++++--
arch/arm64/kernel/patching.c | 14 +++
arch/arm64/net/bpf_jit_comp.c | 4 +
kernel/trace/Kconfig | 7 +-
kernel/trace/ftrace.c | 10 +-
kernel/trace/trace_selftest.c | 2 +
13 files changed, 295 insertions(+), 29 deletions(-)
--
2.30.2
From: Xu Kuohai <[email protected]>
Add ftrace direct call support for arm64.

1. When there is only a custom trampoline, patch the fentry callsite to
   call the custom trampoline directly.
2. When the ftrace caller and a custom trampoline coexist, jump from
   fentry to the ftrace caller first, then jump to the custom trampoline
   when the ftrace caller exits. Since pt_regs->orig_x0 is currently
   unused by ftrace, its slot is reused to carry the custom trampoline
   address from the ftrace caller to the exit path (see the sketch after
   this description).

In short, this patch does for arm64 what commit 562955fe6a55
("ftrace/x86: Add register_ftrace_direct() for custom trampolines") did
for x86.
Signed-off-by: Xu Kuohai <[email protected]>
Acked-by: Song Liu <[email protected]>
---
arch/arm64/Kconfig | 2 ++
arch/arm64/include/asm/ftrace.h | 12 ++++++++++++
arch/arm64/include/asm/ptrace.h | 6 +++++-
arch/arm64/kernel/asm-offsets.c | 1 +
arch/arm64/kernel/entry-ftrace.S | 18 +++++++++++++++---
5 files changed, 35 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 571cc234d0b3..e2f6ca75b881 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -180,6 +180,8 @@ config ARM64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS \
+ if DYNAMIC_FTRACE_WITH_REGS
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
if DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 40e63435965b..b07a3c24f918 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -67,6 +67,18 @@ extern void return_to_handler(void);
unsigned long ftrace_call_adjust(unsigned long addr);
+#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
+ unsigned long addr)
+{
+ /*
+ * Place custom trampoline address in regs->custom_tramp to let ftrace
+ * trampoline jump to it.
+ */
+ regs->custom_tramp = addr;
+}
+#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
struct dyn_ftrace;
struct ftrace_ops;
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 41b332c054ab..9701c38fcc5f 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -185,7 +185,11 @@ struct pt_regs {
u64 pstate;
};
};
- u64 orig_x0;
+ union {
+ u64 orig_x0;
+ /* Only used by ftrace to save custom trampoline address */
+ u64 custom_tramp;
+ };
#ifdef __AARCH64EB__
u32 unused2;
s32 syscallno;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 1197e7679882..56d4acc52a86 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -80,6 +80,7 @@ int main(void)
DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1));
DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
+ DEFINE(S_CUSTOM_TRAMP, offsetof(struct pt_regs, custom_tramp));
DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
BLANK();
#ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 0bebe3ffdb58..ae03df89d031 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -62,6 +62,9 @@
str x29, [sp, #S_FP]
.endif
+ /* Set custom_tramp to zero */
+ str xzr, [sp, #S_CUSTOM_TRAMP]
+
/* Save the callsite's SP and LR */
add x10, sp, #(PT_REGS_SIZE + 16)
stp x9, x10, [sp, #S_LR]
@@ -114,12 +117,21 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
/* Restore the callsite's FP, LR, PC */
ldr x29, [sp, #S_FP]
ldr x30, [sp, #S_LR]
- ldr x9, [sp, #S_PC]
-
+ ldr x10, [sp, #S_PC]
+
+ ldr x11, [sp, #S_CUSTOM_TRAMP]
+ cbz x11, 1f
+ /* Set x9 to parent ip before jump to custom trampoline */
+ mov x9, x30
+ /* Set lr to self ip */
+ ldr x30, [sp, #S_PC]
+ /* Set x10 (the target of the ret below) to the custom trampoline */
+ mov x10, x11
+1:
/* Restore the callsite's SP */
add sp, sp, #PT_REGS_SIZE + 16
- ret x9
+ ret x10
SYM_CODE_END(ftrace_common)
#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
--
2.30.2