This series tries to get arm64 ftrace with regs merged.
The latter two patches are from Torsten.
The whole series applies cleanly on v5.3-rc5.
Jisheng Zhang (1):
ftrace: introduce ftrace_call_init
Torsten Duwe (2):
arm64: implement ftrace with regs
arm64: use -fpatchable-function-entry if available
arch/arm64/Kconfig | 2 +
arch/arm64/Makefile | 5 ++
arch/arm64/include/asm/ftrace.h | 12 ++-
arch/arm64/include/asm/module.h | 3 +-
arch/arm64/kernel/entry-ftrace.S | 125 ++++++++++++++++++++++++++--
arch/arm64/kernel/ftrace.c | 138 +++++++++++++++++++++++--------
arch/arm64/kernel/module-plts.c | 3 +-
arch/arm64/kernel/module.c | 2 +-
include/linux/ftrace.h | 1 +
kernel/module.c | 7 +-
kernel/trace/ftrace.c | 4 +
11 files changed, 257 insertions(+), 45 deletions(-)
--
2.23.0.rc1
On some architectures, FTRACE_WITH_REGS is implemented with gcc's
-fpatchable-function-entry (=2): gcc adds 2 NOPs at the beginning
of each function, which makes MCOUNT_ADDR useless. In the ftrace
common framework, MCOUNT_ADDR is mostly used to "init" the NOP, so
let's introduce ftrace_call_init().
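For an architecture that still uses a real mcount/fentry call, the new hook
can simply wrap ftrace_make_nop(); a minimal sketch (illustrative only):

int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
{
	/* one-time conversion of the compiler-generated mcount call to a NOP */
	return ftrace_make_nop(mod, rec, (unsigned long)_mcount);
}

An architecture built with -fpatchable-function-entry has no call to convert
and can do its own one-time setup here instead; see the arm64 implementation
in the next patch.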
Signed-off-by: Jisheng Zhang <[email protected]>
---
include/linux/ftrace.h | 1 +
kernel/trace/ftrace.c | 4 ++++
2 files changed, 5 insertions(+)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 8a8cb3c401b2..8175ffb671f0 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -458,6 +458,7 @@ extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);
+extern int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec);
void ftrace_modify_all_code(int command);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eca34503f178..9df5a66a6811 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2500,7 +2500,11 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
if (unlikely(ftrace_disabled))
return 0;
+#ifdef MCOUNT_ADDR
ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+#else
+ ret = ftrace_call_init(mod, rec);
+#endif
if (ret) {
ftrace_bug_type = FTRACE_BUG_INIT;
ftrace_bug(ret, rec);
--
2.23.0.rc1
From: Torsten Duwe <[email protected]>
Implement ftrace with regs, based on the new gcc flag
-fpatchable-function-entry (=2).
Now that gcc 8 adds 2 NOPs at the beginning of each function, replace
the first NOP thus generated with a quick LR saver (move it to the
scratch reg x9), so the 2nd replacement insn, the call to ftrace, does
not clobber the value. Ftrace will then generate the standard stack
frames.
Note that patchable-function-entry in GCC disables IPA-RA, which means
ABI register calling conventions are obeyed and scratch registers such
as x9 are available.
Introduce and handle an ftrace_regs_trampoline for module PLTs, right
after ftrace_trampoline in an ftrace_trampolines[2] array, and double
the size of the corresponding special section.
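For illustration, once both NOPs have been patched, the entry of a traced
function looks roughly like this (sketch; the callee may equally be
ftrace_regs_caller):

func:
	mov	x9, x30			// was the 1st NOP: save LR to a scratch reg
	bl	ftrace_caller		// was the 2nd NOP: branch into ftrace
	...				// original function body

While tracing is disabled, only the LR saver is installed and the second
instruction remains a NOP, so just one extra instruction is executed per
function entry.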
Signed-off-by: Torsten Duwe <[email protected]>
---
arch/arm64/include/asm/ftrace.h | 12 ++-
arch/arm64/include/asm/module.h | 3 +-
arch/arm64/kernel/entry-ftrace.S | 125 ++++++++++++++++++++++++++--
arch/arm64/kernel/ftrace.c | 138 +++++++++++++++++++++++--------
arch/arm64/kernel/module-plts.c | 3 +-
arch/arm64/kernel/module.c | 2 +-
6 files changed, 239 insertions(+), 44 deletions(-)
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 5ab5200b2bdc..fde94a905b25 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -11,9 +11,12 @@
#include <asm/insn.h>
#define HAVE_FUNCTION_GRAPH_FP_TEST
-#define MCOUNT_ADDR ((unsigned long)_mcount)
#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
#ifndef __ASSEMBLY__
#include <linux/compat.h>
@@ -30,6 +33,13 @@ extern void return_to_handler(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
+ /*
+ * For -fpatchable-function-entry=2, there's first the
+ * LR saver, and only then the actual call insn.
+ * Advance addr accordingly.
+ */
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ return (addr + AARCH64_INSN_SIZE);
/*
* addr is the address of the mcount call instruction.
* recordmcount does the necessary offset calculation.
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index f80e13cbf8ec..5463a2cf0165 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -21,7 +21,8 @@ struct mod_arch_specific {
struct mod_plt_sec init;
/* for CONFIG_DYNAMIC_FTRACE */
- struct plt_entry *ftrace_trampoline;
+ struct plt_entry *ftrace_trampolines;
+#define MOD_ARCH_NR_FTRACE_TRAMPOLINES 2
};
#endif
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 33d003d80121..4cfc0a886e4e 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -7,6 +7,7 @@
*/
#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
@@ -121,6 +122,7 @@ EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)
#else /* CONFIG_DYNAMIC_FTRACE */
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
* _mcount() is used to build the kernel with -pg option, but all the branch
* instructions to _mcount() are replaced to NOP initially at kernel start up,
@@ -160,11 +162,6 @@ GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
mcount_exit
ENDPROC(ftrace_caller)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(ftrace_stub)
- ret
-ENDPROC(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
@@ -184,7 +181,125 @@ ENTRY(ftrace_graph_caller)
mcount_exit
ENDPROC(ftrace_graph_caller)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
+ .macro ftrace_regs_entry, allregs=0
+ /* make room for pt_regs, plus a callee frame */
+ sub sp, sp, #(S_FRAME_SIZE + 16)
+
+ /* save function arguments */
+ stp x0, x1, [sp, #S_X0]
+ stp x2, x3, [sp, #S_X2]
+ stp x4, x5, [sp, #S_X4]
+ stp x6, x7, [sp, #S_X6]
+ stp x8, x9, [sp, #S_X8]
+
+ .if \allregs == 1
+ stp x10, x11, [sp, #S_X10]
+ stp x12, x13, [sp, #S_X12]
+ stp x14, x15, [sp, #S_X14]
+ stp x16, x17, [sp, #S_X16]
+ stp x18, x19, [sp, #S_X18]
+ stp x20, x21, [sp, #S_X20]
+ stp x22, x23, [sp, #S_X22]
+ stp x24, x25, [sp, #S_X24]
+ stp x26, x27, [sp, #S_X26]
+ .endif
+
+ /* Save fp and x28, which is used in this function. */
+ stp x28, x29, [sp, #S_X28]
+
+ /* The stack pointer as it was on ftrace_caller entry... */
+ add x28, sp, #(S_FRAME_SIZE + 16)
+ /* ...and the link Register at callee entry */
+ stp x9, x28, [sp, #S_LR] /* to pt_regs.r[30] and .sp */
+ /* The program counter just after the ftrace call site */
+ str lr, [sp, #S_PC]
+
+ /* Now fill in callee's preliminary stackframe. */
+ stp x29, x9, [sp, #S_FRAME_SIZE]
+ /* Let FP point to it. */
+ add x29, sp, #S_FRAME_SIZE
+
+ /* Our stackframe, stored inside pt_regs. */
+ stp x29, x30, [sp, #S_STACKFRAME]
+ add x29, sp, #S_STACKFRAME
+ .endm
+
+ENTRY(ftrace_regs_caller)
+ ftrace_regs_entry 1
+ b ftrace_common
+ENDPROC(ftrace_regs_caller)
+
+ENTRY(ftrace_caller)
+ ftrace_regs_entry 0
+ b ftrace_common
+ENDPROC(ftrace_caller)
+
+ENTRY(ftrace_common)
+
+ mov x3, sp /* pt_regs are @sp */
+ ldr_l x2, function_trace_op, x0
+ mov x1, x9 /* parent IP */
+ sub x0, lr, #AARCH64_INSN_SIZE
+
+GLOBAL(ftrace_call)
+ bl ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+ nop // If enabled, this will be replaced
+ // "b ftrace_graph_caller"
+#endif
+
+/*
+ * GCC's patchable-function-entry implicitly disables IPA-RA,
+ * so all non-argument registers are either scratch / dead
+ * or callee-saved (within the ftrace framework). Function
+ * arguments of the call we are intercepting right now however
+ * need to be preserved in any case.
+ */
+ftrace_common_return:
+ /* restore function args */
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #S_X2]
+ ldp x4, x5, [sp, #S_X4]
+ ldp x6, x7, [sp, #S_X6]
+ ldr x8, [sp, #S_X8]
+
+ /* restore fp and x28 */
+ ldp x28, x29, [sp, #S_X28]
+
+ ldr lr, [sp, #S_LR]
+ ldr x9, [sp, #S_PC]
+ /* clean up both frames, ours and callee preliminary */
+ add sp, sp, #S_FRAME_SIZE + 16
+
+ ret x9
+ENDPROC(ftrace_common)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+ ldr x0, [sp, #S_PC] /* pc */
+ sub x0, x0, #AARCH64_INSN_SIZE
+ add x1, sp, #S_LR /* &lr */
+ ldr x2, [sp, #S_FRAME_SIZE] /* fp */
+ bl prepare_ftrace_return
+ b ftrace_common_return
+ENDPROC(ftrace_graph_caller)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(ftrace_stub)
+ ret
+ENDPROC(ftrace_stub)
+
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* void return_to_handler(void)
*
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 171773257974..faf339e90319 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -62,6 +62,46 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
return ftrace_modify_code(pc, 0, new, false);
}
+#ifdef CONFIG_ARM64_MODULE_PLTS
+static int install_ftrace_trampoline(struct module *mod, unsigned long *addr)
+{
+ struct plt_entry trampoline, *mod_trampoline;
+
+ /*
+ * Iterate over
+ * mod->arch.ftrace_trampolines[MOD_ARCH_NR_FTRACE_TRAMPOLINES]
+ * The assignment to various ftrace functions happens here.
+ */
+ if (*addr == FTRACE_ADDR)
+ mod_trampoline = &mod->arch.ftrace_trampolines[0];
+ else if (*addr == FTRACE_REGS_ADDR)
+ mod_trampoline = &mod->arch.ftrace_trampolines[1];
+ else
+ return -EINVAL;
+
+ trampoline = get_plt_entry(*addr, mod_trampoline);
+
+ /*
+ * Note that PLTs are place relative, and plt_entries_equal()
+ * checks whether they point to the same target. Here, we need
+ * to check if the actual opcodes are in fact identical,
+ * regardless of the offset in memory so use memcmp() instead.
+ */
+ if (memcmp(mod_trampoline, &trampoline, sizeof(trampoline))) {
+ /* point the trampoline at our ftrace entry point */
+ module_disable_ro(mod);
+ *mod_trampoline = trampoline;
+ module_enable_ro(mod, true);
+
+ /* update trampoline before patching in the branch */
+ smp_wmb();
+ }
+ *addr = (unsigned long)(void *)mod_trampoline;
+
+ return 0;
+}
+#endif
+
/*
* Turn on the call to ftrace_caller() in instrumented function
*/
@@ -73,8 +113,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
- struct plt_entry trampoline, *dst;
struct module *mod;
+ int ret;
/*
* On kernels that support module PLTs, the offset between the
@@ -93,40 +133,13 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (WARN_ON(!mod))
return -EINVAL;
- /*
- * There is only one ftrace trampoline per module. For now,
- * this is not a problem since on arm64, all dynamic ftrace
- * invocations are routed via ftrace_caller(). This will need
- * to be revisited if support for multiple ftrace entry points
- * is added in the future, but for now, the pr_err() below
- * deals with a theoretical issue only.
- *
- * Note that PLTs are place relative, and plt_entries_equal()
- * checks whether they point to the same target. Here, we need
- * to check if the actual opcodes are in fact identical,
- * regardless of the offset in memory so use memcmp() instead.
- */
- dst = mod->arch.ftrace_trampoline;
- trampoline = get_plt_entry(addr, dst);
- if (memcmp(dst, &trampoline, sizeof(trampoline))) {
- if (plt_entry_is_initialized(dst)) {
- pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
- return -EINVAL;
- }
-
- /* point the trampoline to our ftrace entry point */
- module_disable_ro(mod);
- *dst = trampoline;
- module_enable_ro(mod, true);
-
- /*
- * Ensure updated trampoline is visible to instruction
- * fetch before we patch in the branch.
- */
- __flush_icache_range((unsigned long)&dst[0],
- (unsigned long)&dst[1]);
- }
- addr = (unsigned long)dst;
+ /* Check against our well-known list of ftrace entry points */
+ if (addr == FTRACE_ADDR || addr == FTRACE_REGS_ADDR) {
+ ret = install_ftrace_trampoline(mod, &addr);
+ if (ret < 0)
+ return ret;
+ } else
+ return -EINVAL;
#else /* CONFIG_ARM64_MODULE_PLTS */
return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
@@ -138,6 +151,45 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return ftrace_modify_code(pc, old, new, true);
}
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ u32 old, new;
+
+ old = aarch64_insn_gen_branch_imm(pc, old_addr,
+ AARCH64_INSN_BRANCH_LINK);
+ new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+/*
+ * Ftrace with regs generates the tracer calls as close as possible to
+ * the function entry; no stack frame has been set up at that point.
+ * In order to make another call e.g to ftrace_caller, the LR must be
+ * saved from being overwritten.
+ * Between two functions, and with IPA-RA turned off, the scratch registers
+ * are available, so move the LR to x9 before calling into ftrace.
+ *
+ * This function is called once during kernel startup for each call site.
+ * The address passed is that of the actual branch, so patch in the LR saver
+ * just before that.
+ */
+static int ftrace_setup_lr_saver(unsigned long addr)
+{
+ u32 old, new;
+
+ old = aarch64_insn_gen_nop();
+ /* "mov x9, lr" is officially aliased from "orr x9, xzr, lr". */
+ new = aarch64_insn_gen_logical_shifted_reg(AARCH64_INSN_REG_9,
+ AARCH64_INSN_REG_ZR, AARCH64_INSN_REG_LR, 0,
+ AARCH64_INSN_VARIANT_64BIT, AARCH64_INSN_LOGIC_ORR);
+ return ftrace_modify_code(addr - AARCH64_INSN_SIZE, old, new, true);
+}
+#endif
+
/*
* Turn off the call to ftrace_caller() in instrumented function
*/
@@ -196,6 +248,22 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return ftrace_modify_code(pc, old, new, validate);
}
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ unsigned long pc = rec->ip;
+
+ /*
+ * -fpatchable-function-entry= does not generate a profiling call
+ * initially; the NOPs are already there. So instead,
+ * put the LR saver there ahead of time, in order to avoid
+ * any race condition over patching 2 instructions.
+ */
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ return ftrace_setup_lr_saver(pc);
+ else
+ return ftrace_make_nop(mod, rec, (unsigned long)_mcount);
+}
+
void arch_ftrace_update_code(int command)
{
command |= FTRACE_MAY_SLEEP;
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 044c0ae4d6c8..acafb8a2244d 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -330,7 +330,8 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
tramp->sh_type = SHT_NOBITS;
tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
tramp->sh_addralign = __alignof__(struct plt_entry);
- tramp->sh_size = sizeof(struct plt_entry);
+ tramp->sh_size = MOD_ARCH_NR_FTRACE_TRAMPOLINES
+ * sizeof(struct plt_entry);
}
return 0;
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 03ff15bffbb6..4e1d99805c5a 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -483,7 +483,7 @@ int module_finalize(const Elf_Ehdr *hdr,
#ifdef CONFIG_ARM64_MODULE_PLTS
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
!strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
- me->arch.ftrace_trampoline = (void *)s->sh_addr;
+ me->arch.ftrace_trampolines = (void *)s->sh_addr;
#endif
}
--
2.23.0.rc1
From: Torsten Duwe <[email protected]>
Test whether gcc supports -fpatchable-function-entry and use it to promote
DYNAMIC_FTRACE to DYNAMIC_FTRACE_WITH_REGS. Amend support for the new
object section that holds the locations (__patchable_function_entries) and
define a proper "notrace" attribute to switch it off.
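As a rough illustration (a sketch only; exact labels, section flags and
directives vary with the gcc version), a function built with
-fpatchable-function-entry=2 comes out of the compiler like this:

foo:
	nop				// 1st patchable NOP
	nop				// 2nd patchable NOP
	...				// body of foo
	.section __patchable_function_entries, "aw"
	.8byte	foo			// recorded location of the patchable entry

The kernel's ftrace_call_adjust() (see the previous patch) then advances each
recorded location by one instruction so that rec->ip points at the second
NOP, the actual call site.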
Signed-off-by: Torsten Duwe <[email protected]>
---
arch/arm64/Kconfig | 2 ++
arch/arm64/Makefile | 5 +++++
kernel/module.c | 7 ++++++-
3 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3adcec05b1f6..663392d1eae2 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -144,6 +144,8 @@ config ARM64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS \
+ if $(cc-option,-fpatchable-function-entry=2)
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 61de992bbea3..e827ad0298ab 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -104,6 +104,11 @@ ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
endif
+ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
+ KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=2
+endif
+
# Default value
head-y := arch/arm64/kernel/head.o
diff --git a/kernel/module.c b/kernel/module.c
index 5933395af9a0..0759f89adbd3 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3136,7 +3136,12 @@ static int find_module_sections(struct module *mod, struct load_info *info)
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/* sechdrs[0].sh_size is always zero */
- mod->ftrace_callsites = section_objs(info, "__mcount_loc",
+ mod->ftrace_callsites = section_objs(info,
+#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
+ "__patchable_function_entries",
+#else
+ "__mcount_loc",
+#endif
sizeof(*mod->ftrace_callsites),
&mod->num_ftrace_callsites);
#endif
--
2.23.0.rc1
On Mon, 19 Aug 2019 19:16:22 +0800 Jisheng Zhang wrote:
> On some architectures, FTRACE_WITH_REGS is implemented with gcc's
> -fpatchable-function-entry (=2): gcc adds 2 NOPs at the beginning
> of each function, which makes MCOUNT_ADDR useless. In the ftrace
> common framework, MCOUNT_ADDR is mostly used to "init" the NOP, so
> let's introduce ftrace_call_init().
>
> Signed-off-by: Jisheng Zhang <[email protected]>
> ---
> include/linux/ftrace.h | 1 +
> kernel/trace/ftrace.c | 4 ++++
> 2 files changed, 5 insertions(+)
>
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 8a8cb3c401b2..8175ffb671f0 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -458,6 +458,7 @@ extern void ftrace_regs_caller(void);
> extern void ftrace_call(void);
> extern void ftrace_regs_call(void);
> extern void mcount_call(void);
> +extern int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec);
>
> void ftrace_modify_all_code(int command);
>
> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> index eca34503f178..9df5a66a6811 100644
> --- a/kernel/trace/ftrace.c
> +++ b/kernel/trace/ftrace.c
> @@ -2500,7 +2500,11 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
> if (unlikely(ftrace_disabled))
> return 0;
>
> +#ifdef MCOUNT_ADDR
> ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
> +#else
> + ret = ftrace_call_init(mod, rec);
> +#endif
If we want to remove MCOUNT_ADDR from all architectures, I could cook up
patches for this.
Hi,
On Mon, 19 Aug 2019, Jisheng Zhang wrote:
> On some architectures, FTRACE_WITH_REGS is implemented with gcc's
> -fpatchable-function-entry (=2): gcc adds 2 NOPs at the beginning
> of each function, which makes MCOUNT_ADDR useless. In the ftrace
> common framework, MCOUNT_ADDR is mostly used to "init" the NOP, so
> let's introduce ftrace_call_init().
>
> Signed-off-by: Jisheng Zhang <[email protected]>
> ---
> include/linux/ftrace.h | 1 +
> kernel/trace/ftrace.c | 4 ++++
> 2 files changed, 5 insertions(+)
>
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 8a8cb3c401b2..8175ffb671f0 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -458,6 +458,7 @@ extern void ftrace_regs_caller(void);
> extern void ftrace_call(void);
> extern void ftrace_regs_call(void);
> extern void mcount_call(void);
> +extern int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec);
>
> void ftrace_modify_all_code(int command);
>
> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> index eca34503f178..9df5a66a6811 100644
> --- a/kernel/trace/ftrace.c
> +++ b/kernel/trace/ftrace.c
> @@ -2500,7 +2500,11 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
> if (unlikely(ftrace_disabled))
> return 0;
>
> +#ifdef MCOUNT_ADDR
> ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
> +#else
> + ret = ftrace_call_init(mod, rec);
> +#endif
> if (ret) {
> ftrace_bug_type = FTRACE_BUG_INIT;
> ftrace_bug(ret, rec);
I may be missing something, but the patch does not seem to be complete.
There is no ftrace_call_init() implemented. MCOUNT_ADDR is still defined,
so it does not change much. I don't think it is what Mark had in his mind.
Miroslav
Hi,
On Tue, 20 Aug 2019 11:27:38 +0200 (CEST) Miroslav Benes <[email protected]> wrote:
>
>
> Hi,
>
> On Mon, 19 Aug 2019, Jisheng Zhang wrote:
>
> > On some architectures, FTRACE_WITH_REGS is implemented with gcc's
> > -fpatchable-function-entry (=2): gcc adds 2 NOPs at the beginning
> > of each function, which makes MCOUNT_ADDR useless. In the ftrace
> > common framework, MCOUNT_ADDR is mostly used to "init" the NOP, so
> > let's introduce ftrace_call_init().
> >
> > Signed-off-by: Jisheng Zhang <[email protected]>
> > ---
> > include/linux/ftrace.h | 1 +
> > kernel/trace/ftrace.c | 4 ++++
> > 2 files changed, 5 insertions(+)
> >
> > diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> > index 8a8cb3c401b2..8175ffb671f0 100644
> > --- a/include/linux/ftrace.h
> > +++ b/include/linux/ftrace.h
> > @@ -458,6 +458,7 @@ extern void ftrace_regs_caller(void);
> > extern void ftrace_call(void);
> > extern void ftrace_regs_call(void);
> > extern void mcount_call(void);
> > +extern int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec);
> >
> > void ftrace_modify_all_code(int command);
> >
> > diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> > index eca34503f178..9df5a66a6811 100644
> > --- a/kernel/trace/ftrace.c
> > +++ b/kernel/trace/ftrace.c
> > @@ -2500,7 +2500,11 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
> > if (unlikely(ftrace_disabled))
> > return 0;
> >
> > +#ifdef MCOUNT_ADDR
> > ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
> > +#else
> > + ret = ftrace_call_init(mod, rec);
> > +#endif
> > if (ret) {
> > ftrace_bug_type = FTRACE_BUG_INIT;
> > ftrace_bug(ret, rec);
>
> I may be missing something, but the patch does not seem to be complete.
> There is no ftrace_call_init() implemented. MCOUNT_ADDR is still defined,
> so it does not change much. I don't think it is what Mark had in his mind.
>
Yes; except for arm64, MCOUNT_ADDR is still defined on the other
architectures. If we want to remove MCOUNT_ADDR from all architectures,
I will cook up patches for this purpose.
Thanks
Hi all,
On Tue, 20 Aug 2019 11:27:38 +0200 (CEST) Miroslav Benes wrote:
>
> Hi,
>
> On Mon, 19 Aug 2019, Jisheng Zhang wrote:
>
> > On some architectures, FTRACE_WITH_REGS is implemented with gcc's
> > -fpatchable-function-entry (=2): gcc adds 2 NOPs at the beginning
> > of each function, which makes MCOUNT_ADDR useless. In the ftrace
> > common framework, MCOUNT_ADDR is mostly used to "init" the NOP, so
> > let's introduce ftrace_call_init().
> >
> > Signed-off-by: Jisheng Zhang <[email protected]>
> > ---
> > include/linux/ftrace.h | 1 +
> > kernel/trace/ftrace.c | 4 ++++
> > 2 files changed, 5 insertions(+)
> >
> > diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> > index 8a8cb3c401b2..8175ffb671f0 100644
> > --- a/include/linux/ftrace.h
> > +++ b/include/linux/ftrace.h
> > @@ -458,6 +458,7 @@ extern void ftrace_regs_caller(void);
> > extern void ftrace_call(void);
> > extern void ftrace_regs_call(void);
> > extern void mcount_call(void);
> > +extern int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec);
> >
> > void ftrace_modify_all_code(int command);
> >
> > diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> > index eca34503f178..9df5a66a6811 100644
> > --- a/kernel/trace/ftrace.c
> > +++ b/kernel/trace/ftrace.c
> > @@ -2500,7 +2500,11 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
> > if (unlikely(ftrace_disabled))
> > return 0;
> >
> > +#ifdef MCOUNT_ADDR
> > ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
> > +#else
> > + ret = ftrace_call_init(mod, rec);
> > +#endif
> > if (ret) {
> > ftrace_bug_type = FTRACE_BUG_INIT;
> > ftrace_bug(ret, rec);
>
> I may be missing something, but the patch does not seem to be complete.
> There is no ftrace_call_init() implemented. MCOUNT_ADDR is still defined,
> so it does not change much. I don't think it is what Mark had in his mind.
>
Here is a patch to remove MCOUNT_ADDR from all architectures, but I think
it isn't as good as the current MCOUNT_ADDR approach. So how should we
remove the MCOUNT_ADDR dependency? Could we patch ftrace_make_nop() to
accept one more parameter, i.e.
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr, bool init)
or give "0" a special meaning to tell ftrace_make_nop() to "init"?
Thanks
<---8
ftrace: introduce ftrace_call_init
On some architectures, FTRACE_WITH_REGS is implemented with gcc's
-fpatchable-function-entry (=2): gcc adds 2 NOPs at the beginning
of each function, which makes MCOUNT_ADDR useless. In the ftrace
common framework, MCOUNT_ADDR is mostly used to "init" the NOP, so
let's introduce ftrace_call_init() and remove MCOUNT_ADDR.
Signed-off-by: Jisheng Zhang <[email protected]>
---
arch/arm/include/asm/ftrace.h | 1 -
arch/arm/kernel/ftrace.c | 5 +++++
arch/arm64/include/asm/ftrace.h | 1 -
arch/arm64/kernel/ftrace.c | 5 +++++
arch/csky/kernel/ftrace.c | 5 +++++
arch/ia64/include/asm/ftrace.h | 2 --
arch/ia64/kernel/ftrace.c | 5 +++++
arch/microblaze/include/asm/ftrace.h | 1 -
arch/microblaze/kernel/ftrace.c | 5 +++++
arch/mips/include/asm/ftrace.h | 1 -
arch/mips/kernel/ftrace.c | 9 +++++++--
arch/nds32/include/asm/ftrace.h | 1 -
arch/nds32/kernel/ftrace.c | 5 +++++
arch/parisc/include/asm/ftrace.h | 1 -
arch/parisc/kernel/ftrace.c | 5 +++++
arch/powerpc/include/asm/ftrace.h | 1 -
arch/powerpc/kernel/trace/ftrace.c | 5 +++++
arch/riscv/include/asm/ftrace.h | 1 -
arch/riscv/kernel/ftrace.c | 5 +++++
arch/s390/include/asm/ftrace.h | 1 -
arch/s390/kernel/ftrace.c | 22 +++++++++++++++++-----
arch/sh/include/asm/ftrace.h | 2 --
arch/sh/kernel/ftrace.c | 5 +++++
arch/sparc/include/asm/ftrace.h | 1 -
arch/sparc/kernel/ftrace.c | 5 +++++
arch/x86/include/asm/ftrace.h | 1 -
arch/x86/kernel/ftrace.c | 28 +++++++++++++++-------------
arch/xtensa/include/asm/ftrace.h | 1 -
include/linux/ftrace.h | 1 +
kernel/trace/ftrace.c | 2 +-
30 files changed, 96 insertions(+), 37 deletions(-)
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 18b0197f2384..afb01ab7f956 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -7,7 +7,6 @@
#endif
#ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR ((unsigned long)(__gnu_mcount_nc))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index bda949fd84e8..fd3547bb4bfa 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -172,6 +172,11 @@ int ftrace_make_nop(struct module *mod,
return ret;
}
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, (unsigned long)__gnu_mcount_nc);
+}
+
int __init ftrace_dyn_arch_init(void)
{
return 0;
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 5ab5200b2bdc..de6f35ef3825 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -11,7 +11,6 @@
#include <asm/insn.h>
#define HAVE_FUNCTION_GRAPH_FP_TEST
-#define MCOUNT_ADDR ((unsigned long)_mcount)
#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 171773257974..8298b8a7d8f6 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -196,6 +196,11 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return ftrace_modify_code(pc, old, new, validate);
}
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, (unsigned long)_mcount);
+}
+
void arch_ftrace_update_code(int command)
{
command |= FTRACE_MAY_SLEEP;
diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c
index 44f4880179b7..c1fce15caaa4 100644
--- a/arch/csky/kernel/ftrace.c
+++ b/arch/csky/kernel/ftrace.c
@@ -122,6 +122,11 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return ftrace_modify_code(rec->ip, addr, false, false);
}
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, (unsigned long)_mcount);
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = ftrace_modify_code((unsigned long)&ftrace_call,
diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
index a07a8e575453..2aefd2098fc5 100644
--- a/arch/ia64/include/asm/ftrace.h
+++ b/arch/ia64/include/asm/ftrace.h
@@ -9,8 +9,6 @@
extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
#define mcount _mcount
-/* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
-#define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
#define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
static inline unsigned long ftrace_call_adjust(unsigned long addr)
diff --git a/arch/ia64/kernel/ftrace.c b/arch/ia64/kernel/ftrace.c
index cee411e647ca..0cb2cbcea4fc 100644
--- a/arch/ia64/kernel/ftrace.c
+++ b/arch/ia64/kernel/ftrace.c
@@ -169,6 +169,11 @@ int ftrace_make_nop(struct module *mod,
return ftrace_modify_code(rec->ip, NULL, new, 0);
}
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, ((struct fnptr *)mcount)->ip);
+}
+
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
diff --git a/arch/microblaze/include/asm/ftrace.h b/arch/microblaze/include/asm/ftrace.h
index 5db7f4489f05..02d185ef22fe 100644
--- a/arch/microblaze/include/asm/ftrace.h
+++ b/arch/microblaze/include/asm/ftrace.h
@@ -4,7 +4,6 @@
#ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */
#ifndef __ASSEMBLY__
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index 224eea40e1ee..f3606e3cba94 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -149,6 +149,11 @@ int ftrace_make_nop(struct module *mod,
return ret;
}
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, (unsigned long)_mcount);
+}
+
/* I believe that first is called ftrace_make_nop before this function */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index b463f2aa5a61..45b3f7064fcd 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -12,7 +12,6 @@
#ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 2625232bfe52..85de81985e6f 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -61,7 +61,7 @@ static inline void ftrace_dyn_arch_init_insns(void)
/* la v1, _mcount */
v1 = 3;
buf = (u32 *)&insn_la_mcount[0];
- UASM_i_LA(&buf, v1, MCOUNT_ADDR);
+ UASM_i_LA(&buf, v1, (unsigned long)_mcount);
/* jal (ftrace_caller + 8), jump over the first two instruction */
buf = (u32 *)&insn_jal_ftrace_caller;
@@ -200,6 +200,11 @@ int ftrace_make_nop(struct module *mod,
#endif
}
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, (unsigned long)_mcount);
+}
+
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int new;
@@ -232,7 +237,7 @@ int __init ftrace_dyn_arch_init(void)
ftrace_dyn_arch_init_insns();
/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
- ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
+ ftrace_modify_code((unsigned long)_mcount, INSN_NOP);
return 0;
}
diff --git a/arch/nds32/include/asm/ftrace.h b/arch/nds32/include/asm/ftrace.h
index 2f96cc96aa35..97263241249d 100644
--- a/arch/nds32/include/asm/ftrace.h
+++ b/arch/nds32/include/asm/ftrace.h
@@ -7,7 +7,6 @@
#define HAVE_FUNCTION_GRAPH_FP_TEST
-#define MCOUNT_ADDR ((unsigned long)(_mcount))
/* mcount call is composed of three instructions:
* sethi + ori + jral
*/
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
index fd2a54b8cd57..8f0c34f7bf5b 100644
--- a/arch/nds32/kernel/ftrace.c
+++ b/arch/nds32/kernel/ftrace.c
@@ -203,6 +203,11 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return ftrace_modify_code(pc, call_insn, nop_insn, true);
}
+
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, (unsigned long)_mcount);
+}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 958c0aa5dbb2..e0c7cc7958b8 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -5,7 +5,6 @@
#ifndef __ASSEMBLY__
extern void mcount(void);
-#define MCOUNT_ADDR ((unsigned long)mcount)
#define MCOUNT_INSN_SIZE 4
#define CC_USING_NOP_MCOUNT
extern unsigned long sys_call_table[];
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index b6fb30f2e4bf..4c9d8d1e5262 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -186,4 +186,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
insn, sizeof(insn)-4);
return 0;
}
+
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, (unsigned long)mcount);
+}
#endif
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 3dfb80b86561..bac76b982d2c 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -5,7 +5,6 @@
#include <asm/types.h>
#ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifdef __ASSEMBLY__
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index be1ca98fce5c..728a357c51f0 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -472,6 +472,11 @@ int ftrace_make_nop(struct module *mod,
#endif /* CONFIG_MODULES */
}
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, (unsigned long)_mcount);
+}
+
#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
/*
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index c6dcc5291f97..5ca88b80de44 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -33,7 +33,6 @@ struct dyn_arch_ftrace {
* both auipc and jalr at the same time.
*/
-#define MCOUNT_ADDR ((unsigned long)_mcount)
#define JALR_SIGN_MASK (0x00000800)
#define JALR_OFFSET_MASK (0x00000fff)
#define AUIPC_OFFSET_MASK (0xfffff000)
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index b94d8db5ddcc..88fcdbf84ad8 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -88,6 +88,11 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return __ftrace_modify_call(rec->ip, addr, false);
}
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, (unsigned long)_mcount);
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 68d362f8d6c1..e70717c37f68 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -30,7 +30,6 @@ extern unsigned long ftrace_plt;
struct dyn_arch_ftrace { };
-#define MCOUNT_ADDR ((unsigned long)_mcount)
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#define KPROBE_ON_FTRACE_NOP 0
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 1bb85f60c0dd..b74e35cc2262 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -110,11 +110,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
- if (addr == MCOUNT_ADDR) {
- /* Initial code replacement */
- ftrace_generate_orig_insn(&orig);
- ftrace_generate_nop_insn(&new);
- } else if (is_kprobe_on_ftrace(&old)) {
+ if (is_kprobe_on_ftrace(&old)) {
/*
* If we find a breakpoint instruction, a kprobe has been
* placed at the beginning of the function. We write the
@@ -136,6 +132,22 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return 0;
}
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ struct ftrace_insn orig, new, old;
+
+ if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
+ return -EFAULT;
+ ftrace_generate_orig_insn(&orig);
+ ftrace_generate_nop_insn(&new);
+ /* Verify that the to be replaced code matches what we expect. */
+ if (memcmp(&orig, &old, sizeof(old)))
+ return -EINVAL;
+ s390_kernel_write((void *) rec->ip, &new, sizeof(new));
+
+ return 0;
+}
+
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
struct ftrace_insn orig, new, old;
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
index b1c1dc0cc261..5a7890b7a0d2 100644
--- a/arch/sh/include/asm/ftrace.h
+++ b/arch/sh/include/asm/ftrace.h
@@ -10,8 +10,6 @@
#ifndef __ASSEMBLY__
extern void mcount(void);
-#define MCOUNT_ADDR ((unsigned long)(mcount))
-
#ifdef CONFIG_DYNAMIC_FTRACE
#define CALL_ADDR ((long)(ftrace_call))
#define STUB_ADDR ((long)(ftrace_stub))
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 1b04270e5460..f7a12102a93d 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -242,6 +242,11 @@ int ftrace_make_nop(struct module *mod,
return ftrace_modify_code(rec->ip, old, new);
}
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, (unsigned long)mcount);
+}
+
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned char *new, *old;
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index d3aa1a524431..a1eecda8976b 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -3,7 +3,6 @@
#define _ASM_SPARC64_FTRACE
#ifdef CONFIG_MCOUNT
-#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 684b84ce397f..7a16482fdd29 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -63,6 +63,11 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long ad
return ftrace_modify_code(ip, old, new);
}
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, (unsigned long)_mcount);
+}
+
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 287f1f7b2e52..58fe65fc5a63 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -6,7 +6,6 @@
#ifndef CC_USING_FENTRY
# error Compiler does not support fentry?
#endif
-# define MCOUNT_ADDR ((unsigned long)(__fentry__))
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 024c3053dbba..3cdba006f848 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -149,26 +149,28 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
return 0;
}
-int ftrace_make_nop(struct module *mod,
- struct dyn_ftrace *rec, unsigned long addr)
+/*
+ * On boot up, and when modules are loaded, the __fentry__
+ * is converted to a nop, and will never become __fentry__
+ * again. This code is either running before SMP (on boot up)
+ * or before the code will ever be executed (module load).
+ * We do not want to use the breakpoint version in this case,
+ * just modify the code directly.
+ */
+int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec)
{
unsigned const char *new, *old;
unsigned long ip = rec->ip;
- old = ftrace_call_replace(ip, addr);
+ old = ftrace_call_replace(ip, (unsigned long)__fentry__);
new = ftrace_nop_replace();
- /*
- * On boot up, and when modules are loaded, the MCOUNT_ADDR
- * is converted to a nop, and will never become MCOUNT_ADDR
- * again. This code is either running before SMP (on boot up)
- * or before the code will ever be executed (module load).
- * We do not want to use the breakpoint version in this case,
- * just modify the code directly.
- */
- if (addr == MCOUNT_ADDR)
- return ftrace_modify_code_direct(rec->ip, old, new);
+ return ftrace_modify_code_direct(ip, old, new);
+}
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
ftrace_expected = NULL;
/* Normal cases use add_brk_on_nop */
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h
index 6c6d9a9f185f..3ce2170291cd 100644
--- a/arch/xtensa/include/asm/ftrace.h
+++ b/arch/xtensa/include/asm/ftrace.h
@@ -28,7 +28,6 @@ extern unsigned long return_address(unsigned level);
#ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 3
#ifndef __ASSEMBLY__
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 8a8cb3c401b2..8175ffb671f0 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -458,6 +458,7 @@ extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);
+extern int ftrace_call_init(struct module *mod, struct dyn_ftrace *rec);
void ftrace_modify_all_code(int command);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eca34503f178..62ca6977e570 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2500,7 +2500,7 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
if (unlikely(ftrace_disabled))
return 0;
- ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+ ret = ftrace_call_init(mod, rec);
if (ret) {
ftrace_bug_type = FTRACE_BUG_INIT;
ftrace_bug(ret, rec);
--
2.23.0.rc1