2023-02-03 08:51:23

by Tiezhu Yang

Subject: [PATCH] LoongArch: Add kprobe on ftrace support

Add kprobe_ftrace_handler() and arch_prepare_kprobe_ftrace() to support
kprobes on ftrace; the code is similar to that of x86 and riscv.

Here is a simple example:

# echo 'p:myprobe kernel_clone' > /sys/kernel/debug/tracing/kprobe_events
# echo 'r:myretprobe kernel_clone $retval' >> /sys/kernel/debug/tracing/kprobe_events
# echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
# echo 1 > /sys/kernel/debug/tracing/events/kprobes/myretprobe/enable
# echo 1 > /sys/kernel/debug/tracing/tracing_on
# cat /sys/kernel/debug/tracing/trace
# tracer: nop
#
# entries-in-buffer/entries-written: 2/2 #P:4
#
#                                _-----=> irqs-off/BH-disabled
#                               / _----=> need-resched
#                              | / _---=> hardirq/softirq
#                              || / _--=> preempt-depth
#                              ||| / _-=> migrate-disable
#                              |||| /     delay
#           TASK-PID     CPU#  |||||  TIMESTAMP  FUNCTION
#              | |         |   |||||     |         |
            bash-488     [002] .....  2041.190681: myprobe: (kernel_clone+0x0/0x40c)
            bash-488     [002] .....  2041.190788: myretprobe: (__do_sys_clone+0x84/0xb8 <- kernel_clone) arg1=0x200

Signed-off-by: Tiezhu Yang <[email protected]>
---
 arch/loongarch/Kconfig             |  1 +
 arch/loongarch/kernel/ftrace_dyn.c | 65 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 66 insertions(+)

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 134a2f8..22a3e77 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -104,6 +104,7 @@ config LOONGARCH
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_KPROBES
+	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_KRETPROBES
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_NMI
diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c
index 0f07591..7b074c2 100644
--- a/arch/loongarch/kernel/ftrace_dyn.c
+++ b/arch/loongarch/kernel/ftrace_dyn.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include <linux/uaccess.h>
 
 #include <asm/inst.h>
@@ -271,3 +272,67 @@ int ftrace_disable_ftrace_graph_caller(void)
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_KPROBES_ON_FTRACE
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+	struct kprobe *p;
+	struct pt_regs *regs;
+	struct kprobe_ctlblk *kcb;
+	int bit;
+
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+	if (bit < 0)
+		return;
+
+	p = get_kprobe((kprobe_opcode_t *)ip);
+	if (unlikely(!p) || kprobe_disabled(p))
+		goto out;
+
+	regs = ftrace_get_regs(fregs);
+	if (!regs)
+		goto out;
+
+	kcb = get_kprobe_ctlblk();
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+	} else {
+		unsigned long orig_ip = instruction_pointer(regs);
+
+		instruction_pointer_set(regs, ip);
+
+		__this_cpu_write(current_kprobe, p);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		if (!p->pre_handler || !p->pre_handler(p, regs)) {
+			/*
+			 * Emulate singlestep (and also recover regs->csr_era)
+			 * as if there is a nop
+			 */
+			instruction_pointer_set(regs,
+				(unsigned long)p->addr + MCOUNT_INSN_SIZE);
+			if (unlikely(p->post_handler)) {
+				kcb->kprobe_status = KPROBE_HIT_SSDONE;
+				p->post_handler(p, regs, 0);
+			}
+			instruction_pointer_set(regs, orig_ip);
+		}
+
+		/*
+		 * If pre_handler returns !0, it changes regs->csr_era. We have to
+		 * skip emulating post_handler.
+		 */
+		__this_cpu_write(current_kprobe, NULL);
+	}
+out:
+	ftrace_test_recursion_unlock(bit);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.insn = NULL;
+	return 0;
+}
+#endif /* CONFIG_KPROBES_ON_FTRACE */
--
2.1.0



2023-02-03 09:01:24

by Huacai Chen

Subject: Re: [PATCH] LoongArch: Add kprobe on ftrace support

Hi, Tiezhu,

On Fri, Feb 3, 2023 at 4:51 PM Tiezhu Yang <[email protected]> wrote:
>
> Add kprobe_ftrace_handler() and arch_prepare_kprobe_ftrace() to support
> kprobes on ftrace; the code is similar to that of x86 and riscv.
>
> Here is a simple example:
>
> # echo 'p:myprobe kernel_clone' > /sys/kernel/debug/tracing/kprobe_events
> # echo 'r:myretprobe kernel_clone $retval' >> /sys/kernel/debug/tracing/kprobe_events
> # echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
> # echo 1 > /sys/kernel/debug/tracing/events/kprobes/myretprobe/enable
> # echo 1 > /sys/kernel/debug/tracing/tracing_on
> # cat /sys/kernel/debug/tracing/trace
> # tracer: nop
> #
> # entries-in-buffer/entries-written: 2/2 #P:4
> #
> # _-----=> irqs-off/BH-disabled
> # / _----=> need-resched
> # | / _---=> hardirq/softirq
> # || / _--=> preempt-depth
> # ||| / _-=> migrate-disable
> # |||| / delay
> # TASK-PID CPU# ||||| TIMESTAMP FUNCTION
> # | | | ||||| | |
> bash-488 [002] ..... 2041.190681: myprobe: (kernel_clone+0x0/0x40c)
> bash-488 [002] ..... 2041.190788: myretprobe: (__do_sys_clone+0x84/0xb8 <- kernel_clone) arg1=0x200
>
> Signed-off-by: Tiezhu Yang <[email protected]>
> ---
> arch/loongarch/Kconfig | 1 +
> arch/loongarch/kernel/ftrace_dyn.c | 65 ++++++++++++++++++++++++++++++++++++++
> 2 files changed, 66 insertions(+)
>
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index 134a2f8..22a3e77 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -104,6 +104,7 @@ config LOONGARCH
> select HAVE_IRQ_EXIT_ON_IRQ_STACK
> select HAVE_IRQ_TIME_ACCOUNTING
> select HAVE_KPROBES
> + select HAVE_KPROBES_ON_FTRACE
> select HAVE_KRETPROBES
> select HAVE_MOD_ARCH_SPECIFIC
> select HAVE_NMI
> diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c
> index 0f07591..7b074c2 100644
> --- a/arch/loongarch/kernel/ftrace_dyn.c
> +++ b/arch/loongarch/kernel/ftrace_dyn.c
> @@ -6,6 +6,7 @@
> */
>
> #include <linux/ftrace.h>
> +#include <linux/kprobes.h>
> #include <linux/uaccess.h>
>
> #include <asm/inst.h>
> @@ -271,3 +272,67 @@ int ftrace_disable_ftrace_graph_caller(void)
> }
> #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
> #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
> +
> +#ifdef CONFIG_KPROBES_ON_FTRACE
> +/* Ftrace callback handler for kprobes -- called with preemption disabled */
> +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
> + struct ftrace_ops *ops, struct ftrace_regs *fregs)
> +{
> + struct kprobe *p;
> + struct pt_regs *regs;
> + struct kprobe_ctlblk *kcb;
> + int bit;
> +
> + bit = ftrace_test_recursion_trylock(ip, parent_ip);
> + if (bit < 0)
> + return;
> +
In the old version I see preempt_disable_notrace() here and
preempt_enable_notrace() at the end. And I prefer to add this one to
the kprobes series rather than a separate patch.

Huacai
> + p = get_kprobe((kprobe_opcode_t *)ip);
> + if (unlikely(!p) || kprobe_disabled(p))
> + goto out;
> +
> + regs = ftrace_get_regs(fregs);
> + if (!regs)
> + goto out;
> +
> + kcb = get_kprobe_ctlblk();
> + if (kprobe_running()) {
> + kprobes_inc_nmissed_count(p);
> + } else {
> + unsigned long orig_ip = instruction_pointer(regs);
> +
> + instruction_pointer_set(regs, ip);
> +
> + __this_cpu_write(current_kprobe, p);
> + kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> + if (!p->pre_handler || !p->pre_handler(p, regs)) {
> + /*
> + * Emulate singlestep (and also recover regs->csr_era)
> + * as if there is a nop
> + */
> + instruction_pointer_set(regs,
> + (unsigned long)p->addr + MCOUNT_INSN_SIZE);
> + if (unlikely(p->post_handler)) {
> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
> + p->post_handler(p, regs, 0);
> + }
> + instruction_pointer_set(regs, orig_ip);
> + }
> +
> + /*
> + * If pre_handler returns !0, it changes regs->csr_era. We have to
> + * skip emulating post_handler.
> + */
> + __this_cpu_write(current_kprobe, NULL);
> + }
> +out:
> + ftrace_test_recursion_unlock(bit);
> +}
> +NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> +
> +int arch_prepare_kprobe_ftrace(struct kprobe *p)
> +{
> + p->ainsn.insn = NULL;
> + return 0;
> +}
> +#endif /* CONFIG_KPROBES_ON_FTRACE */
> --
> 2.1.0
>

2023-02-03 09:34:48

by Tiezhu Yang

Subject: Re: [PATCH] LoongArch: Add kprobe on ftrace support



On 02/03/2023 05:00 PM, Huacai Chen wrote:
> Hi, Tiezhu,
>
> On Fri, Feb 3, 2023 at 4:51 PM Tiezhu Yang <[email protected]> wrote:
>>
>> Add kprobe_ftrace_handler() and arch_prepare_kprobe_ftrace() to support
>> kprobes on ftrace; the code is similar to that of x86 and riscv.
>>
>> Here is a simple example:
>>
>> # echo 'p:myprobe kernel_clone' > /sys/kernel/debug/tracing/kprobe_events
>> # echo 'r:myretprobe kernel_clone $retval' >> /sys/kernel/debug/tracing/kprobe_events
>> # echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
>> # echo 1 > /sys/kernel/debug/tracing/events/kprobes/myretprobe/enable
>> # echo 1 > /sys/kernel/debug/tracing/tracing_on
>> # cat /sys/kernel/debug/tracing/trace
>> # tracer: nop
>> #
>> # entries-in-buffer/entries-written: 2/2 #P:4
>> #
>> # _-----=> irqs-off/BH-disabled
>> # / _----=> need-resched
>> # | / _---=> hardirq/softirq
>> # || / _--=> preempt-depth
>> # ||| / _-=> migrate-disable
>> # |||| / delay
>> # TASK-PID CPU# ||||| TIMESTAMP FUNCTION
>> # | | | ||||| | |
>> bash-488 [002] ..... 2041.190681: myprobe: (kernel_clone+0x0/0x40c)
>> bash-488 [002] ..... 2041.190788: myretprobe: (__do_sys_clone+0x84/0xb8 <- kernel_clone) arg1=0x200
>>
>> Signed-off-by: Tiezhu Yang <[email protected]>
>> ---
>> arch/loongarch/Kconfig | 1 +
>> arch/loongarch/kernel/ftrace_dyn.c | 65 ++++++++++++++++++++++++++++++++++++++
>> 2 files changed, 66 insertions(+)
>>
>> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
>> index 134a2f8..22a3e77 100644
>> --- a/arch/loongarch/Kconfig
>> +++ b/arch/loongarch/Kconfig
>> @@ -104,6 +104,7 @@ config LOONGARCH
>> select HAVE_IRQ_EXIT_ON_IRQ_STACK
>> select HAVE_IRQ_TIME_ACCOUNTING
>> select HAVE_KPROBES
>> + select HAVE_KPROBES_ON_FTRACE
>> select HAVE_KRETPROBES
>> select HAVE_MOD_ARCH_SPECIFIC
>> select HAVE_NMI
>> diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c
>> index 0f07591..7b074c2 100644
>> --- a/arch/loongarch/kernel/ftrace_dyn.c
>> +++ b/arch/loongarch/kernel/ftrace_dyn.c
>> @@ -6,6 +6,7 @@
>> */
>>
>> #include <linux/ftrace.h>
>> +#include <linux/kprobes.h>
>> #include <linux/uaccess.h>
>>
>> #include <asm/inst.h>
>> @@ -271,3 +272,67 @@ int ftrace_disable_ftrace_graph_caller(void)
>> }
>> #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
>> #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
>> +
>> +#ifdef CONFIG_KPROBES_ON_FTRACE
>> +/* Ftrace callback handler for kprobes -- called with preemption disabled */
>> +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
>> + struct ftrace_ops *ops, struct ftrace_regs *fregs)
>> +{
>> + struct kprobe *p;
>> + struct pt_regs *regs;
>> + struct kprobe_ctlblk *kcb;
>> + int bit;
>> +
>> + bit = ftrace_test_recursion_trylock(ip, parent_ip);
>> + if (bit < 0)
>> + return;
>> +
> In the old version I see preempt_disable_notrace() here and
> preempt_enable_notrace() at the end.

In the latest upstream kernel code there is no need to do that; here is
the related commit:

ftrace: disable preemption when recursion locked
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=ce5e48036c9e
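
To illustrate the point, here is a rough sketch of the pattern after that
commit (my_ftrace_callback is just a made-up example, not upstream code):
the recursion lock itself now provides the preemption protection, which is
why kprobe_ftrace_handler() no longer needs an explicit
preempt_disable_notrace()/preempt_enable_notrace() pair.

#include <linux/ftrace.h>

/*
 * Illustrative sketch only: since commit ce5e48036c9e,
 * ftrace_test_recursion_trylock() disables preemption itself and
 * ftrace_test_recursion_unlock() re-enables it.
 */
static void my_ftrace_callback(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	int bit;

	/* preemption is disabled here by the recursion lock */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	/* ... callback body, e.g. the kprobe handling above ... */

	/* preemption is re-enabled here */
	ftrace_test_recursion_unlock(bit);
}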

> And I prefer to add this one to
> the kprobes series rather than a separate patch.
>

Actually, this patch is based on the kprobe series and has no conflicts;
it was sent as a separate patch to avoid resending the kprobe series.

Thanks,
Tiezhu