Implement KPROBES_ON_FTRACE for arm64.
Applied after FTRACE_WITH_REGS:
http://lists.infradead.org/pipermail/linux-arm-kernel/2019-August/674404.html
Jisheng Zhang (4):
kprobes: adjust kprobe addr for KPROBES_ON_FTRACE
kprobes/x86: use instruction_pointer and instruction_pointer_set
kprobes: move kprobe_ftrace_handler() from x86 and make it weak
arm64: implement KPROBES_ON_FTRACE
arch/arm64/Kconfig | 1 +
arch/arm64/kernel/probes/Makefile | 1 +
arch/arm64/kernel/probes/ftrace.c | 16 +++++++++++
arch/x86/kernel/kprobes/ftrace.c | 43 ----------------------------
kernel/kprobes.c | 47 +++++++++++++++++++++++++++++++
5 files changed, 65 insertions(+), 43 deletions(-)
create mode 100644 arch/arm64/kernel/probes/ftrace.c
--
2.23.0.rc1
For KPROBES_ON_FTRACE case, we need to adjust the kprobe's addr
correspondingly.
Signed-off-by: Jisheng Zhang <[email protected]>
---
kernel/kprobes.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9873fc627d61..f8400753a8a9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1560,6 +1560,9 @@ int register_kprobe(struct kprobe *p)
addr = kprobe_addr(p);
if (IS_ERR(addr))
return PTR_ERR(addr);
+#ifdef CONFIG_KPROBES_ON_FTRACE
+ addr = (kprobe_opcode_t *)ftrace_call_adjust((unsigned long)addr);
+#endif
p->addr = addr;
ret = check_kprobe_rereg(p);
--
2.23.0.rc1
This is to make the kprobe_ftrace_handler() common, so we can move it
to common code in next patch.
Signed-off-by: Jisheng Zhang <[email protected]>
---
arch/x86/kernel/kprobes/ftrace.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 681a4b36e9bb..c2ad0b9259ca 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -28,9 +28,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
- unsigned long orig_ip = regs->ip;
+ unsigned long orig_ip = instruction_pointer(regs);
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
- regs->ip = ip + sizeof(kprobe_opcode_t);
+ instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -39,12 +39,13 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
* Emulate singlestep (and also recover regs->ip)
* as if there is a 5byte nop
*/
- regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+ instruction_pointer_set(regs,
+ (unsigned long)p->addr + MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
- regs->ip = orig_ip;
+ instruction_pointer_set(regs, orig_ip);
}
/*
* If pre_handler returns !0, it changes regs->ip. We have to
--
2.23.0.rc1
This code could be reused. So move it from x86 to common code.
Signed-off-by: Jisheng Zhang <[email protected]>
---
arch/x86/kernel/kprobes/ftrace.c | 44 --------------------------------
kernel/kprobes.c | 44 ++++++++++++++++++++++++++++++++
2 files changed, 44 insertions(+), 44 deletions(-)
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index c2ad0b9259ca..91ae1e3e65f7 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -12,50 +12,6 @@
#include "common.h"
-/* Ftrace callback handler for kprobes -- called under preepmt disabed */
-void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *regs)
-{
- struct kprobe *p;
- struct kprobe_ctlblk *kcb;
-
- /* Preempt is disabled by ftrace */
- p = get_kprobe((kprobe_opcode_t *)ip);
- if (unlikely(!p) || kprobe_disabled(p))
- return;
-
- kcb = get_kprobe_ctlblk();
- if (kprobe_running()) {
- kprobes_inc_nmissed_count(p);
- } else {
- unsigned long orig_ip = instruction_pointer(regs);
- /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
- instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
-
- __this_cpu_write(current_kprobe, p);
- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (!p->pre_handler || !p->pre_handler(p, regs)) {
- /*
- * Emulate singlestep (and also recover regs->ip)
- * as if there is a 5byte nop
- */
- instruction_pointer_set(regs,
- (unsigned long)p->addr + MCOUNT_INSN_SIZE);
- if (unlikely(p->post_handler)) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- p->post_handler(p, regs, 0);
- }
- instruction_pointer_set(regs, orig_ip);
- }
- /*
- * If pre_handler returns !0, it changes regs->ip. We have to
- * skip emulating post_handler.
- */
- __this_cpu_write(current_kprobe, NULL);
- }
-}
-NOKPROBE_SYMBOL(kprobe_ftrace_handler);
-
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
p->ainsn.insn = NULL;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f8400753a8a9..479148ee1822 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -960,6 +960,50 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
#endif /* CONFIG_OPTPROBES */
#ifdef CONFIG_KPROBES_ON_FTRACE
+/* Ftrace callback handler for kprobes -- called under preempt disabled */
+void __weak kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct pt_regs *regs)
+{
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
+ /* Preempt is disabled by ftrace */
+ p = get_kprobe((kprobe_opcode_t *)ip);
+ if (unlikely(!p) || kprobe_disabled(p))
+ return;
+
+ kcb = get_kprobe_ctlblk();
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(p);
+ } else {
+ unsigned long orig_ip = instruction_pointer(regs);
+ /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
+ instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
+
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ /*
+ * Emulate singlestep (and also recover regs->ip)
+ * as if there is a 5byte nop
+ */
+ instruction_pointer_set(regs,
+ (unsigned long)p->addr + MCOUNT_INSN_SIZE);
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ instruction_pointer_set(regs, orig_ip);
+ }
+ /*
+ * If pre_handler returns !0, it changes regs->ip. We have to
+ * skip emulating post_handler.
+ */
+ __this_cpu_write(current_kprobe, NULL);
+ }
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
.func = kprobe_ftrace_handler,
.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
--
2.23.0.rc1
This patch implements KPROBES_ON_FTRACE for arm64.
~ # mount -t debugfs debugfs /sys/kernel/debug/
~ # cd /sys/kernel/debug/
/sys/kernel/debug # echo 'p _do_fork' > tracing/kprobe_events
before the patch:
/sys/kernel/debug # cat kprobes/list
ffffff801009ff7c k _do_fork+0x4 [DISABLED]
after the patch:
/sys/kernel/debug # cat kprobes/list
ffffff801009ff7c k _do_fork+0x4 [DISABLED][FTRACE]
Signed-off-by: Jisheng Zhang <[email protected]>
---
arch/arm64/Kconfig | 1 +
arch/arm64/kernel/probes/Makefile | 1 +
arch/arm64/kernel/probes/ftrace.c | 16 ++++++++++++++++
3 files changed, 18 insertions(+)
create mode 100644 arch/arm64/kernel/probes/ftrace.c
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 663392d1eae2..928700f15e23 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -167,6 +167,7 @@ config ARM64
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
+ select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_GENERIC_VDSO
select IOMMU_DMA if IOMMU_SUPPORT
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
index 8e4be92e25b1..4020cfc66564 100644
--- a/arch/arm64/kernel/probes/Makefile
+++ b/arch/arm64/kernel/probes/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \
simulate-insn.o
obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o \
simulate-insn.o
+obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
diff --git a/arch/arm64/kernel/probes/ftrace.c b/arch/arm64/kernel/probes/ftrace.c
new file mode 100644
index 000000000000..1fe8f105e02e
--- /dev/null
+++ b/arch/arm64/kernel/probes/ftrace.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * Copyright (C) 2019 Synaptics Incorporated
+ *
+ * Author: Jisheng Zhang <[email protected]>
+ */
+
+#include <linux/kprobes.h>
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ p->ainsn.api.insn = NULL;
+ return 0;
+}
--
2.23.0.rc1
Hi,
On Mon, Aug 19, 2019 at 11:35:27AM +0000, Jisheng Zhang wrote:
> Implement KPROBES_ON_FTRACE for arm64.
It would be very helpful if the cover letter could explain what
KPROBES_ON_FTRACE is, and why it is wanted.
It's not clear to me whether this is enabling new functionality for
kprobes via ftrace, or whether this is an optimization for kprobes using
ftrace under the hood.
Thanks,
Mark.
>
> Applied after FTRACE_WITH_REGS:
> http://lists.infradead.org/pipermail/linux-arm-kernel/2019-August/674404.html
>
> Jisheng Zhang (4):
> kprobes: adjust kprobe addr for KPROBES_ON_FTRACE
> kprobes/x86: use instruction_pointer and instruction_pointer_set
> kprobes: move kprobe_ftrace_handler() from x86 and make it weak
> arm64: implement KPROBES_ON_FTRACE
>
> arch/arm64/Kconfig | 1 +
> arch/arm64/kernel/probes/Makefile | 1 +
> arch/arm64/kernel/probes/ftrace.c | 16 +++++++++++
> arch/x86/kernel/kprobes/ftrace.c | 43 ----------------------------
> kernel/kprobes.c | 47 +++++++++++++++++++++++++++++++
> 5 files changed, 65 insertions(+), 43 deletions(-)
> create mode 100644 arch/arm64/kernel/probes/ftrace.c
>
> --
> 2.23.0.rc1
>
Hi Jisheng,
On Mon, 19 Aug 2019 11:35:27 +0000
Jisheng Zhang <[email protected]> wrote:
> Implement KPROBES_ON_FTRACE for arm64.
>
> Applied after FTRACE_WITH_REGS:
> http://lists.infradead.org/pipermail/linux-arm-kernel/2019-August/674404.html
Looks interesting! thanks for working on it.
I'll check it.
Thanks,
>
> Jisheng Zhang (4):
> kprobes: adjust kprobe addr for KPROBES_ON_FTRACE
> kprobes/x86: use instruction_pointer and instruction_pointer_set
> kprobes: move kprobe_ftrace_handler() from x86 and make it weak
> arm64: implement KPROBES_ON_FTRACE
>
> arch/arm64/Kconfig | 1 +
> arch/arm64/kernel/probes/Makefile | 1 +
> arch/arm64/kernel/probes/ftrace.c | 16 +++++++++++
> arch/x86/kernel/kprobes/ftrace.c | 43 ----------------------------
> kernel/kprobes.c | 47 +++++++++++++++++++++++++++++++
> 5 files changed, 65 insertions(+), 43 deletions(-)
> create mode 100644 arch/arm64/kernel/probes/ftrace.c
>
> --
> 2.23.0.rc1
>
--
Masami Hiramatsu <[email protected]>
Jisheng Zhang wrote:
> For KPROBES_ON_FTRACE case, we need to adjust the kprobe's addr
> correspondingly.
>
> Signed-off-by: Jisheng Zhang <[email protected]>
> ---
> kernel/kprobes.c | 3 +++
> 1 file changed, 3 insertions(+)
>
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index 9873fc627d61..f8400753a8a9 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -1560,6 +1560,9 @@ int register_kprobe(struct kprobe *p)
> addr = kprobe_addr(p);
> if (IS_ERR(addr))
> return PTR_ERR(addr);
> +#ifdef CONFIG_KPROBES_ON_FTRACE
> + addr = (kprobe_opcode_t *)ftrace_call_adjust((unsigned long)addr);
> +#endif
> p->addr = addr;
I'm not sure what this is achieving, but looks wrong to me.
If you intend to have kprobes default to using ftrace entry for probing
functions, consider over-riding kprobe_lookup_name() -- see powerpc
variant for example.
- Naveen
Jisheng Zhang wrote:
> This patch implements KPROBES_ON_FTRACE for arm64.
>
> ~ # mount -t debugfs debugfs /sys/kernel/debug/
> ~ # cd /sys/kernel/debug/
> /sys/kernel/debug # echo 'p _do_fork' > tracing/kprobe_events
>
> before the patch:
>
> /sys/kernel/debug # cat kprobes/list
> ffffff801009ff7c k _do_fork+0x4 [DISABLED]
This looks wrong -- we should not be allowing kprobe to be registered on
ftrace address without KPROBES_ON_FTRACE. Is _do_fork+0x4 the location
of ftrace entry on arm64?
- Naveen
Hi Jisheng,
On Mon, 19 Aug 2019 11:36:09 +0000
Jisheng Zhang <[email protected]> wrote:
> For KPROBES_ON_FTRACE case, we need to adjust the kprobe's addr
> correspondingly.
No, I think you have misunderstood what the ftrace_call_adjust() does.
Ftrace's rec->ip is already adjusted when initializing it. Kprobes
checks the list after initialized (adjusted). So you don't need to
adjust it again.
BTW, this type of hidden adjustment should be avoided by design.
If you find user specifies wrong address, return error instead of
adjust it silently.
Thank you,
>
> Signed-off-by: Jisheng Zhang <[email protected]>
> ---
> kernel/kprobes.c | 3 +++
> 1 file changed, 3 insertions(+)
>
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index 9873fc627d61..f8400753a8a9 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -1560,6 +1560,9 @@ int register_kprobe(struct kprobe *p)
> addr = kprobe_addr(p);
> if (IS_ERR(addr))
> return PTR_ERR(addr);
> +#ifdef CONFIG_KPROBES_ON_FTRACE
> + addr = (kprobe_opcode_t *)ftrace_call_adjust((unsigned long)addr);
> +#endif
> p->addr = addr;
>
> ret = check_kprobe_rereg(p);
> --
> 2.23.0.rc1
>
--
Masami Hiramatsu <[email protected]>
Hi Jisheng,
On Mon, 19 Aug 2019 11:37:32 +0000
Jisheng Zhang <[email protected]> wrote:
> This code could be reused. So move it from x86 to common code.
Yes, it can be among some arch, but at first, please make your
architecture implementation. After making sure that is enough
stable, we will optimize (consolidate) the code.
For example,
> - /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
> - instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
This may depend on arch implementation of kprobes.
Could you make a copy and update comments on arm64?
Thank you,
>
> Signed-off-by: Jisheng Zhang <[email protected]>
> ---
> arch/x86/kernel/kprobes/ftrace.c | 44 --------------------------------
> kernel/kprobes.c | 44 ++++++++++++++++++++++++++++++++
> 2 files changed, 44 insertions(+), 44 deletions(-)
>
> diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
> index c2ad0b9259ca..91ae1e3e65f7 100644
> --- a/arch/x86/kernel/kprobes/ftrace.c
> +++ b/arch/x86/kernel/kprobes/ftrace.c
> @@ -12,50 +12,6 @@
>
> #include "common.h"
>
> -/* Ftrace callback handler for kprobes -- called under preepmt disabed */
> -void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
> - struct ftrace_ops *ops, struct pt_regs *regs)
> -{
> - struct kprobe *p;
> - struct kprobe_ctlblk *kcb;
> -
> - /* Preempt is disabled by ftrace */
> - p = get_kprobe((kprobe_opcode_t *)ip);
> - if (unlikely(!p) || kprobe_disabled(p))
> - return;
> -
> - kcb = get_kprobe_ctlblk();
> - if (kprobe_running()) {
> - kprobes_inc_nmissed_count(p);
> - } else {
> - unsigned long orig_ip = instruction_pointer(regs);
> - /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
> - instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
> -
> - __this_cpu_write(current_kprobe, p);
> - kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> - if (!p->pre_handler || !p->pre_handler(p, regs)) {
> - /*
> - * Emulate singlestep (and also recover regs->ip)
> - * as if there is a 5byte nop
> - */
> - instruction_pointer_set(regs,
> - (unsigned long)p->addr + MCOUNT_INSN_SIZE);
> - if (unlikely(p->post_handler)) {
> - kcb->kprobe_status = KPROBE_HIT_SSDONE;
> - p->post_handler(p, regs, 0);
> - }
> - instruction_pointer_set(regs, orig_ip);
> - }
> - /*
> - * If pre_handler returns !0, it changes regs->ip. We have to
> - * skip emulating post_handler.
> - */
> - __this_cpu_write(current_kprobe, NULL);
> - }
> -}
> -NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> -
> int arch_prepare_kprobe_ftrace(struct kprobe *p)
> {
> p->ainsn.insn = NULL;
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index f8400753a8a9..479148ee1822 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -960,6 +960,50 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
> #endif /* CONFIG_OPTPROBES */
>
> #ifdef CONFIG_KPROBES_ON_FTRACE
> +/* Ftrace callback handler for kprobes -- called under preepmt disabed */
> +void __weak kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
> + struct ftrace_ops *ops, struct pt_regs *regs)
> +{
> + struct kprobe *p;
> + struct kprobe_ctlblk *kcb;
> +
> + /* Preempt is disabled by ftrace */
> + p = get_kprobe((kprobe_opcode_t *)ip);
> + if (unlikely(!p) || kprobe_disabled(p))
> + return;
> +
> + kcb = get_kprobe_ctlblk();
> + if (kprobe_running()) {
> + kprobes_inc_nmissed_count(p);
> + } else {
> + unsigned long orig_ip = instruction_pointer(regs);
> + /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
> + instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
> +
> + __this_cpu_write(current_kprobe, p);
> + kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> + if (!p->pre_handler || !p->pre_handler(p, regs)) {
> + /*
> + * Emulate singlestep (and also recover regs->ip)
> + * as if there is a 5byte nop
> + */
> + instruction_pointer_set(regs,
> + (unsigned long)p->addr + MCOUNT_INSN_SIZE);
> + if (unlikely(p->post_handler)) {
> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
> + p->post_handler(p, regs, 0);
> + }
> + instruction_pointer_set(regs, orig_ip);
> + }
> + /*
> + * If pre_handler returns !0, it changes regs->ip. We have to
> + * skip emulating post_handler.
> + */
> + __this_cpu_write(current_kprobe, NULL);
> + }
> +}
> +NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> +
> static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
> .func = kprobe_ftrace_handler,
> .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
> --
> 2.23.0.rc1
>
--
Masami Hiramatsu <[email protected]>
On Mon, 19 Aug 2019 11:36:48 +0000
Jisheng Zhang <[email protected]> wrote:
> This is to make the kprobe_ftrace_handler() common, so we can move it
> to common code in next patch.
>
BTW, this patch looks good, without next patch. Could you update the
patch description and resend it with my Ack?
Acked-by: Masami Hiramatsu <[email protected]>
Thank you,
> Signed-off-by: Jisheng Zhang <[email protected]>
> ---
> arch/x86/kernel/kprobes/ftrace.c | 9 +++++----
> 1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
> index 681a4b36e9bb..c2ad0b9259ca 100644
> --- a/arch/x86/kernel/kprobes/ftrace.c
> +++ b/arch/x86/kernel/kprobes/ftrace.c
> @@ -28,9 +28,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
> if (kprobe_running()) {
> kprobes_inc_nmissed_count(p);
> } else {
> - unsigned long orig_ip = regs->ip;
> + unsigned long orig_ip = instruction_pointer(regs);
> /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
> - regs->ip = ip + sizeof(kprobe_opcode_t);
> + instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
>
> __this_cpu_write(current_kprobe, p);
> kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> @@ -39,12 +39,13 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
> * Emulate singlestep (and also recover regs->ip)
> * as if there is a 5byte nop
> */
> - regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
> + instruction_pointer_set(regs,
> + (unsigned long)p->addr + MCOUNT_INSN_SIZE);
> if (unlikely(p->post_handler)) {
> kcb->kprobe_status = KPROBE_HIT_SSDONE;
> p->post_handler(p, regs, 0);
> }
> - regs->ip = orig_ip;
> + instruction_pointer_set(regs, orig_ip);
> }
> /*
> * If pre_handler returns !0, it changes regs->ip. We have to
> --
> 2.23.0.rc1
>
--
Masami Hiramatsu <[email protected]>
On Mon, 19 Aug 2019 22:13:02 +0530 "Naveen N. Rao" wrote:
> CAUTION: Email originated externally, do not click links or open attachments unless you recognize the sender and know the content is safe.
>
>
> Jisheng Zhang wrote:
> > For KPROBES_ON_FTRACE case, we need to adjust the kprobe's addr
> > correspondingly.
> >
> > Signed-off-by: Jisheng Zhang <[email protected]>
> > ---
> > kernel/kprobes.c | 3 +++
> > 1 file changed, 3 insertions(+)
> >
> > diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> > index 9873fc627d61..f8400753a8a9 100644
> > --- a/kernel/kprobes.c
> > +++ b/kernel/kprobes.c
> > @@ -1560,6 +1560,9 @@ int register_kprobe(struct kprobe *p)
> > addr = kprobe_addr(p);
> > if (IS_ERR(addr))
> > return PTR_ERR(addr);
> > +#ifdef CONFIG_KPROBES_ON_FTRACE
> > + addr = (kprobe_opcode_t *)ftrace_call_adjust((unsigned long)addr);
> > +#endif
> > p->addr = addr;
>
> I'm not sure what this is achieving, but looks wrong to me.
Indeed, I didn't take care of non-ftrace addr when KPROBES_ON_FTRACE, will
update in next version.
thanks
>
> If you intend to have kprobes default to using ftrace entry for probing
> functions, consider over-riding kprobe_lookup_name() -- see powerpc
> variant for example.
>
>
> - Naveen
>
On Tue, 20 Aug 2019 09:07:35 +0900 Masami Hiramatsu wrote:
>
>
> Hi Jisheng,
Hi,
>
> On Mon, 19 Aug 2019 11:37:32 +0000
> Jisheng Zhang <[email protected]> wrote:
>
> > This code could be reused. So move it from x86 to common code.
>
> Yes, it can be among some arch, but at first, please make your
> architecture implementation. After making sure that is enough
> stable, we will optimize (consolidate) the code.
Got it. I will duplicate the function firstly then make the consolidation
as a TODO.
>
> For example,
> > - /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
> > - instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
>
> This may depend on arch implementation of kprobes.
Indeed, for arm64, would be as simple as s/ip/pc.
Thanks
>
> Could you make a copy and update comments on arm64?
>
> Thank you,
>
> >
> > Signed-off-by: Jisheng Zhang <[email protected]>
> > ---
> > arch/x86/kernel/kprobes/ftrace.c | 44 --------------------------------
> > kernel/kprobes.c | 44 ++++++++++++++++++++++++++++++++
> > 2 files changed, 44 insertions(+), 44 deletions(-)
> >
> > diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
> > index c2ad0b9259ca..91ae1e3e65f7 100644
> > --- a/arch/x86/kernel/kprobes/ftrace.c
> > +++ b/arch/x86/kernel/kprobes/ftrace.c
> > @@ -12,50 +12,6 @@
> >
> > #include "common.h"
> >
> > -/* Ftrace callback handler for kprobes -- called under preepmt disabed */
> > -void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
> > - struct ftrace_ops *ops, struct pt_regs *regs)
> > -{
> > - struct kprobe *p;
> > - struct kprobe_ctlblk *kcb;
> > -
> > - /* Preempt is disabled by ftrace */
> > - p = get_kprobe((kprobe_opcode_t *)ip);
> > - if (unlikely(!p) || kprobe_disabled(p))
> > - return;
> > -
> > - kcb = get_kprobe_ctlblk();
> > - if (kprobe_running()) {
> > - kprobes_inc_nmissed_count(p);
> > - } else {
> > - unsigned long orig_ip = instruction_pointer(regs);
> > - /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
> > - instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
> > -
> > - __this_cpu_write(current_kprobe, p);
> > - kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> > - if (!p->pre_handler || !p->pre_handler(p, regs)) {
> > - /*
> > - * Emulate singlestep (and also recover regs->ip)
> > - * as if there is a 5byte nop
> > - */
> > - instruction_pointer_set(regs,
> > - (unsigned long)p->addr + MCOUNT_INSN_SIZE);
> > - if (unlikely(p->post_handler)) {
> > - kcb->kprobe_status = KPROBE_HIT_SSDONE;
> > - p->post_handler(p, regs, 0);
> > - }
> > - instruction_pointer_set(regs, orig_ip);
> > - }
> > - /*
> > - * If pre_handler returns !0, it changes regs->ip. We have to
> > - * skip emulating post_handler.
> > - */
> > - __this_cpu_write(current_kprobe, NULL);
> > - }
> > -}
> > -NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> > -
> > int arch_prepare_kprobe_ftrace(struct kprobe *p)
> > {
> > p->ainsn.insn = NULL;
> > diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> > index f8400753a8a9..479148ee1822 100644
> > --- a/kernel/kprobes.c
> > +++ b/kernel/kprobes.c
> > @@ -960,6 +960,50 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
> > #endif /* CONFIG_OPTPROBES */
> >
> > #ifdef CONFIG_KPROBES_ON_FTRACE
> > +/* Ftrace callback handler for kprobes -- called under preepmt disabed */
> > +void __weak kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
> > + struct ftrace_ops *ops, struct pt_regs *regs)
> > +{
> > + struct kprobe *p;
> > + struct kprobe_ctlblk *kcb;
> > +
> > + /* Preempt is disabled by ftrace */
> > + p = get_kprobe((kprobe_opcode_t *)ip);
> > + if (unlikely(!p) || kprobe_disabled(p))
> > + return;
> > +
> > + kcb = get_kprobe_ctlblk();
> > + if (kprobe_running()) {
> > + kprobes_inc_nmissed_count(p);
> > + } else {
> > + unsigned long orig_ip = instruction_pointer(regs);
> > + /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
> > + instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
> > +
> > + __this_cpu_write(current_kprobe, p);
> > + kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> > + if (!p->pre_handler || !p->pre_handler(p, regs)) {
> > + /*
> > + * Emulate singlestep (and also recover regs->ip)
> > + * as if there is a 5byte nop
> > + */
> > + instruction_pointer_set(regs,
> > + (unsigned long)p->addr + MCOUNT_INSN_SIZE);
> > + if (unlikely(p->post_handler)) {
> > + kcb->kprobe_status = KPROBE_HIT_SSDONE;
> > + p->post_handler(p, regs, 0);
> > + }
> > + instruction_pointer_set(regs, orig_ip);
> > + }
> > + /*
> > + * If pre_handler returns !0, it changes regs->ip. We have to
> > + * skip emulating post_handler.
> > + */
> > + __this_cpu_write(current_kprobe, NULL);
> > + }
> > +}
> > +NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> > +
> > static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
> > .func = kprobe_ftrace_handler,
> > .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
> > --
> > 2.23.0.rc1
> >
>
>
> --
> Masami Hiramatsu <[email protected]>
On Tue, 20 Aug 2019 09:01:30 +0900 Masami Hiramatsu wrote:
>
> Hi Jisheng,
Hi,
>
> On Mon, 19 Aug 2019 11:36:09 +0000
> Jisheng Zhang <[email protected]> wrote:
>
> > For KPROBES_ON_FTRACE case, we need to adjust the kprobe's addr
> > correspondingly.
>
> No, I think you have misunderstood what the ftrace_call_adjust() does.
> Ftrace's rec->ip is already adjusted when initializing it. Kprobes
> checks the list after initialized (adjusted). So you don't need to
> adjust it again.
This is not to adjust the ftrace's rec->ip, but to adjust the struct kprobe
addr member. Because check_kprobe_address_safe()=>arch_check_ftrace_location
will check the kprobe's addr with ftrace's rec->ip. Since ftrace's rec->ip
is already adjusted, there will be mismatch if we don't adjust kprobe's addr
correspondingly.
However, this patch is wrong. I should not update the kprobe's addr
for non-ftrace-entry. Will fix this in next version.
Thanks
>
> BTW, this type of hidden adjustment should be avoided by design.
> If you find user specifies wrong address, return error instead of
> adjust it silently.
>
> Thank you,
>
> >
> > Signed-off-by: Jisheng Zhang <[email protected]>
> > ---
> > kernel/kprobes.c | 3 +++
> > 1 file changed, 3 insertions(+)
> >
> > diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> > index 9873fc627d61..f8400753a8a9 100644
> > --- a/kernel/kprobes.c
> > +++ b/kernel/kprobes.c
> > @@ -1560,6 +1560,9 @@ int register_kprobe(struct kprobe *p)
> > addr = kprobe_addr(p);
> > if (IS_ERR(addr))
> > return PTR_ERR(addr);
> > +#ifdef CONFIG_KPROBES_ON_FTRACE
> > + addr = (kprobe_opcode_t *)ftrace_call_adjust((unsigned long)addr);
> > +#endif
> > p->addr = addr;
> >
> > ret = check_kprobe_rereg(p);
> > --
> > 2.23.0.rc1
> >
>
>
> --
> Masami Hiramatsu <[email protected]>
On Mon, 19 Aug 2019 22:22:12 +0530 "Naveen N. Rao" wrote:
>
>
> Jisheng Zhang wrote:
> > This patch implements KPROBES_ON_FTRACE for arm64.
> >
> > ~ # mount -t debugfs debugfs /sys/kernel/debug/
> > ~ # cd /sys/kernel/debug/
> > /sys/kernel/debug # echo 'p _do_fork' > tracing/kprobe_events
> >
> > before the patch:
> >
> > /sys/kernel/debug # cat kprobes/list
> > ffffff801009ff7c k _do_fork+0x4 [DISABLED]
>
> This looks wrong -- we should not be allowing kprobe to be registered on
Yes. I made a mistake when dumping this log. The kernel isn't as clean
as "before the patch".
> ftrace address without KPROBES_ON_FTRACE. Is _do_fork+0x4 the location
> of ftrace entry on arm64?
Indeed, w/o KPROBES_ON_FTRACE, it should be _do_fork+0x0
Thanks