Fix the return address of subsequent kretprobes when multiple
kretprobes are set on the same function.

For example:

# cd /sys/kernel/debug/tracing
# echo "r:event1 sys_symlink" > kprobe_events
# echo "r:event2 sys_symlink" >> kprobe_events
# echo 1 > events/kprobes/enable
# ln -s /tmp/foo /tmp/bar

(without this patch)

# cat trace
ln-897 [000] 20404.133727: event1: (kretprobe_trampoline+0x0/0x4c <- sys_symlink)
ln-897 [000] 20404.133747: event2: (system_call_fastpath+0x16/0x1b <- sys_symlink)

(with this patch)

# cat trace
ln-740 [000] 13799.491076: event1: (system_call_fastpath+0x16/0x1b <- sys_symlink)
ln-740 [000] 13799.491096: event2: (system_call_fastpath+0x16/0x1b <- sys_symlink)
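
As a reference, the same double-probe scenario can also be set up from a
module instead of through kprobe_events. The sketch below is illustrative
only and is not part of the patch: the module boilerplate is an assumption,
and it simply attaches two kretprobes to sys_symlink (matching the example
above) and prints the return address each handler sees via ri->ret_addr,
which is the value that, before this fix, could point at
kretprobe_trampoline instead of the real caller.

/*
 * Minimal, hypothetical reproducer module (not part of this patch):
 * two kretprobes on the same function, each printing the return
 * address it is handed.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int ret_handler1(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("event1: return to %pS\n", (void *)ri->ret_addr);
	return 0;
}

static int ret_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("event2: return to %pS\n", (void *)ri->ret_addr);
	return 0;
}

static struct kretprobe rp1 = {
	.kp.symbol_name	= "sys_symlink",
	.handler	= ret_handler1,
};

static struct kretprobe rp2 = {
	.kp.symbol_name	= "sys_symlink",
	.handler	= ret_handler2,
};

static int __init double_krp_init(void)
{
	int ret;

	ret = register_kretprobe(&rp1);
	if (ret < 0)
		return ret;

	ret = register_kretprobe(&rp2);
	if (ret < 0) {
		unregister_kretprobe(&rp1);
		return ret;
	}
	return 0;
}

static void __exit double_krp_exit(void)
{
	unregister_kretprobe(&rp2);
	unregister_kretprobe(&rp1);
}

module_init(double_krp_init);
module_exit(double_krp_exit);
MODULE_LICENSE("GPL");

With both probes registered, running "ln -s /tmp/foo /tmp/bar" should make
both handlers print the same (real) return address once the fix is applied.
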
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: [email protected]
Cc: Frederic Weisbecker <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: YOSHIFUJI Hideaki <[email protected]>
Reviewed-by: Masami Hiramatsu <[email protected]>
Signed-off-by: KUMANO Syuhei <[email protected]>
---
arch/x86/kernel/kprobes.c | 25 ++++++++++++++++++++++---
1 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 675879b..5220e14 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -707,6 +707,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+ kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@@ -738,14 +739,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
/* another task is sharing our hash bucket */
continue;
+ orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ correct_ret_addr = ri->ret_addr;
+ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__get_cpu_var(current_kprobe) = &ri->rp->kp;
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
__get_cpu_var(current_kprobe) = NULL;
}
- orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
@@ -757,8 +778,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
break;
}
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
kretprobe_hash_unlock(current, &flags);
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
--
1.7.0.4
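
For readers not following the raw hunks, the change boils down to splitting
the single walk of the per-task instance list into two passes: the first
pass only locates the real return address, the second runs the handlers
with ri->ret_addr already corrected. The condensed sketch below is
illustrative only; locking, empty_rp recycling, the per-cpu current_kprobe
bookkeeping and the kretprobe_assert() call are omitted, and the helper
name is invented for this sketch.

/*
 * Condensed sketch of the reworked trampoline_handler() flow
 * (simplified, not the actual kernel function).
 */
static unsigned long run_kretprobe_handlers(struct hlist_head *head,
					    struct pt_regs *regs)
{
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	unsigned long orig_ret_address = 0;
	kprobe_opcode_t *correct_ret_addr = NULL;
	struct kretprobe_instance *ri = NULL;
	struct hlist_node *node, *tmp;

	/*
	 * Pass 1: walk the pending instances of the current task until
	 * one is found whose saved return address is not the trampoline.
	 * That address is the real caller of the probed function.
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			continue;	/* another task shares this bucket */

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (orig_ret_address != trampoline_address)
			break;
	}
	correct_ret_addr = ri->ret_addr;

	/*
	 * Pass 2: run the user handlers, giving each of them the real
	 * return address instead of the one saved at probe time.
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}
		if (orig_ret_address != trampoline_address)
			break;		/* done with this call level */
	}

	return orig_ret_address;	/* address the trampoline returns to */
}
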
KUMANO Syuhei wrote:
> Fix the return address of subsequent kretprobes when multiple
> kretprobes are set on the same function.

Yeah, I'm OK with this fix.

Acked-by: Masami Hiramatsu <[email protected]>

Ingo, could you pull this into your x86/bugfix tree?

Thank you,

Commit-ID:  737480a0d525dae13306296da08029dff545bc72
Gitweb:     http://git.kernel.org/tip/737480a0d525dae13306296da08029dff545bc72
Author:     KUMANO Syuhei <[email protected]>
AuthorDate: Sun, 15 Aug 2010 15:18:04 +0900
Committer:  Ingo Molnar <[email protected]>
CommitDate: Thu, 19 Aug 2010 12:49:56 +0200

kprobes/x86: Fix the return address of multiple kretprobes

Fix the return address of subsequent kretprobes when multiple
kretprobes are set on the same function.

For example:

# cd /sys/kernel/debug/tracing
# echo "r:event1 sys_symlink" > kprobe_events
# echo "r:event2 sys_symlink" >> kprobe_events
# echo 1 > events/kprobes/enable
# ln -s /tmp/foo /tmp/bar

(without this patch)

# cat trace
ln-897 [000] 20404.133727: event1: (kretprobe_trampoline+0x0/0x4c <- sys_symlink)
ln-897 [000] 20404.133747: event2: (system_call_fastpath+0x16/0x1b <- sys_symlink)

(with this patch)

# cat trace
ln-740 [000] 13799.491076: event1: (system_call_fastpath+0x16/0x1b <- sys_symlink)
ln-740 [000] 13799.491096: event2: (system_call_fastpath+0x16/0x1b <- sys_symlink)

Signed-off-by: KUMANO Syuhei <[email protected]>
Reviewed-by: Masami Hiramatsu <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: YOSHIFUJI Hideaki <[email protected]>
LKML-Reference: <1281853084.3254.11.camel@camp10-laptop>
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/kernel/kprobes.c | 25 ++++++++++++++++++++++---
1 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 1bfb6cf..770ebfb 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -709,6 +709,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+ kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@@ -740,14 +741,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
/* another task is sharing our hash bucket */
continue;
+ orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ correct_ret_addr = ri->ret_addr;
+ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__get_cpu_var(current_kprobe) = &ri->rp->kp;
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
__get_cpu_var(current_kprobe) = NULL;
}
- orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
@@ -759,8 +780,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
break;
}
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
kretprobe_hash_unlock(current, &flags);
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {