From f87fb3155500f85ffbafc01d6f5b8dc2a3f6184c Mon Sep 17 00:00:00 2001
From: Harvey Harrison <[email protected]>
Date: Fri, 14 Dec 2007 22:02:51 -0800
Subject: [PATCH] [RFC] x86: kprobes unification
Further unification work. There is a possible behavior change
on X86_32 here:

	is_IF_modifier(p->opcode)

becomes

	is_IF_modifier(p->ainsn.insn)

which should be equivalent, but is not purely cosmetic like the
rest of the unification so far.
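To see why the two forms agree, note that arch_copy_kprobe() fills
p->ainsn.insn from p->addr before p->opcode is recorded. A minimal
user-space sketch of that invariant (illustration only, not part of
the patch; the arrays stand in for the kprobe fields):

	#include <assert.h>
	#include <string.h>

	typedef unsigned char kprobe_opcode_t;

	int main(void)
	{
		kprobe_opcode_t addr[16] = { 0xfa };	/* cli at the probe point */
		kprobe_opcode_t insn[16];		/* the out-of-line copy */
		kprobe_opcode_t opcode;

		memcpy(insn, addr, sizeof(insn));	/* as arch_copy_kprobe() does */
		opcode = addr[0];			/* p->opcode = *p->addr; */

		/* Both calls inspect the same first byte ... */
		assert(opcode == insn[0]);
		/* ... but only the pointer form can look past a REX prefix. */
		return 0;
	}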
Signed-off-by: Harvey Harrison <[email protected]>
---
arch/x86/kernel/kprobes_32.c | 43 +++++++++++++++++++++--------
arch/x86/kernel/kprobes_64.c | 60 +++++++++++++++++++++++++++++++++++-------
2 files changed, 81 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index 878b0c4..21ac86f 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -220,15 +220,21 @@ retry:
/*
* returns non-zero if opcode modifies the interrupt flag.
*/
-static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
+static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
- switch (opcode) {
+ switch (*insn) {
case 0xfa: /* cli */
case 0xfb: /* sti */
case 0xcf: /* iret/iretd */
case 0x9d: /* popf/popfd */
return 1;
}
+
+#ifdef CONFIG_X86_64
+ /* REX prefix */
+ if (*insn >= 0x40 && *insn <= 0x4f && *++insn == 0xcf)
+ return 1;
+#endif
return 0;
}
@@ -370,7 +376,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
__get_cpu_var(current_kprobe) = p;
kcb->kprobe_saved_flags = kcb->kprobe_old_flags
= (regs->flags & (TF_MASK | IF_MASK));
- if (is_IF_modifier(p->opcode))
+ if (is_IF_modifier(p->ainsn.insn))
kcb->kprobe_saved_flags &= ~IF_MASK;
}
@@ -410,10 +416,9 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
- unsigned long *sara = (unsigned long *)&regs->sp;
+ unsigned long *sara = &regs->sp;
ri->ret_addr = (kprobe_opcode_t *) *sara;
-
/* Replace the return addr with trampoline addr */
*sara = (unsigned long) &kretprobe_trampoline;
}
@@ -528,8 +533,9 @@ no_kprobe:
* here. When a retprobed function returns, this probe is hit and
* trampoline_probe_handler() runs, calling the kretprobe's handler.
*/
- void __kprobes kretprobe_trampoline_holder(void)
- {
+void __kprobes kretprobe_trampoline_holder(void)
+{
+#ifdef CONFIG_X86_32
asm volatile ( ".global kretprobe_trampoline\n"
"kretprobe_trampoline: \n"
" pushf\n"
@@ -563,6 +569,11 @@ no_kprobe:
" addl $20, %esp\n"
" popf\n"
" ret\n");
+#else
+ asm volatile ( ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline: \n"
+ "nop\n");
+#endif
}
/*
@@ -658,12 +669,20 @@ void *__kprobes trampoline_handler(struct pt_regs *regs)
static void __kprobes resume_execution(struct kprobe *p,
struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
- unsigned long *tos = (unsigned long *)&regs->sp;
+ unsigned long *tos = &regs->sp;
+ unsigned long next_rip = 0;
unsigned long copy_ip = (unsigned long)p->ainsn.insn;
unsigned long orig_ip = (unsigned long)p->addr;
+ kprobe_opcode_t *insn = p->ainsn.insn;
+
+#ifdef CONFIG_X86_64
+ /*skip the REX prefix*/
+ if (*insn >= 0x40 && *insn <= 0x4f)
+ insn++;
+#endif
regs->flags &= ~TF_MASK;
- switch (p->ainsn.insn[0]) {
+ switch (*insn) {
case 0x9c: /* pushfl */
*tos &= ~(TF_MASK | IF_MASK);
*tos |= kcb->kprobe_old_flags;
@@ -684,7 +703,7 @@ static void __kprobes resume_execution(struct kprobe *p,
*tos = orig_ip + (*tos - copy_ip);
goto no_change;
case 0xff:
- if ((p->ainsn.insn[1] & 0x30) == 0x10) {
+ if ((insn[1] & 0x30) == 0x10) {
/*
* call absolute, indirect
* Fix return addr; ip is correct.
@@ -692,8 +711,8 @@ static void __kprobes resume_execution(struct kprobe *p,
*/
*tos = orig_ip + (*tos - copy_ip);
goto no_change;
- } else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
- ((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
+ } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
+ ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
/* ip is correct. And this is boostable */
p->ainsn.boostable = 1;
goto no_change;
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index b437f7a..9bf6ebe 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -236,8 +236,11 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
return 1;
}
+#ifdef CONFIG_X86_64
+ /* REX prefix */
if (*insn >= 0x40 && *insn <= 0x4f && *++insn == 0xcf)
return 1;
+#endif
return 0;
}
@@ -419,7 +422,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
- unsigned long *sara = (unsigned long *)regs->sp;
+ unsigned long *sara = &regs->sp;
ri->ret_addr = (kprobe_opcode_t *) *sara;
/* Replace the return addr with trampoline addr */
@@ -535,12 +538,48 @@ no_kprobe:
* here. When a retprobed function returns, this probe is hit and
* trampoline_probe_handler() runs, calling the kretprobe's handler.
*/
- void kretprobe_trampoline_holder(void)
- {
- asm volatile ( ".global kretprobe_trampoline\n"
- "kretprobe_trampoline: \n"
- "nop\n");
- }
+void kretprobe_trampoline_holder(void)
+{
+#ifdef CONFIG_X86_32
+ asm volatile ( ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline: \n"
+ " pushf\n"
+ /* skip cs, ip, orig_ax */
+ " subl $12, %esp\n"
+ " pushl %fs\n"
+ " pushl %ds\n"
+ " pushl %es\n"
+ " pushl %eax\n"
+ " pushl %ebp\n"
+ " pushl %edi\n"
+ " pushl %esi\n"
+ " pushl %edx\n"
+ " pushl %ecx\n"
+ " pushl %ebx\n"
+ " movl %esp, %eax\n"
+ " call trampoline_handler\n"
+ /* move flags to cs */
+ " movl 52(%esp), %edx\n"
+ " movl %edx, 48(%esp)\n"
+ /* save true return address on flags */
+ " movl %eax, 52(%esp)\n"
+ " popl %ebx\n"
+ " popl %ecx\n"
+ " popl %edx\n"
+ " popl %esi\n"
+ " popl %edi\n"
+ " popl %ebp\n"
+ " popl %eax\n"
+ /* skip ip, orig_ax, es, ds, fs */
+ " addl $20, %esp\n"
+ " popf\n"
+ " ret\n");
+#else
+ asm volatile ( ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline: \n"
+ "nop\n");
+#endif
+}
/*
* Called when we hit the probe point at kretprobe_trampoline
@@ -634,16 +673,19 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
static void __kprobes resume_execution(struct kprobe *p,
struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
- unsigned long *tos = (unsigned long *)regs->sp;
+ unsigned long *tos = &regs->sp;
unsigned long next_rip = 0;
unsigned long copy_ip = (unsigned long)p->ainsn.insn;
unsigned long orig_ip = (unsigned long)p->addr;
kprobe_opcode_t *insn = p->ainsn.insn;
+#ifdef CONFIG_X86_64
/*skip the REX prefix*/
if (*insn >= 0x40 && *insn <= 0x4f)
insn++;
+#endif
+ regs->flags &= ~TF_MASK;
switch (*insn) {
case 0x9c: /* pushfl */
*tos &= ~(TF_MASK | IF_MASK);
@@ -653,7 +695,6 @@ static void __kprobes resume_execution(struct kprobe *p,
case 0xcb:
case 0xc2:
case 0xca:
- regs->flags &= ~TF_MASK;
/* ip is already adjusted, no more changes required*/
return;
case 0xe8: /* call relative - Fix return addr */
@@ -678,7 +719,6 @@ static void __kprobes resume_execution(struct kprobe *p,
break;
}
- regs->flags &= ~TF_MASK;
if (next_rip) {
regs->ip = next_rip;
} else {
--
1.5.4.rc0.1083.gf568
From 4a048bf0c67558b8170dcde7c8395f6c35592f0b Mon Sep 17 00:00:00 2001
From: Harvey Harrison <[email protected]>
Date: Fri, 14 Dec 2007 22:52:44 -0800
Subject: [PATCH] x86: Move some exception fixup logic in kprobes_{32|64}.c
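On X86_32 the fault fixup goes through fixup_exception(), while the
X86_64 side open-codes the same exception-table search. A stand-alone
sketch of why the two branches match (stubbed types and a fake table
entry, purely illustrative):

	#include <stdio.h>

	struct exception_table_entry { unsigned long insn, fixup; };
	struct pt_regs { unsigned long ip; };

	/* Stub standing in for the kernel's search_exception_tables(). */
	static const struct exception_table_entry *
	search_exception_tables(unsigned long ip)
	{
		static const struct exception_table_entry ex = { 0x1000, 0x2000 };
		return ip == ex.insn ? &ex : NULL;
	}

	/* Roughly what fixup_exception() wraps up on 32-bit, and what
	 * the 64-bit branch of this patch spells out by hand. */
	static int fixup_exception(struct pt_regs *regs)
	{
		const struct exception_table_entry *fixup;

		fixup = search_exception_tables(regs->ip);
		if (fixup) {
			regs->ip = fixup->fixup;
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		struct pt_regs regs = { .ip = 0x1000 };

		if (fixup_exception(&regs))
			printf("ip fixed up to %#lx\n", regs.ip);
		return 0;
	}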
Signed-off-by: Harvey Harrison <[email protected]>
---
arch/x86/kernel/kprobes_32.c | 21 ++++++++++++++++++---
arch/x86/kernel/kprobes_64.c | 21 ++++++++++++++++-----
2 files changed, 34 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index 21ac86f..a2b5ca7 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -533,7 +533,7 @@ no_kprobe:
* here. When a retprobed function returns, this probe is hit and
* trampoline_probe_handler() runs, calling the kretprobe's handler.
*/
-void __kprobes kretprobe_trampoline_holder(void)
+void kretprobe_trampoline_holder(void)
{
#ifdef CONFIG_X86_32
asm volatile ( ".global kretprobe_trampoline\n"
@@ -787,6 +787,9 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+#ifdef CONFIG_X86_64
+ const struct exception_table_entry *fixup;
+#endif
switch(kcb->kprobe_status) {
case KPROBE_HIT_SS:
@@ -829,11 +832,18 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
+#ifdef CONFIG_X86_32
if (fixup_exception(regs))
return 1;
-
+#else
+ fixup = search_exception_tables(regs->ip);
+ if (fixup) {
+ regs->ip = fixup->fixup;
+ return 1;
+ }
+#endif
/*
- * fixup_exception() could not handle it,
+ * Exception couldn't be fixed up,
* Let do_page_fault() fix it.
*/
break;
@@ -852,8 +862,13 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
+#ifdef CONFIG_X86_32
if (args->regs && user_mode_vm(args->regs))
return ret;
+#else
+ if (args->regs && user_mode(args->regs))
+ return ret;
+#endif
switch (val) {
case DIE_INT3:
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index 9bf6ebe..705cb99 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -769,7 +769,9 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+#ifdef CONFIG_X86_64
const struct exception_table_entry *fixup;
+#endif
switch(kcb->kprobe_status) {
case KPROBE_HIT_SS:
@@ -812,14 +814,18 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
+#ifdef CONFIG_X86_32
+ if (fixup_exception(regs))
+ return 1;
+#else
fixup = search_exception_tables(regs->ip);
if (fixup) {
regs->ip = fixup->fixup;
return 1;
}
-
+#endif
/*
- * fixup() could not handle it,
+ * Exception couldn't be fixed up,
* Let do_page_fault() fix it.
*/
break;
@@ -838,8 +844,13 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
+#ifdef CONFIG_X86_32
+ if (args->regs && user_mode_vm(args->regs))
+ return ret;
+#else
if (args->regs && user_mode(args->regs))
return ret;
+#endif
switch (val) {
case DIE_INT3:
@@ -871,7 +882,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_sp = (long *) regs->sp;
+ kcb->jprobe_saved_sp = &regs->sp;
addr = (unsigned long)(kcb->jprobe_saved_sp);
/*
* As Linus pointed out, gcc assumes that the callee
@@ -916,12 +927,12 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
struct jprobe *jp = container_of(p, struct jprobe, kp);
if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
- if ((long *)regs->sp != kcb->jprobe_saved_sp) {
+ if (&regs->sp != kcb->jprobe_saved_sp) {
struct pt_regs *saved_regs =
container_of(kcb->jprobe_saved_sp,
struct pt_regs, sp);
printk("current sp %p does not match saved sp %p\n",
- (long *)regs->sp, kcb->jprobe_saved_sp);
+ &regs->sp, kcb->jprobe_saved_sp);
printk("Saved registers for jprobe %p\n", jp);
show_registers(saved_regs);
printk("Current registers\n");
--
1.5.4.rc0.1083.gf568
From 83fde2864abcc34ef8bc575a22b5f8e76477b64b Mon Sep 17 00:00:00 2001
From: Harvey Harrison <[email protected]>
Date: Fri, 14 Dec 2007 22:56:50 -0800
Subject: [PATCH] x86: Duplicate some instruction IP logic
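The logic being duplicated is the post-single-step ip adjustment:
the X86_32 flavor relocates ip from the copied slot back into the
original instruction stream (and may plant a boost jump), while the
X86_64 flavor prefers a next_rip that resume_execution() filled in
for branch cases. A simplified side-by-side sketch (illustration
only; boosting via set_jmp_op() is elided):

	struct pt_regs { unsigned long ip; };

	/* 32-bit flavor: ip still points into the copy; map it back. */
	void resume_ip_32(struct pt_regs *regs, unsigned long orig_ip,
			  unsigned long copy_ip)
	{
		regs->ip = orig_ip + (regs->ip - copy_ip);
	}

	/* 64-bit flavor: branch cases already recorded the final target. */
	void resume_ip_64(struct pt_regs *regs, unsigned long orig_ip,
			  unsigned long copy_ip, unsigned long next_rip)
	{
		if (next_rip)
			regs->ip = next_rip;
		else
			regs->ip = orig_ip + (regs->ip - copy_ip);
	}

	int main(void)
	{
		struct pt_regs r32 = { .ip = 0x2005 };	/* 5 bytes into the copy */
		struct pt_regs r64 = { .ip = 0x2005 };

		resume_ip_32(&r32, 0x1000, 0x2000);	/* -> 0x1005 */
		resume_ip_64(&r64, 0x1000, 0x2000, 0);	/* -> 0x1005 as well */
		return 0;
	}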
Signed-off-by: Harvey Harrison <[email protected]>
---
arch/x86/kernel/kprobes_32.c | 8 ++++++++
arch/x86/kernel/kprobes_64.c | 22 +++++++++++++++++++++-
2 files changed, 29 insertions(+), 1 deletions(-)
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index a2b5ca7..23fb9ae 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -721,6 +721,7 @@ static void __kprobes resume_execution(struct kprobe *p,
break;
}
+#ifdef CONFIG_X86_32
if (p->ainsn.boostable == 0) {
if ((regs->ip > copy_ip) &&
(regs->ip - copy_ip) + 5 < (MAX_INSN_SIZE + 1)) {
@@ -739,6 +740,13 @@ static void __kprobes resume_execution(struct kprobe *p,
regs->ip = orig_ip + (regs->ip - copy_ip);
no_change:
+#else
+ if (next_rip) {
+ regs->ip = next_rip;
+ } else {
+ regs->ip = orig_ip + (regs->ip - copy_ip);
+ }
+#endif
restore_btf();
}
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index 705cb99..f9cede4 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -719,12 +719,32 @@ static void __kprobes resume_execution(struct kprobe *p,
break;
}
+#ifdef CONFIG_X86_32
+ if (p->ainsn.boostable == 0) {
+ if ((regs->ip > copy_ip) &&
+ (regs->ip - copy_ip) + 5 < (MAX_INSN_SIZE + 1)) {
+ /*
+ * These instructions can be executed directly if it
+ * jumps back to correct address.
+ */
+ set_jmp_op((void *)regs->ip,
+ (void *)orig_ip + (regs->ip - copy_ip));
+ p->ainsn.boostable = 1;
+ } else {
+ p->ainsn.boostable = -1;
+ }
+ }
+
+ regs->ip = orig_ip + (regs->ip - copy_ip);
+
+no_change:
+#else
if (next_rip) {
regs->ip = next_rip;
} else {
regs->ip = orig_ip + (regs->ip - copy_ip);
}
-
+#endif
restore_btf();
}
--
1.5.4.rc0.1083.gf568
From 323525be495dc545158fe43e072523d994253c39 Mon Sep 17 00:00:00 2001
From: Harvey Harrison <[email protected]>
Date: Sat, 15 Dec 2007 00:28:58 -0800
Subject: [PATCH] x86: Uglify kprobes_{32|64}.c before final unification
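Most of the churn is the compile-time selection idiom below (a sketch
of the pattern, not a hunk from this patch). For the int3 back-up in
kprobe_handler() the two arms even compute the same value, since addr
was derived as regs->ip - sizeof(kprobe_opcode_t):

	typedef unsigned char kprobe_opcode_t;

	unsigned long backed_up_ip(unsigned long ip, kprobe_opcode_t *addr)
	{
	#ifdef CONFIG_X86_32
		return ip - sizeof(kprobe_opcode_t);	/* 32-bit spelling */
	#else
		return (unsigned long)addr;		/* 64-bit spelling */
	#endif
	}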
Signed-off-by: Harvey Harrison <[email protected]>
---
arch/x86/kernel/kprobes_32.c | 92 ++++++++++++++++++++++++++------
arch/x86/kernel/kprobes_64.c | 118 ++++++++++++++++++++++++++++++++++-------
2 files changed, 172 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index 23fb9ae..cb836ce 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -16,15 +16,6 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2002, 2004
- *
- * 2002-Oct Created by Vamsi Krishna S <[email protected]> Kernel
- * Probes initial implementation ( includes contributions from
- * Rusty Russell).
- * 2004-July Suparna Bhattacharya <[email protected]> added jumper probes
- * interface to access function arguments.
- * 2005-May Hien Nguyen <[email protected]>, Jim Keniston
- * <[email protected]> and Prasanna S Panchamukhi
- * <[email protected]> added function-return probes.
*/
#include <linux/kprobes.h>
@@ -350,7 +341,11 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
mutex_lock(&kprobe_mutex);
+#ifdef CONFIG_X86_32
free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
+#else
+ free_insn_slot(p->ainsn.insn, 0);
+#endif
mutex_unlock(&kprobe_mutex);
}
@@ -471,7 +466,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
* another cpu right after we hit, no further
* handling of this interrupt is appropriate
*/
+#ifdef CONFIG_X86_32
regs->ip -= sizeof(kprobe_opcode_t);
+#else
+ regs->ip = (unsigned long)addr;
+#endif
ret = 1;
goto no_kprobe;
}
@@ -495,7 +494,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
* Back up over the (now missing) int3 and run
* the original instruction.
*/
+#ifdef CONFIG_X86_32
regs->ip -= sizeof(kprobe_opcode_t);
+#else
+ regs->ip = (unsigned long)addr;
+#endif
ret = 1;
}
/* Not one of ours: let kernel handle it */
@@ -510,7 +513,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
return 1;
ss_probe:
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+#if defined(CONFIG_X86_32) && (!defined(CONFIG_PREEMPT) || defined(CONFIG_PM))
if (p->ainsn.boostable == 1 && !p->post_handler){
/* Boost up -- we can execute copied instructions directly */
reset_current_kprobe();
@@ -579,7 +582,11 @@ void kretprobe_trampoline_holder(void)
/*
* Called when we hit the probe point at kretprobe_trampoline
*/
+#ifdef CONFIG_X86_32
void *__kprobes trampoline_handler(struct pt_regs *regs)
+#else
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+#endif
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
@@ -590,11 +597,12 @@ void *__kprobes trampoline_handler(struct pt_regs *regs)
INIT_HLIST_HEAD(&empty_rp);
spin_lock_irqsave(&kretprobe_lock, flags);
head = kretprobe_inst_table_head(current);
+#ifdef CONFIG_X86_32
/* fixup registers */
regs->cs = __KERNEL_CS | get_kernel_rpl();
regs->ip = trampoline_address;
regs->orig_ax = 0xffffffff;
-
+#endif
/*
* It is possible to have multiple instances associated with a given
* task either because multiple functions in the call path
@@ -612,14 +620,17 @@ void *__kprobes trampoline_handler(struct pt_regs *regs)
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
-
- if (ri->rp && ri->rp->handler){
+#ifdef CONFIG_X86_32
+ if (ri->rp && ri->rp->handler) {
__get_cpu_var(current_kprobe) = &ri->rp->kp;
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->rp->handler(ri, regs);
__get_cpu_var(current_kprobe) = NULL;
}
-
+#else
+ if (ri->rp && ri->rp->handler)
+ ri->rp->handler(ri, regs);
+#endif
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
@@ -633,13 +644,29 @@ void *__kprobes trampoline_handler(struct pt_regs *regs)
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
+#ifdef CONFIG_X86_64
+ regs->ip = orig_ret_address;
+ reset_current_kprobe();
+#endif
spin_unlock_irqrestore(&kretprobe_lock, flags);
-
+#ifdef CONFIG_X86_64
+ preempt_enable_no_resched();
+#endif
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
+
+#ifdef CONFIG_X86_32
return (void*)orig_ret_address;
+#else
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
+ */
+ return 1;
+#endif
}
/*
@@ -687,22 +714,29 @@ static void __kprobes resume_execution(struct kprobe *p,
*tos &= ~(TF_MASK | IF_MASK);
*tos |= kcb->kprobe_old_flags;
break;
- case 0xc2: /* iret/ret/lret */
+ case 0xc2: /* ret/lret */
case 0xc3:
case 0xca:
case 0xcb:
- case 0xcf:
- case 0xea: /* jmp absolute -- ip is correct */
+#ifdef CONFIG_X86_32
+ case 0xcf: /* iret */
/* ip is already adjusted, no more changes required */
p->ainsn.boostable = 1;
goto no_change;
+#else
+ /* ip is already adjusted, no more changes required*/
+ return;
+#endif
case 0xe8: /* call relative - Fix return addr */
*tos = orig_ip + (*tos - copy_ip);
break;
+#ifdef CONFIG_X86_32
case 0x9a: /* call absolute -- same as call absolute, indirect */
*tos = orig_ip + (*tos - copy_ip);
goto no_change;
+#endif
case 0xff:
+#ifdef CONFIG_X86_32
if ((insn[1] & 0x30) == 0x10) {
/*
* call absolute, indirect
@@ -717,6 +751,28 @@ static void __kprobes resume_execution(struct kprobe *p,
p->ainsn.boostable = 1;
goto no_change;
}
+#else
+ if ((insn[1] & 0x30) == 0x10) {
+ /* call absolute, indirect */
+ /* Fix return addr; ip is correct. */
+ next_rip = regs->ip;
+ *tos = orig_ip + (*tos - copy_ip);
+ } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
+ ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
+ /* ip is correct. */
+ next_rip = regs->ip;
+ }
+#endif
+ break;
+ case 0xea: /* jmp absolute -- ip is correct */
+#ifdef CONFIG_X86_32
+ /* ip is already adjusted, no more changes required */
+ p->ainsn.boostable = 1;
+ goto no_change;
+#else
+ next_rip = regs->ip;
+ break;
+#endif
default:
break;
}
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index f9cede4..6761a80 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -16,18 +16,6 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2002, 2004
- *
- * 2002-Oct Created by Vamsi Krishna S <[email protected]> Kernel
- * Probes initial implementation ( includes contributions from
- * Rusty Russell).
- * 2004-July Suparna Bhattacharya <[email protected]> added jumper probes
- * interface to access function arguments.
- * 2004-Oct Jim Keniston <[email protected]> and Prasanna S Panchamukhi
- * <[email protected]> adapted for x86_64
- * 2005-Mar Roland McGrath <[email protected]>
- * Fixed to handle %rip-relative addressing mode correctly.
- * 2005-May Rusty Lynch <[email protected]>
- * Added function return probes functionality
*/
#include <linux/kprobes.h>
@@ -356,7 +344,11 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
mutex_lock(&kprobe_mutex);
+#ifdef CONFIG_X86_32
+ free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
+#else
free_insn_slot(p->ainsn.insn, 0);
+#endif
mutex_unlock(&kprobe_mutex);
}
@@ -429,6 +421,10 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
*sara = (unsigned long) &kretprobe_trampoline;
}
+/*
+ * Interrupts are disabled on entry as trap3 is an interrupt gate and they
+ * remain disabled throughout this function.
+ */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
@@ -485,7 +481,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
* another cpu right after we hit, no further
* handling of this interrupt is appropriate
*/
+#ifdef CONFIG_X86_32
+ regs->ip -= sizeof(kprobe_opcode_t);
+#else
regs->ip = (unsigned long)addr;
+#endif
ret = 1;
goto no_kprobe;
}
@@ -509,7 +509,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
* Back up over the (now missing) int3 and run
* the original instruction.
*/
+#ifdef CONFIG_X86_32
+ regs->ip -= sizeof(kprobe_opcode_t);
+#else
regs->ip = (unsigned long)addr;
+#endif
ret = 1;
}
/* Not one of ours: let kernel handle it */
@@ -524,6 +528,15 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
return 1;
ss_probe:
+#if defined(CONFIG_X86_32) && (!defined(CONFIG_PREEMPT) || defined(CONFIG_PM))
+ if (p->ainsn.boostable == 1 && !p->post_handler){
+ /* Boost up -- we can execute copied instructions directly */
+ reset_current_kprobe();
+ regs->ip = (unsigned long)p->ainsn.insn;
+ preempt_enable_no_resched();
+ return 1;
+ }
+#endif
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
@@ -584,7 +597,11 @@ void kretprobe_trampoline_holder(void)
/*
* Called when we hit the probe point at kretprobe_trampoline
*/
+#ifdef CONFIG_X86_32
+void *__kprobes trampoline_handler(struct pt_regs *regs)
+#else
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+#endif
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
@@ -595,7 +612,12 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
INIT_HLIST_HEAD(&empty_rp);
spin_lock_irqsave(&kretprobe_lock, flags);
head = kretprobe_inst_table_head(current);
-
+#ifdef CONFIG_X86_32
+ /* fixup registers */
+ regs->cs = __KERNEL_CS | get_kernel_rpl();
+ regs->ip = trampoline_address;
+ regs->orig_ax = 0xffffffff;
+#endif
/*
* It is possible to have multiple instances associated with a given
* task either because multiple functions in the call path
@@ -613,10 +635,17 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
-
+#ifdef CONFIG_X86_32
+ if (ri->rp && ri->rp->handler) {
+ __get_cpu_var(current_kprobe) = &ri->rp->kp;
+ get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ ri->rp->handler(ri, regs);
+ __get_cpu_var(current_kprobe) = NULL;
+ }
+#else
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
-
+#endif
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
@@ -630,22 +659,29 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
+#ifdef CONFIG_X86_64
regs->ip = orig_ret_address;
-
reset_current_kprobe();
+#endif
spin_unlock_irqrestore(&kretprobe_lock, flags);
+#ifdef CONFIG_X86_64
preempt_enable_no_resched();
-
+#endif
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
+
+#ifdef CONFIG_X86_32
+ return (void*)orig_ret_address;
+#else
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
* to run (and have re-enabled preemption)
*/
return 1;
+#endif
}
/*
@@ -669,6 +705,8 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* 2) If the single-stepped instruction was a call, the return address
* that is atop the stack is the address following the copied instruction.
* We need to make it the address following the original instruction.
+ *
+ * This function also checks instruction size for preparing direct execution.
*/
static void __kprobes resume_execution(struct kprobe *p,
struct pt_regs *regs, struct kprobe_ctlblk *kcb)
@@ -691,16 +729,44 @@ static void __kprobes resume_execution(struct kprobe *p,
*tos &= ~(TF_MASK | IF_MASK);
*tos |= kcb->kprobe_old_flags;
break;
- case 0xc3: /* ret/lret */
- case 0xcb:
- case 0xc2:
+ case 0xc2: /* ret/lret */
+ case 0xc3:
case 0xca:
+ case 0xcb:
+#ifdef CONFIG_X86_32
+ case 0xcf: /* iret */
+ /* ip is already adjusted, no more changes required */
+ p->ainsn.boostable = 1;
+ goto no_change;
+#else
/* ip is already adjusted, no more changes required*/
return;
+#endif
case 0xe8: /* call relative - Fix return addr */
*tos = orig_ip + (*tos - copy_ip);
break;
+#ifdef CONFIG_X86_32
+ case 0x9a: /* call absolute -- same as call absolute, indirect */
+ *tos = orig_ip + (*tos - copy_ip);
+ goto no_change;
+#endif
case 0xff:
+#ifdef CONFIG_X86_32
+ if ((insn[1] & 0x30) == 0x10) {
+ /*
+ * call absolute, indirect
+ * Fix return addr; ip is correct.
+ * But this is not boostable
+ */
+ *tos = orig_ip + (*tos - copy_ip);
+ goto no_change;
+ } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
+ ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
+ /* ip is correct. And this is boostable */
+ p->ainsn.boostable = 1;
+ goto no_change;
+ }
+#else
if ((insn[1] & 0x30) == 0x10) {
/* call absolute, indirect */
/* Fix return addr; ip is correct. */
@@ -711,10 +777,17 @@ static void __kprobes resume_execution(struct kprobe *p,
/* ip is correct. */
next_rip = regs->ip;
}
+#endif
break;
case 0xea: /* jmp absolute -- ip is correct */
+#ifdef CONFIG_X86_32
+ /* ip is already adjusted, no more changes required */
+ p->ainsn.boostable = 1;
+ goto no_change;
+#else
next_rip = regs->ip;
break;
+#endif
default:
break;
}
@@ -748,6 +821,10 @@ no_change:
restore_btf();
}
+/*
+ * Interrupts are disabled on entry as trap1 is an interrupt gate and they
+ * remain disabled throughout this function.
+ */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
@@ -904,6 +981,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
kcb->jprobe_saved_regs = *regs;
kcb->jprobe_saved_sp = &regs->sp;
addr = (unsigned long)(kcb->jprobe_saved_sp);
+
/*
* As Linus pointed out, gcc assumes that the callee
* owns the argument space and could overwrite it, e.g.
--
1.5.4.rc0.1083.gf568
From e7fd0efa2e7860012d0a625c1fda3174ae200dc5 Mon Sep 17 00:00:00 2001
From: Harvey Harrison <[email protected]>
Date: Sat, 15 Dec 2007 00:36:37 -0800
Subject: [PATCH] x86: Final kprobes_{32|64}.c unification
Sure, it's ugly, but it can only get better from here.
Signed-off-by: Harvey Harrison <[email protected]>
---
arch/x86/kernel/kprobes_32.c | 33 +++++++++++++++++++++++++++++++++
arch/x86/kernel/kprobes_64.c | 18 ++++++++++++++++++
2 files changed, 51 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index cb836ce..f195348 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -20,10 +20,15 @@
#include <linux/kprobes.h>
#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/slab.h>
#include <linux/preempt.h>
+#include <linux/module.h>
#include <linux/kdebug.h>
+
#include <asm/cacheflush.h>
#include <asm/desc.h>
+#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
@@ -447,6 +452,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
regs->flags &= ~TF_MASK;
regs->flags |= kcb->kprobe_saved_flags;
goto no_kprobe;
+#ifdef CONFIG_X86_32
}
/* We have reentered the kprobe_handler(), since
* another probe was hit while within the handler.
@@ -460,6 +466,33 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
+#else
+ } else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
+ /* TODO: Provide re-entrancy from
+ * post_kprobes_handler() and avoid exception
+ * stack corruption while single-stepping on
+ * the instruction of the new probe.
+ */
+ arch_disarm_kprobe(p);
+ regs->ip = (unsigned long)p->addr;
+ reset_current_kprobe();
+ ret = 1;
+ } else {
+ /* We have reentered the kprobe_handler(), since
+ * another probe was hit while within the
+ * handler. We here save the original kprobe
+ * variables and just single step on instruction
+ * of the new probe without calling any user
+ * handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ }
+#endif
} else {
if (*addr != BREAKPOINT_INSTRUCTION) {
/* The breakpoint instruction was removed by
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index 6761a80..f195348 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -26,6 +26,8 @@
#include <linux/module.h>
#include <linux/kdebug.h>
+#include <asm/cacheflush.h>
+#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
@@ -450,6 +452,21 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
regs->flags &= ~TF_MASK;
regs->flags |= kcb->kprobe_saved_flags;
goto no_kprobe;
+#ifdef CONFIG_X86_32
+ }
+ /* We have reentered the kprobe_handler(), since
+ * another probe was hit while within the handler.
+ * We here save the original kprobes variables and
+ * just single step on the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+#else
} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
/* TODO: Provide re-entrancy from
* post_kprobes_handler() and avoid exception
@@ -475,6 +492,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_REENTER;
return 1;
}
+#endif
} else {
if (*addr != BREAKPOINT_INSTRUCTION) {
/* The breakpoint instruction was removed by
--
1.5.4.rc0.1083.gf568
From 7fe6bd404ae0c51ec0bd334ca28a1f17f247a099 Mon Sep 17 00:00:00 2001
From: Harvey Harrison <[email protected]>
Date: Sat, 15 Dec 2007 00:39:44 -0800
Subject: [PATCH] x86: Eliminate kprobes_{32|64}.c
Now that the files are identical (the index lines in the previous
patch show both converging on the same blob, f195348), unify the
makefiles and eliminate the duplicates.
Signed-off-by: Harvey Harrison <[email protected]>
---
arch/x86/kernel/Makefile_32 | 2 +-
arch/x86/kernel/Makefile_64 | 2 +-
arch/x86/kernel/kprobes.c | 1090 ++++++++++++++++++++++++++++++++++++++++++
arch/x86/kernel/kprobes_32.c | 1090 ------------------------------------------
arch/x86/kernel/kprobes_64.c | 1090 ------------------------------------------
5 files changed, 1092 insertions(+), 2182 deletions(-)
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index db8cada..62a8120 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -35,7 +35,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec_32.o relocate_kernel_32.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_32.o
obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o
-obj-$(CONFIG_KPROBES) += kprobes_32.o
+obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_MODULES) += module_32.o
obj-$(CONFIG_ACPI_SRAT) += srat_32.o
obj-$(CONFIG_EFI) += efi.o efi_32.o efi_stub_32.o
diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
index a961f5c..79e335c 100644
--- a/arch/x86/kernel/Makefile_64
+++ b/arch/x86/kernel/Makefile_64
@@ -34,7 +34,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o
-obj-$(CONFIG_KPROBES) += kprobes_64.o
+obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
obj-$(CONFIG_X86_VSMP) += vsmp_64.o
obj-$(CONFIG_K8_NB) += k8.o
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
new file mode 100644
index 0000000..f195348
--- /dev/null
+++ b/arch/x86/kernel/kprobes.c
@@ -0,0 +1,1090 @@
+/*
+ * Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ */
+
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/preempt.h>
+#include <linux/module.h>
+#include <linux/kdebug.h>
+
+#include <asm/cacheflush.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/alternative.h>
+
+void jprobe_return_end(void);
+static void __kprobes arch_copy_kprobe(struct kprobe *p);
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+struct kretprobe_blackpoint kretprobe_blacklist[] = {
+ {"__switch_to", }, /* This function switches only current task, but
+ doesn't switch kernel stack.*/
+ {NULL, NULL} /* Terminator */
+};
+const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
+
+#define W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
+ (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
+ (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
+ (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
+ (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
+ << (r % BITS_PER_LONG))
+
+#define R1(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
+ W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) |
+#define R3(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
+ W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) |
+#define R4(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
+ W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf),
+#define RF(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
+ W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)
+
+#ifdef CONFIG_X86_32
+#define R2(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
+ W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf),
+#else
+#define R2(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
+ W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) |
+#endif
+
+/*
+ * Undefined/reserved opcodes, conditional jumps, opcode extension
+ * groups, and some special opcodes cannot be boosted.
+ */
+static const unsigned long
+twobyte_is_boostable[256 / BITS_PER_LONG] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ----------------------------------------------- */
+ R1(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) /* 00 */
+ R2(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 10 */
+ R3(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 20 */
+ R4(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 30 */
+ R1(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 40 */
+ R2(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 50 */
+ R3(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) /* 60 */
+ R4(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) /* 70 */
+ R1(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 80 */
+ R2(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 90 */
+ R3(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) /* a0 */
+ R4(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) /* b0 */
+ R1(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) /* c0 */
+ R2(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) /* d0 */
+ R3(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) /* e0 */
+ RF(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0) /* f0 */
+ /* ----------------------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+};
+
+static const unsigned long
+onebyte_has_modrm[256 / BITS_PER_LONG] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ----------------------------------------------- */
+ R1(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) /* 00 */
+ R2(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) /* 10 */
+ R3(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) /* 20 */
+ R4(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) /* 30 */
+ R1(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 40 */
+ R2(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 50 */
+ R3(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) /* 60 */
+ R4(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 70 */
+ R1(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 80 */
+ R2(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 90 */
+ R3(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* a0 */
+ R4(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* b0 */
+ R1(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) /* c0 */
+ R2(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) /* d0 */
+ R3(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* e0 */
+ RF(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) /* f0 */
+ /* ----------------------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+};
+
+static const unsigned long
+twobyte_has_modrm[256 / BITS_PER_LONG] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ----------------------------------------------- */
+ R1(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) /* 0f */
+ R2(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) /* 1f */
+ R3(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) /* 2f */
+ R4(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 3f */
+ R1(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 4f */
+ R2(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 5f */
+ R3(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 6f */
+ R4(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) /* 7f */
+ R1(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 8f */
+ R2(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 9f */
+ R3(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) /* af */
+ R4(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) /* bf */
+ R1(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) /* cf */
+ R2(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* df */
+ R3(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* ef */
+ RF(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* ff */
+ /* ----------------------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+};
+
+#undef W
+#undef R1
+#undef R2
+#undef R3
+#undef R4
+#undef RF
+
+/* insert a jmp code */
+static inline void set_jmp_op(void *from, void *to)
+{
+ struct __arch_jmp_op {
+ char op;
+ long raddr;
+ } __attribute__((packed)) *jop;
+ jop = (struct __arch_jmp_op *)from;
+ jop->raddr = (long)(to) - ((long)(from) + 5);
+ jop->op = RELATIVEJUMP_INSTRUCTION;
+}
+
+/*
+ * returns non-zero if opcodes can be boosted.
+ */
+static inline int can_boost(kprobe_opcode_t *opcodes)
+{
+ kprobe_opcode_t opcode;
+ kprobe_opcode_t *orig_opcodes = opcodes;
+retry:
+ if (opcodes - orig_opcodes > MAX_INSN_SIZE)
+ return 0;
+ opcode = *(opcodes++);
+
+ /* 2nd-byte opcode */
+ if (opcode == 0x0f) {
+ if (opcodes - orig_opcodes > MAX_INSN_SIZE)
+ return 0;
+ return test_bit(*opcodes, twobyte_is_boostable);
+ }
+
+ switch (opcode & 0xf0) {
+ case 0x60:
+ if (0x63 < opcode && opcode < 0x67)
+ goto retry; /* prefixes */
+ /* can't boost Address-size override and bound */
+ return (opcode != 0x62 && opcode != 0x67);
+ case 0x70:
+ return 0; /* can't boost conditional jump */
+ case 0xc0:
+ /* can't boost software interrupts */
+ return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
+ case 0xd0:
+ /* can boost AA* and XLAT */
+ return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
+ case 0xe0:
+ /* can boost in/out and absolute jmps */
+ return ((opcode & 0x04) || opcode == 0xea);
+ case 0xf0:
+ if ((opcode & 0x0c) == 0 && opcode != 0xf1)
+ goto retry; /* lock/rep(ne) prefix */
+ /* clearing and setting flags can be boosted */
+ return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
+ default:
+ if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
+ goto retry; /* prefixes */
+ /* can't boost CS override and call */
+ return (opcode != 0x2e && opcode != 0x9a);
+ }
+}
+
+/*
+ * returns non-zero if opcode modifies the interrupt flag.
+ */
+static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
+{
+ switch (*insn) {
+ case 0xfa: /* cli */
+ case 0xfb: /* sti */
+ case 0xcf: /* iret/iretd */
+ case 0x9d: /* popf/popfd */
+ return 1;
+ }
+
+#ifdef CONFIG_X86_64
+ /* REX prefix */
+ if (*insn >= 0x40 && *insn <= 0x4f && *++insn == 0xcf)
+ return 1;
+#endif
+ return 0;
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ /* insn: must be on special executable page on x86_32|64. */
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn)
+ return -ENOMEM;
+
+ arch_copy_kprobe(p);
+ return 0;
+}
+
+/*
+ * Determine if the instruction uses the %rip-relative addressing mode.
+ * If it does, return the address of the 32-bit displacement word.
+ * If not, return null.
+ */
+static s32 __kprobes *is_riprel(u8 *insn)
+{
+ int need_modrm;
+
+ /* Skip legacy instruction prefixes. */
+ while (1) {
+ switch (*insn) {
+ case 0x66:
+ case 0x67:
+ case 0x2e:
+ case 0x3e:
+ case 0x26:
+ case 0x64:
+ case 0x65:
+ case 0x36:
+ case 0xf0:
+ case 0xf3:
+ case 0xf2:
+ ++insn;
+ continue;
+ }
+ break;
+ }
+
+ /* Skip REX instruction prefix. */
+ if ((*insn & 0xf0) == 0x40)
+ ++insn;
+
+ if (*insn == 0x0f) { /* Two-byte opcode. */
+ ++insn;
+ need_modrm = test_bit(*insn, twobyte_has_modrm);
+ } else { /* One-byte opcode. */
+ need_modrm = test_bit(*insn, onebyte_has_modrm);
+ }
+
+ if (need_modrm) {
+ u8 modrm = *++insn;
+ if ((modrm & 0xc7) == 0x05) { /* %rip+disp32 addressing mode */
+ /* Displacement follows ModRM byte. */
+ return (s32 *) ++insn;
+ }
+ }
+
+ /* No %rip-relative addressing mode here. */
+ return NULL;
+}
+
+static void __kprobes arch_copy_kprobe(struct kprobe *p)
+{
+#ifdef CONFIG_X86_32
+ memcpy(p->ainsn.insn, p->addr, (MAX_INSN_SIZE + 1) * sizeof(kprobe_opcode_t));
+ p->opcode = *p->addr;
+ if (can_boost(p->addr)) {
+ p->ainsn.boostable = 0;
+ } else {
+ p->ainsn.boostable = -1;
+ }
+#else
+ s32 *ripdisp;
+ memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
+ ripdisp = is_riprel(p->ainsn.insn);
+ if (ripdisp) {
+ /*
+ * The copied instruction uses the %rip-relative
+ * addressing mode. Adjust the displacement for the
+ * difference between the original location of this
+ * instruction and the location of the copy that will
+ * actually be run. The tricky bit here is making sure
+ * that the sign extension happens correctly in this
+ * calculation, since we need a signed 32-bit result to
+ * be sign-extended to 64 bits when it's added to the
+ * %rip value and yield the same 64-bit result that the
+ * sign-extension of the original signed 32-bit
+ * displacement would have given.
+ */
+ s64 disp = (u8 *) p->addr + *ripdisp - (u8 *) p->ainsn.insn;
+ BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
+ *ripdisp = disp;
+ }
+ p->opcode = *p->addr;
+#endif
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ text_poke(p->addr, &p->opcode, 1);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ mutex_lock(&kprobe_mutex);
+#ifdef CONFIG_X86_32
+ free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
+#else
+ free_insn_slot(p->ainsn.insn, 0);
+#endif
+ mutex_unlock(&kprobe_mutex);
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
+ kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
+ kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = p;
+ kcb->kprobe_saved_flags = kcb->kprobe_old_flags
+ = (regs->flags & (TF_MASK | IF_MASK));
+ if (is_IF_modifier(p->ainsn.insn))
+ kcb->kprobe_saved_flags &= ~IF_MASK;
+}
+
+static __always_inline void clear_btf(void)
+{
+ if (test_thread_flag(TIF_DEBUGCTLMSR))
+#ifdef CONFIG_X86_32
+ wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
+#else
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
+#endif
+}
+
+static __always_inline void restore_btf(void)
+{
+ if (test_thread_flag(TIF_DEBUGCTLMSR))
+#ifdef CONFIG_X86_32
+ wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
+#else
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr);
+#endif
+}
+
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ clear_btf();
+ regs->flags |= TF_MASK;
+ regs->flags &= ~IF_MASK;
+ /* single step inline if the instruction is an int3 */
+ if (p->opcode == BREAKPOINT_INSTRUCTION)
+ regs->ip = (unsigned long)p->addr;
+ else
+ regs->ip = (unsigned long)p->ainsn.insn;
+}
+
+/* Called with kretprobe_lock held */
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ unsigned long *sara = &regs->sp;
+
+ ri->ret_addr = (kprobe_opcode_t *) *sara;
+ /* Replace the return addr with trampoline addr */
+ *sara = (unsigned long) &kretprobe_trampoline;
+}
+
+/*
+ * Interrupts are disabled on entry as trap3 is an interrupt gate and they
+ * remain disabled throughout this function.
+ */
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p;
+ int ret = 0;
+ kprobe_opcode_t *addr;
+ struct kprobe_ctlblk *kcb;
+
+ addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+
+ /* Check we're not actually recursing */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
+ *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+ regs->flags &= ~TF_MASK;
+ regs->flags |= kcb->kprobe_saved_flags;
+ goto no_kprobe;
+#ifdef CONFIG_X86_32
+ }
+ /* We have reentered the kprobe_handler(), since
+ * another probe was hit while within the handler.
+ * We here save the original kprobes variables and
+ * just single step on the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+#else
+ } else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
+ /* TODO: Provide re-entrancy from
+ * post_kprobes_handler() and avoid exception
+ * stack corruption while single-stepping on
+ * the instruction of the new probe.
+ */
+ arch_disarm_kprobe(p);
+ regs->ip = (unsigned long)p->addr;
+ reset_current_kprobe();
+ ret = 1;
+ } else {
+ /* We have reentered the kprobe_handler(), since
+ * another probe was hit while within the
+ * handler. We here save the original kprobe
+ * variables and just single step on instruction
+ * of the new probe without calling any user
+ * handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ }
+#endif
+ } else {
+ if (*addr != BREAKPOINT_INSTRUCTION) {
+ /* The breakpoint instruction was removed by
+ * another cpu right after we hit, no further
+ * handling of this interrupt is appropriate
+ */
+#ifdef CONFIG_X86_32
+ regs->ip -= sizeof(kprobe_opcode_t);
+#else
+ regs->ip = (unsigned long)addr;
+#endif
+ ret = 1;
+ goto no_kprobe;
+ }
+ p = __get_cpu_var(current_kprobe);
+ if (p->break_handler && p->break_handler(p, regs)) {
+ goto ss_probe;
+ }
+ }
+ goto no_kprobe;
+ }
+
+ p = get_kprobe(addr);
+ if (!p) {
+ if (*addr != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ * Back up over the (now missing) int3 and run
+ * the original instruction.
+ */
+#ifdef CONFIG_X86_32
+ regs->ip -= sizeof(kprobe_opcode_t);
+#else
+ regs->ip = (unsigned long)addr;
+#endif
+ ret = 1;
+ }
+ /* Not one of ours: let kernel handle it */
+ goto no_kprobe;
+ }
+
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ if (p->pre_handler && p->pre_handler(p, regs))
+ /* handler has already set things up, so skip ss setup */
+ return 1;
+
+ss_probe:
+#if defined(CONFIG_X86_32) && (!defined(CONFIG_PREEMPT) || defined(CONFIG_PM))
+ if (p->ainsn.boostable == 1 && !p->post_handler) {
+ /* Boost up -- we can execute copied instructions directly */
+ reset_current_kprobe();
+ regs->ip = (unsigned long)p->ainsn.insn;
+ preempt_enable_no_resched();
+ return 1;
+ }
+#endif
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+}
+
+/*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+void kretprobe_trampoline_holder(void)
+{
+#ifdef CONFIG_X86_32
+ asm volatile ( ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline: \n"
+ " pushf\n"
+ /* skip cs, ip, orig_ax */
+ " subl $12, %esp\n"
+ " pushl %fs\n"
+ " pushl %ds\n"
+ " pushl %es\n"
+ " pushl %eax\n"
+ " pushl %ebp\n"
+ " pushl %edi\n"
+ " pushl %esi\n"
+ " pushl %edx\n"
+ " pushl %ecx\n"
+ " pushl %ebx\n"
+ " movl %esp, %eax\n"
+ " call trampoline_handler\n"
+ /* move flags to cs */
+ " movl 52(%esp), %edx\n"
+ " movl %edx, 48(%esp)\n"
+ /* save true return address on flags */
+ " movl %eax, 52(%esp)\n"
+ " popl %ebx\n"
+ " popl %ecx\n"
+ " popl %edx\n"
+ " popl %esi\n"
+ " popl %edi\n"
+ " popl %ebp\n"
+ " popl %eax\n"
+ /* skip ip, orig_ax, es, ds, fs */
+ " addl $20, %esp\n"
+ " popf\n"
+ " ret\n");
+#else
+ asm volatile ( ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline: \n"
+ "nop\n");
+#endif
+}
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+#ifdef CONFIG_X86_32
+void *__kprobes trampoline_handler(struct pt_regs *regs)
+#else
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+#endif
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *node, *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ spin_lock_irqsave(&kretprobe_lock, flags);
+ head = kretprobe_inst_table_head(current);
+#ifdef CONFIG_X86_32
+ /* fixup registers */
+ regs->cs = __KERNEL_CS | get_kernel_rpl();
+ regs->ip = trampoline_address;
+ regs->orig_ax = 0xffffffff;
+#endif
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because multiple functions in the call path
+ * have a return probe installed on them, and/or more than one
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+#ifdef CONFIG_X86_32
+ if (ri->rp && ri->rp->handler) {
+ __get_cpu_var(current_kprobe) = &ri->rp->kp;
+ get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ ri->rp->handler(ri, regs);
+ __get_cpu_var(current_kprobe) = NULL;
+ }
+#else
+ if (ri->rp && ri->rp->handler)
+ ri->rp->handler(ri, regs);
+#endif
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+#ifdef CONFIG_X86_64
+ regs->ip = orig_ret_address;
+ reset_current_kprobe();
+#endif
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+#ifdef CONFIG_X86_64
+ preempt_enable_no_resched();
+#endif
+ hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+
+#ifdef CONFIG_X86_32
+ return (void*)orig_ret_address;
+#else
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
+ */
+ return 1;
+#endif
+}
+
+/*
+ * Called after single-stepping. p->addr is the address of the
+ * instruction whose first byte has been replaced by the "int 3"
+ * instruction. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction. The address of this
+ * copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * interrupt. We have to fix up the stack as follows:
+ *
+ * 0) Except in the case of absolute or indirect jump or call instructions,
+ * the new ip is relative to the copied instruction. We need to make
+ * it relative to the original instruction.
+ *
+ * 1) If the single-stepped instruction was pushfl, then the TF and IF
+ * flags are set in the just-pushed flags, and may need to be cleared.
+ *
+ * 2) If the single-stepped instruction was a call, the return address
+ * that is atop the stack is the address following the copied instruction.
+ * We need to make it the address following the original instruction.
+ *
+ * This function also checks instruction size for preparing direct execution.
+ */
+static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+{
+ unsigned long *tos = &regs->sp;
+ unsigned long next_rip = 0;
+ unsigned long copy_ip = (unsigned long)p->ainsn.insn;
+ unsigned long orig_ip = (unsigned long)p->addr;
+ kprobe_opcode_t *insn = p->ainsn.insn;
+
+#ifdef CONFIG_X86_64
+ /* skip the REX prefix */
+ if (*insn >= 0x40 && *insn <= 0x4f)
+ insn++;
+#endif
+
+ regs->flags &= ~TF_MASK;
+ switch (*insn) {
+ case 0x9c: /* pushfl */
+ *tos &= ~(TF_MASK | IF_MASK);
+ *tos |= kcb->kprobe_old_flags;
+ break;
+ case 0xc2: /* ret/lret */
+ case 0xc3:
+ case 0xca:
+ case 0xcb:
+#ifdef CONFIG_X86_32
+ case 0xcf: /* iret */
+ /* ip is already adjusted, no more changes required */
+ p->ainsn.boostable = 1;
+ goto no_change;
+#else
+ /* ip is already adjusted, no more changes required */
+ return;
+#endif
+ case 0xe8: /* call relative - Fix return addr */
+ *tos = orig_ip + (*tos - copy_ip);
+ break;
+#ifdef CONFIG_X86_32
+ case 0x9a: /* call absolute -- Fix return addr, not ip */
+ *tos = orig_ip + (*tos - copy_ip);
+ goto no_change;
+#endif
+ case 0xff:
+#ifdef CONFIG_X86_32
+ if ((insn[1] & 0x30) == 0x10) {
+ /*
+ * call absolute, indirect
+ * Fix return addr; ip is correct.
+ * But this is not boostable
+ */
+ *tos = orig_ip + (*tos - copy_ip);
+ goto no_change;
+ } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
+ ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
+ /* ip is correct. And this is boostable */
+ p->ainsn.boostable = 1;
+ goto no_change;
+ }
+#else
+ if ((insn[1] & 0x30) == 0x10) {
+ /* call absolute, indirect */
+ /* Fix return addr; ip is correct. */
+ next_rip = regs->ip;
+ *tos = orig_ip + (*tos - copy_ip);
+ } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
+ ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
+ /* ip is correct. */
+ next_rip = regs->ip;
+ }
+#endif
+ break;
+ case 0xea: /* jmp absolute -- ip is correct */
+#ifdef CONFIG_X86_32
+ /* ip is already adjusted, no more changes required */
+ p->ainsn.boostable = 1;
+ goto no_change;
+#else
+ next_rip = regs->ip;
+ break;
+#endif
+ default:
+ break;
+ }
+
+#ifdef CONFIG_X86_32
+ if (p->ainsn.boostable == 0) {
+ if ((regs->ip > copy_ip) &&
+ (regs->ip - copy_ip) + 5 < (MAX_INSN_SIZE + 1)) {
+ /*
+ * This instruction can be executed directly if it
+ * jumps back to the correct address.
+ */
+ set_jmp_op((void *)regs->ip,
+ (void *)orig_ip + (regs->ip - copy_ip));
+ p->ainsn.boostable = 1;
+ } else {
+ p->ainsn.boostable = -1;
+ }
+ }
+
+ regs->ip = orig_ip + (regs->ip - copy_ip);
+
+no_change:
+#else
+ if (next_rip) {
+ regs->ip = next_rip;
+ } else {
+ regs->ip = orig_ip + (regs->ip - copy_ip);
+ }
+#endif
+ restore_btf();
+}
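
The default-path fix-up at the end of resume_execution() is a plain rebase:
the distance travelled while single-stepping the copy is added back onto the
original address. A worked example with made-up addresses:

#include <assert.h>

int main(void)
{
	unsigned long orig_ip = 0xc0100000UL;	/* probed instruction */
	unsigned long copy_ip = 0xc0200000UL;	/* copy in the insn slot */
	unsigned long ip = copy_ip + 5;		/* after a 5-byte insn */

	/* Same offset, rebased onto the original location. */
	unsigned long fixed = orig_ip + (ip - copy_ip);

	assert(fixed == orig_ip + 5);
	return 0;
}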
+
+/*
+ * Interrupts are disabled on entry as trap1 is an interrupt gate and they
+ * remain disabled throughout this function.
+ */
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ resume_execution(cur, regs, kcb);
+ regs->flags |= kcb->kprobe_saved_flags;
+ trace_hardirqs_fixup_flags(regs->flags);
+
+ /* Restore the original saved kprobes variables and continue. */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+ reset_current_kprobe();
+out:
+ preempt_enable_no_resched();
+
+ /*
+ * if somebody else is singlestepping across a probe point, flags
+ * will have TF set, in which case, continue the remaining processing
+ * of do_debug, as if this is not a probe hit.
+ */
+ if (regs->flags & TF_MASK)
+ return 0;
+
+ return 1;
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+#ifdef CONFIG_X86_64
+ const struct exception_table_entry *fixup;
+#endif
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe and the ip points back to the probe address
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->ip = (unsigned long)cur->addr;
+ regs->flags |= kcb->kprobe_old_flags;
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We increment the nmissed count for accounting;
+ * the npre/npostfault counts could also be used to
+ * account for these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page fault. This could happen
+ * if the handler tries to access user space, e.g. via
+ * copy_from_user() or get_user(). Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+#ifdef CONFIG_X86_32
+ if (fixup_exception(regs))
+ return 1;
+#else
+ fixup = search_exception_tables(regs->ip);
+ if (fixup) {
+ regs->ip = fixup->fixup;
+ return 1;
+ }
+#endif
+ /*
+ * The exception couldn't be fixed up;
+ * let do_page_fault() fix it.
+ */
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
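
On the CONFIG_X86_64 side the fault is resolved through the kernel's
exception table, the same mechanism the __ex_table fixups use. Below is a
simplified user-space sketch of the lookup idea only; struct ex_entry,
search_extable() and the sample addresses are hypothetical, and the real
search_exception_tables() performs a sorted search over all tables.

#include <assert.h>
#include <stddef.h>

struct ex_entry {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* address to resume at instead */
};

static const struct ex_entry extable[] = {
	{ 0x1000, 0x2000 },
	{ 0x1008, 0x2040 },
};

static const struct ex_entry *search_extable(unsigned long ip)
{
	size_t i;

	for (i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
		if (extable[i].insn == ip)
			return &extable[i];
	return NULL;	/* no fixup: let do_page_fault() handle it */
}

int main(void)
{
	const struct ex_entry *fix = search_extable(0x1008);

	/* The handler above then does regs->ip = fixup->fixup. */
	assert(fix && fix->fixup == 0x2040);
	return 0;
}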
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+
+#ifdef CONFIG_X86_32
+ if (args->regs && user_mode_vm(args->regs))
+ return ret;
+#else
+ if (args->regs && user_mode(args->regs))
+ return ret;
+#endif
+
+ switch (val) {
+ case DIE_INT3:
+ if (kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ case DIE_DEBUG:
+ if (post_kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ case DIE_GPF:
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
+ if (kprobe_running() &&
+ kprobe_fault_handler(args->regs, args->trapnr))
+ ret = NOTIFY_STOP;
+ preempt_enable();
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ unsigned long addr;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ kcb->jprobe_saved_regs = *regs;
+ kcb->jprobe_saved_sp = &regs->sp;
+ addr = (unsigned long)(kcb->jprobe_saved_sp);
+
+ /*
+ * As Linus pointed out, gcc assumes that the callee
+ * owns the argument space and could overwrite it, e.g.
+ * tailcall optimization. So, to be absolutely safe
+ * we also save and restore enough stack bytes to cover
+ * the argument area.
+ */
+ memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
+ MIN_STACK_SIZE(addr));
+ regs->flags &= ~IF_MASK;
+ trace_hardirqs_off();
+ regs->ip = (unsigned long)(jp->entry);
+ return 1;
+}
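
A sketch of the save/restore idea from the comment above. SAVE_BYTES is a
fixed stand-in for MIN_STACK_SIZE(addr), and save_args()/restore_args() are
hypothetical helpers, not kernel functions.

#include <assert.h>
#include <string.h>

#define SAVE_BYTES 64	/* stand-in for MIN_STACK_SIZE(addr) */

static unsigned char saved_stack[SAVE_BYTES];

/* Before redirecting ip to the jprobe entry: snapshot the argument
 * area, since gcc lets the callee clobber it (e.g. tail calls). */
static void save_args(const void *sp)
{
	memcpy(saved_stack, sp, SAVE_BYTES);
}

/* On the way back (longjmp_break_handler): put the bytes back. */
static void restore_args(void *sp)
{
	memcpy(sp, saved_stack, SAVE_BYTES);
}

int main(void)
{
	unsigned char stack[SAVE_BYTES] = { 42 };

	save_args(stack);
	stack[0] = 0;		/* the jprobe handler scribbles here */
	restore_args(stack);
	assert(stack[0] == 42);
	return 0;
}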
+
+void __kprobes jprobe_return(void)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+#ifdef CONFIG_X86_32
+ asm volatile (" xchgl %%ebx,%%esp \n"
+ " int3 \n"
+ " .globl jprobe_return_end \n"
+ " jprobe_return_end: \n"
+ " nop \n"::"b"
+ (kcb->jprobe_saved_sp):"memory");
+#else
+ asm volatile (" xchg %%rbx,%%rsp \n"
+ " int3 \n"
+ " .globl jprobe_return_end \n"
+ " jprobe_return_end: \n"
+ " nop \n"::"b"
+ (kcb->jprobe_saved_sp):"memory");
+#endif
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ u8 *addr = (u8 *) (regs->ip - 1);
+ unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_sp);
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+
+ if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
+ if (&regs->sp != kcb->jprobe_saved_sp) {
+ struct pt_regs *saved_regs =
+ container_of(kcb->jprobe_saved_sp,
+ struct pt_regs, sp);
+ printk("current sp %p does not match saved sp %p\n",
+ &regs->sp, kcb->jprobe_saved_sp);
+ printk("Saved registers for jprobe %p\n", jp);
+ show_registers(saved_regs);
+ printk("Current registers\n");
+ show_registers(regs);
+ BUG();
+ }
+ *regs = kcb->jprobe_saved_regs;
+ memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
+ MIN_STACK_SIZE(stack_addr));
+ preempt_enable_no_resched();
+ return 1;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_X86_64
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+#endif
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+#ifdef CONFIG_X86_64
+ if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
+ return 1;
+#endif
+ return 0;
+}
+
+int __init arch_init_kprobes(void)
+{
+#ifdef CONFIG_X86_32
+ return 0;
+#else
+ return register_kprobe(&trampoline_p);
+#endif
+}
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
deleted file mode 100644
index f195348..0000000
--- a/arch/x86/kernel/kprobes_32.c
+++ /dev/null
@@ -1,1090 +0,0 @@
-/*
- * Kernel Probes (KProbes)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2002, 2004
- */
-
-#include <linux/kprobes.h>
-#include <linux/ptrace.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/preempt.h>
-#include <linux/module.h>
-#include <linux/kdebug.h>
-
-#include <asm/cacheflush.h>
-#include <asm/desc.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/alternative.h>
-
-void jprobe_return_end(void);
-static void __kprobes arch_copy_kprobe(struct kprobe *p);
-
-DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
-DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-
-struct kretprobe_blackpoint kretprobe_blacklist[] = {
- {"__switch_to", }, /* This function switches only current task, but
- doesn't switch kernel stack.*/
- {NULL, NULL} /* Terminator */
-};
-const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
-
-#define W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
- (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
- (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
- (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
- (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
- << (r % BITS_PER_LONG))
-
-#define R1(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
- W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) |
-#define R3(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
- W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) |
-#define R4(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
- W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf),
-#define RF(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
- W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)
-
-#ifdef CONFIG_X86_32
-#define R2(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
- W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf),
-#else
-#define R2(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
- W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) |
-#endif
-
-/*
- * Undefined/reserved opcodes, conditional jumps, Opcode Extension
- * Groups, and some special opcodes cannot be boosted.
- */
-static const unsigned long
-twobyte_is_boostable[256 / BITS_PER_LONG] = {
- /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
- /* ----------------------------------------------- */
- R1(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) /* 00 */
- R2(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 10 */
- R3(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 20 */
- R4(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 30 */
- R1(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 40 */
- R2(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 50 */
- R3(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) /* 60 */
- R4(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) /* 70 */
- R1(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 80 */
- R2(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 90 */
- R3(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) /* a0 */
- R4(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) /* b0 */
- R1(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) /* c0 */
- R2(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) /* d0 */
- R3(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) /* e0 */
- RF(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0) /* f0 */
- /* ----------------------------------------------- */
- /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
-};
-
-static const unsigned long
-onebyte_has_modrm[256 / BITS_PER_LONG] = {
- /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
- /* ----------------------------------------------- */
- R1(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) /* 00 */
- R2(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) /* 10 */
- R3(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) /* 20 */
- R4(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) /* 30 */
- R1(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 40 */
- R2(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 50 */
- R3(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) /* 60 */
- R4(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 70 */
- R1(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 80 */
- R2(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 90 */
- R3(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* a0 */
- R4(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* b0 */
- R1(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) /* c0 */
- R2(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) /* d0 */
- R3(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* e0 */
- RF(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) /* f0 */
- /* ----------------------------------------------- */
- /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
-};
-
-static const unsigned long
-twobyte_has_modrm[256 / BITS_PER_LONG] = {
- /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
- /* ----------------------------------------------- */
- R1(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) /* 0f */
- R2(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) /* 1f */
- R3(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) /* 2f */
- R4(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 3f */
- R1(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 4f */
- R2(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 5f */
- R3(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 6f */
- R4(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) /* 7f */
- R1(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) /* 8f */
- R2(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* 9f */
- R3(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) /* af */
- R4(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) /* bf */
- R1(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) /* cf */
- R2(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* df */
- R3(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* ef */
- RF(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* ff */
- /* ----------------------------------------------- */
- /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
-};
-
-#undef W
-#undef R1
-#undef R2
-#undef R3
-#undef R4
-#undef RF
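
Each W() row packs sixteen one-bit flags, one per low nibble of the opcode,
and can_boost()/is_riprel() read them back with test_bit(). A user-space
sketch of that round trip; test_bit() here is a simplified stand-in for the
kernel's bitop.

#include <assert.h>

#define LONG_BITS (8 * sizeof(unsigned long))

static int test_bit(unsigned int nr, const unsigned long *addr)
{
	return (addr[nr / LONG_BITS] >> (nr % LONG_BITS)) & 1;
}

int main(void)
{
	unsigned long map[256 / 8 / sizeof(unsigned long)] = { 0 };

	/* Mark two-byte opcode 0x0f 0x90 (seto) as boostable. */
	map[0x90 / LONG_BITS] |= 1UL << (0x90 % LONG_BITS);

	assert(test_bit(0x90, map));
	assert(!test_bit(0x91, map));
	return 0;
}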
-
-/* insert a jmp code */
-static inline void set_jmp_op(void *from, void *to)
-{
- struct __arch_jmp_op {
- char op;
- long raddr;
- } __attribute__((packed)) *jop;
- jop = (struct __arch_jmp_op *)from;
- jop->raddr = (long)(to) - ((long)(from) + 5);
- jop->op = RELATIVEJUMP_INSTRUCTION;
-}
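
The displacement set_jmp_op() stores is the ordinary rel32 of a 5-byte
"jmp rel32": relative to the first byte after the instruction. A worked
check with made-up addresses:

#include <assert.h>

int main(void)
{
	long from = 0x1000;		/* address of the jmp opcode */
	long to = 0x1080;		/* jump target */
	long raddr = to - (from + 5);	/* rel32, measured from the
					 * byte after the 5-byte insn */

	assert(from + 5 + raddr == to);
	return 0;
}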
-
-/*
- * returns non-zero if opcodes can be boosted.
- */
-static inline int can_boost(kprobe_opcode_t *opcodes)
-{
- kprobe_opcode_t opcode;
- kprobe_opcode_t *orig_opcodes = opcodes;
-retry:
- if (opcodes - orig_opcodes > MAX_INSN_SIZE)
- return 0;
- opcode = *(opcodes++);
-
- /* 2nd-byte opcode */
- if (opcode == 0x0f) {
- if (opcodes - orig_opcodes > MAX_INSN_SIZE)
- return 0;
- return test_bit(*opcodes, twobyte_is_boostable);
- }
-
- switch (opcode & 0xf0) {
- case 0x60:
- if (0x63 < opcode && opcode < 0x67)
- goto retry; /* prefixes */
- /* can't boost Address-size override and bound */
- return (opcode != 0x62 && opcode != 0x67);
- case 0x70:
- return 0; /* can't boost conditional jump */
- case 0xc0:
- /* can't boost software-interruptions */
- return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
- case 0xd0:
- /* can boost AA* and XLAT */
- return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
- case 0xe0:
- /* can boost in/out and absolute jmps */
- return ((opcode & 0x04) || opcode == 0xea);
- case 0xf0:
- if ((opcode & 0x0c) == 0 && opcode != 0xf1)
- goto retry; /* lock/rep(ne) prefix */
- /* clear and set flags are boostable */
- return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
- default:
- if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
- goto retry; /* prefixes */
- /* can't boost CS override and call */
- return (opcode != 0x2e && opcode != 0x9a);
- }
-}
-
-/*
- * returns non-zero if opcode modifies the interrupt flag.
- */
-static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
-{
- switch (*insn) {
- case 0xfa: /* cli */
- case 0xfb: /* sti */
- case 0xcf: /* iret/iretd */
- case 0x9d: /* popf/popfd */
- return 1;
- }
-
-#ifdef CONFIG_X86_64
- /* REX prefix */
- if (*insn >= 0x40 && *insn <= 0x4f && *++insn == 0xcf)
- return 1;
-#endif
- return 0;
-}
-
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
-{
- /* insn: must be on special executable page on x86_32|64. */
- p->ainsn.insn = get_insn_slot();
- if (!p->ainsn.insn)
- return -ENOMEM;
-
- arch_copy_kprobe(p);
- return 0;
-}
-
-/*
- * Determine if the instruction uses the %rip-relative addressing mode.
- * If it does, return the address of the 32-bit displacement word.
- * If not, return null.
- */
-static s32 __kprobes *is_riprel(u8 *insn)
-{
- int need_modrm;
-
- /* Skip legacy instruction prefixes. */
- while (1) {
- switch (*insn) {
- case 0x66:
- case 0x67:
- case 0x2e:
- case 0x3e:
- case 0x26:
- case 0x64:
- case 0x65:
- case 0x36:
- case 0xf0:
- case 0xf3:
- case 0xf2:
- ++insn;
- continue;
- }
- break;
- }
-
- /* Skip REX instruction prefix. */
- if ((*insn & 0xf0) == 0x40)
- ++insn;
-
- if (*insn == 0x0f) { /* Two-byte opcode. */
- ++insn;
- need_modrm = test_bit(*insn, twobyte_has_modrm);
- } else { /* One-byte opcode. */
- need_modrm = test_bit(*insn, onebyte_has_modrm);
- }
-
- if (need_modrm) {
- u8 modrm = *++insn;
- if ((modrm & 0xc7) == 0x05) { /* %rip+disp32 addressing mode */
- /* Displacement follows ModRM byte. */
- return (s32 *) ++insn;
- }
- }
-
- /* No %rip-relative addressing mode here. */
- return NULL;
-}
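
As a concrete instance of the ModRM test, take "mov 0x12345678(%rip),%rax",
encoded as 48 8b 05 78 56 34 12: after the REX prefix (0x48) and the opcode
(0x8b), the ModRM byte 0x05 has mod == 00 and r/m == 101, which is exactly
the %rip+disp32 form is_riprel() looks for.

#include <assert.h>

int main(void)
{
	/* mov 0x12345678(%rip),%rax */
	unsigned char insn[] = { 0x48, 0x8b, 0x05, 0x78, 0x56, 0x34, 0x12 };
	unsigned char modrm = insn[2];	/* byte after REX + opcode */

	/* mod == 00, r/m == 101: %rip-relative with disp32 */
	assert((modrm & 0xc7) == 0x05);
	return 0;
}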
-
-static void __kprobes arch_copy_kprobe(struct kprobe *p)
-{
-#ifdef CONFIG_X86_32
- memcpy(p->ainsn.insn, p->addr, (MAX_INSN_SIZE + 1) * sizeof(kprobe_opcode_t));
- p->opcode = *p->addr;
- if (can_boost(p->addr)) {
- p->ainsn.boostable = 0;
- } else {
- p->ainsn.boostable = -1;
- }
-#else
- s32 *ripdisp;
- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
- ripdisp = is_riprel(p->ainsn.insn);
- if (ripdisp) {
- /*
- * The copied instruction uses the %rip-relative
- * addressing mode. Adjust the displacement for the
- * difference between the original location of this
- * instruction and the location of the copy that will
- * actually be run. The tricky bit here is making sure
- * that the sign extension happens correctly in this
- * calculation, since we need a signed 32-bit result to
- * be sign-extended to 64 bits when it's added to the
- * %rip value and yield the same 64-bit result that the
- * sign-extension of the original signed 32-bit
- * displacement would have given.
- */
- s64 disp = (u8 *) p->addr + *ripdisp - (u8 *) p->ainsn.insn;
- BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
- *ripdisp = disp;
- }
- p->opcode = *p->addr;
-#endif
-}
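
The instruction length drops out of this rewrite because the disp32 is
measured from the end of the instruction in both the original and the copy;
what must be preserved is the absolute target. A worked example with
made-up addresses:

#include <assert.h>

int main(void)
{
	long addr = 0x1000;	/* original instruction */
	long insn = 0x9000;	/* copy in the insn slot */
	long disp = 0x100;	/* original rip-relative disp32 */

	/* Target referenced by the original... */
	long target = addr + disp;
	/* ...equals what the copy references after the fixup. */
	long newdisp = addr + disp - insn;

	assert(insn + newdisp == target);
	return 0;
}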
-
-void __kprobes arch_arm_kprobe(struct kprobe *p)
-{
- text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
-}
-
-void __kprobes arch_disarm_kprobe(struct kprobe *p)
-{
- text_poke(p->addr, &p->opcode, 1);
-}
-
-void __kprobes arch_remove_kprobe(struct kprobe *p)
-{
- mutex_lock(&kprobe_mutex);
-#ifdef CONFIG_X86_32
- free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
-#else
- free_insn_slot(p->ainsn.insn, 0);
-#endif
- mutex_unlock(&kprobe_mutex);
-}
-
-static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
-{
- kcb->prev_kprobe.kp = kprobe_running();
- kcb->prev_kprobe.status = kcb->kprobe_status;
- kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
- kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
-}
-
-static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
-{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
- kcb->kprobe_status = kcb->prev_kprobe.status;
- kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
- kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
-}
-
-static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- __get_cpu_var(current_kprobe) = p;
- kcb->kprobe_saved_flags = kcb->kprobe_old_flags
- = (regs->flags & (TF_MASK | IF_MASK));
- if (is_IF_modifier(p->ainsn.insn))
- kcb->kprobe_saved_flags &= ~IF_MASK;
-}
-
-static __always_inline void clear_btf(void)
-{
- if (test_thread_flag(TIF_DEBUGCTLMSR))
-#ifdef CONFIG_X86_32
- wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
-#else
- wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
-#endif
-}
-
-static __always_inline void restore_btf(void)
-{
- if (test_thread_flag(TIF_DEBUGCTLMSR))
-#ifdef CONFIG_X86_32
- wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
-#else
- wrmsrl(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr);
-#endif
-}
-
-static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
-{
- clear_btf();
- regs->flags |= TF_MASK;
- regs->flags &= ~IF_MASK;
- /*single step inline if the instruction is an int3*/
- if (p->opcode == BREAKPOINT_INSTRUCTION)
- regs->ip = (unsigned long)p->addr;
- else
- regs->ip = (unsigned long)p->ainsn.insn;
-}
-
-/* Called with kretprobe_lock held */
-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
- struct pt_regs *regs)
-{
- unsigned long *sara = &regs->sp;
-
- ri->ret_addr = (kprobe_opcode_t *) *sara;
- /* Replace the return addr with trampoline addr */
- *sara = (unsigned long) &kretprobe_trampoline;
-}
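
A toy model of the stack edit arch_prepare_kretprobe() performs: the word
at the top of the stack, i.e. the return address pushed by the call, is
remembered and then overwritten with the trampoline's address. User-space,
with TRAMPOLINE standing in for &kretprobe_trampoline.

#include <assert.h>

#define TRAMPOLINE 0xdeadbeefUL

int main(void)
{
	unsigned long stack[1] = { 0x1000 };	/* pushed return address */
	unsigned long *sara = stack;		/* stack addr of return addr */
	unsigned long ret_addr = *sara;		/* saved in ri->ret_addr */

	*sara = TRAMPOLINE;			/* divert the return */
	assert(ret_addr == 0x1000 && *sara == TRAMPOLINE);
	return 0;
}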
-
-/*
- * Interrupts are disabled on entry as trap3 is an interrupt gate and they
- * remain disabled throughout this function.
- */
-static int __kprobes kprobe_handler(struct pt_regs *regs)
-{
- struct kprobe *p;
- int ret = 0;
- kprobe_opcode_t *addr;
- struct kprobe_ctlblk *kcb;
-
- addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
-
- /*
- * We don't want to be preempted for the entire
- * duration of kprobe processing
- */
- preempt_disable();
- kcb = get_kprobe_ctlblk();
-
- /* Check we're not actually recursing */
- if (kprobe_running()) {
- p = get_kprobe(addr);
- if (p) {
- if (kcb->kprobe_status == KPROBE_HIT_SS &&
- *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
- regs->flags &= ~TF_MASK;
- regs->flags |= kcb->kprobe_saved_flags;
- goto no_kprobe;
-#ifdef CONFIG_X86_32
- }
- /* We have re-entered kprobe_handler(), since
- * another probe was hit while within the handler.
- * Here we save the original kprobe variables and
- * just single-step the instruction of the new probe,
- * without calling any user handlers.
- */
- save_previous_kprobe(kcb);
- set_current_kprobe(p, regs, kcb);
- kprobes_inc_nmissed_count(p);
- prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_REENTER;
- return 1;
-#else
- } else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
- /* TODO: Provide re-entrancy from
- * post_kprobes_handler() and avoid exception
- * stack corruption while single-stepping on
- * the instruction of the new probe.
- */
- arch_disarm_kprobe(p);
- regs->ip = (unsigned long)p->addr;
- reset_current_kprobe();
- ret = 1;
- } else {
- /* We have re-entered kprobe_handler(), since
- * another probe was hit while within the
- * handler. Here we save the original kprobe
- * variables and just single-step the
- * instruction of the new probe, without
- * calling any user handlers.
- */
- save_previous_kprobe(kcb);
- set_current_kprobe(p, regs, kcb);
- kprobes_inc_nmissed_count(p);
- prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_REENTER;
- return 1;
- }
-#endif
- } else {
- if (*addr != BREAKPOINT_INSTRUCTION) {
- /* The breakpoint instruction was removed by
- * another cpu right after we hit it; no further
- * handling of this interrupt is appropriate.
- */
-#ifdef CONFIG_X86_32
- regs->ip -= sizeof(kprobe_opcode_t);
-#else
- regs->ip = (unsigned long)addr;
-#endif
- ret = 1;
- goto no_kprobe;
- }
- p = __get_cpu_var(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs)) {
- goto ss_probe;
- }
- }
- goto no_kprobe;
- }
-
- p = get_kprobe(addr);
- if (!p) {
- if (*addr != BREAKPOINT_INSTRUCTION) {
- /*
- * The breakpoint instruction was removed right
- * after we hit it. Another cpu has removed
- * either a probepoint or a debugger breakpoint
- * at this address. In either case, no further
- * handling of this interrupt is appropriate.
- * Back up over the (now missing) int3 and run
- * the original instruction.
- */
-#ifdef CONFIG_X86_32
- regs->ip -= sizeof(kprobe_opcode_t);
-#else
- regs->ip = (unsigned long)addr;
-#endif
- ret = 1;
- }
- /* Not one of ours: let kernel handle it */
- goto no_kprobe;
- }
-
- set_current_kprobe(p, regs, kcb);
- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-
- if (p->pre_handler && p->pre_handler(p, regs))
- /* handler has already set things up, so skip ss setup */
- return 1;
-
-ss_probe:
-#if defined(CONFIG_X86_32) && (!defined(CONFIG_PREEMPT) || defined(CONFIG_PM))
- if (p->ainsn.boostable == 1 && !p->post_handler){
- /* Boost up -- we can execute copied instructions directly */
- reset_current_kprobe();
- regs->ip = (unsigned long)p->ainsn.insn;
- preempt_enable_no_resched();
- return 1;
- }
-#endif
- prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_HIT_SS;
- return 1;
-
-no_kprobe:
- preempt_enable_no_resched();
- return ret;
-}
-
-/*
- * For function-return probes, init_kprobes() establishes a probepoint
- * here. When a retprobed function returns, this probe is hit and
- * trampoline_probe_handler() runs, calling the kretprobe's handler.
- */
-void kretprobe_trampoline_holder(void)
-{
-#ifdef CONFIG_X86_32
- asm volatile ( ".global kretprobe_trampoline\n"
- "kretprobe_trampoline: \n"
- " pushf\n"
- /* skip cs, ip, orig_ax */
- " subl $12, %esp\n"
- " pushl %fs\n"
- " pushl %ds\n"
- " pushl %es\n"
- " pushl %eax\n"
- " pushl %ebp\n"
- " pushl %edi\n"
- " pushl %esi\n"
- " pushl %edx\n"
- " pushl %ecx\n"
- " pushl %ebx\n"
- " movl %esp, %eax\n"
- " call trampoline_handler\n"
- /* move flags to cs */
- " movl 52(%esp), %edx\n"
- " movl %edx, 48(%esp)\n"
- /* save true return address on flags */
- " movl %eax, 52(%esp)\n"
- " popl %ebx\n"
- " popl %ecx\n"
- " popl %edx\n"
- " popl %esi\n"
- " popl %edi\n"
- " popl %ebp\n"
- " popl %eax\n"
- /* skip ip, orig_ax, es, ds, fs */
- " addl $20, %esp\n"
- " popf\n"
- " ret\n");
-#else
- asm volatile ( ".global kretprobe_trampoline\n"
- "kretprobe_trampoline: \n"
- "nop\n");
-#endif
-}
-
-/*
- * Called when we hit the probe point at kretprobe_trampoline
- */
-#ifdef CONFIG_X86_32
-void *__kprobes trampoline_handler(struct pt_regs *regs)
-#else
-int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
-#endif
-{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
-
- INIT_HLIST_HEAD(&empty_rp);
- spin_lock_irqsave(&kretprobe_lock, flags);
- head = kretprobe_inst_table_head(current);
-#ifdef CONFIG_X86_32
- /* fixup registers */
- regs->cs = __KERNEL_CS | get_kernel_rpl();
- regs->ip = trampoline_address;
- regs->orig_ax = 0xffffffff;
-#endif
- /*
- * It is possible to have multiple instances associated with a given
- * task, either because multiple functions in the call path
- * have a return probe installed on them, and/or more than one
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-#ifdef CONFIG_X86_32
- if (ri->rp && ri->rp->handler) {
- __get_cpu_var(current_kprobe) = &ri->rp->kp;
- get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
- ri->rp->handler(ri, regs);
- __get_cpu_var(current_kprobe) = NULL;
- }
-#else
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-#endif
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-#ifdef CONFIG_X86_64
- regs->ip = orig_ret_address;
- reset_current_kprobe();
-#endif
- spin_unlock_irqrestore(&kretprobe_lock, flags);
-#ifdef CONFIG_X86_64
- preempt_enable_no_resched();
-#endif
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
-
-#ifdef CONFIG_X86_32
- return (void*)orig_ret_address;
-#else
- /*
- * By returning a non-zero value, we are telling
- * kprobe_handler() that we don't want the post_handler
- * to run (and have re-enabled preemption)
- */
- return 1;
-#endif
-}
-
-/*
- * Called after single-stepping. p->addr is the address of the
- * instruction whose first byte has been replaced by the "int 3"
- * instruction. To avoid the SMP problems that can occur when we
- * temporarily put back the original opcode to single-step, we
- * single-stepped a copy of the instruction. The address of this
- * copy is p->ainsn.insn.
- *
- * This function prepares to return from the post-single-step
- * interrupt. We have to fix up the stack as follows:
- *
- * 0) Except in the case of absolute or indirect jump or call instructions,
- * the new ip is relative to the copied instruction. We need to make
- * it relative to the original instruction.
- *
- * 1) If the single-stepped instruction was pushfl, then the TF and IF
- * flags are set in the just-pushed flags, and may need to be cleared.
- *
- * 2) If the single-stepped instruction was a call, the return address
- * that is atop the stack is the address following the copied instruction.
- * We need to make it the address following the original instruction.
- *
- * This function also checks instruction size for preparing direct execution.
- */
-static void __kprobes resume_execution(struct kprobe *p,
- struct pt_regs *regs, struct kprobe_ctlblk *kcb)
-{
- unsigned long *tos = &regs->sp;
- unsigned long next_rip = 0;
- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
- unsigned long orig_ip = (unsigned long)p->addr;
- kprobe_opcode_t *insn = p->ainsn.insn;
-
-#ifdef CONFIG_X86_64
- /* Skip the REX prefix */
- if (*insn >= 0x40 && *insn <= 0x4f)
- insn++;
-#endif
-
- regs->flags &= ~TF_MASK;
- switch (*insn) {
- case 0x9c: /* pushfl */
- *tos &= ~(TF_MASK | IF_MASK);
- *tos |= kcb->kprobe_old_flags;
- break;
- case 0xc2: /* ret/lret */
- case 0xc3:
- case 0xca:
- case 0xcb:
-#ifdef CONFIG_X86_32
- case 0xcf: /* iret */
- /* ip is already adjusted, no more changes required */
- p->ainsn.boostable = 1;
- goto no_change;
-#else
- /* ip is already adjusted, no more changes required */
- return;
-#endif
- case 0xe8: /* call relative - Fix return addr */
- *tos = orig_ip + (*tos - copy_ip);
- break;
-#ifdef CONFIG_X86_32
- case 0x9a: /* call absolute -- Fix return addr, not ip */
- *tos = orig_ip + (*tos - copy_ip);
- goto no_change;
-#endif
- case 0xff:
-#ifdef CONFIG_X86_32
- if ((insn[1] & 0x30) == 0x10) {
- /*
- * call absolute, indirect
- * Fix return addr; ip is correct.
- * But this is not boostable
- */
- *tos = orig_ip + (*tos - copy_ip);
- goto no_change;
- } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
- ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
- /* ip is correct. And this is boostable */
- p->ainsn.boostable = 1;
- goto no_change;
- }
-#else
- if ((insn[1] & 0x30) == 0x10) {
- /* call absolute, indirect */
- /* Fix return addr; ip is correct. */
- next_rip = regs->ip;
- *tos = orig_ip + (*tos - copy_ip);
- } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
- ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
- /* ip is correct. */
- next_rip = regs->ip;
- }
-#endif
- break;
- case 0xea: /* jmp absolute -- ip is correct */
-#ifdef CONFIG_X86_32
- /* ip is already adjusted, no more changes required */
- p->ainsn.boostable = 1;
- goto no_change;
-#else
- next_rip = regs->ip;
- break;
-#endif
- default:
- break;
- }
-
-#ifdef CONFIG_X86_32
- if (p->ainsn.boostable == 0) {
- if ((regs->ip > copy_ip) &&
- (regs->ip - copy_ip) + 5 < (MAX_INSN_SIZE + 1)) {
- /*
- * This instruction can be executed directly if it
- * jumps back to the correct address.
- */
- set_jmp_op((void *)regs->ip,
- (void *)orig_ip + (regs->ip - copy_ip));
- p->ainsn.boostable = 1;
- } else {
- p->ainsn.boostable = -1;
- }
- }
-
- regs->ip = orig_ip + (regs->ip - copy_ip);
-
-no_change:
-#else
- if (next_rip) {
- regs->ip = next_rip;
- } else {
- regs->ip = orig_ip + (regs->ip - copy_ip);
- }
-#endif
- restore_btf();
-}
-
-/*
- * Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled throughout this function.
- */
-static int __kprobes post_kprobe_handler(struct pt_regs *regs)
-{
- struct kprobe *cur = kprobe_running();
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (!cur)
- return 0;
-
- if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- cur->post_handler(cur, regs, 0);
- }
-
- resume_execution(cur, regs, kcb);
- regs->flags |= kcb->kprobe_saved_flags;
- trace_hardirqs_fixup_flags(regs->flags);
-
- /* Restore the original saved kprobes variables and continue. */
- if (kcb->kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe(kcb);
- goto out;
- }
- reset_current_kprobe();
-out:
- preempt_enable_no_resched();
-
- /*
- * if somebody else is singlestepping across a probe point, flags
- * will have TF set, in which case, continue the remaining processing
- * of do_debug, as if this is not a probe hit.
- */
- if (regs->flags & TF_MASK)
- return 0;
-
- return 1;
-}
-
-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
-{
- struct kprobe *cur = kprobe_running();
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-#ifdef CONFIG_X86_64
- const struct exception_table_entry *fixup;
-#endif
-
- switch (kcb->kprobe_status) {
- case KPROBE_HIT_SS:
- case KPROBE_REENTER:
- /*
- * We are here because the instruction being single
- * stepped caused a page fault. We reset the current
- * kprobe and the ip points back to the probe address
- * and allow the page fault handler to continue as a
- * normal page fault.
- */
- regs->ip = (unsigned long)cur->addr;
- regs->flags |= kcb->kprobe_old_flags;
- if (kcb->kprobe_status == KPROBE_REENTER)
- restore_previous_kprobe(kcb);
- else
- reset_current_kprobe();
- preempt_enable_no_resched();
- break;
- case KPROBE_HIT_ACTIVE:
- case KPROBE_HIT_SSDONE:
- /*
- * We increment the nmissed count for accounting;
- * the npre/npostfault counts could also be used to
- * account for these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page fault. This could happen
- * if the handler tries to access user space, e.g. via
- * copy_from_user() or get_user(). Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
- * In case the user-specified fault handler returned
- * zero, try to fix up.
- */
-#ifdef CONFIG_X86_32
- if (fixup_exception(regs))
- return 1;
-#else
- fixup = search_exception_tables(regs->ip);
- if (fixup) {
- regs->ip = fixup->fixup;
- return 1;
- }
-#endif
- /*
- * The exception couldn't be fixed up;
- * let do_page_fault() fix it.
- */
- break;
- default:
- break;
- }
- return 0;
-}
-
-/*
- * Wrapper routine for handling exceptions.
- */
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- struct die_args *args = (struct die_args *)data;
- int ret = NOTIFY_DONE;
-
-#ifdef CONFIG_X86_32
- if (args->regs && user_mode_vm(args->regs))
- return ret;
-#else
- if (args->regs && user_mode(args->regs))
- return ret;
-#endif
-
- switch (val) {
- case DIE_INT3:
- if (kprobe_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- case DIE_DEBUG:
- if (post_kprobe_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- case DIE_GPF:
- /* kprobe_running() needs smp_processor_id() */
- preempt_disable();
- if (kprobe_running() &&
- kprobe_fault_handler(args->regs, args->trapnr))
- ret = NOTIFY_STOP;
- preempt_enable();
- break;
- default:
- break;
- }
- return ret;
-}
-
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- unsigned long addr;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_sp = &regs->sp;
- addr = (unsigned long)(kcb->jprobe_saved_sp);
-
- /*
- * As Linus pointed out, gcc assumes that the callee
- * owns the argument space and could overwrite it, e.g.
- * tailcall optimization. So, to be absolutely safe
- * we also save and restore enough stack bytes to cover
- * the argument area.
- */
- memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
- MIN_STACK_SIZE(addr));
- regs->flags &= ~IF_MASK;
- trace_hardirqs_off();
- regs->ip = (unsigned long)(jp->entry);
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-#ifdef CONFIG_X86_32
- asm volatile (" xchgl %%ebx,%%esp \n"
- " int3 \n"
- " .globl jprobe_return_end \n"
- " jprobe_return_end: \n"
- " nop \n"::"b"
- (kcb->jprobe_saved_sp):"memory");
-#else
- asm volatile (" xchg %%rbx,%%rsp \n"
- " int3 \n"
- " .globl jprobe_return_end \n"
- " jprobe_return_end: \n"
- " nop \n"::"b"
- (kcb->jprobe_saved_sp):"memory");
-#endif
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- u8 *addr = (u8 *) (regs->ip - 1);
- unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_sp);
- struct jprobe *jp = container_of(p, struct jprobe, kp);
-
- if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
- if (&regs->sp != kcb->jprobe_saved_sp) {
- struct pt_regs *saved_regs =
- container_of(kcb->jprobe_saved_sp,
- struct pt_regs, sp);
- printk("current sp %p does not match saved sp %p\n",
- &regs->sp, kcb->jprobe_saved_sp);
- printk("Saved registers for jprobe %p\n", jp);
- show_registers(saved_regs);
- printk("Current registers\n");
- show_registers(regs);
- BUG();
- }
- *regs = kcb->jprobe_saved_regs;
- memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
- MIN_STACK_SIZE(stack_addr));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-
-#ifdef CONFIG_X86_64
-static struct kprobe trampoline_p = {
- .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
- .pre_handler = trampoline_probe_handler
-};
-#endif
-
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
-{
-#ifdef CONFIG_X86_64
- if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
- return 1;
-#endif
- return 0;
-}
-
-int __init arch_init_kprobes(void)
-{
-#ifdef CONFIG_X86_32
- return 0;
-#else
- return register_kprobe(&trampoline_p);
-#endif
-}
-void *__kprobes trampoline_handler(struct pt_regs *regs)
-#else
-int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
-#endif
-{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
-
- INIT_HLIST_HEAD(&empty_rp);
- spin_lock_irqsave(&kretprobe_lock, flags);
- head = kretprobe_inst_table_head(current);
-#ifdef CONFIG_X86_32
- /* fixup registers */
- regs->cs = __KERNEL_CS | get_kernel_rpl();
- regs->ip = trampoline_address;
- regs->orig_ax = 0xffffffff;
-#endif
- /*
- * It is possible to have multiple instances associated with a given
- * task either because multiple functions in the call path
- * have a return probe installed on them, and/or more than one
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-#ifdef CONFIG_X86_32
- if (ri->rp && ri->rp->handler) {
- __get_cpu_var(current_kprobe) = &ri->rp->kp;
- get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
- ri->rp->handler(ri, regs);
- __get_cpu_var(current_kprobe) = NULL;
- }
-#else
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-#endif
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-#ifdef CONFIG_X86_64
- regs->ip = orig_ret_address;
- reset_current_kprobe();
-#endif
- spin_unlock_irqrestore(&kretprobe_lock, flags);
-#ifdef CONFIG_X86_64
- preempt_enable_no_resched();
-#endif
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
-
-#ifdef CONFIG_X86_32
- return (void*)orig_ret_address;
-#else
- /*
- * By returning a non-zero value, we are telling
- * kprobe_handler() that we don't want the post_handler
- * to run (and have re-enabled preemption)
- */
- return 1;
-#endif
-}
-
-/*
- * Called after single-stepping. p->addr is the address of the
- * instruction whose first byte has been replaced by the "int 3"
- * instruction. To avoid the SMP problems that can occur when we
- * temporarily put back the original opcode to single-step, we
- * single-stepped a copy of the instruction. The address of this
- * copy is p->ainsn.insn.
- *
- * This function prepares to return from the post-single-step
- * interrupt. We have to fix up the stack as follows:
- *
- * 0) Except in the case of absolute or indirect jump or call instructions,
- * the new ip is relative to the copied instruction. We need to make
- * it relative to the original instruction.
- *
- * 1) If the single-stepped instruction was pushfl, then the TF and IF
- * flags are set in the just-pushed flags, and may need to be cleared.
- *
- * 2) If the single-stepped instruction was a call, the return address
- * that is atop the stack is the address following the copied instruction.
- * We need to make it the address following the original instruction.
- *
- * This function also checks instruction size for preparing direct execution.
- */
-static void __kprobes resume_execution(struct kprobe *p,
- struct pt_regs *regs, struct kprobe_ctlblk *kcb)
-{
-	unsigned long *tos = &regs->sp;
- unsigned long next_rip = 0;
- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
- unsigned long orig_ip = (unsigned long)p->addr;
- kprobe_opcode_t *insn = p->ainsn.insn;
-
-#ifdef CONFIG_X86_64
- /*skip the REX prefix*/
- if (*insn >= 0x40 && *insn <= 0x4f)
- insn++;
-#endif
-
- regs->flags &= ~TF_MASK;
- switch (*insn) {
- case 0x9c: /* pushfl */
- *tos &= ~(TF_MASK | IF_MASK);
- *tos |= kcb->kprobe_old_flags;
- break;
- case 0xc2: /* ret/lret */
- case 0xc3:
- case 0xca:
- case 0xcb:
-#ifdef CONFIG_X86_32
- case 0xcf: /* iret */
- /* ip is already adjusted, no more changes required */
- p->ainsn.boostable = 1;
- goto no_change;
-#else
- /* ip is already adjusted, no more changes required*/
- return;
-#endif
- case 0xe8: /* call relative - Fix return addr */
- *tos = orig_ip + (*tos - copy_ip);
- break;
-#ifdef CONFIG_X86_32
- case 0x9a: /* call absolute -- same as call absolute, indirect */
- *tos = orig_ip + (*tos - copy_ip);
- goto no_change;
-#endif
- case 0xff:
-#ifdef CONFIG_X86_32
- if ((insn[1] & 0x30) == 0x10) {
- /*
- * call absolute, indirect
- * Fix return addr; ip is correct.
- * But this is not boostable
- */
- *tos = orig_ip + (*tos - copy_ip);
- goto no_change;
- } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
- ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
- /* ip is correct. And this is boostable */
- p->ainsn.boostable = 1;
- goto no_change;
- }
-#else
- if ((insn[1] & 0x30) == 0x10) {
- /* call absolute, indirect */
- /* Fix return addr; ip is correct. */
- next_rip = regs->ip;
- *tos = orig_ip + (*tos - copy_ip);
- } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
- ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
- /* ip is correct. */
- next_rip = regs->ip;
- }
-#endif
- break;
- case 0xea: /* jmp absolute -- ip is correct */
-#ifdef CONFIG_X86_32
- /* ip is already adjusted, no more changes required */
- p->ainsn.boostable = 1;
- goto no_change;
-#else
- next_rip = regs->ip;
- break;
-#endif
- default:
- break;
- }
-
-#ifdef CONFIG_X86_32
- if (p->ainsn.boostable == 0) {
- if ((regs->ip > copy_ip) &&
- (regs->ip - copy_ip) + 5 < (MAX_INSN_SIZE + 1)) {
- /*
- * These instructions can be executed directly if it
- * jumps back to correct address.
- */
- set_jmp_op((void *)regs->ip,
- (void *)orig_ip + (regs->ip - copy_ip));
- p->ainsn.boostable = 1;
- } else {
- p->ainsn.boostable = -1;
- }
- }
-
- regs->ip = orig_ip + (regs->ip - copy_ip);
-
-no_change:
-#else
- if (next_rip) {
- regs->ip = next_rip;
- } else {
- regs->ip = orig_ip + (regs->ip - copy_ip);
- }
-#endif
- restore_btf();
-}
-
-/*
- * Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled throughout this function.
- */
-static int __kprobes post_kprobe_handler(struct pt_regs *regs)
-{
- struct kprobe *cur = kprobe_running();
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (!cur)
- return 0;
-
- if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- cur->post_handler(cur, regs, 0);
- }
-
- resume_execution(cur, regs, kcb);
- regs->flags |= kcb->kprobe_saved_flags;
- trace_hardirqs_fixup_flags(regs->flags);
-
- /* Restore the original saved kprobes variables and continue. */
- if (kcb->kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe(kcb);
- goto out;
- }
- reset_current_kprobe();
-out:
- preempt_enable_no_resched();
-
- /*
- * if somebody else is singlestepping across a probe point, flags
- * will have TF set, in which case, continue the remaining processing
- * of do_debug, as if this is not a probe hit.
- */
- if (regs->flags & TF_MASK)
- return 0;
-
- return 1;
-}
-
-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
-{
- struct kprobe *cur = kprobe_running();
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-#ifdef CONFIG_X86_64
- const struct exception_table_entry *fixup;
-#endif
-
- switch(kcb->kprobe_status) {
- case KPROBE_HIT_SS:
- case KPROBE_REENTER:
- /*
- * We are here because the instruction being single
- * stepped caused a page fault. We reset the current
- * kprobe and the ip points back to the probe address
- * and allow the page fault handler to continue as a
- * normal page fault.
- */
- regs->ip = (unsigned long)cur->addr;
- regs->flags |= kcb->kprobe_old_flags;
- if (kcb->kprobe_status == KPROBE_REENTER)
- restore_previous_kprobe(kcb);
- else
- reset_current_kprobe();
- preempt_enable_no_resched();
- break;
- case KPROBE_HIT_ACTIVE:
- case KPROBE_HIT_SSDONE:
- /*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
- * In case the user-specified fault handler returned
- * zero, try to fix up.
- */
-#ifdef CONFIG_X86_32
- if (fixup_exception(regs))
- return 1;
-#else
- fixup = search_exception_tables(regs->ip);
- if (fixup) {
- regs->ip = fixup->fixup;
- return 1;
- }
-#endif
- /*
- * Exception couldn't be fixed up,
- * Let do_page_fault() fix it.
- */
- break;
- default:
- break;
- }
- return 0;
-}
-
-/*
- * Wrapper routine for handling exceptions.
- */
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- struct die_args *args = (struct die_args *)data;
- int ret = NOTIFY_DONE;
-
-#ifdef CONFIG_X86_32
- if (args->regs && user_mode_vm(args->regs))
- return ret;
-#else
- if (args->regs && user_mode(args->regs))
- return ret;
-#endif
-
- switch (val) {
- case DIE_INT3:
- if (kprobe_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- case DIE_DEBUG:
- if (post_kprobe_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- case DIE_GPF:
- /* kprobe_running() needs smp_processor_id() */
- preempt_disable();
- if (kprobe_running() &&
- kprobe_fault_handler(args->regs, args->trapnr))
- ret = NOTIFY_STOP;
- preempt_enable();
- break;
- default:
- break;
- }
- return ret;
-}
-
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- unsigned long addr;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
-	kcb->jprobe_saved_sp = &regs->sp;
- addr = (unsigned long)(kcb->jprobe_saved_sp);
-
- /*
- * As Linus pointed out, gcc assumes that the callee
- * owns the argument space and could overwrite it, e.g.
- * tailcall optimization. So, to be absolutely safe
- * we also save and restore enough stack bytes to cover
- * the argument area.
- */
- memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
- MIN_STACK_SIZE(addr));
- regs->flags &= ~IF_MASK;
- trace_hardirqs_off();
- regs->ip = (unsigned long)(jp->entry);
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-#ifdef CONFIG_X86_32
- asm volatile (" xchgl %%ebx,%%esp \n"
- " int3 \n"
- " .globl jprobe_return_end \n"
- " jprobe_return_end: \n"
- " nop \n"::"b"
- (kcb->jprobe_saved_sp):"memory");
-#else
- asm volatile (" xchg %%rbx,%%rsp \n"
- " int3 \n"
- " .globl jprobe_return_end \n"
- " jprobe_return_end: \n"
- " nop \n"::"b"
- (kcb->jprobe_saved_sp):"memory");
-#endif
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- u8 *addr = (u8 *) (regs->ip - 1);
- unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_sp);
- struct jprobe *jp = container_of(p, struct jprobe, kp);
-
- if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
-		if (&regs->sp != kcb->jprobe_saved_sp) {
- struct pt_regs *saved_regs =
- container_of(kcb->jprobe_saved_sp,
- struct pt_regs, sp);
- printk("current sp %p does not match saved sp %p\n",
-			       &regs->sp, kcb->jprobe_saved_sp);
- printk("Saved registers for jprobe %p\n", jp);
- show_registers(saved_regs);
- printk("Current registers\n");
- show_registers(regs);
- BUG();
- }
- *regs = kcb->jprobe_saved_regs;
- memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
- MIN_STACK_SIZE(stack_addr));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-
-#ifdef CONFIG_X86_64
-static struct kprobe trampoline_p = {
- .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
- .pre_handler = trampoline_probe_handler
-};
-#endif
-
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
-{
-#ifdef CONFIG_X86_64
- if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
- return 1;
-#endif
- return 0;
-}
-
-int __init arch_init_kprobes(void)
-{
-#ifdef CONFIG_X86_32
- return 0;
-#else
- return register_kprobe(&trampoline_p);
-#endif
-}
--
1.5.4.rc0.1083.gf568
* Harvey Harrison <[email protected]> wrote:
> Further unification work. There is a possible behavior change on
> X86_32 here.
>
> is_IF_modifier(p->opcode)
>
> to
>
> is_IF_modifier(p->ainsn.insn)
>
> Which should be equivalent, but is not purely cosmetic as the rest of
> the unification so far.
hm, could you split this into two, the pure-equivalence and the
possibly-modifying patch? (that way any potential breakage becomes
bisectable) Same end result, just two commits instead of one.
also, did you manage to run/test kprobes (on 32-bit or 64-bit x86), and
it worked fine?
Ingo
On Sat, 2007-12-15 at 09:50 +0100, Ingo Molnar wrote:
> * Harvey Harrison <[email protected]> wrote:
>
> > Further unification work. There is a possible behavior change on
> > X86_32 here.
> >
> > is_IF_modifier(p->opcode)
> >
> > to
> >
> > is_IF_modifier(p->ainsn.insn)
> >
> > Which should be equivalent, but is not purely cosmetic as the rest of
> > the unification so far.
>
> hm, could you split this into two, the pure-equivalence and the
> possibly-modifying patch? (that way any potential breakage becomes
> bisectable) Same end result, just two commits instead of one.
>
Sure, I'll go back through and see if the series can be cleaned up a bit,
as well as expand the commit messages a little.
> also, did you manage to run/test kprobes (on 32-bit or 64-bit x86), and
> it worked fine?
>
Sorry, I should have prefaced the whole series with RFC. Currently
this is compile-tested only. There is only the one patch that has
any behavioral change. I believe the series also pointed out an
existing bug in the 32-bit version...which I've preserved but will
note in the commit messages in the respun series.
In case you're interested, from the patch which unifies the definition
of MAX_INSTRUCTION_SIZE:
memcpy(p->ainsn.insn, p->addr, (MAX_INSN_SIZE + 1) *
sizeof(kprobe_opcode_t));
If you compare this memcpy from arch_prepare_kprobe in 32/64 bit I'm
almost sure the X86_32 version should be
... + sizeof(kprobe_opcode_t)
not
... * sizeof(kprobe_opcode_t)
Cheers,
Harvey
* Harvey Harrison <[email protected]> wrote:
> If you compare this memcpy from arch_prepare_kprobe in 32/64 bit I'm
> almost sure the X86_32 version should be
>
> ... + sizeof(kprobe_opcode_t)
>
> not
>
> ... * sizeof(kprobe_opcode_t)
good point. I've Cc:-ed the top authors of kprobes.c. Could anyone
confirm (or deny) that Harvey found a real kprobes bug here?
Ingo
Hi Harvey and Ingo,
I'm working on another version of patches for unification.
Currently cleaning up the patches.
http://sources.redhat.com/ml/systemtap/2007-q4/msg00457.html
I'll clean up and repost it today.
Ingo Molnar wrote:
> * Harvey Harrison <[email protected]> wrote:
>
>> If you compare this memcpy from arch_prepare_kprobe in 32/64 bit I'm
>> almost sure the X86_32 version should be
>>
>> ... + sizeof(kprobe_opcode_t)
>>
>> not
>>
>> ... * sizeof(kprobe_opcode_t)
>
> good point. I've Cc:-ed the top authors of kprobes.c. Could anyone
> confirm (or deny) that Harvey found a real kprobes bug here?
>
> Ingo
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America) Inc.
Software Solutions Division
e-mail: [email protected], [email protected]
* Ingo Molnar <[email protected]> [2007-12-15 14:12:04]:
Hi Ingo, Harvey
In file include/asm-x86/kprobes_32.h
typedef u8 kprobe_opcode_t;
hence sizeof(kprobe_opcode_t) turns out to be 1.
Hence
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
is correct.
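For example, with illustrative values (the real definitions live in the
asm headers), a quick compile-time check shows the spellings come out
to the same byte count when the opcode type is one byte:

#include <assert.h>

typedef unsigned char kprobe_opcode_t;	/* u8, as in kprobes_32.h */
#define MAX_INSN_SIZE 16		/* illustrative value only */

int main(void)
{
	/* "* sizeof()" is a no-op for a 1-byte type */
	assert(MAX_INSN_SIZE * sizeof(kprobe_opcode_t) == MAX_INSN_SIZE);
	/* and "(N + 1) * sizeof(t)" equals "N + sizeof(t)" as well */
	assert((MAX_INSN_SIZE + 1) * sizeof(kprobe_opcode_t) ==
	       MAX_INSN_SIZE + sizeof(kprobe_opcode_t));
	return 0;
}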
--
Regards
Srikar
>
> * Harvey Harrison <[email protected]> wrote:
>
> > If you compare this memcpy from arch_prepare_kprobe in 32/64 bit I'm
> > almost sure the X86_32 version should be
> >
> > ... + sizeof(kprobe_opcode_t)
> >
> > not
> >
> > ... * sizeof(kprobe_opcode_t)
>
> good point. I've Cc:-ed the top authors of kprobes.c. Could anyone
> confirm (or deny) that Harvey found a real kprobes bug here?
>
> Ingo
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
>
Masami,
* Masami Hiramatsu <[email protected]> wrote:
> Hi Harvey and Ingo,
>
> I'm working on another version of patches for unification.
> Currently cleaning up the patches.
> http://sources.redhat.com/ml/systemtap/2007-q4/msg00457.html
> I'll cleanup and repost it today.
cool! Please Cc: lkml and Harvey as well so that there's less overlap in
unification work - Harvey spent quite some time unifying and cleaning up
the kprobes code during the past week.
So i think we could/should use Harvey's latest series as a base, those
are pretty finegrained already. Note that they break 64-bit kprobes
though, with such a config:
CONFIG_KPROBES=y
CONFIG_NET_TCPPROBE=y
so it crashes with an int3 in the TCP code. It's probably some trivial
typo somewhere, as 32-bit works fine.
the coordinates for x86.git#mm can be found below - that tree already
includes Harvey's latest kprobes series. I'll try to bisect the 64-bit
breakage now.
Ingo
------------------>
git-clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git linux-2.6.git
cd linux-2.6.git
git-branch x86
git-checkout x86
git-pull git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git mm
(do subsequent pulls via "git-pull --force", as we frequently rebase the
git tree. NOTE: this might override your own local changes, so do this
only if you don't mind losing these changes in that tree.)
Hi Ingo,
Ingo Molnar wrote:
> Masami,
>
> * Masami Hiramatsu <[email protected]> wrote:
>
>> Hi Harvey and Ingo,
>>
>> I'm working on another version of patches for unification.
>> Currently cleaning up the patches.
>> http://sources.redhat.com/ml/systemtap/2007-q4/msg00457.html
>> I'll cleanup and repost it today.
>
> cool! Please Cc: lkml and Harvey as well so that there's less overlap in
> unification work - Harvey spent quite some time unifying and cleaning up
> the kprobes code during the past week.
Should I rewrite it based on current git tree?
My patch set includes 3 parts:
- 2 bugfix patches (which are not merged yet)
- 2 booster patches (ditto)
- 2 unification patches (most of which are already done by Harvey's patches)
>
> So i think we could/should use Harvey's latest series as a base, those
> are pretty finegrained already. Note that they break 64-bit kprobes
> though, with such a config:
>
> CONFIG_KPROBES=y
> CONFIG_NET_TCPPROBE=y
>
> so it crashes with an int3 in the TCP code. It's probably some trivial
> typo somewhere, as 32-bit works fine.
>
> the coordinates for x86.git#mm can be found below - that tree already
> includes Harvey's latest kprobes series. I'll try to bisect the 64-bit
> breakage now.
>
> Ingo
>
> ------------------>
> git-clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git linux-2.6.git
> cd linux-2.6.git
> git-branch x86
> git-checkout x86
> git-pull git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git mm
>
> (do subsequent pulls via "git-pull --force", as we frequently rebase the
> git tree. NOTE: this might override your own local changes, so do this
> only if you dont mind about losing thse changes in that tree.)
>
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America) Inc.
Software Solutions Division
e-mail: [email protected], [email protected]
* Masami Hiramatsu <[email protected]> wrote:
> > cool! Please Cc: lkml and Harvey as well so that there's less
> > overlap in unification work - Harvey spent quite some time unifying
> > and cleaning up the kprobes code during the past week.
>
> Should I rewrite it based on current git tree?
> My patch includes 3 part of patches.
> - 2 Bugfix patches (which is not merged yet.)
> - 2 booster patches (ditto)
> - 2 unification patches (most of this patches are already done by Harvey's patch)
would it be easier/more robust to first do the unification patches and
then get the bugfixes and new features in? That would give us your
bugfixes and new features on both 32-bit and 64-bit at the same time.
feel free to do whichever approach you prefer - but it would be nice to
preserve the unification and cleanup work done by Harvey.
btw., is any of your bugfixes 2.6.24 material?
Ingo
Hi Ingo,
Ingo Molnar wrote:
> * Masami Hiramatsu <[email protected]> wrote:
>
>>> cool! Please Cc: lkml and Harvey as well so that there's less
>>> overlap in unification work - Harvey spent quite some time unifying
>>> and cleaning up the kprobes code during the past week.
>> Should I rewrite it based on current git tree?
>> My patch includes 3 part of patches.
>> - 2 Bugfix patches (which is not merged yet.)
>> - 2 booster patches (ditto)
>> - 2 unification patches (most of this patches are already done by Harvey's patch)
>
> would it be easier/more robust to first did the unification patches and
> then get the bugfixes and new features in? That would give us your
> bugfixes and new features on both 32-bit and 64-bit at the same time.
As far as I can see, my patches have fewer #ifdefs than Harvey's.
However, the granularity of those patches may not be so good currently.
> feel free to do whichever approach you prefer - but it would be nice to
> preserve the unification and cleanup work done by Harvey.
OK, rewriting will take a while, so I have sent the series of patches which I have right now.
> btw., is any of your bugfixes 2.6.24 material?
Yes, I'd like to fix the first two bugs in 2.6.24.
Thank you very much,
>
> Ingo
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America) Inc.
Software Solutions Division
e-mail: [email protected], [email protected]
On Mon, 2007-12-17 at 19:52 +0530, Srikar Dronamraju wrote:
> * Ingo Molnar <[email protected]> [2007-12-15 14:12:04]:
>
>
> Hi Ingo, Harvey
>
> In file include/asm-x86/kprobes_32.h
> typedef u8 kprobe_opcode_t;
> hence sizeof(kprobe_opcode_t) turns out to be 1.
>
> Hence
>
> memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
> is correct.
>
OK, but it would be much clearer to adopt the X86_64 way: define
MAX_INSN_SIZE one smaller and make this line:
/* Copy original instruction plus space for 1 byte relative jump */
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE + sizeof(kprobe_opcode_t));
See the first patch of my cleanup work that unified MAX_INSN_SIZE
and you'll see why this jumped out.
Harvey
On Mon, 2007-12-17 at 17:06 +0100, Ingo Molnar wrote:
> * Masami Hiramatsu <[email protected]> wrote:
>
> > > cool! Please Cc: lkml and Harvey as well so that there's less
> > > overlap in unification work - Harvey spent quite some time unifying
> > > and cleaning up the kprobes code during the past week.
> >
> > Should I rewrite it based on current git tree?
> > My patch includes 3 part of patches.
> > - 2 Bugfix patches (which is not merged yet.)
> > - 2 booster patches (ditto)
> > - 2 unification patches (most of this patches are already done by Harvey's patch)
>
> would it be easier/more robust to first did the unification patches and
> then get the bugfixes and new features in? That would give us your
> bugfixes and new features on both 32-bit and 64-bit at the same time.
>
> feel free to do whichever approach you prefer - but it would be nice to
> preserve the unification and cleanup work done by Harvey.
>
> btw., is any of your bugfixes 2.6.24 material?
>
Well, I'll admit to being a little disappointed if my work doesn't make
it in, but there are bugfixes here. I think my cleanup breakout is
better in the more-finegrained-changes sense. If you decide to keep
mine, I'll rebase Masami's patches 1-4 on top of that and run it by
him for resubmittal. But I'll leave it to Ingo to decide how
to proceed.
Harvey
Hi Harvey,
Harvey Harrison wrote:
> On Mon, 2007-12-17 at 19:52 +0530, Srikar Dronamraju wrote:
>> * Ingo Molnar <[email protected]> [2007-12-15 14:12:04]:
>>
>>
>> Hi Ingo, Harvey
>>
>> In file include/asm-x86/kprobes_32.h
>> typedef u8 kprobe_opcode_t;
>> hence sizeof(kprobe_opcode_t) turns out to be 1.
>>
>> Hence
>>
>> memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
>> is correct.
>>
>
> OK, but this would be much clearer to adopt the X86_64 way, define
> MAX_INSN_SIZE one smaller and make this line:
>
> /* Copy original instruction plus space for 1 byte relative jump */
> memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE + sizeof(kprobe_opcode_t));
>
> See the first patch of my cleanup work that unified MAX_INSN_SIZE
> and you'll see why this jumped out.
>
> Harvey
If you are referring to the relative jump which is inserted by
resume_execution(), I think you might have misunderstood it.
The size of that relative jump, which is embedded by the kprobe-booster,
is 5 bytes (not 1 byte), so it needs 5 bytes of space.
And we decided not to expand MAX_INSN_SIZE when we developed the booster.
The reasons are:
- it is a supplemental feature (it just accelerates kprobes); if we have
no space, we can disable it.
- 5 bytes is small enough compared with 15 (=MAX_INSN_SIZE)
- the lengths of most instructions are less than 10 bytes.
Additionally, MAX_INSN_SIZE is used in kernel/kprobes.c to allocate the
instruction buffer which is assigned to p->ainsn.insn. Since that
buffer's size is MAX_INSN_SIZE, you cannot copy more than MAX_INSN_SIZE
bytes of instruction into it.
BTW, in my patch, I unified MAX_INSN_SIZE to the bigger one (16).
I think that is enough for us.
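To illustrate (my own sketch, widths not to scale), the booster expects
the copied instruction and the 5-byte jump to share one slot:

p->ainsn.insn
+---------------------------+----------------+
|    copied instruction     | e9 xx xx xx xx |  <- 5-byte jmp rel32
+---------------------------+----------------+
|<----------- MAX_INSN_SIZE bytes ---------->|

If the copied instruction leaves less than 5 bytes free in the slot,
the booster is simply not used for that probe.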
Thanks,
Best Regards,
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America) Inc.
Software Solutions Division
e-mail: [email protected], [email protected]
On Mon, 2007-12-17 at 16:28 -0500, Masami Hiramatsu wrote:
> Hi Harvey,
> If you mention about a relative jump which is inserted by
> resume_execution(), I think you might misunderstand that relative jump.
>
> The size of that relative jump, which will be embedded by kprobe-booster, is
> 5-bytes(not 1 byte). So it needs 5 bytes space.
> And we decided not to expand MAX_INSN_SIZE when we developed the booster.
> The reasons are:
> - it is supplemental feature(just accelerating kprobes), if we have no space,
> we can disable it.
> - 5 bytes are big enough compared with 15(=MAX_INSN_SIZE)
> - the lengths of most of instructions are less than 10 bytes.
>
> Additionally, MAX_INSN_SIZE is used in kernel/kprobes.c to allocate an
> instruction buffer which will be assigned to p->ainsn.insn. Since the
> instruction buffer size is MAX_INSN_SIZE, you can not copy instructions
> more than MAX_INSN_SIZE.
>
> BTW, in my patch, I unified MAX_INSN_SIZE to bigger one(16).
> I think it is enough for us.
>
I went with 15 in mine; I thought it made the code a little more
readable, but I will defer if you think 16 is better. If you want me
to send the whole series to you, let me know.
I just sent out a series of 4 patches equivalent to your patches 1-4/6
but based on my already unified kprobes.c/h. You may want to check your
handling of restored registers in trampoline_probe_handler which I found
when rebasing yours on top of my cleanups. Not sure if this is
important, but it was a difference I found.
X86_32:
regs->cs = __KERNEL_CS | get_kernel_rpl();
yours:
regs->cs = __KERNEL_CS;
Harvey
Hi Harvey,
Harvey Harrison wrote:
> On Mon, 2007-12-17 at 16:28 -0500, Masami Hiramatsu wrote:
>> Hi Harvey,
>> If you mention about a relative jump which is inserted by
>> resume_execution(), I think you might misunderstand that relative jump.
>>
>> The size of that relative jump, which will be embedded by kprobe-booster, is
>> 5-bytes(not 1 byte). So it needs 5 bytes space.
>> And we decided not to expand MAX_INSN_SIZE when we developed the booster.
>> The reasons are:
>> - it is supplemental feature(just accelerating kprobes), if we have no space,
>> we can disable it.
>> - 5 bytes are big enough compared with 15(=MAX_INSN_SIZE)
>> - the lengths of most of instructions are less than 10 bytes.
>>
>> Additionally, MAX_INSN_SIZE is used in kernel/kprobes.c to allocate an
>> instruction buffer which will be assigned to p->ainsn.insn. Since the
>> instruction buffer size is MAX_INSN_SIZE, you can not copy instructions
>> more than MAX_INSN_SIZE.
>>
>> BTW, in my patch, I unified MAX_INSN_SIZE to bigger one(16).
>> I think it is enough for us.
>>
>
> I went with 15 in mine, I thought it made the code a little more
> readable, but I will defer if you think 16 is better. If you want me
> to send the whole series to you, let me know.
Before porting, could you tell me what differences are important
to you? We can discuss it.
> I just sent out a series of 4 patches equivalent to your patches 1-4/6
> but based on my already unified kprobes.c/h, You may want to check your
> handling of restored registers in trampoline_probe_handler which I found
> when rebasing yours on top of my cleanups. Not sure if this is
> important, but it was a difference I found.
>
> X86_32:
> regs->cs = __KERNEL_CS | get_kernel_rpl();
>
> yours:
> regs->cs = __KERNEL_CS;
For kretprobe compatibility, on x86-32 cs should be set with the rpl.
But get_kernel_rpl() does not exist on x86-64.
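So in a unified file this one probably has to stay behind an ifdef.
Just a sketch of how I would keep both behaviors:

#ifdef CONFIG_X86_32
	regs->cs = __KERNEL_CS | get_kernel_rpl();
#else
	regs->cs = __KERNEL_CS;
#endif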
Thanks,
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America) Inc.
Software Solutions Division
e-mail: [email protected], [email protected]
On Mon, 2007-12-17 at 16:52 -0500, Masami Hiramatsu wrote:
> Hi Harvey,
> Before porting, could you tell me what differences are important
> to you? We can discuss about it.
>
> > I just sent out a series of 4 patches equivalent to your patches 1-4/6
> > but based on my already unified kprobes.c/h, You may want to check your
> > handling of restored registers in trampoline_probe_handler which I found
> > when rebasing yours on top of my cleanups. Not sure if this is
> > important, but it was a difference I found.
> >
> > X86_32:
> > regs->cs = __KERNEL_CS | get_kernel_rpl();
> >
> > yours:
> > regs->cs = __KERNEL_CS;
>
> Because of kretprobe's compatibility, on x86-32 cs should be set rpl().
> But get_kernel_rpl() does not exist on x86-64.
>
I've already ported it and sent it to you. It's not really important to
me; I just think my fine-grained patches may be of some use to see where
the differences between X86_32/64 ended up being. Your patches end up
being just about entirely removal of ifdefs when rebased onto my
patches, so it's at least a good secondary check of your patches even
if mine don't go in. They also end up being much smaller against
my version.
I like my version slightly better because the remaining ifdefs (wrmsr,
etc) and others could be done in a few more small patches that are more
easily reviewable than your large final unification patch.
But, you know the code better than I....
Harvey
Hi Harvey,
Harvey Harrison wrote:
> On Mon, 2007-12-17 at 16:52 -0500, Masami Hiramatsu wrote:
>> Hi Harvey,
>> Before porting, could you tell me what differences are important
>> to you? We can discuss about it.
>>
>>> I just sent out a series of 4 patches equivalent to your patches 1-4/6
>>> but based on my already unified kprobes.c/h, You may want to check your
>>> handling of restored registers in trampoline_probe_handler which I found
>>> when rebasing yours on top of my cleanups. Not sure if this is
>>> important, but it was a difference I found.
>>>
>>> X86_32:
>>> regs->cs = __KERNEL_CS | get_kernel_rpl();
>>>
>>> yours:
>>> regs->cs = __KERNEL_CS;
>> Because of kretprobe's compatibility, on x86-32 cs should be set rpl().
>> But get_kernel_rpl() does not exist on x86-64.
>>
>
> I've already ported it and sent it to you. It's not really important to
> me I just think my fine-grained patches may be of some use to see where
> the differences between X86_32/64 ended up being. Your patches end up
> being just about entirely removal of ifdefs when rebased onto my
> patches, so it's at least a good secondary check of your patches even
> if mine don't go in. Your patches end up being much smaller against
> my version too.
OK, I'll review that.
>
> I like my version slightly better because the remaining ifdefs (wrmsr,
> etc) and others could be done in a few more small patches that are more
> easily reviewable than your large final unification patch.
I agree that your patches include some good things.
So let us merge them into one.
>
> But, you know the code better than I....
>
> Harvey
>
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America) Inc.
Software Solutions Division
e-mail: [email protected], [email protected]
On Mon, 2007-12-17 at 18:14 -0500, Masami Hiramatsu wrote:
> Hi Harvey,
>
> Harvey Harrison wrote:
> > On Mon, 2007-12-17 at 16:52 -0500, Masami Hiramatsu wrote:
> >> Hi Harvey,
> >> Before porting, could you tell me what differences are important
> >> to you? We can discuss about it.
> >
> > I've already ported it and sent it to you. It's not really important to
> > me I just think my fine-grained patches may be of some use to see where
> > the differences between X86_32/64 ended up being. Your patches end up
> > being just about entirely removal of ifdefs when rebased onto my
> > patches, so it's at least a good secondary check of your patches even
> > if mine don't go in. Your patches end up being much smaller against
> > my version too.
>
> OK, I'll review that.
>
> >
> > I like my version slightly better because the remaining ifdefs (wrmsr,
> > etc) and others could be done in a few more small patches that are more
> > easily reviewable than your large final unification patch.
>
> I agreed that your patches are including some goodness.
> So let us merge it into one.
>
>
OK, I'll take the last bits of your patches 5/6 that aren't already
cleaned up and send out a unified patchset for you to add your
Acked-by/Signed-off-by/Reviewed-by as appropriate.
These are:
-add stack_addr() macro
-I prefer the table definition macros in mine as it avoids the need to
cast the pointer passed to test_bit, but if you want them
to be u32 as in your patch, I can change it.
-wrmsr/wrmsrl - use wrmsr() for both (sketched below)
-call is_IF_modifier with p->ainsn.insn in both
-check casting of jprobe_saved_sp, I get some compile warnings currently
with pointer comparisons to signed/unsigned types.
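For the wrmsr item, my reading is that the two copies collapse to
something like this (a sketch; it assumes the three-argument wrmsr()
is usable on 64-bit as well, which I believe it is):

static __always_inline void clear_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
}

static __always_inline void restore_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
}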
That will eliminate nearly all of the remaining ifdefs in my version,
let me work through this and I'll send out a set for review.
Cheers,
Harvey
Hi Harvey,
Harvey Harrison wrote:
> On Mon, 2007-12-17 at 18:14 -0500, Masami Hiramatsu wrote:
>> Hi Harvey,
>>
>> Harvey Harrison wrote:
>>> On Mon, 2007-12-17 at 16:52 -0500, Masami Hiramatsu wrote:
>>>> Hi Harvey,
>>>> Before porting, could you tell me what differences are important
>>>> to you? We can discuss about it.
>>> I've already ported it and sent it to you. It's not really important to
>>> me I just think my fine-grained patches may be of some use to see where
>>> the differences between X86_32/64 ended up being. Your patches end up
>>> being just about entirely removal of ifdefs when rebased onto my
>>> patches, so it's at least a good secondary check of your patches even
>>> if mine don't go in. Your patches end up being much smaller against
>>> my version too.
>> OK, I'll review that.
>>
>>> I like my version slightly better because the remaining ifdefs (wrmsr,
>>> etc) and others could be done in a few more small patches that are more
>>> easily reviewable than your large final unification patch.
>> I agreed that your patches are including some goodness.
>> So let us merge it into one.
>>
>>
>
> OK, I'll take the last bits of your patches 5/6 that aren't already
> cleaned up and send out a unified patchset for you to add your
> acked/signed off by/reviewed by as appropriate.
Sure, I'll review it. It is very helpful to me.
Please Cc: or To: the persons who are listed in this mail.
Jim, if you can review the fixes which you've suggested,
could you give him your Signed-off-by?
>
> These are:
>
> -add stack_addr() macro
> -I prefer the table defintion macros in mine as it avoids the need to
> cast the pointer passed to test_bit, but if you want them
> to be u32 as in your patch, I can change it.
please do so. we'd like to reduce ifdefs as much as possible :-)
> -wrmsr/wrmsrl - use wrmsr() for both
> -call is_IF_modifier with p->ainsn.insn in both
> -check casting of jprobe_saved_sp, I get some compile warnings currently
> with pointer comparisons to signed/unsigned types.
Could you also add below?
- fix some comments (this clarifies the meaning of the code)
- add fix_riprel(). this is useful to reduce ifdefs.
- expand reenter_kprobe(). I think it can treat the above two blocks.
- reassignment of regs->ip in kprobe_handler can be unified
to "regs->ip = (unsigned long)addr;"
>
> That will eliminate nearly all of the remaining ifdefs in my version,
> let me work through this and I'll send out a set for review.
>
> CHeers,
>
> Harvey
>
>
Best Regards,
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America) Inc.
Software Solutions Division
e-mail: [email protected], [email protected]
Masami Hiramatsu wrote:
>> These are:
>>
>> -add stack_addr() macro
>> -I prefer the table defintion macros in mine as it avoids the need to
>> cast the pointer passed to test_bit, but if you want them
>> to be u32 as in your patch, I can change it.
>
> please do so. we'd like to reduce ifdefs as less as possible:-)
>
>> -wrmsr/wrmsrl - use wrmsr() for both
>> -call is_IF_modifier with p->ainsn.insn in both
>> -check casting of jprobe_saved_sp, I get some compile warnings currently
>> with pointer comparisons to signed/unsigned types.
>
> Could you also add below?
> - fix some comments (it clarifies the meanings of the code)
> - add fix_riprel(). this useful to reduce ifdefs.
> - expand reenter_kprobe(). I think it treat above two blocks.
> - reassignment of regs->ip in kprobe_handler can be unified
> to "regs->ip = (unsigned long)addr;"
Oh, I forgot to point out an important thing.
- please put the bugfix patches first. I think my bugfix patches
need to go upstream before unification; otherwise those bugs could
cause some crashes.
Thank you.
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America) Inc.
Software Solutions Division
e-mail: [email protected], [email protected]
On Mon, 2007-12-17 at 19:27 -0500, Masami Hiramatsu wrote:
> Masami Hiramatsu wrote:
> >> These are:
> >>
> >> -add stack_addr() macro
> >> -I prefer the table defintion macros in mine as it avoids the need
> to
> >> cast the pointer passed to test_bit, but if you want them
> >> to be u32 as in your patch, I can change it.
> >
> > please do so. we'd like to reduce ifdefs as less as possible:-)
> >
> >> -wrmsr/wrmsrl - use wrmsr() for both
> >> -call is_IF_modifier with p->ainsn.insn in both
> >> -check casting of jprobe_saved_sp, I get some compile warnings
> currently
> >> with pointer comparisons to signed/unsigned types.
> >
> > Could you also add below?
> > - fix some comments (it clarifies the meanings of the code)
> > - add fix_riprel(). this useful to reduce ifdefs.
> > - expand reenter_kprobe(). I think it treat above two blocks.
> > - reassignment of regs->ip in kprobe_handler can be unified
> > to "regs->ip = (unsigned long)addr;"
>
> Oh, I forgot to point out important thing.
> - please make bugfix patches first. I think my bugfix patches
> need to go upstream before unification. It would cause some
> crashes.
>
It's pretty hard to move those bugfixes to the head of the queue.
Attached is an mbox of my unification work. Maybe you could get your
two bugfixes applied to mainline kprobes_32/64.c, then my series
could go in as a merge later on. Most of my stuff is in kprobes.c
post-unify so the merge would be trivial later.
Ingo, what do you think? This rollup replaces all of my kprobes
patches to date. So you could apply patches 1,2/6 from Masami
into 2.6.24 and let mine come in during 2.6.25 as a merge, which
would avoid the conflicts in kprobes_32|64.c?
Harvey
Signed-off-by: Harvey Harrison <[email protected]>
---
Replacement for the last patch in the kprobes series I just sent.
arch/x86/kernel/kprobes.c | 45 +++++++++++++++++++++------------------------
1 files changed, 21 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 6f52c5e..c9df6fb 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -44,6 +44,20 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
+/*
+ * "®s->sp" looks wrong, but it's correct for x86_32. x86_32 CPUs
+ * don't save the ss and esp registers if the CPU is already in kernel
+ * mode when it traps. So for kprobes, regs->sp and regs->ss are not
+ * the [nonexistent] saved stack pointer and ss register, but rather
+ * the top 8 bytes of the pre-int3 stack. So &regs->sp happens to
+ * point to the top of the pre-int3 stack.
+ */
+#ifdef CONFIG_X86_32
+# define stack_addr(regs) ((unsigned long *)&regs->sp)
+#else
+# define stack_addr(regs) ((unsigned long *)regs->sp)
+#endif
+
#define W(r, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
(b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
@@ -409,11 +423,8 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
-#ifdef CONFIG_X86_32
-	unsigned long *sara = (unsigned long *)&regs->sp;
-#else
- unsigned long *sara = (unsigned long *)regs->sp;
-#endif
+ unsigned long *sara = stack_addr(regs);
+
ri->ret_addr = (kprobe_opcode_t *) *sara;
/* Replace the return addr with trampoline addr */
*sara = (unsigned long) &kretprobe_trampoline;
@@ -751,11 +762,8 @@ void *__kprobes trampoline_handler(struct pt_regs *regs)
static void __kprobes resume_execution(struct kprobe *p,
struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
-#ifdef CONFIG_X86_32
-	unsigned long *tos = (unsigned long *)&regs->sp;
-#else
- unsigned long *tos = (unsigned long *)regs->sp;
-#endif
+
+ unsigned long *tos = stack_addr(regs);
unsigned long copy_ip = (unsigned long)p->ainsn.insn;
unsigned long orig_ip = (unsigned long)p->addr;
kprobe_opcode_t *insn = p->ainsn.insn;
@@ -984,11 +992,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
kcb->jprobe_saved_regs = *regs;
-#ifdef CONFIG_X86_32
-	kcb->jprobe_saved_sp = &regs->sp;
-#else
- kcb->jprobe_saved_sp = (long *) regs->sp;
-#endif
+ kcb->jprobe_saved_sp = (long *)stack_addr(regs);
addr = (unsigned long)(kcb->jprobe_saved_sp);
/*
* As Linus pointed out, gcc assumes that the callee
@@ -1033,17 +1037,10 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
struct jprobe *jp = container_of(p, struct jprobe, kp);
if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
-#ifdef CONFIG_X86_32
-	if (&regs->sp != kcb->jprobe_saved_sp) {
+ if (stack_addr(regs) != kcb->jprobe_saved_sp) {
struct pt_regs *saved_regs = &kcb->jprobe_saved_sp;
printk("current sp %p does not match saved sp %p\n",
-		       &regs->sp, kcb->jprobe_saved_sp);
-#else
- if ((long *)regs->sp != kcb->jprobe_saved_sp) {
- struct pt_regs *saved_regs = &kcb->jprobe_saved_sp;
- printk("current sp %p does not match saved sp %p\n",
- (long *)regs->sp, kcb->jprobe_saved_sp);
-#endif
+ stack_addr(regs), kcb->jprobe_saved_sp);
printk("Saved registers for jprobe %p\n", jp);
show_registers(saved_regs);
printk("Current registers\n");
--
1.5.4.rc0.1083.gf568