Ingo, please pull from
git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc uprobes/core
As a reminder, we have a serious bug: any probed jmp/call can kill the
application, see the changelog in 11/15.
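For context, the gist of the problem: the xol slot sits near the top of
the address space, so a relative jmp/call executed out of line computes
its target relative to the slot rather than to the probed address, and
that target can even be non-canonical. In that case the CPU raises #GP
on the branch itself and the task is killed before any post-xol fixup
can run. A rough illustration (with made-up addresses):

	/* probed insn:	0x401000:  e9 <disp32>	jmp rel32		*/
	/* xol copy:	xol_vaddr: e9 <disp32>	same bytes, new address	*/
	/*
	 * target = xol_vaddr + 5 + disp32; nothing guarantees this is
	 * canonical, and #GP on the branch kills the task.
	 */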
I tried to test this as much as possible, and it seems to work. I also
wrote a test case which explicitly checks every conditional jump with
all possible bit combinations in eflags:
#include <asm/processor-flags.h>
#include <stdio.h>

static int nojmp;

#define __MK_FUNC(opc, name)				\
	asm (						\
		".text\n"				\
		".globl " #name "; " #name ":\n"	\
		".byte 0x" #opc "\n"			\
		".byte 0x0a \n" /* offs to 1f below */	\
		"movl $1, nojmp(%rip)\n"		\
		"1: ret\n"				\
	);

#define MK_FUNC(opc)	__MK_FUNC(opc, probe_func_ ## opc)

MK_FUNC(70) MK_FUNC(71) MK_FUNC(72) MK_FUNC(73)
MK_FUNC(74) MK_FUNC(75) MK_FUNC(76) MK_FUNC(77)
MK_FUNC(78) MK_FUNC(79) MK_FUNC(7a) MK_FUNC(7b)
MK_FUNC(7c) MK_FUNC(7d) MK_FUNC(7e) MK_FUNC(7f)

#define __CALL(flags, func)						\
	asm volatile ("pushf; push %0; popf; call " #func "; popf"	\
			: : "m" (flags) : "memory");

#define CALL(opc)					\
	({						\
		nojmp = 0;				\
		__CALL(flags, probe_func_ ## opc);	\
		!nojmp;					\
	})

long test_flags(unsigned long flags)
{
	printf("%04lx %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d\n",
		flags,
		CALL(70), CALL(71), CALL(72), CALL(73),
		CALL(74), CALL(75), CALL(76), CALL(77),
		CALL(78), CALL(79), CALL(7a), CALL(7b),
		CALL(7c), CALL(7d), CALL(7e), CALL(7f));

	return 0;
}

#define XF(xf)		(X86_EFLAGS_ ## xf)
#define XF_MASK		(XF(CF) | XF(OF) | XF(PF) | XF(SF) | XF(ZF))

int main(void)
{
	unsigned int bits;
	unsigned long __flags, flags;

	asm volatile("pushf; pop %0" : "=rm" (__flags) : : "memory");

	for (bits = 0; bits < (1 << 5); bits++) {
		flags = __flags & ~XF_MASK;

#define CPY_BIT(nr, xf)	\
		if (bits & (1 << nr)) flags |= XF(xf)

		CPY_BIT(0, CF);
		CPY_BIT(1, OF);
		CPY_BIT(2, PF);
		CPY_BIT(3, SF);
		CPY_BIT(4, ZF);

		test_flags(flags);
	}

	return 0;
}
The output is the same with probe_func_70..probe_func_7f probed.
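For reference (not part of the original test), the 0/1 pattern in each
row follows directly from the Jcc conditions. For example, with
CF=OF=PF=SF=ZF all clear only the "negated" conditions
jno/jae/jne/ja/jns/jnp/jge/jg are taken, so the first line should look
like this (the leading hex value depends on the initial eflags,
typically just IF plus the always-set reserved bit):

	0202 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1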
This series only fixes the problem. I'll send more changes later to
address some of the TODOs mentioned in the changelogs. In particular,
we need to do something with "callw", see the "Note:" in 13/15.
Oleg Nesterov (15):
uprobes: Kill UPROBE_SKIP_SSTEP and can_skip_sstep()
uprobes/x86: Fold prepare_fixups() into arch_uprobe_analyze_insn()
uprobes/x86: Kill the "ia32_compat" check in handle_riprel_insn(), remove "mm" arg
uprobes/x86: Gather "riprel" functions together
uprobes/x86: move the UPROBE_FIX_{RIP,IP,CALL} code at the end of pre/post hooks
uprobes/x86: Introduce uprobe_xol_ops and arch_uprobe->ops
uprobes/x86: Conditionalize the usage of handle_riprel_insn()
uprobes/x86: Send SIGILL if arch_uprobe_post_xol() fails
uprobes/x86: Teach arch_uprobe_post_xol() to restart if possible
uprobes/x86: Introduce sizeof_long(), cleanup adjust_ret_addr() and arch_uretprobe_hijack_return_addr()
uprobes/x86: Emulate unconditional relative jmp's
uprobes/x86: Emulate nop's using ops->emulate()
uprobes/x86: Emulate relative call's
uprobes/x86: Emulate relative conditional "short" jmp's
uprobes/x86: Emulate relative conditional "near" jmp's
arch/x86/include/asm/uprobes.h | 16 +-
arch/x86/kernel/uprobes.c | 551 +++++++++++++++++++++++++---------------
kernel/events/uprobes.c | 31 +--
3 files changed, 372 insertions(+), 226 deletions(-)
* Oleg Nesterov <[email protected]> wrote:
> Ingo, please pull from
>
>     git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc uprobes/core
>
> ...
Pulled into tip:perf/core, thanks a lot Oleg!
Ingo
Peter, feel free to ignore 1-4, but could you look at 5/5? It lacks
a test case because I do not have an x32-ready testing machine.
On 04/17, Oleg Nesterov wrote:
>
> This series only fixes the problem. I'll send more changes later to
> address some of the TODOs mentioned in the changelogs. In particular,
> we need to do something with "callw", see the "Note:" in 13/15.
So, what do you all think we should do with "callw"? Jim votes for
declining to probe callw, and I fully agree.
Any objection?
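For reference, a minimal sketch of what "reject callw" could look like
(hypothetical, not a patch from this thread): a 0x66 operand-size
prefix turns call/jmp rel32 into the 16-bit rel16 form, so
branch_setup_xol_ops() could simply refuse any branch insn which
carries that prefix:

	/* hypothetical sketch: refuse "word-sized" branches like callw */
	static bool insn_has_opnd_size_prefix(struct insn *insn)
	{
		int i;

		for (i = 0; i < insn->prefixes.nbytes; i++) {
			if (insn->prefixes.bytes[i] == 0x66)
				return true;
		}
		return false;
	}

	/* in branch_setup_xol_ops(), before accepting the branch: */
	if (insn_has_opnd_size_prefix(insn))
		return -ENOTSUPP;	/* refuse to probe, don't fall back */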
Until then, let's clean up the validate_insn_* paths and fix another bug.
This cleanup can also simplify the next "reject callw" change.
Oleg.
arch/x86/kernel/process_64.c | 7 +-
arch/x86/kernel/uprobes.c | 126 ++++++++++++++++++------------------------
2 files changed, 58 insertions(+), 75 deletions(-)
validate_insn_32bits() and validate_insn_64bits() are very similar;
turn them into a single uprobe_init_insn() with an additional
"bool x86_64" argument, which can be passed to insn_init() and used
to choose between good_insns_64/good_insns_32.

Also kill UPROBE_FIX_NONE; it has no users.

Note: the current code doesn't use ifdefs consistently: good_insns_64
depends on CONFIG_X86_64 but good_insns_32 is unconditional. This patch
removes the ifdef around good_insns_64; we will add it back later along
with a similar one for good_insns_32.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 45 +++++++++++++--------------------------------
1 files changed, 13 insertions(+), 32 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index e1d7115..68c63ab 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -32,9 +32,6 @@
/* Post-execution fixups. */
-/* No fixup needed */
-#define UPROBE_FIX_NONE 0x0
-
/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP 0x1
@@ -114,7 +111,6 @@ static volatile u32 good_2byte_insns[256 / 32] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
-#ifdef CONFIG_X86_64
/* Good-instruction tables for 64-bit apps */
static volatile u32 good_insns_64[256 / 32] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
@@ -138,7 +134,6 @@ static volatile u32 good_insns_64[256 / 32] = {
/* ---------------------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
-#endif
#undef W
/*
@@ -209,16 +204,22 @@ static bool is_prefix_bad(struct insn *insn)
return false;
}
-static int validate_insn_32bits(struct arch_uprobe *auprobe, struct insn *insn)
+static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
- insn_init(insn, auprobe->insn, false);
+ u32 volatile *good_insns;
+
+ insn_init(insn, auprobe->insn, x86_64);
- /* Skip good instruction prefixes; reject "bad" ones. */
insn_get_opcode(insn);
if (is_prefix_bad(insn))
return -ENOTSUPP;
- if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_32))
+ if (x86_64)
+ good_insns = good_insns_64;
+ else
+ good_insns = good_insns_32;
+
+ if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
return 0;
if (insn->opcode.nbytes == 2) {
@@ -355,30 +356,10 @@ handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *
}
}
-static int validate_insn_64bits(struct arch_uprobe *auprobe, struct insn *insn)
-{
- insn_init(insn, auprobe->insn, true);
-
- /* Skip good instruction prefixes; reject "bad" ones. */
- insn_get_opcode(insn);
- if (is_prefix_bad(insn))
- return -ENOTSUPP;
-
- if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_64))
- return 0;
-
- if (insn->opcode.nbytes == 2) {
- if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
- return 0;
- }
- return -ENOTSUPP;
-}
-
static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
{
- if (mm->context.ia32_compat)
- return validate_insn_32bits(auprobe, insn);
- return validate_insn_64bits(auprobe, insn);
+ bool x86_64 = !mm->context.ia32_compat;
+ return uprobe_init_insn(auprobe, insn, x86_64);
}
#else /* 32-bit: */
/*
@@ -398,7 +379,7 @@ static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *
static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
{
- return validate_insn_32bits(auprobe, insn);
+ return uprobe_init_insn(auprobe, insn, false);
}
#endif /* CONFIG_X86_64 */
--
1.5.5.1
1. Extract the ->ia32_compat check from the 64-bit validate_insn_bits()
   into the new helper is_64bit_mm(); it will have more users.

   TODO: this check is actually wrong if the mm owner is an X32 task;
   we need another fix which changes set_personality_ia32().

   TODO: even worse, the whole 64-or-32-bit logic is very broken and
   the fix is not simple; we need nontrivial changes in the core
   uprobes code.

2. Kill validate_insn_bits() and change its single caller to use
   uprobe_init_insn(..., is_64bit_mm(mm)).
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 20 +++++++++-----------
1 files changed, 9 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 68c63ab..5876ba5 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -231,6 +231,11 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
}
#ifdef CONFIG_X86_64
+static inline bool is_64bit_mm(struct mm_struct *mm)
+{
+ return !config_enabled(CONFIG_IA32_EMULATION) ||
+ !mm->context.ia32_compat;
+}
/*
* If arch_uprobe->insn doesn't use rip-relative addressing, return
* immediately. Otherwise, rewrite the instruction so that it accesses
@@ -355,13 +360,11 @@ handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *
*correction += 4;
}
}
-
-static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
+#else /* 32-bit: */
+static inline bool is_64bit_mm(struct mm_struct *mm)
{
- bool x86_64 = !mm->context.ia32_compat;
- return uprobe_init_insn(auprobe, insn, x86_64);
+ return false;
}
-#else /* 32-bit: */
/*
* No RIP-relative addressing on 32-bit
*/
@@ -376,11 +379,6 @@ static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *
long *correction)
{
}
-
-static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
-{
- return uprobe_init_insn(auprobe, insn, false);
-}
#endif /* CONFIG_X86_64 */
struct uprobe_xol_ops {
@@ -614,7 +612,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
bool fix_ip = true, fix_call = false;
int ret;
- ret = validate_insn_bits(auprobe, mm, &insn);
+ ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
if (ret)
return ret;
--
1.5.5.1
is_64bit_mm() assumes that mm->context.ia32_compat means the 32-bit
instruction set; this is not true if the task is TIF_X32.

Change set_personality_ia32() to initialize mm->context.ia32_compat
with TIF_X32 or TIF_IA32 instead of 1. This allows us to fix
is_64bit_mm() without affecting other users; they all treat
ia32_compat as "bool".

TIF_ in ->ia32_compat looks a bit strange, but this is grep-friendly
and avoids new defines.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/process_64.c | 7 ++++---
arch/x86/kernel/uprobes.c | 2 +-
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9c0280f..9b53940 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -413,12 +413,11 @@ void set_personality_ia32(bool x32)
set_thread_flag(TIF_ADDR32);
/* Mark the associated mm as containing 32-bit tasks. */
- if (current->mm)
- current->mm->context.ia32_compat = 1;
-
if (x32) {
clear_thread_flag(TIF_IA32);
set_thread_flag(TIF_X32);
+ if (current->mm)
+ current->mm->context.ia32_compat = TIF_X32;
current->personality &= ~READ_IMPLIES_EXEC;
/* is_compat_task() uses the presence of the x32
syscall bit flag to determine compat status */
@@ -426,6 +425,8 @@ void set_personality_ia32(bool x32)
} else {
set_thread_flag(TIF_IA32);
clear_thread_flag(TIF_X32);
+ if (current->mm)
+ current->mm->context.ia32_compat = TIF_IA32;
current->personality |= force_personality32;
/* Prepare the first "return" to user space */
current_thread_info()->status |= TS_COMPAT;
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index f445b00..4bbeb60 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -245,7 +245,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
static inline bool is_64bit_mm(struct mm_struct *mm)
{
return !config_enabled(CONFIG_IA32_EMULATION) ||
- !mm->context.ia32_compat;
+ !(mm->context.ia32_compat == TIF_IA32);
}
/*
* If arch_uprobe->insn doesn't use rip-relative addressing, return
--
1.5.5.1
Add suitable ifdefs around the good_insns_* arrays. We do not want to
add ugly ifdefs to their only user, uprobe_init_insn(), so the "#else"
branch simply defines them as NULL. This doesn't generate extra code;
gcc is smart enough, although the code would be fine even if it could
not detect that (without CONFIG_IA32_EMULATION) is_64bit_mm() is
__builtin_constant_p().

The patch looks more complicated than it is because it also moves
good_insns_64 up, close to good_insns_32.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 56 +++++++++++++++++++++++++-------------------
1 files changed, 32 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 6748600..f445b00 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -64,6 +64,7 @@
* to keep gcc from statically optimizing it out, as variable_test_bit makes
* some versions of gcc to think only *(unsigned long*) is used.
*/
+#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static volatile u32 good_insns_32[256 / 32] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ---------------------------------------------- */
@@ -86,32 +87,12 @@ static volatile u32 good_insns_32[256 / 32] = {
/* ---------------------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
-
-/* Using this for both 64-bit and 32-bit apps */
-static volatile u32 good_2byte_insns[256 / 32] = {
- /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
- /* ---------------------------------------------- */
- W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
- W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
- W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
- W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
- W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
- W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
- W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
- W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
- W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
- W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
- W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
- W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
- W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
- W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
- W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
- W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* f0 */
- /* ---------------------------------------------- */
- /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
-};
+#else
+#define good_insns_32 NULL
+#endif
/* Good-instruction tables for 64-bit apps */
+#if defined(CONFIG_X86_64)
static volatile u32 good_insns_64[256 / 32] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ---------------------------------------------- */
@@ -134,6 +115,33 @@ static volatile u32 good_insns_64[256 / 32] = {
/* ---------------------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
+#else
+#define good_insns_64 NULL
+#endif
+
+/* Using this for both 64-bit and 32-bit apps */
+static volatile u32 good_2byte_insns[256 / 32] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ---------------------------------------------- */
+ W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
+ W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
+ W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
+ W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
+ W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
+ W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
+ W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
+ W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
+ W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
+ W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
+ W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
+ W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
+ W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
+ W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
+ W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
+ W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* f0 */
+ /* ---------------------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+};
#undef W
/*
--
1.5.5.1
Change uprobe_init_insn() to make insn_complete() true; this makes the
other insn_get_*() calls unnecessary.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 13 ++++---------
1 files changed, 4 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 5876ba5..6748600 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -209,8 +209,11 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
u32 volatile *good_insns;
insn_init(insn, auprobe->insn, x86_64);
+ /* has the side-effect of processing the entire instruction */
+ insn_get_length(insn);
+ if (WARN_ON_ONCE(!insn_complete(insn)))
+ return -ENOEXEC;
- insn_get_opcode(insn);
if (is_prefix_bad(insn))
return -ENOTSUPP;
@@ -283,8 +286,6 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
* is the immediate operand.
*/
cursor = auprobe->insn + insn_offset_modrm(insn);
- insn_get_length(insn);
-
/*
* Convert from rip-relative addressing to indirect addressing
* via a scratch register. Change the r/m field from 0x5 (%rip)
@@ -563,11 +564,6 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
u8 opc1 = OPCODE1(insn);
- /* has the side-effect of processing the entire instruction */
- insn_get_length(insn);
- if (WARN_ON_ONCE(!insn_complete(insn)))
- return -ENOEXEC;
-
switch (opc1) {
case 0xeb: /* jmp 8 */
case 0xe9: /* jmp 32 */
@@ -643,7 +639,6 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
fix_ip = false;
break;
case 0xff:
- insn_get_modrm(&insn);
switch (MODRM_REG(&insn)) {
case 2: case 3: /* call or lcall, indirect */
fix_call = true;
--
1.5.5.1
On 04/17, Oleg Nesterov wrote:
>
> This series only fixes the problem. I'll send more changes later to
> address some of the TODOs mentioned in the changelogs.
Another series which builds on the already merged changes.

All changes were planned, except that 1/5 fixes an oversight in
8ad8e9d3fd64 ("uprobes/x86: Introduce uprobe_xol_ops and
arch_uprobe->ops"). Fortunately the wrong logic is currently harmless,
but it should be fixed anyway.
Oleg.
arch/x86/include/asm/uprobes.h | 12 +++--
arch/x86/kernel/uprobes.c | 94 +++++++++++++++++++++++----------------
2 files changed, 62 insertions(+), 44 deletions(-)
Currently this doesn't matter since the only ->pre_xol() hook can't
fail, but we need to fix arch_uprobe_pre_xol() anyway: if ->pre_xol()
fails, we should not change regs->ip/flags; we should just return the
error to make a restart actually possible.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 8 ++++++--
1 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 4bbeb60..9690c65 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -676,6 +676,12 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
+ if (auprobe->ops->pre_xol) {
+ int err = auprobe->ops->pre_xol(auprobe, regs);
+ if (err)
+ return err;
+ }
+
regs->ip = utask->xol_vaddr;
utask->autask.saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
@@ -685,8 +691,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
set_task_blockstep(current, false);
- if (auprobe->ops->pre_xol)
- return auprobe->ops->pre_xol(auprobe, regs);
return 0;
}
--
1.5.5.1
Commit 014940bad8e4 ("uprobes/x86: Send SIGILL if
arch_uprobe_post_xol() fails") changed arch_uprobe_post_xol() to use
arch_uprobe_abort_xol() if ->post_xol fails. This was correct and
helped to avoid additional complications; we need to clear
X86_EFLAGS_TF in this case.

However, now that we have the uprobe_xol_ops->abort() hook it would be
better to avoid arch_uprobe_abort_xol() here. ->post_xol() should
likely do what ->abort() does anyway; we should not do the same work
twice. Currently only handle_riprel_post_xol() can be called twice;
this is unnecessary but safe. Still, it is not clean and can lead to
problems in the future.

Change arch_uprobe_post_xol() to clear X86_EFLAGS_TF and restore ->ip
by hand, and avoid arch_uprobe_abort_xol(). This temporarily uglifies
the usage of autask.saved_tf; we will clean this up later.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 17 +++++++++--------
1 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 8fce6ef..4e2cdd5 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -748,22 +748,24 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
struct uprobe_task *utask = current->utask;
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
if (auprobe->ops->post_xol) {
int err = auprobe->ops->post_xol(auprobe, regs);
if (err) {
- arch_uprobe_abort_xol(auprobe, regs);
+ if (!utask->autask.saved_tf)
+ regs->flags &= ~X86_EFLAGS_TF;
/*
- * Restart the probed insn. ->post_xol() must ensure
- * this is really possible if it returns -ERESTART.
+ * Restore ->ip for restart or post mortem analysis.
+ * ->post_xol() must not return -ERESTART unless this
+ * is really possible.
*/
+ regs->ip = utask->vaddr;
if (err == -ERESTART)
return 0;
return err;
}
}
-
- current->thread.trap_nr = utask->autask.saved_trap_nr;
/*
* arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
* so we can get an extra SIGTRAP if we do not clear TF. We need
@@ -808,9 +810,8 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
/*
* This function gets called when XOL instruction either gets trapped or
- * the thread has a fatal signal, or if arch_uprobe_post_xol() failed.
- * Reset the instruction pointer to its probed address for the potential
- * restart or for post mortem analysis.
+ * the thread has a fatal signal. Reset the instruction pointer to its
+ * probed address for the potential restart or for post mortem analysis.
*/
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
--
1.5.5.1
UPROBE_FIX_SETF is only needed to handle "popf" correctly, but it is
processed by the generic arch_uprobe_post_xol() code. This doesn't
allow us to make ->fixups private to default_xol_ops.

1. Change default_post_xol_op(UPROBE_FIX_SETF) to set ->saved_tf = true.

   "popf" always reads the flags from the stack; it doesn't matter if TF
   was set or not before the single-step. Ignoring the naming, this is
   even more logical: "saved_tf" means "owned by application", and we
   do not own this flag after "popf".

2. Change arch_uprobe_post_xol() to save ->saved_tf into the local
   "bool send_sigtrap" before ->post_xol().

3. Change arch_uprobe_post_xol() to ignore UPROBE_FIX_SETF and just
   check ->saved_tf after ->post_xol().

With this patch ->fixups and ->rip_rela_target_address are only used
by the default_xol_ops hooks; we are ready to remove them from the
common part of arch_uprobe.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 20 ++++++++++++--------
1 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 4e2cdd5..e6314a2 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -441,6 +441,9 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
return -ERESTART;
}
}
+ /* popf; tell the caller to not touch TF */
+ if (auprobe->fixups & UPROBE_FIX_SETF)
+ utask->autask.saved_tf = true;
return 0;
}
@@ -746,15 +749,15 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *t)
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
+ bool send_sigtrap = utask->autask.saved_tf;
+ int err = 0;
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
current->thread.trap_nr = utask->autask.saved_trap_nr;
if (auprobe->ops->post_xol) {
- int err = auprobe->ops->post_xol(auprobe, regs);
+ err = auprobe->ops->post_xol(auprobe, regs);
if (err) {
- if (!utask->autask.saved_tf)
- regs->flags &= ~X86_EFLAGS_TF;
/*
* Restore ->ip for restart or post mortem analysis.
* ->post_xol() must not return -ERESTART unless this
@@ -762,8 +765,8 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
*/
regs->ip = utask->vaddr;
if (err == -ERESTART)
- return 0;
- return err;
+ err = 0;
+ send_sigtrap = false;
}
}
/*
@@ -771,12 +774,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
* so we can get an extra SIGTRAP if we do not clear TF. We need
* to examine the opcode to make it right.
*/
- if (utask->autask.saved_tf)
+ if (send_sigtrap)
send_sig(SIGTRAP, current, 0);
- else if (!(auprobe->fixups & UPROBE_FIX_SETF))
+
+ if (!utask->autask.saved_tf)
regs->flags &= ~X86_EFLAGS_TF;
- return 0;
+ return err;
}
/* callback routine for handling exceptions. */
--
1.5.5.1
Finally we can move arch_uprobe->fixups/rip_rela_target_address into
the new "def" struct and place this struct in the union; they are only
used by the default_xol_ops paths.
The patch also renames rip_rela_target_address to riprel_target just
to make this name shorter.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/include/asm/uprobes.h | 12 ++++++----
arch/x86/kernel/uprobes.c | 41 +++++++++++++++++++--------------------
2 files changed, 27 insertions(+), 26 deletions(-)
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 93bee7b..72caff7 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -41,18 +41,20 @@ struct arch_uprobe {
u8 ixol[MAX_UINSN_BYTES];
};
- u16 fixups;
const struct uprobe_xol_ops *ops;
union {
-#ifdef CONFIG_X86_64
- unsigned long rip_rela_target_address;
-#endif
struct {
s32 offs;
u8 ilen;
u8 opc1;
- } branch;
+ } branch;
+ struct {
+#ifdef CONFIG_X86_64
+ long riprel_target;
+#endif
+ u16 fixups;
+ } def;
};
};
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index e6314a2..69b2d61 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -251,10 +251,9 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
* If arch_uprobe->insn doesn't use rip-relative addressing, return
* immediately. Otherwise, rewrite the instruction so that it accesses
* its memory operand indirectly through a scratch register. Set
- * arch_uprobe->fixups and arch_uprobe->rip_rela_target_address
- * accordingly. (The contents of the scratch register will be saved
- * before we single-step the modified instruction, and restored
- * afterward.)
+ * def->fixups and def->riprel_target accordingly. (The contents of the
+ * scratch register will be saved before we single-step the modified
+ * instruction, and restored afterward).
*
* We do this because a rip-relative instruction can access only a
* relatively small area (+/- 2 GB from the instruction), and the XOL
@@ -308,18 +307,18 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
* is NOT the register operand, so we use %rcx (register
* #1) for the scratch register.
*/
- auprobe->fixups = UPROBE_FIX_RIP_CX;
+ auprobe->def.fixups = UPROBE_FIX_RIP_CX;
/* Change modrm from 00 000 101 to 00 000 001. */
*cursor = 0x1;
} else {
/* Use %rax (register #0) for the scratch register. */
- auprobe->fixups = UPROBE_FIX_RIP_AX;
+ auprobe->def.fixups = UPROBE_FIX_RIP_AX;
/* Change modrm from 00 xxx 101 to 00 xxx 000 */
*cursor = (reg << 3);
}
/* Target address = address of next instruction + (signed) offset */
- auprobe->rip_rela_target_address = (long)insn->length + insn->displacement.value;
+ auprobe->def.riprel_target = (long)insn->length + insn->displacement.value;
/* Displacement field is gone; slide immediate field (if any) over. */
if (insn->immediate.nbytes) {
@@ -336,25 +335,25 @@ static void
pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
struct arch_uprobe_task *autask)
{
- if (auprobe->fixups & UPROBE_FIX_RIP_AX) {
+ if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) {
autask->saved_scratch_register = regs->ax;
regs->ax = current->utask->vaddr;
- regs->ax += auprobe->rip_rela_target_address;
- } else if (auprobe->fixups & UPROBE_FIX_RIP_CX) {
+ regs->ax += auprobe->def.riprel_target;
+ } else if (auprobe->def.fixups & UPROBE_FIX_RIP_CX) {
autask->saved_scratch_register = regs->cx;
regs->cx = current->utask->vaddr;
- regs->cx += auprobe->rip_rela_target_address;
+ regs->cx += auprobe->def.riprel_target;
}
}
static void
handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
{
- if (auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
+ if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
struct arch_uprobe_task *autask;
autask = &current->utask->autask;
- if (auprobe->fixups & UPROBE_FIX_RIP_AX)
+ if (auprobe->def.fixups & UPROBE_FIX_RIP_AX)
regs->ax = autask->saved_scratch_register;
else
regs->cx = autask->saved_scratch_register;
@@ -432,17 +431,17 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
long correction = (long)(utask->vaddr - utask->xol_vaddr);
handle_riprel_post_xol(auprobe, regs, &correction);
- if (auprobe->fixups & UPROBE_FIX_IP)
+ if (auprobe->def.fixups & UPROBE_FIX_IP)
regs->ip += correction;
- if (auprobe->fixups & UPROBE_FIX_CALL) {
+ if (auprobe->def.fixups & UPROBE_FIX_CALL) {
if (adjust_ret_addr(regs->sp, correction)) {
regs->sp += sizeof_long();
return -ERESTART;
}
}
/* popf; tell the caller to not touch TF */
- if (auprobe->fixups & UPROBE_FIX_SETF)
+ if (auprobe->def.fixups & UPROBE_FIX_SETF)
utask->autask.saved_tf = true;
return 0;
@@ -636,12 +635,12 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
/*
* Figure out which fixups arch_uprobe_post_xol() will need to perform,
- * and annotate arch_uprobe->fixups accordingly. To start with, ->fixups
- * is either zero or it reflects rip-related fixups.
+ * and annotate def->fixups accordingly. To start with, ->fixups is
+ * either zero or it reflects rip-related fixups.
*/
switch (OPCODE1(&insn)) {
case 0x9d: /* popf */
- auprobe->fixups |= UPROBE_FIX_SETF;
+ auprobe->def.fixups |= UPROBE_FIX_SETF;
break;
case 0xc3: /* ret or lret -- ip is correct */
case 0xcb:
@@ -669,9 +668,9 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
}
if (fix_ip)
- auprobe->fixups |= UPROBE_FIX_IP;
+ auprobe->def.fixups |= UPROBE_FIX_IP;
if (fix_call)
- auprobe->fixups |= UPROBE_FIX_CALL;
+ auprobe->def.fixups |= UPROBE_FIX_CALL;
auprobe->ops = &default_xol_ops;
return 0;
--
1.5.5.1
arch_uprobe_abort_xol() calls handle_riprel_post_xol() even if
auprobe->ops != default_xol_ops. This is fine correctness-wise: only
default_pre_xol_op() can set UPROBE_FIX_RIP_AX|UPROBE_FIX_RIP_CX, and
otherwise handle_riprel_post_xol() is a nop.

But this doesn't look clean, and it doesn't allow us to move ->fixups
into the union in arch_uprobe. Move this handle_riprel_post_xol() call
into the new default_abort_op() hook and change
arch_uprobe_abort_xol() accordingly.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 14 +++++++++++---
1 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 9690c65..8fce6ef 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -394,6 +394,7 @@ struct uprobe_xol_ops {
bool (*emulate)(struct arch_uprobe *, struct pt_regs *);
int (*pre_xol)(struct arch_uprobe *, struct pt_regs *);
int (*post_xol)(struct arch_uprobe *, struct pt_regs *);
+ void (*abort)(struct arch_uprobe *, struct pt_regs *);
};
static inline int sizeof_long(void)
@@ -444,9 +445,15 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
return 0;
}
+static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ handle_riprel_post_xol(auprobe, regs, NULL);
+}
+
static struct uprobe_xol_ops default_xol_ops = {
.pre_xol = default_pre_xol_op,
.post_xol = default_post_xol_op,
+ .abort = default_abort_op,
};
static bool branch_is_call(struct arch_uprobe *auprobe)
@@ -809,10 +816,11 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
- current->thread.trap_nr = utask->autask.saved_trap_nr;
- handle_riprel_post_xol(auprobe, regs, NULL);
- instruction_pointer_set(regs, utask->vaddr);
+ if (auprobe->ops->abort)
+ auprobe->ops->abort(auprobe, regs);
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+ regs->ip = utask->vaddr;
/* clear TF if it was set by us in arch_uprobe_pre_xol() */
if (!utask->autask.saved_tf)
regs->flags &= ~X86_EFLAGS_TF;
--
1.5.5.1
On Sat, 2014-04-19 at 19:01 +0200, Oleg Nesterov wrote:
> Peter, feel free to ignore 1-4, but could you look at 5/5? It lacks
> a test case because I do not have an x32-ready testing machine.
>
> ...
These 5 patches are:
Reviewed-by: Jim Keniston <[email protected]>
On Tue, 2014-04-22 at 16:47 +0200, Oleg Nesterov wrote:
> Finally we can move arch_uprobe->fixups/rip_rela_target_address
> into the new "def" struct and place this struct in the union, they
> are only used by default_xol_ops paths.
>
> The patch also renames rip_rela_target_address to riprel_target just
> to make this name shorter.
>
> Signed-off-by: Oleg Nesterov <[email protected]>
I see a couple of nits in this patch (see below), but the others look
good.
Patches 1-5 of this set:
Reviewed-by: Jim Keniston <[email protected]>
> ---
> arch/x86/include/asm/uprobes.h | 12 ++++++----
> arch/x86/kernel/uprobes.c | 41 +++++++++++++++++++--------------------
> 2 files changed, 27 insertions(+), 26 deletions(-)
>
> diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
> index 93bee7b..72caff7 100644
> --- a/arch/x86/include/asm/uprobes.h
> +++ b/arch/x86/include/asm/uprobes.h
> @@ -41,18 +41,20 @@ struct arch_uprobe {
> u8 ixol[MAX_UINSN_BYTES];
> };
>
> - u16 fixups;
> const struct uprobe_xol_ops *ops;
>
> union {
> -#ifdef CONFIG_X86_64
> - unsigned long rip_rela_target_address;
> -#endif
> struct {
> s32 offs;
> u8 ilen;
> u8 opc1;
> - } branch;
> + } branch;
> + struct {
> +#ifdef CONFIG_X86_64
> + long riprel_target;
> +#endif
> + u16 fixups;
> + } def;
"def" is kind of ambiguous. How about "dfault" or some such?
> };
> };
>
> diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
> index e6314a2..69b2d61 100644
> --- a/arch/x86/kernel/uprobes.c
> +++ b/arch/x86/kernel/uprobes.c
...
> @@ -636,12 +635,12 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
>
> /*
> * Figure out which fixups arch_uprobe_post_xol() will need to perform,
> - * and annotate arch_uprobe->fixups accordingly. To start with, ->fixups
> - * is either zero or it reflects rip-related fixups.
> + * and annotate def->fixups accordingly. To start with, ->fixups is
> + * either zero or it reflects rip-related fixups.
That sentence stopped being true a couple of patch sets ago.
handle_riprel_insn() is called later in this function now.
> ...
Jim
On 04/17, Oleg Nesterov wrote:
>
> This series only fixes the problem. I'll send more changes later to
> address some of the TODOs mentioned in the changelogs.
This is probably the last series I am sending in this thread, before
the next PULL request. This completes the changes which I planned from
the very beginning, except "emulate jcxz/loopz", which I am still
unsure about.

I didn't test it yet; I will, of course, and will report if something
is wrong.
Jim, I see your email about the previous series. If we rename ->def then
this series obviously should be updated too, but this is trivial.
Oleg.
arch/x86/include/asm/uprobes.h | 3 +-
arch/x86/kernel/uprobes.c | 66 ++++++++++++++++------------------------
2 files changed, 28 insertions(+), 41 deletions(-)
handle_riprel_insn() assumes that nobody else could have modified
->fixups before it. This is correct but fragile; change it to use "|=".

Also make ->fixups a u8; we are going to add new members into the
union. It is not clear why the UPROBE_FIX_RIP_.X flags lived in the
upper byte; redefine them so that they fit into a u8.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/include/asm/uprobes.h | 2 +-
arch/x86/kernel/uprobes.c | 14 +++++++-------
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 72caff7..9ce25ce 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -53,7 +53,7 @@ struct arch_uprobe {
#ifdef CONFIG_X86_64
long riprel_target;
#endif
- u16 fixups;
+ u8 fixups;
} def;
};
};
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 69b2d61..37e73b6 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -33,16 +33,16 @@
/* Post-execution fixups. */
/* Adjust IP back to vicinity of actual insn */
-#define UPROBE_FIX_IP 0x1
+#define UPROBE_FIX_IP 0x01
/* Adjust the return address of a call insn */
-#define UPROBE_FIX_CALL 0x2
+#define UPROBE_FIX_CALL 0x02
/* Instruction will modify TF, don't change it */
-#define UPROBE_FIX_SETF 0x4
+#define UPROBE_FIX_SETF 0x04
-#define UPROBE_FIX_RIP_AX 0x8000
-#define UPROBE_FIX_RIP_CX 0x4000
+#define UPROBE_FIX_RIP_AX 0x08
+#define UPROBE_FIX_RIP_CX 0x10
#define UPROBE_TRAP_NR UINT_MAX
@@ -307,12 +307,12 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
* is NOT the register operand, so we use %rcx (register
* #1) for the scratch register.
*/
- auprobe->def.fixups = UPROBE_FIX_RIP_CX;
+ auprobe->def.fixups |= UPROBE_FIX_RIP_CX;
/* Change modrm from 00 000 101 to 00 000 001. */
*cursor = 0x1;
} else {
/* Use %rax (register #0) for the scratch register. */
- auprobe->def.fixups = UPROBE_FIX_RIP_AX;
+ auprobe->def.fixups |= UPROBE_FIX_RIP_AX;
/* Change modrm from 00 xxx 101 to 00 xxx 000 */
*cursor = (reg << 3);
}
--
1.5.5.1
Now that UPROBE_FIX_IP/UPROBE_FIX_CALL are mutually exclusive, we can
use a single "fix_ip_or_call" enum instead of the two fix_* booleans.
This way the logic looks more understandable and clean to me.

While at it, join "case 0xea" with the other "ip is correct" ret/lret
cases. Also change default_post_xol_op() to use "else if" for the same
reason.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 27 +++++++++++----------------
1 files changed, 11 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 4c60b7a..4ae2406 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -424,10 +424,9 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
long correction = (long)(utask->vaddr - utask->xol_vaddr);
handle_riprel_post_xol(auprobe, regs, &correction);
- if (auprobe->def.fixups & UPROBE_FIX_IP)
+ if (auprobe->def.fixups & UPROBE_FIX_IP) {
regs->ip += correction;
-
- if (auprobe->def.fixups & UPROBE_FIX_CALL) {
+ } else if (auprobe->def.fixups & UPROBE_FIX_CALL) {
regs->sp += sizeof_long();
if (push_ret_address(regs, utask->vaddr + auprobe->def.ilen))
return -ERESTART;
@@ -612,7 +611,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
struct insn insn;
- bool fix_ip = true, fix_call = false;
+ u8 fix_ip_or_call = UPROBE_FIX_IP;
int ret;
ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
@@ -636,21 +635,20 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
case 0xcb:
case 0xc2:
case 0xca:
- fix_ip = false;
+ case 0xea: /* jmp absolute -- ip is correct */
+ fix_ip_or_call = 0;
break;
case 0x9a: /* call absolute - Fix return addr, not ip */
- fix_call = true;
- fix_ip = false;
- break;
- case 0xea: /* jmp absolute -- ip is correct */
- fix_ip = false;
+ fix_ip_or_call = UPROBE_FIX_CALL;
break;
case 0xff:
switch (MODRM_REG(&insn)) {
case 2: case 3: /* call or lcall, indirect */
- fix_call = true;
+ fix_ip_or_call = UPROBE_FIX_CALL;
+ break;
case 4: case 5: /* jmp or ljmp, indirect */
- fix_ip = false;
+ fix_ip_or_call = 0;
+ break;
}
/* fall through */
default:
@@ -658,10 +656,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
}
auprobe->def.ilen = insn.length;
- if (fix_ip)
- auprobe->def.fixups |= UPROBE_FIX_IP;
- if (fix_call)
- auprobe->def.fixups |= UPROBE_FIX_CALL;
+ auprobe->def.fixups |= fix_ip_or_call;
auprobe->ops = &default_xol_ops;
return 0;
--
1.5.5.1
Extract the "push return address" code from branch_emulate_op() into
the new simple helper, push_ret_address(). It will have more users.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 15 ++++++++++++---
1 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 37e73b6..48d2623 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -407,6 +407,17 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
return 0;
}
+static int push_ret_address(struct pt_regs *regs, unsigned long ip)
+{
+ unsigned long new_sp = regs->sp - sizeof_long();
+
+ if (copy_to_user((void __user *)new_sp, &ip, sizeof_long()))
+ return -EFAULT;
+
+ regs->sp = new_sp;
+ return 0;
+}
+
/*
* Adjust the return address pushed by a call insn executed out of line.
*/
@@ -517,7 +528,6 @@ static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
unsigned long offs = (long)auprobe->branch.offs;
if (branch_is_call(auprobe)) {
- unsigned long new_sp = regs->sp - sizeof_long();
/*
* If it fails we execute this (mangled, see the comment in
* branch_clear_offset) insn out-of-line. In the likely case
@@ -527,9 +537,8 @@ static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
*
* But there is corner case, see the comment in ->post_xol().
*/
- if (copy_to_user((void __user *)new_sp, &new_ip, sizeof_long()))
+ if (push_ret_address(regs, new_ip))
return false;
- regs->sp = new_sp;
} else if (!check_jmp_cond(auprobe, regs)) {
offs = 0;
}
--
1.5.5.1
The only insn which could have both UPROBE_FIX_IP and UPROBE_FIX_CALL
was 0xe8 "call relative", and now it is handled by branch_xol_ops. So
we can change default_post_xol_op(UPROBE_FIX_CALL) to simply push the
address of the next insn, utask->vaddr + insn.length; we just need to
record insn.length in the new auprobe->def.ilen member.

Note: if/when we teach branch_xol_ops to support jcxz/loopz we can
remove the "correction" logic; UPROBE_FIX_IP can use the same address.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/include/asm/uprobes.h | 1 +
arch/x86/kernel/uprobes.c | 24 +++---------------------
2 files changed, 4 insertions(+), 21 deletions(-)
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 9ce25ce..a040d49 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -54,6 +54,7 @@ struct arch_uprobe {
long riprel_target;
#endif
u8 fixups;
+ u8 ilen;
} def;
};
};
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 48d2623..4c60b7a 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -418,24 +418,6 @@ static int push_ret_address(struct pt_regs *regs, unsigned long ip)
return 0;
}
-/*
- * Adjust the return address pushed by a call insn executed out of line.
- */
-static int adjust_ret_addr(unsigned long sp, long correction)
-{
- int rasize = sizeof_long();
- long ra;
-
- if (copy_from_user(&ra, (void __user *)sp, rasize))
- return -EFAULT;
-
- ra += correction;
- if (copy_to_user((void __user *)sp, &ra, rasize))
- return -EFAULT;
-
- return 0;
-}
-
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
@@ -446,10 +428,9 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
regs->ip += correction;
if (auprobe->def.fixups & UPROBE_FIX_CALL) {
- if (adjust_ret_addr(regs->sp, correction)) {
- regs->sp += sizeof_long();
+ regs->sp += sizeof_long();
+ if (push_ret_address(regs, utask->vaddr + auprobe->def.ilen))
return -ERESTART;
- }
}
/* popf; tell the caller to not touch TF */
if (auprobe->def.fixups & UPROBE_FIX_SETF)
@@ -676,6 +657,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
handle_riprel_insn(auprobe, &insn);
}
+ auprobe->def.ilen = insn.length;
if (fix_ip)
auprobe->def.fixups |= UPROBE_FIX_IP;
if (fix_call)
--
1.5.5.1
On 04/24, Jim Keniston wrote:
>
> I see a couple of nits in this patch (see below), but the others look
> good.
>
> Patches 1-5 of this set:
> Reviewed-by: Jim Keniston <[email protected]>
Thanks!
> > struct {
> > s32 offs;
> > u8 ilen;
> > u8 opc1;
> > - } branch;
> > + } branch;
> > + struct {
> > +#ifdef CONFIG_X86_64
> > + long riprel_target;
> > +#endif
> > + u16 fixups;
> > + } def;
>
> "def" is kind of ambiguous.
Heh. I am shy to admit that my plan was to name it "default". I changed
its name only after gcc told me I should learn "C".
> How about "dfault" or some such?
Then probably "dflt"; this looks more consistent and more IBMish ;)
On a serious note, I agree with any naming... but "dfault" looks like
"d" fault to me. Perhaps something else?
> > --- a/arch/x86/kernel/uprobes.c
> > +++ b/arch/x86/kernel/uprobes.c
> ...
> > @@ -636,12 +635,12 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
> >
> > /*
> > * Figure out which fixups arch_uprobe_post_xol() will need to perform,
> > - * and annotate arch_uprobe->fixups accordingly. To start with, ->fixups
> > - * is either zero or it reflects rip-related fixups.
> > + * and annotate def->fixups accordingly. To start with, ->fixups is
> > + * either zero or it reflects rip-related fixups.
>
> That sentence stopped being true a couple of patch sets ago.
> handle_riprel_insn() is called later in this function now.
Yes, but the comment mentions arch_uprobe_post_xol? Anyway, I'll update
it to say "default_post_xol_op" instead.
Or did I misunderstand you?
Oleg.
On 04/25, Oleg Nesterov wrote:
>
> Jim, I see your email about the previous series. If we rename ->def then
> this series obviously should be updated too, but this is trivial.
Better yet, can't we rename it in a separate patch?
I agree, ->def doesn't look good. So far I am inclined to make it ->dflt,
although let me repeat I agree with any naming and I can use "dfault" as
you suggested.
But. If we rename it, then we should probably also rename
default_xol_ops and its methods accordingly, and I think a
"cosmetic, renames only" patch makes more sense.
OK?
Oleg.
On 04/25, Oleg Nesterov wrote:
>
> On 04/17, Oleg Nesterov wrote:
> >
> > This series only fixes the problem. I'll send more changes later to
> > address some of the TODOs mentioned in the changelogs.
>
> This is probably the last series I am sending in this thread, before the
> next PULL request. This completes the changes which I planned from the
> very beginning, except "emulate jcxz/loopz" which I am still unsure about.
I lied, let me send another short series.

I was not going to do this now, I am a bit tired of uprobes ;) But it
seems that Denys has a very good idea: we can simplify the rip-relative
handling.

However, if we are going to change this code, imho we need to clean it
up first. handle_riprel_insn(), pre_xol_rip_insn() and
handle_riprel_post_xol() look really annoying, imho. Even the naming
asks for the cleanup imo, despite the fact that usually I ignore
naming completely.
Oleg.
arch/x86/kernel/uprobes.c | 56 +++++++++++++++++++++------------------------
1 files changed, 26 insertions(+), 30 deletions(-)
default_pre_xol_op() passes &current->utask->autask to riprel_pre_xol()
and this is just ugly because it still needs to load current->utask to
read ->vaddr.
Remove this argument, change riprel_pre_xol() to use current->utask.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 18 +++++++++---------
1 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index a647808..9f6aba3 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -330,16 +330,17 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
* If we're emulating a rip-relative instruction, save the contents
* of the scratch register and store the target address in that register.
*/
-static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
- struct arch_uprobe_task *autask)
+static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
+ struct uprobe_task *utask = current->utask;
+
if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) {
- autask->saved_scratch_register = regs->ax;
- regs->ax = current->utask->vaddr;
+ utask->autask.saved_scratch_register = regs->ax;
+ regs->ax = utask->vaddr;
regs->ax += auprobe->def.riprel_target;
} else if (auprobe->def.fixups & UPROBE_FIX_RIP_CX) {
- autask->saved_scratch_register = regs->cx;
- regs->cx = current->utask->vaddr;
+ utask->autask.saved_scratch_register = regs->cx;
+ regs->cx = utask->vaddr;
regs->cx += auprobe->def.riprel_target;
}
}
@@ -377,8 +378,7 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
}
-static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
- struct arch_uprobe_task *autask)
+static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
@@ -401,7 +401,7 @@ static inline int sizeof_long(void)
static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
- riprel_pre_xol(auprobe, regs, &current->utask->autask);
+ riprel_pre_xol(auprobe, regs);
return 0;
}
--
1.5.5.1
Ignoring the "correction" logic, riprel_pre_xol() and riprel_post_xol()
are very similar but look quite different.
1. Add the "UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX" check at the start
of riprel_pre_xol(), like the same check in riprel_post_xol().
2. Add the trivial scratch_reg() helper which returns the address of
scratch register pre_xol/post_xol need to change.
3. Change these functions to use the new helper and avoid copy-and-paste
under if/else branches.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 32 +++++++++++++++-----------------
1 files changed, 15 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 9f6aba3..6a9cac1 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -326,22 +326,24 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
}
}
+static inline unsigned long *
+scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ return (auprobe->def.fixups & UPROBE_FIX_RIP_AX) ? &regs->ax : &regs->cx;
+}
+
/*
* If we're emulating a rip-relative instruction, save the contents
* of the scratch register and store the target address in that register.
*/
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
- struct uprobe_task *utask = current->utask;
+ if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
+ struct uprobe_task *utask = current->utask;
+ unsigned long *sr = scratch_reg(auprobe, regs);
- if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) {
- utask->autask.saved_scratch_register = regs->ax;
- regs->ax = utask->vaddr;
- regs->ax += auprobe->def.riprel_target;
- } else if (auprobe->def.fixups & UPROBE_FIX_RIP_CX) {
- utask->autask.saved_scratch_register = regs->cx;
- regs->cx = utask->vaddr;
- regs->cx += auprobe->def.riprel_target;
+ utask->autask.saved_scratch_register = *sr;
+ *sr = utask->vaddr + auprobe->def.riprel_target;
}
}
@@ -349,14 +351,10 @@ static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
long *correction)
{
if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
- struct arch_uprobe_task *autask;
-
- autask = &current->utask->autask;
- if (auprobe->def.fixups & UPROBE_FIX_RIP_AX)
- regs->ax = autask->saved_scratch_register;
- else
- regs->cx = autask->saved_scratch_register;
+ struct uprobe_task *utask = current->utask;
+ unsigned long *sr = scratch_reg(auprobe, regs);
+ *sr = utask->autask.saved_scratch_register;
/*
* The original instruction includes a displacement, and so
* is 4 bytes longer than what we've just single-stepped.
--
1.5.5.1
handle_riprel_insn(), pre_xol_rip_insn() and handle_riprel_post_xol()
look confusing and inconsistent. Rename them to riprel_analyze(),
riprel_pre_xol(), and riprel_post_xol() respectively.
No changes in compiled code.
Signed-off-by: Oleg Nesterov <[email protected]>
---
arch/x86/kernel/uprobes.c | 24 +++++++++++-------------
1 files changed, 11 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 4ae2406..a647808 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -268,8 +268,7 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
* - There's never a SIB byte.
* - The displacement is always 4 bytes.
*/
-static void
-handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
+static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
u8 *cursor;
u8 reg;
@@ -331,8 +330,7 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
* If we're emulating a rip-relative instruction, save the contents
* of the scratch register and store the target address in that register.
*/
-static void
-pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
+static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
struct arch_uprobe_task *autask)
{
if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) {
@@ -346,8 +344,8 @@ pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
}
}
-static void
-handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
+static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
+ long *correction)
{
if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
struct arch_uprobe_task *autask;
@@ -376,14 +374,14 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
/*
* No RIP-relative addressing on 32-bit
*/
-static void handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
+static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
}
-static void pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
+static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
struct arch_uprobe_task *autask)
{
}
-static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
+static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
long *correction)
{
}
@@ -403,7 +401,7 @@ static inline int sizeof_long(void)
static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
- pre_xol_rip_insn(auprobe, regs, &current->utask->autask);
+ riprel_pre_xol(auprobe, regs, &current->utask->autask);
return 0;
}
@@ -423,7 +421,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
struct uprobe_task *utask = current->utask;
long correction = (long)(utask->vaddr - utask->xol_vaddr);
- handle_riprel_post_xol(auprobe, regs, &correction);
+ riprel_post_xol(auprobe, regs, &correction);
if (auprobe->def.fixups & UPROBE_FIX_IP) {
regs->ip += correction;
} else if (auprobe->def.fixups & UPROBE_FIX_CALL) {
@@ -440,7 +438,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
- handle_riprel_post_xol(auprobe, regs, NULL);
+ riprel_post_xol(auprobe, regs, NULL);
}
static struct uprobe_xol_ops default_xol_ops = {
@@ -652,7 +650,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
}
/* fall through */
default:
- handle_riprel_insn(auprobe, &insn);
+ riprel_analyze(auprobe, &insn);
}
auprobe->def.ilen = insn.length;
--
1.5.5.1
* Oleg Nesterov <[email protected]> [2014-04-27 18:52:27]:
> default_pre_xol_op() passes &current->utask->autask to riprel_pre_xol(),
> and this is just ugly because it still needs to load current->utask to
> read ->vaddr.
>
> Remove this argument and change riprel_pre_xol() to use current->utask.
>
> Signed-off-by: Oleg Nesterov <[email protected]>
Acked-by: Srikar Dronamraju <[email protected]>
* Oleg Nesterov <[email protected]> [2014-04-27 18:52:23]:
> handle_riprel_insn(), pre_xol_rip_insn() and handle_riprel_post_xol()
> look confusing and inconsistent. Rename them to riprel_analyze(),
> riprel_pre_xol(), and riprel_post_xol() respectively.
>
> No changes in compiled code.
>
> Signed-off-by: Oleg Nesterov <[email protected]>
Acked-by: Srikar Dronamraju <[email protected]>
* Oleg Nesterov <[email protected]> [2014-04-27 18:52:30]:
> Ignoring the "correction" logic, riprel_pre_xol() and riprel_post_xol()
> are very similar but look quite different.
>
> 1. Add the "UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX" check at the start
> of riprel_pre_xol(), like the same check in riprel_post_xol().
>
> 2. Add the trivial scratch_reg() helper which returns the address of
> the scratch register that pre_xol/post_xol need to change.
>
> 3. Change these functions to use the new helper and avoid copy-and-paste
> under if/else branches.
>
> Signed-off-by: Oleg Nesterov <[email protected]>
Acked-by: Srikar Dronamraju <[email protected]>
* Oleg Nesterov <[email protected]> [2014-04-19 19:01:47]:
> validate_insn_32bits() and validate_insn_64bits() are very similar;
> turn them into a single uprobe_init_insn() which takes an additional
> "bool x86_64" argument that can be passed to insn_init() and used to
> choose between good_insns_64/good_insns_32.
>
> Also kill UPROBE_FIX_NONE, it has no users.
>
> Note: the current code doesn't use ifdef's consistently; good_insns_64
> depends on CONFIG_X86_64 but good_insns_32 is unconditional. This patch
> removes the ifdef around good_insns_64; we will add it back later along
> with a similar one for good_insns_32.
>
> Signed-off-by: Oleg Nesterov <[email protected]>
Acked-by: Srikar Dronamraju <[email protected]>
--
Thanks and Regards
Srikar Dronamraju
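The patch body is not quoted above. As a rough sketch of where this ends
up (the error codes, the insn_get_opcode() call, and the test_bit()-based
lookup are assumptions, not the literal patch), the merged helper could
look like:

static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn,
			    bool x86_64)
{
	u32 volatile *good_insns;

	insn_init(insn, auprobe->insn, x86_64);
	insn_get_opcode(insn);	/* fills in insn->opcode as a side effect */

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	/* the new "bool x86_64" argument selects the opcode whitelist */
	good_insns = x86_64 ? good_insns_64 : good_insns_32;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
		return 0;

	return -ENOTSUPP;
}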
* Oleg Nesterov <[email protected]> [2014-04-19 19:01:51]:
> 1. Extract the ->ia32_compat check from the 64-bit validate_insn_bits()
> into a new helper, is_64bit_mm(); it will have more users.
>
> TODO: this check is actually wrong if the mm owner is an X32 task;
> we need another fix which changes set_personality_ia32().
>
> TODO: even worse, the whole 64-or-32-bit logic is very broken
> and the fix is not simple; we need nontrivial changes in
> the core uprobes code.
>
> 2. Kill validate_insn_bits() and change its single caller to use
> uprobe_init_insn(is_64bit_mm(mm)).
>
> Signed-off-by: Oleg Nesterov <[email protected]>
Acked-by: Srikar Dronamraju <[email protected]>
--
Thanks and Regards
Srikar Dronamraju
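is_64bit_mm() itself only shows up as diff context elsewhere in this
thread; presumably it wraps the ->ia32_compat check along these lines
(a sketch; the config_enabled() spelling is an assumption):

static inline bool is_64bit_mm(struct mm_struct *mm)
{
	/* without compat support, every mm is 64-bit */
	return !config_enabled(CONFIG_IA32_EMULATION) ||
		!mm->context.ia32_compat;
}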
* Oleg Nesterov <[email protected]> [2014-04-19 19:01:55]:
> Change uprobe_init_insn() to make insn_complete() == T; this makes
> the other insn_get_*() calls unnecessary.
>
> Signed-off-by: Oleg Nesterov <[email protected]>
Acked-by: Srikar Dronamraju <[email protected]>
--
Thanks and Regards
Srikar Dronamraju
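insn_get_length() decodes all of the preceding fields as a side effect,
so the change presumably amounts to something like this at the top of
uprobe_init_insn() (a sketch; the error code is a guess):

	insn_init(insn, auprobe->insn, x86_64);
	/* insn_get_length() decodes prefixes, opcode, modrm, sib, ... */
	insn_get_length(insn);
	if (!insn_complete(insn))
		return -ENOEXEC;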
* Oleg Nesterov <[email protected]> [2014-04-19 19:01:59]:
> Add suitable ifdef's around the good_insns_* arrays. We do not want
> to add ugly ifdef's to their only user, uprobe_init_insn(), so
> the "#else" branch simply defines them as NULL. This doesn't generate
> extra code, gcc is smart enough, although the code is fine even if
> gcc could not detect that (without CONFIG_IA32_EMULATION) is_64bit_mm()
> is constant (__builtin_constant_p()).
>
> The patch looks more complicated because it also moves good_insns_64
> up close to good_insns_32.
>
> Signed-off-by: Oleg Nesterov <[email protected]>
Acked-by: Srikar Dronamraju <[email protected]>
> ---
--
Thanks and Regards
Srikar Dronamraju
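The structure being described, sketched below; the exact config symbols
guarding good_insns_32 are an assumption and the opcode bitmaps
themselves are elided:

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static volatile u32 good_insns_32[256 / 32] = {
	/* opcode bitmap elided */
};
#else
#define good_insns_32	NULL
#endif

#ifdef CONFIG_X86_64
static volatile u32 good_insns_64[256 / 32] = {
	/* opcode bitmap elided */
};
#else
#define good_insns_64	NULL
#endif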
* Oleg Nesterov <[email protected]> [2014-04-19 19:02:02]:
> is_64bit_mm() assumes that mm->context.ia32_compat means the 32-bit
> instruction set; this is not true if the task is TIF_X32.
>
> Change set_personality_ia32() to initialize mm->context.ia32_compat
> to TIF_X32 or TIF_IA32 instead of 1. This makes it possible to fix
> is_64bit_mm() without affecting other users; they all treat
> ia32_compat as "bool".
>
> Storing a TIF_ value in ->ia32_compat looks a bit strange, but this is
> grep-friendly and avoids adding new defines.
>
> Signed-off-by: Oleg Nesterov <[email protected]>
> ---
Acked-by: Srikar Dronamraju <[email protected]>
--
Thanks and Regards
Srikar Dronamraju
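In other words, roughly (a sketch, not the literal patch; the "bool x32"
parameter name and the config_enabled() spelling are assumptions):

/* in set_personality_ia32(bool x32): */
if (current->mm)
	current->mm->context.ia32_compat = x32 ? TIF_X32 : TIF_IA32;

/* is_64bit_mm() can then treat only TIF_IA32 as 32-bit */
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return !config_enabled(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}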
On Mon, 2014-04-28 at 12:06 +0530, Srikar Dronamraju wrote:
> * Oleg Nesterov <[email protected]> [2014-04-27 18:52:30]:
>
> > Ignoring the "correction" logic, riprel_pre_xol() and riprel_post_xol()
> > are very similar but look quite different.
> >
> > 1. Add the "UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX" check at the start
> > of riprel_pre_xol(), like the same check in riprel_post_xol().
> >
> > 2. Add the trivial scratch_reg() helper which returns the address of
> > the scratch register that pre_xol/post_xol need to change.
> >
> > 3. Change these functions to use the new helper and avoid copy-and-paste
> > under if/else branches.
> >
> > Signed-off-by: Oleg Nesterov <[email protected]>
>
> Acked-by: Srikar Dronamraju <[email protected]>
>
Reviewed-by: Jim Keniston <[email protected]>
On Mon, 2014-04-28 at 12:04 +0530, Srikar Dronamraju wrote:
> * Oleg Nesterov <[email protected]> [2014-04-27 18:52:23]:
>
> > handle_riprel_insn(), pre_xol_rip_insn() and handle_riprel_post_xol()
> > look confusing and inconsistent. Rename them to riprel_analyze(),
> > riprel_pre_xol(), and riprel_post_xol() respectively.
> >
> > No changes in compiled code.
> >
> > Signed-off-by: Oleg Nesterov <[email protected]>
>
> Acked-by: Srikar Dronamraju <[email protected]>
>
Reviewed-by: Jim Keniston <[email protected]>
On Mon, 2014-04-28 at 12:05 +0530, Srikar Dronamraju wrote:
> * Oleg Nesterov <[email protected]> [2014-04-27 18:52:27]:
>
> > default_pre_xol_op() passes &current->utask->autask to riprel_pre_xol(),
> > and this is just ugly because it still needs to load current->utask to
> > read ->vaddr.
> >
> > Remove this argument and change riprel_pre_xol() to use current->utask.
> >
> > Signed-off-by: Oleg Nesterov <[email protected]>
>
> Acked-by: Srikar Dronamraju <[email protected]>
>
Reviewed-by: Jim Keniston <[email protected]>