2007-12-17 22:27:53

by Harvey Harrison

Subject: [PATCH 3/4] x86: add kprobe-booster to X86_64

Sorry, I missed an #endif in this patch in the following hunk:

@@ -183,6 +185,9 @@ retry:
}

switch (opcode & 0xf0) {
+#ifdef X86_64
+ case 0x40:
+ goto retry; /* REX prefix is boostable */
case 0x60:
if (0x63 < opcode && opcode < 0x67)
goto retry; /* prefixes */

Just add the #endif so that the #ifdef covers only the 0x40 case:

@@ -183,6 +185,10 @@ retry:
}

switch (opcode & 0xf0) {
+#ifdef X86_64
+ case 0x40:
+ goto retry; /* REX prefix is boostable */
+#endif
case 0x60:
if (0x63 < opcode && opcode < 0x67)
goto retry; /* prefixes */
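
(Aside: because the switch is on opcode & 0xf0, the single case 0x40
covers the entire REX prefix range 0x40-0x4f, which has prefix meaning
only in 64-bit mode. A standalone sketch of the same test in plain C,
just to illustrate the masking:)

#include <stdint.h>

/* Masking off the low nibble lets one case label match all sixteen
 * REX prefixes, 0x40 through 0x4f (meaningful only in 64-bit mode). */
static int is_rex_prefix(uint8_t opcode)
{
	return (opcode & 0xf0) == 0x40;
}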

Cheers,

Harvey


2007-12-18 11:30:54

by Ingo Molnar

Subject: Re: [PATCH 3/4] x86: add kprobe-booster to X86_64


* Harvey Harrison <[email protected]> wrote:

> Sorry, I missed an #endif in this patch in the following hunk:

could you resend your kprobes cleanups against current x86.git? They
have been conceptually acked by Masami. This cuts out the unification
part of your queue, which is bad luck, but the effort has been duplicated
already so there's not much we can do about it i guess.

Your other 17 cleanup and unification patches are still queued up in
x86.git and passed a lot of testing, so they will likely go into
v2.6.25. Nice work!

Ingo

2007-12-18 11:42:50

by Harvey Harrison

Subject: Re: [PATCH 3/4] x86: add kprobe-booster to X86_64

On Tue, 2007-12-18 at 12:29 +0100, Ingo Molnar wrote:
> * Harvey Harrison <[email protected]> wrote:
>
> > Sorry, I missed an #endif in this patch in the following hunk:
>
> could you resend your kprobes cleanups against current x86.git? They
> have been conceptually acked by Masami. This cuts out the unification
> part of your queue, which is bad luck, but the effort has been duplicated
> already so there's not much we can do about it i guess.
>
> Your other 17 cleanup and unification patches are still queued up in
> x86.git and passed a lot of testing, so they will likely go into
> v2.6.25. Nice work!
>
> Ingo

Ingo,

I'd suggest just tossing my kprobes cleanups. I've sent you a rollup
of whatever was left in mine that was still worthwhile after Masami's
work, included below for reference. It didn't amount to much, so I
rolled it all together:

Subject: [PATCH] x86: kprobes leftover cleanups

Eliminate __always_inline; all of these static functions are
only called once. Minor whitespace cleanup. Eliminate one
superfluous return at the end of a void function. Reverse the
sense of #ifndef to #ifdef to show the case only affects X86_32.

Signed-off-by: Harvey Harrison <[email protected]>
---
arch/x86/kernel/kprobes.c | 14 ++++++--------
1 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 9aadd4d..1a0d96d 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -159,7 +159,7 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-static __always_inline void set_jmp_op(void *from, void *to)
+static void set_jmp_op(void *from, void *to)
{
struct __arch_jmp_op {
char op;
@@ -174,7 +174,7 @@ static __always_inline void set_jmp_op(void *from, void *to)
* Returns non-zero if opcode is boostable.
* RIP relative instructions are adjusted at copying time in 64 bits mode
*/
-static __always_inline int can_boost(kprobe_opcode_t *opcodes)
+static int can_boost(kprobe_opcode_t *opcodes)
{
kprobe_opcode_t opcode;
kprobe_opcode_t *orig_opcodes = opcodes;
@@ -392,13 +392,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
kcb->kprobe_saved_flags &= ~IF_MASK;
}

-static __always_inline void clear_btf(void)
+static void clear_btf(void)
{
if (test_thread_flag(TIF_DEBUGCTLMSR))
wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
}

-static __always_inline void restore_btf(void)
+static void restore_btf(void)
{
if (test_thread_flag(TIF_DEBUGCTLMSR))
wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
@@ -409,7 +409,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
clear_btf();
regs->flags |= TF_MASK;
regs->flags &= ~IF_MASK;
- /*single step inline if the instruction is an int3*/
+ /* single step inline if the instruction is an int3 */
if (p->opcode == BREAKPOINT_INSTRUCTION)
regs->ip = (unsigned long)p->addr;
else
@@ -767,7 +767,7 @@ static void __kprobes resume_execution(struct kprobe *p,
case 0xe8: /* call relative - Fix return addr */
*tos = orig_ip + (*tos - copy_ip);
break;
-#ifndef CONFIG_X86_64
+#ifdef CONFIG_X86_32
case 0x9a: /* call absolute -- same as call absolute, indirect */
*tos = orig_ip + (*tos - copy_ip);
goto no_change;
@@ -813,8 +813,6 @@ static void __kprobes resume_execution(struct kprobe *p,

no_change:
restore_btf();
-
- return;
}

/*
--
1.5.4.rc0.1143.g1a8a


2007-12-18 13:52:10

by Masami Hiramatsu

Subject: Re: [PATCH 3/4] x86: add kprobe-booster to X86_64

Hi Harvey,

Thank you for cleaning this up.

Harvey Harrison wrote:
> Subject: [PATCH] x86: kprobes leftover cleanups
>
> Eliminate __always_inline; all of these static functions are
> only called once. Minor whitespace cleanup. Eliminate one
> superfluous return at the end of a void function. Reverse the
> sense of #ifndef to #ifdef to show the case only affects X86_32.

Unfortunately, to prevent recursive kprobe calls, all functions
which are called from kprobes must be inlined or marked __kprobes.
If the __always_inline macro still works, I prefer to use it. If
not, they must have a __kprobes attribute, as below.
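
(For reference: the recursion is avoided by keeping everything kprobes
executes inside a single linker section that kprobes itself refuses to
probe. A minimal sketch of the marker, as include/linux/kprobes.h of
this era defines it, if I recall correctly:)

/* Functions marked __kprobes are linked into the dedicated
 * .kprobes.text section, which the kprobes core treats as a
 * no-probe zone; probing them could recurse endlessly. */
#define __kprobes	__attribute__((__section__(".kprobes.text")))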

> Signed-off-by: Harvey Harrison <[email protected]>
> ---
> arch/x86/kernel/kprobes.c | 14 ++++++--------
> 1 files changed, 6 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
> index 9aadd4d..1a0d96d 100644
> --- a/arch/x86/kernel/kprobes.c
> +++ b/arch/x86/kernel/kprobes.c
> @@ -159,7 +159,7 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
> const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
>
> /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
> -static __always_inline void set_jmp_op(void *from, void *to)
> +static void set_jmp_op(void *from, void *to)

+static void __kprobes set_jmp_op(void *from, void *to)

> {
> struct __arch_jmp_op {
> char op;
> @@ -174,7 +174,7 @@ static __always_inline void set_jmp_op(void *from, void *to)
> * Returns non-zero if opcode is boostable.
> * RIP relative instructions are adjusted at copying time in 64 bits mode
> */
> -static __always_inline int can_boost(kprobe_opcode_t *opcodes)
> +static int can_boost(kprobe_opcode_t *opcodes)

+static int __kprobes can_boost(kprobe_opcode_t *opcodes)


> {
> kprobe_opcode_t opcode;
> kprobe_opcode_t *orig_opcodes = opcodes;
> @@ -392,13 +392,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
> kcb->kprobe_saved_flags &= ~IF_MASK;
> }
>
> -static __always_inline void clear_btf(void)
> +static void clear_btf(void)

+static void __kprobes clear_btf(void)

> {
> if (test_thread_flag(TIF_DEBUGCTLMSR))
> wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
> }
>
> -static __always_inline void restore_btf(void)
> +static void restore_btf(void)

+static void __kprobes restore_btf(void)

> {
> if (test_thread_flag(TIF_DEBUGCTLMSR))
> wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
> @@ -409,7 +409,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
> clear_btf();
> regs->flags |= TF_MASK;
> regs->flags &= ~IF_MASK;
> - /*single step inline if the instruction is an int3*/
> + /* single step inline if the instruction is an int3 */
> if (p->opcode == BREAKPOINT_INSTRUCTION)
> regs->ip = (unsigned long)p->addr;
> else
> @@ -767,7 +767,7 @@ static void __kprobes resume_execution(struct kprobe *p,
> case 0xe8: /* call relative - Fix return addr */
> *tos = orig_ip + (*tos - copy_ip);
> break;
> -#ifndef CONFIG_X86_64
> +#ifdef CONFIG_X86_32
> case 0x9a: /* call absolute -- same as call absolute, indirect */
> *tos = orig_ip + (*tos - copy_ip);
> goto no_change;
> @@ -813,8 +813,6 @@ static void __kprobes resume_execution(struct kprobe *p,
>
> no_change:
> restore_btf();
> -
> - return;
> }
>
> /*

Thanks again!

--
Masami Hiramatsu

Software Engineer
Hitachi Computer Products (America) Inc.
Software Solutions Division

e-mail: [email protected], [email protected]

2007-12-18 14:00:58

by Ingo Molnar

Subject: Re: [PATCH 3/4] x86: add kprobe-booster to X86_64


* Harvey Harrison <[email protected]> wrote:

> On Tue, 2007-12-18 at 12:29 +0100, Ingo Molnar wrote:
> > * Harvey Harrison <[email protected]> wrote:
> >
> > > Sorry, I missed an #endif in this patch in the following hunk:
> >
> > could you resend your kprobes cleanups against current x86.git? They
> > have been conceptually acked by Masami. This cuts out the unification
> > part of your queue, which is bad luck, but the effort has been duplicated
> > already so there's not much we can do about it i guess.
> >
> > Your other 17 cleanup and unification patches are still queued up in
> > x86.git and passed a lot of testing, so they will likely go into
> > v2.6.25. Nice work!
> >
> > Ingo
>
> Ingo,
>
> I'd suggest just tossing my kprobes cleanups. I've sent you a rollup
> of whatever was left in mine that was still worthwhile after Masami's
> work, included below for reference. It didn't amount to much, so I
> rolled it all together:
>
> Subject: [PATCH] x86: kprobes leftover cleanups
>
> Eliminate __always_inline; all of these static functions are
> only called once. Minor whitespace cleanup. Eliminate one
> superfluous return at the end of a void function. Reverse the
> sense of #ifndef to #ifdef to show the case only affects X86_32.

thanks, i've applied them.

Ingo

2007-12-19 02:30:18

by Harvey Harrison

Subject: Re: [PATCH 3/4] x86: add kprobe-booster to X86_64

On Tue, 2007-12-18 at 08:50 -0500, Masami Hiramatsu wrote:
> Hi Harvey,
>
> Thank you for cleaning this up.
>
> Harvey Harrison wrote:
> > Subject: [PATCH] x86: kprobes leftover cleanups
> >
> > Eliminate __always_inline; all of these static functions are
> > only called once. Minor whitespace cleanup. Eliminate one
> > superfluous return at the end of a void function. Reverse the
> > sense of #ifndef to #ifdef to show the case only affects X86_32.
>
> Unfortunately, to prevent recursive kprobe calls, all functions
> which are called from kprobes must be inlined or marked __kprobes.
> If the __always_inline macro still works, I prefer to use it. If
> not, they must have a __kprobes attribute, as below.

I thought all static functions that were only called once were
automatically inlined these days? Otherwise __always_inline and
inline are exactly the same in the kernel.
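
(For reference, a sketch of the __always_inline definition from
include/linux/compiler-gcc.h of this era, if I recall correctly; plain
inline is only a hint to gcc, while __always_inline forces the issue,
and gcc 4's unit-at-a-time mode will usually inline a static function
that has a single caller even without any hint:)

/* Plain inline may be ignored by gcc; this attribute may not. */
#define __always_inline	inline __attribute__((always_inline))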

Harvey

2007-12-19 04:50:19

by Masami Hiramatsu

Subject: Re: [PATCH 3/4] x86: add kprobe-booster to X86_64

Harvey Harrison wrote:
> On Tue, 2007-12-18 at 08:50 -0500, Masami Hiramatsu wrote:
>> Hi Harvey,
>>
>> Thank you for cleaning this up.
>>
>> Harvey Harrison wrote:
>>> Subject: [PATCH] x86: kprobes leftover cleanups
>>>
>>> Eliminate __always_inline; all of these static functions are
>>> only called once. Minor whitespace cleanup. Eliminate one
>>> superfluous return at the end of a void function. Reverse the
>>> sense of #ifndef to #ifdef to show the case only affects X86_32.
>> Unfortunately, to prevent recursive kprobe calls, all functions
>> which are called from kprobes must be inlined or marked __kprobes.
>> If the __always_inline macro still works, I prefer to use it. If
>> not, they must have a __kprobes attribute, as below.
>
> I thought all static functions that were only called once were
> automatically inlined these days? Otherwise __always_inline and
> inline are exactly the same in the kernel.

Yes, currently they will be inlined (though not obviously so).
However, IMHO, that is not fail-safe coding.

I think we had better look out for someone who modifies the code
in the future. If they call those functions from somewhere else,
the functions will not be inlined, and may be placed outside of
.kprobes.text. In that case, we cannot prevent kprobes from being
inserted into those functions.

Thus, I recommend you add __kprobes to those functions.
That indicates which functions will be used by kprobes and gives
hints on how to write functions which will be called from kprobes.
(And it also simplifies the coding rules.)
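
(The out-of-section risk is concrete: registration refuses only
addresses that lie inside .kprobes.text. A rough sketch of the check,
close to what kernel/kprobes.c does in kernels of this era:)

/* Rough sketch of the registration-time check: probe addresses
 * inside .kprobes.text are rejected; anything the compiler places
 * outside that section is fair game for a probe. */
static int in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}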

Thank you,

>
> Harvey
>

--
Masami Hiramatsu

Software Engineer
Hitachi Computer Products (America) Inc.
Software Solutions Division

e-mail: [email protected], [email protected]

2007-12-19 05:21:46

by Harvey Harrison

Subject: Re: [PATCH 3/4] x86: add kprobe-booster to X86_64

On Tue, 2007-12-18 at 23:43 -0500, Masami Hiramatsu wrote:
> Harvey Harrison wrote:
> > On Tue, 2007-12-18 at 08:50 -0500, Masami Hiramatsu wrote:
> >> Hi Harvey,
> >>
> >> Thank you for cleaning this up.
> >>
> >> Harvey Harrison wrote:
> >>> Subject: [PATCH] x86: kprobes leftover cleanups
> >>>
> >>> Eliminate __always_inline; all of these static functions are
> >>> only called once. Minor whitespace cleanup. Eliminate one
> >>> superfluous return at the end of a void function. Reverse the
> >>> sense of #ifndef to #ifdef to show the case only affects X86_32.
> >> Unfortunately, to prevent recursive kprobe calls, all functions
> >> which are called from kprobes must be inlined or marked __kprobes.
> >> If the __always_inline macro still works, I prefer to use it. If
> >> not, they must have a __kprobes attribute, as below.
> >
> > I thought all static functions that were only called once were
> > automatically inlined these days? Otherwise __always_inline and
> > inline are exactly the same in the kernel.
>
> Yes, currently they will be inlined (though not obviously so).
> However, IMHO, that is not fail-safe coding.
>

Fair enough; you seem to have a deeper understanding of the code than
I do. I'd suggest __kprobes as the better annotation for this purpose,
though.

> I think we had better look out for someone who modifies the code
> in the future. If they call those functions from somewhere else,
> the functions will not be inlined, and may be placed outside of
> .kprobes.text. In that case, we cannot prevent kprobes from being
> inserted into those functions.
>
> Thus, I recommend you add __kprobes to those functions.
> That indicates which functions will be used by kprobes and gives
> hints on how to write functions which will be called from kprobes.
> (And it also simplifies the coding rules.)

Patch forthcoming.

Harvey

2007-12-19 05:27:19

by Harvey Harrison

Subject: [PATCH] x86: __kprobes annotations

The __always_inline on some static functions was there to ensure they
ended up in the .kprobes.text section by being inlined into their
__kprobes callers. Mark them explicitly with __kprobes instead.

Signed-off-by: Harvey Harrison <[email protected]>
---
arch/x86/kernel/kprobes.c | 8 ++++----
1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index c7a26be..521a469 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -159,7 +159,7 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-static void set_jmp_op(void *from, void *to)
+static void __kprobes set_jmp_op(void *from, void *to)
{
struct __arch_jmp_op {
char op;
@@ -174,7 +174,7 @@ static void set_jmp_op(void *from, void *to)
* Returns non-zero if opcode is boostable.
* RIP relative instructions are adjusted at copying time in 64 bits mode
*/
-static int can_boost(kprobe_opcode_t *opcodes)
+static int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
kprobe_opcode_t opcode;
kprobe_opcode_t *orig_opcodes = opcodes;
@@ -392,13 +392,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

-static void clear_btf(void)
+static void __kprobes clear_btf(void)
{
if (test_thread_flag(TIF_DEBUGCTLMSR))
wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
}

-static void restore_btf(void)
+static void __kprobes restore_btf(void)
{
if (test_thread_flag(TIF_DEBUGCTLMSR))
wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
--
1.5.4.rc0.1143.g1a8a


2007-12-19 05:47:18

by Masami Hiramatsu

Subject: Re: [PATCH] x86: __kprobes annotations

Harvey Harrison wrote:
> The __always_inline on some static functions was there to ensure they
> ended up in the .kprobes.text section by being inlined into their
> __kprobes callers. Mark them explicitly with __kprobes instead.

It looks good to me.
Thanks!

>
> Signed-off-by: Harvey Harrison <[email protected]>
Acked-by: Masami Hiramatsu <[email protected]>

> ---
> arch/x86/kernel/kprobes.c | 8 ++++----
> 1 files changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
> index c7a26be..521a469 100644
> --- a/arch/x86/kernel/kprobes.c
> +++ b/arch/x86/kernel/kprobes.c
> @@ -159,7 +159,7 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
> const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
>
> /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
> -static void set_jmp_op(void *from, void *to)
> +static void __kprobes set_jmp_op(void *from, void *to)
> {
> struct __arch_jmp_op {
> char op;
> @@ -174,7 +174,7 @@ static void set_jmp_op(void *from, void *to)
> * Returns non-zero if opcode is boostable.
> * RIP relative instructions are adjusted at copying time in 64 bits mode
> */
> -static int can_boost(kprobe_opcode_t *opcodes)
> +static int __kprobes can_boost(kprobe_opcode_t *opcodes)
> {
> kprobe_opcode_t opcode;
> kprobe_opcode_t *orig_opcodes = opcodes;
> @@ -392,13 +392,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
> kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
> }
>
> -static void clear_btf(void)
> +static void __kprobes clear_btf(void)
> {
> if (test_thread_flag(TIF_DEBUGCTLMSR))
> wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
> }
>
> -static void restore_btf(void)
> +static void __kprobes restore_btf(void)
> {
> if (test_thread_flag(TIF_DEBUGCTLMSR))
> wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);

--
Masami Hiramatsu

Software Engineer
Hitachi Computer Products (America) Inc.
Software Solutions Division

e-mail: [email protected], [email protected]

2007-12-19 09:28:24

by Ingo Molnar

Subject: Re: [PATCH] x86: __kprobes annotations


* Harvey Harrison <[email protected]> wrote:

> The __always_inline on some static functions was there to ensure they
> ended up in the .kprobes.text section by being inlined into their
> __kprobes callers. Mark them explicitly with __kprobes instead.

thanks, applied. I rolled this back into your cleanup patch to make sure
we have a correct, bisectable kernel at every commit point.
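
(Folding a fixup back into an earlier commit like this is typically
done with an interactive rebase; a sketch, where both the range and
the "squash" choice are illustrative:

    git rebase -i HEAD~2    # mark the __kprobes fixup as "squash"

That keeps every commit in the series building and booting on its own.)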

Ingo