2023-11-03 10:49:34

by Uros Bizjak

Subject: [PATCH] x86/acpi: Use %rip-relative addressing in wakeup_64.S

An instruction with a %rip-relative address operand is one byte shorter
than its absolute-address counterpart and is also compatible with
position-independent executable (-fpie) builds.

No functional changes intended.
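
For illustration, here is how the two addressing forms encode (a minimal
standalone sketch for GNU as, not part of the patch; "magic" and
"encoding_demo" are made-up stand-ins for the kernel symbols, and the xx
displacement bytes are filled in by the linker):

	.data
magic:	.quad	0

	.text
	.globl	encoding_demo
encoding_demo:
	# absolute (sign-extended 32-bit displacement) form, 8 bytes:
	#   48 8b 04 25 xx xx xx xx
	movq	magic, %rax

	# %rip-relative form, 7 bytes and position independent:
	#   48 8b 05 xx xx xx xx
	movq	magic(%rip), %rax
	ret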

Cc: "Rafael J. Wysocki" <[email protected]>
Cc: Len Brown <[email protected]>
Cc: Pavel Machek <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Signed-off-by: Uros Bizjak <[email protected]>
---
arch/x86/kernel/acpi/wakeup_64.S | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index d5d8a352eafa..94ff83f3d3fe 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -17,7 +17,7 @@
* Hooray, we are in Long 64-bit mode (but still running in low memory)
*/
SYM_FUNC_START(wakeup_long64)
- movq saved_magic, %rax
+ movq saved_magic(%rip), %rax
movq $0x123456789abcdef0, %rdx
cmpq %rdx, %rax
je 2f
@@ -33,14 +33,14 @@ SYM_FUNC_START(wakeup_long64)
movw %ax, %es
movw %ax, %fs
movw %ax, %gs
- movq saved_rsp, %rsp
+ movq saved_rsp(%rip), %rsp

- movq saved_rbx, %rbx
- movq saved_rdi, %rdi
- movq saved_rsi, %rsi
- movq saved_rbp, %rbp
+ movq saved_rbx(%rip), %rbx
+ movq saved_rdi(%rip), %rdi
+ movq saved_rsi(%rip), %rsi
+ movq saved_rbp(%rip), %rbp

- movq saved_rip, %rax
+ movq saved_rip(%rip), %rax
ANNOTATE_RETPOLINE_SAFE
jmp *%rax
SYM_FUNC_END(wakeup_long64)
@@ -72,11 +72,11 @@ SYM_FUNC_START(do_suspend_lowlevel)

movq $.Lresume_point, saved_rip(%rip)

- movq %rsp, saved_rsp
- movq %rbp, saved_rbp
- movq %rbx, saved_rbx
- movq %rdi, saved_rdi
- movq %rsi, saved_rsi
+ movq %rsp, saved_rsp(%rip)
+ movq %rbp, saved_rbp(%rip)
+ movq %rbx, saved_rbx(%rip)
+ movq %rdi, saved_rdi(%rip)
+ movq %rsi, saved_rsi(%rip)

addq $8, %rsp
movl $3, %edi
--
2.41.0


2023-11-06 14:14:52

by Rafael J. Wysocki

Subject: Re: [PATCH] x86/acpi: Use %rip-relative addressing in wakeup_64.S

On Fri, Nov 3, 2023 at 11:49 AM Uros Bizjak <[email protected]> wrote:
>
> An instruction with a %rip-relative address operand is one byte shorter
> than its absolute-address counterpart and is also compatible with
> position-independent executable (-fpie) builds.
>
> No functional changes intended.

I'm wondering what the exact motivation for making this change is.

There doesn't seem to be any urgent need for it.


2023-11-06 14:26:45

by Uros Bizjak

Subject: Re: [PATCH] x86/acpi: Use %rip-relative addressing in wakeup_64.S

On Mon, Nov 6, 2023 at 3:14 PM Rafael J. Wysocki <[email protected]> wrote:
>
> On Fri, Nov 3, 2023 at 11:49 AM Uros Bizjak <[email protected]> wrote:
> >
> > An instruction with a %rip-relative address operand is one byte shorter
> > than its absolute-address counterpart and is also compatible with
> > position-independent executable (-fpie) builds.
> >
> > No functional changes intended.
>
> I'm wondering what the exact motivation for making this change is.

Mainly to be consistent with what the compiler emits by default when a
symbol is accessed. As mentioned in the commit message, the %rip-relative
access is also one byte shorter and results in position-independent code.
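
For example, a C function that just returns a global, say "long val; long
get_val(void) { return val; }", built with gcc -O2 on x86-64 already comes
out in the %rip-relative form (a sketch with made-up names; the exact
output varies with compiler version and flags):

	get_val:
		movq	val(%rip), %rax
		ret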

> There doesn't seem to be any urgent need for it.

True. It's mostly a nice-to-have change.

Thanks,
Uros.


2023-11-06 15:10:01

by Rafael J. Wysocki

Subject: Re: [PATCH] x86/acpi: Use %rip-relative addressing in wakeup_64.S

On Mon, Nov 6, 2023 at 3:25 PM Uros Bizjak <[email protected]> wrote:
>
> On Mon, Nov 6, 2023 at 3:14 PM Rafael J. Wysocki <[email protected]> wrote:
> >
> > On Fri, Nov 3, 2023 at 11:49 AM Uros Bizjak <[email protected]> wrote:
> > >
> > > An instruction with a %rip-relative address operand is one byte shorter
> > > than its absolute-address counterpart and is also compatible with
> > > position-independent executable (-fpie) builds.
> > >
> > > No functional changes intended.
> >
> > I'm wondering what the exact motivation for making this change is.
>
> Mainly to be consistent with what the compiler emits by default when a
> symbol is accessed. As mentioned in the commit message, the %rip-relative
> access is also one byte shorter and results in position-independent code.
>
> > There doesn't seem to be any urgent need for it.
>
> True. It's mostly a nice-to-have change.

OK, so

Acked-by: Rafael J. Wysocki <[email protected]>

and the decision on what to do with it is up to the x86 folks.


Subject: [tip: x86/percpu] x86/acpi: Use %rip-relative addressing in wakeup_64.S

The following commit has been merged into the x86/percpu branch of tip:

Commit-ID: 0978d64f9406122c369d5f46e1eb855646f6c32c
Gitweb: https://git.kernel.org/tip/0978d64f9406122c369d5f46e1eb855646f6c32c
Author: Uros Bizjak <[email protected]>
AuthorDate: Fri, 03 Nov 2023 11:48:22 +01:00
Committer: Ingo Molnar <[email protected]>
CommitterDate: Thu, 30 Nov 2023 20:09:49 +01:00

x86/acpi: Use %rip-relative addressing in wakeup_64.S

This is a "nice-to-have" change with minor code generation benefits:

- an instruction with a %rip-relative address operand is one byte shorter
  than its absolute-address counterpart,

- it is also compatible with position-independent executable (-fpie) builds,

- it is also consistent with what the compiler emits by default when
  a symbol is accessed.

No functional changes intended.

Signed-off-by: Uros Bizjak <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Acked-by: Rafael J. Wysocki <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
---
arch/x86/kernel/acpi/wakeup_64.S | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index d5d8a35..94ff83f 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -17,7 +17,7 @@
* Hooray, we are in Long 64-bit mode (but still running in low memory)
*/
SYM_FUNC_START(wakeup_long64)
- movq saved_magic, %rax
+ movq saved_magic(%rip), %rax
movq $0x123456789abcdef0, %rdx
cmpq %rdx, %rax
je 2f
@@ -33,14 +33,14 @@ SYM_FUNC_START(wakeup_long64)
movw %ax, %es
movw %ax, %fs
movw %ax, %gs
- movq saved_rsp, %rsp
+ movq saved_rsp(%rip), %rsp

- movq saved_rbx, %rbx
- movq saved_rdi, %rdi
- movq saved_rsi, %rsi
- movq saved_rbp, %rbp
+ movq saved_rbx(%rip), %rbx
+ movq saved_rdi(%rip), %rdi
+ movq saved_rsi(%rip), %rsi
+ movq saved_rbp(%rip), %rbp

- movq saved_rip, %rax
+ movq saved_rip(%rip), %rax
ANNOTATE_RETPOLINE_SAFE
jmp *%rax
SYM_FUNC_END(wakeup_long64)
@@ -72,11 +72,11 @@ SYM_FUNC_START(do_suspend_lowlevel)

movq $.Lresume_point, saved_rip(%rip)

- movq %rsp, saved_rsp
- movq %rbp, saved_rbp
- movq %rbx, saved_rbx
- movq %rdi, saved_rdi
- movq %rsi, saved_rsi
+ movq %rsp, saved_rsp(%rip)
+ movq %rbp, saved_rbp(%rip)
+ movq %rbx, saved_rbx(%rip)
+ movq %rdi, saved_rdi(%rip)
+ movq %rsi, saved_rsi(%rip)

addq $8, %rsp
movl $3, %edi