Update the trampoline code so its arguments are passed via RDI and RSI,
which matches the ordinary SysV calling convention for x86_64. This allows
the code to be called directly from C.
Acked-by: Kirill A. Shutemov <[email protected]>
Signed-off-by: Ard Biesheuvel <[email protected]>
---
arch/x86/boot/compressed/head_64.S | 26 +++++++++-----------
arch/x86/boot/compressed/pgtable.h | 2 +-
2 files changed, 12 insertions(+), 16 deletions(-)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 1892679be2b6897a..491d985be75fd5b0 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -443,9 +443,9 @@ SYM_CODE_START(startup_64)
movq %r15, %rdi /* pass struct boot_params pointer */
call paging_prepare
- /* Save the trampoline address in RCX */
- movq %rax, %rcx
-
+ /* Pass the trampoline address and boolean flag as args #1 and #2 */
+ movq %rax, %rdi
+ movq %rdx, %rsi
leaq TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax
call *%rax
@@ -534,15 +534,15 @@ SYM_FUNC_END(.Lrelocated)
/*
* This is the 32-bit trampoline that will be copied over to low memory.
*
- * ECX contains the base address of the trampoline memory.
- * Non zero RDX means trampoline needs to enable 5-level paging.
+ * EDI contains the base address of the trampoline memory.
+ * Non-zero ESI means trampoline needs to enable 5-level paging.
*/
SYM_CODE_START(trampoline_32bit_src)
/* Grab return address */
movq (%rsp), %rax
/* Set up 32-bit addressable stack */
- leaq TRAMPOLINE_32BIT_STACK_END(%rcx), %rsp
+ leaq TRAMPOLINE_32BIT_STACK_END(%rdi), %rsp
/* Preserve return address and other live 64-bit registers */
pushq %rax
@@ -569,7 +569,7 @@ SYM_CODE_START(trampoline_32bit_src)
movl %eax, %cr0
/* Check what paging mode we want to be in after the trampoline */
- testl %edx, %edx
+ testl %esi, %esi
jz 1f
/* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */
@@ -584,21 +584,17 @@ SYM_CODE_START(trampoline_32bit_src)
jz 3f
2:
/* Point CR3 to the trampoline's new top level page table */
- leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
+ leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%edi), %eax
movl %eax, %cr3
3:
 /* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
- pushl %ecx
- pushl %edx
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_LME, %eax
/* Avoid writing EFER if no change was made (for TDX guest) */
jc 1f
wrmsr
-1: popl %edx
- popl %ecx
-
+1:
#ifdef CONFIG_X86_MCE
/*
* Preserve CR4.MCE if the kernel will enable #MC support.
@@ -615,14 +611,14 @@ SYM_CODE_START(trampoline_32bit_src)
/* Enable PAE and LA57 (if required) paging modes */
orl $X86_CR4_PAE, %eax
- testl %edx, %edx
+ testl %esi, %esi
jz 1f
orl $X86_CR4_LA57, %eax
1:
movl %eax, %cr4
/* Calculate address of paging_enabled() once we are executing in the trampoline */
- leal .Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax
+ leal .Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%edi), %eax
/* Prepare the stack for far return to Long Mode */
pushl $__KERNEL_CS
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
index 91dbb99203fbce2d..4e8cef135226bcbb 100644
--- a/arch/x86/boot/compressed/pgtable.h
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -14,7 +14,7 @@
extern unsigned long *trampoline_32bit;
-extern void trampoline_32bit_src(void *return_ptr);
+extern void trampoline_32bit_src(void *trampoline, bool enable_5lvl);
#endif /* __ASSEMBLER__ */
#endif /* BOOT_COMPRESSED_PAGETABLE_H */
--
2.39.2
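
For illustration only, not part of the patch: with the arguments now passed in
RDI and RSI per the SysV ABI, a C caller in the decompressor could invoke the
relocated trampoline roughly as sketched below. The helper name
call_trampoline() is hypothetical, and the sketch assumes paging_prepare()'s
struct paging_config return value (trampoline_start, l5_required) has been
made visible to C code; today both live in pgtable_64.c.

#include "misc.h"	/* struct boot_params */
#include "pgtable.h"	/* TRAMPOLINE_32BIT_CODE_OFFSET, trampoline_32bit_src() */

static void call_trampoline(struct boot_params *bp)	/* hypothetical helper */
{
	struct paging_config pc = paging_prepare(bp);
	void (*tramp)(void *trampoline, bool enable_5lvl);

	/* Execute the copy of the trampoline placed at the code offset */
	tramp = (void *)(pc.trampoline_start + TRAMPOLINE_32BIT_CODE_OFFSET);

	/* Equivalent to the movq %rax,%rdi / movq %rdx,%rsi sequence above */
	tramp((void *)pc.trampoline_start, pc.l5_required);
}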
On Mon, 31 Jul 2023 at 13:29, Borislav Petkov <[email protected]> wrote:
>
> On Fri, Jul 28, 2023 at 11:09:04AM +0200, Ard Biesheuvel wrote:
> > @@ -534,15 +534,15 @@ SYM_FUNC_END(.Lrelocated)
> > /*
> > * This is the 32-bit trampoline that will be copied over to low memory.
> > *
> > - * ECX contains the base address of the trampoline memory.
> > - * Non zero RDX means trampoline needs to enable 5-level paging.
> > + * EDI contains the base address of the trampoline memory.
> > + * Non-zero ESI means trampoline needs to enable 5-level paging.
> > */
>
> This is confusing - this talks about 32-bit and 32-bit registers but
> uses the 64-bit calling convention because it gets called by 64-bit
> code. Please add a short sentence clarifying that.
>
Ok
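
(For reference, one way such a clarifying sentence could read; this is only a
sketch, not the actual follow-up:)

/*
 * This is the 32-bit trampoline that will be copied over to low memory.
 * It is called using the ordinary 64-bit calling convention, as it is
 * entered while still executing in 64-bit mode; the switch to 32-bit
 * protected mode happens inside the trampoline itself.
 *
 * EDI contains the base address of the trampoline memory.
 * Non-zero ESI means trampoline needs to enable 5-level paging.
 */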