2021-05-12 11:23:05

by Uros Bizjak

Subject: [PATCH] KVM: SVM/VMX: Use %rax instead of %_ASM_AX within CONFIG_X86_64

There is no need to use %_ASM_AX inside an #ifdef CONFIG_X86_64 block;
there the macro always expands to %rax, so the register can be named
directly.
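
For reference, _ASM_AX is defined in arch/x86/include/asm/asm.h, which
selects the register name by word size. A trimmed sketch of the
relevant definitions (paraphrased, not the verbatim header):

  #ifdef __ASSEMBLY__
  # define __ASM_FORM(x)	x
  #endif

  #ifndef __x86_64__
  # define __ASM_SEL(a, b)	__ASM_FORM(a)	/* 32 bit */
  #else
  # define __ASM_SEL(a, b)	__ASM_FORM(b)	/* 64 bit */
  #endif

  #define __ASM_REG(reg)	__ASM_SEL(e##reg, r##reg)
  #define _ASM_AX		__ASM_REG(ax)

Since .S files are run through the C preprocessor, any use of %_ASM_AX
inside a CONFIG_X86_64 block becomes plain %rax before the assembler
ever sees it.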

Cc: Paolo Bonzini <[email protected]>
Signed-off-by: Uros Bizjak <[email protected]>
---
arch/x86/kvm/svm/vmenter.S | 44 +++++++++++++++++++-------------------
arch/x86/kvm/vmx/vmenter.S | 32 +++++++++++++--------------
2 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 4fa17df123cd..844b558bb021 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -64,14 +64,14 @@ SYM_FUNC_START(__svm_vcpu_run)
mov VCPU_RSI(%_ASM_AX), %_ASM_SI
mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
- mov VCPU_R8 (%_ASM_AX), %r8
- mov VCPU_R9 (%_ASM_AX), %r9
- mov VCPU_R10(%_ASM_AX), %r10
- mov VCPU_R11(%_ASM_AX), %r11
- mov VCPU_R12(%_ASM_AX), %r12
- mov VCPU_R13(%_ASM_AX), %r13
- mov VCPU_R14(%_ASM_AX), %r14
- mov VCPU_R15(%_ASM_AX), %r15
+ mov VCPU_R8 (%rax), %r8
+ mov VCPU_R9 (%rax), %r9
+ mov VCPU_R10(%rax), %r10
+ mov VCPU_R11(%rax), %r11
+ mov VCPU_R12(%rax), %r12
+ mov VCPU_R13(%rax), %r13
+ mov VCPU_R14(%rax), %r14
+ mov VCPU_R15(%rax), %r15
#endif

/* "POP" @vmcb to RAX. */
@@ -93,21 +93,21 @@ SYM_FUNC_START(__svm_vcpu_run)
pop %_ASM_AX

/* Save all guest registers. */
- mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
- mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
- mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
- mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
- mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
- mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
+ mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
+ mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
+ mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
+ mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
+ mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
+ mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
- mov %r8, VCPU_R8 (%_ASM_AX)
- mov %r9, VCPU_R9 (%_ASM_AX)
- mov %r10, VCPU_R10(%_ASM_AX)
- mov %r11, VCPU_R11(%_ASM_AX)
- mov %r12, VCPU_R12(%_ASM_AX)
- mov %r13, VCPU_R13(%_ASM_AX)
- mov %r14, VCPU_R14(%_ASM_AX)
- mov %r15, VCPU_R15(%_ASM_AX)
+ mov %r8, VCPU_R8 (%rax)
+ mov %r9, VCPU_R9 (%rax)
+ mov %r10, VCPU_R10(%rax)
+ mov %r11, VCPU_R11(%rax)
+ mov %r12, VCPU_R12(%rax)
+ mov %r13, VCPU_R13(%rax)
+ mov %r14, VCPU_R14(%rax)
+ mov %r15, VCPU_R15(%rax)
#endif

/*
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 3a6461694fc2..9273709e4800 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -142,14 +142,14 @@ SYM_FUNC_START(__vmx_vcpu_run)
mov VCPU_RSI(%_ASM_AX), %_ASM_SI
mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
- mov VCPU_R8 (%_ASM_AX), %r8
- mov VCPU_R9 (%_ASM_AX), %r9
- mov VCPU_R10(%_ASM_AX), %r10
- mov VCPU_R11(%_ASM_AX), %r11
- mov VCPU_R12(%_ASM_AX), %r12
- mov VCPU_R13(%_ASM_AX), %r13
- mov VCPU_R14(%_ASM_AX), %r14
- mov VCPU_R15(%_ASM_AX), %r15
+ mov VCPU_R8 (%rax), %r8
+ mov VCPU_R9 (%rax), %r9
+ mov VCPU_R10(%rax), %r10
+ mov VCPU_R11(%rax), %r11
+ mov VCPU_R12(%rax), %r12
+ mov VCPU_R13(%rax), %r13
+ mov VCPU_R14(%rax), %r14
+ mov VCPU_R15(%rax), %r15
#endif
/* Load guest RAX. This kills the @regs pointer! */
mov VCPU_RAX(%_ASM_AX), %_ASM_AX
@@ -175,14 +175,14 @@ SYM_FUNC_START(__vmx_vcpu_run)
mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
- mov %r8, VCPU_R8 (%_ASM_AX)
- mov %r9, VCPU_R9 (%_ASM_AX)
- mov %r10, VCPU_R10(%_ASM_AX)
- mov %r11, VCPU_R11(%_ASM_AX)
- mov %r12, VCPU_R12(%_ASM_AX)
- mov %r13, VCPU_R13(%_ASM_AX)
- mov %r14, VCPU_R14(%_ASM_AX)
- mov %r15, VCPU_R15(%_ASM_AX)
+ mov %r8, VCPU_R8 (%rax)
+ mov %r9, VCPU_R9 (%rax)
+ mov %r10, VCPU_R10(%rax)
+ mov %r11, VCPU_R11(%rax)
+ mov %r12, VCPU_R12(%rax)
+ mov %r13, VCPU_R13(%rax)
+ mov %r14, VCPU_R14(%rax)
+ mov %r15, VCPU_R15(%rax)
#endif

/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
--
2.31.1


2021-05-12 11:59:34

by Paolo Bonzini

Subject: Re: [PATCH] KVM: SVM/VMX: Use %rax instead of %_ASM_AX within CONFIG_X86_64

On 12/05/21 13:21, Uros Bizjak wrote:
> There is no need to use %_ASM_AX inside an #ifdef CONFIG_X86_64 block;
> there the macro always expands to %rax, so the register can be named
> directly.
>
> Cc: Paolo Bonzini <[email protected]>
> Signed-off-by: Uros Bizjak <[email protected]>
> ---
> arch/x86/kvm/svm/vmenter.S | 44 +++++++++++++++++++-------------------
> arch/x86/kvm/vmx/vmenter.S | 32 +++++++++++++--------------
> 2 files changed, 38 insertions(+), 38 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
> index 4fa17df123cd..844b558bb021 100644
> --- a/arch/x86/kvm/svm/vmenter.S
> +++ b/arch/x86/kvm/svm/vmenter.S
> @@ -64,14 +64,14 @@ SYM_FUNC_START(__svm_vcpu_run)
> mov VCPU_RSI(%_ASM_AX), %_ASM_SI
> mov VCPU_RDI(%_ASM_AX), %_ASM_DI
> #ifdef CONFIG_X86_64
> - mov VCPU_R8 (%_ASM_AX), %r8
> - mov VCPU_R9 (%_ASM_AX), %r9
> - mov VCPU_R10(%_ASM_AX), %r10
> - mov VCPU_R11(%_ASM_AX), %r11
> - mov VCPU_R12(%_ASM_AX), %r12
> - mov VCPU_R13(%_ASM_AX), %r13
> - mov VCPU_R14(%_ASM_AX), %r14
> - mov VCPU_R15(%_ASM_AX), %r15
> + mov VCPU_R8 (%rax), %r8
> + mov VCPU_R9 (%rax), %r9
> + mov VCPU_R10(%rax), %r10
> + mov VCPU_R11(%rax), %r11
> + mov VCPU_R12(%rax), %r12
> + mov VCPU_R13(%rax), %r13
> + mov VCPU_R14(%rax), %r14
> + mov VCPU_R15(%rax), %r15
> #endif
>
> /* "POP" @vmcb to RAX. */
> @@ -93,21 +93,21 @@ SYM_FUNC_START(__svm_vcpu_run)
> pop %_ASM_AX
>
> /* Save all guest registers. */
> - mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
> - mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
> - mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
> - mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
> - mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
> - mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
> + mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
> + mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
> + mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
> + mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
> + mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
> + mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
> #ifdef CONFIG_X86_64
> - mov %r8, VCPU_R8 (%_ASM_AX)
> - mov %r9, VCPU_R9 (%_ASM_AX)
> - mov %r10, VCPU_R10(%_ASM_AX)
> - mov %r11, VCPU_R11(%_ASM_AX)
> - mov %r12, VCPU_R12(%_ASM_AX)
> - mov %r13, VCPU_R13(%_ASM_AX)
> - mov %r14, VCPU_R14(%_ASM_AX)
> - mov %r15, VCPU_R15(%_ASM_AX)
> + mov %r8, VCPU_R8 (%rax)
> + mov %r9, VCPU_R9 (%rax)
> + mov %r10, VCPU_R10(%rax)
> + mov %r11, VCPU_R11(%rax)
> + mov %r12, VCPU_R12(%rax)
> + mov %r13, VCPU_R13(%rax)
> + mov %r14, VCPU_R14(%rax)
> + mov %r15, VCPU_R15(%rax)
> #endif
>
> /*
> diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> index 3a6461694fc2..9273709e4800 100644
> --- a/arch/x86/kvm/vmx/vmenter.S
> +++ b/arch/x86/kvm/vmx/vmenter.S
> @@ -142,14 +142,14 @@ SYM_FUNC_START(__vmx_vcpu_run)
> mov VCPU_RSI(%_ASM_AX), %_ASM_SI
> mov VCPU_RDI(%_ASM_AX), %_ASM_DI
> #ifdef CONFIG_X86_64
> - mov VCPU_R8 (%_ASM_AX), %r8
> - mov VCPU_R9 (%_ASM_AX), %r9
> - mov VCPU_R10(%_ASM_AX), %r10
> - mov VCPU_R11(%_ASM_AX), %r11
> - mov VCPU_R12(%_ASM_AX), %r12
> - mov VCPU_R13(%_ASM_AX), %r13
> - mov VCPU_R14(%_ASM_AX), %r14
> - mov VCPU_R15(%_ASM_AX), %r15
> + mov VCPU_R8 (%rax), %r8
> + mov VCPU_R9 (%rax), %r9
> + mov VCPU_R10(%rax), %r10
> + mov VCPU_R11(%rax), %r11
> + mov VCPU_R12(%rax), %r12
> + mov VCPU_R13(%rax), %r13
> + mov VCPU_R14(%rax), %r14
> + mov VCPU_R15(%rax), %r15
> #endif
> /* Load guest RAX. This kills the @regs pointer! */
> mov VCPU_RAX(%_ASM_AX), %_ASM_AX
> @@ -175,14 +175,14 @@ SYM_FUNC_START(__vmx_vcpu_run)
> mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
> mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
> #ifdef CONFIG_X86_64
> - mov %r8, VCPU_R8 (%_ASM_AX)
> - mov %r9, VCPU_R9 (%_ASM_AX)
> - mov %r10, VCPU_R10(%_ASM_AX)
> - mov %r11, VCPU_R11(%_ASM_AX)
> - mov %r12, VCPU_R12(%_ASM_AX)
> - mov %r13, VCPU_R13(%_ASM_AX)
> - mov %r14, VCPU_R14(%_ASM_AX)
> - mov %r15, VCPU_R15(%_ASM_AX)
> + mov %r8, VCPU_R8 (%rax)
> + mov %r9, VCPU_R9 (%rax)
> + mov %r10, VCPU_R10(%rax)
> + mov %r11, VCPU_R11(%rax)
> + mov %r12, VCPU_R12(%rax)
> + mov %r13, VCPU_R13(%rax)
> + mov %r14, VCPU_R14(%rax)
> + mov %r15, VCPU_R15(%rax)
> #endif
>
> /* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
>

It looks a bit weird either way (either the base register is written
differently inside the #ifdef than outside it, or it is written
differently from the destination register), so I lean more towards
avoiding churn.
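
Concretely (taking the VMX load sequence as the example), one pair of
lines ends up looking mismatched whichever way you write it:

  mov VCPU_RDI(%_ASM_AX), %_ASM_DI	/* outside the #ifdef */
  mov VCPU_R8 (%rax), %r8		/* patched: base written differently */

versus the current

  mov VCPU_R8 (%_ASM_AX), %r8		/* base differs from destination %r8 */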

Paolo

2021-05-12 16:55:32

by Sean Christopherson

Subject: Re: [PATCH] KVM: SVM/VMX: Use %rax instead of %_ASM_AX within CONFIG_X86_64

On Wed, May 12, 2021, Paolo Bonzini wrote:
> On 12/05/21 13:21, Uros Bizjak wrote:
> > There is no need to use %_ASM_AX inside an #ifdef CONFIG_X86_64 block;
> > there the macro always expands to %rax, so the register can be named
> > directly.
> > diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> > index 3a6461694fc2..9273709e4800 100644
> > --- a/arch/x86/kvm/vmx/vmenter.S
> > +++ b/arch/x86/kvm/vmx/vmenter.S
> > @@ -142,14 +142,14 @@ SYM_FUNC_START(__vmx_vcpu_run)
> > mov VCPU_RSI(%_ASM_AX), %_ASM_SI
> > mov VCPU_RDI(%_ASM_AX), %_ASM_DI
> > #ifdef CONFIG_X86_64
> > - mov VCPU_R8 (%_ASM_AX), %r8
> > - mov VCPU_R9 (%_ASM_AX), %r9
> > - mov VCPU_R10(%_ASM_AX), %r10
> > - mov VCPU_R11(%_ASM_AX), %r11
> > - mov VCPU_R12(%_ASM_AX), %r12
> > - mov VCPU_R13(%_ASM_AX), %r13
> > - mov VCPU_R14(%_ASM_AX), %r14
> > - mov VCPU_R15(%_ASM_AX), %r15
> > + mov VCPU_R8 (%rax), %r8
> > + mov VCPU_R9 (%rax), %r9
> > + mov VCPU_R10(%rax), %r10
> > + mov VCPU_R11(%rax), %r11
> > + mov VCPU_R12(%rax), %r12
> > + mov VCPU_R13(%rax), %r13
> > + mov VCPU_R14(%rax), %r14
> > + mov VCPU_R15(%rax), %r15
> > #endif
> > /* Load guest RAX. This kills the @regs pointer! */
> > mov VCPU_RAX(%_ASM_AX), %_ASM_AX
> > @@ -175,14 +175,14 @@ SYM_FUNC_START(__vmx_vcpu_run)
> > mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
> > mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
> > #ifdef CONFIG_X86_64
> > - mov %r8, VCPU_R8 (%_ASM_AX)
> > - mov %r9, VCPU_R9 (%_ASM_AX)
> > - mov %r10, VCPU_R10(%_ASM_AX)
> > - mov %r11, VCPU_R11(%_ASM_AX)
> > - mov %r12, VCPU_R12(%_ASM_AX)
> > - mov %r13, VCPU_R13(%_ASM_AX)
> > - mov %r14, VCPU_R14(%_ASM_AX)
> > - mov %r15, VCPU_R15(%_ASM_AX)
> > + mov %r8, VCPU_R8 (%rax)
> > + mov %r9, VCPU_R9 (%rax)
> > + mov %r10, VCPU_R10(%rax)
> > + mov %r11, VCPU_R11(%rax)
> > + mov %r12, VCPU_R12(%rax)
> > + mov %r13, VCPU_R13(%rax)
> > + mov %r14, VCPU_R14(%rax)
> > + mov %r15, VCPU_R15(%rax)
> > #endif
> > /* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
> >
>
> It looks a bit weird either way (either the base register is written
> differently inside the #ifdef than outside it, or it is written
> differently from the destination register), so I lean more towards
> avoiding churn.

Even though it's unnecessary, I prefer %_ASM_AX since it provides a consistent
flow across the 64-bit-only boundary.
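
That is, the load sequence reads uniformly even where it crosses into
64-bit-only territory:

	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX), %r8
	...
	mov VCPU_R15(%_ASM_AX), %r15
#endif

so the reader can track the @regs pointer in %_ASM_AX throughout, even
though the guarded body only ever assembles with %rax.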