v1:
Applies after David's "KVM: nVMX: fix instruction skipping during
emulated vm-entry".
Radim Krčmář (3):
KVM: nVMX: fix handling of invalid host efer
KVM: nVMX: colocate guest vmcs checks
KVM: nVMX: refactor nested_vmx_entry_failure()
arch/x86/kvm/vmx.c | 92 +++++++++++++++++++++++++-----------------------------
1 file changed, 42 insertions(+), 50 deletions(-)
--
2.11.0
Host state has to be checked before attempting VM entry, so a failed check
now reports VMfailValid with VMXERR_ENTRY_INVALID_HOST_STATE_FIELD instead
of a VM entry failure with EXIT_REASON_INVALID_STATE.
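For reference, a minimal standalone model of the rule being checked (not
kernel code: the helper name is made up, the #define values follow the
architectural bit positions, and the reserved-bit check done by
kvm_valid_efer() is left out):

#include <stdbool.h>
#include <stdint.h>

#define VM_EXIT_LOAD_IA32_EFER       (1u << 21)
#define VM_EXIT_HOST_ADDR_SPACE_SIZE (1u << 9)
#define EFER_LME                     (1ull << 8)
#define EFER_LMA                     (1ull << 10)

/* True when the host IA32_EFER field agrees with the "host address-space
 * size" VM-exit control, i.e. LMA and LME both mirror that control bit. */
static bool host_efer_consistent(uint32_t vm_exit_controls, uint64_t host_efer)
{
	bool ia32e;

	if (!(vm_exit_controls & VM_EXIT_LOAD_IA32_EFER))
		return true;	/* EFER is not loaded on exit, nothing to check */

	ia32e = (vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
	return ia32e == !!(host_efer & EFER_LMA) &&
	       ia32e == !!(host_efer & EFER_LME);
}

int main(void)
{
	/* A 64-bit host must set both LMA and LME when the control is 1. */
	return host_efer_consistent(VM_EXIT_LOAD_IA32_EFER |
				    VM_EXIT_HOST_ADDR_SPACE_SIZE,
				    EFER_LMA | EFER_LME) ? 0 : 1;
}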
Signed-off-by: Radim Krčmář <[email protected]>
---
arch/x86/kvm/vmx.c | 36 ++++++++++++++++++------------------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c9d7f1a1ba16..ab65b31ce58c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10481,6 +10481,24 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
goto out;
}
+ /*
+ * If the load IA32_EFER VM-exit control is 1, bits reserved in the
+ * IA32_EFER MSR must be 0 in the field for that register. In addition,
+ * the values of the LMA and LME bits in the field must each be that of
+ * the host address-space size VM-exit control.
+ */
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
+ ia32e = (vmcs12->vm_exit_controls &
+ VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
+ if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
+ ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
+ ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
+ goto out;
+ }
+ }
+
if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
nested_vmx_entry_failure(vcpu, vmcs12,
@@ -10515,24 +10533,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
}
/*
- * If the load IA32_EFER VM-exit control is 1, bits reserved in the
- * IA32_EFER MSR must be 0 in the field for that register. In addition,
- * the values of the LMA and LME bits in the field must each be that of
- * the host address-space size VM-exit control.
- */
- if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
- ia32e = (vmcs12->vm_exit_controls &
- VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
- if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
- ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
- ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
- nested_vmx_entry_failure(vcpu, vmcs12,
- EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- return 1;
- }
- }
-
- /*
* We're finally done with prerequisite checking, and can start with
* the nested entry.
*/
--
2.11.0
A few guest state checks were done before entering guest mode, for no good
reason. Having all of them in one place, after the switch to vmcs02, will be
simpler.
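One of the relocated checks, modeled standalone below (not kernel code,
names and values for illustration only), is the guest IA32_EFER consistency
rule quoted in the comment being moved, where LME only has to match the
IA-32e mode guest control while CR0.PG is set:

#include <stdbool.h>
#include <stdint.h>

#define VM_ENTRY_LOAD_IA32_EFER (1u << 15)
#define VM_ENTRY_IA32E_MODE     (1u << 9)
#define X86_CR0_PG              (1u << 31)
#define EFER_LME                (1ull << 8)
#define EFER_LMA                (1ull << 10)

/* Guest IA32_EFER must agree with the "IA-32e mode guest" VM-entry control;
 * the reserved-bit check (kvm_valid_efer) is omitted here. */
static bool guest_efer_consistent(uint32_t vm_entry_controls,
				  uint64_t guest_efer, uint64_t guest_cr0)
{
	bool ia32e;

	if (!(vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
		return true;

	ia32e = (vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
	if (ia32e != !!(guest_efer & EFER_LMA))
		return false;
	/* LME is only constrained while paging is enabled in guest CR0. */
	if ((guest_cr0 & X86_CR0_PG) && ia32e != !!(guest_efer & EFER_LME))
		return false;
	return true;
}

int main(void)
{
	/* Non-paged guest: LMA must be clear, LME is not checked. */
	return guest_efer_consistent(VM_ENTRY_LOAD_IA32_EFER, EFER_LME, 0) ? 0 : 1;
}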
Signed-off-by: Radim Krčmář <[email protected]>
---
arch/x86/kvm/vmx.c | 72 +++++++++++++++++++++++++++++-------------------------
1 file changed, 39 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ab65b31ce58c..050899431b5e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10499,39 +10499,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
}
}
- if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
- !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
- nested_vmx_entry_failure(vcpu, vmcs12,
- EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- return 1;
- }
- if (vmcs12->vmcs_link_pointer != -1ull) {
- nested_vmx_entry_failure(vcpu, vmcs12,
- EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
- return 1;
- }
-
- /*
- * If the load IA32_EFER VM-entry control is 1, the following checks
- * are performed on the field for the IA32_EFER MSR:
- * - Bits reserved in the IA32_EFER MSR must be 0.
- * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
- * the IA-32e mode guest VM-exit control. It must also be identical
- * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
- * CR0.PG) is 1.
- */
- if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) {
- ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
- if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
- ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
- ((vmcs12->guest_cr0 & X86_CR0_PG) &&
- ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
- nested_vmx_entry_failure(vcpu, vmcs12,
- EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- return 1;
- }
- }
-
/*
* We're finally done with prerequisite checking, and can start with
* the nested entry.
@@ -10561,6 +10528,45 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmx_segment_cache_clear(vmx);
+ if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
+ !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
+ leave_guest_mode(vcpu);
+ vmx_load_vmcs01(vcpu);
+ nested_vmx_entry_failure(vcpu, vmcs12,
+ EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
+ return 1;
+ }
+ if (vmcs12->vmcs_link_pointer != -1ull) {
+ leave_guest_mode(vcpu);
+ vmx_load_vmcs01(vcpu);
+ nested_vmx_entry_failure(vcpu, vmcs12,
+ EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
+ return 1;
+ }
+
+ /*
+ * If the load IA32_EFER VM-entry control is 1, the following checks
+ * are performed on the field for the IA32_EFER MSR:
+ * - Bits reserved in the IA32_EFER MSR must be 0.
+ * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
+ * the IA-32e mode guest VM-exit control. It must also be identical
+ * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
+ * CR0.PG) is 1.
+ */
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) {
+ ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
+ if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
+ ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
+ ((vmcs12->guest_cr0 & X86_CR0_PG) &&
+ ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
+ leave_guest_mode(vcpu);
+ vmx_load_vmcs01(vcpu);
+ nested_vmx_entry_failure(vcpu, vmcs12,
+ EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
+ return 1;
+ }
+ }
+
if (prepare_vmcs02(vcpu, vmcs12, &exit_qualification)) {
leave_guest_mode(vcpu);
vmx_load_vmcs01(vcpu);
--
2.11.0
Change the recurring pattern
leave_guest_mode(vcpu);
vmx_load_vmcs01(vcpu);
nested_vmx_entry_failure(vcpu, ...);
return 1;
into
return nested_vmx_entry_failure(vcpu, ...)
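The resulting shape, as a standalone sketch (placeholder names, not the
kernel functions):

#include <stdio.h>

static void leave_guest_mode(void)   { puts("leave guest mode"); }
static void load_vmcs01(void)        { puts("load vmcs01"); }
static void report_failure(int code) { printf("entry failure %d\n", code); }

/* The helper now performs the cleanup itself and returns the value that
 * every caller used to return by hand. */
static int entry_failure(int code)
{
	leave_guest_mode();
	load_vmcs01();
	report_failure(code);
	return 1;
}

static int nested_run(int bad_state)
{
	if (bad_state)
		return entry_failure(bad_state);	/* was a four-line block */

	/* ... proceed with the nested entry ... */
	return 0;
}

int main(void)
{
	return nested_run(0);
}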
Signed-off-by: Radim Krčmář <[email protected]>
---
arch/x86/kvm/vmx.c | 46 ++++++++++++++++------------------------------
1 file changed, 16 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 050899431b5e..a74cde40e349 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1398,7 +1398,7 @@ static inline bool is_nmi(u32 intr_info)
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
u32 exit_intr_info,
unsigned long exit_qualification);
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
+static int nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
u32 reason, unsigned long qualification);
@@ -10529,20 +10529,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmx_segment_cache_clear(vmx);
if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
- !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
- leave_guest_mode(vcpu);
- vmx_load_vmcs01(vcpu);
- nested_vmx_entry_failure(vcpu, vmcs12,
+ !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
+ return nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- return 1;
- }
- if (vmcs12->vmcs_link_pointer != -1ull) {
- leave_guest_mode(vcpu);
- vmx_load_vmcs01(vcpu);
- nested_vmx_entry_failure(vcpu, vmcs12,
+
+ if (vmcs12->vmcs_link_pointer != -1ull)
+ return nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
- return 1;
- }
/*
* If the load IA32_EFER VM-entry control is 1, the following checks
@@ -10559,32 +10552,21 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
((vmcs12->guest_cr0 & X86_CR0_PG) &&
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
- leave_guest_mode(vcpu);
- vmx_load_vmcs01(vcpu);
- nested_vmx_entry_failure(vcpu, vmcs12,
+ return nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- return 1;
}
}
- if (prepare_vmcs02(vcpu, vmcs12, &exit_qualification)) {
- leave_guest_mode(vcpu);
- vmx_load_vmcs01(vcpu);
- nested_vmx_entry_failure(vcpu, vmcs12,
+ if (prepare_vmcs02(vcpu, vmcs12, &exit_qualification))
+ return nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, exit_qualification);
- return 1;
- }
msr_entry_idx = nested_vmx_load_msr(vcpu,
vmcs12->vm_entry_msr_load_addr,
vmcs12->vm_entry_msr_load_count);
- if (msr_entry_idx) {
- leave_guest_mode(vcpu);
- vmx_load_vmcs01(vcpu);
- nested_vmx_entry_failure(vcpu, vmcs12,
+ if (msr_entry_idx)
+ return nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
- return 1;
- }
vmcs12->launch_state = 1;
@@ -11173,16 +11155,20 @@ static void vmx_leave_nested(struct kvm_vcpu *vcpu)
* It should only be called before L2 actually succeeded to run, and when
* vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss).
*/
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
+static int nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
u32 reason, unsigned long qualification)
{
+ leave_guest_mode(vcpu);
+ vmx_load_vmcs01(vcpu);
load_vmcs12_host_state(vcpu, vmcs12);
vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
vmcs12->exit_qualification = qualification;
nested_vmx_succeed(vcpu);
if (enable_shadow_vmcs)
to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
+
+ return 1;
}
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
--
2.11.0