There is only one GIF flag for the whole processor, so make sure it is
not clobbered when switching to L2 (in which case we also have to
include the V_GIF_ENABLE_MASK, lest we confuse
enable_gif/disable_gif/gif_set). When going back, L1 could in theory
have entered L2 without issuing a CLGI, so make sure svm_set_gif is
done last, after svm->vmcb->control.int_ctl has been copied back from
hsave.
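
To make the bit flow concrete, here is a stand-alone C sketch of the
merge (illustrative only: merge_int_ctl() is a made-up helper, and the
mask values mirror the definitions in arch/x86/include/asm/svm.h):

#include <stdint.h>

#define V_GIF_MASK		(1u << 9)
#define V_INTR_MASKING_MASK	(1u << 24)
#define V_GIF_ENABLE_MASK	(1u << 25)

/*
 * The GIF state (V_GIF_MASK), its enable bit (V_GIF_ENABLE_MASK) and
 * V_INTR_MASKING live in L1's hsave copy and must survive the switch
 * to L2; everything else comes from the int_ctl that L1 put into the
 * nested VMCB.
 */
static uint32_t merge_int_ctl(uint32_t l2_int_ctl, uint32_t hsave_int_ctl)
{
	const uint32_t mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK |
			      V_GIF_MASK;

	return (l2_int_ctl & ~mask) | (hsave_int_ctl & mask);
}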
Signed-off-by: Paolo Bonzini <[email protected]>
---
 arch/x86/kvm/svm/nested.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 7e4a506828c9..6c7f0bffdf01 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -293,6 +293,7 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
 
 static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 {
+	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
 	if (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
 		nested_svm_init_mmu_context(&svm->vcpu);
 
@@ -308,7 +309,10 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
 		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
 
-	svm->vmcb->control.int_ctl = svm->nested.ctl.int_ctl | V_INTR_MASKING_MASK;
+	svm->vmcb->control.int_ctl =
+		(svm->nested.ctl.int_ctl & ~mask) |
+		(svm->nested.hsave->control.int_ctl & mask);
+
 	svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
 	svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
 	svm->vmcb->control.int_state = svm->nested.ctl.int_state;
--
2.26.2
On 5/29/20 8:39 AM, Paolo Bonzini wrote:
> There is only one GIF flag for the whole processor, so make sure it is
> not clobbered when switching to L2 (in which case we also have to
> include the V_GIF_ENABLE_MASK, lest we confuse
> enable_gif/disable_gif/gif_set). When going back, L1 could in theory
> have entered L2 without issuing a CLGI, so make sure svm_set_gif is
> done last, after svm->vmcb->control.int_ctl has been copied back from
> hsave.
>
> Signed-off-by: Paolo Bonzini <[email protected]>
> ---
> arch/x86/kvm/svm/nested.c | 6 +++++-
> 1 file changed, 5 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 7e4a506828c9..6c7f0bffdf01 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -293,6 +293,7 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
>  
>  static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
>  {
> +	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
>  	if (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
>  		nested_svm_init_mmu_context(&svm->vcpu);
>  
> @@ -308,7 +309,10 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
>  	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
>  		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
>  
> -	svm->vmcb->control.int_ctl = svm->nested.ctl.int_ctl | V_INTR_MASKING_MASK;
> +	svm->vmcb->control.int_ctl =
> +		(svm->nested.ctl.int_ctl & ~mask) |
> +		(svm->nested.hsave->control.int_ctl & mask);
If this is the very first VMRUN, do we have any int_ctl saved in hsave?
> +
>  	svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
>  	svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
>  	svm->vmcb->control.int_state = svm->nested.ctl.int_state;
On 01/06/20 01:11, Krish Sadhukhan wrote:
>>
>> +	svm->vmcb->control.int_ctl =
>> +		(svm->nested.ctl.int_ctl & ~mask) |
>> +		(svm->nested.hsave->control.int_ctl & mask);
>
>
> If this is the very first VMRUN, do we have any int_ctl saved in hsave?
Yes, copy_vmcb_control_area(hsave, vmcb) is called before
enter_svm_guest_mode (which calls nested_prepare_vmcb_control).
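
For the record, here is a toy model of that ordering (stand-alone C;
the structs and copy_control() are stand-ins for the real KVM types
and for copy_vmcb_control_area()):

#include <assert.h>
#include <stdint.h>

struct vmcb_control { uint32_t int_ctl; };
struct vmcb { struct vmcb_control control; };

/* Models copy_vmcb_control_area(hsave, vmcb). */
static void copy_control(struct vmcb *dst, const struct vmcb *src)
{
	dst->control = src->control;
}

int main(void)
{
	/* Example L1 value: V_GIF_ENABLE (bit 25) and V_GIF (bit 9) set. */
	struct vmcb vmcb01 = { .control = { .int_ctl = 0x02000200u } };
	struct vmcb hsave = { { 0 } };

	/* nested_svm_vmrun(): L1's controls are saved into hsave first... */
	copy_control(&hsave, &vmcb01);

	/* ...so when enter_svm_guest_mode() reaches
	 * nested_prepare_vmcb_control(), hsave.control.int_ctl is valid
	 * even on the very first VMRUN. */
	assert(hsave.control.int_ctl == vmcb01.control.int_ctl);
	return 0;
}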
Paolo