2016-12-12 09:13:04

by Paolo Bonzini

Subject: [PATCH v2] KVM: hyperv: fix locking of struct kvm_hv fields

Introduce a new mutex to avoid an AB-BA deadlock between kvm->lock and
vcpu->mutex. Protect accesses in kvm_hv_setup_tsc_page too, as suggested
by Roman.

Reported-by: Dmitry Vyukov <[email protected]>
Cc: Roman Kagan <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
---
 Documentation/virtual/kvm/locking.txt |  8 ++++++--
 arch/x86/include/asm/kvm_host.h       |  1 +
 arch/x86/kvm/hyperv.c                 | 24 +++++++++++++++---------
 arch/x86/kvm/x86.c                    |  1 +
 4 files changed, 23 insertions(+), 11 deletions(-)
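
Not part of the patch: a minimal, self-contained userspace sketch of the AB-BA ordering problem described in the changelog, with illustrative names only. One path takes kvm->lock and then a vcpu->mutex; the old Hyper-V MSR path already held vcpu->mutex and then took kvm->lock. Run concurrently, each side can block forever on the mutex the other holds; giving the partition-wide Hyper-V state its own mutex that nests inside vcpu->mutex removes the reversed order.

/* Illustrative userspace model only -- not KVM code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kvm_lock   = PTHREAD_MUTEX_INITIALIZER; /* plays kvm->lock */
static pthread_mutex_t vcpu_mutex = PTHREAD_MUTEX_INITIALIZER; /* plays vcpu->mutex */
static pthread_mutex_t hv_lock    = PTHREAD_MUTEX_INITIALIZER; /* plays the new hv_lock */

/* VM-wide path: kvm->lock first, then a vcpu->mutex. */
static void vm_ioctl_path(void)
{
        pthread_mutex_lock(&kvm_lock);
        pthread_mutex_lock(&vcpu_mutex);
        /* ... */
        pthread_mutex_unlock(&vcpu_mutex);
        pthread_mutex_unlock(&kvm_lock);
}

/* Old vCPU path: vcpu->mutex is already held, then kvm->lock is taken
 * for the partition-wide Hyper-V MSRs -- the reverse order (AB-BA).
 * If this runs concurrently with vm_ioctl_path(), each thread can end
 * up waiting forever for the mutex the other one holds. */
static void vcpu_ioctl_path_old(void)
{
        pthread_mutex_lock(&vcpu_mutex);
        pthread_mutex_lock(&kvm_lock);
        /* ... */
        pthread_mutex_unlock(&kvm_lock);
        pthread_mutex_unlock(&vcpu_mutex);
}

/* New vCPU path: the Hyper-V state is protected by its own mutex that
 * nests inside vcpu->mutex (as the locking.txt change documents), so
 * kvm->lock is no longer taken here and a single order remains. */
static void vcpu_ioctl_path_new(void)
{
        pthread_mutex_lock(&vcpu_mutex);
        pthread_mutex_lock(&hv_lock);
        /* ... */
        pthread_mutex_unlock(&hv_lock);
        pthread_mutex_unlock(&vcpu_mutex);
}

int main(void)
{
        /* Called sequentially here so the demo always terminates; the
         * deadlock only bites when the two old paths run concurrently. */
        vm_ioctl_path();
        vcpu_ioctl_path_old();
        vcpu_ioctl_path_new();
        printf("done\n");
        return 0;
}
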

diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index e5dd9f4d6100..fd013bf4115b 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -13,8 +13,12 @@ The acquisition orders for mutexes are as follows:
 - kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
   them together is quite rare.
 
-For spinlocks, kvm_lock is taken outside kvm->mmu_lock. Everything
-else is a leaf: no other lock is taken inside the critical sections.
+On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock.
+
+Everything else is a leaf: no other lock is taken inside the critical
+sections.
 
 2: Exception
 ------------
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7892530cbacf..2e25038dbd93 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -704,6 +704,7 @@ struct kvm_apic_map {
 
 /* Hyper-V emulation context */
 struct kvm_hv {
+        struct mutex hv_lock;
         u64 hv_guest_os_id;
         u64 hv_hypercall;
         u64 hv_tsc_page;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 99cde5220e07..1572c35b4f1a 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -852,6 +852,10 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
         if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                 return;
 
+        mutex_lock(&kvm->arch.hyperv.hv_lock);
+        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+                goto out_unlock;
+
         gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
         /*
          * Because the TSC parameters only vary when there is a
@@ -859,7 +863,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
          */
         if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                     &tsc_seq, sizeof(tsc_seq))))
-                return;
+                goto out_unlock;
 
         /*
          * While we're computing and writing the parameters, force the
@@ -868,15 +872,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
         hv->tsc_ref.tsc_sequence = 0;
         if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                             &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-                return;
+                goto out_unlock;
 
         if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
-                return;
+                goto out_unlock;
 
         /* Ensure sequence is zero before writing the rest of the struct. */
         smp_wmb();
         if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
-                return;
+                goto out_unlock;
 
         /*
          * Now switch to the TSC page mechanism by writing the sequence.
@@ -891,6 +895,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
         hv->tsc_ref.tsc_sequence = tsc_seq;
         kvm_write_guest(kvm, gfn_to_gpa(gfn),
                         &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+out_unlock:
+        mutex_unlock(&kvm->arch.hyperv.hv_lock);
 }
 
 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
@@ -1142,9 +1148,9 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
         if (kvm_hv_msr_partition_wide(msr)) {
                 int r;
 
-                mutex_lock(&vcpu->kvm->lock);
+                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
-                mutex_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                 return r;
         } else
                 return kvm_hv_set_msr(vcpu, msr, data, host);
@@ -1155,9 +1161,9 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
         if (kvm_hv_msr_partition_wide(msr)) {
                 int r;
 
-                mutex_lock(&vcpu->kvm->lock);
+                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                 r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
-                mutex_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                 return r;
         } else
                 return kvm_hv_get_msr(vcpu, msr, pdata);
@@ -1165,7 +1171,7 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)

 bool kvm_hv_hypercall_enabled(struct kvm *kvm)
 {
-        return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+        return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
 }
 
 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f0aee98e7492..30fc403df802 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7861,6 +7861,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

         raw_spin_lock_init(&kvm->arch.tsc_write_lock);
         mutex_init(&kvm->arch.apic_map_lock);
+        mutex_init(&kvm->arch.hyperv.hv_lock);
         spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
 
         kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
--
2.9.3
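
Also for illustration only (names are stand-ins, not KVM code): the kvm_hv_setup_tsc_page change above follows a common check/recheck idiom -- the enable bit is tested once without the lock so the disabled case stays cheap, then tested again after taking hv_lock, since another CPU may have rewritten the MSR before the lock was acquired. A minimal userspace sketch of that shape:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define TSC_REFERENCE_ENABLE 0x1ULL      /* stand-in for HV_X64_MSR_TSC_REFERENCE_ENABLE */

static pthread_mutex_t hv_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t hv_tsc_page;             /* stand-in for hv->hv_tsc_page, written under hv_lock */

static void setup_tsc_page(void)
{
        if (!(hv_tsc_page & TSC_REFERENCE_ENABLE))
                return;                  /* fast path: feature disabled, no lock taken */

        pthread_mutex_lock(&hv_lock);
        if (!(hv_tsc_page & TSC_REFERENCE_ENABLE))
                goto out_unlock;         /* recheck: the MSR may have been cleared meanwhile */

        /* ... compute and publish the TSC page contents, all under hv_lock ... */

out_unlock:
        pthread_mutex_unlock(&hv_lock);
}

int main(void)
{
        hv_tsc_page = TSC_REFERENCE_ENABLE;  /* pretend the guest enabled the TSC page */
        setup_tsc_page();
        printf("done\n");
        return 0;
}
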


2016-12-12 12:28:19

by Roman Kagan

Subject: Re: [PATCH v2] KVM: hyperv: fix locking of struct kvm_hv fields

On Mon, Dec 12, 2016 at 10:12:53AM +0100, Paolo Bonzini wrote:
> Introduce a new mutex to avoid an AB-BA deadlock between kvm->lock and
> vcpu->mutex. Protect accesses in kvm_hv_setup_tsc_page too, as suggested
> by Roman.
>
> Reported-by: Dmitry Vyukov <[email protected]>
> Cc: Roman Kagan <[email protected]>
> Signed-off-by: Paolo Bonzini <[email protected]>
> ---
> Documentation/virtual/kvm/locking.txt |  8 ++++++--
> arch/x86/include/asm/kvm_host.h       |  1 +
> arch/x86/kvm/hyperv.c                 | 24 +++++++++++++++---------
> arch/x86/kvm/x86.c                    |  1 +
> 4 files changed, 23 insertions(+), 11 deletions(-)

Reviewed-by: Roman Kagan <[email protected]>