2019-08-01 03:42:07

by Wanpeng Li

Subject: [PATCH v3 1/3] KVM: Fix leak vCPU's VMCS value into other pCPU

From: Wanpeng Li <[email protected]>

After commit d73eb57b80b (KVM: Boost vCPUs that are delivering interrupts), a
five-year-old bug is exposed. Running the ebizzy benchmark in three 80-vCPU VMs
on one 80-pCPU Skylake server, a lot of rcu_sched stall warnings splat in the
VMs after stress testing:

INFO: rcu_sched detected stalls on CPUs/tasks: { 4 41 57 62 77} (detected by 15, t=60004 jiffies, g=899, c=898, q=15073)
Call Trace:
flush_tlb_mm_range+0x68/0x140
tlb_flush_mmu.part.75+0x37/0xe0
tlb_finish_mmu+0x55/0x60
zap_page_range+0x142/0x190
SyS_madvise+0x3cd/0x9c0
system_call_fastpath+0x1c/0x21

swait_active() remains true until finish_swait() is called in kvm_vcpu_block(),
so voluntarily preempted vCPUs are also taken into account by the
kvm_vcpu_on_spin() loop. This greatly increases the probability that the
condition kvm_arch_vcpu_runnable(vcpu) is checked and evaluates to true; when
APICv is enabled, the yield-candidate vCPU's VMCS RVI field then leaks (via
vmx_sync_pir_to_irr()) into the current VMCS of the vCPU that is spinning on a
taken lock.
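
Roughly, the offending path on x86 with APICv enabled looks like this
(simplified call chain, shown for illustration; not part of the diff below):

  kvm_vcpu_on_spin(me)
    kvm_arch_vcpu_runnable(vcpu)       <- vcpu is sleeping in kvm_vcpu_block()
      kvm_vcpu_has_events()
        kvm_cpu_has_interrupt()
          kvm_apic_has_interrupt()
            apic_has_interrupt_for_ppr()
              vmx_sync_pir_to_irr()    <- kvm_x86_ops->sync_pir_to_irr()
                vmx_set_rvi()          <- vmcs_write16() on whatever VMCS is
                                          loaded on this pCPU, i.e. me's VMCS,
                                          not vcpu's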

This patch fixes it by conservatively checking a subset of events.

Cc: Paolo Bonzini <[email protected]>
Cc: Radim Krčmář <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: Marc Zyngier <[email protected]>
Cc: [email protected]
Fixes: 98f4a1467 (KVM: add kvm_arch_vcpu_runnable() test to kvm_vcpu_on_spin() loop)
Signed-off-by: Wanpeng Li <[email protected]>
---
v2 -> v3:
* check conservatively a subset of events
v1 -> v2:
* checking swait_active(&vcpu->wq) for involuntary preemption

arch/mips/kvm/mips.c | 5 +++++
arch/powerpc/kvm/powerpc.c | 5 +++++
arch/s390/kvm/kvm-s390.c | 5 +++++
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/svm.c | 6 ++++++
arch/x86/kvm/vmx/vmx.c | 6 ++++++
arch/x86/kvm/x86.c | 23 +++++++++++++++++++++++
include/linux/kvm_host.h | 1 +
virt/kvm/arm/arm.c | 5 +++++
virt/kvm/kvm_main.c | 13 ++++++++++++-
10 files changed, 69 insertions(+), 1 deletion(-)

diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 2cfe839..95a4642 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -98,6 +98,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
return !!(vcpu->arch.pending_exceptions);
}

+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ return kvm_arch_vcpu_runnable(vcpu);
+}
+
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
return false;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0dba7eb..3e34d5f 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -50,6 +50,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ return kvm_arch_vcpu_runnable(vcpu);
+}
+
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
return false;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3f520cd8..5623b23 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3102,6 +3102,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
return kvm_s390_vcpu_has_irq(vcpu, 0);
}

+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ return kvm_arch_vcpu_runnable(vcpu);
+}
+
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7b0a4ee..de39a58 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1175,6 +1175,7 @@ struct kvm_x86_ops {
int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+ bool (*apicv_test_pi_on)(struct kvm_vcpu *vcpu);

int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
bool *expired);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7eafc69..1921f37 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5190,6 +5190,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
kvm_vcpu_wake_up(vcpu);
}

+static bool svm_apicv_test_pi_on(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
unsigned long flags;
@@ -7314,6 +7319,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {

.pmu_ops = &amd_pmu_ops,
.deliver_posted_interrupt = svm_deliver_avic_intr,
+ .apicv_test_pi_on = svm_apicv_test_pi_on,
.update_pi_irte = svm_update_pi_irte,
.setup_mce = svm_setup_mce,

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 074385c..863d641 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6117,6 +6117,11 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
return max_irr;
}

+static bool vmx_apicv_test_pi_on(struct kvm_vcpu *vcpu)
+{
+ return pi_test_on(vcpu_to_pi_desc(vcpu));
+}
+
static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
if (!kvm_vcpu_apicv_active(vcpu))
@@ -7726,6 +7731,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+ .apicv_test_pi_on = vmx_apicv_test_pi_on,

.set_tss_addr = vmx_set_tss_addr,
.set_identity_map_addr = vmx_set_identity_map_addr,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c6d951c..177544e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9698,6 +9698,29 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
}

+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
+ return true;
+
+ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+ (READ_ONCE(vcpu->arch.nmi_pending) &&
+ kvm_x86_ops->nmi_allowed(vcpu)))
+ return true;
+
+ if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+ (READ_ONCE(vcpu->arch.smi_pending) && !is_smm(vcpu)))
+ return true;
+
+ if (kvm_test_request(KVM_REQ_EVENT, vcpu))
+ return true;
+
+ if (vcpu->arch.apicv_active && kvm_x86_ops->apicv_test_pi_on(vcpu))
+ return true;
+
+ return false;
+}
+
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
return vcpu->arch.preempted_in_kernel;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5c5b586..9e4c2bb 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -872,6 +872,7 @@ int kvm_arch_check_processor_compat(void);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index acc4324..2927895 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -444,6 +444,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
&& !v->arch.power_off && !v->arch.pause);
}

+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ return kvm_arch_vcpu_runnable(vcpu);
+}
+
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
return vcpu_mode_priv(vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 887f3b0..3e1a509 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2477,6 +2477,17 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
#endif
}

+static bool vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+ if (kvm_arch_dy_runnable(vcpu))
+ return true;
+
+ if (!list_empty_careful(&vcpu->async_pf.done))
+ return true;
+
+ return false;
+}
+
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
struct kvm *kvm = me->kvm;
@@ -2506,7 +2517,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
continue;
if (vcpu == me)
continue;
- if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+ if (swait_active(&vcpu->wq) && !vcpu_runnable(vcpu))
continue;
if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
continue;
--
2.7.4


2019-08-01 03:44:46

by Wanpeng Li

Subject: [PATCH v3 3/3] KVM: Check preempted_in_kernel for involuntary preemption

From: Wanpeng Li <[email protected]>

preempted_in_kernel is updated by the preempt notifier when involuntary preemption
occurs, so it can be stale by the time voluntarily preempted vCPUs are taken into
account by the kvm_vcpu_on_spin() loop. This patch lets the loop check
preempted_in_kernel only for involuntarily preempted vCPUs.

Cc: Paolo Bonzini <[email protected]>
Cc: Radim Krčmář <[email protected]>
Signed-off-by: Wanpeng Li <[email protected]>
---
virt/kvm/kvm_main.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3e1a509..58b9590 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2519,7 +2519,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
continue;
if (swait_active(&vcpu->wq) && !vcpu_runnable(vcpu))
continue;
- if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
+ if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
+ !kvm_arch_vcpu_in_kernel(vcpu))
continue;
if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
continue;
@@ -4216,7 +4217,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

- vcpu->preempted = false;
+ WRITE_ONCE(vcpu->preempted, false);
WRITE_ONCE(vcpu->ready, false);

kvm_arch_sched_in(vcpu, cpu);
@@ -4230,7 +4231,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

if (current->state == TASK_RUNNING) {
- vcpu->preempted = true;
+ WRITE_ONCE(vcpu->preempted, true);
WRITE_ONCE(vcpu->ready, true);
}
kvm_arch_vcpu_put(vcpu);
--
2.7.4

2019-08-01 03:58:57

by Wanpeng Li

Subject: [PATCH v3 2/3] KVM: LAPIC: Don't need to wakeup vCPU twice after timer fire

From: Wanpeng Li <[email protected]>

kvm_set_pending_timer() already takes care of waking up a sleeping vCPU that has
a pending timer, so there is no need to check this again in apic_timer_expired().
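
For reference, kvm_set_pending_timer() at this point in the tree is roughly the
following (paraphrased sketch, comments added):

  void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
  {
          kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
          /* kvm_vcpu_kick() already performs the swait_active()/swake_up_one()
           * wakeup for a vCPU blocked in kvm_vcpu_block(), which is why the
           * explicit wakeup removed below is redundant. */
          kvm_vcpu_kick(vcpu);
  }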

Cc: Paolo Bonzini <[email protected]>
Cc: Radim Krčmář <[email protected]>
Signed-off-by: Wanpeng Li <[email protected]>
---
arch/x86/kvm/lapic.c | 8 --------
1 file changed, 8 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0aa1586..685d17c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1548,7 +1548,6 @@ static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
static void apic_timer_expired(struct kvm_lapic *apic)
{
struct kvm_vcpu *vcpu = apic->vcpu;
- struct swait_queue_head *q = &vcpu->wq;
struct kvm_timer *ktimer = &apic->lapic_timer;

if (atomic_read(&apic->lapic_timer.pending))
@@ -1566,13 +1565,6 @@ static void apic_timer_expired(struct kvm_lapic *apic)

atomic_inc(&apic->lapic_timer.pending);
kvm_set_pending_timer(vcpu);
-
- /*
- * For x86, the atomic_inc() is serialized, thus
- * using swait_active() is safe.
- */
- if (swait_active(q))
- swake_up_one(q);
}

static void start_sw_tscdeadline(struct kvm_lapic *apic)
--
2.7.4

2019-08-02 02:30:30

by Wanpeng Li

Subject: Re: [PATCH v3 1/3] KVM: Fix leak vCPU's VMCS value into other pCPU

On Thu, 1 Aug 2019 at 21:31, Sasha Levin <[email protected]> wrote:
>
> Hi,
>
> [This is an automated email]
>
> This commit has been processed because it contains a "Fixes:" tag,
> fixing commit: 98f4a1467612 KVM: add kvm_arch_vcpu_runnable() test to kvm_vcpu_on_spin() loop.
>
> The bot has tested the following trees: v5.2.4, v5.1.21, v4.19.62, v4.14.134, v4.9.186, v4.4.186.
>
> v5.2.4: Build failed! Errors:
> arch/arm64/kvm/../../../virt/kvm/kvm_main.c:2483:31: error: ‘struct kvm_vcpu’ has no member named ‘async_pf’
>
> v5.1.21: Build failed! Errors:
> arch/arm64/kvm/../../../virt/kvm/kvm_main.c:2415:31: error: ‘struct kvm_vcpu’ has no member named ‘async_pf’

Thanks for reporting this. After more grepping, it seems that only x86 and
s390 enable async_pf in their Makefiles, so I can move the 'if
(!list_empty_careful(&vcpu->async_pf.done))' check into
kvm_arch_dy_runnable(). Paolo, do you have more comments on v3 before
I send a new version?

Regards,
Wanpeng Li

2019-08-04 03:31:34

by Paolo Bonzini

Subject: Re: [PATCH v3 1/3] KVM: Fix leak vCPU's VMCS value into other pCPU

On 01/08/19 05:30, Wanpeng Li wrote:
> +bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
> +{
> + if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
> + return true;
> +
> + if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
> + (READ_ONCE(vcpu->arch.nmi_pending) &&
> + kvm_x86_ops->nmi_allowed(vcpu)))

You cannot check kvm_x86_ops->nmi_allowed(vcpu), so just use
kvm_test_request.

> + return true;
> +
> + if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
> + (READ_ONCE(vcpu->arch.smi_pending) && !is_smm(vcpu)))
> + return true;

Same here, if only for simplicity.

> + if (kvm_test_request(KVM_REQ_EVENT, vcpu))
> + return true;
> +
> + if (vcpu->arch.apicv_active && kvm_x86_ops->apicv_test_pi_on(vcpu))
> + return true;

Let's call it apicv_has_pending_interrupt instead.

Also, please add a comment in kvm_main.c saying that
kvm_arch_dy_runnable is called outside vcpu_load/vcpu_put.

Paolo

> +
> + return false;
> +}
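
Taken together, the suggestions above amount to something like the following
sketch for v4 (illustrative only, not the actual v4; the hook name follows the
rename suggested above):

  bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
  {
          if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
                  return true;

          /* Pending requests alone are enough for a conservative check;
           * per the review, don't call into kvm_x86_ops from here. */
          if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
              kvm_test_request(KVM_REQ_SMI, vcpu) ||
              kvm_test_request(KVM_REQ_EVENT, vcpu))
                  return true;

          if (vcpu->arch.apicv_active &&
              kvm_x86_ops->apicv_has_pending_interrupt(vcpu))
                  return true;

          return false;
  }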

2019-08-04 03:32:53

by Paolo Bonzini

Subject: Re: [PATCH v3 1/3] KVM: Fix leak vCPU's VMCS value into other pCPU

On 02/08/19 02:46, Wanpeng Li wrote:
> Thanks for reporting this, after more grep, it seems that just x86 and
> s390 enable async_pf in their Makefile. So I can move 'if
> (!list_empty_careful(&vcpu->async_pf.done))' checking to
> kvm_arch_dy_runnable()

No, wrap it with #ifdef CONFIG_KVM_ASYNC_PF instead. Thanks!

Paolo
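
In other words, the helper added in patch 1/3 would become something like this
(a sketch assuming only the async_pf check needs guarding; the exact form in v4
may differ):

  static bool vcpu_runnable(struct kvm_vcpu *vcpu)
  {
          if (kvm_arch_dy_runnable(vcpu))
                  return true;

  #ifdef CONFIG_KVM_ASYNC_PF
          /* async_pf exists only on architectures that enable it in their
           * Makefiles (currently x86 and s390), hence the guard. */
          if (!list_empty_careful(&vcpu->async_pf.done))
                  return true;
  #endif

          return false;
  }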

2019-08-05 02:07:03

by Wanpeng Li

Subject: Re: [PATCH v3 1/3] KVM: Fix leak vCPU's VMCS value into other pCPU

On Sat, 3 Aug 2019 at 14:41, Paolo Bonzini <[email protected]> wrote:
>
> On 02/08/19 02:46, Wanpeng Li wrote:
> > Thanks for reporting this, after more grep, it seems that just x86 and
> > s390 enable async_pf in their Makefile. So I can move 'if
> > (!list_empty_careful(&vcpu->async_pf.done))' checking to
> > kvm_arch_dy_runnable()
>
> No, wrap it with #ifdef CONFIG_KVM_ASYNC_PF instead. Thanks!

Ok, I will handle all the comments in v4. :)

Regards,
Wanpeng Li