kvm_arch_vcpu_ioctl_run is already doing srcu_read_lock/unlock in two
places, namely vcpu_run and post_kvm_run_save, and a third is actually
needed around the call to vcpu->arch.complete_userspace_io to avoid
the following splat:
WARNING: suspicious RCU usage
arch/x86/kvm/pmu.c:190 suspicious rcu_dereference_check() usage!
other info that might help us debug this:
rcu_scheduler_active = 2, debug_locks = 1
1 lock held by CPU 28/KVM/370841:
#0: ff11004089f280b8 (&vcpu->mutex){+.+.}-{3:3}, at: kvm_vcpu_ioctl+0x87/0x730 [kvm]
Call Trace:
<TASK>
dump_stack_lvl+0x59/0x73
reprogram_fixed_counter+0x15d/0x1a0 [kvm]
kvm_pmu_trigger_event+0x1a3/0x260 [kvm]
? free_moved_vector+0x1b4/0x1e0
complete_fast_pio_in+0x8a/0xd0 [kvm]
This splat is not at all unexpected, since complete_userspace_io
callbacks can execute much the same code as a vmexit handler does.
For example, SVM with nrips=false will call into the emulator from
svm_skip_emulated_instruction().
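To illustrate why lockdep fires (a condensed sketch of the filter
access in reprogram_fixed_counter(), not the exact pmu.c code): the
PMU event filter is an SRCU-protected pointer, so reaching it from a
complete_userspace_io callback with only vcpu->mutex held trips the
check:

	struct kvm_pmu_event_filter *filter;

	/*
	 * srcu_dereference() expands to srcu_dereference_check() with an
	 * srcu_read_lock_held(&kvm->srcu) lockdep condition; calling this
	 * from complete_fast_pio_in() without the read lock held is what
	 * produces the splat above.
	 */
	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);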
Reported-by: Like Xu <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
---
v2: actually commit what I tested... srcu_read_lock must be
before all "goto out"s.
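    To spell that out (a condensed sketch of the resulting control
    flow in kvm_arch_vcpu_ioctl_run, unrelated lines elided): the
    unlock sits on the common exit path after the "out" label, so
    every early "goto out" must already hold the lock:

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		...
		r = -EINTR;
		goto out;	/* lands on the unconditional unlock below */
	}
	...
out:
	...
	post_kvm_run_save(vcpu);
	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);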
arch/x86/kvm/x86.c | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 82a9dcd8c67f..e55de9b48d1a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9180,6 +9180,7 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
likely(!pic_in_kernel(vcpu->kvm));
}
+/* Called within kvm->srcu read side. */
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
@@ -9188,16 +9189,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
- /*
- * The call to kvm_ready_for_interrupt_injection() may end up in
- * kvm_xen_has_interrupt() which may require the srcu lock to be
- * held, to protect against changes in the vcpu_info address.
- */
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_run->ready_for_interrupt_injection =
pic_in_kernel(vcpu->kvm) ||
kvm_vcpu_ready_for_interrupt_injection(vcpu);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (is_smm(vcpu))
kvm_run->flags |= KVM_RUN_X86_SMM;
@@ -9815,6 +9809,7 @@ void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
/*
+ * Called within kvm->srcu read side.
* Returns 1 to let vcpu_run() continue the guest execution loop without
* exiting to the userspace. Otherwise, the value will be returned to the
* userspace.
@@ -10193,6 +10188,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
return r;
}
+/* Called within kvm->srcu read side. */
static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
bool hv_timer;
@@ -10252,12 +10248,12 @@ static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
!vcpu->arch.apf.halted);
}
+/* Called within kvm->srcu read side. */
static int vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
struct kvm *kvm = vcpu->kvm;
- vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
vcpu->arch.l1tf_flush_l1d = true;
for (;;) {
@@ -10291,8 +10287,6 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
}
}
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-
return r;
}
@@ -10398,6 +10392,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
+ struct kvm *kvm = vcpu->kvm;
int r;
vcpu_load(vcpu);
@@ -10405,6 +10400,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_run->flags = 0;
kvm_load_guest_fpu(vcpu);
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
if (kvm_run->immediate_exit) {
r = -EINTR;
@@ -10475,8 +10471,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (kvm_run->kvm_valid_regs)
store_regs(vcpu);
post_kvm_run_save(vcpu);
- kvm_sigset_deactivate(vcpu);
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ kvm_sigset_deactivate(vcpu);
vcpu_put(vcpu);
return r;
}
--
2.31.1
On 19/2/2022 5:34 pm, Paolo Bonzini wrote:
> kvm_arch_vcpu_ioctl_run is already doing srcu_read_lock/unlock in two
> places, namely vcpu_run and post_kvm_run_save, and a third is actually
> needed around the call to vcpu->arch.complete_userspace_io to avoid
> the following splat:
> [...]
> @@ -10405,6 +10400,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
> kvm_run->flags = 0;
> kvm_load_guest_fpu(vcpu);
>
> + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
With this patch (based on ec756e40e271), the kworker/140:1+rcu_gp task on the host
is so overloaded that it isn't even possible to quickly boot a 2U4G VM with QEMU.
Greetings,
FYI, we noticed the following commit (built with gcc-9):
commit: 9daee8ca835bf3ba264414c6f3a3924e23455449 ("[PATCH v2] KVM: x86: pull kvm->srcu read-side to kvm_arch_vcpu_ioctl_run")
url: https://github.com/0day-ci/linux/commits/Paolo-Bonzini/KVM-x86-pull-kvm-srcu-read-side-to-kvm_arch_vcpu_ioctl_run/20220220-190039
base: https://git.kernel.org/cgit/virt/kvm/kvm.git master
patch link: https://lore.kernel.org/lkml/[email protected]
in testcase: kvm-unit-tests
version: kvm-unit-tests-x86_64-882825e-1_20220215
with the following parameters:
ucode: 0x28
on test machine: 8 threads 1 socket Intel(R) Core(TM) i7-4790 v3 @ 3.60GHz with 6G memory
caused the changes below (please refer to the attached dmesg/kmsg for the entire log/backtrace):
If you fix the issue, kindly add the following tag:
Reported-by: kernel test robot <[email protected]>
Please note that, besides kvm-unit-tests.apic.fail in the title, we also found the below
cases fail with this commit while passing on its parent:
=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/ucode:
lkp-hsw-d02/kvm-unit-tests/debian-10.4-x86_64-20200603.cgz/x86_64-rhel-8.3-func/gcc-9/0x28
988896bb61827345 9daee8ca835bf3ba264414c6f3a
---------------- ---------------------------
fail:runs %reproduction fail:runs
| | |
:6 100% 6:6 kvm-unit-tests.apic-split.fail
:6 100% 6:6 kvm-unit-tests.apic.fail
:6 100% 6:6 kvm-unit-tests.hyperv_clock.fail
:6 100% 6:6 kvm-unit-tests.hyperv_connections.fail
:6 100% 6:6 kvm-unit-tests.hyperv_stimer.fail
:6 100% 6:6 kvm-unit-tests.hyperv_synic.fail
:6 100% 6:6 kvm-unit-tests.intel_iommu.fail
:6 100% 6:6 kvm-unit-tests.ioapic.fail
:6 100% 6:6 kvm-unit-tests.kvmclock_test.fail
:6 100% 6:6 kvm-unit-tests.smptest.fail
:6 100% 6:6 kvm-unit-tests.smptest3.fail
:6 100% 6:6 kvm-unit-tests.vmexit_ipi.fail
:6 100% 6:6 kvm-unit-tests.vmexit_ipi_halt.fail
:6 100% 6:6 kvm-unit-tests.vmx_apic_passthrough_thread.fail
:6 100% 6:6 kvm-unit-tests.vmx_init_signal_test.fail
:6 100% 6:6 kvm-unit-tests.vmx_sipi_signal_test.fail
2022-02-24 13:10:17 ./run_tests.sh
FAIL apic-split (timeout; duration=90s)
PASS ioapic-split (19 tests)
FAIL apic (timeout; duration=30)
FAIL ioapic (timeout; duration=90s)
SKIP cmpxchg8b (i386 only)
FAIL smptest (timeout; duration=90s)
FAIL smptest3 (timeout; duration=90s)
PASS vmexit_cpuid
PASS vmexit_vmcall
PASS vmexit_mov_from_cr8
PASS vmexit_mov_to_cr8
PASS vmexit_inl_pmtimer
FAIL vmexit_ipi (timeout; duration=90s)
FAIL vmexit_ipi_halt (timeout; duration=90s)
PASS vmexit_ple_round_robin
PASS vmexit_tscdeadline
PASS vmexit_tscdeadline_immed
PASS access
PASS access-reduced-maxphyaddr
SKIP smap (0 tests)
SKIP pku (0 tests)
SKIP pks (0 tests)
SKIP asyncpf (0 tests)
PASS emulator (141 tests, 1 skipped)
PASS eventinj (13 tests)
PASS hypercall (2 tests)
PASS idt_test (4 tests)
PASS memory (7 tests)
PASS msr (17 tests)
PASS pmu (142 tests)
PASS pmu_lbr (3 tests)
PASS pmu_emulation (4 tests)
PASS vmware_backdoors (11 tests)
PASS realmode
PASS s3
PASS setjmp (10 tests)
PASS sieve
PASS syscall (2 tests)
PASS tsc (3 tests)
PASS tsc_adjust (6 tests)
PASS xsave (17 tests)
PASS rmap_chain
FAIL svm (timeout; duration=90s)
SKIP taskswitch (i386 only)
SKIP taskswitch2 (i386 only)
FAIL kvmclock_test (timeout; duration=90s)
PASS pcid-enabled (2 tests)
PASS pcid-disabled (2 tests)
PASS pcid-asymmetric (2 tests)
PASS rdpru (1 tests)
PASS umip (21 tests)
SKIP la57 (i386 only)
PASS vmx (428809 tests, 2 expected failures, 5 skipped)
SKIP ept (0 tests)
FAIL vmx_eoi_bitmap_ioapic_scan (timeout; duration=90s)
SKIP vmx_hlt_with_rvi_test (1 tests, 1 skipped)
SKIP vmx_apicv_test (2 tests, 2 skipped)
FAIL vmx_apic_passthrough_thread (timeout; duration=90s)
FAIL vmx_init_signal_test (timeout; duration=10)
FAIL vmx_sipi_signal_test (timeout; duration=10)
PASS vmx_apic_passthrough_tpr_threshold_test (6 tests)
PASS vmx_vmcs_shadow_test (153979 tests)
PASS vmx_pf_exception_test (58 tests)
PASS vmx_pf_vpid_test (3833946 tests)
PASS vmx_pf_invvpid_test (3833946 tests)
PASS vmx_pf_no_vpid_test (3833946 tests)
FAIL vmx_pf_exception_test_reduced_maxphyaddr
PASS debug (22 tests)
FAIL hyperv_synic (timeout; duration=90s)
FAIL hyperv_connections (timeout; duration=90s)
FAIL hyperv_stimer (timeout; duration=90s)
FAIL hyperv_clock (timeout; duration=90s)
FAIL intel_iommu (timeout; duration=30)
SKIP tsx-ctrl (1 tests, 1 skipped)
FAIL intel_cet (timeout; duration=90s)
To reproduce:
git clone https://github.com/intel/lkp-tests.git
cd lkp-tests
sudo bin/lkp install job.yaml # job file is attached in this email
bin/lkp split-job --compatible job.yaml # generate the yaml file for lkp run
sudo bin/lkp run generated-yaml-file
# if you come across any failure that blocks the test,
# please remove the ~/.lkp and /lkp dirs to run from a clean state.
---
0DAY/LKP+ Test Infrastructure Open Source Technology Center
https://lists.01.org/hyperkitty/list/[email protected] Intel Corporation
Thanks,
Oliver Sang
On Fri, Feb 25, 2022, kernel test robot wrote:
> commit: 9daee8ca835bf3ba264414c6f3a3924e23455449 ("[PATCH v2] KVM: x86: pull kvm->srcu read-side to kvm_arch_vcpu_ioctl_run")
> url: https://github.com/0day-ci/linux/commits/Paolo-Bonzini/KVM-x86-pull-kvm-srcu-read-side-to-kvm_arch_vcpu_ioctl_run/20220220-190039
> base: https://git.kernel.org/cgit/virt/kvm/kvm.git master
> patch link: https://lore.kernel.org/lkml/[email protected]
>
> in testcase: kvm-unit-tests
> version: kvm-unit-tests-x86_64-882825e-1_20220215
> with the following parameters:
>
> ucode: 0x28
> Please note that, besides kvm-unit-tests.apic.fail in the title, we also found the below
> cases fail with this commit while passing on its parent:
Same thing I hit[*]. Paolo temporarily dropped the buggy patch; a new one will
presumably show up soonish.
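If v3 keeps the read side hoisted, presumably it will also need to drop
kvm->srcu around blocking: a halted vCPU sitting inside the read-side
critical section holds up every synchronize_srcu(&kvm->srcu) caller,
which would also explain the overloaded rcu_gp kworker in Like's report.
A rough sketch of the shape I'd expect in vcpu_block(), not the actual
patch:

	/* Don't pin the SRCU grace period while the vCPU is blocked. */
	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
		kvm_vcpu_halt(vcpu);
	else
		kvm_vcpu_block(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);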
[*] https://lore.kernel.org/all/[email protected]