Since vPMU cannot work properly when (1) the guest bit_width(s) of the
[gp|fixed] counters are greater than the host ones, or (2) the guest
requests more architectural events than the host supports, clamp the
guest values to the host capabilities and refresh the guest CPUID entry,
thereby keeping the shift exponents in range and fixing the following
UBSAN shift-out-of-bounds warning:
shift exponent 197 is too large for 64-bit type 'long long unsigned int'
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x107/0x163 lib/dump_stack.c:120
ubsan_epilogue+0xb/0x5a lib/ubsan.c:148
__ubsan_handle_shift_out_of_bounds.cold+0xb1/0x181 lib/ubsan.c:395
intel_pmu_refresh.cold+0x75/0x99 arch/x86/kvm/vmx/pmu_intel.c:348
kvm_vcpu_after_set_cpuid+0x65a/0xf80 arch/x86/kvm/cpuid.c:177
kvm_vcpu_ioctl_set_cpuid2+0x160/0x440 arch/x86/kvm/cpuid.c:308
kvm_arch_vcpu_ioctl+0x11b6/0x2d70 arch/x86/kvm/x86.c:4709
kvm_vcpu_ioctl+0x7b9/0xdb0 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3386
vfs_ioctl fs/ioctl.c:48 [inline]
__do_sys_ioctl fs/ioctl.c:753 [inline]
__se_sys_ioctl fs/ioctl.c:739 [inline]
__x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xa9
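
For illustration only (a standalone sketch, not KVM code; the helper name
and the widths are hypothetical), the clamping pattern used by the fix
looks like this:

#include <stdint.h>
#include <stdio.h>

/*
 * Clamp the guest-reported counter width to the host width before
 * shifting: shifting a 64-bit integer by an exponent >= 64 is
 * undefined behavior in C, which is exactly what UBSAN flags.
 */
static uint64_t counter_bitmask(unsigned int guest_width,
				unsigned int host_width)
{
	unsigned int width = guest_width < host_width ? guest_width
						      : host_width;

	return ((uint64_t)1 << width) - 1;
}

int main(void)
{
	/*
	 * A guest can claim an absurd bit_width (e.g. 197) via
	 * CPUID.0AH; without the clamp, 1ULL << 197 is undefined.
	 * Clamped to a host width of 48, the mask stays well-defined.
	 */
	printf("0x%llx\n",
	       (unsigned long long)counter_bitmask(197, 48));
	return 0;
}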
Reported-by: [email protected]
Signed-off-by: Like Xu <[email protected]>
---
v1->v2 Changelog:
- Add similar treatment for eax.split.mask_length (Sean)
arch/x86/kvm/vmx/pmu_intel.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index a886a47daebd..d1584ae6625a 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -345,7 +345,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
x86_pmu.num_counters_gp);
+ eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
+ eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
pmu->available_event_types = ~entry->ebx &
((1ull << eax.split.mask_length) - 1);
@@ -355,6 +357,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->nr_arch_fixed_counters =
min_t(int, edx.split.num_counters_fixed,
x86_pmu.num_counters_fixed);
+ edx.split.bit_width_fixed = min_t(int,
+ edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << edx.split.bit_width_fixed) - 1;
}
--
2.29.2
On 18/01/21 03:58, Like Xu wrote:
> [...]
Queued, thanks.
Paolo