From: Like Xu <[email protected]>
Add a test to verify that fixed counters enabled via guest
CPUID.0xA.ECX (instead of EDX[04:00]) continue to work as expected.
Co-developed-by: Jinrong Liang <[email protected]>
Signed-off-by: Jinrong Liang <[email protected]>
Signed-off-by: Like Xu <[email protected]>
---
.../kvm/x86_64/pmu_basic_functionality_test.c | 71 +++++++++++++++++++
1 file changed, 71 insertions(+)
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
index e19f8c2774c5..108cfe254095 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
@@ -298,10 +298,81 @@ static void intel_test_counters_num(void)
 	}
 }
 
+static void intel_guest_run_fixed_counters(uint64_t supported_bitmask,
+					    uint8_t max_fixed_num)
+{
+	unsigned int i;
+	uint64_t msr_val;
+
+	for (i = 0; i < max_fixed_num; i++) {
+		if (!(supported_bitmask & BIT_ULL(i)))
+			continue;
+
+		if (wrmsr_safe(MSR_CORE_PERF_FIXED_CTR0 + i, 0) == GP_VECTOR)
+			GUEST_SYNC(GP_VECTOR);
+
+		wrmsr_safe(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * i));
+		wrmsr_safe(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(INTEL_PMC_IDX_FIXED + i));
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+		wrmsr_safe(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+		rdmsr_safe(MSR_CORE_PERF_FIXED_CTR0 + i, &msr_val);
+
+		GUEST_SYNC(msr_val);
+	}
+
+	GUEST_DONE();
+}
+
+static void test_fixed_counters_setup(struct kvm_vcpu *vcpu, uint8_t edx_fix_num,
+				      uint32_t fixed_bitmask, bool expected)
+{
+	struct kvm_cpuid_entry2 *entry;
+	uint8_t max_fixed_num = X86_INTEL_MAX_FIXED_CTR_NUM;
+	uint64_t supported_bitmask = 0;
+	uint64_t msr_val;
+	unsigned int i;
+
+	entry = vcpu_get_cpuid_entry(vcpu, 0xa);
+	entry->ecx = fixed_bitmask;
+	entry->edx = (entry->edx & ~FIXED_CTR_NUM_MASK) | edx_fix_num;
+	vcpu_set_cpuid(vcpu);
+
+	for (i = 0; i < max_fixed_num; i++) {
+		if (entry->ecx & BIT_ULL(i) ||
+		    ((entry->edx & FIXED_CTR_NUM_MASK) > i))
+			supported_bitmask |= BIT_ULL(i);
+	}
+
+	vcpu_args_set(vcpu, 2, supported_bitmask, max_fixed_num);
+
+	while (run_vcpu(vcpu, &msr_val) != UCALL_DONE)
+		TEST_ASSERT(!!msr_val == expected,
+			    "Fixed counter did not count as expected.");
+}
+
+static void intel_test_fixed_counters(void)
+{
+	struct kvm_vm *vm;
+	struct kvm_vcpu *vcpu;
+	uint32_t ecx;
+	uint8_t edx, num = X86_INTEL_MAX_FIXED_CTR_NUM;
+
+	for (edx = 0; edx <= num; edx++) {
+		/* KVM doesn't emulate more fixed counters than it can support. */
+		for (ecx = 0; ecx <= (BIT_ULL(num) - 1); ecx++) {
+			vm = pmu_vm_create_with_one_vcpu(&vcpu,
+							 intel_guest_run_fixed_counters);
+			test_fixed_counters_setup(vcpu, edx, ecx, true);
+			kvm_vm_free(vm);
+		}
+	}
+}
+
 static void intel_test_pmu_cpuid(void)
 {
 	intel_test_arch_events();
 	intel_test_counters_num();
+	intel_test_fixed_counters();
 }
 
 int main(int argc, char *argv[])
--
2.31.1
On Tue, May 30, 2023, Jinrong Liang wrote:
> +static void test_fixed_counters_setup(struct kvm_vcpu *vcpu, uint8_t edx_fix_num,
> +				      uint32_t fixed_bitmask, bool expected)
> +{
> +	struct kvm_cpuid_entry2 *entry;
> +	uint8_t max_fixed_num = X86_INTEL_MAX_FIXED_CTR_NUM;
> +	uint64_t supported_bitmask = 0;
> +	uint64_t msr_val;
> +	unsigned int i;
> +
> +	entry = vcpu_get_cpuid_entry(vcpu, 0xa);
> +	entry->ecx = fixed_bitmask;
> +	entry->edx = (entry->edx & ~FIXED_CTR_NUM_MASK) | edx_fix_num;
> +	vcpu_set_cpuid(vcpu);
> +
> +	for (i = 0; i < max_fixed_num; i++) {
> +		if (entry->ecx & BIT_ULL(i) ||
> +		    ((entry->edx & FIXED_CTR_NUM_MASK) > i))
> +			supported_bitmask |= BIT_ULL(i);
> +	}
> +
> +	vcpu_args_set(vcpu, 2, supported_bitmask, max_fixed_num);
All of this can be queried from the guest, no? Then you also verify that KVM is
passing in the correct CPUID info too.
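E.g. an untested sketch of what I mean (the guest_get_supported_fixed_bitmask()
name is made up; this assumes the guest can use the cpuid() helper from
processor.h along with the FIXED_CTR_NUM_MASK and X86_INTEL_MAX_FIXED_CTR_NUM
definitions this test already relies on):

static uint64_t guest_get_supported_fixed_bitmask(void)
{
	uint32_t eax, ebx, ecx, edx;
	uint64_t bitmask = 0;
	uint8_t nr_fixed;
	unsigned int i;

	/* Read the guest's own CPUID.0xA instead of trusting host-passed args. */
	cpuid(0xa, &eax, &ebx, &ecx, &edx);
	nr_fixed = edx & FIXED_CTR_NUM_MASK;

	for (i = 0; i < X86_INTEL_MAX_FIXED_CTR_NUM; i++) {
		/* Supported if enumerated via either the ECX bitmask or the EDX count. */
		if ((ecx & BIT_ULL(i)) || i < nr_fixed)
			bitmask |= BIT_ULL(i);
	}

	return bitmask;
}

Then intel_guest_run_fixed_counters() can compute the bitmask on its own, the
host-side supported_bitmask computation and the vcpu_args_set() call go away,
and any CPUID info KVM mangles gets caught for free.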
> +	while (run_vcpu(vcpu, &msr_val) != UCALL_DONE)
> +		TEST_ASSERT(!!msr_val == expected,
> +			    "Fixed counter did not count as expected.");
ASSERT_EQ()
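i.e. something like this (sketch only; ASSERT_EQ() prints both values on
failure, so the custom message becomes unnecessary):

	while (run_vcpu(vcpu, &msr_val) != UCALL_DONE)
		ASSERT_EQ(!!msr_val, expected);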