From: Jinrong Liang <[email protected]>
Add tests to check the four legacy AMD PMU performance counters.
Signed-off-by: Jinrong Liang <[email protected]>
---
.../kvm/x86_64/pmu_basic_functionality_test.c | 72 ++++++++++++++-----
1 file changed, 54 insertions(+), 18 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
index 70adfad45010..cb2a7ad5c504 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
@@ -58,20 +58,29 @@ static uint64_t run_vcpu(struct kvm_vcpu *vcpu, uint64_t *ucall_arg)
static void guest_measure_loop(uint64_t event_code)
{
- uint32_t nr_fixed_counter = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
- uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
- uint32_t pmu_version = this_cpu_property(X86_PROPERTY_PMU_VERSION);
+ uint8_t nr_gp_counters, pmu_version = 1;
+ uint64_t event_sel_msr;
uint32_t counter_msr;
unsigned int i;
- if (rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
- counter_msr = MSR_IA32_PMC0;
- else
- counter_msr = MSR_IA32_PERFCTR0;
+ if (host_cpu_is_intel) {
+ nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
+ pmu_version = this_cpu_property(X86_PROPERTY_PMU_VERSION);
+ event_sel_msr = MSR_P6_EVNTSEL0;
+
+ if (rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
+ counter_msr = MSR_IA32_PMC0;
+ else
+ counter_msr = MSR_IA32_PERFCTR0;
+ } else {
+ nr_gp_counters = AMD64_NR_COUNTERS;
+ event_sel_msr = MSR_K7_EVNTSEL0;
+ counter_msr = MSR_K7_PERFCTR0;
+ }
for (i = 0; i < nr_gp_counters; i++) {
wrmsr(counter_msr + i, 0);
- wrmsr(MSR_P6_EVNTSEL0 + i, ARCH_PERFMON_EVENTSEL_OS |
+ wrmsr(event_sel_msr + i, ARCH_PERFMON_EVENTSEL_OS |
ARCH_PERFMON_EVENTSEL_ENABLE | event_code);
if (pmu_version > 1) {
@@ -85,7 +94,12 @@ static void guest_measure_loop(uint64_t event_code)
}
}
- if (pmu_version < 2 || nr_fixed_counter < 1)
+ if (host_cpu_is_amd || pmu_version < 2)
+ goto done;
+
+ uint32_t nr_fixed_counter = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+
+ if (nr_fixed_counter < 1)
goto done;
if (event_code == intel_arch_events[INTEL_ARCH_INSTRUCTIONS_RETIRED])
@@ -407,19 +421,41 @@ static void intel_test_pmu_version(void)
}
}
+static void amd_test_pmu_counters(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ unsigned int i;
+ uint64_t msr_val;
+
+ for (i = 0; i < ARRAY_SIZE(amd_arch_events); i++) {
+ vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_measure_loop);
+ vcpu_args_set(vcpu, 1, amd_arch_events[i]);
+ while (run_vcpu(vcpu, &msr_val) != UCALL_DONE)
+ TEST_ASSERT(msr_val, "Unexpected AMD counter values");
+
+ kvm_vm_free(vm);
+ }
+}
+
int main(int argc, char *argv[])
{
TEST_REQUIRE(get_kvm_param_bool("enable_pmu"));
- TEST_REQUIRE(host_cpu_is_intel);
- TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
- TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);
- TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
-
- intel_test_arch_events();
- intel_test_counters_num();
- intel_test_fixed_counters();
- intel_test_pmu_version();
+ if (host_cpu_is_intel) {
+ TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
+ TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
+
+ intel_test_arch_events();
+ intel_test_counters_num();
+ intel_test_fixed_counters();
+ intel_test_pmu_version();
+ } else if (host_cpu_is_amd) {
+ amd_test_pmu_counters();
+ } else {
+ TEST_FAIL("Unknown CPU vendor");
+ }
return 0;
}
--
2.39.3