2023-11-21 12:43:23

by Jinrong Liang

Subject: [PATCH 0/9] Test the consistency of AMD PMU counters and their features

Hi,

This series builds on top of the patch set below:
KVM: x86/pmu: selftests: Fixes and new tests
https://lore.kernel.org/all/[email protected]/

Add selftests for AMD PMU counters, covering the basic functionality of the
counters, the number of counters, AMD PMU versions, and the PerfCtrExtCore
and AMD PerfMonV2 features. Also add PMI tests for Intel gp and fixed counters.

All patches have been tested on both Intel and AMD machines, with one
exception: AMD Guest PerfMonV2 has not been tested, as my AMD machine does
not support PerfMonV2.

If the issue of generating #UD when forced emulation is not enabled has
already been fixed by Sean in the "KVM: x86/pmu: selftests: Fixes and new
tests" patch set, then the patch "KVM: selftests: Add forced emulation check
to fix #UD" can be dropped.

Any feedback or suggestions are greatly appreciated.

Sincerely,

Jinrong

Jinrong Liang (9):
KVM: selftests: Add forced emulation check to fix #UD
KVM: selftests: Test gp counters overflow interrupt handling
KVM: selftests: Test fixed counters overflow interrupt handling
KVM: selftests: Add x86 feature and properties for AMD PMU in
processor.h
KVM: selftests: Test AMD PMU performance counters basic functions
KVM: selftests: Test consistency of AMD PMU counters num
KVM: selftests: Test consistency of PMU MSRs with AMD PMU version
KVM: selftests: Test AMD Guest PerfCtrExtCore
KVM: selftests: Test AMD Guest PerfMonV2

.../selftests/kvm/include/x86_64/processor.h | 3 +
.../selftests/kvm/x86_64/pmu_counters_test.c | 446 ++++++++++++++++--
2 files changed, 400 insertions(+), 49 deletions(-)


base-commit: c076acf10c78c0d7e1aa50670e9cc4c91e8d59b4
prerequisite-patch-id: e33e3cd1ff495ffdccfeca5c8247dc8af9996b08
prerequisite-patch-id: a46a885c36e440f09701b553d5b27cb53f6b660f
prerequisite-patch-id: a9ac79bbf777b3824f0c61c45a68f1308574ab79
prerequisite-patch-id: cd7b82618866160b5ac77199b681148dfb96e341
prerequisite-patch-id: df5d1c23dd98d83ba3606e84eb5f0a4cd834f52c
prerequisite-patch-id: e374d7ce66c66650f23c066690ab816f81e6c3e3
prerequisite-patch-id: 11f133be9680787fe69173777ef1ae448b23168c
prerequisite-patch-id: eea75162480ca828fb70395d5c224003ea5ae246
prerequisite-patch-id: 6b7b22b6b56dd28bd80404e1a295abef60ecfa9a
prerequisite-patch-id: 2a078271ce109bb526ded7d6eec12b4adbe26cff
prerequisite-patch-id: e51c5c2f34fc9fe587ce0eea6f11dc84af89a946
prerequisite-patch-id: 8c1c276fc6571a99301d18aa00ad8280d5a29faf
prerequisite-patch-id: 37d2f2895e22bae420401e8620410cd628e4fb39
prerequisite-patch-id: 1abba01ee49d71c38386afa9abf1794130e32a2c
prerequisite-patch-id: a7486fd15be405a864527090d473609d44a99c3b
prerequisite-patch-id: 41993b2eef8d1e2286ec04b3c1aa1a757792bafe
prerequisite-patch-id: 9442b1b4c370b1a68c32eaa6ce3ee4c5d549efd0
prerequisite-patch-id: 89b2e89917a89713d6a63cbd594f6979f4d06578
prerequisite-patch-id: 1e9fe564790f41cfd52ebafc412434608187d8db
prerequisite-patch-id: 7d0b2b4af888fe09eae85ebfe56b4daed71aa08c
prerequisite-patch-id: 4e6910c90ae769b7556f6aec40f5d600285fe4d0
prerequisite-patch-id: 5248bc19b00c94188b803a4f41fa19172701d7b0
prerequisite-patch-id: f9310c716dbdcbe9e3672e29d9e576064845d917
prerequisite-patch-id: 21b2c6b4878d2ce5a315627efa247240335ede1e
prerequisite-patch-id: e01570f8ff40aacba38f86454572803bd68a1d59
prerequisite-patch-id: 65eea4f11ce5e8f9836651c593b7e563b0404459
--
2.39.3


2023-11-21 12:43:27

by Jinrong Liang

Subject: [PATCH 2/9] KVM: selftests: Test gp counters overflow interrupt handling

From: Jinrong Liang <[email protected]>

Add tests to verify that overflow interrupt handling for gp counters works
as expected, and opportunistically clean up the VM-creation and PMC MSR
helpers.

Signed-off-by: Jinrong Liang <[email protected]>
---
.../selftests/kvm/x86_64/pmu_counters_test.c | 121 ++++++++++++++----
1 file changed, 98 insertions(+), 23 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
index 7d8094a27209..1b108e6718fc 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -6,6 +6,7 @@
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <x86intrin.h>

+#include "apic.h"
#include "pmu.h"
#include "processor.h"

@@ -19,14 +20,15 @@
#define NUM_EXTRA_INSNS 7
#define NUM_INSNS_RETIRED (NUM_BRANCHES + NUM_EXTRA_INSNS)

+#define PMI_VECTOR 0x20
+
static uint8_t kvm_pmu_version;
static bool kvm_has_perf_caps;
static bool is_forced_emulation_enabled;
+static volatile bool pmi_irq_called;

static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
- void *guest_code,
- uint8_t pmu_version,
- uint64_t perf_capabilities)
+ void *guest_code)
{
struct kvm_vm *vm;

@@ -34,6 +36,17 @@ static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(*vcpu);

+ return vm;
+}
+
+static struct kvm_vm *intel_pmu_vm_create(struct kvm_vcpu **vcpu,
+ void *guest_code, uint8_t pmu_version,
+ uint64_t perf_capabilities)
+{
+ struct kvm_vm *vm;
+
+ vm = pmu_vm_create_with_one_vcpu(vcpu, guest_code);
+
sync_global_to_guest(vm, kvm_pmu_version);
sync_global_to_guest(vm, is_forced_emulation_enabled);

@@ -45,6 +58,7 @@ static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
vcpu_set_msr(*vcpu, MSR_IA32_PERF_CAPABILITIES, perf_capabilities);

vcpu_set_cpuid_property(*vcpu, X86_PROPERTY_PMU_VERSION, pmu_version);
+
return vm;
}

@@ -198,6 +212,15 @@ static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
return !(*(u64 *)&event);
}

+static uint32_t get_pmc_msr(void)
+{
+ if (this_cpu_has(X86_FEATURE_PDCM) &&
+ rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
+ return MSR_IA32_PMC0;
+ else
+ return MSR_IA32_PERFCTR0;
+}
+
static void guest_test_arch_event(uint8_t idx)
{
const struct {
@@ -226,18 +249,12 @@ static void guest_test_arch_event(uint8_t idx)
/* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */
bool guest_has_perf_global_ctrl = pmu_version >= 2;
struct kvm_x86_pmu_feature gp_event, fixed_event;
- uint32_t base_pmc_msr;
+ uint32_t base_pmc_msr = get_pmc_msr();
unsigned int i;

/* The host side shouldn't invoke this without a guest PMU. */
GUEST_ASSERT(pmu_version);

- if (this_cpu_has(X86_FEATURE_PDCM) &&
- rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
- base_pmc_msr = MSR_IA32_PMC0;
- else
- base_pmc_msr = MSR_IA32_PERFCTR0;
-
gp_event = intel_event_to_feature[idx].gp_event;
GUEST_ASSERT_EQ(idx, gp_event.f.bit);

@@ -293,8 +310,8 @@ static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
if (!pmu_version)
return;

- vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_arch_events,
- pmu_version, perf_capabilities);
+ vm = intel_pmu_vm_create(&vcpu, guest_test_arch_events, pmu_version,
+ perf_capabilities);

vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH,
length);
@@ -414,18 +431,12 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters

static void guest_test_gp_counters(void)
{
+ uint32_t base_msr = get_pmc_msr();
uint8_t nr_gp_counters = 0;
- uint32_t base_msr;

if (guest_get_pmu_version())
nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);

- if (this_cpu_has(X86_FEATURE_PDCM) &&
- rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
- base_msr = MSR_IA32_PMC0;
- else
- base_msr = MSR_IA32_PERFCTR0;
-
guest_rd_wr_counters(base_msr, MAX_NR_GP_COUNTERS, nr_gp_counters, 0);
GUEST_DONE();
}
@@ -436,8 +447,8 @@ static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities,
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;

- vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_gp_counters,
- pmu_version, perf_capabilities);
+ vm = intel_pmu_vm_create(&vcpu, guest_test_gp_counters, pmu_version,
+ perf_capabilities);

vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_GP_COUNTERS,
nr_gp_counters);
@@ -503,8 +514,8 @@ static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;

- vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_fixed_counters,
- pmu_version, perf_capabilities);
+ vm = intel_pmu_vm_create(&vcpu, guest_test_fixed_counters, pmu_version,
+ perf_capabilities);

vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK,
supported_bitmask);
@@ -516,6 +527,68 @@ static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
kvm_vm_free(vm);
}

+static void pmi_irq_handler(struct ex_regs *regs)
+{
+ pmi_irq_called = true;
+ x2apic_write_reg(APIC_EOI, 0);
+}
+
+static void guest_test_counters_pmi_workload(void)
+{
+ __asm__ __volatile__
+ ("sti\n"
+ "loop .\n"
+ "cli\n"
+ : "+c"((int){NUM_BRANCHES})
+ );
+}
+
+static void test_pmi_init_x2apic(void)
+{
+ x2apic_enable();
+ /* Route counter overflow PMIs to PMI_VECTOR via the performance LVT. */
+ x2apic_write_reg(APIC_LVTPC, PMI_VECTOR);
+ pmi_irq_called = false;
+}
+
+static void guest_test_gp_counter_pmi(void)
+{
+ uint8_t guest_pmu_version = guest_get_pmu_version();
+ uint32_t base_msr = get_pmc_msr();
+
+ test_pmi_init_x2apic();
+
+ wrmsr(base_msr,
+ (1ULL << this_cpu_property(X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH)) - 2);
+ wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_OS |
+ ARCH_PERFMON_EVENTSEL_ENABLE | ARCH_PERFMON_EVENTSEL_INT |
+ INTEL_ARCH_CPU_CYCLES);
+
+ if (guest_pmu_version >= 2)
+ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(0));
+ guest_test_counters_pmi_workload();
+
+ GUEST_ASSERT(pmi_irq_called);
+ GUEST_DONE();
+}
+
+static void test_intel_ovf_pmi(uint8_t pmu_version, uint64_t perf_capabilities)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ if (!pmu_version)
+ return;
+
+ vm = intel_pmu_vm_create(&vcpu, guest_test_gp_counter_pmi, pmu_version,
+ perf_capabilities);
+
+ vm_install_exception_handler(vm, PMI_VECTOR, pmi_irq_handler);
+ run_vcpu(vcpu);
+
+ kvm_vm_free(vm);
+}
+
static void test_intel_counters(void)
{
uint8_t nr_arch_events = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
@@ -596,6 +669,8 @@ static void test_intel_counters(void)
for (k = 0; k <= (BIT(nr_fixed_counters) - 1); k++)
test_fixed_counters(v, perf_caps[i], j, k);
}
+
+ test_intel_ovf_pmi(v, perf_caps[i]);
}
}
}
--
2.39.3
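
For reference, the PMI test arms the overflow interrupt by priming the gp
counter to within two events of wrap-around, so the workload's first couple
of cpu-cycle events push it past 2^width. A minimal sketch of the idiom,
using a hypothetical helper name (the patch open-codes the expression):

	/* Seed value that makes an N-bit PMC overflow after two more events. */
	static inline uint64_t pmc_overflow_seed(uint8_t bit_width)
	{
		return (1ULL << bit_width) - 2;
	}

With ARCH_PERFMON_EVENTSEL_INT set in the event select, the wrap raises a
PMI on PMI_VECTOR, which pmi_irq_handler() records in pmi_irq_called.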

2023-11-21 12:43:30

by Jinrong Liang

Subject: [PATCH 3/9] KVM: selftests: Test fixed counters overflow interrupt handling

From: Jinrong Liang <[email protected]>

Add tests to verify that overflow interrupt handling for fixed counters
works as expected.

Signed-off-by: Jinrong Liang <[email protected]>
---
.../selftests/kvm/x86_64/pmu_counters_test.c | 32 +++++++++++++++++--
1 file changed, 29 insertions(+), 3 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
index 1b108e6718fc..efd8c61e1c16 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -551,9 +551,8 @@ static void test_pmi_init_x2apic(void)
pmi_irq_called = false;
}

-static void guest_test_gp_counter_pmi(void)
+static void guest_test_gp_counter_pmi(uint8_t guest_pmu_version)
{
- uint8_t guest_pmu_version = guest_get_pmu_version();
uint32_t base_msr = get_pmc_msr();

test_pmi_init_x2apic();
@@ -569,6 +568,33 @@ static void guest_test_gp_counter_pmi(void)
guest_test_counters_pmi_workload();

GUEST_ASSERT(pmi_irq_called);
+}
+
+static void guest_test_fixed_counter_pmi(uint8_t guest_pmu_version)
+{
+ if (guest_pmu_version < 2)
+ return;
+
+ test_pmi_init_x2apic();
+
+ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ wrmsr(MSR_CORE_PERF_FIXED_CTR0,
+ (1ULL << this_cpu_property(X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH)) - 2);
+ wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(0, FIXED_PMC_ENABLE_PMI));
+
+ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(0));
+ guest_test_counters_pmi_workload();
+
+ GUEST_ASSERT(pmi_irq_called);
+}
+
+static void guest_test_counters_pmi(void)
+{
+ uint8_t guest_pmu_version = guest_get_pmu_version();
+
+ guest_test_gp_counter_pmi(guest_pmu_version);
+ guest_test_fixed_counter_pmi(guest_pmu_version);
+
GUEST_DONE();
}

@@ -580,7 +606,7 @@ static void test_intel_ovf_pmi(uint8_t pmu_version, uint64_t perf_capabilities)
if (!pmu_version)
return;

- vm = intel_pmu_vm_create(&vcpu, guest_test_gp_counter_pmi, pmu_version,
+ vm = intel_pmu_vm_create(&vcpu, guest_test_counters_pmi, pmu_version,
perf_capabilities);

vm_install_exception_handler(vm, PMI_VECTOR, pmi_irq_handler);
--
2.39.3
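
Fixed counters have no per-counter event select; enabling and PMI arming for
fixed counter i live in a 4-bit field of IA32_FIXED_CTR_CTRL. A sketch of
that layout per the Intel SDM (the FIXED_PMC_* macros used above live in the
selftests' pmu.h; the names below are illustrative):

	/* IA32_FIXED_CTR_CTRL carries 4 control bits per fixed counter. */
	#define FIXED_CTRL_SHIFT(i)	((i) * 4)
	#define FIXED_CTRL_KERNEL	BIT_ULL(0)	/* count in ring 0 */
	#define FIXED_CTRL_USER		BIT_ULL(1)	/* count in ring 3 */
	#define FIXED_CTRL_PMI		BIT_ULL(3)	/* interrupt on overflow */

	/*
	 * Arm fixed counter 0 to count in the kernel and interrupt on overflow:
	 * wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL,
	 *       (FIXED_CTRL_KERNEL | FIXED_CTRL_PMI) << FIXED_CTRL_SHIFT(0));
	 */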

2023-11-21 12:43:32

by Jinrong Liang

Subject: [PATCH 4/9] KVM: selftests: Add x86 feature and properties for AMD PMU in processor.h

From: Jinrong Liang <[email protected]>

Add x86 feature and properties for AMD PMU so that tests don't have
to manually retrieve the correct CPUID leaf+register, and so that the
resulting code is self-documenting.

Signed-off-by: Jinrong Liang <[email protected]>
---
tools/testing/selftests/kvm/include/x86_64/processor.h | 3 +++
1 file changed, 3 insertions(+)

diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index ba16d714b451..250ace4f14b8 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -176,6 +176,7 @@ struct kvm_x86_cpu_feature {
* Extended Leafs, a.k.a. AMD defined
*/
#define X86_FEATURE_SVM KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
+#define X86_FEATURE_PERFCTR_CORE KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 23)
#define X86_FEATURE_NX KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
#define X86_FEATURE_GBPAGES KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
#define X86_FEATURE_RDTSCP KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
@@ -192,6 +193,7 @@ struct kvm_x86_cpu_feature {
#define X86_FEATURE_VGIF KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
#define X86_FEATURE_SEV KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
#define X86_FEATURE_SEV_ES KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)
+#define X86_FEATURE_PERFMON_V2 KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 0)

/*
* KVM defined paravirt features.
@@ -276,6 +278,7 @@ struct kvm_x86_cpu_property {
#define X86_PROPERTY_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
#define X86_PROPERTY_MAX_VIRT_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
+#define X86_PROPERTY_PMU_NR_CORE_COUNTERS KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 0, 3)

#define X86_PROPERTY_MAX_CENTAUR_LEAF KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)

--
2.39.3
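
With these definitions in place, guest code can size the AMD PMU without
open-coding CPUID queries. A short usage sketch, mirroring what the next
patch does in set_amd_counters():

	uint8_t nr_counters;

	if (this_cpu_has(X86_FEATURE_PERFMON_V2))
		nr_counters = this_cpu_property(X86_PROPERTY_PMU_NR_CORE_COUNTERS);
	else if (this_cpu_has(X86_FEATURE_PERFCTR_CORE))
		nr_counters = 6;	/* PerfCtrExtCore: six core counters */
	else
		nr_counters = 4;	/* legacy AMD PMU: four counters */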

2023-11-21 12:43:42

by Jinrong Liang

Subject: [PATCH 5/9] KVM: selftests: Test AMD PMU performance counters basic functions

From: Jinrong Liang <[email protected]>

Add tests to check the basic functionality of the AMD PMU performance
counters.

Signed-off-by: Jinrong Liang <[email protected]>
---
.../selftests/kvm/x86_64/pmu_counters_test.c | 84 +++++++++++++++++--
1 file changed, 75 insertions(+), 9 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
index efd8c61e1c16..3c4081a508b0 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -21,6 +21,8 @@
#define NUM_INSNS_RETIRED (NUM_BRANCHES + NUM_EXTRA_INSNS)

#define PMI_VECTOR 0x20
+#define AMD64_NR_COUNTERS 4
+#define AMD64_NR_COUNTERS_CORE 6

static uint8_t kvm_pmu_version;
static bool kvm_has_perf_caps;
@@ -411,7 +413,6 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters
rdpmc_idx = i;
if (base_msr == MSR_CORE_PERF_FIXED_CTR0)
rdpmc_idx |= INTEL_RDPMC_FIXED;
-
guest_test_rdpmc(rdpmc_idx, expect_success, expected_val);

/*
@@ -421,7 +422,6 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters
*/
GUEST_ASSERT(!expect_success || !pmu_has_fast_mode);
rdpmc_idx |= INTEL_RDPMC_FAST;
-
guest_test_rdpmc(rdpmc_idx, false, -1ull);

vector = wrmsr_safe(msr, 0);
@@ -701,19 +701,85 @@ static void test_intel_counters(void)
}
}

+/*
+ * The legacy K7 event-select/counter MSRs are contiguous, while the extended
+ * core MSRs interleave CTLn/CTRn, hence the per-layout MSR stride.
+ */
+static void set_amd_counters(uint8_t *nr_amd_counters, uint64_t *ctrl_msr,
+ uint32_t *pmc_msr, uint8_t *msr_stride)
+{
+ if (this_cpu_has(X86_FEATURE_PERFMON_V2)) {
+ *nr_amd_counters = this_cpu_property(X86_PROPERTY_PMU_NR_CORE_COUNTERS);
+ *ctrl_msr = MSR_F15H_PERF_CTL0;
+ *pmc_msr = MSR_F15H_PERF_CTR0;
+ *msr_stride = 2;
+ } else if (this_cpu_has(X86_FEATURE_PERFCTR_CORE)) {
+ *nr_amd_counters = AMD64_NR_COUNTERS_CORE;
+ *ctrl_msr = MSR_F15H_PERF_CTL0;
+ *pmc_msr = MSR_F15H_PERF_CTR0;
+ *msr_stride = 2;
+ } else {
+ *nr_amd_counters = AMD64_NR_COUNTERS;
+ *ctrl_msr = MSR_K7_EVNTSEL0;
+ *pmc_msr = MSR_K7_PERFCTR0;
+ *msr_stride = 1;
+ }
+}
+
+static void guest_test_amd_counters(void)
+{
+ bool guest_pmu_is_perfmonv2 = this_cpu_has(X86_FEATURE_PERFMON_V2);
+ uint8_t nr_amd_counters, msr_stride;
+ uint64_t ctrl_msr;
+ unsigned int i, j;
+ uint32_t pmc_msr;
+
+ set_amd_counters(&nr_amd_counters, &ctrl_msr, &pmc_msr, &msr_stride);
+
+ for (i = 0; i < nr_amd_counters; i++) {
+ for (j = 0; j < NR_AMD_ZEN_EVENTS; j++) {
+ wrmsr(pmc_msr + i * msr_stride, 0);
+ wrmsr(ctrl_msr + i * msr_stride, ARCH_PERFMON_EVENTSEL_OS |
+ ARCH_PERFMON_EVENTSEL_ENABLE | amd_pmu_zen_events[j]);
+
+ if (guest_pmu_is_perfmonv2)
+ wrmsr(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, BIT_ULL(i));
+
+ __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+
+ GUEST_ASSERT(rdmsr(pmc_msr + i * msr_stride));
+ }
+ }
+
+ GUEST_DONE();
+}
+
+static void test_amd_zen_events(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_amd_counters);
+
+ run_vcpu(vcpu);
+ kvm_vm_free(vm);
+}
+
int main(int argc, char *argv[])
{
TEST_REQUIRE(kvm_is_pmu_enabled());

- TEST_REQUIRE(host_cpu_is_intel);
- TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
- TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);
+ if (host_cpu_is_intel) {
+ TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
+ TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));

- kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
- kvm_has_perf_caps = kvm_cpu_has(X86_FEATURE_PDCM);
- is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();
+ kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
+ kvm_has_perf_caps = kvm_cpu_has(X86_FEATURE_PDCM);
+ is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();

- test_intel_counters();
+ test_intel_counters();
+ } else if (host_cpu_is_amd) {
+ test_amd_zen_events();
+ } else {
+ TEST_FAIL("Unknown CPU vendor");
+ }

return 0;
}
--
2.39.3
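
The msr_stride plumbed through set_amd_counters() reflects the two AMD
counter MSR layouts: the legacy K7 event selects and counters are each
contiguous, while the extended core MSRs interleave control and counter
registers. A sketch of the resulting addressing, with MSR values per the
AMD APM (the helper below is illustrative, not part of the series):

	/*
	 * Legacy (stride 1):   MSR_K7_EVNTSEL0..3 = 0xc0010000..0xc0010003
	 *                      MSR_K7_PERFCTR0..3 = 0xc0010004..0xc0010007
	 * Extended (stride 2): PERF_CTLn = 0xc0010200 + 2 * n
	 *                      PERF_CTRn = 0xc0010201 + 2 * n
	 */
	static inline uint32_t amd_pmc_msr(uint32_t base, unsigned int i,
					   uint8_t stride)
	{
		return base + i * stride;
	}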