2023-11-29 07:27:57

by Shaoqin Huang

Subject: [PATCH v2 0/5] KVM: selftests: aarch64: Introduce pmu_event_filter_test

The test is inspired by the pmu_event_filter_test implemented for x86. The
arm64 platform provides the same ability to set the PMU event filter through
the KVM_ARM_VCPU_PMU_V3_FILTER attribute, so add an equivalent test for arm64.
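
For context, setting such a filter from userspace boils down to a
KVM_SET_DEVICE_ATTR call on the vCPU fd, issued before the
KVM_ARM_VCPU_PMU_V3_INIT attribute. A minimal raw-ioctl sketch (error
handling trimmed; vcpu_fd is assumed to be a vCPU created with the PMUv3
feature, and 0x21 is the BR_RETIRED common event used later in the series):

#include <stdint.h>
#include <sys/ioctl.h>
#include <err.h>
#include <linux/kvm.h>

static void allow_br_retired(int vcpu_fd)
{
        struct kvm_pmu_event_filter filter = {
                .base_event = 0x21,             /* BR_RETIRED */
                .nevents    = 1,
                .action     = KVM_PMU_EVENT_ALLOW,
        };
        struct kvm_device_attr attr = {
                .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                .attr  = KVM_ARM_VCPU_PMU_V3_FILTER,
                .addr  = (uint64_t)&filter,
        };

        /* Must happen before KVM_ARM_VCPU_PMU_V3_INIT is set. */
        if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
                err(1, "KVM_SET_DEVICE_ATTR(PMU_V3_FILTER)");
}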

The series first moves some common PMU code from vpmu_counter_access to
lib/aarch64/vpmu.c and include/aarch64/vpmu.h, so it can be reused by
pmu_event_filter_test. It then fixes a bug in [enable|disable]_counter and
finally implements the test itself.
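
For reference, the [enable|disable]_counter issue is that disable_counter()
reads PMCNTENSET_EL0 and writes that value ORed with BIT(idx) into
PMCNTENCLR_EL0, which clears every currently enabled counter rather than only
counter idx. Since PMCNTENCLR_EL0 is write-one-to-clear, the fix presumably
comes down to writing only the requested bit, along these lines:

static inline void disable_counter(int idx)
{
        write_sysreg(BIT(idx), pmcntenclr_el0);
        isb();
}

(enable_counter() has a similar, though harmless, read-modify-write of
PMCNTENSET_EL0.)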

Changelog:
----------
v1->v2:
- Improve the commit message. [Eric]
- Fix the bug in [enable|disable]_counter. [Raghavendra & Marc]
- Add a check for whether KVM has the KVM_ARM_VCPU_PMU_V3_FILTER attribute.
- Check whether the host PMU supports the test events through PMCEID0.
- Split test_invalid_filter() into a separate patch. [Eric]

v1: https://lore.kernel.org/all/[email protected]/

Shaoqin Huang (5):
KVM: selftests: aarch64: Make the [create|destroy]_vpmu_vm() public
KVM: selftests: aarch64: Move pmu helper functions into vpmu.h
KVM: selftests: aarch64: Fix the buggy [enable|disable]_counter
KVM: selftests: aarch64: Introduce pmu_event_filter_test
KVM: selftests: aarch64: Add invalid filter test in
pmu_event_filter_test

tools/testing/selftests/kvm/Makefile | 2 +
.../kvm/aarch64/pmu_event_filter_test.c | 267 ++++++++++++++++++
.../kvm/aarch64/vpmu_counter_access.c | 218 ++------------
.../selftests/kvm/include/aarch64/vpmu.h | 135 +++++++++
.../testing/selftests/kvm/lib/aarch64/vpmu.c | 74 +++++
5 files changed, 502 insertions(+), 194 deletions(-)
create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
create mode 100644 tools/testing/selftests/kvm/include/aarch64/vpmu.h
create mode 100644 tools/testing/selftests/kvm/lib/aarch64/vpmu.c

--
2.40.1


2023-11-29 07:28:05

by Shaoqin Huang

Subject: [PATCH v2 5/5] KVM: selftests: aarch64: Add invalid filter test in pmu_event_filter_test

Add an invalid filter test to double check that setting an invalid filter through
the KVM_ARM_VCPU_PMU_V3_FILTER attribute returns the expected error.

Signed-off-by: Shaoqin Huang <[email protected]>
---
.../kvm/aarch64/pmu_event_filter_test.c | 36 +++++++++++++++++++
1 file changed, 36 insertions(+)

diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
index 0e652fbdb37a..4c375417b194 100644
--- a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
@@ -7,6 +7,7 @@
* This test checks if the guest only see the limited pmu event that userspace
* sets, if the guest can use those events which user allow, and if the guest
* can't use those events which user deny.
+ * It also checks that setting invalid filter ranges return the expected error.
* This test runs only when KVM_CAP_ARM_PMU_V3, KVM_ARM_VCPU_PMU_V3_FILTER
* is supported on the host.
*/
@@ -197,6 +198,39 @@ static void for_each_test(void)
run_test(t);
}

+static void set_invalid_filter(struct vpmu_vm *vm, void *arg)
+{
+ struct kvm_pmu_event_filter invalid;
+ struct kvm_device_attr attr = {
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+ .attr = KVM_ARM_VCPU_PMU_V3_FILTER,
+ .addr = (uint64_t)&invalid,
+ };
+ int ret = 0;
+
+ /* The max event number is (1 << 16), set a range large than it. */
+ invalid = __DEFINE_FILTER(BIT(15), BIT(15)+1, 0);
+ ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
+ TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter range "
+ "ret = %d, errno = %d (expected ret = -1, errno = EINVAL)",
+ ret, errno);
+
+ ret = 0;
+
+ /* Set the Invalid action. */
+ invalid = __DEFINE_FILTER(0, 1, 3);
+ ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
+ TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter action "
+ "ret = %d, errno = %d (expected ret = -1, errno = EINVAL)",
+ ret, errno);
+}
+
+static void test_invalid_filter(void)
+{
+ vpmu_vm = __create_vpmu_vm(guest_code, set_invalid_filter, NULL);
+ destroy_vpmu_vm(vpmu_vm);
+}
+
static bool kvm_supports_pmu_event_filter(void)
{
int r;
@@ -228,4 +262,6 @@ int main(void)
TEST_REQUIRE(host_pmu_supports_events());

for_each_test();
+
+ test_invalid_filter();
}
--
2.40.1

2023-11-29 07:28:06

by Shaoqin Huang

Subject: [PATCH v2 4/5] KVM: selftests: aarch64: Introduce pmu_event_filter_test

Introduce pmu_event_filter_test for arm64 platforms. The test configures
PMUv3 for a vCPU, sets different PMU event filters for the vCPU, and
checks that the guest can use the events the user allows and cannot use
the events the user denies.

The test refactors create_vpmu_vm() into a wrapper around
__create_vpmu_vm(), which allows some extra init code to run before
KVM_ARM_VCPU_PMU_V3_INIT.

The test uses the KVM_ARM_VCPU_PMU_V3_FILTER attribute to set the PMU
event filter in KVM. It filters two common events, branches_retired and
instructions_retired, lets the guest use the two events in the PMU, and
checks that the results are as expected.

Signed-off-by: Shaoqin Huang <[email protected]>
---
tools/testing/selftests/kvm/Makefile | 1 +
.../kvm/aarch64/pmu_event_filter_test.c | 231 ++++++++++++++++++
.../selftests/kvm/include/aarch64/vpmu.h | 4 +
.../testing/selftests/kvm/lib/aarch64/vpmu.c | 14 +-
4 files changed, 248 insertions(+), 2 deletions(-)
create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index b60852c222ac..5f126e1a1dbf 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
+TEST_GEN_PROGS_aarch64 += aarch64/pmu_event_filter_test
TEST_GEN_PROGS_aarch64 += aarch64/psci_test
TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
new file mode 100644
index 000000000000..0e652fbdb37a
--- /dev/null
+++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pmu_event_filter_test - Test user limit pmu event for guest.
+ *
+ * Copyright (c) 2023 Red Hat, Inc.
+ *
+ * This test checks if the guest only see the limited pmu event that userspace
+ * sets, if the guest can use those events which user allow, and if the guest
+ * can't use those events which user deny.
+ * This test runs only when KVM_CAP_ARM_PMU_V3, KVM_ARM_VCPU_PMU_V3_FILTER
+ * is supported on the host.
+ */
+#include <kvm_util.h>
+#include <processor.h>
+#include <vgic.h>
+#include <vpmu.h>
+#include <test_util.h>
+#include <perf/arm_pmuv3.h>
+
+struct {
+ uint64_t branches_retired;
+ uint64_t instructions_retired;
+} pmc_results;
+
+static struct vpmu_vm *vpmu_vm;
+static uint64_t pmceid0;
+
+#define FILTER_NR 10
+
+struct test_desc {
+ const char *name;
+ void (*check_result)(void);
+ struct kvm_pmu_event_filter filter[FILTER_NR];
+};
+
+#define __DEFINE_FILTER(base, num, act) \
+ ((struct kvm_pmu_event_filter) { \
+ .base_event = base, \
+ .nevents = num, \
+ .action = act, \
+ })
+
+#define DEFINE_FILTER(base, act) __DEFINE_FILTER(base, 1, act)
+
+#define EMPTY_FILTER { 0 }
+
+#define SW_INCR 0x0
+#define INST_RETIRED 0x8
+#define BR_RETIRED 0x21
+
+#define NUM_BRANCHES 10
+
+static void run_and_measure_loop(void)
+{
+ asm volatile(
+ " mov x10, %[loop]\n"
+ "1: sub x10, x10, #1\n"
+ " cmp x10, #0x0\n"
+ " b.gt 1b\n"
+ :
+ : [loop] "r" (NUM_BRANCHES)
+ : "x10", "cc");
+}
+
+static void guest_code(void)
+{
+ uint64_t pmcr = read_sysreg(pmcr_el0);
+
+ pmu_disable_reset();
+
+ write_pmevtypern(0, BR_RETIRED);
+ write_pmevtypern(1, INST_RETIRED);
+ enable_counter(0);
+ enable_counter(1);
+ write_sysreg(pmcr | ARMV8_PMU_PMCR_E, pmcr_el0);
+
+ run_and_measure_loop();
+
+ write_sysreg(pmcr, pmcr_el0);
+
+ pmc_results.branches_retired = read_sysreg(pmevcntr0_el0);
+ pmc_results.instructions_retired = read_sysreg(pmevcntr1_el0);
+
+ GUEST_DONE();
+}
+
+static void guest_get_pmceid0(void)
+{
+ uint64_t pmceid0 = read_sysreg(pmceid0_el0);
+
+ GUEST_PRINTF("%lx\n", pmceid0);
+
+ GUEST_DONE();
+}
+
+static void pmu_event_filter_init(struct vpmu_vm *vm, void *arg)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+ .attr = KVM_ARM_VCPU_PMU_V3_FILTER,
+ };
+ struct kvm_pmu_event_filter *filter = (struct kvm_pmu_event_filter *)arg;
+
+ while (filter && filter->nevents != 0) {
+ attr.addr = (uint64_t)filter;
+ vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
+ filter++;
+ }
+}
+
+static void create_vpmu_vm_with_filter(void *guest_code,
+ struct kvm_pmu_event_filter *filter)
+{
+ vpmu_vm = __create_vpmu_vm(guest_code, pmu_event_filter_init, filter);
+}
+
+static void run_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct ucall uc;
+
+ while (1) {
+ vcpu_run(vcpu);
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_DONE:
+ return;
+ case UCALL_PRINTF:
+ pmceid0 = strtoll(uc.buffer, NULL, 16);
+ break;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+}
+
+static void check_pmc_counting(void)
+{
+ uint64_t br = pmc_results.branches_retired;
+ uint64_t ir = pmc_results.instructions_retired;
+
+ TEST_ASSERT(br && br == NUM_BRANCHES, "Branch instructions retired = "
+ "%lu (expected %u)", br, NUM_BRANCHES);
+ TEST_ASSERT(ir, "Instructions retired = %lu (expected > 0)", ir);
+}
+
+static void check_pmc_not_counting(void)
+{
+ uint64_t br = pmc_results.branches_retired;
+ uint64_t ir = pmc_results.instructions_retired;
+
+ TEST_ASSERT(!br, "Branch instructions retired = %lu (expected 0)", br);
+ TEST_ASSERT(!ir, "Instructions retired = %lu (expected 0)", ir);
+}
+
+static void run_vcpu_and_sync_pmc_results(void)
+{
+ memset(&pmc_results, 0, sizeof(pmc_results));
+ sync_global_to_guest(vpmu_vm->vm, pmc_results);
+
+ run_vcpu(vpmu_vm->vcpu);
+
+ sync_global_from_guest(vpmu_vm->vm, pmc_results);
+}
+
+static void run_test(struct test_desc *t)
+{
+ pr_debug("Test: %s\n", t->name);
+
+ create_vpmu_vm_with_filter(guest_code, t->filter);
+
+ run_vcpu_and_sync_pmc_results();
+
+ t->check_result();
+
+ destroy_vpmu_vm(vpmu_vm);
+}
+
+static struct test_desc tests[] = {
+ {"without_filter", check_pmc_counting, { EMPTY_FILTER }},
+ {"member_allow_filter", check_pmc_counting,
+ {DEFINE_FILTER(SW_INCR, 0), DEFINE_FILTER(INST_RETIRED, 0),
+ DEFINE_FILTER(BR_RETIRED, 0), EMPTY_FILTER}},
+ {"member_deny_filter", check_pmc_not_counting,
+ {DEFINE_FILTER(SW_INCR, 1), DEFINE_FILTER(INST_RETIRED, 1),
+ DEFINE_FILTER(BR_RETIRED, 1), EMPTY_FILTER}},
+ {"not_member_deny_filter", check_pmc_counting,
+ {DEFINE_FILTER(SW_INCR, 1), EMPTY_FILTER}},
+ {"not_member_allow_filter", check_pmc_not_counting,
+ {DEFINE_FILTER(SW_INCR, 0), EMPTY_FILTER}},
+ { 0 }
+};
+
+static void for_each_test(void)
+{
+ struct test_desc *t;
+
+ for (t = &tests[0]; t->name; t++)
+ run_test(t);
+}
+
+static bool kvm_supports_pmu_event_filter(void)
+{
+ int r;
+
+ vpmu_vm = create_vpmu_vm(guest_code);
+
+ r = __kvm_has_device_attr(vpmu_vm->vcpu->fd, KVM_ARM_VCPU_PMU_V3_CTRL,
+ KVM_ARM_VCPU_PMU_V3_FILTER);
+
+ destroy_vpmu_vm(vpmu_vm);
+ return !r;
+}
+
+static bool host_pmu_supports_events(void)
+{
+ vpmu_vm = create_vpmu_vm(guest_get_pmceid0);
+
+ run_vcpu(vpmu_vm->vcpu);
+
+ destroy_vpmu_vm(vpmu_vm);
+
+ return pmceid0 & (BR_RETIRED | INST_RETIRED);
+}
+
+int main(void)
+{
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
+ TEST_REQUIRE(kvm_supports_pmu_event_filter());
+ TEST_REQUIRE(host_pmu_supports_events());
+
+ for_each_test();
+}
diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
index 644dae3814b5..f103d0824f8a 100644
--- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
+++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
@@ -18,6 +18,10 @@ struct vpmu_vm {
int gic_fd;
};

+struct vpmu_vm *__create_vpmu_vm(void *guest_code,
+ void (*init_pmu)(struct vpmu_vm *vm, void *arg),
+ void *arg);
+
struct vpmu_vm *create_vpmu_vm(void *guest_code);

void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
index b3de8fdc555e..76ea03d607f1 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
@@ -7,8 +7,9 @@
#include <vpmu.h>
#include <perf/arm_pmuv3.h>

-/* Create a VM that has one vCPU with PMUv3 configured. */
-struct vpmu_vm *create_vpmu_vm(void *guest_code)
+struct vpmu_vm *__create_vpmu_vm(void *guest_code,
+ void (*init_pmu)(struct vpmu_vm *vm, void *arg),
+ void *arg)
{
struct kvm_vcpu_init init;
uint8_t pmuver;
@@ -50,12 +51,21 @@ struct vpmu_vm *create_vpmu_vm(void *guest_code)
"Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);

/* Initialize vPMU */
+ if (init_pmu)
+ init_pmu(vpmu_vm, arg);
+
vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);

return vpmu_vm;
}

+/* Create a VM that has one vCPU with PMUv3 configured. */
+struct vpmu_vm *create_vpmu_vm(void *guest_code)
+{
+ return __create_vpmu_vm(guest_code, NULL, NULL);
+}
+
void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm)
{
close(vpmu_vm->gic_fd);
--
2.40.1

2023-11-29 07:28:17

by Shaoqin Huang

Subject: [PATCH v2 2/5] KVM: selftests: aarch64: Move pmu helper functions into vpmu.h

Move those pmu helper functions into include/aarch64/vpmu.h, thus
it can be used by other pmu test.

No functional change intended.

Reviewed-by: Eric Auger <[email protected]>
Signed-off-by: Shaoqin Huang <[email protected]>
---
.../kvm/aarch64/vpmu_counter_access.c | 118 -----------------
.../selftests/kvm/include/aarch64/vpmu.h | 119 ++++++++++++++++++
2 files changed, 119 insertions(+), 118 deletions(-)

diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
index 17305408a334..62d6315790ab 100644
--- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
+++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
@@ -20,12 +20,6 @@
#include <perf/arm_pmuv3.h>
#include <linux/bitfield.h>

-/* The max number of the PMU event counters (excluding the cycle counter) */
-#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
-
-/* The cycle counter bit position that's common among the PMU registers */
-#define ARMV8_PMU_CYCLE_IDX 31
-
static struct vpmu_vm *vpmu_vm;

struct pmreg_sets {
@@ -35,118 +29,6 @@ struct pmreg_sets {

#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}

-static uint64_t get_pmcr_n(uint64_t pmcr)
-{
- return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
-}
-
-static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
-{
- *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
- *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
-}
-
-static uint64_t get_counters_mask(uint64_t n)
-{
- uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
-
- if (n)
- mask |= GENMASK(n - 1, 0);
- return mask;
-}
-
-/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
-static inline unsigned long read_sel_evcntr(int sel)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- return read_sysreg(pmxevcntr_el0);
-}
-
-/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
-static inline void write_sel_evcntr(int sel, unsigned long val)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- write_sysreg(val, pmxevcntr_el0);
- isb();
-}
-
-/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
-static inline unsigned long read_sel_evtyper(int sel)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- return read_sysreg(pmxevtyper_el0);
-}
-
-/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
-static inline void write_sel_evtyper(int sel, unsigned long val)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- write_sysreg(val, pmxevtyper_el0);
- isb();
-}
-
-static inline void enable_counter(int idx)
-{
- uint64_t v = read_sysreg(pmcntenset_el0);
-
- write_sysreg(BIT(idx) | v, pmcntenset_el0);
- isb();
-}
-
-static inline void disable_counter(int idx)
-{
- uint64_t v = read_sysreg(pmcntenset_el0);
-
- write_sysreg(BIT(idx) | v, pmcntenclr_el0);
- isb();
-}
-
-static void pmu_disable_reset(void)
-{
- uint64_t pmcr = read_sysreg(pmcr_el0);
-
- /* Reset all counters, disabling them */
- pmcr &= ~ARMV8_PMU_PMCR_E;
- write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
- isb();
-}
-
-#define RETURN_READ_PMEVCNTRN(n) \
- return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
-{
- PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
- return 0;
-}
-
-#define WRITE_PMEVCNTRN(n) \
- write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
- isb();
-}
-
-#define READ_PMEVTYPERN(n) \
- return read_sysreg(pmevtyper##n##_el0)
-static unsigned long read_pmevtypern(int n)
-{
- PMEVN_SWITCH(n, READ_PMEVTYPERN);
- return 0;
-}
-
-#define WRITE_PMEVTYPERN(n) \
- write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
- isb();
-}
-
/*
* The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
* accessors that test cases will use. Each of the accessors will
diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
index 0a56183644ee..e0cc1ca1c4b7 100644
--- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
+++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
@@ -1,10 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */

#include <kvm_util.h>
+#include <perf/arm_pmuv3.h>

#define GICD_BASE_GPA 0x8000000ULL
#define GICR_BASE_GPA 0x80A0000ULL

+/* The max number of the PMU event counters (excluding the cycle counter) */
+#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
+
+/* The cycle counter bit position that's common among the PMU registers */
+#define ARMV8_PMU_CYCLE_IDX 31
+
struct vpmu_vm {
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
@@ -14,3 +21,115 @@ struct vpmu_vm {
struct vpmu_vm *create_vpmu_vm(void *guest_code);

void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
+
+static inline uint64_t get_pmcr_n(uint64_t pmcr)
+{
+ return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+}
+
+static inline void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
+{
+ *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+ *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+}
+
+static inline uint64_t get_counters_mask(uint64_t n)
+{
+ uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
+
+ if (n)
+ mask |= GENMASK(n - 1, 0);
+ return mask;
+}
+
+/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
+static inline unsigned long read_sel_evcntr(int sel)
+{
+ write_sysreg(sel, pmselr_el0);
+ isb();
+ return read_sysreg(pmxevcntr_el0);
+}
+
+/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
+static inline void write_sel_evcntr(int sel, unsigned long val)
+{
+ write_sysreg(sel, pmselr_el0);
+ isb();
+ write_sysreg(val, pmxevcntr_el0);
+ isb();
+}
+
+/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
+static inline unsigned long read_sel_evtyper(int sel)
+{
+ write_sysreg(sel, pmselr_el0);
+ isb();
+ return read_sysreg(pmxevtyper_el0);
+}
+
+/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
+static inline void write_sel_evtyper(int sel, unsigned long val)
+{
+ write_sysreg(sel, pmselr_el0);
+ isb();
+ write_sysreg(val, pmxevtyper_el0);
+ isb();
+}
+
+static inline void enable_counter(int idx)
+{
+ uint64_t v = read_sysreg(pmcntenset_el0);
+
+ write_sysreg(BIT(idx) | v, pmcntenset_el0);
+ isb();
+}
+
+static inline void disable_counter(int idx)
+{
+ uint64_t v = read_sysreg(pmcntenset_el0);
+
+ write_sysreg(BIT(idx) | v, pmcntenclr_el0);
+ isb();
+}
+
+static inline void pmu_disable_reset(void)
+{
+ uint64_t pmcr = read_sysreg(pmcr_el0);
+
+ /* Reset all counters, disabling them */
+ pmcr &= ~ARMV8_PMU_PMCR_E;
+ write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
+ isb();
+}
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(pmevcntr##n##_el0)
+static inline unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, pmevcntr##n##_el0)
+static inline void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+ isb();
+}
+
+#define READ_PMEVTYPERN(n) \
+ return read_sysreg(pmevtyper##n##_el0)
+static inline unsigned long read_pmevtypern(int n)
+{
+ PMEVN_SWITCH(n, READ_PMEVTYPERN);
+ return 0;
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, pmevtyper##n##_el0)
+static inline void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+ isb();
+}
--
2.40.1

2023-12-14 13:47:51

by Eric Auger

Subject: Re: [PATCH v2 4/5] KVM: selftests: aarch64: Introduce pmu_event_filter_test

Hi Shaoqin,

On 11/29/23 08:27, Shaoqin Huang wrote:
> Introduce pmu_event_filter_test for arm64 platforms. The test configures
> PMUv3 for a vCPU, sets different PMU event filters for the vCPU, and
> checks that the guest can use the events the user allows and cannot use
> the events the user denies.
>
> The test refactors create_vpmu_vm() into a wrapper around
> __create_vpmu_vm(), which allows some extra init code to run before
> KVM_ARM_VCPU_PMU_V3_INIT.
>
> The test uses the KVM_ARM_VCPU_PMU_V3_FILTER attribute to set the PMU
> event filter in KVM. It filters two common events, branches_retired and
> instructions_retired, lets the guest use the two events in the PMU, and
> checks that the results are as expected.
>
> Signed-off-by: Shaoqin Huang <[email protected]>
> ---
> tools/testing/selftests/kvm/Makefile | 1 +
> .../kvm/aarch64/pmu_event_filter_test.c | 231 ++++++++++++++++++
> .../selftests/kvm/include/aarch64/vpmu.h | 4 +
> .../testing/selftests/kvm/lib/aarch64/vpmu.c | 14 +-
> 4 files changed, 248 insertions(+), 2 deletions(-)
> create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
>
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index b60852c222ac..5f126e1a1dbf 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
> TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
> TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
> TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
> +TEST_GEN_PROGS_aarch64 += aarch64/pmu_event_filter_test
> TEST_GEN_PROGS_aarch64 += aarch64/psci_test
> TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
> TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
> diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
> new file mode 100644
> index 000000000000..0e652fbdb37a
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
> @@ -0,0 +1,231 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * pmu_event_filter_test - Test user limit pmu event for guest.
> + *
> + * Copyright (c) 2023 Red Hat, Inc.
> + *
> + * This test checks if the guest only see the limited pmu event that userspace
> + * sets, if the guest can use those events which user allow, and if the guest
> + * can't use those events which user deny.
> + * This test runs only when KVM_CAP_ARM_PMU_V3, KVM_ARM_VCPU_PMU_V3_FILTER
> + * is supported on the host.
> + */
> +#include <kvm_util.h>
> +#include <processor.h>
> +#include <vgic.h>
> +#include <vpmu.h>
> +#include <test_util.h>
> +#include <perf/arm_pmuv3.h>
> +
> +struct {
> + uint64_t branches_retired;
> + uint64_t instructions_retired;
> +} pmc_results;
> +
> +static struct vpmu_vm *vpmu_vm;
> +static uint64_t pmceid0;
> +
> +#define FILTER_NR 10
> +
> +struct test_desc {
> + const char *name;
> + void (*check_result)(void);
> + struct kvm_pmu_event_filter filter[FILTER_NR];
> +};
> +
> +#define __DEFINE_FILTER(base, num, act) \
> + ((struct kvm_pmu_event_filter) { \
> + .base_event = base, \
> + .nevents = num, \
> + .action = act, \
> + })
> +
> +#define DEFINE_FILTER(base, act) __DEFINE_FILTER(base, 1, act)
> +
> +#define EMPTY_FILTER { 0 }
> +
> +#define SW_INCR 0x0
> +#define INST_RETIRED 0x8
> +#define BR_RETIRED 0x21
> +
> +#define NUM_BRANCHES 10
> +
> +static void run_and_measure_loop(void)
> +{
> + asm volatile(
> + " mov x10, %[loop]\n"
> + "1: sub x10, x10, #1\n"
> + " cmp x10, #0x0\n"
> + " b.gt 1b\n"
> + :
> + : [loop] "r" (NUM_BRANCHES)
> + : "x10", "cc");
> +}
> +
> +static void guest_code(void)
> +{
> + uint64_t pmcr = read_sysreg(pmcr_el0);
> +
> + pmu_disable_reset();
> +
> + write_pmevtypern(0, BR_RETIRED);
> + write_pmevtypern(1, INST_RETIRED);
> + enable_counter(0);
> + enable_counter(1);
> + write_sysreg(pmcr | ARMV8_PMU_PMCR_E, pmcr_el0);
> +
> + run_and_measure_loop();
> +
> + write_sysreg(pmcr, pmcr_el0);
> +
> + pmc_results.branches_retired = read_sysreg(pmevcntr0_el0);
> + pmc_results.instructions_retired = read_sysreg(pmevcntr1_el0);
> +
> + GUEST_DONE();
> +}
> +
> +static void guest_get_pmceid0(void)
> +{
> + uint64_t pmceid0 = read_sysreg(pmceid0_el0);
> +
> + GUEST_PRINTF("%lx\n", pmceid0);
> +
> + GUEST_DONE();
> +}
> +
> +static void pmu_event_filter_init(struct vpmu_vm *vm, void *arg)
> +{
> + struct kvm_device_attr attr = {
> + .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> + .attr = KVM_ARM_VCPU_PMU_V3_FILTER,
> + };
> + struct kvm_pmu_event_filter *filter = (struct kvm_pmu_event_filter *)arg;
> +
> + while (filter && filter->nevents != 0) {
> + attr.addr = (uint64_t)filter;
> + vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
> + filter++;
> + }
> +}
> +
> +static void create_vpmu_vm_with_filter(void *guest_code,
> + struct kvm_pmu_event_filter *filter)
> +{
> + vpmu_vm = __create_vpmu_vm(guest_code, pmu_event_filter_init, filter);
> +}
> +
> +static void run_vcpu(struct kvm_vcpu *vcpu)
> +{
> + struct ucall uc;
> +
> + while (1) {
> + vcpu_run(vcpu);
> + switch (get_ucall(vcpu, &uc)) {
> + case UCALL_DONE:
> + return;
> + case UCALL_PRINTF:
> + pmceid0 = strtoll(uc.buffer, NULL, 16);
> + break;
> + default:
> + TEST_FAIL("Unknown ucall %lu", uc.cmd);
> + }
> + }
> +}
> +
> +static void check_pmc_counting(void)
> +{
> + uint64_t br = pmc_results.branches_retired;
> + uint64_t ir = pmc_results.instructions_retired;
> +
> + TEST_ASSERT(br && br == NUM_BRANCHES, "Branch instructions retired = "
> + "%lu (expected %u)", br, NUM_BRANCHES);
have you tested on several machines? My experience with some events
(MEM_ACCESS for instance) is that you have variance (sometimes
significant) on some event count. I am a little bit scared that having
this br == NUM_BRANCHES check without taking into account some margin
will cause failures on some HW.
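
For illustration, a tolerance-based check along these lines (MARGIN being a
hypothetical slack value, not something from this series) would be more
forgiving:

#define MARGIN 10       /* hypothetical allowed overshoot */

TEST_ASSERT(br >= NUM_BRANCHES && br <= NUM_BRANCHES + MARGIN,
            "Branch instructions retired = %lu (expected %u..%u)",
            br, NUM_BRANCHES, NUM_BRANCHES + MARGIN);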

In v1 I suggested reading PMCEID* in guest code to check if the
event is supported. This method would also have the benefit of allowing
more complex filter range combinations to be tested.
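
A rough sketch of such a guest-side check (the PMCEID0/PMCEID1 bit mapping
below is my reading of the PMUv3 spec, so please double check it):

static bool guest_event_supported(unsigned int event)
{
        /* Common events 0x00-0x1f: PMCEID0_EL0[31:0]; 0x20-0x3f: PMCEID1_EL0[31:0]. */
        if (event < 0x20)
                return read_sysreg(pmceid0_el0) & BIT(event);
        return read_sysreg(pmceid1_el0) & BIT(event - 0x20);
}

/* e.g. in guest_code(), before programming the counters: */
GUEST_ASSERT(guest_event_supported(BR_RETIRED) &&
             guest_event_supported(INST_RETIRED));

(run_vcpu() would then also need to handle UCALL_ABORT.)
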
> + TEST_ASSERT(ir, "Instructions retired = %lu (expected > 0)", ir);
> +}
> +
> +static void check_pmc_not_counting(void)
> +{
> + uint64_t br = pmc_results.branches_retired;
> + uint64_t ir = pmc_results.instructions_retired;
> +
> + TEST_ASSERT(!br, "Branch instructions retired = %lu (expected 0)", br);
> + TEST_ASSERT(!ir, "Instructions retired = %lu (expected 0)", ir);
> +}
> +
> +static void run_vcpu_and_sync_pmc_results(void)
> +{
> + memset(&pmc_results, 0, sizeof(pmc_results));
> + sync_global_to_guest(vpmu_vm->vm, pmc_results);
> +
> + run_vcpu(vpmu_vm->vcpu);
> +
> + sync_global_from_guest(vpmu_vm->vm, pmc_results);
> +}
> +
> +static void run_test(struct test_desc *t)
> +{
> + pr_debug("Test: %s\n", t->name);
> +
> + create_vpmu_vm_with_filter(guest_code, t->filter);
> +
> + run_vcpu_and_sync_pmc_results();
> +
> + t->check_result();
> +
> + destroy_vpmu_vm(vpmu_vm);
> +}
> +
> +static struct test_desc tests[] = {
> + {"without_filter", check_pmc_counting, { EMPTY_FILTER }},
> + {"member_allow_filter", check_pmc_counting,
> + {DEFINE_FILTER(SW_INCR, 0), DEFINE_FILTER(INST_RETIRED, 0),
Note the doc says that Event 0 (SW_INCR) is never filtered, as it
doesn't count a hardware event


I would use the defines exposed in the uapi
> +#define KVM_PMU_EVENT_ALLOW 0
> +#define KVM_PMU_EVENT_DENY 1
> + DEFINE_FILTER(BR_RETIRED, 0), EMPTY_FILTER}},
> + {"member_deny_filter", check_pmc_not_counting,
> + {DEFINE_FILTER(SW_INCR, 1), DEFINE_FILTER(INST_RETIRED, 1),
What is the purpose of SW_INCR? You do not seem to test it anyway.
> + DEFINE_FILTER(BR_RETIRED, 1), EMPTY_FILTER}},
> + {"not_member_deny_filter", check_pmc_counting,
> + {DEFINE_FILTER(SW_INCR, 1), EMPTY_FILTER}},
> + {"not_member_allow_filter", check_pmc_not_counting,
> + {DEFINE_FILTER(SW_INCR, 0), EMPTY_FILTER}},
> + { 0 }
> +};
> +
> +static void for_each_test(void)
> +{
> + struct test_desc *t;
> +
> + for (t = &tests[0]; t->name; t++)
> + run_test(t);
> +}
> +
> +static bool kvm_supports_pmu_event_filter(void)
> +{
> + int r;
> +
> + vpmu_vm = create_vpmu_vm(guest_code);
> +
> + r = __kvm_has_device_attr(vpmu_vm->vcpu->fd, KVM_ARM_VCPU_PMU_V3_CTRL,
> + KVM_ARM_VCPU_PMU_V3_FILTER);
you can use __vcpu_has_device_attr directly
> +
> + destroy_vpmu_vm(vpmu_vm);
> + return !r;
> +}
> +
> +static bool host_pmu_supports_events(void)
> +{
> + vpmu_vm = create_vpmu_vm(guest_get_pmceid0);
> +
> + run_vcpu(vpmu_vm->vcpu);
> +
> + destroy_vpmu_vm(vpmu_vm);
> +
> + return pmceid0 & (BR_RETIRED | INST_RETIRED);
this will return true if either event is supported. I suspect this is
not what you want.
> +}
> +
> +int main(void)
> +{
> + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
> + TEST_REQUIRE(kvm_supports_pmu_event_filter());
> + TEST_REQUIRE(host_pmu_supports_events());
> +
> + for_each_test();
> +}
> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> index 644dae3814b5..f103d0824f8a 100644
> --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> @@ -18,6 +18,10 @@ struct vpmu_vm {
> int gic_fd;
> };
>
> +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
> + void (*init_pmu)(struct vpmu_vm *vm, void *arg),
> + void *arg);
> +
> struct vpmu_vm *create_vpmu_vm(void *guest_code);
>
> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
> diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
> index b3de8fdc555e..76ea03d607f1 100644
> --- a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
> +++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
> @@ -7,8 +7,9 @@
> #include <vpmu.h>
> #include <perf/arm_pmuv3.h>
>
> -/* Create a VM that has one vCPU with PMUv3 configured. */
> -struct vpmu_vm *create_vpmu_vm(void *guest_code)
> +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
> + void (*init_pmu)(struct vpmu_vm *vm, void *arg),
> + void *arg)
> {
> struct kvm_vcpu_init init;
> uint8_t pmuver;
> @@ -50,12 +51,21 @@ struct vpmu_vm *create_vpmu_vm(void *guest_code)
> "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
>
> /* Initialize vPMU */
> + if (init_pmu)
> + init_pmu(vpmu_vm, arg);
> +
> vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
> vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
>
> return vpmu_vm;
> }
>
> +/* Create a VM that has one vCPU with PMUv3 configured. */
> +struct vpmu_vm *create_vpmu_vm(void *guest_code)
> +{
> + return __create_vpmu_vm(guest_code, NULL, NULL);
> +}
> +
> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm)
> {
> close(vpmu_vm->gic_fd);

Thanks

Eric

2023-12-14 13:53:09

by Eric Auger

Subject: Re: [PATCH v2 5/5] KVM: selftests: aarch64: Add invalid filter test in pmu_event_filter_test

Hi Shaoqin

On 11/29/23 08:27, Shaoqin Huang wrote:
> Add an invalid filter test to double check that setting an invalid filter through
> the KVM_ARM_VCPU_PMU_V3_FILTER attribute returns the expected error.
... in which situations? filter beyond the 16b event space or incorrect
action.
>
> Signed-off-by: Shaoqin Huang <[email protected]>
> ---
> .../kvm/aarch64/pmu_event_filter_test.c | 36 +++++++++++++++++++
> 1 file changed, 36 insertions(+)
>
> diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
> index 0e652fbdb37a..4c375417b194 100644
> --- a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
> +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
> @@ -7,6 +7,7 @@
> * This test checks if the guest only see the limited pmu event that userspace
> * sets, if the guest can use those events which user allow, and if the guest
> * can't use those events which user deny.
> + * It also checks that setting invalid filter ranges return the expected error.
> * This test runs only when KVM_CAP_ARM_PMU_V3, KVM_ARM_VCPU_PMU_V3_FILTER
> * is supported on the host.
> */
> @@ -197,6 +198,39 @@ static void for_each_test(void)
> run_test(t);
> }
>
> +static void set_invalid_filter(struct vpmu_vm *vm, void *arg)
> +{
> + struct kvm_pmu_event_filter invalid;
> + struct kvm_device_attr attr = {
> + .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> + .attr = KVM_ARM_VCPU_PMU_V3_FILTER,
> + .addr = (uint64_t)&invalid,
> + };
> + int ret = 0;
> +
> + /* The max event number is (1 << 16), set a range large than it. */
larger
> + invalid = __DEFINE_FILTER(BIT(15), BIT(15)+1, 0);
doc says
must fit within the event space defined by the PMU
architecture (10 bits on ARMv8.0, 16 bits from ARMv8.1 onwards).

> + ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
> + TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter range "
> + "ret = %d, errno = %d (expected ret = -1, errno = EINVAL)",
> + ret, errno);
> +
> + ret = 0;
> +
> + /* Set the Invalid action. */
> + invalid = __DEFINE_FILTER(0, 1, 3);
> + ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
> + TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter action "
> + "ret = %d, errno = %d (expected ret = -1, errno = EINVAL)",
> + ret, errno);
> +}
> +
> +static void test_invalid_filter(void)
> +{
> + vpmu_vm = __create_vpmu_vm(guest_code, set_invalid_filter, NULL);
> + destroy_vpmu_vm(vpmu_vm);
> +}
> +
> static bool kvm_supports_pmu_event_filter(void)
> {
> int r;
> @@ -228,4 +262,6 @@ int main(void)
> TEST_REQUIRE(host_pmu_supports_events());
>
> for_each_test();
> +
> + test_invalid_filter();
> }

Eric

2023-12-14 13:55:49

by Eric Auger

Subject: Re: [PATCH v2 4/5] KVM: selftests: aarch64: Introduce pmu_event_filter_test

Hi Shaoqin,

On 11/29/23 08:27, Shaoqin Huang wrote:
> Introduce pmu_event_filter_test for arm64 platforms. The test configures
> PMUv3 for a vCPU, sets different PMU event filters for the vCPU, and
> checks that the guest can use the events the user allows and cannot use
> the events the user denies.
>
> The test refactors create_vpmu_vm() into a wrapper around
> __create_vpmu_vm(), which allows some extra init code to run before
> KVM_ARM_VCPU_PMU_V3_INIT.
>
> The test uses the KVM_ARM_VCPU_PMU_V3_FILTER attribute to set the PMU
> event filter in KVM. It filters two common events, branches_retired and
> instructions_retired, lets the guest use the two events in the PMU, and
> checks that the results are as expected.
>
> Signed-off-by: Shaoqin Huang <[email protected]>
> ---
> tools/testing/selftests/kvm/Makefile | 1 +
> .../kvm/aarch64/pmu_event_filter_test.c | 231 ++++++++++++++++++
> .../selftests/kvm/include/aarch64/vpmu.h | 4 +
> .../testing/selftests/kvm/lib/aarch64/vpmu.c | 14 +-
> 4 files changed, 248 insertions(+), 2 deletions(-)
> create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
>
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index b60852c222ac..5f126e1a1dbf 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
> TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
> TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
> TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
> +TEST_GEN_PROGS_aarch64 += aarch64/pmu_event_filter_test
> TEST_GEN_PROGS_aarch64 += aarch64/psci_test
> TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
> TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
> diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
> new file mode 100644
> index 000000000000..0e652fbdb37a
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
> @@ -0,0 +1,231 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * pmu_event_filter_test - Test user limit pmu event for guest.
> + *
> + * Copyright (c) 2023 Red Hat, Inc.
> + *
> + * This test checks if the guest only see the limited pmu event that userspace
> + * sets, if the guest can use those events which user allow, and if the guest
> + * can't use those events which user deny.
> + * This test runs only when KVM_CAP_ARM_PMU_V3, KVM_ARM_VCPU_PMU_V3_FILTER
> + * is supported on the host.
> + */
> +#include <kvm_util.h>
> +#include <processor.h>
> +#include <vgic.h>
> +#include <vpmu.h>
> +#include <test_util.h>
> +#include <perf/arm_pmuv3.h>
> +
> +struct {
> + uint64_t branches_retired;
> + uint64_t instructions_retired;
> +} pmc_results;
> +
> +static struct vpmu_vm *vpmu_vm;
> +static uint64_t pmceid0;
> +
> +#define FILTER_NR 10
> +
> +struct test_desc {
> + const char *name;
> + void (*check_result)(void);
> + struct kvm_pmu_event_filter filter[FILTER_NR];
> +};
> +
> +#define __DEFINE_FILTER(base, num, act) \
> + ((struct kvm_pmu_event_filter) { \
> + .base_event = base, \
> + .nevents = num, \
> + .action = act, \
> + })
> +
> +#define DEFINE_FILTER(base, act) __DEFINE_FILTER(base, 1, act)
> +
> +#define EMPTY_FILTER { 0 }
> +
> +#define SW_INCR 0x0
> +#define INST_RETIRED 0x8
> +#define BR_RETIRED 0x21
> +
> +#define NUM_BRANCHES 10
> +
> +static void run_and_measure_loop(void)
> +{
> + asm volatile(
> + " mov x10, %[loop]\n"
> + "1: sub x10, x10, #1\n"
> + " cmp x10, #0x0\n"
> + " b.gt 1b\n"
> + :
> + : [loop] "r" (NUM_BRANCHES)
> + : "x10", "cc");
> +}
> +
> +static void guest_code(void)
> +{
> + uint64_t pmcr = read_sysreg(pmcr_el0);
> +
> + pmu_disable_reset();
> +
> + write_pmevtypern(0, BR_RETIRED);
> + write_pmevtypern(1, INST_RETIRED);
> + enable_counter(0);
> + enable_counter(1);
> + write_sysreg(pmcr | ARMV8_PMU_PMCR_E, pmcr_el0);
> +
> + run_and_measure_loop();
> +
> + write_sysreg(pmcr, pmcr_el0);
> +
> + pmc_results.branches_retired = read_sysreg(pmevcntr0_el0);
> + pmc_results.instructions_retired = read_sysreg(pmevcntr1_el0);
> +
> + GUEST_DONE();
> +}
> +
> +static void guest_get_pmceid0(void)
> +{
> + uint64_t pmceid0 = read_sysreg(pmceid0_el0);
> +
> + GUEST_PRINTF("%lx\n", pmceid0);
> +
> + GUEST_DONE();
> +}
> +
> +static void pmu_event_filter_init(struct vpmu_vm *vm, void *arg)
> +{
> + struct kvm_device_attr attr = {
> + .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> + .attr = KVM_ARM_VCPU_PMU_V3_FILTER,
> + };
> + struct kvm_pmu_event_filter *filter = (struct kvm_pmu_event_filter *)arg;
> +
> + while (filter && filter->nevents != 0) {
> + attr.addr = (uint64_t)filter;
> + vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
> + filter++;
> + }
> +}
> +
> +static void create_vpmu_vm_with_filter(void *guest_code,
> + struct kvm_pmu_event_filter *filter)
> +{
> + vpmu_vm = __create_vpmu_vm(guest_code, pmu_event_filter_init, filter);
> +}
> +
> +static void run_vcpu(struct kvm_vcpu *vcpu)
> +{
> + struct ucall uc;
> +
> + while (1) {
> + vcpu_run(vcpu);
> + switch (get_ucall(vcpu, &uc)) {
> + case UCALL_DONE:
> + return;
> + case UCALL_PRINTF:
> + pmceid0 = strtoll(uc.buffer, NULL, 16);
> + break;
> + default:
> + TEST_FAIL("Unknown ucall %lu", uc.cmd);
> + }
> + }
> +}
> +
> +static void check_pmc_counting(void)
> +{
> + uint64_t br = pmc_results.branches_retired;
> + uint64_t ir = pmc_results.instructions_retired;
> +
> + TEST_ASSERT(br && br == NUM_BRANCHES, "Branch instructions retired = "
> + "%lu (expected %u)", br, NUM_BRANCHES);
> + TEST_ASSERT(ir, "Instructions retired = %lu (expected > 0)", ir);
> +}
> +
> +static void check_pmc_not_counting(void)
> +{
> + uint64_t br = pmc_results.branches_retired;
> + uint64_t ir = pmc_results.instructions_retired;
> +
> + TEST_ASSERT(!br, "Branch instructions retired = %lu (expected 0)", br);
> + TEST_ASSERT(!ir, "Instructions retired = %lu (expected 0)", ir);
> +}
> +
> +static void run_vcpu_and_sync_pmc_results(void)
> +{
> + memset(&pmc_results, 0, sizeof(pmc_results));
> + sync_global_to_guest(vpmu_vm->vm, pmc_results);
> +
> + run_vcpu(vpmu_vm->vcpu);
> +
> + sync_global_from_guest(vpmu_vm->vm, pmc_results);
> +}
> +
> +static void run_test(struct test_desc *t)
> +{
> + pr_debug("Test: %s\n", t->name);
> +
> + create_vpmu_vm_with_filter(guest_code, t->filter);
> +
> + run_vcpu_and_sync_pmc_results();
> +
> + t->check_result();
> +
> + destroy_vpmu_vm(vpmu_vm);
> +}
> +
> +static struct test_desc tests[] = {
> + {"without_filter", check_pmc_counting, { EMPTY_FILTER }},
> + {"member_allow_filter", check_pmc_counting,
> + {DEFINE_FILTER(SW_INCR, 0), DEFINE_FILTER(INST_RETIRED, 0),
> + DEFINE_FILTER(BR_RETIRED, 0), EMPTY_FILTER}},
> + {"member_deny_filter", check_pmc_not_counting,
> + {DEFINE_FILTER(SW_INCR, 1), DEFINE_FILTER(INST_RETIRED, 1),
> + DEFINE_FILTER(BR_RETIRED, 1), EMPTY_FILTER}},
> + {"not_member_deny_filter", check_pmc_counting,
> + {DEFINE_FILTER(SW_INCR, 1), EMPTY_FILTER}},
> + {"not_member_allow_filter", check_pmc_not_counting,
> + {DEFINE_FILTER(SW_INCR, 0), EMPTY_FILTER}},
> + { 0 }
> +};
> +
> +static void for_each_test(void)
> +{
> + struct test_desc *t;
> +
> + for (t = &tests[0]; t->name; t++)
> + run_test(t);
> +}
> +
> +static bool kvm_supports_pmu_event_filter(void)
> +{
> + int r;
> +
> + vpmu_vm = create_vpmu_vm(guest_code);
> +
> + r = __kvm_has_device_attr(vpmu_vm->vcpu->fd, KVM_ARM_VCPU_PMU_V3_CTRL,
> + KVM_ARM_VCPU_PMU_V3_FILTER);
> +
> + destroy_vpmu_vm(vpmu_vm);
> + return !r;
> +}
> +
> +static bool host_pmu_supports_events(void)
> +{
> + vpmu_vm = create_vpmu_vm(guest_get_pmceid0);
> +
> + run_vcpu(vpmu_vm->vcpu);
> +
> + destroy_vpmu_vm(vpmu_vm);
> +
> + return pmceid0 & (BR_RETIRED | INST_RETIRED);
> +}
> +
> +int main(void)
> +{
> + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
> + TEST_REQUIRE(kvm_supports_pmu_event_filter());
> + TEST_REQUIRE(host_pmu_supports_events());
> +
> + for_each_test();
> +}
> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> index 644dae3814b5..f103d0824f8a 100644
> --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> @@ -18,6 +18,10 @@ struct vpmu_vm {
> int gic_fd;
> };
>
> +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
> + void (*init_pmu)(struct vpmu_vm *vm, void *arg),
> + void *arg);
> +
> struct vpmu_vm *create_vpmu_vm(void *guest_code);
>
> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
> diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
> index b3de8fdc555e..76ea03d607f1 100644
> --- a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
> +++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
> @@ -7,8 +7,9 @@
> #include <vpmu.h>
> #include <perf/arm_pmuv3.h>
>
> -/* Create a VM that has one vCPU with PMUv3 configured. */
> -struct vpmu_vm *create_vpmu_vm(void *guest_code)
> +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
> + void (*init_pmu)(struct vpmu_vm *vm, void *arg),
> + void *arg)
> {
> struct kvm_vcpu_init init;
> uint8_t pmuver;
> @@ -50,12 +51,21 @@ struct vpmu_vm *create_vpmu_vm(void *guest_code)
> "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
>
> /* Initialize vPMU */
> + if (init_pmu)
> + init_pmu(vpmu_vm, arg);
> +
> vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
> vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
>
> return vpmu_vm;
> }
>
> +/* Create a VM that has one vCPU with PMUv3 configured. */
> +struct vpmu_vm *create_vpmu_vm(void *guest_code)
> +{
> + return __create_vpmu_vm(guest_code, NULL, NULL);
> +}
> +
> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm)
> {
> close(vpmu_vm->gic_fd);
While reading the doc again I can see there would be other interesting
scenarios to test, such as:

"Note: "Cancelling" a filter by registering the opposite action for the same
range doesn't change the default action. For example, installing an ALLOW
filter for event range [0:10) as the first filter and then applying a DENY
action for the same range will leave the whole range as disabled."
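
As an illustration, using the helpers already in this test (the expected
outcome below is my reading of the doc note above, so worth double checking),
such a case might look like:

        /* ALLOW then DENY over the same range: everything ends up disabled. */
        {"allow_then_deny_same_range", check_pmc_not_counting,
         {__DEFINE_FILTER(0x0, 0x40, KVM_PMU_EVENT_ALLOW),
          __DEFINE_FILTER(0x0, 0x40, KVM_PMU_EVENT_DENY), EMPTY_FILTER}},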

Also filter ranges. Using PMCEID* would simplify your life, I think.

However this is more work and maybe goes beyond your original intent. Up
to you ...

Eric


2023-12-14 19:29:43

by Eric Auger

Subject: Re: [PATCH v2 4/5] KVM: selftests: aarch64: Introduce pmu_event_filter_test

Hi Shaoqin,

On 12/14/23 14:45, Eric Auger wrote:
> Hi Shaoqin,
>
> On 11/29/23 08:27, Shaoqin Huang wrote:
>> Introduce pmu_event_filter_test for arm64 platforms. The test configures
>> PMUv3 for a vCPU, sets different PMU event filters for the vCPU, and
>> checks that the guest can use the events the user allows and cannot use
>> the events the user denies.
>>
>> The test refactors create_vpmu_vm() into a wrapper around
>> __create_vpmu_vm(), which allows some extra init code to run before
>> KVM_ARM_VCPU_PMU_V3_INIT.
>>
>> The test uses the KVM_ARM_VCPU_PMU_V3_FILTER attribute to set the PMU
>> event filter in KVM. It filters two common events, branches_retired and
>> instructions_retired, lets the guest use the two events in the PMU, and
>> checks that the results are as expected.
>>
>> Signed-off-by: Shaoqin Huang <[email protected]>
>> ---
>> tools/testing/selftests/kvm/Makefile | 1 +
>> .../kvm/aarch64/pmu_event_filter_test.c | 231 ++++++++++++++++++
>> .../selftests/kvm/include/aarch64/vpmu.h | 4 +
>> .../testing/selftests/kvm/lib/aarch64/vpmu.c | 14 +-
>> 4 files changed, 248 insertions(+), 2 deletions(-)
>> create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
>>
>> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
>> index b60852c222ac..5f126e1a1dbf 100644
>> --- a/tools/testing/selftests/kvm/Makefile
>> +++ b/tools/testing/selftests/kvm/Makefile
>> @@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
>> TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
>> TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
>> TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
>> +TEST_GEN_PROGS_aarch64 += aarch64/pmu_event_filter_test
>> TEST_GEN_PROGS_aarch64 += aarch64/psci_test
>> TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
>> TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
>> diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
>> new file mode 100644
>> index 000000000000..0e652fbdb37a
>> --- /dev/null
>> +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
>> @@ -0,0 +1,231 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/*
>> + * pmu_event_filter_test - Test user limit pmu event for guest.
>> + *
>> + * Copyright (c) 2023 Red Hat, Inc.
>> + *
>> + * This test checks if the guest only see the limited pmu event that userspace
>> + * sets, if the guest can use those events which user allow, and if the guest
>> + * can't use those events which user deny.
>> + * This test runs only when KVM_CAP_ARM_PMU_V3, KVM_ARM_VCPU_PMU_V3_FILTER
>> + * is supported on the host.
>> + */
>> +#include <kvm_util.h>
>> +#include <processor.h>
>> +#include <vgic.h>
>> +#include <vpmu.h>
>> +#include <test_util.h>
>> +#include <perf/arm_pmuv3.h>
>> +
>> +struct {
>> + uint64_t branches_retired;
>> + uint64_t instructions_retired;
>> +} pmc_results;
>> +
>> +static struct vpmu_vm *vpmu_vm;
>> +static uint64_t pmceid0;
>> +
>> +#define FILTER_NR 10
>> +
>> +struct test_desc {
>> + const char *name;
>> + void (*check_result)(void);
>> + struct kvm_pmu_event_filter filter[FILTER_NR];
>> +};
>> +
>> +#define __DEFINE_FILTER(base, num, act) \
>> + ((struct kvm_pmu_event_filter) { \
>> + .base_event = base, \
>> + .nevents = num, \
>> + .action = act, \
>> + })
>> +
>> +#define DEFINE_FILTER(base, act) __DEFINE_FILTER(base, 1, act)
>> +
>> +#define EMPTY_FILTER { 0 }
>> +
>> +#define SW_INCR 0x0
>> +#define INST_RETIRED 0x8
>> +#define BR_RETIRED 0x21
>> +
>> +#define NUM_BRANCHES 10
>> +
>> +static void run_and_measure_loop(void)
>> +{
>> + asm volatile(
>> + " mov x10, %[loop]\n"
>> + "1: sub x10, x10, #1\n"
>> + " cmp x10, #0x0\n"
>> + " b.gt 1b\n"
>> + :
>> + : [loop] "r" (NUM_BRANCHES)
>> + : "x10", "cc");
>> +}
>> +
>> +static void guest_code(void)
>> +{
>> + uint64_t pmcr = read_sysreg(pmcr_el0);
>> +
>> + pmu_disable_reset();
>> +
>> + write_pmevtypern(0, BR_RETIRED);
>> + write_pmevtypern(1, INST_RETIRED);
>> + enable_counter(0);
>> + enable_counter(1);
>> + write_sysreg(pmcr | ARMV8_PMU_PMCR_E, pmcr_el0);
>> +
>> + run_and_measure_loop();
>> +
>> + write_sysreg(pmcr, pmcr_el0);
>> +
>> + pmc_results.branches_retired = read_sysreg(pmevcntr0_el0);
>> + pmc_results.instructions_retired = read_sysreg(pmevcntr1_el0);
>> +
>> + GUEST_DONE();
>> +}
>> +
>> +static void guest_get_pmceid0(void)
>> +{
>> + uint64_t pmceid0 = read_sysreg(pmceid0_el0);
>> +
>> + GUEST_PRINTF("%lx\n", pmceid0);
>> +
>> + GUEST_DONE();
>> +}
>> +
>> +static void pmu_event_filter_init(struct vpmu_vm *vm, void *arg)
>> +{
>> + struct kvm_device_attr attr = {
>> + .group = KVM_ARM_VCPU_PMU_V3_CTRL,
>> + .attr = KVM_ARM_VCPU_PMU_V3_FILTER,
>> + };
>> + struct kvm_pmu_event_filter *filter = (struct kvm_pmu_event_filter *)arg;
>> +
>> + while (filter && filter->nevents != 0) {
>> + attr.addr = (uint64_t)filter;
>> + vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
>> + filter++;
>> + }
>> +}
>> +
>> +static void create_vpmu_vm_with_filter(void *guest_code,
>> + struct kvm_pmu_event_filter *filter)
>> +{
>> + vpmu_vm = __create_vpmu_vm(guest_code, pmu_event_filter_init, filter);
>> +}
>> +
>> +static void run_vcpu(struct kvm_vcpu *vcpu)
>> +{
>> + struct ucall uc;
>> +
>> + while (1) {
>> + vcpu_run(vcpu);
>> + switch (get_ucall(vcpu, &uc)) {
>> + case UCALL_DONE:
>> + return;
>> + case UCALL_PRINTF:
>> + pmceid0 = strtoll(uc.buffer, NULL, 16);
>> + break;
>> + default:
>> + TEST_FAIL("Unknown ucall %lu", uc.cmd);
>> + }
>> + }
>> +}
>> +
>> +static void check_pmc_counting(void)
>> +{
>> + uint64_t br = pmc_results.branches_retired;
>> + uint64_t ir = pmc_results.instructions_retired;
>> +
>> + TEST_ASSERT(br && br == NUM_BRANCHES, "Branch instructions retired = "
>> + "%lu (expected %u)", br, NUM_BRANCHES);
> have you tested on several machines? My experience with some events
> (MEM_ACCESS for instance) is that you have variance (sometimes
> significant) on some event count. I am a little bit scared that having
> this br == NUM_BRANCHES check without taking into account some margin
> will cause failures on some HW.

I confirm the usual suspect, Amberwing, does not like this check ;-)

augere@qualcomm-amberwing-rep-06:~/UPSTREAM/linux/tools/testing/selftests/kvm/aarch64#
./pmu_event_filter_test
==== Test Assertion Failure ====
aarch64/pmu_event_filter_test.c:141: br && br == NUM_BRANCHES
pid=7750 tid=7750 errno=4 - Interrupted system call
1 0x0000000000401d6b: check_pmc_counting at pmu_event_filter_test.c:141
2 0x0000000000401967: run_test at pmu_event_filter_test.c:173
3 (inlined by) for_each_test at pmu_event_filter_test.c:198
4 (inlined by) main at pmu_event_filter_test.c:264
5 0x0000ffffaaa6c79b: ?? ??:0
6 0x0000ffffaaa6c86b: ?? ??:0
7 0x0000000000401aaf: _start at ??:?
Branch instructions retired = 15 (expected 10)

Eric

>
> In v1 I suggested reading PMCEID* in guest code to check if the
> event is supported. This method would also have the benefit of allowing
> more complex filter range combinations to be tested.
>> + TEST_ASSERT(ir, "Instructions retired = %lu (expected > 0)", ir);
>> +}
>> +
>> +static void check_pmc_not_counting(void)
>> +{
>> + uint64_t br = pmc_results.branches_retired;
>> + uint64_t ir = pmc_results.instructions_retired;
>> +
>> + TEST_ASSERT(!br, "Branch instructions retired = %lu (expected 0)", br);
>> + TEST_ASSERT(!ir, "Instructions retired = %lu (expected 0)", ir);
>> +}
>> +
>> +static void run_vcpu_and_sync_pmc_results(void)
>> +{
>> + memset(&pmc_results, 0, sizeof(pmc_results));
>> + sync_global_to_guest(vpmu_vm->vm, pmc_results);
>> +
>> + run_vcpu(vpmu_vm->vcpu);
>> +
>> + sync_global_from_guest(vpmu_vm->vm, pmc_results);
>> +}
>> +
>> +static void run_test(struct test_desc *t)
>> +{
>> + pr_debug("Test: %s\n", t->name);
>> +
>> + create_vpmu_vm_with_filter(guest_code, t->filter);
>> +
>> + run_vcpu_and_sync_pmc_results();
>> +
>> + t->check_result();
>> +
>> + destroy_vpmu_vm(vpmu_vm);
>> +}
>> +
>> +static struct test_desc tests[] = {
>> + {"without_filter", check_pmc_counting, { EMPTY_FILTER }},
>> + {"member_allow_filter", check_pmc_counting,
>> + {DEFINE_FILTER(SW_INCR, 0), DEFINE_FILTER(INST_RETIRED, 0),
> Note the doc says that Event 0 (SW_INCR) is never filtered, as it
> doesn't count a hardware event
>
>
> I would use the defines exposed in the uapi
>> +#define KVM_PMU_EVENT_ALLOW 0
>> +#define KVM_PMU_EVENT_DENY 1
>> + DEFINE_FILTER(BR_RETIRED, 0), EMPTY_FILTER}},
>> + {"member_deny_filter", check_pmc_not_counting,
>> + {DEFINE_FILTER(SW_INCR, 1), DEFINE_FILTER(INST_RETIRED, 1),
> What is the purpose of SW_INCR? You do not seem to test it anyway.
>> + DEFINE_FILTER(BR_RETIRED, 1), EMPTY_FILTER}},
>> + {"not_member_deny_filter", check_pmc_counting,
>> + {DEFINE_FILTER(SW_INCR, 1), EMPTY_FILTER}},
>> + {"not_member_allow_filter", check_pmc_not_counting,
>> + {DEFINE_FILTER(SW_INCR, 0), EMPTY_FILTER}},
>> + { 0 }
>> +};
>> +
>> +static void for_each_test(void)
>> +{
>> + struct test_desc *t;
>> +
>> + for (t = &tests[0]; t->name; t++)
>> + run_test(t);
>> +}
>> +
>> +static bool kvm_supports_pmu_event_filter(void)
>> +{
>> + int r;
>> +
>> + vpmu_vm = create_vpmu_vm(guest_code);
>> +
>> + r = __kvm_has_device_attr(vpmu_vm->vcpu->fd, KVM_ARM_VCPU_PMU_V3_CTRL,
>> + KVM_ARM_VCPU_PMU_V3_FILTER);
> you can use __vcpu_has_device_attr directly
>> +
>> + destroy_vpmu_vm(vpmu_vm);
>> + return !r;
>> +}
>> +
>> +static bool host_pmu_supports_events(void)
>> +{
>> + vpmu_vm = create_vpmu_vm(guest_get_pmceid0);
>> +
>> + run_vcpu(vpmu_vm->vcpu);
>> +
>> + destroy_vpmu_vm(vpmu_vm);
>> +
>> + return pmceid0 & (BR_RETIRED | INST_RETIRED);
> this will return true if either event is supported. I suspect this is
> not what you want.
>> +}
>> +
>> +int main(void)
>> +{
>> + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
>> + TEST_REQUIRE(kvm_supports_pmu_event_filter());
>> + TEST_REQUIRE(host_pmu_supports_events());
>> +
>> + for_each_test();
>> +}
>> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> index 644dae3814b5..f103d0824f8a 100644
>> --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> @@ -18,6 +18,10 @@ struct vpmu_vm {
>> int gic_fd;
>> };
>>
>> +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
>> + void (*init_pmu)(struct vpmu_vm *vm, void *arg),
>> + void *arg);
>> +
>> struct vpmu_vm *create_vpmu_vm(void *guest_code);
>>
>> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
>> diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
>> index b3de8fdc555e..76ea03d607f1 100644
>> --- a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
>> +++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
>> @@ -7,8 +7,9 @@
>> #include <vpmu.h>
>> #include <perf/arm_pmuv3.h>
>>
>> -/* Create a VM that has one vCPU with PMUv3 configured. */
>> -struct vpmu_vm *create_vpmu_vm(void *guest_code)
>> +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
>> + void (*init_pmu)(struct vpmu_vm *vm, void *arg),
>> + void *arg)
>> {
>> struct kvm_vcpu_init init;
>> uint8_t pmuver;
>> @@ -50,12 +51,21 @@ struct vpmu_vm *create_vpmu_vm(void *guest_code)
>> "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
>>
>> /* Initialize vPMU */
>> + if (init_pmu)
>> + init_pmu(vpmu_vm, arg);
>> +
>> vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
>> vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
>>
>> return vpmu_vm;
>> }
>>
>> +/* Create a VM that has one vCPU with PMUv3 configured. */
>> +struct vpmu_vm *create_vpmu_vm(void *guest_code)
>> +{
>> + return __create_vpmu_vm(guest_code, NULL, NULL);
>> +}
>> +
>> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm)
>> {
>> close(vpmu_vm->gic_fd);
>
> Thanks
>
> Eric