2023-11-23 06:38:27

by Shaoqin Huang

[permalink] [raw]
Subject: [PATCH v1 0/3] KVM: selftests: aarch64: Introduce pmu_event_filter_test

The test is inspired by the pmu_event_filter_test implemented for x86. On
the arm64 platform, there is the same ability to set the PMU event filter
through the KVM_ARM_VCPU_PMU_V3_FILTER attribute. So add the test for arm64.

The series first moves some common PMU code from vpmu_counter_access to lib/
so it can be used by pmu_event_filter_test, then implements the test itself.

Shaoqin Huang (3):
KVM: selftests: aarch64: Make the [create|destroy]_vpmu_vm() can be
reused
KVM: selftests: aarch64: Move the pmu helper function into lib/
KVM: selftests: aarch64: Introduce pmu_event_filter_test

tools/testing/selftests/kvm/Makefile | 2 +
.../kvm/aarch64/pmu_event_filter_test.c | 227 ++++++++++++++++++
.../kvm/aarch64/vpmu_counter_access.c | 218 ++---------------
.../selftests/kvm/include/aarch64/vpmu.h | 139 +++++++++++
.../testing/selftests/kvm/lib/aarch64/vpmu.c | 74 ++++++
5 files changed, 466 insertions(+), 194 deletions(-)
create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
create mode 100644 tools/testing/selftests/kvm/include/aarch64/vpmu.h
create mode 100644 tools/testing/selftests/kvm/lib/aarch64/vpmu.c

--
2.40.1


2023-11-23 06:39:12

by Shaoqin Huang

[permalink] [raw]
Subject: [PATCH v1 2/3] KVM: selftests: aarch64: Move the pmu helper function into lib/

Move those PMU helper functions into lib/ so that they can be used by
other PMU tests.

Signed-off-by: Shaoqin Huang <[email protected]>
---
.../kvm/aarch64/vpmu_counter_access.c | 118 -----------------
.../selftests/kvm/include/aarch64/vpmu.h | 119 ++++++++++++++++++
2 files changed, 119 insertions(+), 118 deletions(-)

diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
index 17305408a334..62d6315790ab 100644
--- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
+++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
@@ -20,12 +20,6 @@
#include <perf/arm_pmuv3.h>
#include <linux/bitfield.h>

-/* The max number of the PMU event counters (excluding the cycle counter) */
-#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
-
-/* The cycle counter bit position that's common among the PMU registers */
-#define ARMV8_PMU_CYCLE_IDX 31
-
static struct vpmu_vm *vpmu_vm;

struct pmreg_sets {
@@ -35,118 +29,6 @@ struct pmreg_sets {

#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}

-static uint64_t get_pmcr_n(uint64_t pmcr)
-{
- return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
-}
-
-static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
-{
- *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
- *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
-}
-
-static uint64_t get_counters_mask(uint64_t n)
-{
- uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
-
- if (n)
- mask |= GENMASK(n - 1, 0);
- return mask;
-}
-
-/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
-static inline unsigned long read_sel_evcntr(int sel)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- return read_sysreg(pmxevcntr_el0);
-}
-
-/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
-static inline void write_sel_evcntr(int sel, unsigned long val)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- write_sysreg(val, pmxevcntr_el0);
- isb();
-}
-
-/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
-static inline unsigned long read_sel_evtyper(int sel)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- return read_sysreg(pmxevtyper_el0);
-}
-
-/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
-static inline void write_sel_evtyper(int sel, unsigned long val)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- write_sysreg(val, pmxevtyper_el0);
- isb();
-}
-
-static inline void enable_counter(int idx)
-{
- uint64_t v = read_sysreg(pmcntenset_el0);
-
- write_sysreg(BIT(idx) | v, pmcntenset_el0);
- isb();
-}
-
-static inline void disable_counter(int idx)
-{
- uint64_t v = read_sysreg(pmcntenset_el0);
-
- write_sysreg(BIT(idx) | v, pmcntenclr_el0);
- isb();
-}
-
-static void pmu_disable_reset(void)
-{
- uint64_t pmcr = read_sysreg(pmcr_el0);
-
- /* Reset all counters, disabling them */
- pmcr &= ~ARMV8_PMU_PMCR_E;
- write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
- isb();
-}
-
-#define RETURN_READ_PMEVCNTRN(n) \
- return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
-{
- PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
- return 0;
-}
-
-#define WRITE_PMEVCNTRN(n) \
- write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
- isb();
-}
-
-#define READ_PMEVTYPERN(n) \
- return read_sysreg(pmevtyper##n##_el0)
-static unsigned long read_pmevtypern(int n)
-{
- PMEVN_SWITCH(n, READ_PMEVTYPERN);
- return 0;
-}
-
-#define WRITE_PMEVTYPERN(n) \
- write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
- isb();
-}
-
/*
* The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
* accessors that test cases will use. Each of the accessors will
diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
index 0a56183644ee..e0cc1ca1c4b7 100644
--- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
+++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
@@ -1,10 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */

#include <kvm_util.h>
+#include <perf/arm_pmuv3.h>

#define GICD_BASE_GPA 0x8000000ULL
#define GICR_BASE_GPA 0x80A0000ULL

+/* The max number of the PMU event counters (excluding the cycle counter) */
+#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
+
+/* The cycle counter bit position that's common among the PMU registers */
+#define ARMV8_PMU_CYCLE_IDX 31
+
struct vpmu_vm {
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
@@ -14,3 +21,115 @@ struct vpmu_vm {
struct vpmu_vm *create_vpmu_vm(void *guest_code);

void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
+
+static inline uint64_t get_pmcr_n(uint64_t pmcr)
+{
+ return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+}
+
+static inline void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
+{
+ *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+ *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+}
+
+static inline uint64_t get_counters_mask(uint64_t n)
+{
+ uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
+
+ if (n)
+ mask |= GENMASK(n - 1, 0);
+ return mask;
+}
+
+/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
+static inline unsigned long read_sel_evcntr(int sel)
+{
+ write_sysreg(sel, pmselr_el0);
+ isb();
+ return read_sysreg(pmxevcntr_el0);
+}
+
+/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
+static inline void write_sel_evcntr(int sel, unsigned long val)
+{
+ write_sysreg(sel, pmselr_el0);
+ isb();
+ write_sysreg(val, pmxevcntr_el0);
+ isb();
+}
+
+/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
+static inline unsigned long read_sel_evtyper(int sel)
+{
+ write_sysreg(sel, pmselr_el0);
+ isb();
+ return read_sysreg(pmxevtyper_el0);
+}
+
+/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
+static inline void write_sel_evtyper(int sel, unsigned long val)
+{
+ write_sysreg(sel, pmselr_el0);
+ isb();
+ write_sysreg(val, pmxevtyper_el0);
+ isb();
+}
+
+static inline void enable_counter(int idx)
+{
+ uint64_t v = read_sysreg(pmcntenset_el0);
+
+ write_sysreg(BIT(idx) | v, pmcntenset_el0);
+ isb();
+}
+
+static inline void disable_counter(int idx)
+{
+ uint64_t v = read_sysreg(pmcntenset_el0);
+
+ write_sysreg(BIT(idx) | v, pmcntenclr_el0);
+ isb();
+}
+
+static inline void pmu_disable_reset(void)
+{
+ uint64_t pmcr = read_sysreg(pmcr_el0);
+
+ /* Reset all counters, disabling them */
+ pmcr &= ~ARMV8_PMU_PMCR_E;
+ write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
+ isb();
+}
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(pmevcntr##n##_el0)
+static inline unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, pmevcntr##n##_el0)
+static inline void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+ isb();
+}
+
+#define READ_PMEVTYPERN(n) \
+ return read_sysreg(pmevtyper##n##_el0)
+static inline unsigned long read_pmevtypern(int n)
+{
+ PMEVN_SWITCH(n, READ_PMEVTYPERN);
+ return 0;
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, pmevtyper##n##_el0)
+static inline void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+ isb();
+}
--
2.40.1

2023-11-23 06:39:54

by Shaoqin Huang

[permalink] [raw]
Subject: [PATCH v1 1/3] KVM: selftests: aarch64: Make the [create|destroy]_vpmu_vm() can be reused

Move the [create|destroy]_vpmu_vm() functions into lib/ so that they can
be used by other tests. Installing the handlers is specific to the
vpmu_counter_access test, so create a wrapper function for that part and
only move the common code.

No functional change.

Signed-off-by: Shaoqin Huang <[email protected]>
---
tools/testing/selftests/kvm/Makefile | 1 +
.../kvm/aarch64/vpmu_counter_access.c | 100 +++++-------------
.../selftests/kvm/include/aarch64/vpmu.h | 16 +++
.../testing/selftests/kvm/lib/aarch64/vpmu.c | 64 +++++++++++
4 files changed, 105 insertions(+), 76 deletions(-)
create mode 100644 tools/testing/selftests/kvm/include/aarch64/vpmu.h
create mode 100644 tools/testing/selftests/kvm/lib/aarch64/vpmu.c

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index a5963ab9215b..b60852c222ac 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -57,6 +57,7 @@ LIBKVM_aarch64 += lib/aarch64/processor.c
LIBKVM_aarch64 += lib/aarch64/spinlock.c
LIBKVM_aarch64 += lib/aarch64/ucall.c
LIBKVM_aarch64 += lib/aarch64/vgic.c
+LIBKVM_aarch64 += lib/aarch64/vpmu.c

LIBKVM_s390x += lib/s390x/diag318_test_handler.c
LIBKVM_s390x += lib/s390x/processor.c
diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
index 5ea78986e665..17305408a334 100644
--- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
+++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
@@ -16,6 +16,7 @@
#include <processor.h>
#include <test_util.h>
#include <vgic.h>
+#include <vpmu.h>
#include <perf/arm_pmuv3.h>
#include <linux/bitfield.h>

@@ -25,13 +26,7 @@
/* The cycle counter bit position that's common among the PMU registers */
#define ARMV8_PMU_CYCLE_IDX 31

-struct vpmu_vm {
- struct kvm_vm *vm;
- struct kvm_vcpu *vcpu;
- int gic_fd;
-};
-
-static struct vpmu_vm vpmu_vm;
+static struct vpmu_vm *vpmu_vm;

struct pmreg_sets {
uint64_t set_reg_id;
@@ -421,64 +416,6 @@ static void guest_code(uint64_t expected_pmcr_n)
GUEST_DONE();
}

-#define GICD_BASE_GPA 0x8000000ULL
-#define GICR_BASE_GPA 0x80A0000ULL
-
-/* Create a VM that has one vCPU with PMUv3 configured. */
-static void create_vpmu_vm(void *guest_code)
-{
- struct kvm_vcpu_init init;
- uint8_t pmuver, ec;
- uint64_t dfr0, irq = 23;
- struct kvm_device_attr irq_attr = {
- .group = KVM_ARM_VCPU_PMU_V3_CTRL,
- .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
- .addr = (uint64_t)&irq,
- };
- struct kvm_device_attr init_attr = {
- .group = KVM_ARM_VCPU_PMU_V3_CTRL,
- .attr = KVM_ARM_VCPU_PMU_V3_INIT,
- };
-
- /* The test creates the vpmu_vm multiple times. Ensure a clean state */
- memset(&vpmu_vm, 0, sizeof(vpmu_vm));
-
- vpmu_vm.vm = vm_create(1);
- vm_init_descriptor_tables(vpmu_vm.vm);
- for (ec = 0; ec < ESR_EC_NUM; ec++) {
- vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
- guest_sync_handler);
- }
-
- /* Create vCPU with PMUv3 */
- vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
- init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
- vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
- vcpu_init_descriptor_tables(vpmu_vm.vcpu);
- vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
- GICD_BASE_GPA, GICR_BASE_GPA);
- __TEST_REQUIRE(vpmu_vm.gic_fd >= 0,
- "Failed to create vgic-v3, skipping");
-
- /* Make sure that PMUv3 support is indicated in the ID register */
- vcpu_get_reg(vpmu_vm.vcpu,
- KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
- pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
- TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
- pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
- "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
-
- /* Initialize vPMU */
- vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
- vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
-}
-
-static void destroy_vpmu_vm(void)
-{
- close(vpmu_vm.gic_fd);
- kvm_vm_free(vpmu_vm.vm);
-}
-
static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
{
struct ucall uc;
@@ -497,13 +434,24 @@ static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
}
}

+static void create_vpmu_vm_with_handler(void *guest_code)
+{
+ uint8_t ec;
+ vpmu_vm = create_vpmu_vm(guest_code);
+
+ for (ec = 0; ec < ESR_EC_NUM; ec++) {
+ vm_install_sync_handler(vpmu_vm->vm, VECTOR_SYNC_CURRENT, ec,
+ guest_sync_handler);
+ }
+}
+
static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
{
struct kvm_vcpu *vcpu;
uint64_t pmcr, pmcr_orig;

- create_vpmu_vm(guest_code);
- vcpu = vpmu_vm.vcpu;
+ create_vpmu_vm_with_handler(guest_code);
+ vcpu = vpmu_vm->vcpu;

vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
pmcr = pmcr_orig;
@@ -539,7 +487,7 @@ static void run_access_test(uint64_t pmcr_n)
pr_debug("Test with pmcr_n %lu\n", pmcr_n);

test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
- vcpu = vpmu_vm.vcpu;
+ vcpu = vpmu_vm->vcpu;

/* Save the initial sp to restore them later to run the guest again */
vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);
@@ -550,7 +498,7 @@ static void run_access_test(uint64_t pmcr_n)
* Reset and re-initialize the vCPU, and run the guest code again to
* check if PMCR_EL0.N is preserved.
*/
- vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
+ vm_ioctl(vpmu_vm->vm, KVM_ARM_PREFERRED_TARGET, &init);
init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
aarch64_vcpu_setup(vcpu, &init);
vcpu_init_descriptor_tables(vcpu);
@@ -559,7 +507,7 @@ static void run_access_test(uint64_t pmcr_n)

run_vcpu(vcpu, pmcr_n);

- destroy_vpmu_vm();
+ destroy_vpmu_vm(vpmu_vm);
}

static struct pmreg_sets validity_check_reg_sets[] = {
@@ -580,7 +528,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
uint64_t valid_counters_mask, max_counters_mask;

test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
- vcpu = vpmu_vm.vcpu;
+ vcpu = vpmu_vm->vcpu;

valid_counters_mask = get_counters_mask(pmcr_n);
max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS);
@@ -621,7 +569,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
}

- destroy_vpmu_vm();
+ destroy_vpmu_vm(vpmu_vm);
}

/*
@@ -634,7 +582,7 @@ static void run_error_test(uint64_t pmcr_n)
pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);

test_create_vpmu_vm_with_pmcr_n(pmcr_n, true);
- destroy_vpmu_vm();
+ destroy_vpmu_vm(vpmu_vm);
}

/*
@@ -645,9 +593,9 @@ static uint64_t get_pmcr_n_limit(void)
{
uint64_t pmcr;

- create_vpmu_vm(guest_code);
- vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
- destroy_vpmu_vm();
+ create_vpmu_vm_with_handler(guest_code);
+ vcpu_get_reg(vpmu_vm->vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
+ destroy_vpmu_vm(vpmu_vm);
return get_pmcr_n(pmcr);
}

diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
new file mode 100644
index 000000000000..0a56183644ee
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <kvm_util.h>
+
+#define GICD_BASE_GPA 0x8000000ULL
+#define GICR_BASE_GPA 0x80A0000ULL
+
+struct vpmu_vm {
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+ int gic_fd;
+};
+
+struct vpmu_vm *create_vpmu_vm(void *guest_code);
+
+void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
new file mode 100644
index 000000000000..b3de8fdc555e
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kvm_util.h>
+#include <processor.h>
+#include <test_util.h>
+#include <vgic.h>
+#include <vpmu.h>
+#include <perf/arm_pmuv3.h>
+
+/* Create a VM that has one vCPU with PMUv3 configured. */
+struct vpmu_vm *create_vpmu_vm(void *guest_code)
+{
+ struct kvm_vcpu_init init;
+ uint8_t pmuver;
+ uint64_t dfr0, irq = 23;
+ struct kvm_device_attr irq_attr = {
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+ .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
+ .addr = (uint64_t)&irq,
+ };
+ struct kvm_device_attr init_attr = {
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+ .attr = KVM_ARM_VCPU_PMU_V3_INIT,
+ };
+ struct vpmu_vm *vpmu_vm;
+
+ vpmu_vm = calloc(1, sizeof(*vpmu_vm));
+ TEST_ASSERT(vpmu_vm != NULL, "Insufficient Memory");
+ memset(vpmu_vm, 0, sizeof(vpmu_vm));
+
+ vpmu_vm->vm = vm_create(1);
+ vm_init_descriptor_tables(vpmu_vm->vm);
+
+ /* Create vCPU with PMUv3 */
+ vm_ioctl(vpmu_vm->vm, KVM_ARM_PREFERRED_TARGET, &init);
+ init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
+ vpmu_vm->vcpu = aarch64_vcpu_add(vpmu_vm->vm, 0, &init, guest_code);
+ vcpu_init_descriptor_tables(vpmu_vm->vcpu);
+ vpmu_vm->gic_fd = vgic_v3_setup(vpmu_vm->vm, 1, 64,
+ GICD_BASE_GPA, GICR_BASE_GPA);
+ __TEST_REQUIRE(vpmu_vm->gic_fd >= 0,
+ "Failed to create vgic-v3, skipping");
+
+ /* Make sure that PMUv3 support is indicated in the ID register */
+ vcpu_get_reg(vpmu_vm->vcpu,
+ KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
+ pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
+ TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
+ pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
+ "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
+
+ /* Initialize vPMU */
+ vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
+ vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
+
+ return vpmu_vm;
+}
+
+void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm)
+{
+ close(vpmu_vm->gic_fd);
+ kvm_vm_free(vpmu_vm->vm);
+ free(vpmu_vm);
+}
--
2.40.1

2023-11-23 06:39:57

by Shaoqin Huang

[permalink] [raw]
Subject: [PATCH v1 3/3] KVM: selftests: aarch64: Introduce pmu_event_filter_test

Introduce pmu_event_filter_test for arm64 platforms. The test configures
PMUv3 for a vCPU, sets different PMU event filters for the vCPU, and
checks that the guest can use the events which userspace allows and
cannot use the events which userspace denies.

This test refactors create_vpmu_vm() and makes it a wrapper for
__create_vpmu_vm(), which lets us do some extra initialization before
KVM_ARM_VCPU_PMU_V3_INIT.

This test chooses the branches_retired and instructions_retired events,
lets the guest use the two events in the PMU, and checks whether the
result is as expected.

Signed-off-by: Shaoqin Huang <[email protected]>
---
tools/testing/selftests/kvm/Makefile | 1 +
.../kvm/aarch64/pmu_event_filter_test.c | 227 ++++++++++++++++++
.../selftests/kvm/include/aarch64/vpmu.h | 4 +
.../testing/selftests/kvm/lib/aarch64/vpmu.c | 14 +-
4 files changed, 244 insertions(+), 2 deletions(-)
create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index b60852c222ac..5f126e1a1dbf 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
+TEST_GEN_PROGS_aarch64 += aarch64/pmu_event_filter_test
TEST_GEN_PROGS_aarch64 += aarch64/psci_test
TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
new file mode 100644
index 000000000000..a876f5c2033b
--- /dev/null
+++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pmu_event_filter_test - Test user limit pmu event for guest.
+ *
+ * Copyright (c) 2023 Red Hat, Inc.
+ *
+ * This test checks if the guest only see the limited pmu event that userspace
+ * sets, if the gust can use those events which user allow, and if the guest
+ * can't use those events which user deny.
+ * It also checks set invalid filter return the expected error.
+ * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
+ */
+#include <kvm_util.h>
+#include <processor.h>
+#include <vgic.h>
+#include <vpmu.h>
+#include <test_util.h>
+#include <perf/arm_pmuv3.h>
+
+struct {
+ uint64_t branches_retired;
+ uint64_t instructions_retired;
+} pmc_results;
+
+static struct vpmu_vm *vpmu_vm;
+
+#define FILTER_NR 10
+
+struct test_desc {
+ const char *name;
+ void (*check_result)(void);
+ struct kvm_pmu_event_filter filter[FILTER_NR];
+};
+
+#define __DEFINE_FILTER(base, num, act) \
+ ((struct kvm_pmu_event_filter) { \
+ .base_event = base, \
+ .nevents = num, \
+ .action = act, \
+ })
+
+#define DEFINE_FILTER(base, act) __DEFINE_FILTER(base, 1, act)
+
+#define EMPTY_FILTER { 0 }
+
+#define SW_INCR 0x0
+#define INST_RETIRED 0x8
+#define BR_RETIERD 0x21
+
+#define NUM_BRANCHES 10
+
+static void run_and_measure_loop(void)
+{
+ asm volatile(
+ " mov x10, %[loop]\n"
+ "1: sub x10, x10, #1\n"
+ " cmp x10, #0x0\n"
+ " b.gt 1b\n"
+ :
+ : [loop] "r" (NUM_BRANCHES)
+ : "x10", "cc");
+}
+
+static void guest_code(void)
+{
+ uint64_t pmcr = read_sysreg(pmcr_el0);
+
+ pmu_disable_reset();
+
+ write_pmevtypern(0, BR_RETIERD);
+ write_pmevtypern(1, INST_RETIRED);
+ enable_counter(0);
+ enable_counter(1);
+ write_sysreg(pmcr | ARMV8_PMU_PMCR_E, pmcr_el0);
+
+ run_and_measure_loop();
+
+ write_sysreg(pmcr, pmcr_el0);
+
+ pmc_results.branches_retired = read_sysreg(pmevcntr0_el0);
+ pmc_results.instructions_retired = read_sysreg(pmevcntr1_el0);
+
+ GUEST_DONE();
+}
+
+static void pmu_event_filter_init(struct vpmu_vm *vm, void *arg)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+ .attr = KVM_ARM_VCPU_PMU_V3_FILTER,
+ };
+ struct kvm_pmu_event_filter *filter = (struct kvm_pmu_event_filter *)arg;
+
+ while (filter && filter->nevents != 0) {
+ attr.addr = (uint64_t)filter;
+ vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
+ filter++;
+ }
+}
+
+static void create_vpmu_vm_with_filter(void *guest_code,
+ struct kvm_pmu_event_filter *filter)
+{
+ vpmu_vm = __create_vpmu_vm(guest_code, pmu_event_filter_init, filter);
+}
+
+static void run_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct ucall uc;
+
+ while (1) {
+ vcpu_run(vcpu);
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_DONE:
+ return;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+}
+
+static void check_pmc_counting(void)
+{
+ uint64_t br = pmc_results.branches_retired;
+ uint64_t ir = pmc_results.instructions_retired;
+
+ TEST_ASSERT(br && br == NUM_BRANCHES, "Branch instructions retired = "
+ "%lu (expected %u)", br, NUM_BRANCHES);
+ TEST_ASSERT(ir, "Instructions retired = %lu (expected > 0)", ir);
+}
+
+static void check_pmc_not_counting(void)
+{
+ uint64_t br = pmc_results.branches_retired;
+ uint64_t ir = pmc_results.instructions_retired;
+
+ TEST_ASSERT(!br, "Branch instructions retired = %lu (expected 0)", br);
+ TEST_ASSERT(!ir, "Instructions retired = %lu (expected 0)", ir);
+}
+
+static void run_vcpu_and_sync_pmc_results(void)
+{
+ memset(&pmc_results, 0, sizeof(pmc_results));
+ sync_global_to_guest(vpmu_vm->vm, pmc_results);
+
+ run_vcpu(vpmu_vm->vcpu);
+
+ sync_global_from_guest(vpmu_vm->vm, pmc_results);
+}
+
+static void run_test(struct test_desc *t)
+{
+ pr_debug("Test: %s\n", t->name);
+
+ create_vpmu_vm_with_filter(guest_code, t->filter);
+
+ run_vcpu_and_sync_pmc_results();
+
+ t->check_result();
+
+ destroy_vpmu_vm(vpmu_vm);
+}
+
+static struct test_desc tests[] = {
+ {"without_filter", check_pmc_counting, { EMPTY_FILTER }},
+ {"member_allow_filter", check_pmc_counting,
+ {DEFINE_FILTER(SW_INCR, 0), DEFINE_FILTER(INST_RETIRED, 0),
+ DEFINE_FILTER(BR_RETIERD, 0), EMPTY_FILTER}},
+ {"member_deny_filter", check_pmc_not_counting,
+ {DEFINE_FILTER(SW_INCR, 1), DEFINE_FILTER(INST_RETIRED, 1),
+ DEFINE_FILTER(BR_RETIERD, 1), EMPTY_FILTER}},
+ {"not_member_deny_filter", check_pmc_counting,
+ {DEFINE_FILTER(SW_INCR, 1), EMPTY_FILTER}},
+ {"not_member_allow_filter", check_pmc_not_counting,
+ {DEFINE_FILTER(SW_INCR, 0), EMPTY_FILTER}},
+ { 0 }
+};
+
+static void for_each_test(void)
+{
+ struct test_desc *t;
+
+ for (t = &tests[0]; t->name; t++)
+ run_test(t);
+}
+
+static void set_invalid_filter(struct vpmu_vm *vm, void *arg)
+{
+ struct kvm_pmu_event_filter invalid;
+ struct kvm_device_attr attr = {
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+ .attr = KVM_ARM_VCPU_PMU_V3_FILTER,
+ .addr = (uint64_t)&invalid,
+ };
+ int ret = 0;
+
+ /* The max event number is (1 << 16), set a range large than it. */
+ invalid = __DEFINE_FILTER(BIT(15), BIT(15)+1, 0);
+ ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
+ TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter range "
+ "ret = %d, errno = %d (expected ret = -1, errno = EINVAL)",
+ ret, errno);
+
+ ret = 0;
+
+ /* Set the Invalid action. */
+ invalid = __DEFINE_FILTER(0, 1, 3);
+ ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
+ TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter action "
+ "ret = %d, errno = %d (expected ret = -1, errno = EINVAL)",
+ ret, errno);
+}
+
+static void test_invalid_filter(void)
+{
+ vpmu_vm = __create_vpmu_vm(guest_code, set_invalid_filter, NULL);
+ destroy_vpmu_vm(vpmu_vm);
+}
+
+int main(void)
+{
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
+
+ for_each_test();
+
+ test_invalid_filter();
+}
diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
index e0cc1ca1c4b7..db97bfb07996 100644
--- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
+++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
@@ -18,6 +18,10 @@ struct vpmu_vm {
int gic_fd;
};

+struct vpmu_vm *__create_vpmu_vm(void *guest_code,
+ void (*init_pmu)(struct vpmu_vm *vm, void *arg),
+ void *arg);
+
struct vpmu_vm *create_vpmu_vm(void *guest_code);

void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
index b3de8fdc555e..76ea03d607f1 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
@@ -7,8 +7,9 @@
#include <vpmu.h>
#include <perf/arm_pmuv3.h>

-/* Create a VM that has one vCPU with PMUv3 configured. */
-struct vpmu_vm *create_vpmu_vm(void *guest_code)
+struct vpmu_vm *__create_vpmu_vm(void *guest_code,
+ void (*init_pmu)(struct vpmu_vm *vm, void *arg),
+ void *arg)
{
struct kvm_vcpu_init init;
uint8_t pmuver;
@@ -50,12 +51,21 @@ struct vpmu_vm *create_vpmu_vm(void *guest_code)
"Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);

/* Initialize vPMU */
+ if (init_pmu)
+ init_pmu(vpmu_vm, arg);
+
vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);

return vpmu_vm;
}

+/* Create a VM that has one vCPU with PMUv3 configured. */
+struct vpmu_vm *create_vpmu_vm(void *guest_code)
+{
+ return __create_vpmu_vm(guest_code, NULL, NULL);
+}
+
void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm)
{
close(vpmu_vm->gic_fd);
--
2.40.1

2023-11-24 18:16:44

by Eric Auger

[permalink] [raw]
Subject: Re: [PATCH v1 2/3] KVM: selftests: aarch64: Move the pmu helper function into lib/

Hi Shaoqin,

On 11/23/23 07:37, Shaoqin Huang wrote:
> Move those pmu helper function into lib/, thus it can be used by other
functions

Not really into lib/, but rather into the vpmu.h header.
> pmu test.

no functional change intended
>
> Signed-off-by: Shaoqin Huang <[email protected]>
> ---
> .../kvm/aarch64/vpmu_counter_access.c | 118 -----------------
> .../selftests/kvm/include/aarch64/vpmu.h | 119 ++++++++++++++++++
> 2 files changed, 119 insertions(+), 118 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> index 17305408a334..62d6315790ab 100644
> --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> @@ -20,12 +20,6 @@
> #include <perf/arm_pmuv3.h>
> #include <linux/bitfield.h>
>
> -/* The max number of the PMU event counters (excluding the cycle counter) */
> -#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
> -
> -/* The cycle counter bit position that's common among the PMU registers */
> -#define ARMV8_PMU_CYCLE_IDX 31
> -
> static struct vpmu_vm *vpmu_vm;
>
> struct pmreg_sets {
> @@ -35,118 +29,6 @@ struct pmreg_sets {
>
> #define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
>
> -static uint64_t get_pmcr_n(uint64_t pmcr)
> -{
> - return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
> -}
> -
> -static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
> -{
> - *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
> - *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
> -}
> -
> -static uint64_t get_counters_mask(uint64_t n)
> -{
> - uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
> -
> - if (n)
> - mask |= GENMASK(n - 1, 0);
> - return mask;
> -}
> -
> -/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
> -static inline unsigned long read_sel_evcntr(int sel)
> -{
> - write_sysreg(sel, pmselr_el0);
> - isb();
> - return read_sysreg(pmxevcntr_el0);
> -}
> -
> -/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
> -static inline void write_sel_evcntr(int sel, unsigned long val)
> -{
> - write_sysreg(sel, pmselr_el0);
> - isb();
> - write_sysreg(val, pmxevcntr_el0);
> - isb();
> -}
> -
> -/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
> -static inline unsigned long read_sel_evtyper(int sel)
> -{
> - write_sysreg(sel, pmselr_el0);
> - isb();
> - return read_sysreg(pmxevtyper_el0);
> -}
> -
> -/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
> -static inline void write_sel_evtyper(int sel, unsigned long val)
> -{
> - write_sysreg(sel, pmselr_el0);
> - isb();
> - write_sysreg(val, pmxevtyper_el0);
> - isb();
> -}
> -
> -static inline void enable_counter(int idx)
> -{
> - uint64_t v = read_sysreg(pmcntenset_el0);
> -
> - write_sysreg(BIT(idx) | v, pmcntenset_el0);
> - isb();
> -}
> -
> -static inline void disable_counter(int idx)
> -{
> - uint64_t v = read_sysreg(pmcntenset_el0);
> -
> - write_sysreg(BIT(idx) | v, pmcntenclr_el0);
> - isb();
> -}
> -
> -static void pmu_disable_reset(void)
> -{
> - uint64_t pmcr = read_sysreg(pmcr_el0);
> -
> - /* Reset all counters, disabling them */
> - pmcr &= ~ARMV8_PMU_PMCR_E;
> - write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
> - isb();
> -}
> -
> -#define RETURN_READ_PMEVCNTRN(n) \
> - return read_sysreg(pmevcntr##n##_el0)
> -static unsigned long read_pmevcntrn(int n)
> -{
> - PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
> - return 0;
> -}
> -
> -#define WRITE_PMEVCNTRN(n) \
> - write_sysreg(val, pmevcntr##n##_el0)
> -static void write_pmevcntrn(int n, unsigned long val)
> -{
> - PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
> - isb();
> -}
> -
> -#define READ_PMEVTYPERN(n) \
> - return read_sysreg(pmevtyper##n##_el0)
> -static unsigned long read_pmevtypern(int n)
> -{
> - PMEVN_SWITCH(n, READ_PMEVTYPERN);
> - return 0;
> -}
> -
> -#define WRITE_PMEVTYPERN(n) \
> - write_sysreg(val, pmevtyper##n##_el0)
> -static void write_pmevtypern(int n, unsigned long val)
> -{
> - PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
> - isb();
> -}
> -
> /*
> * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
> * accessors that test cases will use. Each of the accessors will
> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> index 0a56183644ee..e0cc1ca1c4b7 100644
> --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> @@ -1,10 +1,17 @@
> /* SPDX-License-Identifier: GPL-2.0 */
>
> #include <kvm_util.h>
> +#include <perf/arm_pmuv3.h>
>
> #define GICD_BASE_GPA 0x8000000ULL
> #define GICR_BASE_GPA 0x80A0000ULL
>
> +/* The max number of the PMU event counters (excluding the cycle counter) */
> +#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
> +
> +/* The cycle counter bit position that's common among the PMU registers */
> +#define ARMV8_PMU_CYCLE_IDX 31
> +
> struct vpmu_vm {
> struct kvm_vm *vm;
> struct kvm_vcpu *vcpu;
> @@ -14,3 +21,115 @@ struct vpmu_vm {
> struct vpmu_vm *create_vpmu_vm(void *guest_code);
>
> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
> +
> +static inline uint64_t get_pmcr_n(uint64_t pmcr)
> +{
> + return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
> +}
> +
> +static inline void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
> +{
> + *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
> + *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
> +}
> +
> +static inline uint64_t get_counters_mask(uint64_t n)
> +{
> + uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
> +
> + if (n)
> + mask |= GENMASK(n - 1, 0);
> + return mask;
> +}
> +
> +/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
> +static inline unsigned long read_sel_evcntr(int sel)
> +{
> + write_sysreg(sel, pmselr_el0);
> + isb();
> + return read_sysreg(pmxevcntr_el0);
> +}
> +
> +/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
> +static inline void write_sel_evcntr(int sel, unsigned long val)
> +{
> + write_sysreg(sel, pmselr_el0);
> + isb();
> + write_sysreg(val, pmxevcntr_el0);
> + isb();
> +}
> +
> +/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
> +static inline unsigned long read_sel_evtyper(int sel)
> +{
> + write_sysreg(sel, pmselr_el0);
> + isb();
> + return read_sysreg(pmxevtyper_el0);
> +}
> +
> +/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
> +static inline void write_sel_evtyper(int sel, unsigned long val)
> +{
> + write_sysreg(sel, pmselr_el0);
> + isb();
> + write_sysreg(val, pmxevtyper_el0);
> + isb();
> +}
> +
> +static inline void enable_counter(int idx)
> +{
> + uint64_t v = read_sysreg(pmcntenset_el0);
> +
> + write_sysreg(BIT(idx) | v, pmcntenset_el0);
> + isb();
> +}
> +
> +static inline void disable_counter(int idx)
> +{
> + uint64_t v = read_sysreg(pmcntenset_el0);
> +
> + write_sysreg(BIT(idx) | v, pmcntenclr_el0);
> + isb();
> +}
> +
> +static inline void pmu_disable_reset(void)
> +{
> + uint64_t pmcr = read_sysreg(pmcr_el0);
> +
> + /* Reset all counters, disabling them */
> + pmcr &= ~ARMV8_PMU_PMCR_E;
> + write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
> + isb();
> +}
> +
> +#define RETURN_READ_PMEVCNTRN(n) \
> + return read_sysreg(pmevcntr##n##_el0)
> +static inline unsigned long read_pmevcntrn(int n)
> +{
> + PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
> + return 0;
> +}
> +
> +#define WRITE_PMEVCNTRN(n) \
> + write_sysreg(val, pmevcntr##n##_el0)
> +static inline void write_pmevcntrn(int n, unsigned long val)
> +{
> + PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
> + isb();
> +}
> +
> +#define READ_PMEVTYPERN(n) \
> + return read_sysreg(pmevtyper##n##_el0)
> +static inline unsigned long read_pmevtypern(int n)
> +{
> + PMEVN_SWITCH(n, READ_PMEVTYPERN);
> + return 0;
> +}
> +
> +#define WRITE_PMEVTYPERN(n) \
> + write_sysreg(val, pmevtyper##n##_el0)
> +static inline void write_pmevtypern(int n, unsigned long val)
> +{
> + PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
> + isb();
> +}

Reviewed-by: Eric Auger <[email protected]>

Thanks

Eric

2023-11-24 18:16:45

by Eric Auger

[permalink] [raw]
Subject: Re: [PATCH v1 1/3] KVM: selftests: aarch64: Make the [create|destroy]_vpmu_vm() can be reused

Hi Shaoqin,

On 11/23/23 07:37, Shaoqin Huang wrote:
> Move the [create|destroy]_vpmu_vm() into the lib/, which makes those
some wording suggestions below:

Move the implementation of .. into lib/aarch64/pmu.c and export their
declaration in a header so that they can be reused by other tests. Also
the title may be renamed: Make [create|destroy]_vpmu_vm() public
> function can be used by other tests. Install the handler is specific to
The sync exception handler installation is test-specific, so move it out of
the helper function.
> the vpmu_counter_access test, so create a wrapper function for it, and
> only move the common part.
>
> No functional change.
intended ;-)
>
> Signed-off-by: Shaoqin Huang <[email protected]>
> ---
> tools/testing/selftests/kvm/Makefile | 1 +
> .../kvm/aarch64/vpmu_counter_access.c | 100 +++++-------------
> .../selftests/kvm/include/aarch64/vpmu.h | 16 +++
> .../testing/selftests/kvm/lib/aarch64/vpmu.c | 64 +++++++++++
> 4 files changed, 105 insertions(+), 76 deletions(-)
> create mode 100644 tools/testing/selftests/kvm/include/aarch64/vpmu.h
> create mode 100644 tools/testing/selftests/kvm/lib/aarch64/vpmu.c
>
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index a5963ab9215b..b60852c222ac 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -57,6 +57,7 @@ LIBKVM_aarch64 += lib/aarch64/processor.c
> LIBKVM_aarch64 += lib/aarch64/spinlock.c
> LIBKVM_aarch64 += lib/aarch64/ucall.c
> LIBKVM_aarch64 += lib/aarch64/vgic.c
> +LIBKVM_aarch64 += lib/aarch64/vpmu.c
>
> LIBKVM_s390x += lib/s390x/diag318_test_handler.c
> LIBKVM_s390x += lib/s390x/processor.c
> diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> index 5ea78986e665..17305408a334 100644
> --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> @@ -16,6 +16,7 @@
> #include <processor.h>
> #include <test_util.h>
> #include <vgic.h>
> +#include <vpmu.h>
> #include <perf/arm_pmuv3.h>
> #include <linux/bitfield.h>
>
> @@ -25,13 +26,7 @@
> /* The cycle counter bit position that's common among the PMU registers */
> #define ARMV8_PMU_CYCLE_IDX 31
>
> -struct vpmu_vm {
> - struct kvm_vm *vm;
> - struct kvm_vcpu *vcpu;
> - int gic_fd;
> -};
> -
> -static struct vpmu_vm vpmu_vm;
> +static struct vpmu_vm *vpmu_vm;
>
> struct pmreg_sets {
> uint64_t set_reg_id;
> @@ -421,64 +416,6 @@ static void guest_code(uint64_t expected_pmcr_n)
> GUEST_DONE();
> }
>
> -#define GICD_BASE_GPA 0x8000000ULL
> -#define GICR_BASE_GPA 0x80A0000ULL
> -
> -/* Create a VM that has one vCPU with PMUv3 configured. */
> -static void create_vpmu_vm(void *guest_code)
> -{
> - struct kvm_vcpu_init init;
> - uint8_t pmuver, ec;
> - uint64_t dfr0, irq = 23;
> - struct kvm_device_attr irq_attr = {
> - .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> - .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
> - .addr = (uint64_t)&irq,
> - };
> - struct kvm_device_attr init_attr = {
> - .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> - .attr = KVM_ARM_VCPU_PMU_V3_INIT,
> - };
> -
> - /* The test creates the vpmu_vm multiple times. Ensure a clean state */
> - memset(&vpmu_vm, 0, sizeof(vpmu_vm));
> -
> - vpmu_vm.vm = vm_create(1);
> - vm_init_descriptor_tables(vpmu_vm.vm);
> - for (ec = 0; ec < ESR_EC_NUM; ec++) {
> - vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
> - guest_sync_handler);
> - }
> -
> - /* Create vCPU with PMUv3 */
> - vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
> - init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
> - vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
> - vcpu_init_descriptor_tables(vpmu_vm.vcpu);
> - vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
> - GICD_BASE_GPA, GICR_BASE_GPA);
> - __TEST_REQUIRE(vpmu_vm.gic_fd >= 0,
> - "Failed to create vgic-v3, skipping");
> -
> - /* Make sure that PMUv3 support is indicated in the ID register */
> - vcpu_get_reg(vpmu_vm.vcpu,
> - KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
> - pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
> - TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
> - pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
> - "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
> -
> - /* Initialize vPMU */
> - vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
> - vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
> -}
> -
> -static void destroy_vpmu_vm(void)
> -{
> - close(vpmu_vm.gic_fd);
> - kvm_vm_free(vpmu_vm.vm);
> -}
> -
> static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
> {
> struct ucall uc;
> @@ -497,13 +434,24 @@ static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
> }
> }
>
> +static void create_vpmu_vm_with_handler(void *guest_code)
> +{
> + uint8_t ec;
> + vpmu_vm = create_vpmu_vm(guest_code);
> +
> + for (ec = 0; ec < ESR_EC_NUM; ec++) {
> + vm_install_sync_handler(vpmu_vm->vm, VECTOR_SYNC_CURRENT, ec,
> + guest_sync_handler);
> + }
> +}
> +
> static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
> {
> struct kvm_vcpu *vcpu;
> uint64_t pmcr, pmcr_orig;
>
> - create_vpmu_vm(guest_code);
> - vcpu = vpmu_vm.vcpu;
> + create_vpmu_vm_with_handler(guest_code);
> + vcpu = vpmu_vm->vcpu;
>
> vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
> pmcr = pmcr_orig;
> @@ -539,7 +487,7 @@ static void run_access_test(uint64_t pmcr_n)
> pr_debug("Test with pmcr_n %lu\n", pmcr_n);
>
> test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
> - vcpu = vpmu_vm.vcpu;
> + vcpu = vpmu_vm->vcpu;
>
> /* Save the initial sp to restore them later to run the guest again */
> vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);
> @@ -550,7 +498,7 @@ static void run_access_test(uint64_t pmcr_n)
> * Reset and re-initialize the vCPU, and run the guest code again to
> * check if PMCR_EL0.N is preserved.
> */
> - vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
> + vm_ioctl(vpmu_vm->vm, KVM_ARM_PREFERRED_TARGET, &init);
> init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
> aarch64_vcpu_setup(vcpu, &init);
> vcpu_init_descriptor_tables(vcpu);
> @@ -559,7 +507,7 @@ static void run_access_test(uint64_t pmcr_n)
>
> run_vcpu(vcpu, pmcr_n);
>
> - destroy_vpmu_vm();
> + destroy_vpmu_vm(vpmu_vm);
> }
>
> static struct pmreg_sets validity_check_reg_sets[] = {
> @@ -580,7 +528,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
> uint64_t valid_counters_mask, max_counters_mask;
>
> test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
> - vcpu = vpmu_vm.vcpu;
> + vcpu = vpmu_vm->vcpu;
>
> valid_counters_mask = get_counters_mask(pmcr_n);
> max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS);
> @@ -621,7 +569,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
> KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
> }
>
> - destroy_vpmu_vm();
> + destroy_vpmu_vm(vpmu_vm);
> }
>
> /*
> @@ -634,7 +582,7 @@ static void run_error_test(uint64_t pmcr_n)
> pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);
>
> test_create_vpmu_vm_with_pmcr_n(pmcr_n, true);
> - destroy_vpmu_vm();
> + destroy_vpmu_vm(vpmu_vm);
> }
>
> /*
> @@ -645,9 +593,9 @@ static uint64_t get_pmcr_n_limit(void)
> {
> uint64_t pmcr;
>
> - create_vpmu_vm(guest_code);
> - vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
> - destroy_vpmu_vm();
> + create_vpmu_vm_with_handler(guest_code);
> + vcpu_get_reg(vpmu_vm->vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
> + destroy_vpmu_vm(vpmu_vm);
> return get_pmcr_n(pmcr);
> }
>
> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> new file mode 100644
> index 000000000000..0a56183644ee
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> @@ -0,0 +1,16 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#include <kvm_util.h>
> +
> +#define GICD_BASE_GPA 0x8000000ULL
> +#define GICR_BASE_GPA 0x80A0000ULL
> +
> +struct vpmu_vm {
> + struct kvm_vm *vm;
> + struct kvm_vcpu *vcpu;
> + int gic_fd;
> +};
> +
> +struct vpmu_vm *create_vpmu_vm(void *guest_code);
> +
> +void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
> diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
> new file mode 100644
> index 000000000000..b3de8fdc555e
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
> @@ -0,0 +1,64 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#include <kvm_util.h>
> +#include <processor.h>
> +#include <test_util.h>
> +#include <vgic.h>
> +#include <vpmu.h>
> +#include <perf/arm_pmuv3.h>
> +
> +/* Create a VM that has one vCPU with PMUv3 configured. */
> +struct vpmu_vm *create_vpmu_vm(void *guest_code)
> +{
> + struct kvm_vcpu_init init;
> + uint8_t pmuver;
> + uint64_t dfr0, irq = 23;
> + struct kvm_device_attr irq_attr = {
> + .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> + .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
> + .addr = (uint64_t)&irq,
> + };
> + struct kvm_device_attr init_attr = {
> + .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> + .attr = KVM_ARM_VCPU_PMU_V3_INIT,
> + };
> + struct vpmu_vm *vpmu_vm;
> +
> + vpmu_vm = calloc(1, sizeof(*vpmu_vm));
> + TEST_ASSERT(vpmu_vm != NULL, "Insufficient Memory");
> + memset(vpmu_vm, 0, sizeof(vpmu_vm));
> +
> + vpmu_vm->vm = vm_create(1);
> + vm_init_descriptor_tables(vpmu_vm->vm);
> +
> + /* Create vCPU with PMUv3 */
> + vm_ioctl(vpmu_vm->vm, KVM_ARM_PREFERRED_TARGET, &init);
> + init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
> + vpmu_vm->vcpu = aarch64_vcpu_add(vpmu_vm->vm, 0, &init, guest_code);
> + vcpu_init_descriptor_tables(vpmu_vm->vcpu);
> + vpmu_vm->gic_fd = vgic_v3_setup(vpmu_vm->vm, 1, 64,
> + GICD_BASE_GPA, GICR_BASE_GPA);
> + __TEST_REQUIRE(vpmu_vm->gic_fd >= 0,
> + "Failed to create vgic-v3, skipping");
> +
> + /* Make sure that PMUv3 support is indicated in the ID register */
> + vcpu_get_reg(vpmu_vm->vcpu,
> + KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
> + pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
> + TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
> + pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
> + "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
> +
> + /* Initialize vPMU */
> + vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
> + vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
> +
> + return vpmu_vm;
> +}
> +
> +void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm)
> +{
> + close(vpmu_vm->gic_fd);
> + kvm_vm_free(vpmu_vm->vm);
> + free(vpmu_vm);
> +}
Besides looks good to me
Reviewed-by: Eric Auger <[email protected]>

Eric


2023-11-27 08:39:17

by Eric Auger

[permalink] [raw]
Subject: Re: [PATCH v1 3/3] KVM: selftests: aarch64: Introduce pmu_event_filter_test

Hi Shaoqin,

On 11/23/23 07:37, Shaoqin Huang wrote:
> Introduce pmu_event_filter_test for arm64 platforms. The test configures
> PMUv3 for a vCPU, and sets different pmu event filter for the vCPU, and
filters
> check if the guest can use those events which user allow and can't use
> those events which use deny.
>
> This test refactor the create_vpmu_vm() and make it a wrapper for
> __create_vpmu_vm(), which can let we do some extra init before
which can let we do -> which allows some extra init code.
> KVM_ARM_VCPU_PMU_V3_INIT.
>
> This test choose the branches_retired and the instructions_retired
> event, and let guest use the two events in pmu. And check if the result
Are you sure those events are supported?
> is expected.
>
> Signed-off-by: Shaoqin Huang <[email protected]>
> ---
> tools/testing/selftests/kvm/Makefile | 1 +
> .../kvm/aarch64/pmu_event_filter_test.c | 227 ++++++++++++++++++
> .../selftests/kvm/include/aarch64/vpmu.h | 4 +
> .../testing/selftests/kvm/lib/aarch64/vpmu.c | 14 +-
> 4 files changed, 244 insertions(+), 2 deletions(-)
> create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
>
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index b60852c222ac..5f126e1a1dbf 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
> TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
> TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
> TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
> +TEST_GEN_PROGS_aarch64 += aarch64/pmu_event_filter_test
> TEST_GEN_PROGS_aarch64 += aarch64/psci_test
> TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
> TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
> diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
> new file mode 100644
> index 000000000000..a876f5c2033b
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
> @@ -0,0 +1,227 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * pmu_event_filter_test - Test user limit pmu event for guest.
> + *
> + * Copyright (c) 2023 Red Hat, Inc.
> + *
> + * This test checks if the guest only see the limited pmu event that userspace
sees
> + * sets, if the gust can use those events which user allow, and if the guest
s/gust/guest
> + * can't use those events which user deny.
> + * It also checks set invalid filter return the expected error.
it also checks that setting invalid filter ranges ...
> + * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
> + */
> +#include <kvm_util.h>
> +#include <processor.h>
> +#include <vgic.h>
> +#include <vpmu.h>
> +#include <test_util.h>
> +#include <perf/arm_pmuv3.h>
> +
> +struct {
> + uint64_t branches_retired;
> + uint64_t instructions_retired;
> +} pmc_results;
> +
> +static struct vpmu_vm *vpmu_vm;
> +
> +#define FILTER_NR 10
> +
> +struct test_desc {
> + const char *name;
> + void (*check_result)(void);
> + struct kvm_pmu_event_filter filter[FILTER_NR];
> +};
> +
> +#define __DEFINE_FILTER(base, num, act) \
> + ((struct kvm_pmu_event_filter) { \
> + .base_event = base, \
> + .nevents = num, \
> + .action = act, \
> + })
> +
> +#define DEFINE_FILTER(base, act) __DEFINE_FILTER(base, 1, act)
> +
> +#define EMPTY_FILTER { 0 }
> +
> +#define SW_INCR 0x0
> +#define INST_RETIRED 0x8
> +#define BR_RETIERD 0x21
looks like a typo
> +
> +#define NUM_BRANCHES 10
> +
> +static void run_and_measure_loop(void)
> +{
> + asm volatile(
> + " mov x10, %[loop]\n"
> + "1: sub x10, x10, #1\n"
> + " cmp x10, #0x0\n"
> + " b.gt 1b\n"
> + :
> + : [loop] "r" (NUM_BRANCHES)
> + : "x10", "cc");
> +}
> +
> +static void guest_code(void)
> +{
> + uint64_t pmcr = read_sysreg(pmcr_el0);
> +
> + pmu_disable_reset();
> +
> + write_pmevtypern(0, BR_RETIERD);
> + write_pmevtypern(1, INST_RETIRED);
> + enable_counter(0);
> + enable_counter(1);
> + write_sysreg(pmcr | ARMV8_PMU_PMCR_E, pmcr_el0);
> +
> + run_and_measure_loop();
> +
> + write_sysreg(pmcr, pmcr_el0);
> +
> + pmc_results.branches_retired = read_sysreg(pmevcntr0_el0);
> + pmc_results.instructions_retired = read_sysreg(pmevcntr1_el0);
> +
> + GUEST_DONE();
Another, more direct way to see whether the guest can use those events is to
read the PMCEIDx_EL0 registers, which indicate whether an event is supported.

> +}
> +
> +static void pmu_event_filter_init(struct vpmu_vm *vm, void *arg)
> +{
> + struct kvm_device_attr attr = {
> + .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> + .attr = KVM_ARM_VCPU_PMU_V3_FILTER,
> + };
> + struct kvm_pmu_event_filter *filter = (struct kvm_pmu_event_filter *)arg;
> +
> + while (filter && filter->nevents != 0) {
> + attr.addr = (uint64_t)filter;
> + vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
> + filter++;
> + }
> +}
> +
> +static void create_vpmu_vm_with_filter(void *guest_code,
> + struct kvm_pmu_event_filter *filter)
> +{
> + vpmu_vm = __create_vpmu_vm(guest_code, pmu_event_filter_init, filter);
> +}
> +
> +static void run_vcpu(struct kvm_vcpu *vcpu)
> +{
> + struct ucall uc;
> +
> + while (1) {
> + vcpu_run(vcpu);
> + switch (get_ucall(vcpu, &uc)) {
> + case UCALL_DONE:
> + return;
> + default:
> + TEST_FAIL("Unknown ucall %lu", uc.cmd);
> + }
> + }
> +}
> +
> +static void check_pmc_counting(void)
> +{
> + uint64_t br = pmc_results.branches_retired;
> + uint64_t ir = pmc_results.instructions_retired;
> +
> + TEST_ASSERT(br && br == NUM_BRANCHES, "Branch instructions retired = "
> + "%lu (expected %u)", br, NUM_BRANCHES);
> + TEST_ASSERT(ir, "Instructions retired = %lu (expected > 0)", ir);
> +}
> +
> +static void check_pmc_not_counting(void)
> +{
> + uint64_t br = pmc_results.branches_retired;
> + uint64_t ir = pmc_results.instructions_retired;
> +
> + TEST_ASSERT(!br, "Branch instructions retired = %lu (expected 0)", br);
> + TEST_ASSERT(!ir, "Instructions retired = %lu (expected 0)", ir);
> +}
> +
> +static void run_vcpu_and_sync_pmc_results(void)
> +{
> + memset(&pmc_results, 0, sizeof(pmc_results));
> + sync_global_to_guest(vpmu_vm->vm, pmc_results);
> +
> + run_vcpu(vpmu_vm->vcpu);
> +
> + sync_global_from_guest(vpmu_vm->vm, pmc_results);
> +}
> +
> +static void run_test(struct test_desc *t)
> +{
> + pr_debug("Test: %s\n", t->name);
> +
> + create_vpmu_vm_with_filter(guest_code, t->filter);
> +
> + run_vcpu_and_sync_pmc_results();
> +
> + t->check_result();
> +
> + destroy_vpmu_vm(vpmu_vm);
> +}
> +
> +static struct test_desc tests[] = {
> + {"without_filter", check_pmc_counting, { EMPTY_FILTER }},
> + {"member_allow_filter", check_pmc_counting,
> + {DEFINE_FILTER(SW_INCR, 0), DEFINE_FILTER(INST_RETIRED, 0),
> + DEFINE_FILTER(BR_RETIERD, 0), EMPTY_FILTER}},
> + {"member_deny_filter", check_pmc_not_counting,
> + {DEFINE_FILTER(SW_INCR, 1), DEFINE_FILTER(INST_RETIRED, 1),
> + DEFINE_FILTER(BR_RETIERD, 1), EMPTY_FILTER}},
> + {"not_member_deny_filter", check_pmc_counting,
> + {DEFINE_FILTER(SW_INCR, 1), EMPTY_FILTER}},
> + {"not_member_allow_filter", check_pmc_not_counting,
> + {DEFINE_FILTER(SW_INCR, 0), EMPTY_FILTER}},
> + { 0 }
> +};
> +
> +static void for_each_test(void)
> +{
> + struct test_desc *t;
> +
> + for (t = &tests[0]; t->name; t++)
> + run_test(t);
> +}
> +
> +static void set_invalid_filter(struct vpmu_vm *vm, void *arg)
> +{
> + struct kvm_pmu_event_filter invalid;
> + struct kvm_device_attr attr = {
> + .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> + .attr = KVM_ARM_VCPU_PMU_V3_FILTER,
> + .addr = (uint64_t)&invalid,
> + };
> + int ret = 0;
> +
> + /* The max event number is (1 << 16), set a range large than it. */
> + invalid = __DEFINE_FILTER(BIT(15), BIT(15)+1, 0);
> + ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
> + TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter range "
> + "ret = %d, errno = %d (expected ret = -1, errno = EINVAL)",
> + ret, errno);
> +
> + ret = 0;
> +
> + /* Set the Invalid action. */
> + invalid = __DEFINE_FILTER(0, 1, 3);
> + ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
> + TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter action "
> + "ret = %d, errno = %d (expected ret = -1, errno = EINVAL)",
> + ret, errno);
> +}
> +
> +static void test_invalid_filter(void)
> +{
> + vpmu_vm = __create_vpmu_vm(guest_code, set_invalid_filter, NULL);
> + destroy_vpmu_vm(vpmu_vm);
> +}
> +
> +int main(void)
> +{
> + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
> +
> + for_each_test();
> +
> + test_invalid_filter();
I would introduce test_invalid_filter in a separate patch
> +}
> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> index e0cc1ca1c4b7..db97bfb07996 100644
> --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> @@ -18,6 +18,10 @@ struct vpmu_vm {
> int gic_fd;
> };
>
> +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
> + void (*init_pmu)(struct vpmu_vm *vm, void *arg),
> + void *arg);
> +
> struct vpmu_vm *create_vpmu_vm(void *guest_code);
>
> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
> diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
> index b3de8fdc555e..76ea03d607f1 100644
> --- a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
> +++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
> @@ -7,8 +7,9 @@
> #include <vpmu.h>
> #include <perf/arm_pmuv3.h>
>
> -/* Create a VM that has one vCPU with PMUv3 configured. */
> -struct vpmu_vm *create_vpmu_vm(void *guest_code)
> +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
> + void (*init_pmu)(struct vpmu_vm *vm, void *arg),
> + void *arg)
> {
> struct kvm_vcpu_init init;
> uint8_t pmuver;
> @@ -50,12 +51,21 @@ struct vpmu_vm *create_vpmu_vm(void *guest_code)
> "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
>
> /* Initialize vPMU */
> + if (init_pmu)
> + init_pmu(vpmu_vm, arg);
> +
> vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
> vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
>
> return vpmu_vm;
> }
>
> +/* Create a VM that has one vCPU with PMUv3 configured. */
> +struct vpmu_vm *create_vpmu_vm(void *guest_code)
> +{
> + return __create_vpmu_vm(guest_code, NULL, NULL);
> +}
> +
> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm)
> {
> close(vpmu_vm->gic_fd);
Thanks

Eric

2023-11-27 21:49:25

by Raghavendra Rao Ananta

[permalink] [raw]
Subject: Re: [PATCH v1 2/3] KVM: selftests: aarch64: Move the pmu helper function into lib/

Hi Shaoqin,

On Wed, Nov 22, 2023 at 10:39 PM Shaoqin Huang <[email protected]> wrote:
>
> Move those pmu helper function into lib/, thus it can be used by other
> pmu test.
>
> Signed-off-by: Shaoqin Huang <[email protected]>
> ---
> .../kvm/aarch64/vpmu_counter_access.c | 118 -----------------
> .../selftests/kvm/include/aarch64/vpmu.h | 119 ++++++++++++++++++
> 2 files changed, 119 insertions(+), 118 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> index 17305408a334..62d6315790ab 100644
> --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> @@ -20,12 +20,6 @@
> #include <perf/arm_pmuv3.h>
> #include <linux/bitfield.h>
>
> -/* The max number of the PMU event counters (excluding the cycle counter) */
> -#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
> -
> -/* The cycle counter bit position that's common among the PMU registers */
> -#define ARMV8_PMU_CYCLE_IDX 31
> -
> static struct vpmu_vm *vpmu_vm;
>
> struct pmreg_sets {
> @@ -35,118 +29,6 @@ struct pmreg_sets {
>
> #define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
>
> -static uint64_t get_pmcr_n(uint64_t pmcr)
> -{
> - return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
> -}
> -
> -static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
> -{
> - *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
> - *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
> -}
> -
> -static uint64_t get_counters_mask(uint64_t n)
> -{
> - uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
> -
> - if (n)
> - mask |= GENMASK(n - 1, 0);
> - return mask;
> -}
> -
> -/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
> -static inline unsigned long read_sel_evcntr(int sel)
> -{
> - write_sysreg(sel, pmselr_el0);
> - isb();
> - return read_sysreg(pmxevcntr_el0);
> -}
> -
> -/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
> -static inline void write_sel_evcntr(int sel, unsigned long val)
> -{
> - write_sysreg(sel, pmselr_el0);
> - isb();
> - write_sysreg(val, pmxevcntr_el0);
> - isb();
> -}
> -
> -/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
> -static inline unsigned long read_sel_evtyper(int sel)
> -{
> - write_sysreg(sel, pmselr_el0);
> - isb();
> - return read_sysreg(pmxevtyper_el0);
> -}
> -
> -/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
> -static inline void write_sel_evtyper(int sel, unsigned long val)
> -{
> - write_sysreg(sel, pmselr_el0);
> - isb();
> - write_sysreg(val, pmxevtyper_el0);
> - isb();
> -}
> -
> -static inline void enable_counter(int idx)
> -{
> - uint64_t v = read_sysreg(pmcntenset_el0);
> -
> - write_sysreg(BIT(idx) | v, pmcntenset_el0);
> - isb();
> -}
> -
> -static inline void disable_counter(int idx)
> -{
> - uint64_t v = read_sysreg(pmcntenset_el0);
> -
> - write_sysreg(BIT(idx) | v, pmcntenclr_el0);
> - isb();
> -}
> -
> -static void pmu_disable_reset(void)
> -{
> - uint64_t pmcr = read_sysreg(pmcr_el0);
> -
> - /* Reset all counters, disabling them */
> - pmcr &= ~ARMV8_PMU_PMCR_E;
> - write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
> - isb();
> -}
> -
> -#define RETURN_READ_PMEVCNTRN(n) \
> - return read_sysreg(pmevcntr##n##_el0)
> -static unsigned long read_pmevcntrn(int n)
> -{
> - PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
> - return 0;
> -}
> -
> -#define WRITE_PMEVCNTRN(n) \
> - write_sysreg(val, pmevcntr##n##_el0)
> -static void write_pmevcntrn(int n, unsigned long val)
> -{
> - PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
> - isb();
> -}
> -
> -#define READ_PMEVTYPERN(n) \
> - return read_sysreg(pmevtyper##n##_el0)
> -static unsigned long read_pmevtypern(int n)
> -{
> - PMEVN_SWITCH(n, READ_PMEVTYPERN);
> - return 0;
> -}
> -
> -#define WRITE_PMEVTYPERN(n) \
> - write_sysreg(val, pmevtyper##n##_el0)
> -static void write_pmevtypern(int n, unsigned long val)
> -{
> - PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
> - isb();
> -}
> -
> /*
> * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
> * accessors that test cases will use. Each of the accessors will
> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> index 0a56183644ee..e0cc1ca1c4b7 100644
> --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
> @@ -1,10 +1,17 @@
> /* SPDX-License-Identifier: GPL-2.0 */
>
> #include <kvm_util.h>
> +#include <perf/arm_pmuv3.h>
>
> #define GICD_BASE_GPA 0x8000000ULL
> #define GICR_BASE_GPA 0x80A0000ULL
>
> +/* The max number of the PMU event counters (excluding the cycle counter) */
> +#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
> +
> +/* The cycle counter bit position that's common among the PMU registers */
> +#define ARMV8_PMU_CYCLE_IDX 31
> +
> struct vpmu_vm {
> struct kvm_vm *vm;
> struct kvm_vcpu *vcpu;
> @@ -14,3 +21,115 @@ struct vpmu_vm {
> struct vpmu_vm *create_vpmu_vm(void *guest_code);
>
> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
> +
> +static inline uint64_t get_pmcr_n(uint64_t pmcr)
> +{
> + return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
> +}
> +
> +static inline void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
> +{
> + *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
> + *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
> +}
> +
> +static inline uint64_t get_counters_mask(uint64_t n)
> +{
> + uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
> +
> + if (n)
> + mask |= GENMASK(n - 1, 0);
> + return mask;
> +}
> +
> +/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
> +static inline unsigned long read_sel_evcntr(int sel)
> +{
> + write_sysreg(sel, pmselr_el0);
> + isb();
> + return read_sysreg(pmxevcntr_el0);
> +}
> +
> +/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
> +static inline void write_sel_evcntr(int sel, unsigned long val)
> +{
> + write_sysreg(sel, pmselr_el0);
> + isb();
> + write_sysreg(val, pmxevcntr_el0);
> + isb();
> +}
> +
> +/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
> +static inline unsigned long read_sel_evtyper(int sel)
> +{
> + write_sysreg(sel, pmselr_el0);
> + isb();
> + return read_sysreg(pmxevtyper_el0);
> +}
> +
> +/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
> +static inline void write_sel_evtyper(int sel, unsigned long val)
> +{
> + write_sysreg(sel, pmselr_el0);
> + isb();
> + write_sysreg(val, pmxevtyper_el0);
> + isb();
> +}
> +
> +static inline void enable_counter(int idx)
> +{
> + uint64_t v = read_sysreg(pmcntenset_el0);
> +
> + write_sysreg(BIT(idx) | v, pmcntenset_el0);
> + isb();
> +}
> +
> +static inline void disable_counter(int idx)
> +{
> + uint64_t v = read_sysreg(pmcntenset_el0);
> +
> + write_sysreg(BIT(idx) | v, pmcntenclr_el0);
> + isb();
> +}
> +
As mentioned in [1], the current implementation of disable_counter()
is buggy and would end up disabling all the counters.
However, if you intend to keep it (even though it would remain unused),
maybe change the definition to something like:

static inline void disable_counter(int idx)
{
write_sysreg(BIT(idx), pmcntenclr_el0);
isb();
}

Thank you.
Raghavendra

[1]: https://lore.kernel.org/all/[email protected]/

> +static inline void pmu_disable_reset(void)
> +{
> + uint64_t pmcr = read_sysreg(pmcr_el0);
> +
> + /* Reset all counters, disabling them */
> + pmcr &= ~ARMV8_PMU_PMCR_E;
> + write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
> + isb();
> +}
> +
> +#define RETURN_READ_PMEVCNTRN(n) \
> + return read_sysreg(pmevcntr##n##_el0)
> +static inline unsigned long read_pmevcntrn(int n)
> +{
> + PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
> + return 0;
> +}
> +
> +#define WRITE_PMEVCNTRN(n) \
> + write_sysreg(val, pmevcntr##n##_el0)
> +static inline void write_pmevcntrn(int n, unsigned long val)
> +{
> + PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
> + isb();
> +}
> +
> +#define READ_PMEVTYPERN(n) \
> + return read_sysreg(pmevtyper##n##_el0)
> +static inline unsigned long read_pmevtypern(int n)
> +{
> + PMEVN_SWITCH(n, READ_PMEVTYPERN);
> + return 0;
> +}
> +
> +#define WRITE_PMEVTYPERN(n) \
> + write_sysreg(val, pmevtyper##n##_el0)
> +static inline void write_pmevtypern(int n, unsigned long val)
> +{
> + PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
> + isb();
> +}
> --
> 2.40.1
>
>

2023-11-28 08:43:33

by Marc Zyngier

[permalink] [raw]
Subject: Re: [PATCH v1 2/3] KVM: selftests: aarch64: Move the pmu helper function into lib/

On 2023-11-27 21:48, Raghavendra Rao Ananta wrote:
> Hi Shaoqin,
>
> On Wed, Nov 22, 2023 at 10:39 PM Shaoqin Huang <[email protected]>
> wrote:
>>
>> Move those pmu helper function into lib/, thus it can be used by other
>> pmu test.
>>
>> Signed-off-by: Shaoqin Huang <[email protected]>
>> ---
>> .../kvm/aarch64/vpmu_counter_access.c | 118 -----------------
>> .../selftests/kvm/include/aarch64/vpmu.h | 119
>> ++++++++++++++++++
>> 2 files changed, 119 insertions(+), 118 deletions(-)
>>
>> diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> index 17305408a334..62d6315790ab 100644
>> --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> @@ -20,12 +20,6 @@
>> #include <perf/arm_pmuv3.h>
>> #include <linux/bitfield.h>
>>
>> -/* The max number of the PMU event counters (excluding the cycle
>> counter) */
>> -#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
>> -
>> -/* The cycle counter bit position that's common among the PMU
>> registers */
>> -#define ARMV8_PMU_CYCLE_IDX 31
>> -
>> static struct vpmu_vm *vpmu_vm;
>>
>> struct pmreg_sets {
>> @@ -35,118 +29,6 @@ struct pmreg_sets {
>>
>> #define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
>>
>> -static uint64_t get_pmcr_n(uint64_t pmcr)
>> -{
>> - return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) &
>> ARMV8_PMU_PMCR_N_MASK;
>> -}
>> -
>> -static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
>> -{
>> - *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK <<
>> ARMV8_PMU_PMCR_N_SHIFT);
>> - *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
>> -}
>> -
>> -static uint64_t get_counters_mask(uint64_t n)
>> -{
>> - uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
>> -
>> - if (n)
>> - mask |= GENMASK(n - 1, 0);
>> - return mask;
>> -}
>> -
>> -/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>> -static inline unsigned long read_sel_evcntr(int sel)
>> -{
>> - write_sysreg(sel, pmselr_el0);
>> - isb();
>> - return read_sysreg(pmxevcntr_el0);
>> -}
>> -
>> -/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>> -static inline void write_sel_evcntr(int sel, unsigned long val)
>> -{
>> - write_sysreg(sel, pmselr_el0);
>> - isb();
>> - write_sysreg(val, pmxevcntr_el0);
>> - isb();
>> -}
>> -
>> -/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>> -static inline unsigned long read_sel_evtyper(int sel)
>> -{
>> - write_sysreg(sel, pmselr_el0);
>> - isb();
>> - return read_sysreg(pmxevtyper_el0);
>> -}
>> -
>> -/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>> -static inline void write_sel_evtyper(int sel, unsigned long val)
>> -{
>> - write_sysreg(sel, pmselr_el0);
>> - isb();
>> - write_sysreg(val, pmxevtyper_el0);
>> - isb();
>> -}
>> -
>> -static inline void enable_counter(int idx)
>> -{
>> - uint64_t v = read_sysreg(pmcntenset_el0);
>> -
>> - write_sysreg(BIT(idx) | v, pmcntenset_el0);
>> - isb();
>> -}
>> -
>> -static inline void disable_counter(int idx)
>> -{
>> - uint64_t v = read_sysreg(pmcntenset_el0);
>> -
>> - write_sysreg(BIT(idx) | v, pmcntenclr_el0);
>> - isb();
>> -}
>> -
>> -static void pmu_disable_reset(void)
>> -{
>> - uint64_t pmcr = read_sysreg(pmcr_el0);
>> -
>> - /* Reset all counters, disabling them */
>> - pmcr &= ~ARMV8_PMU_PMCR_E;
>> - write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
>> - isb();
>> -}
>> -
>> -#define RETURN_READ_PMEVCNTRN(n) \
>> - return read_sysreg(pmevcntr##n##_el0)
>> -static unsigned long read_pmevcntrn(int n)
>> -{
>> - PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
>> - return 0;
>> -}
>> -
>> -#define WRITE_PMEVCNTRN(n) \
>> - write_sysreg(val, pmevcntr##n##_el0)
>> -static void write_pmevcntrn(int n, unsigned long val)
>> -{
>> - PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
>> - isb();
>> -}
>> -
>> -#define READ_PMEVTYPERN(n) \
>> - return read_sysreg(pmevtyper##n##_el0)
>> -static unsigned long read_pmevtypern(int n)
>> -{
>> - PMEVN_SWITCH(n, READ_PMEVTYPERN);
>> - return 0;
>> -}
>> -
>> -#define WRITE_PMEVTYPERN(n) \
>> - write_sysreg(val, pmevtyper##n##_el0)
>> -static void write_pmevtypern(int n, unsigned long val)
>> -{
>> - PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
>> - isb();
>> -}
>> -
>> /*
>> * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
>> * accessors that test cases will use. Each of the accessors will
>> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> index 0a56183644ee..e0cc1ca1c4b7 100644
>> --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> @@ -1,10 +1,17 @@
>> /* SPDX-License-Identifier: GPL-2.0 */
>>
>> #include <kvm_util.h>
>> +#include <perf/arm_pmuv3.h>
>>
>> #define GICD_BASE_GPA 0x8000000ULL
>> #define GICR_BASE_GPA 0x80A0000ULL
>>
>> +/* The max number of the PMU event counters (excluding the cycle
>> counter) */
>> +#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
>> +
>> +/* The cycle counter bit position that's common among the PMU
>> registers */
>> +#define ARMV8_PMU_CYCLE_IDX 31
>> +
>> struct vpmu_vm {
>> struct kvm_vm *vm;
>> struct kvm_vcpu *vcpu;
>> @@ -14,3 +21,115 @@ struct vpmu_vm {
>> struct vpmu_vm *create_vpmu_vm(void *guest_code);
>>
>> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
>> +
>> +static inline uint64_t get_pmcr_n(uint64_t pmcr)
>> +{
>> + return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) &
>> ARMV8_PMU_PMCR_N_MASK;
>> +}
>> +
>> +static inline void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
>> +{
>> + *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK <<
>> ARMV8_PMU_PMCR_N_SHIFT);
>> + *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
>> +}
>> +
>> +static inline uint64_t get_counters_mask(uint64_t n)
>> +{
>> + uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
>> +
>> + if (n)
>> + mask |= GENMASK(n - 1, 0);
>> + return mask;
>> +}
>> +
>> +/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>> +static inline unsigned long read_sel_evcntr(int sel)
>> +{
>> + write_sysreg(sel, pmselr_el0);
>> + isb();
>> + return read_sysreg(pmxevcntr_el0);
>> +}
>> +
>> +/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>> +static inline void write_sel_evcntr(int sel, unsigned long val)
>> +{
>> + write_sysreg(sel, pmselr_el0);
>> + isb();
>> + write_sysreg(val, pmxevcntr_el0);
>> + isb();
>> +}
>> +
>> +/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>> +static inline unsigned long read_sel_evtyper(int sel)
>> +{
>> + write_sysreg(sel, pmselr_el0);
>> + isb();
>> + return read_sysreg(pmxevtyper_el0);
>> +}
>> +
>> +/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>> +static inline void write_sel_evtyper(int sel, unsigned long val)
>> +{
>> + write_sysreg(sel, pmselr_el0);
>> + isb();
>> + write_sysreg(val, pmxevtyper_el0);
>> + isb();
>> +}
>> +
>> +static inline void enable_counter(int idx)
>> +{
>> + uint64_t v = read_sysreg(pmcntenset_el0);
>> +
>> + write_sysreg(BIT(idx) | v, pmcntenset_el0);
>> + isb();
>> +}
>> +
>> +static inline void disable_counter(int idx)
>> +{
>> + uint64_t v = read_sysreg(pmcntenset_el0);
>> +
>> + write_sysreg(BIT(idx) | v, pmcntenclr_el0);
>> + isb();
>> +}
>> +
> As mentioned in [1], the current implementation of disable_counter()
> is buggy and would end up disabling all the counters.
> However if you intend to keep it (even though it would remain unused),
> may be change the definition something to:
>
> static inline void disable_counter(int idx)
> {
> write_sysreg(BIT(idx), pmcntenclr_el0);
> isb();
> }

Same thing for the enable_counter() function, by the way.
It doesn't have the same disastrous effect, but it is
buggy (imagine an interrupt disabling a counter between
the read and the write...).

In general, the set/clr registers should always be used
in their write form, never in an RMW form.

Thanks,

M.
--
Jazz is not dead. It just smells funny...

2023-11-29 03:24:14

by Shaoqin Huang

[permalink] [raw]
Subject: Re: [PATCH v1 1/3] KVM: selftests: aarch64: Make the [create|destroy]_vpmu_vm() can be reused

Hi Eric,

On 11/25/23 02:14, Eric Auger wrote:
> Hi Shaoqin,
>
> On 11/23/23 07:37, Shaoqin Huang wrote:
>> Move the [create|destroy]_vpmu_vm() into the lib/, which makes those
> some wording suggestions below:
>
> Move the implementation of .. into lib/aarch64/pmu.c and export their
> declaration in a header so that they can be reused by other tests. Also
> the title may be renamed: Make [create|destroy]_vpmu_vm() public
>> function can be used by other tests. Install the handler is specific to
> the sync exception handler install is test specific so we move it out of
> the helper function.
>> the vpmu_counter_access test, so create a wrapper function for it, and
>> only move the common part.
>>
>> No functional change.
> intended ;-)

I will take your advice in the next version.

>>
>> Signed-off-by: Shaoqin Huang <[email protected]>
>> ---
>> tools/testing/selftests/kvm/Makefile | 1 +
>> .../kvm/aarch64/vpmu_counter_access.c | 100 +++++-------------
>> .../selftests/kvm/include/aarch64/vpmu.h | 16 +++
>> .../testing/selftests/kvm/lib/aarch64/vpmu.c | 64 +++++++++++
>> 4 files changed, 105 insertions(+), 76 deletions(-)
>> create mode 100644 tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> create mode 100644 tools/testing/selftests/kvm/lib/aarch64/vpmu.c
>>
>> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
>> index a5963ab9215b..b60852c222ac 100644
>> --- a/tools/testing/selftests/kvm/Makefile
>> +++ b/tools/testing/selftests/kvm/Makefile
>> @@ -57,6 +57,7 @@ LIBKVM_aarch64 += lib/aarch64/processor.c
>> LIBKVM_aarch64 += lib/aarch64/spinlock.c
>> LIBKVM_aarch64 += lib/aarch64/ucall.c
>> LIBKVM_aarch64 += lib/aarch64/vgic.c
>> +LIBKVM_aarch64 += lib/aarch64/vpmu.c
>>
>> LIBKVM_s390x += lib/s390x/diag318_test_handler.c
>> LIBKVM_s390x += lib/s390x/processor.c
>> diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> index 5ea78986e665..17305408a334 100644
>> --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> @@ -16,6 +16,7 @@
>> #include <processor.h>
>> #include <test_util.h>
>> #include <vgic.h>
>> +#include <vpmu.h>
>> #include <perf/arm_pmuv3.h>
>> #include <linux/bitfield.h>
>>
>> @@ -25,13 +26,7 @@
>> /* The cycle counter bit position that's common among the PMU registers */
>> #define ARMV8_PMU_CYCLE_IDX 31
>>
>> -struct vpmu_vm {
>> - struct kvm_vm *vm;
>> - struct kvm_vcpu *vcpu;
>> - int gic_fd;
>> -};
>> -
>> -static struct vpmu_vm vpmu_vm;
>> +static struct vpmu_vm *vpmu_vm;
>>
>> struct pmreg_sets {
>> uint64_t set_reg_id;
>> @@ -421,64 +416,6 @@ static void guest_code(uint64_t expected_pmcr_n)
>> GUEST_DONE();
>> }
>>
>> -#define GICD_BASE_GPA 0x8000000ULL
>> -#define GICR_BASE_GPA 0x80A0000ULL
>> -
>> -/* Create a VM that has one vCPU with PMUv3 configured. */
>> -static void create_vpmu_vm(void *guest_code)
>> -{
>> - struct kvm_vcpu_init init;
>> - uint8_t pmuver, ec;
>> - uint64_t dfr0, irq = 23;
>> - struct kvm_device_attr irq_attr = {
>> - .group = KVM_ARM_VCPU_PMU_V3_CTRL,
>> - .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
>> - .addr = (uint64_t)&irq,
>> - };
>> - struct kvm_device_attr init_attr = {
>> - .group = KVM_ARM_VCPU_PMU_V3_CTRL,
>> - .attr = KVM_ARM_VCPU_PMU_V3_INIT,
>> - };
>> -
>> - /* The test creates the vpmu_vm multiple times. Ensure a clean state */
>> - memset(&vpmu_vm, 0, sizeof(vpmu_vm));
>> -
>> - vpmu_vm.vm = vm_create(1);
>> - vm_init_descriptor_tables(vpmu_vm.vm);
>> - for (ec = 0; ec < ESR_EC_NUM; ec++) {
>> - vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
>> - guest_sync_handler);
>> - }
>> -
>> - /* Create vCPU with PMUv3 */
>> - vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
>> - init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
>> - vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
>> - vcpu_init_descriptor_tables(vpmu_vm.vcpu);
>> - vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
>> - GICD_BASE_GPA, GICR_BASE_GPA);
>> - __TEST_REQUIRE(vpmu_vm.gic_fd >= 0,
>> - "Failed to create vgic-v3, skipping");
>> -
>> - /* Make sure that PMUv3 support is indicated in the ID register */
>> - vcpu_get_reg(vpmu_vm.vcpu,
>> - KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
>> - pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
>> - TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
>> - pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
>> - "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
>> -
>> - /* Initialize vPMU */
>> - vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
>> - vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
>> -}
>> -
>> -static void destroy_vpmu_vm(void)
>> -{
>> - close(vpmu_vm.gic_fd);
>> - kvm_vm_free(vpmu_vm.vm);
>> -}
>> -
>> static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
>> {
>> struct ucall uc;
>> @@ -497,13 +434,24 @@ static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
>> }
>> }
>>
>> +static void create_vpmu_vm_with_handler(void *guest_code)
>> +{
>> + uint8_t ec;
>> + vpmu_vm = create_vpmu_vm(guest_code);
>> +
>> + for (ec = 0; ec < ESR_EC_NUM; ec++) {
>> + vm_install_sync_handler(vpmu_vm->vm, VECTOR_SYNC_CURRENT, ec,
>> + guest_sync_handler);
>> + }
>> +}
>> +
>> static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
>> {
>> struct kvm_vcpu *vcpu;
>> uint64_t pmcr, pmcr_orig;
>>
>> - create_vpmu_vm(guest_code);
>> - vcpu = vpmu_vm.vcpu;
>> + create_vpmu_vm_with_handler(guest_code);
>> + vcpu = vpmu_vm->vcpu;
>>
>> vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
>> pmcr = pmcr_orig;
>> @@ -539,7 +487,7 @@ static void run_access_test(uint64_t pmcr_n)
>> pr_debug("Test with pmcr_n %lu\n", pmcr_n);
>>
>> test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
>> - vcpu = vpmu_vm.vcpu;
>> + vcpu = vpmu_vm->vcpu;
>>
>> /* Save the initial sp to restore them later to run the guest again */
>> vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);
>> @@ -550,7 +498,7 @@ static void run_access_test(uint64_t pmcr_n)
>> * Reset and re-initialize the vCPU, and run the guest code again to
>> * check if PMCR_EL0.N is preserved.
>> */
>> - vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
>> + vm_ioctl(vpmu_vm->vm, KVM_ARM_PREFERRED_TARGET, &init);
>> init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
>> aarch64_vcpu_setup(vcpu, &init);
>> vcpu_init_descriptor_tables(vcpu);
>> @@ -559,7 +507,7 @@ static void run_access_test(uint64_t pmcr_n)
>>
>> run_vcpu(vcpu, pmcr_n);
>>
>> - destroy_vpmu_vm();
>> + destroy_vpmu_vm(vpmu_vm);
>> }
>>
>> static struct pmreg_sets validity_check_reg_sets[] = {
>> @@ -580,7 +528,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
>> uint64_t valid_counters_mask, max_counters_mask;
>>
>> test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
>> - vcpu = vpmu_vm.vcpu;
>> + vcpu = vpmu_vm->vcpu;
>>
>> valid_counters_mask = get_counters_mask(pmcr_n);
>> max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS);
>> @@ -621,7 +569,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
>> KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
>> }
>>
>> - destroy_vpmu_vm();
>> + destroy_vpmu_vm(vpmu_vm);
>> }
>>
>> /*
>> @@ -634,7 +582,7 @@ static void run_error_test(uint64_t pmcr_n)
>> pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);
>>
>> test_create_vpmu_vm_with_pmcr_n(pmcr_n, true);
>> - destroy_vpmu_vm();
>> + destroy_vpmu_vm(vpmu_vm);
>> }
>>
>> /*
>> @@ -645,9 +593,9 @@ static uint64_t get_pmcr_n_limit(void)
>> {
>> uint64_t pmcr;
>>
>> - create_vpmu_vm(guest_code);
>> - vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
>> - destroy_vpmu_vm();
>> + create_vpmu_vm_with_handler(guest_code);
>> + vcpu_get_reg(vpmu_vm->vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
>> + destroy_vpmu_vm(vpmu_vm);
>> return get_pmcr_n(pmcr);
>> }
>>
>> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> new file mode 100644
>> index 000000000000..0a56183644ee
>> --- /dev/null
>> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> @@ -0,0 +1,16 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +
>> +#include <kvm_util.h>
>> +
>> +#define GICD_BASE_GPA 0x8000000ULL
>> +#define GICR_BASE_GPA 0x80A0000ULL
>> +
>> +struct vpmu_vm {
>> + struct kvm_vm *vm;
>> + struct kvm_vcpu *vcpu;
>> + int gic_fd;
>> +};
>> +
>> +struct vpmu_vm *create_vpmu_vm(void *guest_code);
>> +
>> +void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
>> diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
>> new file mode 100644
>> index 000000000000..b3de8fdc555e
>> --- /dev/null
>> +++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
>> @@ -0,0 +1,64 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +
>> +#include <kvm_util.h>
>> +#include <processor.h>
>> +#include <test_util.h>
>> +#include <vgic.h>
>> +#include <vpmu.h>
>> +#include <perf/arm_pmuv3.h>
>> +
>> +/* Create a VM that has one vCPU with PMUv3 configured. */
>> +struct vpmu_vm *create_vpmu_vm(void *guest_code)
>> +{
>> + struct kvm_vcpu_init init;
>> + uint8_t pmuver;
>> + uint64_t dfr0, irq = 23;
>> + struct kvm_device_attr irq_attr = {
>> + .group = KVM_ARM_VCPU_PMU_V3_CTRL,
>> + .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
>> + .addr = (uint64_t)&irq,
>> + };
>> + struct kvm_device_attr init_attr = {
>> + .group = KVM_ARM_VCPU_PMU_V3_CTRL,
>> + .attr = KVM_ARM_VCPU_PMU_V3_INIT,
>> + };
>> + struct vpmu_vm *vpmu_vm;
>> +
>> + vpmu_vm = calloc(1, sizeof(*vpmu_vm));
>> + TEST_ASSERT(vpmu_vm != NULL, "Insufficient Memory");
>> + memset(vpmu_vm, 0, sizeof(vpmu_vm));
>> +
>> + vpmu_vm->vm = vm_create(1);
>> + vm_init_descriptor_tables(vpmu_vm->vm);
>> +
>> + /* Create vCPU with PMUv3 */
>> + vm_ioctl(vpmu_vm->vm, KVM_ARM_PREFERRED_TARGET, &init);
>> + init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
>> + vpmu_vm->vcpu = aarch64_vcpu_add(vpmu_vm->vm, 0, &init, guest_code);
>> + vcpu_init_descriptor_tables(vpmu_vm->vcpu);
>> + vpmu_vm->gic_fd = vgic_v3_setup(vpmu_vm->vm, 1, 64,
>> + GICD_BASE_GPA, GICR_BASE_GPA);
>> + __TEST_REQUIRE(vpmu_vm->gic_fd >= 0,
>> + "Failed to create vgic-v3, skipping");
>> +
>> + /* Make sure that PMUv3 support is indicated in the ID register */
>> + vcpu_get_reg(vpmu_vm->vcpu,
>> + KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
>> + pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
>> + TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
>> + pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
>> + "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
>> +
>> + /* Initialize vPMU */
>> + vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
>> + vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
>> +
>> + return vpmu_vm;
>> +}
>> +
>> +void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm)
>> +{
>> + close(vpmu_vm->gic_fd);
>> + kvm_vm_free(vpmu_vm->vm);
>> + free(vpmu_vm);
>> +}
> Besides looks good to me
> Reviewed-by: Eric Auger <[email protected]>

Thanks for your review. :)

>
> Eric
>
>

--
Shaoqin

2023-11-29 03:51:41

by Shaoqin Huang

[permalink] [raw]
Subject: Re: [PATCH v1 2/3] KVM: selftests: aarch64: Move the pmu helper function into lib/

Hi Raghavendra,

On 11/28/23 05:48, Raghavendra Rao Ananta wrote:
> Hi Shaoqin,
>
> On Wed, Nov 22, 2023 at 10:39 PM Shaoqin Huang <[email protected]> wrote:
>>
>> Move those pmu helper function into lib/, thus it can be used by other
>> pmu test.
>>
>> Signed-off-by: Shaoqin Huang <[email protected]>
>> ---
>> .../kvm/aarch64/vpmu_counter_access.c | 118 -----------------
>> .../selftests/kvm/include/aarch64/vpmu.h | 119 ++++++++++++++++++
>> 2 files changed, 119 insertions(+), 118 deletions(-)
>>
>> diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> index 17305408a334..62d6315790ab 100644
>> --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>> @@ -20,12 +20,6 @@
>> #include <perf/arm_pmuv3.h>
>> #include <linux/bitfield.h>
>>
>> -/* The max number of the PMU event counters (excluding the cycle counter) */
>> -#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
>> -
>> -/* The cycle counter bit position that's common among the PMU registers */
>> -#define ARMV8_PMU_CYCLE_IDX 31
>> -
>> static struct vpmu_vm *vpmu_vm;
>>
>> struct pmreg_sets {
>> @@ -35,118 +29,6 @@ struct pmreg_sets {
>>
>> #define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
>>
>> -static uint64_t get_pmcr_n(uint64_t pmcr)
>> -{
>> - return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
>> -}
>> -
>> -static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
>> -{
>> - *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
>> - *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
>> -}
>> -
>> -static uint64_t get_counters_mask(uint64_t n)
>> -{
>> - uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
>> -
>> - if (n)
>> - mask |= GENMASK(n - 1, 0);
>> - return mask;
>> -}
>> -
>> -/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>> -static inline unsigned long read_sel_evcntr(int sel)
>> -{
>> - write_sysreg(sel, pmselr_el0);
>> - isb();
>> - return read_sysreg(pmxevcntr_el0);
>> -}
>> -
>> -/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>> -static inline void write_sel_evcntr(int sel, unsigned long val)
>> -{
>> - write_sysreg(sel, pmselr_el0);
>> - isb();
>> - write_sysreg(val, pmxevcntr_el0);
>> - isb();
>> -}
>> -
>> -/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>> -static inline unsigned long read_sel_evtyper(int sel)
>> -{
>> - write_sysreg(sel, pmselr_el0);
>> - isb();
>> - return read_sysreg(pmxevtyper_el0);
>> -}
>> -
>> -/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>> -static inline void write_sel_evtyper(int sel, unsigned long val)
>> -{
>> - write_sysreg(sel, pmselr_el0);
>> - isb();
>> - write_sysreg(val, pmxevtyper_el0);
>> - isb();
>> -}
>> -
>> -static inline void enable_counter(int idx)
>> -{
>> - uint64_t v = read_sysreg(pmcntenset_el0);
>> -
>> - write_sysreg(BIT(idx) | v, pmcntenset_el0);
>> - isb();
>> -}
>> -
>> -static inline void disable_counter(int idx)
>> -{
>> - uint64_t v = read_sysreg(pmcntenset_el0);
>> -
>> - write_sysreg(BIT(idx) | v, pmcntenclr_el0);
>> - isb();
>> -}
>> -
>> -static void pmu_disable_reset(void)
>> -{
>> - uint64_t pmcr = read_sysreg(pmcr_el0);
>> -
>> - /* Reset all counters, disabling them */
>> - pmcr &= ~ARMV8_PMU_PMCR_E;
>> - write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
>> - isb();
>> -}
>> -
>> -#define RETURN_READ_PMEVCNTRN(n) \
>> - return read_sysreg(pmevcntr##n##_el0)
>> -static unsigned long read_pmevcntrn(int n)
>> -{
>> - PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
>> - return 0;
>> -}
>> -
>> -#define WRITE_PMEVCNTRN(n) \
>> - write_sysreg(val, pmevcntr##n##_el0)
>> -static void write_pmevcntrn(int n, unsigned long val)
>> -{
>> - PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
>> - isb();
>> -}
>> -
>> -#define READ_PMEVTYPERN(n) \
>> - return read_sysreg(pmevtyper##n##_el0)
>> -static unsigned long read_pmevtypern(int n)
>> -{
>> - PMEVN_SWITCH(n, READ_PMEVTYPERN);
>> - return 0;
>> -}
>> -
>> -#define WRITE_PMEVTYPERN(n) \
>> - write_sysreg(val, pmevtyper##n##_el0)
>> -static void write_pmevtypern(int n, unsigned long val)
>> -{
>> - PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
>> - isb();
>> -}
>> -
>> /*
>> * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
>> * accessors that test cases will use. Each of the accessors will
>> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> index 0a56183644ee..e0cc1ca1c4b7 100644
>> --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> @@ -1,10 +1,17 @@
>> /* SPDX-License-Identifier: GPL-2.0 */
>>
>> #include <kvm_util.h>
>> +#include <perf/arm_pmuv3.h>
>>
>> #define GICD_BASE_GPA 0x8000000ULL
>> #define GICR_BASE_GPA 0x80A0000ULL
>>
>> +/* The max number of the PMU event counters (excluding the cycle counter) */
>> +#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
>> +
>> +/* The cycle counter bit position that's common among the PMU registers */
>> +#define ARMV8_PMU_CYCLE_IDX 31
>> +
>> struct vpmu_vm {
>> struct kvm_vm *vm;
>> struct kvm_vcpu *vcpu;
>> @@ -14,3 +21,115 @@ struct vpmu_vm {
>> struct vpmu_vm *create_vpmu_vm(void *guest_code);
>>
>> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
>> +
>> +static inline uint64_t get_pmcr_n(uint64_t pmcr)
>> +{
>> + return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
>> +}
>> +
>> +static inline void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
>> +{
>> + *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
>> + *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
>> +}
>> +
>> +static inline uint64_t get_counters_mask(uint64_t n)
>> +{
>> + uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
>> +
>> + if (n)
>> + mask |= GENMASK(n - 1, 0);
>> + return mask;
>> +}
>> +
>> +/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>> +static inline unsigned long read_sel_evcntr(int sel)
>> +{
>> + write_sysreg(sel, pmselr_el0);
>> + isb();
>> + return read_sysreg(pmxevcntr_el0);
>> +}
>> +
>> +/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>> +static inline void write_sel_evcntr(int sel, unsigned long val)
>> +{
>> + write_sysreg(sel, pmselr_el0);
>> + isb();
>> + write_sysreg(val, pmxevcntr_el0);
>> + isb();
>> +}
>> +
>> +/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>> +static inline unsigned long read_sel_evtyper(int sel)
>> +{
>> + write_sysreg(sel, pmselr_el0);
>> + isb();
>> + return read_sysreg(pmxevtyper_el0);
>> +}
>> +
>> +/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>> +static inline void write_sel_evtyper(int sel, unsigned long val)
>> +{
>> + write_sysreg(sel, pmselr_el0);
>> + isb();
>> + write_sysreg(val, pmxevtyper_el0);
>> + isb();
>> +}
>> +
>> +static inline void enable_counter(int idx)
>> +{
>> + uint64_t v = read_sysreg(pmcntenset_el0);
>> +
>> + write_sysreg(BIT(idx) | v, pmcntenset_el0);
>> + isb();
>> +}
>> +
>> +static inline void disable_counter(int idx)
>> +{
>> + uint64_t v = read_sysreg(pmcntenset_el0);
>> +
>> + write_sysreg(BIT(idx) | v, pmcntenclr_el0);
>> + isb();
>> +}
>> +
> As mentioned in [1], the current implementation of disable_counter()
> is buggy and would end up disabling all the counters.
> However if you intend to keep it (even though it would remain unused),
> may be change the definition something to:
>
> static inline void disable_counter(int idx)
> {
> write_sysreg(BIT(idx), pmcntenclr_el0);
> isb();
> }
>

Ok. I will integrate another patch into my series to fix this problem.

Thanks,
Shaoqin

> Thank you.
> Raghavendra
>
> [1]: https://lore.kernel.org/all/[email protected]/
>
>> +static inline void pmu_disable_reset(void)
>> +{
>> + uint64_t pmcr = read_sysreg(pmcr_el0);
>> +
>> + /* Reset all counters, disabling them */
>> + pmcr &= ~ARMV8_PMU_PMCR_E;
>> + write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
>> + isb();
>> +}
>> +
>> +#define RETURN_READ_PMEVCNTRN(n) \
>> + return read_sysreg(pmevcntr##n##_el0)
>> +static inline unsigned long read_pmevcntrn(int n)
>> +{
>> + PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
>> + return 0;
>> +}
>> +
>> +#define WRITE_PMEVCNTRN(n) \
>> + write_sysreg(val, pmevcntr##n##_el0)
>> +static inline void write_pmevcntrn(int n, unsigned long val)
>> +{
>> + PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
>> + isb();
>> +}
>> +
>> +#define READ_PMEVTYPERN(n) \
>> + return read_sysreg(pmevtyper##n##_el0)
>> +static inline unsigned long read_pmevtypern(int n)
>> +{
>> + PMEVN_SWITCH(n, READ_PMEVTYPERN);
>> + return 0;
>> +}
>> +
>> +#define WRITE_PMEVTYPERN(n) \
>> + write_sysreg(val, pmevtyper##n##_el0)
>> +static inline void write_pmevtypern(int n, unsigned long val)
>> +{
>> + PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
>> + isb();
>> +}
>> --
>> 2.40.1
>>
>>
>

--
Shaoqin

2023-11-29 03:52:17

by Shaoqin Huang

[permalink] [raw]
Subject: Re: [PATCH v1 2/3] KVM: selftests: aarch64: Move the pmu helper function into lib/

Hi Marc,

On 11/28/23 16:43, Marc Zyngier wrote:
> On 2023-11-27 21:48, Raghavendra Rao Ananta wrote:
>> Hi Shaoqin,
>>
>> On Wed, Nov 22, 2023 at 10:39 PM Shaoqin Huang <[email protected]>
>> wrote:
>>>
>>> Move those pmu helper function into lib/, thus it can be used by other
>>> pmu test.
>>>
>>> Signed-off-by: Shaoqin Huang <[email protected]>
>>> ---
>>>  .../kvm/aarch64/vpmu_counter_access.c         | 118 -----------------
>>>  .../selftests/kvm/include/aarch64/vpmu.h      | 119 ++++++++++++++++++
>>>  2 files changed, 119 insertions(+), 118 deletions(-)
>>>
>>> diff --git
>>> a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>>> b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>>> index 17305408a334..62d6315790ab 100644
>>> --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>>> +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
>>> @@ -20,12 +20,6 @@
>>>  #include <perf/arm_pmuv3.h>
>>>  #include <linux/bitfield.h>
>>>
>>> -/* The max number of the PMU event counters (excluding the cycle
>>> counter) */
>>> -#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
>>> -
>>> -/* The cycle counter bit position that's common among the PMU
>>> registers */
>>> -#define ARMV8_PMU_CYCLE_IDX            31
>>> -
>>>  static struct vpmu_vm *vpmu_vm;
>>>
>>>  struct pmreg_sets {
>>> @@ -35,118 +29,6 @@ struct pmreg_sets {
>>>
>>>  #define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
>>>
>>> -static uint64_t get_pmcr_n(uint64_t pmcr)
>>> -{
>>> -       return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
>>> -}
>>> -
>>> -static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
>>> -{
>>> -       *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK <<
>>> ARMV8_PMU_PMCR_N_SHIFT);
>>> -       *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
>>> -}
>>> -
>>> -static uint64_t get_counters_mask(uint64_t n)
>>> -{
>>> -       uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
>>> -
>>> -       if (n)
>>> -               mask |= GENMASK(n - 1, 0);
>>> -       return mask;
>>> -}
>>> -
>>> -/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>>> -static inline unsigned long read_sel_evcntr(int sel)
>>> -{
>>> -       write_sysreg(sel, pmselr_el0);
>>> -       isb();
>>> -       return read_sysreg(pmxevcntr_el0);
>>> -}
>>> -
>>> -/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>>> -static inline void write_sel_evcntr(int sel, unsigned long val)
>>> -{
>>> -       write_sysreg(sel, pmselr_el0);
>>> -       isb();
>>> -       write_sysreg(val, pmxevcntr_el0);
>>> -       isb();
>>> -}
>>> -
>>> -/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>>> -static inline unsigned long read_sel_evtyper(int sel)
>>> -{
>>> -       write_sysreg(sel, pmselr_el0);
>>> -       isb();
>>> -       return read_sysreg(pmxevtyper_el0);
>>> -}
>>> -
>>> -/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>>> -static inline void write_sel_evtyper(int sel, unsigned long val)
>>> -{
>>> -       write_sysreg(sel, pmselr_el0);
>>> -       isb();
>>> -       write_sysreg(val, pmxevtyper_el0);
>>> -       isb();
>>> -}
>>> -
>>> -static inline void enable_counter(int idx)
>>> -{
>>> -       uint64_t v = read_sysreg(pmcntenset_el0);
>>> -
>>> -       write_sysreg(BIT(idx) | v, pmcntenset_el0);
>>> -       isb();
>>> -}
>>> -
>>> -static inline void disable_counter(int idx)
>>> -{
>>> -       uint64_t v = read_sysreg(pmcntenset_el0);
>>> -
>>> -       write_sysreg(BIT(idx) | v, pmcntenclr_el0);
>>> -       isb();
>>> -}
>>> -
>>> -static void pmu_disable_reset(void)
>>> -{
>>> -       uint64_t pmcr = read_sysreg(pmcr_el0);
>>> -
>>> -       /* Reset all counters, disabling them */
>>> -       pmcr &= ~ARMV8_PMU_PMCR_E;
>>> -       write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
>>> -       isb();
>>> -}
>>> -
>>> -#define RETURN_READ_PMEVCNTRN(n) \
>>> -       return read_sysreg(pmevcntr##n##_el0)
>>> -static unsigned long read_pmevcntrn(int n)
>>> -{
>>> -       PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
>>> -       return 0;
>>> -}
>>> -
>>> -#define WRITE_PMEVCNTRN(n) \
>>> -       write_sysreg(val, pmevcntr##n##_el0)
>>> -static void write_pmevcntrn(int n, unsigned long val)
>>> -{
>>> -       PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
>>> -       isb();
>>> -}
>>> -
>>> -#define READ_PMEVTYPERN(n) \
>>> -       return read_sysreg(pmevtyper##n##_el0)
>>> -static unsigned long read_pmevtypern(int n)
>>> -{
>>> -       PMEVN_SWITCH(n, READ_PMEVTYPERN);
>>> -       return 0;
>>> -}
>>> -
>>> -#define WRITE_PMEVTYPERN(n) \
>>> -       write_sysreg(val, pmevtyper##n##_el0)
>>> -static void write_pmevtypern(int n, unsigned long val)
>>> -{
>>> -       PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
>>> -       isb();
>>> -}
>>> -
>>>  /*
>>>   * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
>>>   * accessors that test cases will use. Each of the accessors will
>>> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>>> b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>>> index 0a56183644ee..e0cc1ca1c4b7 100644
>>> --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>>> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>>> @@ -1,10 +1,17 @@
>>>  /* SPDX-License-Identifier: GPL-2.0 */
>>>
>>>  #include <kvm_util.h>
>>> +#include <perf/arm_pmuv3.h>
>>>
>>>  #define GICD_BASE_GPA  0x8000000ULL
>>>  #define GICR_BASE_GPA  0x80A0000ULL
>>>
>>> +/* The max number of the PMU event counters (excluding the cycle
>>> counter) */
>>> +#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
>>> +
>>> +/* The cycle counter bit position that's common among the PMU
>>> registers */
>>> +#define ARMV8_PMU_CYCLE_IDX            31
>>> +
>>>  struct vpmu_vm {
>>>         struct kvm_vm *vm;
>>>         struct kvm_vcpu *vcpu;
>>> @@ -14,3 +21,115 @@ struct vpmu_vm {
>>>  struct vpmu_vm *create_vpmu_vm(void *guest_code);
>>>
>>>  void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
>>> +
>>> +static inline uint64_t get_pmcr_n(uint64_t pmcr)
>>> +{
>>> +       return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
>>> +}
>>> +
>>> +static inline void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
>>> +{
>>> +       *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK <<
>>> ARMV8_PMU_PMCR_N_SHIFT);
>>> +       *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
>>> +}
>>> +
>>> +static inline uint64_t get_counters_mask(uint64_t n)
>>> +{
>>> +       uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
>>> +
>>> +       if (n)
>>> +               mask |= GENMASK(n - 1, 0);
>>> +       return mask;
>>> +}
>>> +
>>> +/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>>> +static inline unsigned long read_sel_evcntr(int sel)
>>> +{
>>> +       write_sysreg(sel, pmselr_el0);
>>> +       isb();
>>> +       return read_sysreg(pmxevcntr_el0);
>>> +}
>>> +
>>> +/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
>>> +static inline void write_sel_evcntr(int sel, unsigned long val)
>>> +{
>>> +       write_sysreg(sel, pmselr_el0);
>>> +       isb();
>>> +       write_sysreg(val, pmxevcntr_el0);
>>> +       isb();
>>> +}
>>> +
>>> +/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>>> +static inline unsigned long read_sel_evtyper(int sel)
>>> +{
>>> +       write_sysreg(sel, pmselr_el0);
>>> +       isb();
>>> +       return read_sysreg(pmxevtyper_el0);
>>> +}
>>> +
>>> +/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
>>> +static inline void write_sel_evtyper(int sel, unsigned long val)
>>> +{
>>> +       write_sysreg(sel, pmselr_el0);
>>> +       isb();
>>> +       write_sysreg(val, pmxevtyper_el0);
>>> +       isb();
>>> +}
>>> +
>>> +static inline void enable_counter(int idx)
>>> +{
>>> +       uint64_t v = read_sysreg(pmcntenset_el0);
>>> +
>>> +       write_sysreg(BIT(idx) | v, pmcntenset_el0);
>>> +       isb();
>>> +}
>>> +
>>> +static inline void disable_counter(int idx)
>>> +{
>>> +       uint64_t v = read_sysreg(pmcntenset_el0);
>>> +
>>> +       write_sysreg(BIT(idx) | v, pmcntenclr_el0);
>>> +       isb();
>>> +}
>>> +
>> As mentioned in [1], the current implementation of disable_counter()
>> is buggy and would end up disabling all the counters.
>> However if you intend to keep it (even though it would remain unused),
>> may be change the definition something to:
>>
>> static inline void disable_counter(int idx)
>> {
>>     write_sysreg(BIT(idx), pmcntenclr_el0);
>>     isb();
>> }
>
> Same thing for the enable_counter() function, by the way.
> It doesn't have the same disastrous effect, but it is
> buggy (imagine an interrupt disabling a counter between
> the read and the write...).
>
> In general, the set/clr registers should always be used
> in their write form, never in a RMW form.
>

Thanks for your explanation. I will fix the enable_counter together
with the disable_counter.

Thanks,
Shaoqin

> Thanks,
>
>         M.

2023-11-29 06:59:35

by Shaoqin Huang

[permalink] [raw]
Subject: Re: [PATCH v1 3/3] KVM: selftests: aarch64: Introduce pmu_event_filter_test

Hi Eric,

On 11/27/23 16:10, Eric Auger wrote:
> Hi Shaoqin,
>
> On 11/23/23 07:37, Shaoqin Huang wrote:
>> Introduce pmu_event_filter_test for arm64 platforms. The test configures
>> PMUv3 for a vCPU, and sets different pmu event filter for the vCPU, and
> filters
>> check if the guest can use those events which user allow and can't use
>> those events which use deny.
>>
>> This test refactor the create_vpmu_vm() and make it a wrapper for
>> __create_vpmu_vm(), which can let we do some extra init before
> which can let we do -> which allows some extra init code.

Copy that.

>> KVM_ARM_VCPU_PMU_V3_INIT.
>>
>> This test choose the branches_retired and the instructions_retired
>> event, and let guest use the two events in pmu. And check if the result
> Are you sure those events are supported?
>> is expected.
>>
>> Signed-off-by: Shaoqin Huang <[email protected]>
>> ---
>> tools/testing/selftests/kvm/Makefile | 1 +
>> .../kvm/aarch64/pmu_event_filter_test.c | 227 ++++++++++++++++++
>> .../selftests/kvm/include/aarch64/vpmu.h | 4 +
>> .../testing/selftests/kvm/lib/aarch64/vpmu.c | 14 +-
>> 4 files changed, 244 insertions(+), 2 deletions(-)
>> create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
>>
>> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
>> index b60852c222ac..5f126e1a1dbf 100644
>> --- a/tools/testing/selftests/kvm/Makefile
>> +++ b/tools/testing/selftests/kvm/Makefile
>> @@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
>> TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
>> TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
>> TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
>> +TEST_GEN_PROGS_aarch64 += aarch64/pmu_event_filter_test
>> TEST_GEN_PROGS_aarch64 += aarch64/psci_test
>> TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
>> TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
>> diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
>> new file mode 100644
>> index 000000000000..a876f5c2033b
>> --- /dev/null
>> +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
>> @@ -0,0 +1,227 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/*
>> + * pmu_event_filter_test - Test user limit pmu event for guest.
>> + *
>> + * Copyright (c) 2023 Red Hat, Inc.
>> + *
>> + * This test checks if the guest only see the limited pmu event that userspace
> sees
>> + * sets, if the gust can use those events which user allow, and if the guest
> s/gust/guest

Thanks, will correct it.

>> + * can't use those events which user deny.
>> + * It also checks set invalid filter return the expected error.
> it also checks that setting invalid filter ranges ...
>> + * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
>> + */
>> +#include <kvm_util.h>
>> +#include <processor.h>
>> +#include <vgic.h>
>> +#include <vpmu.h>
>> +#include <test_util.h>
>> +#include <perf/arm_pmuv3.h>
>> +
>> +struct {
>> + uint64_t branches_retired;
>> + uint64_t instructions_retired;
>> +} pmc_results;
>> +
>> +static struct vpmu_vm *vpmu_vm;
>> +
>> +#define FILTER_NR 10
>> +
>> +struct test_desc {
>> + const char *name;
>> + void (*check_result)(void);
>> + struct kvm_pmu_event_filter filter[FILTER_NR];
>> +};
>> +
>> +#define __DEFINE_FILTER(base, num, act) \
>> + ((struct kvm_pmu_event_filter) { \
>> + .base_event = base, \
>> + .nevents = num, \
>> + .action = act, \
>> + })
>> +
>> +#define DEFINE_FILTER(base, act) __DEFINE_FILTER(base, 1, act)
>> +
>> +#define EMPTY_FILTER { 0 }
>> +
>> +#define SW_INCR 0x0
>> +#define INST_RETIRED 0x8
>> +#define BR_RETIERD 0x21
> looks like a typo

It's a typo error. Fixed it.

>> +
>> +#define NUM_BRANCHES 10
>> +
>> +static void run_and_measure_loop(void)
>> +{
>> + asm volatile(
>> + " mov x10, %[loop]\n"
>> + "1: sub x10, x10, #1\n"
>> + " cmp x10, #0x0\n"
>> + " b.gt 1b\n"
>> + :
>> + : [loop] "r" (NUM_BRANCHES)
>> + : "x10", "cc");
>> +}
>> +
>> +static void guest_code(void)
>> +{
>> + uint64_t pmcr = read_sysreg(pmcr_el0);
>> +
>> + pmu_disable_reset();
>> +
>> + write_pmevtypern(0, BR_RETIERD);
>> + write_pmevtypern(1, INST_RETIRED);
>> + enable_counter(0);
>> + enable_counter(1);
>> + write_sysreg(pmcr | ARMV8_PMU_PMCR_E, pmcr_el0);
>> +
>> + run_and_measure_loop();
>> +
>> + write_sysreg(pmcr, pmcr_el0);
>> +
>> + pmc_results.branches_retired = read_sysreg(pmevcntr0_el0);
>> + pmc_results.instructions_retired = read_sysreg(pmevcntr1_el0);
>> +
>> + GUEST_DONE();
> another direct way to see if the guest can use those filters is to read
> the PMCEIDx that indicates whether an event is supported.
>

Yes. That's the easiest way. The reason I did it this way is that I
followed the x86 design.

>> +}
>> +
>> +static void pmu_event_filter_init(struct vpmu_vm *vm, void *arg)
>> +{
>> + struct kvm_device_attr attr = {
>> + .group = KVM_ARM_VCPU_PMU_V3_CTRL,
>> + .attr = KVM_ARM_VCPU_PMU_V3_FILTER,
>> + };
>> + struct kvm_pmu_event_filter *filter = (struct kvm_pmu_event_filter *)arg;
>> +
>> + while (filter && filter->nevents != 0) {
>> + attr.addr = (uint64_t)filter;
>> + vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
>> + filter++;
>> + }
>> +}
>> +
>> +static void create_vpmu_vm_with_filter(void *guest_code,
>> + struct kvm_pmu_event_filter *filter)
>> +{
>> + vpmu_vm = __create_vpmu_vm(guest_code, pmu_event_filter_init, filter);
>> +}
>> +
>> +static void run_vcpu(struct kvm_vcpu *vcpu)
>> +{
>> + struct ucall uc;
>> +
>> + while (1) {
>> + vcpu_run(vcpu);
>> + switch (get_ucall(vcpu, &uc)) {
>> + case UCALL_DONE:
>> + return;
>> + default:
>> + TEST_FAIL("Unknown ucall %lu", uc.cmd);
>> + }
>> + }
>> +}
>> +
>> +static void check_pmc_counting(void)
>> +{
>> + uint64_t br = pmc_results.branches_retired;
>> + uint64_t ir = pmc_results.instructions_retired;
>> +
>> + TEST_ASSERT(br && br == NUM_BRANCHES, "Branch instructions retired = "
>> + "%lu (expected %u)", br, NUM_BRANCHES);
>> + TEST_ASSERT(ir, "Instructions retired = %lu (expected > 0)", ir);
>> +}
>> +
>> +static void check_pmc_not_counting(void)
>> +{
>> + uint64_t br = pmc_results.branches_retired;
>> + uint64_t ir = pmc_results.instructions_retired;
>> +
>> + TEST_ASSERT(!br, "Branch instructions retired = %lu (expected 0)", br);
>> + TEST_ASSERT(!ir, "Instructions retired = %lu (expected 0)", ir);
>> +}
>> +
>> +static void run_vcpu_and_sync_pmc_results(void)
>> +{
>> + memset(&pmc_results, 0, sizeof(pmc_results));
>> + sync_global_to_guest(vpmu_vm->vm, pmc_results);
>> +
>> + run_vcpu(vpmu_vm->vcpu);
>> +
>> + sync_global_from_guest(vpmu_vm->vm, pmc_results);
>> +}
>> +
>> +static void run_test(struct test_desc *t)
>> +{
>> + pr_debug("Test: %s\n", t->name);
>> +
>> + create_vpmu_vm_with_filter(guest_code, t->filter);
>> +
>> + run_vcpu_and_sync_pmc_results();
>> +
>> + t->check_result();
>> +
>> + destroy_vpmu_vm(vpmu_vm);
>> +}
>> +
>> +static struct test_desc tests[] = {
>> + {"without_filter", check_pmc_counting, { EMPTY_FILTER }},
>> + {"member_allow_filter", check_pmc_counting,
>> + {DEFINE_FILTER(SW_INCR, 0), DEFINE_FILTER(INST_RETIRED, 0),
>> + DEFINE_FILTER(BR_RETIERD, 0), EMPTY_FILTER}},
>> + {"member_deny_filter", check_pmc_not_counting,
>> + {DEFINE_FILTER(SW_INCR, 1), DEFINE_FILTER(INST_RETIRED, 1),
>> + DEFINE_FILTER(BR_RETIERD, 1), EMPTY_FILTER}},
>> + {"not_member_deny_filter", check_pmc_counting,
>> + {DEFINE_FILTER(SW_INCR, 1), EMPTY_FILTER}},
>> + {"not_member_allow_filter", check_pmc_not_counting,
>> + {DEFINE_FILTER(SW_INCR, 0), EMPTY_FILTER}},
>> + { 0 }
>> +};
>> +
>> +static void for_each_test(void)
>> +{
>> + struct test_desc *t;
>> +
>> + for (t = &tests[0]; t->name; t++)
>> + run_test(t);
>> +}
>> +
>> +static void set_invalid_filter(struct vpmu_vm *vm, void *arg)
>> +{
>> + struct kvm_pmu_event_filter invalid;
>> + struct kvm_device_attr attr = {
>> + .group = KVM_ARM_VCPU_PMU_V3_CTRL,
>> + .attr = KVM_ARM_VCPU_PMU_V3_FILTER,
>> + .addr = (uint64_t)&invalid,
>> + };
>> + int ret = 0;
>> +
>> + /* The max event number is (1 << 16), set a range large than it. */
>> + invalid = __DEFINE_FILTER(BIT(15), BIT(15)+1, 0);
>> + ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
>> + TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter range "
>> + "ret = %d, errno = %d (expected ret = -1, errno = EINVAL)",
>> + ret, errno);
>> +
>> + ret = 0;
>> +
>> + /* Set the Invalid action. */
>> + invalid = __DEFINE_FILTER(0, 1, 3);
>> + ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
>> + TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter action "
>> + "ret = %d, errno = %d (expected ret = -1, errno = EINVAL)",
>> + ret, errno);
>> +}
>> +
>> +static void test_invalid_filter(void)
>> +{
>> + vpmu_vm = __create_vpmu_vm(guest_code, set_invalid_filter, NULL);
>> + destroy_vpmu_vm(vpmu_vm);
>> +}
>> +
>> +int main(void)
>> +{
>> + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
>> +
>> + for_each_test();
>> +
>> + test_invalid_filter();
> I would introduce test_invalid_filter in a separate patch

Ok. I can split it into two.

Thanks,
Shaoqin

>> +}
>> diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> index e0cc1ca1c4b7..db97bfb07996 100644
>> --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
>> @@ -18,6 +18,10 @@ struct vpmu_vm {
>> int gic_fd;
>> };
>>
>> +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
>> + void (*init_pmu)(struct vpmu_vm *vm, void *arg),
>> + void *arg);
>> +
>> struct vpmu_vm *create_vpmu_vm(void *guest_code);
>>
>> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
>> diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
>> index b3de8fdc555e..76ea03d607f1 100644
>> --- a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
>> +++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c
>> @@ -7,8 +7,9 @@
>> #include <vpmu.h>
>> #include <perf/arm_pmuv3.h>
>>
>> -/* Create a VM that has one vCPU with PMUv3 configured. */
>> -struct vpmu_vm *create_vpmu_vm(void *guest_code)
>> +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
>> + void (*init_pmu)(struct vpmu_vm *vm, void *arg),
>> + void *arg)
>> {
>> struct kvm_vcpu_init init;
>> uint8_t pmuver;
>> @@ -50,12 +51,21 @@ struct vpmu_vm *create_vpmu_vm(void *guest_code)
>> "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
>>
>> /* Initialize vPMU */
>> + if (init_pmu)
>> + init_pmu(vpmu_vm, arg);
>> +
>> vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
>> vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
>>
>> return vpmu_vm;
>> }
>>
>> +/* Create a VM that has one vCPU with PMUv3 configured. */
>> +struct vpmu_vm *create_vpmu_vm(void *guest_code)
>> +{
>> + return __create_vpmu_vm(guest_code, NULL, NULL);
>> +}
>> +
>> void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm)
>> {
>> close(vpmu_vm->gic_fd);
> Thanks
>
> Eric
>

--
Shaoqin