2023-07-17 06:25:56

by Jinrong Liang

Subject: [PATCH v4 0/6] KVM: selftests: Improve PMU event filter settings and add test cases

Hi,

This patch series improves the PMU event filter setup with a cleaner, more
organized structure and adds several test cases related to PMU event
filters.

These changes help to ensure that KVM's PMU event filter functions as expected
in all supported use cases.

Any feedback or suggestions are greatly appreciated.

Sincerely,
Jinrong Liang

Changelog:

v4:
- Rebased to 88bb466c9dec (tag: kvm-x86-next-2023.06.22);
- Add a patch to add macros for fixed counters in processor.h;
- Add a patch to drop the return of remove_event(); (Sean)
- Reverse xmas tree; (Sean)
- Optimize code style and comments; (Sean)

Previous:
https://lore.kernel.org/kvm/[email protected]/T

Jinrong Liang (6):
KVM: selftests: Add macros for fixed counters in processor.h
KVM: selftests: Drop the return of remove_event()
KVM: selftests: Introduce __kvm_pmu_event_filter to improve event
filter settings
KVM: selftests: Add test cases for unsupported PMU event filter input
values
KVM: selftests: Test if event filter meets expectations on fixed
counters
KVM: selftests: Test gp event filters don't affect fixed event filters

.../selftests/kvm/include/x86_64/processor.h | 2 +
.../kvm/x86_64/pmu_event_filter_test.c | 314 ++++++++++++------
2 files changed, 222 insertions(+), 94 deletions(-)


base-commit: 88bb466c9dec4f70d682cf38c685324e7b1b3d60
--
2.39.3



2023-07-17 06:27:16

by Jinrong Liang

Subject: [PATCH v4 3/6] KVM: selftests: Introduce __kvm_pmu_event_filter to improve event filter settings

From: Jinrong Liang <[email protected]>

Add a custom "__kvm_pmu_event_filter" structure to improve PMU event
filter settings. This simplifies event filter setup by organizing all of
the filter parameters in a single, cleaner structure.
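
For example, with this change a filter can be built on the stack with a
designated initializer (a sketch using the EVENT() macro and the struct
added by this patch):

	struct __kvm_pmu_event_filter f = {
		.action = KVM_PMU_EVENT_ALLOW,
		.nevents = 1,
		.events = {
			EVENT(0xc0, 0),	/* INST_RETIRED */
		},
	};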

Signed-off-by: Jinrong Liang <[email protected]>
---
.../kvm/x86_64/pmu_event_filter_test.c | 179 +++++++++---------
1 file changed, 87 insertions(+), 92 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 5ac05e64bec9..ffcbbf25b29b 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -28,6 +28,10 @@

#define NUM_BRANCHES 42

+/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
+#define MAX_FILTER_EVENTS 300
+#define MAX_TEST_EVENTS 10
+
/*
* This is how the event selector and unit mask are stored in an AMD
* core performance event-select register. Intel's format is similar,
@@ -69,21 +73,33 @@

#define INST_RETIRED EVENT(0xc0, 0)

+struct __kvm_pmu_event_filter {
+ __u32 action;
+ __u32 nevents;
+ __u32 fixed_counter_bitmap;
+ __u32 flags;
+ __u32 pad[4];
+ __u64 events[MAX_FILTER_EVENTS];
+};
+
/*
* This event list comprises Intel's eight architectural events plus
* AMD's "retired branch instructions" for Zen[123] (and possibly
* other AMD CPUs).
*/
-static const uint64_t event_list[] = {
- EVENT(0x3c, 0),
- INST_RETIRED,
- EVENT(0x3c, 1),
- EVENT(0x2e, 0x4f),
- EVENT(0x2e, 0x41),
- EVENT(0xc4, 0),
- EVENT(0xc5, 0),
- EVENT(0xa4, 1),
- AMD_ZEN_BR_RETIRED,
+static const struct __kvm_pmu_event_filter base_event_filter = {
+ .nevents = ARRAY_SIZE(base_event_filter.events),
+ .events = {
+ EVENT(0x3c, 0),
+ INST_RETIRED,
+ EVENT(0x3c, 1),
+ EVENT(0x2e, 0x4f),
+ EVENT(0x2e, 0x41),
+ EVENT(0xc4, 0),
+ EVENT(0xc5, 0),
+ EVENT(0xa4, 1),
+ AMD_ZEN_BR_RETIRED,
+ },
};

struct {
@@ -225,47 +241,11 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
return !r;
}

-static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
-{
- struct kvm_pmu_event_filter *f;
- int size = sizeof(*f) + nevents * sizeof(f->events[0]);
-
- f = malloc(size);
- TEST_ASSERT(f, "Out of memory");
- memset(f, 0, size);
- f->nevents = nevents;
- return f;
-}
-
-
-static struct kvm_pmu_event_filter *
-create_pmu_event_filter(const uint64_t event_list[], int nevents,
- uint32_t action, uint32_t flags)
-{
- struct kvm_pmu_event_filter *f;
- int i;
-
- f = alloc_pmu_event_filter(nevents);
- f->action = action;
- f->flags = flags;
- for (i = 0; i < nevents; i++)
- f->events[i] = event_list[i];
-
- return f;
-}
-
-static struct kvm_pmu_event_filter *event_filter(uint32_t action)
-{
- return create_pmu_event_filter(event_list,
- ARRAY_SIZE(event_list),
- action, 0);
-}
-
/*
* Remove the first occurrence of 'event' (if any) from the filter's
* event list.
*/
-static void remove_event(struct kvm_pmu_event_filter *f, uint64_t event)
+static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
{
bool found = false;
int i;
@@ -313,66 +293,70 @@ static void test_without_filter(struct kvm_vcpu *vcpu)
}

static void test_with_filter(struct kvm_vcpu *vcpu,
- struct kvm_pmu_event_filter *f)
+ struct __kvm_pmu_event_filter *__f)
{
+ struct kvm_pmu_event_filter *f = (void *)__f;
+
vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
run_vcpu_and_sync_pmc_results(vcpu);
}

static void test_amd_deny_list(struct kvm_vcpu *vcpu)
{
- uint64_t event = EVENT(0x1C2, 0);
- struct kvm_pmu_event_filter *f;
+ struct __kvm_pmu_event_filter f = base_event_filter;

- f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
- test_with_filter(vcpu, f);
- free(f);
+ f.action = KVM_PMU_EVENT_DENY;
+ f.nevents = 1;
+ f.events[0] = EVENT(0x1C2, 0);
+ test_with_filter(vcpu, &f);

ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_member_deny_list(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
+ struct __kvm_pmu_event_filter f = base_event_filter;

- test_with_filter(vcpu, f);
- free(f);
+ f.action = KVM_PMU_EVENT_DENY;
+ test_with_filter(vcpu, &f);

ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}

static void test_member_allow_list(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
+ struct __kvm_pmu_event_filter f = base_event_filter;

- test_with_filter(vcpu, f);
- free(f);
+ f.action = KVM_PMU_EVENT_ALLOW;
+ test_with_filter(vcpu, &f);

ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
+ struct __kvm_pmu_event_filter f = base_event_filter;
+
+ f.action = KVM_PMU_EVENT_DENY;

- remove_event(f, INST_RETIRED);
- remove_event(f, INTEL_BR_RETIRED);
- remove_event(f, AMD_ZEN_BR_RETIRED);
- test_with_filter(vcpu, f);
- free(f);
+ remove_event(&f, INST_RETIRED);
+ remove_event(&f, INTEL_BR_RETIRED);
+ remove_event(&f, AMD_ZEN_BR_RETIRED);
+ test_with_filter(vcpu, &f);

ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
+ struct __kvm_pmu_event_filter f = base_event_filter;

- remove_event(f, INST_RETIRED);
- remove_event(f, INTEL_BR_RETIRED);
- remove_event(f, AMD_ZEN_BR_RETIRED);
- test_with_filter(vcpu, f);
- free(f);
+ f.action = KVM_PMU_EVENT_ALLOW;
+
+ remove_event(&f, INST_RETIRED);
+ remove_event(&f, INTEL_BR_RETIRED);
+ remove_event(&f, AMD_ZEN_BR_RETIRED);
+ test_with_filter(vcpu, &f);

ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}
@@ -567,19 +551,16 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu,
const uint64_t masked_events[],
const int nmasked_events)
{
- struct kvm_pmu_event_filter *f;
+ struct __kvm_pmu_event_filter f = {
+ .nevents = nmasked_events,
+ .action = KVM_PMU_EVENT_ALLOW,
+ .flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+ };

- f = create_pmu_event_filter(masked_events, nmasked_events,
- KVM_PMU_EVENT_ALLOW,
- KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
- test_with_filter(vcpu, f);
- free(f);
+ memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events);
+ test_with_filter(vcpu, &f);
}

-/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
-#define MAX_FILTER_EVENTS 300
-#define MAX_TEST_EVENTS 10
-
#define ALLOW_LOADS BIT(0)
#define ALLOW_STORES BIT(1)
#define ALLOW_LOADS_STORES BIT(2)
@@ -751,17 +732,27 @@ static void test_masked_events(struct kvm_vcpu *vcpu)
run_masked_events_tests(vcpu, events, nevents);
}

-static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
- int nevents, uint32_t flags)
+static int do_vcpu_set_pmu_event_filter(struct kvm_vcpu *vcpu,
+ struct __kvm_pmu_event_filter *__f)
{
- struct kvm_pmu_event_filter *f;
- int r;
+ struct kvm_pmu_event_filter *f = (void *)__f;

- f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
- r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
- free(f);
+ return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
+}
+
+static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
+ uint32_t flags, uint32_t action)
+{
+ struct __kvm_pmu_event_filter f = {
+ .nevents = 1,
+ .flags = flags,
+ .action = action,
+ .events = {
+ event,
+ },
+ };

- return r;
+ return do_vcpu_set_pmu_event_filter(vcpu, &f);
}

static void test_filter_ioctl(struct kvm_vcpu *vcpu)
@@ -773,14 +764,18 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
* Unfortunately having invalid bits set in event data is expected to
* pass when flags == 0 (bits other than eventsel+umask).
*/
- r = run_filter_test(vcpu, &e, 1, 0);
+ r = set_pmu_single_event_filter(vcpu, e, 0, KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");

- r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+ r = set_pmu_single_event_filter(vcpu, e,
+ KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+ KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");

e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
- r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+ r = set_pmu_single_event_filter(vcpu, e,
+ KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+ KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
}

--
2.39.3


2023-07-17 06:27:57

by Jinrong Liang

Subject: [PATCH v4 2/6] KVM: selftests: Drop the return of remove_event()

From: Jinrong Liang <[email protected]>

None of the callers consume the return value of remove_event(), and
returning the filter incorrectly implies that the incoming filter isn't
modified in place. Drop the return.
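
After this change, remove_event() is called purely for its side effect
of modifying the filter in place, e.g.:

	remove_event(f, INST_RETIRED);
	remove_event(f, INTEL_BR_RETIRED);
	remove_event(f, AMD_ZEN_BR_RETIRED);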

Signed-off-by: Jinrong Liang <[email protected]>
---
tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 40507ed9fe8a..5ac05e64bec9 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -265,8 +265,7 @@ static struct kvm_pmu_event_filter *event_filter(uint32_t action)
* Remove the first occurrence of 'event' (if any) from the filter's
* event list.
*/
-static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
- uint64_t event)
+static void remove_event(struct kvm_pmu_event_filter *f, uint64_t event)
{
bool found = false;
int i;
@@ -279,7 +278,6 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
}
if (found)
f->nevents--;
- return f;
}

#define ASSERT_PMC_COUNTING_INSTRUCTIONS() \
--
2.39.3


2023-07-17 06:32:16

by Jinrong Liang

Subject: [PATCH v4 6/6] KVM: selftests: Test gp event filters don't affect fixed event filters

From: Jinrong Liang <[email protected]>

Add a test to verify that setting both a gp event filter (events[]) and
a fixed counter filter (fixed_counter_bitmap) does not affect the fixed
counter filter's behavior, i.e. that fixed_counter_bitmap takes priority
over events[] for fixed counters in KVM.
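
For example (a sketch of the new helper added below), the test applies a
filter that populates both the gp events[] list, inherited from
base_event_filter, and the fixed_counter_bitmap:

	struct __kvm_pmu_event_filter f = base_event_filter;

	f.action = KVM_PMU_EVENT_ALLOW;
	f.fixed_counter_bitmap = bitmap;
	do_vcpu_set_pmu_event_filter(vcpu, &f);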

Signed-off-by: Jinrong Liang <[email protected]>
---
.../kvm/x86_64/pmu_event_filter_test.c | 27 +++++++++++++++++++
1 file changed, 27 insertions(+)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 1872b848f734..b2e432542a8c 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -835,6 +835,19 @@ static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
return run_vcpu_to_sync(vcpu);
}

+static uint64_t test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu,
+ uint32_t action,
+ uint32_t bitmap)
+{
+ struct __kvm_pmu_event_filter f = base_event_filter;
+
+ f.action = action;
+ f.fixed_counter_bitmap = bitmap;
+ do_vcpu_set_pmu_event_filter(vcpu, &f);
+
+ return run_vcpu_to_sync(vcpu);
+}
+
static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
uint8_t nr_fixed_counters)
{
@@ -861,6 +874,20 @@ static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY,
bitmap);
ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
+
+ /*
+ * Check that fixed_counter_bitmap has higher priority than
+ * events[] when both are set.
+ */
+ count = test_set_gp_and_fixed_event_filter(vcpu,
+ KVM_PMU_EVENT_ALLOW,
+ bitmap);
+ ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));
+
+ count = test_set_gp_and_fixed_event_filter(vcpu,
+ KVM_PMU_EVENT_DENY,
+ bitmap);
+ ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
}
}

--
2.39.3


2023-07-17 06:34:54

by Jinrong Liang

Subject: [PATCH v4 4/6] KVM: selftests: Add test cases for unsupported PMU event filter input values

From: Jinrong Liang <[email protected]>

Add test cases to verify the handling of unsupported input values for the
PMU event filter. The tests cover unsupported "action" values, unsupported
"flags" values, and unsupported "nevents" values; all of these should be
rejected with an error, as the filter does not support them. The tests
also cover the scenario where setting non-existent fixed counters in the
fixed bitmap does not fail.
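
For example (mirroring the new test code below), an out-of-range action
value should be rejected by KVM_SET_PMU_EVENT_FILTER:

	struct __kvm_pmu_event_filter f = base_event_filter;

	f.action = PMU_EVENT_FILTER_INVALID_ACTION;	/* KVM_PMU_EVENT_DENY + 1 */
	r = do_vcpu_set_pmu_event_filter(vcpu, &f);
	TEST_ASSERT(r, "Set invalid action is expected to fail");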

Signed-off-by: Jinrong Liang <[email protected]>
---
.../kvm/x86_64/pmu_event_filter_test.c | 26 +++++++++++++++++++
1 file changed, 26 insertions(+)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index ffcbbf25b29b..63f85f583ef8 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -32,6 +32,10 @@
#define MAX_FILTER_EVENTS 300
#define MAX_TEST_EVENTS 10

+#define PMU_EVENT_FILTER_INVALID_ACTION (KVM_PMU_EVENT_DENY + 1)
+#define PMU_EVENT_FILTER_INVALID_FLAGS (KVM_PMU_EVENT_FLAG_MASKED_EVENTS + 1)
+#define PMU_EVENT_FILTER_INVALID_NEVENTS (MAX_FILTER_EVENTS + 1)
+
/*
* This is how the event selector and unit mask are stored in an AMD
* core performance event-select register. Intel's format is similar,
@@ -757,6 +761,8 @@ static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,

static void test_filter_ioctl(struct kvm_vcpu *vcpu)
{
+ uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+ struct __kvm_pmu_event_filter f;
uint64_t e = ~0ul;
int r;

@@ -777,6 +783,26 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
+
+ f = base_event_filter;
+ f.action = PMU_EVENT_FILTER_INVALID_ACTION;
+ r = do_vcpu_set_pmu_event_filter(vcpu, &f);
+ TEST_ASSERT(r, "Set invalid action is expected to fail");
+
+ f = base_event_filter;
+ f.flags = PMU_EVENT_FILTER_INVALID_FLAGS;
+ r = do_vcpu_set_pmu_event_filter(vcpu, &f);
+ TEST_ASSERT(r, "Set invalid flags is expected to fail");
+
+ f = base_event_filter;
+ f.nevents = PMU_EVENT_FILTER_INVALID_NEVENTS;
+ r = do_vcpu_set_pmu_event_filter(vcpu, &f);
+ TEST_ASSERT(r, "Exceeding the max number of filter events should fail");
+
+ f = base_event_filter;
+ f.fixed_counter_bitmap = ~GENMASK_ULL(nr_fixed_counters, 0);
+ r = do_vcpu_set_pmu_event_filter(vcpu, &f);
+ TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
}

int main(int argc, char *argv[])
--
2.39.3


2023-07-17 06:43:04

by Jinrong Liang

Subject: [PATCH v4 5/6] KVM: selftests: Test if event filter meets expectations on fixed counters

From: Jinrong Liang <[email protected]>

Add tests to verify that the PMU event filter works as expected when
it's applied to fixed performance counters, even when no fixed counters
exist (e.g. an Intel guest with PMU version 1, or an AMD guest).
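
For example (a sketch of the helper added below), the host side applies
a filter that only touches the fixed counter bitmap and then checks
whether the guest's fixed counter under test still counts:

	struct __kvm_pmu_event_filter f = {
		.action = KVM_PMU_EVENT_ALLOW,
		.fixed_counter_bitmap = BIT(idx),
	};

	do_vcpu_set_pmu_event_filter(vcpu, &f);
	count = run_vcpu_to_sync(vcpu);	/* expected nonzero: counter idx is allowed */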

Signed-off-by: Jinrong Liang <[email protected]>
---
.../kvm/x86_64/pmu_event_filter_test.c | 80 +++++++++++++++++++
1 file changed, 80 insertions(+)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 63f85f583ef8..1872b848f734 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -27,6 +27,7 @@
#define ARCH_PERFMON_BRANCHES_RETIRED 5

#define NUM_BRANCHES 42
+#define INTEL_PMC_IDX_FIXED 32

/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
#define MAX_FILTER_EVENTS 300
@@ -805,6 +806,84 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
}

+static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
+{
+ for (;;) {
+ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);
+
+ /* Only OS_EN bit is enabled for fixed counter[idx]. */
+ wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
+ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
+ BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
+ __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
+ GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
+ }
+}
+
+static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
+ uint32_t action, uint32_t bitmap)
+{
+ struct __kvm_pmu_event_filter f = {
+ .action = action,
+ .fixed_counter_bitmap = bitmap,
+ };
+ do_vcpu_set_pmu_event_filter(vcpu, &f);
+
+ return run_vcpu_to_sync(vcpu);
+}
+
+static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
+ uint8_t nr_fixed_counters)
+{
+ unsigned int i;
+ uint32_t bitmap;
+ uint64_t count;
+
+ TEST_ASSERT(nr_fixed_counters < sizeof(bitmap),
+ "Invalid nr_fixed_counters");
+
+ /*
+ * Check the fixed performance counter can count normally when KVM
+ * userspace doesn't set any pmu filter.
+ */
+ count = run_vcpu_to_sync(vcpu);
+ TEST_ASSERT(count, "Unexpected count value: %ld\n", count);
+
+ for (i = 0; i < BIT(nr_fixed_counters); i++) {
+ bitmap = BIT(i);
+ count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW,
+ bitmap);
+ ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));
+
+ count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY,
+ bitmap);
+ ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
+ }
+}
+
+static void test_fixed_counter_bitmap(void)
+{
+ uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+ uint8_t idx;
+
+ /*
+ * Check that pmu_event_filter works as expected when it's applied to
+ * fixed performance counters.
+ */
+ for (idx = 0; idx < nr_fixed_counters; idx++) {
+ vm = vm_create_with_one_vcpu(&vcpu,
+ intel_run_fixed_counter_guest_code);
+ vcpu_args_set(vcpu, 1, idx);
+ __test_fixed_counter_bitmap(vcpu, idx, nr_fixed_counters);
+ kvm_vm_free(vm);
+ }
+}
+
int main(int argc, char *argv[])
{
void (*guest_code)(void);
@@ -848,6 +927,7 @@ int main(int argc, char *argv[])
kvm_vm_free(vm);

test_pmu_config_disable(guest_code);
+ test_fixed_counter_bitmap();

return 0;
}
--
2.39.3


2023-07-17 06:52:08

by Jinrong Liang

Subject: [PATCH v4 1/6] KVM: selftests: Add macros for fixed counters in processor.h

From: Jinrong Liang <[email protected]>

Add x86 properties for the number of PMU fixed counters and the bitmask
that allows for "discontiguous" fixed counters so that tests don't have
to manually retrieve the correct CPUID leaf+register, and so that the
resulting code is self-documenting.
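
With these properties, tests can query the fixed counter information
directly, e.g. (variable names illustrative):

	uint8_t nr_fixed_counters =
		kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	uint32_t fixed_counters_bitmask =
		kvm_cpu_property(X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK);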

Signed-off-by: Jinrong Liang <[email protected]>
---
tools/testing/selftests/kvm/include/x86_64/processor.h | 2 ++
1 file changed, 2 insertions(+)

diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index aa434c8f19c5..15331abf063b 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -240,6 +240,8 @@ struct kvm_x86_cpu_property {
#define X86_PROPERTY_PMU_VERSION KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
#define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
+#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
+#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)

#define X86_PROPERTY_SUPPORTED_XCR0_LO KVM_X86_CPU_PROPERTY(0xd, 0, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0 KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
--
2.39.3


2023-07-19 00:21:10

by Isaku Yamahata

Subject: Re: [PATCH v4 3/6] KVM: selftests: Introduce __kvm_pmu_event_filter to improve event filter settings

On Mon, Jul 17, 2023 at 02:23:40PM +0800,
Jinrong Liang <[email protected]> wrote:

> From: Jinrong Liang <[email protected]>
>
> Add a custom "__kvm_pmu_event_filter" structure to improve PMU event
> filter settings. This simplifies event filter setup by organizing all of
> the filter parameters in a single, cleaner structure.
>
> Signed-off-by: Jinrong Liang <[email protected]>
> ---
> .../kvm/x86_64/pmu_event_filter_test.c | 179 +++++++++---------
> 1 file changed, 87 insertions(+), 92 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> index 5ac05e64bec9..ffcbbf25b29b 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> @@ -28,6 +28,10 @@
>
> #define NUM_BRANCHES 42
>
> +/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
> +#define MAX_FILTER_EVENTS 300
> +#define MAX_TEST_EVENTS 10
> +
> /*
> * This is how the event selector and unit mask are stored in an AMD
> * core performance event-select register. Intel's format is similar,
> @@ -69,21 +73,33 @@
>
> #define INST_RETIRED EVENT(0xc0, 0)
>
> +struct __kvm_pmu_event_filter {
> + __u32 action;
> + __u32 nevents;
> + __u32 fixed_counter_bitmap;
> + __u32 flags;
> + __u32 pad[4];
> + __u64 events[MAX_FILTER_EVENTS];
> +};
> +
> /*
> * This event list comprises Intel's eight architectural events plus
> * AMD's "retired branch instructions" for Zen[123] (and possibly
> * other AMD CPUs).
> */
> -static const uint64_t event_list[] = {
> - EVENT(0x3c, 0),
> - INST_RETIRED,
> - EVENT(0x3c, 1),
> - EVENT(0x2e, 0x4f),
> - EVENT(0x2e, 0x41),
> - EVENT(0xc4, 0),
> - EVENT(0xc5, 0),
> - EVENT(0xa4, 1),
> - AMD_ZEN_BR_RETIRED,
> +static const struct __kvm_pmu_event_filter base_event_filter = {
> + .nevents = ARRAY_SIZE(base_event_filter.events),
> + .events = {
> + EVENT(0x3c, 0),
> + INST_RETIRED,
> + EVENT(0x3c, 1),
> + EVENT(0x2e, 0x4f),
> + EVENT(0x2e, 0x41),
> + EVENT(0xc4, 0),
> + EVENT(0xc5, 0),
> + EVENT(0xa4, 1),
> + AMD_ZEN_BR_RETIRED,
> + },
> };
>
> struct {
> @@ -225,47 +241,11 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
> return !r;
> }
>
> -static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
> -{
> - struct kvm_pmu_event_filter *f;
> - int size = sizeof(*f) + nevents * sizeof(f->events[0]);
> -
> - f = malloc(size);
> - TEST_ASSERT(f, "Out of memory");
> - memset(f, 0, size);
> - f->nevents = nevents;
> - return f;
> -}
> -
> -
> -static struct kvm_pmu_event_filter *
> -create_pmu_event_filter(const uint64_t event_list[], int nevents,
> - uint32_t action, uint32_t flags)
> -{
> - struct kvm_pmu_event_filter *f;
> - int i;
> -
> - f = alloc_pmu_event_filter(nevents);
> - f->action = action;
> - f->flags = flags;
> - for (i = 0; i < nevents; i++)
> - f->events[i] = event_list[i];
> -
> - return f;
> -}
> -
> -static struct kvm_pmu_event_filter *event_filter(uint32_t action)
> -{
> - return create_pmu_event_filter(event_list,
> - ARRAY_SIZE(event_list),
> - action, 0);
> -}
> -
> /*
> * Remove the first occurrence of 'event' (if any) from the filter's
> * event list.
> */
> -static void remove_event(struct kvm_pmu_event_filter *f, uint64_t event)
> +static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
> {
> bool found = false;
> int i;
> @@ -313,66 +293,70 @@ static void test_without_filter(struct kvm_vcpu *vcpu)
> }
>
> static void test_with_filter(struct kvm_vcpu *vcpu,
> - struct kvm_pmu_event_filter *f)
> + struct __kvm_pmu_event_filter *__f)
> {
> + struct kvm_pmu_event_filter *f = (void *)__f;
> +
> vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
> run_vcpu_and_sync_pmc_results(vcpu);
> }
>
> static void test_amd_deny_list(struct kvm_vcpu *vcpu)
> {
> - uint64_t event = EVENT(0x1C2, 0);
> - struct kvm_pmu_event_filter *f;
> + struct __kvm_pmu_event_filter f = base_event_filter;
>
> - f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
> - test_with_filter(vcpu, f);
> - free(f);
> + f.action = KVM_PMU_EVENT_DENY;
> + f.nevents = 1;
> + f.events[0] = EVENT(0x1C2, 0);
> + test_with_filter(vcpu, &f);

This overwrites all of the members. We can use a designated initializer instead:
struct __kvm_pmu_event_filter f = {
.action = KVM_PMU_EVENT_DENY,
.nevents = 1,
.events = {
EVENT(0x1C2, 0),
},
};

Except for this, looks good to me.
Reviewed-by: Isaku Yamahata <[email protected]>

Thanks,

>
> ASSERT_PMC_COUNTING_INSTRUCTIONS();
> }
>
> static void test_member_deny_list(struct kvm_vcpu *vcpu)
> {
> - struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
> + struct __kvm_pmu_event_filter f = base_event_filter;
>
> - test_with_filter(vcpu, f);
> - free(f);
> + f.action = KVM_PMU_EVENT_DENY;
> + test_with_filter(vcpu, &f);
>
> ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
> }
>
> static void test_member_allow_list(struct kvm_vcpu *vcpu)
> {
> - struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
> + struct __kvm_pmu_event_filter f = base_event_filter;
>
> - test_with_filter(vcpu, f);
> - free(f);
> + f.action = KVM_PMU_EVENT_ALLOW;
> + test_with_filter(vcpu, &f);
>
> ASSERT_PMC_COUNTING_INSTRUCTIONS();
> }
>
> static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
> {
> - struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
> + struct __kvm_pmu_event_filter f = base_event_filter;
> +
> + f.action = KVM_PMU_EVENT_DENY;
>
> - remove_event(f, INST_RETIRED);
> - remove_event(f, INTEL_BR_RETIRED);
> - remove_event(f, AMD_ZEN_BR_RETIRED);
> - test_with_filter(vcpu, f);
> - free(f);
> + remove_event(&f, INST_RETIRED);
> + remove_event(&f, INTEL_BR_RETIRED);
> + remove_event(&f, AMD_ZEN_BR_RETIRED);
> + test_with_filter(vcpu, &f);
>
> ASSERT_PMC_COUNTING_INSTRUCTIONS();
> }
>
> static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
> {
> - struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
> + struct __kvm_pmu_event_filter f = base_event_filter;
>
> - remove_event(f, INST_RETIRED);
> - remove_event(f, INTEL_BR_RETIRED);
> - remove_event(f, AMD_ZEN_BR_RETIRED);
> - test_with_filter(vcpu, f);
> - free(f);
> + f.action = KVM_PMU_EVENT_ALLOW;
> +
> + remove_event(&f, INST_RETIRED);
> + remove_event(&f, INTEL_BR_RETIRED);
> + remove_event(&f, AMD_ZEN_BR_RETIRED);
> + test_with_filter(vcpu, &f);
>
> ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
> }
> @@ -567,19 +551,16 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu,
> const uint64_t masked_events[],
> const int nmasked_events)
> {
> - struct kvm_pmu_event_filter *f;
> + struct __kvm_pmu_event_filter f = {
> + .nevents = nmasked_events,
> + .action = KVM_PMU_EVENT_ALLOW,
> + .flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
> + };
>
> - f = create_pmu_event_filter(masked_events, nmasked_events,
> - KVM_PMU_EVENT_ALLOW,
> - KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
> - test_with_filter(vcpu, f);
> - free(f);
> + memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events);
> + test_with_filter(vcpu, &f);
> }
>
> -/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
> -#define MAX_FILTER_EVENTS 300
> -#define MAX_TEST_EVENTS 10
> -
> #define ALLOW_LOADS BIT(0)
> #define ALLOW_STORES BIT(1)
> #define ALLOW_LOADS_STORES BIT(2)
> @@ -751,17 +732,27 @@ static void test_masked_events(struct kvm_vcpu *vcpu)
> run_masked_events_tests(vcpu, events, nevents);
> }
>
> -static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
> - int nevents, uint32_t flags)
> +static int do_vcpu_set_pmu_event_filter(struct kvm_vcpu *vcpu,
> + struct __kvm_pmu_event_filter *__f)
> {
> - struct kvm_pmu_event_filter *f;
> - int r;
> + struct kvm_pmu_event_filter *f = (void *)__f;
>
> - f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
> - r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
> - free(f);
> + return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
> +}
> +
> +static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
> + uint32_t flags, uint32_t action)
> +{
> + struct __kvm_pmu_event_filter f = {
> + .nevents = 1,
> + .flags = flags,
> + .action = action,
> + .events = {
> + event,
> + },
> + };
>
> - return r;
> + return do_vcpu_set_pmu_event_filter(vcpu, &f);
> }
>
> static void test_filter_ioctl(struct kvm_vcpu *vcpu)
> @@ -773,14 +764,18 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
> * Unfortunately having invalid bits set in event data is expected to
> * pass when flags == 0 (bits other than eventsel+umask).
> */
> - r = run_filter_test(vcpu, &e, 1, 0);
> + r = set_pmu_single_event_filter(vcpu, e, 0, KVM_PMU_EVENT_ALLOW);
> TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
>
> - r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
> + r = set_pmu_single_event_filter(vcpu, e,
> + KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
> + KVM_PMU_EVENT_ALLOW);
> TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");
>
> e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
> - r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
> + r = set_pmu_single_event_filter(vcpu, e,
> + KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
> + KVM_PMU_EVENT_ALLOW);
> TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
> }
>
> --
> 2.39.3
>

--
Isaku Yamahata <[email protected]>

2023-07-19 01:27:41

by Isaku Yamahata

Subject: Re: [PATCH v4 4/6] KVM: selftests: Add test cases for unsupported PMU event filter input values

On Mon, Jul 17, 2023 at 02:23:41PM +0800,
Jinrong Liang <[email protected]> wrote:

> From: Jinrong Liang <[email protected]>
>
> Add test cases to verify the handling of unsupported input values for the
> PMU event filter. The tests cover unsupported "action" values, unsupported
> "flags" values, and unsupported "nevents" values; all of these should be
> rejected with an error, as the filter does not support them. The tests
> also cover the scenario where setting non-existent fixed counters in the
> fixed bitmap does not fail.
>
> Signed-off-by: Jinrong Liang <[email protected]>
> ---
> .../kvm/x86_64/pmu_event_filter_test.c | 26 +++++++++++++++++++
> 1 file changed, 26 insertions(+)
>
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> index ffcbbf25b29b..63f85f583ef8 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> @@ -32,6 +32,10 @@
> #define MAX_FILTER_EVENTS 300
> #define MAX_TEST_EVENTS 10
>
> +#define PMU_EVENT_FILTER_INVALID_ACTION (KVM_PMU_EVENT_DENY + 1)
> +#define PMU_EVENT_FILTER_INVALID_FLAGS (KVM_PMU_EVENT_FLAG_MASKED_EVENTS + 1)

The flags field is a bit mask, not a number, so +1 sounds weird.
Because KVM_PMU_EVENT_FLAGS_VALID_MASK happens to be 1, this gets the wanted result, though.
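
E.g. with KVM_PMU_EVENT_FLAG_MASKED_EVENTS == BIT(0), the arithmetic is:

	KVM_PMU_EVENT_FLAG_MASKED_EVENTS + 1	/* == 0x2, outside the valid mask, but only by luck */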


> +#define PMU_EVENT_FILTER_INVALID_NEVENTS (MAX_FILTER_EVENTS + 1)
> +
> /*
> * This is how the event selector and unit mask are stored in an AMD
> * core performance event-select register. Intel's format is similar,
> @@ -757,6 +761,8 @@ static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
>
> static void test_filter_ioctl(struct kvm_vcpu *vcpu)
> {
> + uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
> + struct __kvm_pmu_event_filter f;
> uint64_t e = ~0ul;
> int r;
>
> @@ -777,6 +783,26 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
> KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
> KVM_PMU_EVENT_ALLOW);
> TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
> +
> + f = base_event_filter;
> + f.action = PMU_EVENT_FILTER_INVALID_ACTION;
> + r = do_vcpu_set_pmu_event_filter(vcpu, &f);
> + TEST_ASSERT(r, "Set invalid action is expected to fail");
> +
> + f = base_event_filter;
> + f.flags = PMU_EVENT_FILTER_INVALID_FLAGS;
> + r = do_vcpu_set_pmu_event_filter(vcpu, &f);
> + TEST_ASSERT(r, "Set invalid flags is expected to fail");
> +
> + f = base_event_filter;
> + f.nevents = PMU_EVENT_FILTER_INVALID_NEVENTS;
> + r = do_vcpu_set_pmu_event_filter(vcpu, &f);
> + TEST_ASSERT(r, "Exceeding the max number of filter events should fail");
> +
> + f = base_event_filter;
> + f.fixed_counter_bitmap = ~GENMASK_ULL(nr_fixed_counters, 0);
> + r = do_vcpu_set_pmu_event_filter(vcpu, &f);
> + TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
> }
>
> int main(int argc, char *argv[])
> --
> 2.39.3
>

--
Isaku Yamahata <[email protected]>

2023-07-19 01:43:32

by Isaku Yamahata

Subject: Re: [PATCH v4 5/6] KVM: selftests: Test if event filter meets expectations on fixed counters

On Mon, Jul 17, 2023 at 02:23:42PM +0800,
Jinrong Liang <[email protected]> wrote:

> From: Jinrong Liang <[email protected]>
>
> Add tests to verify that the PMU event filter works as expected when
> it's applied to fixed performance counters, even when no fixed counters
> exist (e.g. an Intel guest with PMU version 1, or an AMD guest).
>
> Signed-off-by: Jinrong Liang <[email protected]>
> ---
> .../kvm/x86_64/pmu_event_filter_test.c | 80 +++++++++++++++++++
> 1 file changed, 80 insertions(+)
>
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> index 63f85f583ef8..1872b848f734 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> @@ -27,6 +27,7 @@
> #define ARCH_PERFMON_BRANCHES_RETIRED 5
>
> #define NUM_BRANCHES 42
> +#define INTEL_PMC_IDX_FIXED 32
>
> /* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
> #define MAX_FILTER_EVENTS 300
> @@ -805,6 +806,84 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
> TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
> }
>
> +static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
> +{
> + for (;;) {
> + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
> + wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);
> +
> + /* Only OS_EN bit is enabled for fixed counter[idx]. */
> + wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
> + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
> + BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
> + __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
> + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
> +
> + GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
> + }
> +}
> +
> +static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
> + uint32_t action, uint32_t bitmap)
> +{
> + struct __kvm_pmu_event_filter f = {
> + .action = action,
> + .fixed_counter_bitmap = bitmap,
> + };
> + do_vcpu_set_pmu_event_filter(vcpu, &f);
> +
> + return run_vcpu_to_sync(vcpu);
> +}
> +
> +static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
> + uint8_t nr_fixed_counters)
> +{
> + unsigned int i;
> + uint32_t bitmap;
> + uint64_t count;
> +
> + TEST_ASSERT(nr_fixed_counters < sizeof(bitmap),

sizeof(bitmap) * 8?

> + "Invalid nr_fixed_counters");
> +
> + /*
> + * Check the fixed performance counter can count normally when KVM
> + * userspace doesn't set any pmu filter.
> + */
> + count = run_vcpu_to_sync(vcpu);
> + TEST_ASSERT(count, "Unexpected count value: %ld\n", count);
> +
> + for (i = 0; i < BIT(nr_fixed_counters); i++) {
> + bitmap = BIT(i);
> + count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW,
> + bitmap);
> + ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));
> +
> + count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY,
> + bitmap);
> + ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
> + }
> +}
> +
> +static void test_fixed_counter_bitmap(void)
> +{
> + uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
> + struct kvm_vm *vm;
> + struct kvm_vcpu *vcpu;
> + uint8_t idx;
> +
> + /*
> + * Check that pmu_event_filter works as expected when it's applied to
> + * fixed performance counters.
> + */
> + for (idx = 0; idx < nr_fixed_counters; idx++) {
> + vm = vm_create_with_one_vcpu(&vcpu,
> + intel_run_fixed_counter_guest_code);
> + vcpu_args_set(vcpu, 1, idx);
> + __test_fixed_counter_bitmap(vcpu, idx, nr_fixed_counters);
> + kvm_vm_free(vm);
> + }
> +}
> +
> int main(int argc, char *argv[])
> {
> void (*guest_code)(void);
> @@ -848,6 +927,7 @@ int main(int argc, char *argv[])
> kvm_vm_free(vm);
>
> test_pmu_config_disable(guest_code);
> + test_fixed_counter_bitmap();
>
> return 0;
> }
> --
> 2.39.3
>

--
Isaku Yamahata <[email protected]>

2023-07-19 03:26:19

by Jinrong Liang

Subject: Re: [PATCH v4 5/6] KVM: selftests: Test if event filter meets expectations on fixed counters

On Wed, Jul 19, 2023 at 09:21, Isaku Yamahata <[email protected]> wrote:
>
> On Mon, Jul 17, 2023 at 02:23:42PM +0800,
> Jinrong Liang <[email protected]> wrote:
>
> > From: Jinrong Liang <[email protected]>
> >
> > Add tests to verify that the PMU event filter works as expected when
> > it's applied to fixed performance counters, even when no fixed counters
> > exist (e.g. an Intel guest with PMU version 1, or an AMD guest).
> >
> > Signed-off-by: Jinrong Liang <[email protected]>
> > ---
> > .../kvm/x86_64/pmu_event_filter_test.c | 80 +++++++++++++++++++
> > 1 file changed, 80 insertions(+)
> >
> > diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> > index 63f85f583ef8..1872b848f734 100644
> > --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> > +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> > @@ -27,6 +27,7 @@
> > #define ARCH_PERFMON_BRANCHES_RETIRED 5
> >
> > #define NUM_BRANCHES 42
> > +#define INTEL_PMC_IDX_FIXED 32
> >
> > /* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
> > #define MAX_FILTER_EVENTS 300
> > @@ -805,6 +806,84 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
> > TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
> > }
> >
> > +static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
> > +{
> > + for (;;) {
> > + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
> > + wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);
> > +
> > + /* Only OS_EN bit is enabled for fixed counter[idx]. */
> > + wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
> > + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
> > + BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
> > + __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
> > + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
> > +
> > + GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
> > + }
> > +}
> > +
> > +static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
> > + uint32_t action, uint32_t bitmap)
> > +{
> > + struct __kvm_pmu_event_filter f = {
> > + .action = action,
> > + .fixed_counter_bitmap = bitmap,
> > + };
> > + do_vcpu_set_pmu_event_filter(vcpu, &f);
> > +
> > + return run_vcpu_to_sync(vcpu);
> > +}
> > +
> > +static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
> > + uint8_t nr_fixed_counters)
> > +{
> > + unsigned int i;
> > + uint32_t bitmap;
> > + uint64_t count;
> > +
> > + TEST_ASSERT(nr_fixed_counters < sizeof(bitmap),
>
> sizeof(bitmap) * 8?

Thank you for pointing this out. You are correct: we should compare the
number of fixed counters with the number of bits in the bitmap variable,
not its byte size. I will update the test as follows:

	TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8,
		    "Invalid nr_fixed_counters");

This ensures that nr_fixed_counters does not exceed the number of bits
that the bitmap variable can represent (i.e., 32 bits).

>
> > + "Invalid nr_fixed_counters");
> > +
> > + /*
> > + * Check the fixed performance counter can count normally when KVM
> > + * userspace doesn't set any pmu filter.
> > + */
> > + count = run_vcpu_to_sync(vcpu);
> > + TEST_ASSERT(count, "Unexpected count value: %ld\n", count);
> > +
> > + for (i = 0; i < BIT(nr_fixed_counters); i++) {
> > + bitmap = BIT(i);
> > + count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW,
> > + bitmap);
> > + ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));
> > +
> > + count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY,
> > + bitmap);
> > + ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
> > + }
> > +}
> > +
> > +static void test_fixed_counter_bitmap(void)
> > +{
> > + uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
> > + struct kvm_vm *vm;
> > + struct kvm_vcpu *vcpu;
> > + uint8_t idx;
> > +
> > + /*
> > + * Check that pmu_event_filter works as expected when it's applied to
> > + * fixed performance counters.
> > + */
> > + for (idx = 0; idx < nr_fixed_counters; idx++) {
> > + vm = vm_create_with_one_vcpu(&vcpu,
> > + intel_run_fixed_counter_guest_code);
> > + vcpu_args_set(vcpu, 1, idx);
> > + __test_fixed_counter_bitmap(vcpu, idx, nr_fixed_counters);
> > + kvm_vm_free(vm);
> > + }
> > +}
> > +
> > int main(int argc, char *argv[])
> > {
> > void (*guest_code)(void);
> > @@ -848,6 +927,7 @@ int main(int argc, char *argv[])
> > kvm_vm_free(vm);
> >
> > test_pmu_config_disable(guest_code);
> > + test_fixed_counter_bitmap();
> >
> > return 0;
> > }
> > --
> > 2.39.3
> >
>
> --
> Isaku Yamahata <[email protected]>

2023-07-19 04:18:28

by Jinrong Liang

Subject: Re: [PATCH v4 4/6] KVM: selftests: Add test cases for unsupported PMU event filter input values

On Wed, Jul 19, 2023 at 09:17, Isaku Yamahata <[email protected]> wrote:
>
> On Mon, Jul 17, 2023 at 02:23:41PM +0800,
> Jinrong Liang <[email protected]> wrote:
>
> > From: Jinrong Liang <[email protected]>
> >
> > Add test cases to verify the handling of unsupported input values for the
> > PMU event filter. The tests cover unsupported "action" values, unsupported
> > "flags" values, and unsupported "nevents" values; all of these should be
> > rejected with an error, as the filter does not support them. The tests
> > also cover the scenario where setting non-existent fixed counters in the
> > fixed bitmap does not fail.
> >
> > Signed-off-by: Jinrong Liang <[email protected]>
> > ---
> > .../kvm/x86_64/pmu_event_filter_test.c | 26 +++++++++++++++++++
> > 1 file changed, 26 insertions(+)
> >
> > diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> > index ffcbbf25b29b..63f85f583ef8 100644
> > --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> > +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> > @@ -32,6 +32,10 @@
> > #define MAX_FILTER_EVENTS 300
> > #define MAX_TEST_EVENTS 10
> >
> > +#define PMU_EVENT_FILTER_INVALID_ACTION (KVM_PMU_EVENT_DENY + 1)
> > +#define PMU_EVENT_FILTER_INVALID_FLAGS (KVM_PMU_EVENT_FLAG_MASKED_EVENTS + 1)
>
> flag is a bit mask. Not number. So +1 sounds weird.
> As KVM_PMU_EVENT_FLAGS_VALID_MASK = 1, this happens to get wanted result, though.

We need an invalid flags value, and KVM_PMU_EVENT_FLAGS_VALID_MASK is
actually equal to KVM_PMU_EVENT_FLAG_MASKED_EVENTS.

In kvm.h:

#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS BIT(0)
#define KVM_PMU_EVENT_FLAGS_VALID_MASK (KVM_PMU_EVENT_FLAG_MASKED_EVENTS)

How about this modification:

#define PMU_EVENT_FILTER_INVALID_FLAGS (KVM_PMU_EVENT_FLAGS_VALID_MASK << 1)

>
>
> > +#define PMU_EVENT_FILTER_INVALID_NEVENTS (MAX_FILTER_EVENTS + 1)
> > +
> > /*
> > * This is how the event selector and unit mask are stored in an AMD
> > * core performance event-select register. Intel's format is similar,
> > @@ -757,6 +761,8 @@ static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
> >
> > static void test_filter_ioctl(struct kvm_vcpu *vcpu)
> > {
> > + uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
> > + struct __kvm_pmu_event_filter f;
> > uint64_t e = ~0ul;
> > int r;
> >
> > @@ -777,6 +783,26 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
> > KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
> > KVM_PMU_EVENT_ALLOW);
> > TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
> > +
> > + f = base_event_filter;
> > + f.action = PMU_EVENT_FILTER_INVALID_ACTION;
> > + r = do_vcpu_set_pmu_event_filter(vcpu, &f);
> > + TEST_ASSERT(r, "Set invalid action is expected to fail");
> > +
> > + f = base_event_filter;
> > + f.flags = PMU_EVENT_FILTER_INVALID_FLAGS;
> > + r = do_vcpu_set_pmu_event_filter(vcpu, &f);
> > + TEST_ASSERT(r, "Set invalid flags is expected to fail");
> > +
> > + f = base_event_filter;
> > + f.nevents = PMU_EVENT_FILTER_INVALID_NEVENTS;
> > + r = do_vcpu_set_pmu_event_filter(vcpu, &f);
> > + TEST_ASSERT(r, "Exceeding the max number of filter events should fail");
> > +
> > + f = base_event_filter;
> > + f.fixed_counter_bitmap = ~GENMASK_ULL(nr_fixed_counters, 0);
> > + r = do_vcpu_set_pmu_event_filter(vcpu, &f);
> > + TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
> > }
> >
> > int main(int argc, char *argv[])
> > --
> > 2.39.3
> >
>
> --
> Isaku Yamahata <[email protected]>

2023-07-19 04:21:36

by Jinrong Liang

Subject: Re: [PATCH v4 3/6] KVM: selftests: Introduce __kvm_pmu_event_filter to improve event filter settings

On Wed, Jul 19, 2023 at 08:02, Isaku Yamahata <[email protected]> wrote:
>
> On Mon, Jul 17, 2023 at 02:23:40PM +0800,
> Jinrong Liang <[email protected]> wrote:
>
> > From: Jinrong Liang <[email protected]>
> >
> > Add a custom "__kvm_pmu_event_filter" structure to improve PMU event
> > filter settings. This simplifies event filter setup by organizing all of
> > the filter parameters in a single, cleaner structure.
> >
> > Signed-off-by: Jinrong Liang <[email protected]>
> > ---
> > .../kvm/x86_64/pmu_event_filter_test.c | 179 +++++++++---------
> > 1 file changed, 87 insertions(+), 92 deletions(-)
> >
> > diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> > index 5ac05e64bec9..ffcbbf25b29b 100644
> > --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> > +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> > @@ -28,6 +28,10 @@
> >
> > #define NUM_BRANCHES 42
> >
> > +/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
> > +#define MAX_FILTER_EVENTS 300
> > +#define MAX_TEST_EVENTS 10
> > +
> > /*
> > * This is how the event selector and unit mask are stored in an AMD
> > * core performance event-select register. Intel's format is similar,
> > @@ -69,21 +73,33 @@
> >
> > #define INST_RETIRED EVENT(0xc0, 0)
> >
> > +struct __kvm_pmu_event_filter {
> > + __u32 action;
> > + __u32 nevents;
> > + __u32 fixed_counter_bitmap;
> > + __u32 flags;
> > + __u32 pad[4];
> > + __u64 events[MAX_FILTER_EVENTS];
> > +};
> > +
> > /*
> > * This event list comprises Intel's eight architectural events plus
> > * AMD's "retired branch instructions" for Zen[123] (and possibly
> > * other AMD CPUs).
> > */
> > -static const uint64_t event_list[] = {
> > - EVENT(0x3c, 0),
> > - INST_RETIRED,
> > - EVENT(0x3c, 1),
> > - EVENT(0x2e, 0x4f),
> > - EVENT(0x2e, 0x41),
> > - EVENT(0xc4, 0),
> > - EVENT(0xc5, 0),
> > - EVENT(0xa4, 1),
> > - AMD_ZEN_BR_RETIRED,
> > +static const struct __kvm_pmu_event_filter base_event_filter = {
> > + .nevents = ARRAY_SIZE(base_event_filter.events),
> > + .events = {
> > + EVENT(0x3c, 0),
> > + INST_RETIRED,
> > + EVENT(0x3c, 1),
> > + EVENT(0x2e, 0x4f),
> > + EVENT(0x2e, 0x41),
> > + EVENT(0xc4, 0),
> > + EVENT(0xc5, 0),
> > + EVENT(0xa4, 1),
> > + AMD_ZEN_BR_RETIRED,
> > + },
> > };
> >
> > struct {
> > @@ -225,47 +241,11 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
> > return !r;
> > }
> >
> > -static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
> > -{
> > - struct kvm_pmu_event_filter *f;
> > - int size = sizeof(*f) + nevents * sizeof(f->events[0]);
> > -
> > - f = malloc(size);
> > - TEST_ASSERT(f, "Out of memory");
> > - memset(f, 0, size);
> > - f->nevents = nevents;
> > - return f;
> > -}
> > -
> > -
> > -static struct kvm_pmu_event_filter *
> > -create_pmu_event_filter(const uint64_t event_list[], int nevents,
> > - uint32_t action, uint32_t flags)
> > -{
> > - struct kvm_pmu_event_filter *f;
> > - int i;
> > -
> > - f = alloc_pmu_event_filter(nevents);
> > - f->action = action;
> > - f->flags = flags;
> > - for (i = 0; i < nevents; i++)
> > - f->events[i] = event_list[i];
> > -
> > - return f;
> > -}
> > -
> > -static struct kvm_pmu_event_filter *event_filter(uint32_t action)
> > -{
> > - return create_pmu_event_filter(event_list,
> > - ARRAY_SIZE(event_list),
> > - action, 0);
> > -}
> > -
> > /*
> > * Remove the first occurrence of 'event' (if any) from the filter's
> > * event list.
> > */
> > -static void remove_event(struct kvm_pmu_event_filter *f, uint64_t event)
> > +static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
> > {
> > bool found = false;
> > int i;
> > @@ -313,66 +293,70 @@ static void test_without_filter(struct kvm_vcpu *vcpu)
> > }
> >
> > static void test_with_filter(struct kvm_vcpu *vcpu,
> > - struct kvm_pmu_event_filter *f)
> > + struct __kvm_pmu_event_filter *__f)
> > {
> > + struct kvm_pmu_event_filter *f = (void *)__f;
> > +
> > vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
> > run_vcpu_and_sync_pmc_results(vcpu);
> > }
> >
> > static void test_amd_deny_list(struct kvm_vcpu *vcpu)
> > {
> > - uint64_t event = EVENT(0x1C2, 0);
> > - struct kvm_pmu_event_filter *f;
> > + struct __kvm_pmu_event_filter f = base_event_filter;
> >
> > - f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
> > - test_with_filter(vcpu, f);
> > - free(f);
> > + f.action = KVM_PMU_EVENT_DENY;
> > + f.nevents = 1;
> > + f.events[0] = EVENT(0x1C2, 0);
> > + test_with_filter(vcpu, &f);
>
> This overwrite all members. We can use designated initializer.
> struct __kvm_pmu_event_filter f = {
> .action = KVM_PMU_EVENT_DENY,
> .nevents = 1,
> .events = {
> EVENT(0x1C2, 0),
> },
> };

LGTM.

>
> Except this, looks good to me.
> Reviewed-by: Isaku Yamahata <[email protected]>
>
> Thanks,
>
> >
> > ASSERT_PMC_COUNTING_INSTRUCTIONS();
> > }
> >
> > static void test_member_deny_list(struct kvm_vcpu *vcpu)
> > {
> > - struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
> > + struct __kvm_pmu_event_filter f = base_event_filter;
> >
> > - test_with_filter(vcpu, f);
> > - free(f);
> > + f.action = KVM_PMU_EVENT_DENY;
> > + test_with_filter(vcpu, &f);
> >
> > ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
> > }
> >
> > static void test_member_allow_list(struct kvm_vcpu *vcpu)
> > {
> > - struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
> > + struct __kvm_pmu_event_filter f = base_event_filter;
> >
> > - test_with_filter(vcpu, f);
> > - free(f);
> > + f.action = KVM_PMU_EVENT_ALLOW;
> > + test_with_filter(vcpu, &f);
> >
> > ASSERT_PMC_COUNTING_INSTRUCTIONS();
> > }
> >
> > static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
> > {
> > - struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
> > + struct __kvm_pmu_event_filter f = base_event_filter;
> > +
> > + f.action = KVM_PMU_EVENT_DENY;
> >
> > - remove_event(f, INST_RETIRED);
> > - remove_event(f, INTEL_BR_RETIRED);
> > - remove_event(f, AMD_ZEN_BR_RETIRED);
> > - test_with_filter(vcpu, f);
> > - free(f);
> > + remove_event(&f, INST_RETIRED);
> > + remove_event(&f, INTEL_BR_RETIRED);
> > + remove_event(&f, AMD_ZEN_BR_RETIRED);
> > + test_with_filter(vcpu, &f);
> >
> > ASSERT_PMC_COUNTING_INSTRUCTIONS();
> > }
> >
> > static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
> > {
> > - struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
> > + struct __kvm_pmu_event_filter f = base_event_filter;
> >
> > - remove_event(f, INST_RETIRED);
> > - remove_event(f, INTEL_BR_RETIRED);
> > - remove_event(f, AMD_ZEN_BR_RETIRED);
> > - test_with_filter(vcpu, f);
> > - free(f);
> > + f.action = KVM_PMU_EVENT_ALLOW;
> > +
> > + remove_event(&f, INST_RETIRED);
> > + remove_event(&f, INTEL_BR_RETIRED);
> > + remove_event(&f, AMD_ZEN_BR_RETIRED);
> > + test_with_filter(vcpu, &f);
> >
> > ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
> > }
> > @@ -567,19 +551,16 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu,
> > const uint64_t masked_events[],
> > const int nmasked_events)
> > {
> > - struct kvm_pmu_event_filter *f;
> > + struct __kvm_pmu_event_filter f = {
> > + .nevents = nmasked_events,
> > + .action = KVM_PMU_EVENT_ALLOW,
> > + .flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
> > + };
> >
> > - f = create_pmu_event_filter(masked_events, nmasked_events,
> > - KVM_PMU_EVENT_ALLOW,
> > - KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
> > - test_with_filter(vcpu, f);
> > - free(f);
> > + memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events);
> > + test_with_filter(vcpu, &f);
> > }
> >
> > -/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
> > -#define MAX_FILTER_EVENTS 300
> > -#define MAX_TEST_EVENTS 10
> > -
> > #define ALLOW_LOADS BIT(0)
> > #define ALLOW_STORES BIT(1)
> > #define ALLOW_LOADS_STORES BIT(2)
> > @@ -751,17 +732,27 @@ static void test_masked_events(struct kvm_vcpu *vcpu)
> > run_masked_events_tests(vcpu, events, nevents);
> > }
> >
> > -static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
> > - int nevents, uint32_t flags)
> > +static int do_vcpu_set_pmu_event_filter(struct kvm_vcpu *vcpu,
> > + struct __kvm_pmu_event_filter *__f)
> > {
> > - struct kvm_pmu_event_filter *f;
> > - int r;
> > + struct kvm_pmu_event_filter *f = (void *)__f;
> >
> > - f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
> > - r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
> > - free(f);
> > + return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
> > +}
> > +
> > +static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
> > + uint32_t flags, uint32_t action)
> > +{
> > + struct __kvm_pmu_event_filter f = {
> > + .nevents = 1,
> > + .flags = flags,
> > + .action = action,
> > + .events = {
> > + event,
> > + },
> > + };
> >
> > - return r;
> > + return do_vcpu_set_pmu_event_filter(vcpu, &f);
> > }
> >
> > static void test_filter_ioctl(struct kvm_vcpu *vcpu)
> > @@ -773,14 +764,18 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
> > * Unfortunately having invalid bits set in event data is expected to
> > * pass when flags == 0 (bits other than eventsel+umask).
> > */
> > - r = run_filter_test(vcpu, &e, 1, 0);
> > + r = set_pmu_single_event_filter(vcpu, e, 0, KVM_PMU_EVENT_ALLOW);
> > TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
> >
> > - r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
> > + r = set_pmu_single_event_filter(vcpu, e,
> > + KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
> > + KVM_PMU_EVENT_ALLOW);
> > TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");
> >
> > e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
> > - r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
> > + r = set_pmu_single_event_filter(vcpu, e,
> > + KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
> > + KVM_PMU_EVENT_ALLOW);
> > TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
> > }
> >
> > --
> > 2.39.3
> >
>
> --
> Isaku Yamahata <[email protected]>