2023-08-10 11:16:23

by Jinrong Liang

Subject: [PATCH v6 0/6] KVM: selftests: Improve PMU event filter settings and add test cases

Hi,

This patch series improves the PMU event filter settings with a cleaner,
more organized structure and adds several test cases related to PMU event
filters.

These changes help to ensure that KVM's PMU event filter functions as expected
in all supported use cases.

Any feedback or suggestions are greatly appreciated.

Sincerely,
Jinrong Liang

Changelog:

v6:
- Rebased to 74c2185c5b74 (tag: kvm-x86-next-2023.08.02);
- Use TEST_ASSERT_EQ() instead of ASSERT_EQ();
- Fix a bug in the x86 properties for Intel PMU patch from v5;

v5:
https://lore.kernel.org/kvm/[email protected]/T/


Jinrong Liang (6):
KVM: selftests: Add x86 properties for Intel PMU in processor.h
KVM: selftests: Drop the return of remove_event()
KVM: selftests: Introduce __kvm_pmu_event_filter to improve event
filter settings
KVM: selftests: Add test cases for unsupported PMU event filter input
values
KVM: selftests: Test if event filter meets expectations on fixed
counters
KVM: selftests: Test gp event filters don't affect fixed event filters

.../selftests/kvm/include/x86_64/processor.h | 5 +
.../kvm/x86_64/pmu_event_filter_test.c | 317 ++++++++++++------
2 files changed, 228 insertions(+), 94 deletions(-)


base-commit: 74c2185c5b74fd0ae91133ad5afe8684f6a02b91
--
2.39.3



2023-08-10 11:21:12

by Jinrong Liang

Subject: [PATCH v6 3/6] KVM: selftests: Introduce __kvm_pmu_event_filter to improve event filter settings

From: Jinrong Liang <[email protected]>

Add a custom "__kvm_pmu_event_filter" structure to improve PMU event
filter settings. This simplifies event filter setup by gathering all
of the filter parameters into a single, fixed-size structure.

Suggested-by: Sean Christopherson <[email protected]>
Signed-off-by: Jinrong Liang <[email protected]>
---
.../kvm/x86_64/pmu_event_filter_test.c | 182 +++++++++---------
1 file changed, 90 insertions(+), 92 deletions(-)
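
As an illustration of the intended usage, two small before/after sketches
(not part of the patch); base_event_filter, remove_event() and
test_with_filter() are defined in the diff below, and vcpu is assumed to be
an already-created struct kvm_vcpu *:

	/* Before: each test allocates, fills, and frees a heap-backed filter. */
	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);

	remove_event(f, INST_RETIRED);
	test_with_filter(vcpu, f);
	free(f);

	/* After: the fixed-size filter is copied onto the stack and tweaked in place. */
	struct __kvm_pmu_event_filter f = base_event_filter;

	f.action = KVM_PMU_EVENT_ALLOW;
	remove_event(&f, INST_RETIRED);
	test_with_filter(vcpu, &f);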

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 5ac05e64bec9..94f5a89aac40 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -28,6 +28,10 @@

#define NUM_BRANCHES 42

+/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
+#define MAX_FILTER_EVENTS 300
+#define MAX_TEST_EVENTS 10
+
/*
* This is how the event selector and unit mask are stored in an AMD
* core performance event-select register. Intel's format is similar,
@@ -69,21 +73,33 @@

#define INST_RETIRED EVENT(0xc0, 0)

+struct __kvm_pmu_event_filter {
+ __u32 action;
+ __u32 nevents;
+ __u32 fixed_counter_bitmap;
+ __u32 flags;
+ __u32 pad[4];
+ __u64 events[MAX_FILTER_EVENTS];
+};
+
/*
* This event list comprises Intel's eight architectural events plus
* AMD's "retired branch instructions" for Zen[123] (and possibly
* other AMD CPUs).
*/
-static const uint64_t event_list[] = {
- EVENT(0x3c, 0),
- INST_RETIRED,
- EVENT(0x3c, 1),
- EVENT(0x2e, 0x4f),
- EVENT(0x2e, 0x41),
- EVENT(0xc4, 0),
- EVENT(0xc5, 0),
- EVENT(0xa4, 1),
- AMD_ZEN_BR_RETIRED,
+static const struct __kvm_pmu_event_filter base_event_filter = {
+ .nevents = ARRAY_SIZE(base_event_filter.events),
+ .events = {
+ EVENT(0x3c, 0),
+ INST_RETIRED,
+ EVENT(0x3c, 1),
+ EVENT(0x2e, 0x4f),
+ EVENT(0x2e, 0x41),
+ EVENT(0xc4, 0),
+ EVENT(0xc5, 0),
+ EVENT(0xa4, 1),
+ AMD_ZEN_BR_RETIRED,
+ },
};

struct {
@@ -225,47 +241,11 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
return !r;
}

-static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
-{
- struct kvm_pmu_event_filter *f;
- int size = sizeof(*f) + nevents * sizeof(f->events[0]);
-
- f = malloc(size);
- TEST_ASSERT(f, "Out of memory");
- memset(f, 0, size);
- f->nevents = nevents;
- return f;
-}
-
-
-static struct kvm_pmu_event_filter *
-create_pmu_event_filter(const uint64_t event_list[], int nevents,
- uint32_t action, uint32_t flags)
-{
- struct kvm_pmu_event_filter *f;
- int i;
-
- f = alloc_pmu_event_filter(nevents);
- f->action = action;
- f->flags = flags;
- for (i = 0; i < nevents; i++)
- f->events[i] = event_list[i];
-
- return f;
-}
-
-static struct kvm_pmu_event_filter *event_filter(uint32_t action)
-{
- return create_pmu_event_filter(event_list,
- ARRAY_SIZE(event_list),
- action, 0);
-}
-
/*
* Remove the first occurrence of 'event' (if any) from the filter's
* event list.
*/
-static void remove_event(struct kvm_pmu_event_filter *f, uint64_t event)
+static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
{
bool found = false;
int i;
@@ -313,66 +293,73 @@ static void test_without_filter(struct kvm_vcpu *vcpu)
}

static void test_with_filter(struct kvm_vcpu *vcpu,
- struct kvm_pmu_event_filter *f)
+ struct __kvm_pmu_event_filter *__f)
{
+ struct kvm_pmu_event_filter *f = (void *)__f;
+
vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
run_vcpu_and_sync_pmc_results(vcpu);
}

static void test_amd_deny_list(struct kvm_vcpu *vcpu)
{
- uint64_t event = EVENT(0x1C2, 0);
- struct kvm_pmu_event_filter *f;
+ struct __kvm_pmu_event_filter f = {
+ .action = KVM_PMU_EVENT_DENY,
+ .nevents = 1,
+ .events = {
+ EVENT(0x1C2, 0),
+ },
+ };

- f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
- test_with_filter(vcpu, f);
- free(f);
+ test_with_filter(vcpu, &f);

ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_member_deny_list(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
+ struct __kvm_pmu_event_filter f = base_event_filter;

- test_with_filter(vcpu, f);
- free(f);
+ f.action = KVM_PMU_EVENT_DENY;
+ test_with_filter(vcpu, &f);

ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}

static void test_member_allow_list(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
+ struct __kvm_pmu_event_filter f = base_event_filter;

- test_with_filter(vcpu, f);
- free(f);
+ f.action = KVM_PMU_EVENT_ALLOW;
+ test_with_filter(vcpu, &f);

ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
+ struct __kvm_pmu_event_filter f = base_event_filter;

- remove_event(f, INST_RETIRED);
- remove_event(f, INTEL_BR_RETIRED);
- remove_event(f, AMD_ZEN_BR_RETIRED);
- test_with_filter(vcpu, f);
- free(f);
+ f.action = KVM_PMU_EVENT_DENY;
+
+ remove_event(&f, INST_RETIRED);
+ remove_event(&f, INTEL_BR_RETIRED);
+ remove_event(&f, AMD_ZEN_BR_RETIRED);
+ test_with_filter(vcpu, &f);

ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
+ struct __kvm_pmu_event_filter f = base_event_filter;
+
+ f.action = KVM_PMU_EVENT_ALLOW;

- remove_event(f, INST_RETIRED);
- remove_event(f, INTEL_BR_RETIRED);
- remove_event(f, AMD_ZEN_BR_RETIRED);
- test_with_filter(vcpu, f);
- free(f);
+ remove_event(&f, INST_RETIRED);
+ remove_event(&f, INTEL_BR_RETIRED);
+ remove_event(&f, AMD_ZEN_BR_RETIRED);
+ test_with_filter(vcpu, &f);

ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}
@@ -567,19 +554,16 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu,
const uint64_t masked_events[],
const int nmasked_events)
{
- struct kvm_pmu_event_filter *f;
+ struct __kvm_pmu_event_filter f = {
+ .nevents = nmasked_events,
+ .action = KVM_PMU_EVENT_ALLOW,
+ .flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+ };

- f = create_pmu_event_filter(masked_events, nmasked_events,
- KVM_PMU_EVENT_ALLOW,
- KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
- test_with_filter(vcpu, f);
- free(f);
+ memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events);
+ test_with_filter(vcpu, &f);
}

-/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
-#define MAX_FILTER_EVENTS 300
-#define MAX_TEST_EVENTS 10
-
#define ALLOW_LOADS BIT(0)
#define ALLOW_STORES BIT(1)
#define ALLOW_LOADS_STORES BIT(2)
@@ -751,17 +735,27 @@ static void test_masked_events(struct kvm_vcpu *vcpu)
run_masked_events_tests(vcpu, events, nevents);
}

-static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
- int nevents, uint32_t flags)
+static int do_vcpu_set_pmu_event_filter(struct kvm_vcpu *vcpu,
+ struct __kvm_pmu_event_filter *__f)
{
- struct kvm_pmu_event_filter *f;
- int r;
+ struct kvm_pmu_event_filter *f = (void *)__f;

- f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
- r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
- free(f);
+ return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
+}
+
+static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
+ uint32_t flags, uint32_t action)
+{
+ struct __kvm_pmu_event_filter f = {
+ .nevents = 1,
+ .flags = flags,
+ .action = action,
+ .events = {
+ event,
+ },
+ };

- return r;
+ return do_vcpu_set_pmu_event_filter(vcpu, &f);
}

static void test_filter_ioctl(struct kvm_vcpu *vcpu)
@@ -773,14 +767,18 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
* Unfortunately having invalid bits set in event data is expected to
* pass when flags == 0 (bits other than eventsel+umask).
*/
- r = run_filter_test(vcpu, &e, 1, 0);
+ r = set_pmu_single_event_filter(vcpu, e, 0, KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");

- r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+ r = set_pmu_single_event_filter(vcpu, e,
+ KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+ KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");

e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
- r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+ r = set_pmu_single_event_filter(vcpu, e,
+ KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+ KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
}

--
2.39.3


2023-08-11 03:19:31

by Jinrong Liang

Subject: Re: [PATCH v6 3/6] KVM: selftests: Introduce __kvm_pmu_event_filter to improve event filter settings

On 2023/8/10 17:09, Jinrong Liang wrote:
> From: Jinrong Liang <[email protected]>
>
> Add a custom "__kvm_pmu_event_filter" structure to improve PMU event
> filter settings. This simplifies event filter setup by gathering all
> of the filter parameters into a single, fixed-size structure.

I apologize for the oversight in this patch submission. I forgot to
include the "Reviewed-by" tag. Please find the updated information below:

Reviewed-by: Isaku Yamahata <[email protected]>

When reviewing the patch, please take this tag into account.



2023-08-19 10:21:09

by Sean Christopherson

Subject: Re: [PATCH v6 3/6] KVM: selftests: Introduce __kvm_pmu_event_filter to improve event filter settings

On Tue, Aug 15, 2023, Jinrong Liang wrote:
> On Tue, Aug 15, 2023 at 07:49, Isaku Yamahata <[email protected]> wrote:
> >
> > On Thu, Aug 10, 2023 at 05:09:42PM +0800,
> > Jinrong Liang <[email protected]> wrote:
> >
> > > From: Jinrong Liang <[email protected]>
> > >
> > > Add a custom "__kvm_pmu_event_filter" structure to improve PMU event
> > > filter settings. This simplifies event filter setup by gathering all
> > > of the filter parameters into a single, fixed-size structure.
> > >
> > > Suggested-by: Sean Christopherson <[email protected]>
> > > Signed-off-by: Jinrong Liang <[email protected]>
> > > ---
> > > .../kvm/x86_64/pmu_event_filter_test.c | 182 +++++++++---------
> > > 1 file changed, 90 insertions(+), 92 deletions(-)
> > >
> > > diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> > > index 5ac05e64bec9..94f5a89aac40 100644
> > > --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> > > +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> > > @@ -28,6 +28,10 @@
> > >
> > > #define NUM_BRANCHES 42
> > >
> > > +/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
> > > +#define MAX_FILTER_EVENTS 300
> >
> > Can we simply use KVM_PMU_EVENT_FILTER_MAX_EVENTS and remove MAX_FILTER_EVENTS?
>
> I didn't find the definition of KVM_PMU_EVENT_FILTER_MAX_EVENTS in
> selftests. KVM_PMU_EVENT_FILTER_MAX_EVENTS is defined in pmu.c. To use
> it, we need to define it in selftests.

Huh. That seems like something that should be enumerated to userspace.

> > > +#define MAX_TEST_EVENTS 10
> > > +
> > > /*
> > > * This is how the event selector and unit mask are stored in an AMD
> > > * core performance event-select register. Intel's format is similar,
> > > @@ -69,21 +73,33 @@
> > >
> > > #define INST_RETIRED EVENT(0xc0, 0)
> > >
> > > +struct __kvm_pmu_event_filter {
> > > + __u32 action;
> > > + __u32 nevents;
> > > + __u32 fixed_counter_bitmap;
> > > + __u32 flags;
> > > + __u32 pad[4];
> > > + __u64 events[MAX_FILTER_EVENTS];
> > > +};
> > > +
> >
> > Is this same to struct kvm_pmu_event_filter?
>
> In tools/arch/x86/include/uapi/asm/kvm.h
>
> /* for KVM_CAP_PMU_EVENT_FILTER */
> struct kvm_pmu_event_filter {
> __u32 action;
> __u32 nevents;
> __u32 fixed_counter_bitmap;
> __u32 flags;
> __u32 pad[4];
> __u64 events[];
> };

To more directly answer Isaku's question:

They're *basically* the same, and have an identical layout, but the struct defined
by KVM uses a flexible array because the number of events comes from userspace
and forcing userspace to create a 1KiB+ object just to define a single event
filter would be obnoxious.
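
For reference, a sketch of the flexible-array usage from the userspace side
(this mirrors the alloc_pmu_event_filter() helper the patch removes; the
allocation is sized to exactly the number of events needed):

	struct kvm_pmu_event_filter *f;
	int size = sizeof(*f) + nevents * sizeof(f->events[0]);

	f = malloc(size);
	TEST_ASSERT(f, "Out of memory");
	memset(f, 0, size);
	f->nevents = nevents;
	/* ... fill f->events[], set action/flags, do the ioctl, then free(f) ... */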

There are alternatives, e.g. using a struct overlay to set a single entry:

	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};

	memset(&buffer, 0, sizeof(buffer));
	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	buffer.entry.data = msr_value;

but that gets annoying (and IMO confusing) because of the nested structs.
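
For comparison, a sketch of the approach the patch takes instead: keep a
fixed-size struct in the test and cast it to the UAPI type only at the ioctl
boundary (this is what do_vcpu_set_pmu_event_filter() in the patch does):

	struct kvm_pmu_event_filter *f = (void *)__f;

	return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);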

I'll massage the changelog to call out the alternative, and why it's undesirable.