2024-04-19 06:56:52

by Mi, Dapeng

Subject: [kvm-unit-tests Patch v4 16/17] x86: pmu: Adjust lower boundary of branch-misses event

Since an IBPB command is now issued to force at least one branch miss,
the lower boundary of the branch misses event is raised to 1 by
default. For CPUs without IBPB support, dynamically adjust the lower
boundary back to 0 to avoid false positives.

Signed-off-by: Dapeng Mi <[email protected]>
---
x86/pmu.c | 25 +++++++++++++++++++++----
1 file changed, 21 insertions(+), 4 deletions(-)
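Note: the has_ibpb() helper used below is assumed to be introduced by
an earlier patch in this series; a minimal sketch on top of the
existing this_cpu_has() API might look like this (the feature-flag
names are assumptions):

    static inline bool has_ibpb(void)
    {
            /*
             * IBPB is enumerated via Intel's SPEC_CTRL bit or AMD's
             * dedicated IBPB CPUID bit.
             */
            return this_cpu_has(X86_FEATURE_SPEC_CTRL) ||
                   this_cpu_has(X86_FEATURE_AMD_IBPB);
    }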

diff --git a/x86/pmu.c b/x86/pmu.c
index 0b3dd1ba1766..e0da522c004b 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -73,12 +73,12 @@ struct pmu_event {
{"llc references", 0x4f2e, 1, 2*N},
{"llc misses", 0x412e, 1, 1*N},
{"branches", 0x00c4, 1*N, 1.1*N},
- {"branch misses", 0x00c5, 0, 0.1*N},
+ {"branch misses", 0x00c5, 1, 0.1*N},
}, amd_gp_events[] = {
{"core cycles", 0x0076, 1*N, 50*N},
{"instructions", 0x00c0, 10*N, 10.2*N},
{"branches", 0x00c2, 1*N, 1.1*N},
- {"branch misses", 0x00c3, 0, 0.1*N},
+ {"branch misses", 0x00c3, 1, 0.1*N},
}, fixed_events[] = {
{"fixed 0", MSR_CORE_PERF_FIXED_CTR0, 10*N, 10.2*N},
{"fixed 1", MSR_CORE_PERF_FIXED_CTR0 + 1, 1*N, 30*N},
@@ -94,6 +94,7 @@ enum {
INTEL_REF_CYCLES_IDX = 2,
INTEL_LLC_MISSES_IDX = 4,
INTEL_BRANCHES_IDX = 5,
+ INTEL_BRANCH_MISS_IDX = 6,
};

/*
@@ -103,6 +104,7 @@ enum {
enum {
AMD_INSTRUCTIONS_IDX = 1,
AMD_BRANCHES_IDX = 2,
+ AMD_BRANCH_MISS_IDX = 3,
};

char *buf;
@@ -166,7 +168,8 @@ static inline void loop(u64 cntrs)
}

static void adjust_events_range(struct pmu_event *gp_events,
- int instruction_idx, int branch_idx)
+ int instruction_idx, int branch_idx,
+ int branch_miss_idx)
{
/*
* If HW supports GLOBAL_CTRL MSR, enabling and disabling PMCs are
@@ -181,6 +184,17 @@ static void adjust_events_range(struct pmu_event *gp_events,
gp_events[branch_idx].min = LOOP_BRANCHES;
gp_events[branch_idx].max = LOOP_BRANCHES;
}
+
+ /*
+	 * For CPUs without IBPB support, there is no way to force a
+	 * branch miss, so the measured branch-miss count may
+	 * legitimately be 0. Thus overwrite the lower boundary of the
+	 * branch misses event with 0 to avoid a false positive.
+ */
+ if (!has_ibpb()) {
+ /* branch misses event */
+ gp_events[branch_miss_idx].min = 0;
+ }
}

volatile uint64_t irq_received;
@@ -885,6 +899,7 @@ int main(int ac, char **av)
{
int instruction_idx;
int branch_idx;
+ int branch_miss_idx;

setup_vm();
handle_irq(PMI_VECTOR, cnt_overflow);
@@ -901,6 +916,7 @@ int main(int ac, char **av)
gp_events_size = sizeof(intel_gp_events)/sizeof(intel_gp_events[0]);
instruction_idx = INTEL_INSTRUCTIONS_IDX;
branch_idx = INTEL_BRANCHES_IDX;
+ branch_miss_idx = INTEL_BRANCH_MISS_IDX;

/*
	 * For legacy Intel CPUs without clflush/clflushopt support,
@@ -917,9 +933,10 @@ int main(int ac, char **av)
gp_events = (struct pmu_event *)amd_gp_events;
instruction_idx = AMD_INSTRUCTIONS_IDX;
branch_idx = AMD_BRANCHES_IDX;
+ branch_miss_idx = AMD_BRANCH_MISS_IDX;
report_prefix_push("AMD");
}
- adjust_events_range(gp_events, instruction_idx, branch_idx);
+ adjust_events_range(gp_events, instruction_idx, branch_idx, branch_miss_idx);

printf("PMU version: %d\n", pmu.version);
printf("GP counters: %d\n", pmu.nr_gp_counters);
--
2.34.1