2023-06-09 16:41:37

by Wang, Weilin

Subject: [PATCH v2 2/3] perf test: Add skip list for metrics known to fail

Add a skip list for metrics that are known to fail. Some metrics are very
likely to fail due to multiplexing or other errors, so add all of these
flaky metrics to the skip list.

Signed-off-by: Weilin Wang <[email protected]>
---
.../tests/shell/lib/perf_metric_validation.py | 31 ++++++++++++++++---
.../lib/perf_metric_validation_rules.json | 11 +++++++
2 files changed, 38 insertions(+), 4 deletions(-)
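
Note for reviewers (not part of the commit message): below is a minimal,
self-contained sketch of the intended "SkipList" semantics, using
illustrative names and sample data only (filter_rules and the inline dict
are hypothetical, not the patch's API; the patch's actual implementation is
remove_unsupported_rules in the diff). Any rule that references a skipped
metric is dropped before validation runs:

  def filter_rules(rules, skiplist):
      # Keep only rules whose metrics are all outside the skip list.
      skipped = set(skiplist)
      return [r for r in rules
              if all(m["Name"] not in skipped for m in r["Metrics"])]

  data = {
      "SkipList": ["tma_false_sharing"],
      "RelationshipRules": [
          {"RuleIndex": 1, "Metrics": [{"Name": "tma_false_sharing"}]},
          {"RuleIndex": 2, "Metrics": [{"Name": "tma_retiring"}]},
      ],
  }
  # Only RuleIndex 2 survives; RuleIndex 1 touches a skipped metric.
  print(filter_rules(data["RelationshipRules"], data["SkipList"]))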

diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index 7bc5b9a5f62f..e59941089350 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -12,7 +12,7 @@ class Validator:
self.reportfname = reportfname
self.rules = None
self.collectlist=metrics
- self.metrics = set()
+ self.metrics = set(metrics)
self.tolerance = t

self.workloads = [x for x in workload.split(",") if x]
@@ -148,6 +148,7 @@ class Validator:
self.errlist.append("Metric '%s' is not collected"%(name))
elif val < 0:
negmetric.add("{0}(={1:.4f})".format(name, val))
+ self.collectlist[0].append(name)
else:
pcnt += 1
tcnt += 1
@@ -266,6 +267,7 @@ class Validator:
passcnt += 1
else:
faillist.append({'MetricName':m['Name'], 'CollectedValue':result})
+ self.collectlist[0].append(m['Name'])

self.totalcnt += totalcnt
self.passedcnt += passcnt
@@ -348,7 +350,7 @@ class Validator:
if rule["TestType"] == "RelationshipTest":
metrics = [m["Name"] for m in rule["Metrics"]]
if not any(m not in collectlist[0] for m in metrics):
- collectlist[rule["RuleIndex"]] = set(metrics)
+ collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))]

for idx, metrics in collectlist.items():
if idx == 0: wl = "sleep 0.5".split()
@@ -356,9 +358,12 @@ class Validator:
for metric in metrics:
command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
command.extend(wl)
+ print(" ".join(command))
cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
data = [x+'}' for x in cmd.stderr.split('}\n') if x]
self.convert(data, idx)
+ self.collectlist = dict()
+ self.collectlist[0] = list()
# End of Collector and Converter

# Start of Rule Generator
@@ -386,6 +391,20 @@ class Validator:

return

+ def remove_unsupported_rules(self, rules, skiplist: set = None):
+ for m in skiplist:
+ self.metrics.discard(m)
+ new_rules = []
+ for rule in rules:
+ add_rule = True
+ for m in rule["Metrics"]:
+ if m["Name"] not in self.metrics:
+ add_rule = False
+ break
+ if add_rule:
+ new_rules.append(rule)
+ return new_rules
+
def create_rules(self):
"""
Create full rules which includes:
@@ -394,7 +413,10 @@ class Validator:

Reindex all the rules to avoid repeated RuleIndex
"""
- self.rules = self.read_json(self.rulefname)['RelationshipRules']
+ data = self.read_json(self.rulefname)
+ rules = data['RelationshipRules']
+ skiplist = set(data['SkipList'])
+ self.rules = self.remove_unsupported_rules(rules, skiplist)
pctgrule = {'RuleIndex':0,
'TestType':'SingleMetricTest',
'RangeLower':'0',
@@ -453,7 +475,8 @@ class Validator:

The final report is written into a JSON file.
'''
- self.parse_perf_metrics()
+ if not self.collectlist:
+ self.parse_perf_metrics()
self.create_rules()
for i in range(0, len(self.workloads)):
self._init_data()
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation_rules.json b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
index debaa910da9f..eb6f59e018b7 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
+++ b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
@@ -1,4 +1,15 @@
{
+ "SkipList": [
+ "tsx_aborted_cycles",
+ "tsx_transactional_cycles",
+ "C2_Pkg_Residency",
+ "C6_Pkg_Residency",
+ "C1_Core_Residency",
+ "C6_Core_Residency",
+ "tma_false_sharing",
+ "tma_remote_cache",
+ "tma_contested_accesses"
+ ],
"RelationshipRules": [
{
"RuleIndex": 1,
--
2.39.1