2022-07-07 20:55:43

by Ian Rogers

[permalink] [raw]
Subject: [PATCH v6 2/2] perf test: Json format checking

From: Claire Jensen <[email protected]>

Add field checking tests for perf stat JSON output.
Sanity checks that the expected number of fields are present, that the
expected keys are present and that they have the correct values.

Signed-off-by: Claire Jensen <[email protected]>
Signed-off-by: Ian Rogers <[email protected]>
---
.../tests/shell/lib/perf_json_output_lint.py | 95 +++++++++++
tools/perf/tests/shell/stat+json_output.sh | 147 ++++++++++++++++++
2 files changed, 242 insertions(+)
create mode 100644 tools/perf/tests/shell/lib/perf_json_output_lint.py
create mode 100755 tools/perf/tests/shell/stat+json_output.sh

diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
new file mode 100644
index 000000000000..aaa4a8677b6c
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Basic sanity check of perf JSON output as specified in the man page.
+
+import argparse
+import sys
+import json
+
+ap = argparse.ArgumentParser()
+ap.add_argument('--no-args', action='store_true')
+ap.add_argument('--interval', action='store_true')
+ap.add_argument('--system-wide-no-aggr', action='store_true')
+ap.add_argument('--system-wide', action='store_true')
+ap.add_argument('--event', action='store_true')
+ap.add_argument('--per-core', action='store_true')
+ap.add_argument('--per-thread', action='store_true')
+ap.add_argument('--per-die', action='store_true')
+ap.add_argument('--per-node', action='store_true')
+ap.add_argument('--per-socket', action='store_true')
+args = ap.parse_args()
+
+Lines = sys.stdin.readlines()
+
+def isfloat(num):
+ try:
+ float(num)
+ return True
+ except ValueError:
+ return False
+
+
+def isint(num):
+ try:
+ int(num)
+ return True
+ except ValueError:
+ return False
+
+def is_counter_value(num):
+ return isfloat(num) or num == '<not counted>' or num == '<not supported>'
+
+def check_json_output(expected_items):
+ if expected_items != -1:
+ for line in Lines:
+ if 'failed' not in line:
+ count = 0
+ count = line.count(',')
+ if count != expected_items and (count == 1 or count == 2) and 'metric-value' in line:
+ # Events that generate >1 metric may have isolated metric
+ # values and possibly have an interval prefix.
+ continue
+ if count != expected_items:
+ raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
+ f' in \'{line}\'')
+ checks = {
+ 'aggregate-number': lambda x: isfloat(x),
+ 'core': lambda x: True,
+ 'counter-value': lambda x: is_counter_value(x),
+ 'cgroup': lambda x: True,
+ 'cpu': lambda x: isint(x),
+ 'die': lambda x: True,
+ 'event': lambda x: True,
+ 'event-runtime': lambda x: isfloat(x),
+ 'interval': lambda x: isfloat(x),
+ 'metric-unit': lambda x: True,
+ 'metric-value': lambda x: isfloat(x),
+ 'node': lambda x: True,
+ 'pcnt-running': lambda x: isfloat(x),
+ 'socket': lambda x: True,
+ 'thread': lambda x: True,
+ 'unit': lambda x: True,
+ }
+ input = '[\n' + ','.join(Lines) + '\n]'
+ for item in json.loads(input):
+ for key, value in item.items():
+ if key not in checks:
+ raise RuntimeError(f'Unexpected key: key={key} value={value}')
+ if not checks[key](value):
+ raise RuntimeError(f'Check failed for: key={key} value={value}')
+
+
+try:
+ if args.no_args or args.system_wide or args.event:
+ expected_items = 6
+ elif args.interval or args.per_thread or args.system_wide_no_aggr:
+ expected_items = 7
+ elif args.per_core or args.per_socket or args.per_node or args.per_die:
+ expected_items = 8
+ else:
+ # If no option is specified, don't check the number of items.
+ expected_items = -1
+ check_json_output(expected_items)
+except:
+ print('Test failed for input:\n' + '\n'.join(Lines))
+ raise
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
new file mode 100755
index 000000000000..ea8714a36051
--- /dev/null
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -0,0 +1,147 @@
+#!/bin/bash
+# perf stat JSON output linter
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Checks various perf stat JSON output commands for the
+# correct number of fields.
+
+set -e
+
+pythonchecker=$(dirname $0)/lib/perf_json_output_lint.py
+if [ "x$PYTHON" == "x" ]
+then
+ if which python3 > /dev/null
+ then
+ PYTHON=python3
+ elif which python > /dev/null
+ then
+ PYTHON=python
+ else
+ echo Skipping test, python not detected please set environment variable PYTHON.
+ exit 2
+ fi
+fi
+
+# Return true if perf_event_paranoid is > $1 and not running as root.
+function ParanoidAndNotRoot()
+{
+ [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ]
+}
+
+check_no_args()
+{
+ echo -n "Checking json output: no args "
+ perf stat -j true 2>&1 | $PYTHON $pythonchecker --no-args
+ echo "[Success]"
+}
+
+check_system_wide()
+{
+ echo -n "Checking json output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j -a true 2>&1 | $PYTHON $pythonchecker --system-wide
+ echo "[Success]"
+}
+
+check_system_wide_no_aggr()
+{
+ echo -n "Checking json output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ echo -n "Checking json output: system wide no aggregation "
+ perf stat -j -A -a --no-merge true 2>&1 | $PYTHON $pythonchecker --system-wide-no-aggr
+ echo "[Success]"
+}
+
+check_interval()
+{
+ echo -n "Checking json output: interval "
+ perf stat -j -I 1000 true 2>&1 | $PYTHON $pythonchecker --interval
+ echo "[Success]"
+}
+
+
+check_event()
+{
+ echo -n "Checking json output: event "
+ perf stat -j -e cpu-clock true 2>&1 | $PYTHON $pythonchecker --event
+ echo "[Success]"
+}
+
+check_per_core()
+{
+ echo -n "Checking json output: per core "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-core -a true 2>&1 | $PYTHON $pythonchecker --per-core
+ echo "[Success]"
+}
+
+check_per_thread()
+{
+ echo -n "Checking json output: per thread "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-thread -a true 2>&1 | $PYTHON $pythonchecker --per-thread
+ echo "[Success]"
+}
+
+check_per_die()
+{
+ echo -n "Checking json output: per die "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-die -a true 2>&1 | $PYTHON $pythonchecker --per-die
+ echo "[Success]"
+}
+
+check_per_node()
+{
+ echo -n "Checking json output: per node "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-node -a true 2>&1 | $PYTHON $pythonchecker --per-node
+ echo "[Success]"
+}
+
+check_per_socket()
+{
+ echo -n "Checking json output: per socket "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-socket -a true 2>&1 | $PYTHON $pythonchecker --per-socket
+ echo "[Success]"
+}
+
+check_no_args
+check_system_wide
+check_system_wide_no_aggr
+check_interval
+check_event
+check_per_core
+check_per_thread
+check_per_die
+check_per_node
+check_per_socket
+exit 0
--
2.37.0.rc0.161.g10f37bed90-goog


2022-07-08 09:25:25

by Thomas Richter

[permalink] [raw]
Subject: Re: [PATCH v6 2/2] perf test: Json format checking

On 7/7/22 22:12, Ian Rogers wrote:
> From: Claire Jensen <[email protected]>
>
> Add field checking tests for perf stat JSON output.
> Sanity checks the expected number of fields are present, that the
> expected keys are present and they have the correct values.
>
> Signed-off-by: Claire Jensen <[email protected]>
> Signed-off-by: Ian Rogers <[email protected]>
> ---
> .../tests/shell/lib/perf_json_output_lint.py | 95 +++++++++++
> tools/perf/tests/shell/stat+json_output.sh | 147 ++++++++++++++++++
> 2 files changed, 242 insertions(+)
> create mode 100644 tools/perf/tests/shell/lib/perf_json_output_lint.py
> create mode 100755 tools/perf/tests/shell/stat+json_output.sh
>
....
I wonder if it is really necessary to have a python file to post process the
perf stat output?

With
commit 7473ee56dbc9 ("perf test: Add checking for perf stat CSV output.")
the same approach was done which led to issues on s390 and required an additional
patch to fix this:
commit ec906102e5b7 ("perf test: Fix "perf stat CSV output linter" test on s390").

I wonder if you can do the perf stat output checking using bash/linux tools as it
was done in commit ec906102e5b7. This would make maintenance much easier.

Just my 2 cents...

--
Thomas Richter, Dept 3303, IBM s390 Linux Development, Boeblingen, Germany
--
Vorsitzender des Aufsichtsrats: Gregor Pillen
Geschäftsführung: David Faller
Sitz der Gesellschaft: Böblingen / Registergericht: Amtsgericht Stuttgart, HRB 243294

2022-07-08 16:06:13

by Ian Rogers

[permalink] [raw]
Subject: Re: [PATCH v6 2/2] perf test: Json format checking

On Fri, Jul 8, 2022 at 1:31 AM Thomas Richter <[email protected]> wrote:
>
> On 7/7/22 22:12, Ian Rogers wrote:
> > From: Claire Jensen <[email protected]>
> >
> > Add field checking tests for perf stat JSON output.
> > Sanity checks the expected number of fields are present, that the
> > expected keys are present and they have the correct values.
> >
> > Signed-off-by: Claire Jensen <[email protected]>
> > Signed-off-by: Ian Rogers <[email protected]>
> > ---
> > .../tests/shell/lib/perf_json_output_lint.py | 95 +++++++++++
> > tools/perf/tests/shell/stat+json_output.sh | 147 ++++++++++++++++++
> > 2 files changed, 242 insertions(+)
> > create mode 100644 tools/perf/tests/shell/lib/perf_json_output_lint.py
> > create mode 100755 tools/perf/tests/shell/stat+json_output.sh
> >
> ....
> I wonder if it is really necessary to have a python file to post process the
> perf stat output?
>
> With
> commit 7473ee56dbc9 ("perf test: Add checking for perf stat CSV output.")
> the same approach was done which led to issues on s390 and required an additional
> patch to fix this:
> commit ec906102e5b7 ("perf test: Fix "perf stat CSV output linter" test on s390").
>
> I wonder if you can do the perf stat output checking using bash/linux tools as it
> was done in commit ec906102e5b7. This would make maintenance much easier.
>
> Just me 2 cents...

Hi Thomas,

In this case using python avoids writing a JSON decoder, which would
be harder to do in bash although there is perhaps a linter or some
such we could use. That'd wind up being another dependency :-/

Thanks,
Ian

> --
> Thomas Richter, Dept 3303, IBM s390 Linux Development, Boeblingen, Germany
> --
> Vorsitzender des Aufsichtsrats: Gregor Pillen
> Geschäftsführung: David Faller
> Sitz der Gesellschaft: Böblingen / Registergericht: Amtsgericht Stuttgart, HRB 243294

2022-07-18 15:49:32

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v6 2/2] perf test: Json format checking

Em Mon, Jul 18, 2022 at 12:44:59PM -0300, Arnaldo Carvalho de Melo escreveu:
> Em Thu, Jul 07, 2022 at 01:12:13PM -0700, Ian Rogers escreveu:
> > From: Claire Jensen <[email protected]>
> >
> > Add field checking tests for perf stat JSON output.
> > Sanity checks the expected number of fields are present, that the
> > expected keys are present and they have the correct values.
>
> it isn't installing the lib:
>
> [root@five ~]# perf test -v json
> 91: perf stat JSON output linter :
> --- start ---
> test child forked, pid 4086678
> Checking json output: no args python3: can't open file '/var/home/acme/libexec/perf-core/tests/shell/lib/perf_json_output_lint.py': [Errno 2] No such file or directory
> test child finished with -2
> ---- end ----
> perf stat JSON output linter: Skip
> [root@five ~]#
>
> I'm trying to fix, but please test it after installing...


It should:

install-tests: all install-gtk
$(call QUIET_INSTALL, tests) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
$(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
$(INSTALL) tests/pe-file.exe* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
$(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
$(INSTALL) tests/shell/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
$(INSTALL) tests/shell/lib/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'

/me checking...

- Arnaldo

> - Arnaldo
>
>
> > Signed-off-by: Claire Jensen <[email protected]>
> > Signed-off-by: Ian Rogers <[email protected]>
> > ---
> > .../tests/shell/lib/perf_json_output_lint.py | 95 +++++++++++
> > tools/perf/tests/shell/stat+json_output.sh | 147 ++++++++++++++++++
> > 2 files changed, 242 insertions(+)
> > create mode 100644 tools/perf/tests/shell/lib/perf_json_output_lint.py
> > create mode 100755 tools/perf/tests/shell/stat+json_output.sh
> >
> > diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
> > new file mode 100644
> > index 000000000000..aaa4a8677b6c
> > --- /dev/null
> > +++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
> > @@ -0,0 +1,95 @@
> > +#!/usr/bin/python
> > +# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
> > +# Basic sanity check of perf JSON output as specified in the man page.
> > +
> > +import argparse
> > +import sys
> > +import json
> > +
> > +ap = argparse.ArgumentParser()
> > +ap.add_argument('--no-args', action='store_true')
> > +ap.add_argument('--interval', action='store_true')
> > +ap.add_argument('--system-wide-no-aggr', action='store_true')
> > +ap.add_argument('--system-wide', action='store_true')
> > +ap.add_argument('--event', action='store_true')
> > +ap.add_argument('--per-core', action='store_true')
> > +ap.add_argument('--per-thread', action='store_true')
> > +ap.add_argument('--per-die', action='store_true')
> > +ap.add_argument('--per-node', action='store_true')
> > +ap.add_argument('--per-socket', action='store_true')
> > +args = ap.parse_args()
> > +
> > +Lines = sys.stdin.readlines()
> > +
> > +def isfloat(num):
> > + try:
> > + float(num)
> > + return True
> > + except ValueError:
> > + return False
> > +
> > +
> > +def isint(num):
> > + try:
> > + int(num)
> > + return True
> > + except ValueError:
> > + return False
> > +
> > +def is_counter_value(num):
> > + return isfloat(num) or num == '<not counted>' or num == '<not supported>'
> > +
> > +def check_json_output(expected_items):
> > + if expected_items != -1:
> > + for line in Lines:
> > + if 'failed' not in line:
> > + count = 0
> > + count = line.count(',')
> > + if count != expected_items and (count == 1 or count == 2) and 'metric-value' in line:
> > + # Events that generate >1 metric may have isolated metric
> > + # values and possibly have an interval prefix.
> > + continue
> > + if count != expected_items:
> > + raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
> > + f' in \'{line}\'')
> > + checks = {
> > + 'aggregate-number': lambda x: isfloat(x),
> > + 'core': lambda x: True,
> > + 'counter-value': lambda x: is_counter_value(x),
> > + 'cgroup': lambda x: True,
> > + 'cpu': lambda x: isint(x),
> > + 'die': lambda x: True,
> > + 'event': lambda x: True,
> > + 'event-runtime': lambda x: isfloat(x),
> > + 'interval': lambda x: isfloat(x),
> > + 'metric-unit': lambda x: True,
> > + 'metric-value': lambda x: isfloat(x),
> > + 'node': lambda x: True,
> > + 'pcnt-running': lambda x: isfloat(x),
> > + 'socket': lambda x: True,
> > + 'thread': lambda x: True,
> > + 'unit': lambda x: True,
> > + }
> > + input = '[\n' + ','.join(Lines) + '\n]'
> > + for item in json.loads(input):
> > + for key, value in item.items():
> > + if key not in checks:
> > + raise RuntimeError(f'Unexpected key: key={key} value={value}')
> > + if not checks[key](value):
> > + raise RuntimeError(f'Check failed for: key={key} value={value}')
> > +
> > +
> > +try:
> > + if args.no_args or args.system_wide or args.event:
> > + expected_items = 6
> > + elif args.interval or args.per_thread or args.system_wide_no_aggr:
> > + expected_items = 7
> > + elif args.per_core or args.per_socket or args.per_node or args.per_die:
> > + expected_items = 8
> > + else:
> > + # If no option is specified, don't check the number of items.
> > + expected_items = -1
> > + check_json_output(expected_items)
> > +except:
> > + print('Test failed for input:\n' + '\n'.join(Lines))
> > + raise
> > diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
> > new file mode 100755
> > index 000000000000..ea8714a36051
> > --- /dev/null
> > +++ b/tools/perf/tests/shell/stat+json_output.sh
> > @@ -0,0 +1,147 @@
> > +#!/bin/bash
> > +# perf stat JSON output linter
> > +# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
> > +# Checks various perf stat JSON output commands for the
> > +# correct number of fields.
> > +
> > +set -e
> > +
> > +pythonchecker=$(dirname $0)/lib/perf_json_output_lint.py
> > +if [ "x$PYTHON" == "x" ]
> > +then
> > + if which python3 > /dev/null
> > + then
> > + PYTHON=python3
> > + elif which python > /dev/null
> > + then
> > + PYTHON=python
> > + else
> > + echo Skipping test, python not detected please set environment variable PYTHON.
> > + exit 2
> > + fi
> > +fi
> > +
> > +# Return true if perf_event_paranoid is > $1 and not running as root.
> > +function ParanoidAndNotRoot()
> > +{
> > + [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ]
> > +}
> > +
> > +check_no_args()
> > +{
> > + echo -n "Checking json output: no args "
> > + perf stat -j true 2>&1 | $PYTHON $pythonchecker --no-args
> > + echo "[Success]"
> > +}
> > +
> > +check_system_wide()
> > +{
> > + echo -n "Checking json output: system wide "
> > + if ParanoidAndNotRoot 0
> > + then
> > + echo "[Skip] paranoia and not root"
> > + return
> > + fi
> > + perf stat -j -a true 2>&1 | $PYTHON $pythonchecker --system-wide
> > + echo "[Success]"
> > +}
> > +
> > +check_system_wide_no_aggr()
> > +{
> > + echo -n "Checking json output: system wide "
> > + if ParanoidAndNotRoot 0
> > + then
> > + echo "[Skip] paranoia and not root"
> > + return
> > + fi
> > + echo -n "Checking json output: system wide no aggregation "
> > + perf stat -j -A -a --no-merge true 2>&1 | $PYTHON $pythonchecker --system-wide-no-aggr
> > + echo "[Success]"
> > +}
> > +
> > +check_interval()
> > +{
> > + echo -n "Checking json output: interval "
> > + perf stat -j -I 1000 true 2>&1 | $PYTHON $pythonchecker --interval
> > + echo "[Success]"
> > +}
> > +
> > +
> > +check_event()
> > +{
> > + echo -n "Checking json output: event "
> > + perf stat -j -e cpu-clock true 2>&1 | $PYTHON $pythonchecker --event
> > + echo "[Success]"
> > +}
> > +
> > +check_per_core()
> > +{
> > + echo -n "Checking json output: per core "
> > + if ParanoidAndNotRoot 0
> > + then
> > + echo "[Skip] paranoia and not root"
> > + return
> > + fi
> > + perf stat -j --per-core -a true 2>&1 | $PYTHON $pythonchecker --per-core
> > + echo "[Success]"
> > +}
> > +
> > +check_per_thread()
> > +{
> > + echo -n "Checking json output: per thread "
> > + if ParanoidAndNotRoot 0
> > + then
> > + echo "[Skip] paranoia and not root"
> > + return
> > + fi
> > + perf stat -j --per-thread -a true 2>&1 | $PYTHON $pythonchecker --per-thread
> > + echo "[Success]"
> > +}
> > +
> > +check_per_die()
> > +{
> > + echo -n "Checking json output: per die "
> > + if ParanoidAndNotRoot 0
> > + then
> > + echo "[Skip] paranoia and not root"
> > + return
> > + fi
> > + perf stat -j --per-die -a true 2>&1 | $PYTHON $pythonchecker --per-die
> > + echo "[Success]"
> > +}
> > +
> > +check_per_node()
> > +{
> > + echo -n "Checking json output: per node "
> > + if ParanoidAndNotRoot 0
> > + then
> > + echo "[Skip] paranoia and not root"
> > + return
> > + fi
> > + perf stat -j --per-node -a true 2>&1 | $PYTHON $pythonchecker --per-node
> > + echo "[Success]"
> > +}
> > +
> > +check_per_socket()
> > +{
> > + echo -n "Checking json output: per socket "
> > + if ParanoidAndNotRoot 0
> > + then
> > + echo "[Skip] paranoia and not root"
> > + return
> > + fi
> > + perf stat -j --per-socket -a true 2>&1 | $PYTHON $pythonchecker --per-socket
> > + echo "[Success]"
> > +}
> > +
> > +check_no_args
> > +check_system_wide
> > +check_system_wide_no_aggr
> > +check_interval
> > +check_event
> > +check_per_core
> > +check_per_thread
> > +check_per_die
> > +check_per_node
> > +check_per_socket
> > +exit 0
> > --
> > 2.37.0.rc0.161.g10f37bed90-goog
>
> --
>
> - Arnaldo

--

- Arnaldo

2022-07-18 15:49:51

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v6 2/2] perf test: Json format checking

Em Thu, Jul 07, 2022 at 01:12:13PM -0700, Ian Rogers escreveu:
> From: Claire Jensen <[email protected]>
>
> Add field checking tests for perf stat JSON output.
> Sanity checks the expected number of fields are present, that the
> expected keys are present and they have the correct values.

it isn't installing the lib:

[root@five ~]# perf test -v json
91: perf stat JSON output linter :
--- start ---
test child forked, pid 4086678
Checking json output: no args python3: can't open file '/var/home/acme/libexec/perf-core/tests/shell/lib/perf_json_output_lint.py': [Errno 2] No such file or directory
test child finished with -2
---- end ----
perf stat JSON output linter: Skip
[root@five ~]#

I'm trying to fix, but please test it after installing...

- Arnaldo


> Signed-off-by: Claire Jensen <[email protected]>
> Signed-off-by: Ian Rogers <[email protected]>
> ---
> .../tests/shell/lib/perf_json_output_lint.py | 95 +++++++++++
> tools/perf/tests/shell/stat+json_output.sh | 147 ++++++++++++++++++
> 2 files changed, 242 insertions(+)
> create mode 100644 tools/perf/tests/shell/lib/perf_json_output_lint.py
> create mode 100755 tools/perf/tests/shell/stat+json_output.sh
>
> diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
> new file mode 100644
> index 000000000000..aaa4a8677b6c
> --- /dev/null
> +++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
> @@ -0,0 +1,95 @@
> +#!/usr/bin/python
> +# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
> +# Basic sanity check of perf JSON output as specified in the man page.
> +
> +import argparse
> +import sys
> +import json
> +
> +ap = argparse.ArgumentParser()
> +ap.add_argument('--no-args', action='store_true')
> +ap.add_argument('--interval', action='store_true')
> +ap.add_argument('--system-wide-no-aggr', action='store_true')
> +ap.add_argument('--system-wide', action='store_true')
> +ap.add_argument('--event', action='store_true')
> +ap.add_argument('--per-core', action='store_true')
> +ap.add_argument('--per-thread', action='store_true')
> +ap.add_argument('--per-die', action='store_true')
> +ap.add_argument('--per-node', action='store_true')
> +ap.add_argument('--per-socket', action='store_true')
> +args = ap.parse_args()
> +
> +Lines = sys.stdin.readlines()
> +
> +def isfloat(num):
> + try:
> + float(num)
> + return True
> + except ValueError:
> + return False
> +
> +
> +def isint(num):
> + try:
> + int(num)
> + return True
> + except ValueError:
> + return False
> +
> +def is_counter_value(num):
> + return isfloat(num) or num == '<not counted>' or num == '<not supported>'
> +
> +def check_json_output(expected_items):
> + if expected_items != -1:
> + for line in Lines:
> + if 'failed' not in line:
> + count = 0
> + count = line.count(',')
> + if count != expected_items and (count == 1 or count == 2) and 'metric-value' in line:
> + # Events that generate >1 metric may have isolated metric
> + # values and possibly have an interval prefix.
> + continue
> + if count != expected_items:
> + raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
> + f' in \'{line}\'')
> + checks = {
> + 'aggregate-number': lambda x: isfloat(x),
> + 'core': lambda x: True,
> + 'counter-value': lambda x: is_counter_value(x),
> + 'cgroup': lambda x: True,
> + 'cpu': lambda x: isint(x),
> + 'die': lambda x: True,
> + 'event': lambda x: True,
> + 'event-runtime': lambda x: isfloat(x),
> + 'interval': lambda x: isfloat(x),
> + 'metric-unit': lambda x: True,
> + 'metric-value': lambda x: isfloat(x),
> + 'node': lambda x: True,
> + 'pcnt-running': lambda x: isfloat(x),
> + 'socket': lambda x: True,
> + 'thread': lambda x: True,
> + 'unit': lambda x: True,
> + }
> + input = '[\n' + ','.join(Lines) + '\n]'
> + for item in json.loads(input):
> + for key, value in item.items():
> + if key not in checks:
> + raise RuntimeError(f'Unexpected key: key={key} value={value}')
> + if not checks[key](value):
> + raise RuntimeError(f'Check failed for: key={key} value={value}')
> +
> +
> +try:
> + if args.no_args or args.system_wide or args.event:
> + expected_items = 6
> + elif args.interval or args.per_thread or args.system_wide_no_aggr:
> + expected_items = 7
> + elif args.per_core or args.per_socket or args.per_node or args.per_die:
> + expected_items = 8
> + else:
> + # If no option is specified, don't check the number of items.
> + expected_items = -1
> + check_json_output(expected_items)
> +except:
> + print('Test failed for input:\n' + '\n'.join(Lines))
> + raise
> diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
> new file mode 100755
> index 000000000000..ea8714a36051
> --- /dev/null
> +++ b/tools/perf/tests/shell/stat+json_output.sh
> @@ -0,0 +1,147 @@
> +#!/bin/bash
> +# perf stat JSON output linter
> +# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
> +# Checks various perf stat JSON output commands for the
> +# correct number of fields.
> +
> +set -e
> +
> +pythonchecker=$(dirname $0)/lib/perf_json_output_lint.py
> +if [ "x$PYTHON" == "x" ]
> +then
> + if which python3 > /dev/null
> + then
> + PYTHON=python3
> + elif which python > /dev/null
> + then
> + PYTHON=python
> + else
> + echo Skipping test, python not detected please set environment variable PYTHON.
> + exit 2
> + fi
> +fi
> +
> +# Return true if perf_event_paranoid is > $1 and not running as root.
> +function ParanoidAndNotRoot()
> +{
> + [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ]
> +}
> +
> +check_no_args()
> +{
> + echo -n "Checking json output: no args "
> + perf stat -j true 2>&1 | $PYTHON $pythonchecker --no-args
> + echo "[Success]"
> +}
> +
> +check_system_wide()
> +{
> + echo -n "Checking json output: system wide "
> + if ParanoidAndNotRoot 0
> + then
> + echo "[Skip] paranoia and not root"
> + return
> + fi
> + perf stat -j -a true 2>&1 | $PYTHON $pythonchecker --system-wide
> + echo "[Success]"
> +}
> +
> +check_system_wide_no_aggr()
> +{
> + echo -n "Checking json output: system wide "
> + if ParanoidAndNotRoot 0
> + then
> + echo "[Skip] paranoia and not root"
> + return
> + fi
> + echo -n "Checking json output: system wide no aggregation "
> + perf stat -j -A -a --no-merge true 2>&1 | $PYTHON $pythonchecker --system-wide-no-aggr
> + echo "[Success]"
> +}
> +
> +check_interval()
> +{
> + echo -n "Checking json output: interval "
> + perf stat -j -I 1000 true 2>&1 | $PYTHON $pythonchecker --interval
> + echo "[Success]"
> +}
> +
> +
> +check_event()
> +{
> + echo -n "Checking json output: event "
> + perf stat -j -e cpu-clock true 2>&1 | $PYTHON $pythonchecker --event
> + echo "[Success]"
> +}
> +
> +check_per_core()
> +{
> + echo -n "Checking json output: per core "
> + if ParanoidAndNotRoot 0
> + then
> + echo "[Skip] paranoia and not root"
> + return
> + fi
> + perf stat -j --per-core -a true 2>&1 | $PYTHON $pythonchecker --per-core
> + echo "[Success]"
> +}
> +
> +check_per_thread()
> +{
> + echo -n "Checking json output: per thread "
> + if ParanoidAndNotRoot 0
> + then
> + echo "[Skip] paranoia and not root"
> + return
> + fi
> + perf stat -j --per-thread -a true 2>&1 | $PYTHON $pythonchecker --per-thread
> + echo "[Success]"
> +}
> +
> +check_per_die()
> +{
> + echo -n "Checking json output: per die "
> + if ParanoidAndNotRoot 0
> + then
> + echo "[Skip] paranoia and not root"
> + return
> + fi
> + perf stat -j --per-die -a true 2>&1 | $PYTHON $pythonchecker --per-die
> + echo "[Success]"
> +}
> +
> +check_per_node()
> +{
> + echo -n "Checking json output: per node "
> + if ParanoidAndNotRoot 0
> + then
> + echo "[Skip] paranoia and not root"
> + return
> + fi
> + perf stat -j --per-node -a true 2>&1 | $PYTHON $pythonchecker --per-node
> + echo "[Success]"
> +}
> +
> +check_per_socket()
> +{
> + echo -n "Checking json output: per socket "
> + if ParanoidAndNotRoot 0
> + then
> + echo "[Skip] paranoia and not root"
> + return
> + fi
> + perf stat -j --per-socket -a true 2>&1 | $PYTHON $pythonchecker --per-socket
> + echo "[Success]"
> +}
> +
> +check_no_args
> +check_system_wide
> +check_system_wide_no_aggr
> +check_interval
> +check_event
> +check_per_core
> +check_per_thread
> +check_per_die
> +check_per_node
> +check_per_socket
> +exit 0
> --
> 2.37.0.rc0.161.g10f37bed90-goog

--

- Arnaldo

2022-07-18 16:11:15

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v6 2/2] perf test: Json format checking

Em Mon, Jul 18, 2022 at 12:46:10PM -0300, Arnaldo Carvalho de Melo escreveu:
> Em Mon, Jul 18, 2022 at 12:44:59PM -0300, Arnaldo Carvalho de Melo escreveu:
> > Em Thu, Jul 07, 2022 at 01:12:13PM -0700, Ian Rogers escreveu:
> > > From: Claire Jensen <[email protected]>
> > >
> > > Add field checking tests for perf stat JSON output.
> > > Sanity checks the expected number of fields are present, that the
> > > expected keys are present and they have the correct values.
> >
> > it isn't installing the lib:
> >
> > [root@five ~]# perf test -v json
> > 91: perf stat JSON output linter :
> > --- start ---
> > test child forked, pid 4086678
> > Checking json output: no args python3: can't open file '/var/home/acme/libexec/perf-core/tests/shell/lib/perf_json_output_lint.py': [Errno 2] No such file or directory
> > test child finished with -2
> > ---- end ----
> > perf stat JSON output linter: Skip
> > [root@five ~]#
> >
> > I'm trying to fix, but please test it after installing...
>
>
> It should:
>
> install-tests: all install-gtk
> $(call QUIET_INSTALL, tests) \
> $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
> $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
> $(INSTALL) tests/pe-file.exe* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
> $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
> $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
> $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
> $(INSTALL) tests/shell/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
> $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
> $(INSTALL) tests/shell/lib/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'
>
> /me checking...

The patch below is needed, but then should we mix .py and .sh in that
directory?

⬢[acme@toolbox perf]$ git diff
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 8f0b1fb39984fb7b..65e5ba767fd6210e 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -1006,6 +1006,7 @@ install-tests: all install-gtk
$(INSTALL) tests/shell/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
$(INSTALL) tests/shell/lib/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'
+ $(INSTALL) tests/shell/lib/*.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'

install-bin: install-tools install-tests install-traceevent-plugins

⬢[acme@toolbox perf]$

And then, after that I'm getting:

[root@five ~]# perf test json
91: perf stat JSON output linter : FAILED!
[root@five ~]# perf test -v json |& tail -20
{"core" : "S0-D0-C15", "aggregate-number" : 2, "counter-value" : "2675.000000", "unit" : "", "event" : "stalled-cycles-backend", "event-runtime" : 2864158, "pcnt-running" : 100.00, "metric-value" : 0.341252, "metric-unit" : "backend cycles idle"}

{"core" : "S0-D0-C15", "aggregate-number" : 2, "counter-value" : "233533.000000", "unit" : "", "event" : "instructions", "event-runtime" : 2865528, "pcnt-running" : 100.00, "metric-value" : 0.297920, "metric-unit" : "insn per cycle"}

{"core" : "S0-D0-C15", "aggregate-number" : 2, "metric-value" : 0.192975, "metric-unit" : "stalled cycles per insn"}

{"core" : "S0-D0-C15", "aggregate-number" : 2, "counter-value" : "50214.000000", "unit" : "", "event" : "branches", "event-runtime" : 2865638, "pcnt-running" : 100.00, "metric-value" : 17.295742, "metric-unit" : "M/sec"}

{"core" : "S0-D0-C15", "aggregate-number" : 2, "counter-value" : "1513.000000", "unit" : "", "event" : "branch-misses", "event-runtime" : 1564989, "pcnt-running" : 54.00, "metric-value" : 3.013104, "metric-unit" : "of all branches"}

Traceback (most recent call last):
File "/var/home/acme/libexec/perf-core/tests/shell/lib/perf_json_output_lint.py", line 92, in <module>
check_json_output(expected_items)
File "/var/home/acme/libexec/perf-core/tests/shell/lib/perf_json_output_lint.py", line 53, in check_json_output
raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
RuntimeError: wrong number of fields. counted 3 expected 8 in '{"core" : "S0-D0-C0", "aggregate-number" : 2, "metric-value" : 0.094345, "metric-unit" : "stalled cycles per insn"}
'
test child finished with -1
---- end ----
perf stat JSON output linter: FAILED!
[root@five ~]#

Can you please check and send a v7?

- Arnaldo

2022-08-05 20:35:54

by Ian Rogers

[permalink] [raw]
Subject: Re: [PATCH v6 2/2] perf test: Json format checking

On Mon, Jul 18, 2022 at 8:49 AM Arnaldo Carvalho de Melo
<[email protected]> wrote:
>
> Em Mon, Jul 18, 2022 at 12:46:10PM -0300, Arnaldo Carvalho de Melo escreveu:
> > Em Mon, Jul 18, 2022 at 12:44:59PM -0300, Arnaldo Carvalho de Melo escreveu:
> > > Em Thu, Jul 07, 2022 at 01:12:13PM -0700, Ian Rogers escreveu:
> > > > From: Claire Jensen <[email protected]>
> > > >
> > > > Add field checking tests for perf stat JSON output.
> > > > Sanity checks the expected number of fields are present, that the
> > > > expected keys are present and they have the correct values.
> > >
> > > it isn't installing the lib:
> > >
> > > [root@five ~]# perf test -v json
> > > 91: perf stat JSON output linter :
> > > --- start ---
> > > test child forked, pid 4086678
> > > Checking json output: no args python3: can't open file '/var/home/acme/libexec/perf-core/tests/shell/lib/perf_json_output_lint.py': [Errno 2] No such file or directory
> > > test child finished with -2
> > > ---- end ----
> > > perf stat JSON output linter: Skip
> > > [root@five ~]#
> > >
> > > I'm trying to fix, but please test it after installing...
> >
> >
> > It should:
> >
> > install-tests: all install-gtk
> > $(call QUIET_INSTALL, tests) \
> > $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
> > $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
> > $(INSTALL) tests/pe-file.exe* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
> > $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
> > $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
> > $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
> > $(INSTALL) tests/shell/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
> > $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
> > $(INSTALL) tests/shell/lib/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'
> >
> > /me checking...
>
> The patch below is needed, but then should we mix .py and .sh in that
> directory?
>
> ⬢[acme@toolbox perf]$ git diff
> diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
> index 8f0b1fb39984fb7b..65e5ba767fd6210e 100644
> --- a/tools/perf/Makefile.perf
> +++ b/tools/perf/Makefile.perf
> @@ -1006,6 +1006,7 @@ install-tests: all install-gtk
> $(INSTALL) tests/shell/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
> $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
> $(INSTALL) tests/shell/lib/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'
> + $(INSTALL) tests/shell/lib/*.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'
>
> install-bin: install-tools install-tests install-traceevent-plugins
>
> ⬢[acme@toolbox perf]$
>
> And then, after that I'm getting:
>
> [root@five ~]# perf test json
> 91: perf stat JSON output linter : FAILED!
> [root@five ~]# perf test -v json |& tail -20
> {"core" : "S0-D0-C15", "aggregate-number" : 2, "counter-value" : "2675.000000", "unit" : "", "event" : "stalled-cycles-backend", "event-runtime" : 2864158, "pcnt-running" : 100.00, "metric-value" : 0.341252, "metric-unit" : "backend cycles idle"}
>
> {"core" : "S0-D0-C15", "aggregate-number" : 2, "counter-value" : "233533.000000", "unit" : "", "event" : "instructions", "event-runtime" : 2865528, "pcnt-running" : 100.00, "metric-value" : 0.297920, "metric-unit" : "insn per cycle"}
>
> {"core" : "S0-D0-C15", "aggregate-number" : 2, "metric-value" : 0.192975, "metric-unit" : "stalled cycles per insn"}
>
> {"core" : "S0-D0-C15", "aggregate-number" : 2, "counter-value" : "50214.000000", "unit" : "", "event" : "branches", "event-runtime" : 2865638, "pcnt-running" : 100.00, "metric-value" : 17.295742, "metric-unit" : "M/sec"}
>
> {"core" : "S0-D0-C15", "aggregate-number" : 2, "counter-value" : "1513.000000", "unit" : "", "event" : "branch-misses", "event-runtime" : 1564989, "pcnt-running" : 54.00, "metric-value" : 3.013104, "metric-unit" : "of all branches"}
>
> Traceback (most recent call last):
> File "/var/home/acme/libexec/perf-core/tests/shell/lib/perf_json_output_lint.py", line 92, in <module>
> check_json_output(expected_items)
> File "/var/home/acme/libexec/perf-core/tests/shell/lib/perf_json_output_lint.py", line 53, in check_json_output
> raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
> RuntimeError: wrong number of fields. counted 3 expected 8 in '{"core" : "S0-D0-C0", "aggregate-number" : 2, "metric-value" : 0.094345, "metric-unit" : "stalled cycles per insn"}
> '
> test child finished with -1
> ---- end ----
> perf stat JSON output linter: FAILED!
> [root@five ~]#
>
> Can you please check and send a v7?

Done. Sorry for the delay:
https://lore.kernel.org/lkml/[email protected]/

Thanks,
Ian

> - Arnaldo