2019-03-02 01:18:32

by Tony Jones

[permalink] [raw]
Subject: [PATCH v2 0/7] perf script python: add Python3 support

This is v2 of my version of the patchset, incorporating the
previous feedback. Some changes from v1 were already merged.

Patch 1/7 deals with the existing inconsistent indentation.
Indentation is now consistent within each file, although the style
varies between files (tabs, 4 spaces or 8 spaces).
I will follow up at a later date with changes to checkpatch to ensure
that the indentation style per file is maintained.

Patches 2/7 through 5/7 were sent in v1; they have been changed
to remove the previous indentation changes.

Patch 6/7 was sent in v1. I had previously *not* been able to test
export-to-postgresql.py. I was able to do so this time and found
that more changes were needed. The author of the original code
seems concerned about code-style so I would suggest you only merge
with his explicit ACK.

Patch 7/7 was not in v1; it cleans up some repeated use of date
functions in the SQL exporters. It is not mandatory for Python3
support. It is dependent on Patch 6/7.

I hope I've got everything correct, I've retested until I feel I
can't look at Python code anymore for a while :-). Hopefully I've
not made any more mistakes. If I have, please LMK and I'll do v3.

Thanks

Tony


2019-03-02 01:18:44

by Tony Jones

[permalink] [raw]
Subject: [PATCH v2 1/7] perf script python: remove mixed indentation

Remove mixed indentation in Python scripts. Revert to either all
tabs (the most common form) or all spaces (4 or 8) depending on the
intent of the original commit. This is necessary to complete Python3
support, as Python3 will flag an error if it encounters mixed
indentation.

Signed-off-by: Tony Jones <[email protected]>
---
tools/perf/scripts/python/check-perf-trace.py | 65 +++++++++++-----------
tools/perf/scripts/python/compaction-times.py | 8 +--
.../perf/scripts/python/event_analyzing_sample.py | 6 +-
.../perf/scripts/python/failed-syscalls-by-pid.py | 38 ++++++-------
tools/perf/scripts/python/futex-contention.py | 2 +-
tools/perf/scripts/python/intel-pt-events.py | 32 +++++------
tools/perf/scripts/python/mem-phys-addr.py | 7 ++-
tools/perf/scripts/python/net_dropmonitor.py | 2 +-
tools/perf/scripts/python/netdev-times.py | 12 ++--
tools/perf/scripts/python/sched-migration.py | 6 +-
tools/perf/scripts/python/sctop.py | 13 +++--
tools/perf/scripts/python/stackcollapse.py | 2 +-
tools/perf/scripts/python/syscall-counts-by-pid.py | 47 ++++++++--------
tools/perf/scripts/python/syscall-counts.py | 31 +++++------
14 files changed, 136 insertions(+), 135 deletions(-)

diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
index 334599c6032c..f4838db3e518 100644
--- a/tools/perf/scripts/python/check-perf-trace.py
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -23,60 +23,59 @@ def trace_begin():
pass

def trace_end():
- print_unhandled()
+ print_unhandled()

def irq__softirq_entry(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, vec):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, vec):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)

- print_uncommon(context)
+ print_uncommon(context)

- print "vec=%s\n" % \
- (symbol_str("irq__softirq_entry", "vec", vec)),
+ print "vec=%s\n" % (symbol_str("irq__softirq_entry", "vec", vec)),

def kmem__kmalloc(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, call_site, ptr, bytes_req, bytes_alloc,
- gfp_flags):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, call_site, ptr, bytes_req, bytes_alloc,
+ gfp_flags):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)

- print_uncommon(context)
+ print_uncommon(context)

- print "call_site=%u, ptr=%u, bytes_req=%u, " \
+ print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
-
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),

def trace_unhandled(event_name, context, event_fields_dict):
- try:
- unhandled[event_name] += 1
- except TypeError:
- unhandled[event_name] = 1
+ try:
+ unhandled[event_name] += 1
+ except TypeError:
+ unhandled[event_name] = 1

def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
- (event_name, cpu, secs, nsecs, pid, comm),
+ (event_name, cpu, secs, nsecs, pid, comm),

# print trace fields not included in handler args
def print_uncommon(context):
- print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
- % (common_pc(context), trace_flag_str(common_flags(context)), \
- common_lock_depth(context))
+ print "common_preempt_count=%d, common_flags=%s, " \
+ "common_lock_depth=%d, " % \
+ (common_pc(context), trace_flag_str(common_flags(context)),
+ common_lock_depth(context))

def print_unhandled():
- keys = unhandled.keys()
- if not keys:
- return
+ keys = unhandled.keys()
+ if not keys:
+ return

- print "\nunhandled events:\n\n",
+ print "\nunhandled events:\n\n",

- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
+ print "%-40s %10s\n" % ("event", "count"),
+ print "%-40s %10s\n" % ("----------------------------------------", \
+ "-----------"),

- for event_name in keys:
- print "%-40s %10d\n" % (event_name, unhandled[event_name])
+ for event_name in keys:
+ print "%-40s %10d\n" % (event_name, unhandled[event_name])
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
index 239cb0568ec3..2560a042dc6f 100644
--- a/tools/perf/scripts/python/compaction-times.py
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -216,15 +216,15 @@ def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
pair(nr_migrated, nr_failed), None, None)

def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):

chead.increment_pending(common_pid,
None, pair(nr_scanned, nr_taken), None)

def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):

chead.increment_pending(common_pid,
None, None, pair(nr_scanned, nr_taken))
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
index 4e843b9864ec..2ec8915b74c5 100644
--- a/tools/perf/scripts/python/event_analyzing_sample.py
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -37,7 +37,7 @@ con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None

def trace_begin():
- print "In trace_begin:\n"
+ print "In trace_begin:\n"

#
# Will create several tables at the start, pebs_ll is for PEBS data with
@@ -102,7 +102,7 @@ def insert_db(event):
event.ip, event.status, event.dse, event.dla, event.lat))

def trace_end():
- print "In trace_end:\n"
+ print "In trace_end:\n"
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
@@ -187,4 +187,4 @@ def show_pebs_ll():
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
index 3648e8b986ec..310efe5e7e23 100644
--- a/tools/perf/scripts/python/failed-syscalls-by-pid.py
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -58,22 +58,22 @@ def syscalls__sys_exit(event_name, context, common_cpu,
raw_syscalls__sys_exit(**locals())

def print_error_totals():
- if for_comm is not None:
- print("\nsyscall errors for %s:\n" % (for_comm))
- else:
- print("\nsyscall errors:\n")
-
- print("%-30s %10s" % ("comm [pid]", "count"))
- print("%-30s %10s" % ("------------------------------", "----------"))
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print("\n%s [%d]" % (comm, pid))
- id_keys = syscalls[comm][pid].keys()
- for id in id_keys:
- print(" syscall: %-16s" % syscall_name(id))
- ret_keys = syscalls[comm][pid][id].keys()
- for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
- print(" err = %-20s %10d" % (strerror(ret), val))
+ if for_comm is not None:
+ print("\nsyscall errors for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall errors:\n")
+
+ print("%-30s %10s" % ("comm [pid]", "count"))
+ print("%-30s %10s" % ("------------------------------", "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id in id_keys:
+ print(" syscall: %-16s" % syscall_name(id))
+ ret_keys = syscalls[comm][pid][id].keys()
+ for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" err = %-20s %10d" % (strerror(ret), val))
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
index 0f5cf437b602..f221c62e0a10 100644
--- a/tools/perf/scripts/python/futex-contention.py
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -46,5 +46,5 @@ def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
- (process_names[tid], tid, lock, count, avg)
+ (process_names[tid], tid, lock, count, avg)

diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index b19172d673af..2177722f509e 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -85,22 +85,22 @@ def print_common_ip(sample, symbol, dso):
print "%16x %s (%s)" % (ip, symbol, dso)

def process_event(param_dict):
- event_attr = param_dict["attr"]
- sample = param_dict["sample"]
- raw_buf = param_dict["raw_buf"]
- comm = param_dict["comm"]
- name = param_dict["ev_name"]
-
- # Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
- dso = param_dict["dso"]
- else:
- dso = "[unknown]"
-
- if (param_dict.has_key("symbol")):
- symbol = param_dict["symbol"]
- else:
- symbol = "[unknown]"
+ event_attr = param_dict["attr"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
+ comm = param_dict["comm"]
+ name = param_dict["ev_name"]
+
+ # Symbol and dso info are not always resolved
+ if (param_dict.has_key("dso")):
+ dso = param_dict["dso"]
+ else:
+ dso = "[unknown]"
+
+ if (param_dict.has_key("symbol")):
+ symbol = param_dict["symbol"]
+ else:
+ symbol = "[unknown]"

if name == "ptwrite":
print_common_start(comm, sample, name)
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
index fb0bbcbfa0f0..1f332e72b9b0 100644
--- a/tools/perf/scripts/python/mem-phys-addr.py
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -44,12 +44,13 @@ def print_memory_type():
print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
print("%-40s %10s %10s\n" % ("----------------------------------------",
"-----------", "-----------"),
- end='');
+ end='');
total = sum(load_mem_type_cnt.values())
for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
key = lambda kv: (kv[1], kv[0]), reverse = True):
- print("%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
- end='')
+ print("%-40s %10d %10.1f%%\n" %
+ (mem_type, count, 100 * count / total),
+ end='')

def trace_begin():
parse_iomem()
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
index 212557a02c50..101059971738 100755
--- a/tools/perf/scripts/python/net_dropmonitor.py
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -7,7 +7,7 @@ import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
index 267bda49325d..ea0c8b90a783 100644
--- a/tools/perf/scripts/python/netdev-times.py
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -124,14 +124,16 @@ def print_receive(hunk):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print(PF_NAPI_POLL %
- (diff_msec(base_t, event['event_t']), event['dev']))
+ (diff_msec(base_t, event['event_t']),
+ event['dev']))
if i == len(event_list) - 1:
print("")
else:
print(PF_JOINT)
else:
print(PF_NET_RECV %
- (diff_msec(base_t, event['event_t']), event['skbaddr'],
+ (diff_msec(base_t, event['event_t']),
+ event['skbaddr'],
event['len']))
if 'comm' in event.keys():
print(PF_WJOINT)
@@ -256,7 +258,7 @@ def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, i
all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi,
- dev_name, work=None, budget=None):
+ dev_name, work=None, budget=None):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name, work, budget)
all_event_list.append(event_info)
@@ -353,7 +355,7 @@ def handle_irq_softirq_exit(event_info):
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
- 'irq_list':irq_list, 'event_list':event_list}
+ 'irq_list':irq_list, 'event_list':event_list}
# merge information realted to a NET_RX softirq
receive_hunk_list.append(rec_data)

@@ -390,7 +392,7 @@ def handle_netif_receive_skb(event_info):
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
- 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
+ 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
index 3984bf51f3c5..8196e3087c9e 100644
--- a/tools/perf/scripts/python/sched-migration.py
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -14,10 +14,10 @@ import sys

from collections import defaultdict
try:
- from UserList import UserList
+ from UserList import UserList
except ImportError:
- # Python 3: UserList moved to the collections package
- from collections import UserList
+ # Python 3: UserList moved to the collections package
+ from collections import UserList

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
index 987ffae7c8ca..6e0278dcb092 100644
--- a/tools/perf/scripts/python/sctop.py
+++ b/tools/perf/scripts/python/sctop.py
@@ -13,9 +13,9 @@ from __future__ import print_function
import os, sys, time

try:
- import thread
+ import thread
except ImportError:
- import _thread as thread
+ import _thread as thread

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -75,11 +75,12 @@ def print_syscall_totals(interval):

print("%-40s %10s" % ("event", "count"))
print("%-40s %10s" %
- ("----------------------------------------",
- "----------"))
+ ("----------------------------------------",
+ "----------"))

- for id, val in sorted(syscalls.items(), key = lambda kv: (kv[1], kv[0]), \
- reverse = True):
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]),
+ reverse = True):
try:
print("%-40s %10d" % (syscall_name(id), val))
except TypeError:
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py
index 5e703efaddcc..b1c4def1410a 100755
--- a/tools/perf/scripts/python/stackcollapse.py
+++ b/tools/perf/scripts/python/stackcollapse.py
@@ -27,7 +27,7 @@ from collections import defaultdict
from optparse import OptionParser, make_option

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
index 42782487b0e9..f254e40c6f0f 100644
--- a/tools/perf/scripts/python/syscall-counts-by-pid.py
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -39,11 +39,10 @@ def trace_end():
print_syscall_totals()

def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
-
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if (for_comm and common_comm != for_comm) or \
- (for_pid and common_pid != for_pid ):
+ (for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
@@ -51,26 +50,26 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[common_comm][common_pid][id] = 1

def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
raw_syscalls__sys_enter(**locals())

def print_syscall_totals():
- if for_comm is not None:
- print("\nsyscall events for %s:\n" % (for_comm))
- else:
- print("\nsyscall events by comm/pid:\n")
-
- print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
- print("%-40s %10s" % ("----------------------------------------",
- "----------"))
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print("\n%s [%d]" % (comm, pid))
- id_keys = syscalls[comm][pid].keys()
- for id, val in sorted(syscalls[comm][pid].items(), \
- key = lambda kv: (kv[1], kv[0]), reverse = True):
- print(" %-38s %10d" % (syscall_name(id), val))
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events by comm/pid:\n")
+
+ print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id, val in sorted(syscalls[comm][pid].items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" %-38s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
index 0ebd89cfd42c..8adb95ff1664 100644
--- a/tools/perf/scripts/python/syscall-counts.py
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -36,8 +36,8 @@ def trace_end():
print_syscall_totals()

def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
@@ -47,20 +47,19 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[id] = 1

def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm, id, args):
raw_syscalls__sys_enter(**locals())

def print_syscall_totals():
- if for_comm is not None:
- print("\nsyscall events for %s:\n" % (for_comm))
- else:
- print("\nsyscall events:\n")
-
- print("%-40s %10s" % ("event", "count"))
- print("%-40s %10s" % ("----------------------------------------",
- "-----------"))
-
- for id, val in sorted(syscalls.items(), key = lambda kv: (kv[1], kv[0]), \
- reverse = True):
- print("%-40s %10d" % (syscall_name(id), val))
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events:\n")
+
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
+
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d" % (syscall_name(id), val))
--
2.16.4


2019-03-02 01:18:53

by Tony Jones

[permalink] [raw]
Subject: [PATCH v2 3/7] perf script python: add Python3 support to check-perf-trace.py

Support both Python 2 and Python 3 in the check-perf-trace.py script.

There may be differences in the ordering of output lines due to
differences in dictionary ordering etc. However the format within lines
should be unchanged.

The use of 'from __future__' implies the minimum supported Python2
version is now v2.6

Signed-off-by: Tony Jones <[email protected]>
Signed-off-by: Seeteena Thoufeek <[email protected]>
Cc: Tom Zanussi <[email protected]>
---
tools/perf/scripts/python/check-perf-trace.py | 31 +++++++++++++++------------
1 file changed, 17 insertions(+), 14 deletions(-)

diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
index f4838db3e518..d2c22954800d 100644
--- a/tools/perf/scripts/python/check-perf-trace.py
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -7,6 +7,8 @@
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.

+from __future__ import print_function
+
import os
import sys

@@ -19,7 +21,7 @@ from perf_trace_context import *
unhandled = autodict()

def trace_begin():
- print "trace_begin"
+ print("trace_begin")
pass

def trace_end():
@@ -33,7 +35,7 @@ def irq__softirq_entry(event_name, context, common_cpu,

print_uncommon(context)

- print "vec=%s\n" % (symbol_str("irq__softirq_entry", "vec", vec)),
+ print("vec=%s" % (symbol_str("irq__softirq_entry", "vec", vec)))

def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
@@ -44,10 +46,10 @@ def kmem__kmalloc(event_name, context, common_cpu,

print_uncommon(context)

- print "call_site=%u, ptr=%u, bytes_req=%u, " \
- "bytes_alloc=%u, gfp_flags=%s\n" % \
+ print("call_site=%u, ptr=%u, bytes_req=%u, "
+ "bytes_alloc=%u, gfp_flags=%s" %
(call_site, ptr, bytes_req, bytes_alloc,
- flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
+ flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)))

def trace_unhandled(event_name, context, event_fields_dict):
try:
@@ -56,26 +58,27 @@ def trace_unhandled(event_name, context, event_fields_dict):
unhandled[event_name] = 1

def print_header(event_name, cpu, secs, nsecs, pid, comm):
- print "%-20s %5u %05u.%09u %8u %-20s " % \
+ print("%-20s %5u %05u.%09u %8u %-20s " %
(event_name, cpu, secs, nsecs, pid, comm),
+ end=' ')

# print trace fields not included in handler args
def print_uncommon(context):
- print "common_preempt_count=%d, common_flags=%s, " \
- "common_lock_depth=%d, " % \
+ print("common_preempt_count=%d, common_flags=%s, "
+ "common_lock_depth=%d, " %
(common_pc(context), trace_flag_str(common_flags(context)),
- common_lock_depth(context))
+ common_lock_depth(context)))

def print_unhandled():
keys = unhandled.keys()
if not keys:
return

- print "\nunhandled events:\n\n",
+ print("\nunhandled events:\n")

- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))

for event_name in keys:
- print "%-40s %10d\n" % (event_name, unhandled[event_name])
+ print("%-40s %10d\n" % (event_name, unhandled[event_name]))
--
2.16.4


2019-03-02 01:18:57

by Tony Jones

[permalink] [raw]
Subject: [PATCH v2 4/7] perf script python: add Python3 support to event_analyzing_sample.py

Support both Python2 and Python3 in the event_analyzing_sample.py script.

There may be differences in the ordering of output lines due to
differences in dictionary ordering etc. However the format within lines
should be unchanged.

The use of 'from __future__' implies the minimum supported Python2 version
is now v2.6

Signed-off-by: Tony Jones <[email protected]>
Signed-off-by: Seeteena Thoufeek <[email protected]>
Cc: Feng Tang <[email protected]>
---
.../perf/scripts/python/event_analyzing_sample.py | 48 +++++++++++-----------
1 file changed, 25 insertions(+), 23 deletions(-)

diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
index 2ec8915b74c5..aa1e2cfa26a6 100644
--- a/tools/perf/scripts/python/event_analyzing_sample.py
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -15,6 +15,8 @@
# for a x86 HW PMU event: PEBS with load latency data.
#

+from __future__ import print_function
+
import os
import sys
import math
@@ -37,7 +39,7 @@ con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None

def trace_begin():
- print "In trace_begin:\n"
+ print("In trace_begin:\n")

#
# Will create several tables at the start, pebs_ll is for PEBS data with
@@ -76,12 +78,12 @@ def process_event(param_dict):
name = param_dict["ev_name"]

# Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
+ if ("dso" in param_dict):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"

- if (param_dict.has_key("symbol")):
+ if ("symbol" in param_dict):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
@@ -102,7 +104,7 @@ def insert_db(event):
event.ip, event.status, event.dse, event.dla, event.lat))

def trace_end():
- print "In trace_end:\n"
+ print("In trace_end:\n")
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
@@ -123,29 +125,29 @@ def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
- print "There is %d records in gen_events table" % t[0]
+ print("There is %d records in gen_events table" % t[0])
if t[0] == 0:
return

- print "Statistics about the general events grouped by thread/symbol/dso: \n"
+ print("Statistics about the general events grouped by thread/symbol/dso: \n")

# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))

# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

# Group by dso
- print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
+ print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74))
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
- print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%40s %8d %s" % (row[0], row[1], num2sym(row[1])))

#
# This function just shows the basic info, and we could do more with the
@@ -156,35 +158,35 @@ def show_pebs_ll():

count = con.execute("select count(*) from pebs_ll")
for t in count:
- print "There is %d records in pebs_ll table" % t[0]
+ print("There is %d records in pebs_ll table" % t[0])
if t[0] == 0:
return

- print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
+ print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n")

# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))

# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
- print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58))
for row in dseq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
- print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58))
for row in latq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print (' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
--
2.16.4


2019-03-02 01:19:04

by Tony Jones

[permalink] [raw]
Subject: [PATCH v2 6/7] perf script python: add Python3 support to sql scripts

Support both Python2 and Python3 in the exported-sql-viewer.py,
export-to-postgresql.py and export-to-sqlite.py scripts

There may be differences in the ordering of output lines due to
differences in dictionary ordering etc. However the format within lines
should be unchanged.

The use of 'from __future__' implies the minimum supported Python2 version
is now v2.6

Signed-off-by: Tony Jones <[email protected]>
Signed-off-by: Seeteena Thoufeek <[email protected]>
Cc: Adrian Hunter <[email protected]>
---
tools/perf/scripts/python/export-to-postgresql.py | 65 +++++++++++++++--------
tools/perf/scripts/python/export-to-sqlite.py | 23 ++++----
tools/perf/scripts/python/exported-sql-viewer.py | 42 ++++++++++-----
3 files changed, 84 insertions(+), 46 deletions(-)

diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 390a351d15ea..439bbbf1e036 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.

+from __future__ import print_function
+
import os
import sys
import struct
@@ -199,6 +201,16 @@ import datetime

from PySide.QtSql import *

+if sys.version_info < (3, 0):
+ def tobytes(str):
+ return str
+else:
+ def tobytes(str):
+ # Use latin-1 (ISO-8859-1) so all code-points 0-255 will result
+ # in one byte (note utf-8 is 2 bytes for values > 128 and
+ # ascii is limited to values <= 128)
+ return bytes(str, "ISO-8859-1")
+
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
@@ -234,12 +246,14 @@ perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False

+def printerr(*args, **kw_args):
+ print(*args, file=sys.stderr, **kw_args)

def usage():
- print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
- print >> sys.stderr, "where: columns 'all' or 'branches'"
- print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
- print >> sys.stderr, " callchains 'callchains' => create call_paths table"
+ printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
+ printerr("where: columns 'all' or 'branches'")
+ printerr(" calls 'calls' => create calls and call_paths table")
+ printerr(" callchains 'callchains' => create call_paths table")
raise Exception("Too few arguments")

if (len(sys.argv) < 2):
@@ -273,7 +287,7 @@ def do_query(q, s):
return
raise Exception("Query failed: " + q.lastError().text())

-print datetime.datetime.today(), "Creating database..."
+print(datetime.datetime.today(), "Creating database...")

db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
@@ -506,12 +520,12 @@ do_query(query, 'CREATE VIEW samples_view AS '
' FROM samples')


-file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
-file_trailer = "\377\377"
+file_header = struct.pack("!11sii", tobytes("PGCOPY\n\377\r\n\0"), 0, 0)
+file_trailer = tobytes("\377\377")

def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
- file = open(path_name, "w+")
+ file = open(path_name, "wb+")
file.write(file_header)
return file

@@ -526,13 +540,13 @@ def copy_output_file_direct(file, table_name):

# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
- conn = PQconnectdb("dbname = " + dbname)
+ conn = PQconnectdb(tobytes("dbname = " + dbname))
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
- res = PQexec(conn, sql)
+ res = PQexec(conn, tobytes(sql))
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
@@ -566,7 +580,7 @@ if perf_db_export_calls:
call_file = open_output_file("call_table.bin")

def trace_begin():
- print datetime.datetime.today(), "Writing to intermediate files..."
+ print(datetime.datetime.today(), "Writing to intermediate files...")
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
@@ -582,7 +596,7 @@ def trace_begin():
unhandled_count = 0

def trace_end():
- print datetime.datetime.today(), "Copying to database..."
+ print(datetime.datetime.today(), "Copying to database...")
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
@@ -597,7 +611,7 @@ def trace_end():
if perf_db_export_calls:
copy_output_file(call_file, "calls")

- print datetime.datetime.today(), "Removing intermediate files..."
+ print(datetime.datetime.today(), "Removing intermediate files...")
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
@@ -612,7 +626,7 @@ def trace_end():
if perf_db_export_calls:
remove_output_file(call_file)
os.rmdir(output_dir_name)
- print datetime.datetime.today(), "Adding primary keys"
+ print(datetime.datetime.today(), "Adding primary keys")
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
@@ -627,7 +641,7 @@ def trace_end():
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')

- print datetime.datetime.today(), "Adding foreign keys"
+ print(datetime.datetime.today(), "Adding foreign keys")
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
@@ -663,8 +677,8 @@ def trace_end():
do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')

if (unhandled_count):
- print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
- print datetime.datetime.today(), "Done"
+ print(datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events")
+ print(datetime.datetime.today(), "Done")

def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
@@ -676,13 +690,13 @@ def sched__sched_switch(*x):
def evsel_table(evsel_id, evsel_name, *x):
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
- value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
+ value = struct.pack(fmt, 2, 8, evsel_id, n, tobytes(evsel_name))
evsel_file.write(value)

def machine_table(machine_id, pid, root_dir, *x):
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
- value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
+ value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, tobytes(root_dir))
machine_file.write(value)

def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
@@ -692,7 +706,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
def comm_table(comm_id, comm_str, *x):
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
- value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
+ value = struct.pack(fmt, 2, 8, comm_id, n, tobytes(comm_str))
comm_file.write(value)

def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
@@ -705,19 +719,24 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
n2 = len(long_name)
n3 = len(build_id)
fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
- value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
+ value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1,
+ tobytes(short_name), n2,
+ tobytes(long_name), n3,
+ tobytes(build_id))
dso_file.write(value)

def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
- value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
+ value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8,
+ sym_start, 8, sym_end, 4, binding, n,
+ tobytes(symbol_name))
symbol_file.write(value)

def branch_type_table(branch_type, name, *x):
n = len(name)
fmt = "!hiii" + str(n) + "s"
- value = struct.pack(fmt, 2, 4, branch_type, n, name)
+ value = struct.pack(fmt, 2, 4, branch_type, n, tobytes(name))
branch_type_file.write(value)

def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x):
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index eb63e6c7107f..3da338243aed 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.

+from __future__ import print_function
+
import os
import sys
import struct
@@ -60,11 +62,14 @@ perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False

+def printerr(*args, **keyword_args):
+ print(*args, file=sys.stderr, **keyword_args)
+
def usage():
- print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"
- print >> sys.stderr, "where: columns 'all' or 'branches'"
- print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
- print >> sys.stderr, " callchains 'callchains' => create call_paths table"
+ printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]");
+ printerr("where: columns 'all' or 'branches'");
+ printerr(" calls 'calls' => create calls and call_paths table");
+ printerr(" callchains 'callchains' => create call_paths table");
raise Exception("Too few arguments")

if (len(sys.argv) < 2):
@@ -100,7 +105,7 @@ def do_query_(q):
return
raise Exception("Query failed: " + q.lastError().text())

-print datetime.datetime.today(), "Creating database..."
+print(datetime.datetime.today(), "Creating database ...")

db_exists = False
try:
@@ -378,7 +383,7 @@ if perf_db_export_calls:
call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")

def trace_begin():
- print datetime.datetime.today(), "Writing records..."
+ print(datetime.datetime.today(), "Writing records...")
do_query(query, 'BEGIN TRANSACTION')
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
@@ -397,14 +402,14 @@ unhandled_count = 0
def trace_end():
do_query(query, 'END TRANSACTION')

- print datetime.datetime.today(), "Adding indexes"
+ print(datetime.datetime.today(), "Adding indexes")
if perf_db_export_calls:
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')

if (unhandled_count):
- print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
- print datetime.datetime.today(), "Done"
+ print(datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events")
+ print(datetime.datetime.today(), "Done")

def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index afec9479ca7f..e38518cdcbc3 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -88,11 +88,20 @@
# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])

+from __future__ import print_function
+
import sys
import weakref
import threading
import string
-import cPickle
+try:
+ # Python2
+ import cPickle as pickle
+ # size of pickled integer big enough for record size
+ glb_nsz = 8
+except ImportError:
+ import pickle
+ glb_nsz = 16
import re
import os
from PySide.QtCore import *
@@ -102,6 +111,15 @@ from decimal import *
from ctypes import *
from multiprocessing import Process, Array, Value, Event

+# xrange is range in Python3
+try:
+ xrange
+except NameError:
+ xrange = range
+
+def printerr(*args, **keyword_args):
+ print(*args, file=sys.stderr, **keyword_args)
+
# Data formatting helpers

def tohex(ip):
@@ -1004,10 +1022,6 @@ class ChildDataItemFinder():

glb_chunk_sz = 10000

-# size of pickled integer big enough for record size
-
-glb_nsz = 8
-
# Background process for SQL data fetcher

class SQLFetcherProcess():
@@ -1066,7 +1080,7 @@ class SQLFetcherProcess():
return True
if space >= glb_nsz:
# Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
- nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL)
+ nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
self.buffer[self.local_head : self.local_head + len(nd)] = nd
self.local_head = 0
if self.local_tail - self.local_head > sz:
@@ -1084,9 +1098,9 @@ class SQLFetcherProcess():
self.wait_event.wait()

def AddToBuffer(self, obj):
- d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL)
+ d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
n = len(d)
- nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL)
+ nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
sz = n + glb_nsz
self.WaitForSpace(sz)
pos = self.local_head
@@ -1198,12 +1212,12 @@ class SQLFetcher(QObject):
pos = self.local_tail
if len(self.buffer) - pos < glb_nsz:
pos = 0
- n = cPickle.loads(self.buffer[pos : pos + glb_nsz])
+ n = pickle.loads(self.buffer[pos : pos + glb_nsz])
if n == 0:
pos = 0
- n = cPickle.loads(self.buffer[0 : glb_nsz])
+ n = pickle.loads(self.buffer[0 : glb_nsz])
pos += glb_nsz
- obj = cPickle.loads(self.buffer[pos : pos + n])
+ obj = pickle.loads(self.buffer[pos : pos + n])
self.local_tail = pos + n
return obj

@@ -2973,7 +2987,7 @@ class DBRef():

def Main():
if (len(sys.argv) < 2):
- print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
+ printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}");
raise Exception("Too few arguments")

dbname = sys.argv[1]
@@ -2986,8 +3000,8 @@ def Main():

is_sqlite3 = False
try:
- f = open(dbname)
- if f.read(15) == "SQLite format 3":
+ f = open(dbname, "rb")
+ if f.read(15) == b'SQLite format 3':
is_sqlite3 = True
f.close()
except:
--
2.16.4


2019-03-02 01:19:11

by Tony Jones

[permalink] [raw]
Subject: [PATCH v2 7/7] perf script python: add printdate function to SQL exporters

Introduce a printdate function to eliminate the repetitive use of
datetime.datetime.today() in the SQL exporting scripts.

Signed-off-by: Tony Jones <[email protected]>
Cc: Adrian Hunter <[email protected]>
---
tools/perf/scripts/python/export-to-postgresql.py | 19 +++++++++++--------
tools/perf/scripts/python/export-to-sqlite.py | 13 ++++++++-----
2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 439bbbf1e036..515dc5506427 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -249,6 +249,9 @@ perf_db_export_callchains = False
def printerr(*args, **kw_args):
print(*args, file=sys.stderr, **kw_args)

+def printdate(*args, **kw_args):
+ print(datetime.datetime.today(), *args, sep=' ', **kw_args)
+
def usage():
printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
printerr("where: columns 'all' or 'branches'")
@@ -287,7 +290,7 @@ def do_query(q, s):
return
raise Exception("Query failed: " + q.lastError().text())

-print(datetime.datetime.today(), "Creating database...")
+printdate("Creating database...")

db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
@@ -580,7 +583,7 @@ if perf_db_export_calls:
call_file = open_output_file("call_table.bin")

def trace_begin():
- print(datetime.datetime.today(), "Writing to intermediate files...")
+ printdate("Writing to intermediate files...")
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
@@ -596,7 +599,7 @@ def trace_begin():
unhandled_count = 0

def trace_end():
- print(datetime.datetime.today(), "Copying to database...")
+ printdate("Copying to database...")
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
@@ -611,7 +614,7 @@ def trace_end():
if perf_db_export_calls:
copy_output_file(call_file, "calls")

- print(datetime.datetime.today(), "Removing intermediate files...")
+ printdate("Removing intermediate files...")
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
@@ -626,7 +629,7 @@ def trace_end():
if perf_db_export_calls:
remove_output_file(call_file)
os.rmdir(output_dir_name)
- print(datetime.datetime.today(), "Adding primary keys")
+ printdate("Adding primary keys")
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
@@ -641,7 +644,7 @@ def trace_end():
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')

- print(datetime.datetime.today(), "Adding foreign keys")
+ printdate("Adding foreign keys")
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
@@ -677,8 +680,8 @@ def trace_end():
do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')

if (unhandled_count):
- print(datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events")
- print(datetime.datetime.today(), "Done")
+ printdate("Warning: ", unhandled_count, " unhandled events")
+ printdate("Done")

def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index 3da338243aed..3b71902a5a21 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -65,6 +65,9 @@ perf_db_export_callchains = False
def printerr(*args, **keyword_args):
print(*args, file=sys.stderr, **keyword_args)

+def printdate(*args, **kw_args):
+ print(datetime.datetime.today(), *args, sep=' ', **kw_args)
+
def usage():
printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]");
printerr("where: columns 'all' or 'branches'");
@@ -105,7 +108,7 @@ def do_query_(q):
return
raise Exception("Query failed: " + q.lastError().text())

-print(datetime.datetime.today(), "Creating database ...")
+printdate("Creating database ...")

db_exists = False
try:
@@ -383,7 +386,7 @@ if perf_db_export_calls:
call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")

def trace_begin():
- print(datetime.datetime.today(), "Writing records...")
+ printdate("Writing records...")
do_query(query, 'BEGIN TRANSACTION')
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
@@ -402,14 +405,14 @@ unhandled_count = 0
def trace_end():
do_query(query, 'END TRANSACTION')

- print(datetime.datetime.today(), "Adding indexes")
+ printdate("Adding indexes")
if perf_db_export_calls:
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')

if (unhandled_count):
- print(datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events")
- print(datetime.datetime.today(), "Done")
+ printdate("Warning: ", unhandled_count, " unhandled events")
+ printdate("Done")

def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
--
2.16.4


2019-03-02 01:19:34

by Tony Jones

[permalink] [raw]
Subject: [PATCH v2 5/7] perf script python: add Python3 support to intel-pt-events.py

Support both Python2 and Python3 in the intel-pt-events.py script

There may be differences in the ordering of output lines due to
differences in dictionary ordering etc. However the format within lines
should be unchanged.

The use of 'from __future__' implies the minimum supported Python2 version
is now v2.6

Signed-off-by: Tony Jones <[email protected]>
Signed-off-by: Seeteena Thoufeek <[email protected]>
Cc: Adrian Hunter <[email protected]>
---
tools/perf/scripts/python/intel-pt-events.py | 30 +++++++++++++++++-----------
1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index 2177722f509e..5c42c4c359dc 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.

+from __future__ import print_function
+
import os
import sys
import struct
@@ -22,10 +24,10 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
#from Core import *

def trace_begin():
- print "Intel PT Power Events and PTWRITE"
+ print("Intel PT Power Events and PTWRITE")

def trace_end():
- print "End"
+ print("End")

def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
@@ -35,21 +37,21 @@ def print_ptwrite(raw_buf):
flags = data[0]
payload = data[1]
exact_ip = flags & 1
- print "IP: %u payload: %#x" % (exact_ip, payload),
+ print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')

def print_cbr(raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
f = (data[4] + 500) / 1000
p = ((cbr * 1000 / data[2]) + 5) / 10
- print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),
+ print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ')

def print_mwait(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
- print "hints: %#x extensions: %#x" % (hints, extensions),
+ print("hints: %#x extensions: %#x" % (hints, extensions), end=' ')

def print_pwre(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -57,13 +59,14 @@ def print_pwre(raw_buf):
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
- print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ end=' ')

def print_exstop(raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
- print "IP: %u" % (exact_ip),
+ print("IP: %u" % (exact_ip), end=' ')

def print_pwrx(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -71,18 +74,21 @@ def print_pwrx(raw_buf):
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
- print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
+ print("deepest cstate: %u last cstate: %u wake reason: %#x" %
+ (deepest_cstate, last_cstate, wake_reason), end=' ')

def print_common_start(comm, sample, name):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
- print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
+ (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ end=' ')

def print_common_ip(sample, symbol, dso):
ip = sample["ip"]
- print "%16x %s (%s)" % (ip, symbol, dso)
+ print("%16x %s (%s)" % (ip, symbol, dso))

def process_event(param_dict):
event_attr = param_dict["attr"]
@@ -92,12 +98,12 @@ def process_event(param_dict):
name = param_dict["ev_name"]

# Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
+ if "dso" in param_dict:
dso = param_dict["dso"]
else:
dso = "[unknown]"

- if (param_dict.has_key("symbol")):
+ if "symbol" in param_dict:
symbol = param_dict["symbol"]
else:
symbol = "[unknown]"
--
2.16.4


2019-03-02 01:19:52

by Tony Jones

[permalink] [raw]
Subject: [PATCH v2 2/7] perf script python: add Python3 support to futex-contention.py

Support both Python2 and Python3 in the futex-contention.py script

There may be differences in the ordering of output lines due to
differences in dictionary ordering etc. However the format within lines
should be unchanged.

The use of 'from __future__' implies the minimum supported Python2 version
is now v2.6

Signed-off-by: Tony Jones <[email protected]>
Signed-off-by: Seeteena Thoufeek <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
---
tools/perf/scripts/python/futex-contention.py | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
index f221c62e0a10..0c4841acf75d 100644
--- a/tools/perf/scripts/python/futex-contention.py
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -10,6 +10,8 @@
#
# Measures futex contention

+from __future__ import print_function
+
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
@@ -33,18 +35,18 @@ def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,

def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, ret):
- if thread_blocktime.has_key(tid):
+ if tid in thread_blocktime:
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]

def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")

def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
- print "%s[%d] lock %x contended %d times, %d avg ns" % \
- (process_names[tid], tid, lock, count, avg)
+ print("%s[%d] lock %x contended %d times, %d avg ns" %
+ (process_names[tid], tid, lock, count, avg))

--
2.16.4


2019-03-05 10:20:17

by Adrian Hunter

[permalink] [raw]
Subject: Re: [PATCH v2 0/7] perf script python: add Python3 support

On 2/03/19 3:18 AM, Tony Jones wrote:
> This is v2 of my version of the patchset. Incorporating the
> previous feedback. Some changes from v1 were already merged.
>
> Patch 1/7 deals with the existing inconsistent indentation.
> Indentation is now consistent per file but varying styles (tabs,
> 4 spaces and 8 spaces).
> I will followup at a later date with changes to checkpatch to ensure
> that the syntax per file is maintained.
>
> Patches 2/7 through 5/7 were sent in v1, they have been changed
> to remove the previous indentation changes
>
> Patch 6/7 was sent in v1. I had previously *not* been able to test
> export-to-postgresql.py. I was able to do so this time and found
> that more changes were needed. The author of the original code
> seems concerned about code-style so I would suggest you only merge
> with his explicit ACK.
>
> Patch 7/7 was not in v1, it cleans up some repeated use of date
> functions in the SQL exporters. It is not mandatory for Python3
> support. It is dependent on Patch#6.
>
> I hope I've got everything correct, I've retested until I feel I
> can't look at Python code anymore for a while :-). Hopefully I've
> not made any more mistakes. If I have, please LMK and I'll do v3.

perf tools link against python2, so have the scripts been tested with python3?

$ ldd tools/perf/perf | grep python
libpython2.7.so.1.0 => /usr/lib/x86_64-linux-gnu/libpython2.7.so.1.0
(0x00007ff8a2de2000)

2019-03-05 10:20:54

by Adrian Hunter

[permalink] [raw]
Subject: Re: [PATCH v2 5/7] perf script python: add Python3 support to intel-pt-events.py

On 2/03/19 3:19 AM, Tony Jones wrote:
> Support both Python2 and Python3 in the intel-pt-events.py script
>
> There may be differences in the ordering of output lines due to
> differences in dictionary ordering etc. However the format within lines
> should be unchanged.
>
> The use of 'from __future__' implies the minimum supported Python2 version
> is now v2.6
>
> Signed-off-by: Tony Jones <[email protected]>
> Signed-off-by: Seeteena Thoufeek <[email protected]>
> Cc: Adrian Hunter <[email protected]>

One change missed, see below, otherwise:

Acked-by: Adrian Hunter <[email protected]>

> ---
> tools/perf/scripts/python/intel-pt-events.py | 30 +++++++++++++++++-----------
> 1 file changed, 18 insertions(+), 12 deletions(-)
>
> diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
> index 2177722f509e..5c42c4c359dc 100644
> --- a/tools/perf/scripts/python/intel-pt-events.py
> +++ b/tools/perf/scripts/python/intel-pt-events.py
> @@ -10,6 +10,8 @@
> # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
> # more details.
>
> +from __future__ import print_function
> +
> import os
> import sys
> import struct
> @@ -22,10 +24,10 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
> #from Core import *
>
> def trace_begin():
> - print "Intel PT Power Events and PTWRITE"
> + print("Intel PT Power Events and PTWRITE")
>
> def trace_end():
> - print "End"
> + print("End")
>
> def trace_unhandled(event_name, context, event_fields_dict):
> print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])

Also above line

> @@ -35,21 +37,21 @@ def print_ptwrite(raw_buf):
> flags = data[0]
> payload = data[1]
> exact_ip = flags & 1
> - print "IP: %u payload: %#x" % (exact_ip, payload),
> + print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')
>
> def print_cbr(raw_buf):
> data = struct.unpack_from("<BBBBII", raw_buf)
> cbr = data[0]
> f = (data[4] + 500) / 1000
> p = ((cbr * 1000 / data[2]) + 5) / 10
> - print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),
> + print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ')
>
> def print_mwait(raw_buf):
> data = struct.unpack_from("<IQ", raw_buf)
> payload = data[1]
> hints = payload & 0xff
> extensions = (payload >> 32) & 0x3
> - print "hints: %#x extensions: %#x" % (hints, extensions),
> + print("hints: %#x extensions: %#x" % (hints, extensions), end=' ')
>
> def print_pwre(raw_buf):
> data = struct.unpack_from("<IQ", raw_buf)
> @@ -57,13 +59,14 @@ def print_pwre(raw_buf):
> hw = (payload >> 7) & 1
> cstate = (payload >> 12) & 0xf
> subcstate = (payload >> 8) & 0xf
> - print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
> + print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
> + end=' ')
>
> def print_exstop(raw_buf):
> data = struct.unpack_from("<I", raw_buf)
> flags = data[0]
> exact_ip = flags & 1
> - print "IP: %u" % (exact_ip),
> + print("IP: %u" % (exact_ip), end=' ')
>
> def print_pwrx(raw_buf):
> data = struct.unpack_from("<IQ", raw_buf)
> @@ -71,18 +74,21 @@ def print_pwrx(raw_buf):
> deepest_cstate = payload & 0xf
> last_cstate = (payload >> 4) & 0xf
> wake_reason = (payload >> 8) & 0xf
> - print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
> + print("deepest cstate: %u last cstate: %u wake reason: %#x" %
> + (deepest_cstate, last_cstate, wake_reason), end=' ')
>
> def print_common_start(comm, sample, name):
> ts = sample["time"]
> cpu = sample["cpu"]
> pid = sample["pid"]
> tid = sample["tid"]
> - print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
> + print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
> + (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
> + end=' ')
>
> def print_common_ip(sample, symbol, dso):
> ip = sample["ip"]
> - print "%16x %s (%s)" % (ip, symbol, dso)
> + print("%16x %s (%s)" % (ip, symbol, dso))
>
> def process_event(param_dict):
> event_attr = param_dict["attr"]
> @@ -92,12 +98,12 @@ def process_event(param_dict):
> name = param_dict["ev_name"]
>
> # Symbol and dso info are not always resolved
> - if (param_dict.has_key("dso")):
> + if "dso" in param_dict:
> dso = param_dict["dso"]
> else:
> dso = "[unknown]"
>
> - if (param_dict.has_key("symbol")):
> + if "symbol" in param_dict:
> symbol = param_dict["symbol"]
> else:
> symbol = "[unknown]"
>


2019-03-05 10:48:05

by Adrian Hunter

[permalink] [raw]
Subject: Re: [PATCH v2 1/7] perf script python: remove mixed indentation

On 2/03/19 3:18 AM, Tony Jones wrote:
> Remove mixed indentation in Python scripts. Revert to either all
> tabs (most common form) or all spaces (4 or 8) depending on what
> was the intent of the original commit. This is necessary to
> complete Python3 support as it will flag an error if it encounters
> mixed indentation.
>
> Signed-off-by: Tony Jones <[email protected]>

For intel-pt-events.py

Acked-by: Adrian Hunter <[email protected]>


2019-03-05 15:53:40

by Tony Jones

[permalink] [raw]
Subject: Re: [PATCH v2 5/7] perf script python: add Python3 support to intel-pt-events.py

On 3/5/19 2:16 AM, Adrian Hunter wrote:
> On 2/03/19 3:19 AM, Tony Jones wrote:
>> Support both Python2 and Python3 in the intel-pt-events.py script
>>
>> There may be differences in the ordering of output lines due to
>> differences in dictionary ordering etc. However the format within lines
>> should be unchanged.
>>
>> The use of 'from __future__' implies the minimum supported Python2 version
>> is now v2.6
>>
>> Signed-off-by: Tony Jones <[email protected]>
>> Signed-off-by: Seeteena Thoufeek <[email protected]>
>> Cc: Adrian Hunter <[email protected]>
>
> One change missed, see below, otherwise:

I tested the patch on a Skylake system, but it was not connected to our standard
network, so I had to move the files manually. It seems I somehow managed to mess
up and not attach the correct patch :(
>> def trace_unhandled(event_name, context, event_fields_dict):
>> print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
>
> Also above line

You are correct. Apologies. I'll send a revised version.

tony


2019-03-05 15:55:27

by Tony Jones

[permalink] [raw]
Subject: Re: [PATCH v2 0/7] perf script python: add Python3 support

On 3/5/19 6:53 AM, Tony Jones wrote:
> On 3/5/19 1:55 AM, Adrian Hunter wrote:
>
>> perf tools link against python2 so have the scripts been tested with python3?
>
> Not if PYTHON=python3 is specified on the build line. Yes, of course they were tested.

See below for exporter. The viewer also works but obviously harder to demonstrate via log.

$ make PYTHON=python3 prefix=/tmp/perf install
$ ldd /tmp/perf/bin/perf | grep python
libpython3.6m.so.1.0 => /usr/lib64/libpython3.6m.so.1.0 (0x00007f90d32c3000)

# /tmp/perf/bin/perf record -o /tmp/perf.data -e cycles,instructions,branches /bin/false
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.017 MB /tmp/perf.data (39 samples) ]

postgres@svr1:~> id
uid=26(postgres) gid=26(postgres) groups=26(postgres)

postgres@svr1:~> ldd /tmp/perf/bin/perf | grep python
libpython3.6m.so.1.0 => /usr/lib64/libpython3.6m.so.1.0 (0x00007f7714156000)

postgres@svr1:~> /tmp/perf/bin/perf script -i /tmp/perf.data -s /tmp/perf/libexec/perf-core/scripts/python/export-to-postgresql.py db1
2019-03-05 07:14:04.243419 Creating database...
This version of PostgreSQL is not supported and may not work.
This version of PostgreSQL is not supported and may not work.
2019-03-05 07:14:05.122981 Writing to intermediate files...
2019-03-05 07:14:05.187927 Copying to database...
2019-03-05 07:14:05.222272 Removing intermediate files...
2019-03-05 07:14:05.222766 Adding primary keys
2019-03-05 07:14:05.714776 Adding foreign keys

postgres@svr1:~> pg_dump -a -t public.comms -t public.dsos db1
--
-- PostgreSQL database dump
--

-- Dumped from database version 10.6
-- Dumped by pg_dump version 10.6

SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET client_min_messages = warning;
SET row_security = off;

--
-- Data for Name: comms; Type: TABLE DATA; Schema: public; Owner: postgres
--

COPY public.comms (id, comm) FROM stdin;
0 unknown
1 perf
2 false
\.


--
-- Data for Name: dsos; Type: TABLE DATA; Schema: public; Owner: postgres
--

COPY public.dsos (id, machine_id, short_name, long_name, build_id) FROM stdin;
0 0 unknown unknown
1 1 [kernel.kallsyms] [kernel.kallsyms] 0621827fb34281c413e26a0c2d6ba1c0abd706b9
2 1 ld-2.26.so /lib64/ld-2.26.so bca321ae4ce1ab788b186e2085f60c1c9723cc14
\.


--
-- PostgreSQL database dump complete
--

2019-03-05 16:11:26

by Tony Jones

[permalink] [raw]
Subject: Re: [PATCH v2 5/7] perf script python: add Python3 support to intel-pt-events.py

On 3/5/19 7:02 AM, Tony Jones wrote:

> I tested the patch on a Skylake system but it was not connected to our standard
> network so I had to move files manually. It seems I managed to somehow mess up
> and not attach the correct patch :(

Sure enough, I managed to attach the wrong patch. Sorry. I'll attach revised (v3)
to this thread as the change is trivial.

I am seeing a lot of "instruction trace errors" on this Skylake system. I see them
also with Python2 and without any of these changes applied, so they appear to be
pre-existing and unrelated to this patchset. I've not investigated further.

Output of v3.

# head /proc/cpuinfo
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 94
model name : Intel(R) Xeon(R) CPU E3-1240 v5 @ 3.50GHz
stepping : 3
microcode : 0xc6
cpu MHz : 1600.887
cache size : 8192 KB
physical id : 0

# ldd /tmp/perf/bin/perf | grep python
libpython3.6m.so.1.0 => /usr/lib64/libpython3.6m.so.1.0 (0x00007fd8fdaae000)

# /tmp/perf/bin/perf script report intel-pt-events -i /tmp/perf.data | tail -20
swapper 0/0 [006] 17424.535324338 cbr: 27 freq: 2703 MHz ( 77%) 0 [unknown] ([unknown])
perf 20899/20899 [003] 17424.535324363 cbr: 27 freq: 2703 MHz ( 77%) 0 [unknown] ([unknown])
swapper 0/0 [004] 17424.535324387 cbr: 27 freq: 2703 MHz ( 77%) 0 [unknown] ([unknown])
swapper 0/0 [000] 17424.535324867 cbr: 27 freq: 2703 MHz ( 77%) 0 [unknown] ([unknown])
swapper 0/0 [006] 17424.543315376 cbr: 24 freq: 2403 MHz ( 69%) 0 [unknown] ([unknown])
perf 20899/20899 [003] 17424.699315686 cbr: 16 freq: 1602 MHz ( 46%) 0 [unknown] ([unknown])
swapper 0/0 [006] 17424.714915068 cbr: 16 freq: 1602 MHz ( 46%) 0 [unknown] ([unknown])
swapper 0/0 [005] 17424.779321732 cbr: 9 freq: 901 MHz ( 26%) 0 [unknown] ([unknown])
swapper 0/0 [001] 17424.779322738 cbr: 9 freq: 901 MHz ( 26%) 0 [unknown] ([unknown])
swapper 0/0 [003] 17424.903321958 cbr: 9 freq: 901 MHz ( 26%) 0 [unknown] ([unknown])
swapper 0/0 [000] 17425.099316258 cbr: 16 freq: 1602 MHz ( 46%) 0 [unknown] ([unknown])
swapper 0/0 [003] 17425.107316167 cbr: 16 freq: 1602 MHz ( 46%) 0 [unknown] ([unknown])
swapper 0/0 [004] 17425.519574381 cbr: 16 freq: 1602 MHz ( 46%) 0 [unknown] ([unknown])
swapper 0/0 [001] 17425.519796998 cbr: 16 freq: 1602 MHz ( 46%) 0 [unknown] ([unknown])
swapper 0/0 [002] 17425.519828095 cbr: 16 freq: 1602 MHz ( 46%) 0 [unknown] ([unknown])
swapper 0/0 [005] 17425.519897417 cbr: 16 freq: 1602 MHz ( 46%) 0 [unknown] ([unknown])
swapper 0/0 [007] 17425.519936473 cbr: 16 freq: 1602 MHz ( 46%) 0 [unknown] ([unknown])
Warning:
260 instruction trace errors
End

2019-03-05 16:32:25

by Tony Jones

[permalink] [raw]
Subject: Re: [PATCH v2 0/7] perf script python: add Python3 support

On 3/5/19 1:55 AM, Adrian Hunter wrote:

> perf tools link against python2 so have the scripts been tested with python3?

Not if PYTHON=python3 is specified on the build line. Yes, of course they were tested.


2019-03-05 17:56:03

by Tony Jones

[permalink] [raw]
Subject: Re: [PATCH v2 5/7] perf script python: add Python3 support to intel-pt-events.py

On 3/5/19 8:10 AM, Tony Jones wrote:

>
> Sure enough, I managed to attach the wrong patch. Sorry. I'll attach revised (v3)
> to this thread as the change is trivial.

From: Tony Jones <[email protected]>
Date: Tue, 05 Mar 2019 08:31:30 -0800
Subject: [PATCH v3] perf script python: add Python3 support to intel-pt-events.py

Support both Python2 and Python3 in the intel-pt-events.py script

There may be differences in the ordering of output lines due to
differences in dictionary ordering etc. However the format within lines
should be unchanged.

The use of 'from __future__' implies the minimum supported Python2 version
is now v2.6

Signed-off-by: Tony Jones <[email protected]>
Signed-off-by: Seeteena Thoufeek <[email protected]>
Cc: Adrian Hunter <[email protected]>
---
tools/perf/scripts/python/intel-pt-events.py | 32 ++++++++++++++++-----------
1 file changed, 19 insertions(+), 13 deletions(-)

--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.

+from __future__ import print_function
+
import os
import sys
import struct
@@ -22,34 +24,34 @@ sys.path.append(os.environ['PERF_EXEC_PA
#from Core import *

def trace_begin():
- print "Intel PT Power Events and PTWRITE"
+ print("Intel PT Power Events and PTWRITE")

def trace_end():
- print "End"
+ print("End")

def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))

def print_ptwrite(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
flags = data[0]
payload = data[1]
exact_ip = flags & 1
- print "IP: %u payload: %#x" % (exact_ip, payload),
+ print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')

def print_cbr(raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
f = (data[4] + 500) / 1000
p = ((cbr * 1000 / data[2]) + 5) / 10
- print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),
+ print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ')

def print_mwait(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
- print "hints: %#x extensions: %#x" % (hints, extensions),
+ print("hints: %#x extensions: %#x" % (hints, extensions), end=' ')

def print_pwre(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -57,13 +59,14 @@ def print_pwre(raw_buf):
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
- print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ end=' ')

def print_exstop(raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
- print "IP: %u" % (exact_ip),
+ print("IP: %u" % (exact_ip), end=' ')

def print_pwrx(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -71,18 +74,21 @@ def print_pwrx(raw_buf):
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
- print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
+ print("deepest cstate: %u last cstate: %u wake reason: %#x" %
+ (deepest_cstate, last_cstate, wake_reason), end=' ')

def print_common_start(comm, sample, name):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
- print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
+ (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ end=' ')

def print_common_ip(sample, symbol, dso):
ip = sample["ip"]
- print "%16x %s (%s)" % (ip, symbol, dso)
+ print("%16x %s (%s)" % (ip, symbol, dso))

def process_event(param_dict):
event_attr = param_dict["attr"]
@@ -92,12 +98,12 @@ def process_event(param_dict):
name = param_dict["ev_name"]

# Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
+ if "dso" in param_dict:
dso = param_dict["dso"]
else:
dso = "[unknown]"

- if (param_dict.has_key("symbol")):
+ if "symbol" in param_dict:
symbol = param_dict["symbol"]
else:
symbol = "[unknown]"

2019-03-06 09:42:12

by Adrian Hunter

[permalink] [raw]
Subject: Re: [PATCH v2 6/7] perf script python: add Python3 support to sql scripts

On 2/03/19 3:19 AM, Tony Jones wrote:
> Support both Python2 and Python3 in the exported-sql-viewer.py,
> export-to-postgresql.py and export-to-sqlite.py scripts
>
> There may be differences in the ordering of output lines due to
> differences in dictionary ordering etc. However the format within lines
> should be unchanged.
>
> The use of 'from __future__' implies the minimum supported Python2 version
> is now v2.6
>
> Signed-off-by: Tony Jones <[email protected]>
> Signed-off-by: Seeteena Thoufeek <[email protected]>
> Cc: Adrian Hunter <[email protected]>

Apart from one issue (see below), it looks good, thank you!

> ---
> tools/perf/scripts/python/export-to-postgresql.py | 65 +++++++++++++++--------
> tools/perf/scripts/python/export-to-sqlite.py | 23 ++++----
> tools/perf/scripts/python/exported-sql-viewer.py | 42 ++++++++++-----
> 3 files changed, 84 insertions(+), 46 deletions(-)
>
> diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
> index 390a351d15ea..439bbbf1e036 100644
> --- a/tools/perf/scripts/python/export-to-postgresql.py
> +++ b/tools/perf/scripts/python/export-to-postgresql.py
> @@ -10,6 +10,8 @@
> # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
> # more details.
>
> +from __future__ import print_function
> +
> import os
> import sys
> import struct
> @@ -199,6 +201,16 @@ import datetime
>
> from PySide.QtSql import *
>
> +if sys.version_info < (3, 0):
> + def tobytes(str):
> + return str
> +else:
> + def tobytes(str):
> + # Use latin-1 (ISO-8859-1) so all code-points 0-255 will result
> + # in one byte (note utf-8 is 2 bytes for values > 128 and
> + # ascii is limited to values <= 128)
> + return bytes(str, "ISO-8859-1")

Probably this should be the server_encoding, but python2 allowed UTF-8
so let's just use UTF-8 for now. That will also mean doing the conversion
before getting the len(), otherwise len() can be wrong.

Example of unicode symbol (works with python2 but not python3):

$ cat unicode-var.c
void myfunc\U00000520(void)
{
}

int main()
{
myfunc\U00000520();
return 0;
}
$ gcc -O0 -ggdb3 -o unicode-var -finput-charset=UTF-8 -fextended-identifiers -fexec-charset=UTF-8 unicode-var.c
$ perf record -e intel_pt//u ./unicode-var
$ ldd `which perf` | grep python
libpython2.7.so.1.0 => /usr/lib/x86_64-linux-gnu/libpython2.7.so.1.0 (0x00007f2ca45bc000)
$ perf script --itrace=be -s tools/perf/scripts/python/export-to-postgresql.py uvar_1 branches calls
2019-03-06 02:29:22.603095 Creating database...
The server version of this PostgreSQL is unknown, falling back to the client version.
The server version of this PostgreSQL is unknown, falling back to the client version.
2019-03-06 02:29:22.945439 Writing to intermediate files...
2019-03-06 02:29:22.991863 Copying to database...
2019-03-06 02:29:23.017039 Removing intermediate files...
2019-03-06 02:29:23.017542 Adding primary keys
2019-03-06 02:29:23.097973 Adding foreign keys
2019-03-06 02:29:23.161803 Done
$ make PYTHON=python3 -C tools/perf install >/dev/null
$ ldd `which perf` | grep python
libpython3.6m.so.1.0 => /usr/lib/x86_64-linux-gnu/libpython3.6m.so.1.0 (0x00007f4ec161f000)
$ perf script --itrace=be -s tools/perf/scripts/python/export-to-postgresql.py uvar_2 branches calls
2019-03-06 02:36:19.837460 Creating database...
The server version of this PostgreSQL is unknown, falling back to the client version.
The server version of this PostgreSQL is unknown, falling back to the client version.
2019-03-06 02:36:20.168318 Writing to intermediate files...
Traceback (most recent call last):
File "tools/perf/scripts/python/export-to-postgresql.py", line 733, in symbol_table
tobytes(symbol_name))
File "tools/perf/scripts/python/export-to-postgresql.py", line 212, in tobytes
return bytes(str, "ISO-8859-1")
UnicodeEncodeError: 'latin-1' codec can't encode character '\u0520' in position 6: ordinal not in range(256)
Fatal Python error: problem in Python trace event handler

Current thread 0x00007f1706eb5740 (most recent call first):
Aborted (core dumped)

> +
> # Need to access PostgreSQL C library directly to use COPY FROM STDIN
> from ctypes import *
> libpq = CDLL("libpq.so.5")
> @@ -234,12 +246,14 @@ perf_db_export_mode = True
> perf_db_export_calls = False
> perf_db_export_callchains = False
>
> +def printerr(*args, **kw_args):
> + print(*args, file=sys.stderr, **kw_args)
>
> def usage():
> - print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
> - print >> sys.stderr, "where: columns 'all' or 'branches'"
> - print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
> - print >> sys.stderr, " callchains 'callchains' => create call_paths table"
> + printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
> + printerr("where: columns 'all' or 'branches'")
> + printerr(" calls 'calls' => create calls and call_paths table")
> + printerr(" callchains 'callchains' => create call_paths table")
> raise Exception("Too few arguments")
>
> if (len(sys.argv) < 2):
> @@ -273,7 +287,7 @@ def do_query(q, s):
> return
> raise Exception("Query failed: " + q.lastError().text())
>
> -print datetime.datetime.today(), "Creating database..."
> +print(datetime.datetime.today(), "Creating database...")
>
> db = QSqlDatabase.addDatabase('QPSQL')
> query = QSqlQuery(db)
> @@ -506,12 +520,12 @@ do_query(query, 'CREATE VIEW samples_view AS '
> ' FROM samples')
>
>
> -file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
> -file_trailer = "\377\377"
> +file_header = struct.pack("!11sii", tobytes("PGCOPY\n\377\r\n\0"), 0, 0)
> +file_trailer = tobytes("\377\377")

Please use bytes literals here i.e. b"PGCOPY\n\377\r\n\0"

>
> def open_output_file(file_name):
> path_name = output_dir_name + "/" + file_name
> - file = open(path_name, "w+")
> + file = open(path_name, "wb+")
> file.write(file_header)
> return file
>
> @@ -526,13 +540,13 @@ def copy_output_file_direct(file, table_name):
>
> # Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
> def copy_output_file(file, table_name):
> - conn = PQconnectdb("dbname = " + dbname)
> + conn = PQconnectdb(tobytes("dbname = " + dbname))

This is sending bytes to the client library, whereas the data files
are loaded by the server. I guess they could use different character
encodings, so we should at least add a comment here that the same
encoding is being used for both.

> if (PQstatus(conn)):
> raise Exception("COPY FROM STDIN PQconnectdb failed")
> file.write(file_trailer)
> file.seek(0)
> sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
> - res = PQexec(conn, sql)
> + res = PQexec(conn, tobytes(sql))
> if (PQresultStatus(res) != 4):
> raise Exception("COPY FROM STDIN PQexec failed")
> data = file.read(65536)
> @@ -566,7 +580,7 @@ if perf_db_export_calls:
> call_file = open_output_file("call_table.bin")
>
> def trace_begin():
> - print datetime.datetime.today(), "Writing to intermediate files..."
> + print(datetime.datetime.today(), "Writing to intermediate files...")
> # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
> evsel_table(0, "unknown")
> machine_table(0, 0, "unknown")
> @@ -582,7 +596,7 @@ def trace_begin():
> unhandled_count = 0
>
> def trace_end():
> - print datetime.datetime.today(), "Copying to database..."
> + print(datetime.datetime.today(), "Copying to database...")
> copy_output_file(evsel_file, "selected_events")
> copy_output_file(machine_file, "machines")
> copy_output_file(thread_file, "threads")
> @@ -597,7 +611,7 @@ def trace_end():
> if perf_db_export_calls:
> copy_output_file(call_file, "calls")
>
> - print datetime.datetime.today(), "Removing intermediate files..."
> + print(datetime.datetime.today(), "Removing intermediate files...")
> remove_output_file(evsel_file)
> remove_output_file(machine_file)
> remove_output_file(thread_file)
> @@ -612,7 +626,7 @@ def trace_end():
> if perf_db_export_calls:
> remove_output_file(call_file)
> os.rmdir(output_dir_name)
> - print datetime.datetime.today(), "Adding primary keys"
> + print(datetime.datetime.today(), "Adding primary keys")
> do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
> do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
> do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
> @@ -627,7 +641,7 @@ def trace_end():
> if perf_db_export_calls:
> do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
>
> - print datetime.datetime.today(), "Adding foreign keys"
> + print(datetime.datetime.today(), "Adding foreign keys")
> do_query(query, 'ALTER TABLE threads '
> 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
> 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
> @@ -663,8 +677,8 @@ def trace_end():
> do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
>
> if (unhandled_count):
> - print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
> - print datetime.datetime.today(), "Done"
> + print(datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events")
> + print(datetime.datetime.today(), "Done")
>
> def trace_unhandled(event_name, context, event_fields_dict):
> global unhandled_count
> @@ -676,13 +690,13 @@ def sched__sched_switch(*x):
> def evsel_table(evsel_id, evsel_name, *x):
> n = len(evsel_name)
> fmt = "!hiqi" + str(n) + "s"
> - value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
> + value = struct.pack(fmt, 2, 8, evsel_id, n, tobytes(evsel_name))
> evsel_file.write(value)
>
> def machine_table(machine_id, pid, root_dir, *x):
> n = len(root_dir)
> fmt = "!hiqiii" + str(n) + "s"
> - value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
> + value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, tobytes(root_dir))
> machine_file.write(value)
>
> def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
> @@ -692,7 +706,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
> def comm_table(comm_id, comm_str, *x):
> n = len(comm_str)
> fmt = "!hiqi" + str(n) + "s"
> - value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
> + value = struct.pack(fmt, 2, 8, comm_id, n, tobytes(comm_str))
> comm_file.write(value)
>
> def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
> @@ -705,19 +719,24 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
> n2 = len(long_name)
> n3 = len(build_id)
> fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
> - value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
> + value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1,
> + tobytes(short_name), n2,
> + tobytes(long_name), n3,
> + tobytes(build_id))
> dso_file.write(value)
>
> def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
> n = len(symbol_name)
> fmt = "!hiqiqiqiqiii" + str(n) + "s"
> - value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
> + value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8,
> + sym_start, 8, sym_end, 4, binding, n,
> + tobytes(symbol_name))
> symbol_file.write(value)
>
> def branch_type_table(branch_type, name, *x):
> n = len(name)
> fmt = "!hiii" + str(n) + "s"
> - value = struct.pack(fmt, 2, 4, branch_type, n, name)
> + value = struct.pack(fmt, 2, 4, branch_type, n, tobytes(name))
> branch_type_file.write(value)
>
> def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x):
> diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
> index eb63e6c7107f..3da338243aed 100644
> --- a/tools/perf/scripts/python/export-to-sqlite.py
> +++ b/tools/perf/scripts/python/export-to-sqlite.py
> @@ -10,6 +10,8 @@
> # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
> # more details.
>
> +from __future__ import print_function
> +
> import os
> import sys
> import struct
> @@ -60,11 +62,14 @@ perf_db_export_mode = True
> perf_db_export_calls = False
> perf_db_export_callchains = False
>
> +def printerr(*args, **keyword_args):
> + print(*args, file=sys.stderr, **keyword_args)
> +
> def usage():
> - print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"
> - print >> sys.stderr, "where: columns 'all' or 'branches'"
> - print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
> - print >> sys.stderr, " callchains 'callchains' => create call_paths table"
> + printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]");
> + printerr("where: columns 'all' or 'branches'");
> + printerr(" calls 'calls' => create calls and call_paths table");
> + printerr(" callchains 'callchains' => create call_paths table");
> raise Exception("Too few arguments")
>
> if (len(sys.argv) < 2):
> @@ -100,7 +105,7 @@ def do_query_(q):
> return
> raise Exception("Query failed: " + q.lastError().text())
>
> -print datetime.datetime.today(), "Creating database..."
> +print(datetime.datetime.today(), "Creating database ...")
>
> db_exists = False
> try:
> @@ -378,7 +383,7 @@ if perf_db_export_calls:
> call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
>
> def trace_begin():
> - print datetime.datetime.today(), "Writing records..."
> + print(datetime.datetime.today(), "Writing records...")
> do_query(query, 'BEGIN TRANSACTION')
> # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
> evsel_table(0, "unknown")
> @@ -397,14 +402,14 @@ unhandled_count = 0
> def trace_end():
> do_query(query, 'END TRANSACTION')
>
> - print datetime.datetime.today(), "Adding indexes"
> + print(datetime.datetime.today(), "Adding indexes")
> if perf_db_export_calls:
> do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
> do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
>
> if (unhandled_count):
> - print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
> - print datetime.datetime.today(), "Done"
> + print(datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events")
> + print(datetime.datetime.today(), "Done")
>
> def trace_unhandled(event_name, context, event_fields_dict):
> global unhandled_count
> diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
> index afec9479ca7f..e38518cdcbc3 100755
> --- a/tools/perf/scripts/python/exported-sql-viewer.py
> +++ b/tools/perf/scripts/python/exported-sql-viewer.py
> @@ -88,11 +88,20 @@
> # 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
> # 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
>
> +from __future__ import print_function
> +
> import sys
> import weakref
> import threading
> import string
> -import cPickle
> +try:
> + # Python2
> + import cPickle as pickle
> + # size of pickled integer big enough for record size
> + glb_nsz = 8
> +except ImportError:
> + import pickle
> + glb_nsz = 16
> import re
> import os
> from PySide.QtCore import *
> @@ -102,6 +111,15 @@ from decimal import *
> from ctypes import *
> from multiprocessing import Process, Array, Value, Event
>
> +# xrange is range in Python3
> +try:
> + xrange
> +except NameError:
> + xrange = range
> +
> +def printerr(*args, **keyword_args):
> + print(*args, file=sys.stderr, **keyword_args)
> +
> # Data formatting helpers
>
> def tohex(ip):
> @@ -1004,10 +1022,6 @@ class ChildDataItemFinder():
>
> glb_chunk_sz = 10000
>
> -# size of pickled integer big enough for record size
> -
> -glb_nsz = 8
> -
> # Background process for SQL data fetcher
>
> class SQLFetcherProcess():
> @@ -1066,7 +1080,7 @@ class SQLFetcherProcess():
> return True
> if space >= glb_nsz:
> # Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
> - nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL)
> + nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
> self.buffer[self.local_head : self.local_head + len(nd)] = nd
> self.local_head = 0
> if self.local_tail - self.local_head > sz:
> @@ -1084,9 +1098,9 @@ class SQLFetcherProcess():
> self.wait_event.wait()
>
> def AddToBuffer(self, obj):
> - d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL)
> + d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
> n = len(d)
> - nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL)
> + nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
> sz = n + glb_nsz
> self.WaitForSpace(sz)
> pos = self.local_head
> @@ -1198,12 +1212,12 @@ class SQLFetcher(QObject):
> pos = self.local_tail
> if len(self.buffer) - pos < glb_nsz:
> pos = 0
> - n = cPickle.loads(self.buffer[pos : pos + glb_nsz])
> + n = pickle.loads(self.buffer[pos : pos + glb_nsz])
> if n == 0:
> pos = 0
> - n = cPickle.loads(self.buffer[0 : glb_nsz])
> + n = pickle.loads(self.buffer[0 : glb_nsz])
> pos += glb_nsz
> - obj = cPickle.loads(self.buffer[pos : pos + n])
> + obj = pickle.loads(self.buffer[pos : pos + n])
> self.local_tail = pos + n
> return obj
>
> @@ -2973,7 +2987,7 @@ class DBRef():
>
> def Main():
> if (len(sys.argv) < 2):
> - print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
> + printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}");
> raise Exception("Too few arguments")
>
> dbname = sys.argv[1]
> @@ -2986,8 +3000,8 @@ def Main():
>
> is_sqlite3 = False
> try:
> - f = open(dbname)
> - if f.read(15) == "SQLite format 3":
> + f = open(dbname, "rb")
> + if f.read(15) == b'SQLite format 3':
> is_sqlite3 = True
> f.close()
> except:
>


2019-03-06 09:44:31

by Adrian Hunter

[permalink] [raw]
Subject: Re: [PATCH v2 5/7] perf script python: add Python3 support to intel-pt-events.py

On 5/03/19 6:19 PM, Tony Jones wrote:
> On 3/5/19 8:10 AM, Tony Jones wrote:
>
>>
>> Sure enough, I managed to attach the wrong patch. Sorry. I'll attach revised (v3)
>> to this thread as the change is trivial.
>
> From: Tony Jones <[email protected]>
> Date: Tue, 05 Mar 2019 08:31:30 -0800
> Subject: [PATCH v3] perf script python: add Python3 support to intel-pt-events.py
>
> Support both Python2 and Python3 in the intel-pt-events.py script
>
> There may be differences in the ordering of output lines due to
> differences in dictionary ordering etc. However the format within lines
> should be unchanged.
>
> The use of 'from __future__' implies the minimum supported Python2 version
> is now v2.6
>
> Signed-off-by: Tony Jones <[email protected]>
> Signed-off-by: Seeteena Thoufeek <[email protected]>
> Cc: Adrian Hunter <[email protected]>

Acked-by: Adrian Hunter <[email protected]>

> ---
> tools/perf/scripts/python/intel-pt-events.py | 32 ++++++++++++++++-----------
> 1 file changed, 19 insertions(+), 13 deletions(-)
>
> --- a/tools/perf/scripts/python/intel-pt-events.py
> +++ b/tools/perf/scripts/python/intel-pt-events.py
> @@ -10,6 +10,8 @@
> # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
> # more details.
>
> +from __future__ import print_function
> +
> import os
> import sys
> import struct
> @@ -22,34 +24,34 @@ sys.path.append(os.environ['PERF_EXEC_PA
> #from Core import *
>
> def trace_begin():
> - print "Intel PT Power Events and PTWRITE"
> + print("Intel PT Power Events and PTWRITE")
>
> def trace_end():
> - print "End"
> + print("End")
>
> def trace_unhandled(event_name, context, event_fields_dict):
> - print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
> + print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
>
> def print_ptwrite(raw_buf):
> data = struct.unpack_from("<IQ", raw_buf)
> flags = data[0]
> payload = data[1]
> exact_ip = flags & 1
> - print "IP: %u payload: %#x" % (exact_ip, payload),
> + print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')
>
> def print_cbr(raw_buf):
> data = struct.unpack_from("<BBBBII", raw_buf)
> cbr = data[0]
> f = (data[4] + 500) / 1000
> p = ((cbr * 1000 / data[2]) + 5) / 10
> - print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),
> + print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ')
>
> def print_mwait(raw_buf):
> data = struct.unpack_from("<IQ", raw_buf)
> payload = data[1]
> hints = payload & 0xff
> extensions = (payload >> 32) & 0x3
> - print "hints: %#x extensions: %#x" % (hints, extensions),
> + print("hints: %#x extensions: %#x" % (hints, extensions), end=' ')
>
> def print_pwre(raw_buf):
> data = struct.unpack_from("<IQ", raw_buf)
> @@ -57,13 +59,14 @@ def print_pwre(raw_buf):
> hw = (payload >> 7) & 1
> cstate = (payload >> 12) & 0xf
> subcstate = (payload >> 8) & 0xf
> - print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
> + print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
> + end=' ')
>
> def print_exstop(raw_buf):
> data = struct.unpack_from("<I", raw_buf)
> flags = data[0]
> exact_ip = flags & 1
> - print "IP: %u" % (exact_ip),
> + print("IP: %u" % (exact_ip), end=' ')
>
> def print_pwrx(raw_buf):
> data = struct.unpack_from("<IQ", raw_buf)
> @@ -71,18 +74,21 @@ def print_pwrx(raw_buf):
> deepest_cstate = payload & 0xf
> last_cstate = (payload >> 4) & 0xf
> wake_reason = (payload >> 8) & 0xf
> - print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
> + print("deepest cstate: %u last cstate: %u wake reason: %#x" %
> + (deepest_cstate, last_cstate, wake_reason), end=' ')
>
> def print_common_start(comm, sample, name):
> ts = sample["time"]
> cpu = sample["cpu"]
> pid = sample["pid"]
> tid = sample["tid"]
> - print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
> + print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
> + (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
> + end=' ')
>
> def print_common_ip(sample, symbol, dso):
> ip = sample["ip"]
> - print "%16x %s (%s)" % (ip, symbol, dso)
> + print("%16x %s (%s)" % (ip, symbol, dso))
>
> def process_event(param_dict):
> event_attr = param_dict["attr"]
> @@ -92,12 +98,12 @@ def process_event(param_dict):
> name = param_dict["ev_name"]
>
> # Symbol and dso info are not always resolved
> - if (param_dict.has_key("dso")):
> + if "dso" in param_dict:
> dso = param_dict["dso"]
> else:
> dso = "[unknown]"
>
> - if (param_dict.has_key("symbol")):
> + if "symbol" in param_dict:
> symbol = param_dict["symbol"]
> else:
> symbol = "[unknown]"
>


2019-03-06 19:14:41

by Tony Jones

[permalink] [raw]
Subject: Re: [PATCH v2 6/7] perf script python: add Python3 support to sql scripts

On 3/6/19 1:26 AM, Adrian Hunter wrote:
> On 2/03/19 3:19 AM, Tony Jones wrote:
>> Support both Python2 and Python3 in the exported-sql-viewer.py,
>> export-to-postgresql.py and export-to-sqlite.py scripts
>>
>> There may be differences in the ordering of output lines due to
>> differences in dictionary ordering etc. However the format within lines
>> should be unchanged.
>>
>> The use of 'from __future__' implies the minimum supported Python2 version
>> is now v2.6
>>
>> Signed-off-by: Tony Jones <[email protected]>
>> Signed-off-by: Seeteena Thoufeek <[email protected]>
>> Cc: Adrian Hunter <[email protected]>
>
> Apart from one issue (see below), it looks good, thank you!
>
>> ---
>> tools/perf/scripts/python/export-to-postgresql.py | 65 +++++++++++++++--------
>> tools/perf/scripts/python/export-to-sqlite.py | 23 ++++----
>> tools/perf/scripts/python/exported-sql-viewer.py | 42 ++++++++++-----
>> 3 files changed, 84 insertions(+), 46 deletions(-)
>>
>> diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
>> index 390a351d15ea..439bbbf1e036 100644
>> --- a/tools/perf/scripts/python/export-to-postgresql.py
>> +++ b/tools/perf/scripts/python/export-to-postgresql.py
>> @@ -10,6 +10,8 @@
>> # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
>> # more details.
>>
>> +from __future__ import print_function
>> +
>> import os
>> import sys
>> import struct
>> @@ -199,6 +201,16 @@ import datetime
>>
>> from PySide.QtSql import *
>>
>> +if sys.version_info < (3, 0):
>> + def tobytes(str):
>> + return str
>> +else:
>> + def tobytes(str):
>> + # Use latin-1 (ISO-8859-1) so all code-points 0-255 will result
>> + # in one byte (note utf-8 is 2 bytes for values > 128 and
>> + # ascii is limited to values <= 128)
>> + return bytes(str, "ISO-8859-1")
>
> Probably this should be the server_encoding, but python2 allowed UTF-8
> so let's just use UTF-8 for now. That will also mean doing the conversion
> before getting the len(), otherwise len() can be wrong.

I'm not totally understanding what you're saying here. The rationale for
using latin-1 and not UTF-8 was clearly expressed in the comment. Else you
do indeed run into length issues.

Would it be easier, since you have a) more familiarity with the code and b) some
specific issues I'm not fully understanding, if you just took this patch and
made the changes you want yourself? I doubt I'll ever use these scripts; my
interest is purely in eliminating Python2 as a fixed requirement.

Tony

2019-03-06 21:18:18

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v2 1/7] perf script python: remove mixed indentation

Em Fri, Mar 01, 2019 at 05:18:57PM -0800, Tony Jones escreveu:
> Remove mixed indentation in Python scripts. Revert to either all
> tabs (most common form) or all spaces (4 or 8) depending on what
> was the intent of the original commit. This is necessary to
> complete Python3 support as it will flag an error if it encounters
> mixed indentation.

Thanks, applied.

- Arnaldo

2019-03-06 21:18:20

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v2 2/7] perf script python: add Python3 support to futex-contention.py

Em Fri, Mar 01, 2019 at 05:18:58PM -0800, Tony Jones escreveu:
> Support both Python2 and Python3 in the futex-contention.py script
>
> There may be differences in the ordering of output lines due to
> differences in dictionary ordering etc. However the format within lines
> should be unchanged.
>
> The use of 'from __future__' implies the minimum supported Python2 version
> is now v2.6

thanks, applied

2019-03-06 21:18:45

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v2 4/7] perf script python: add Python3 support to event_analyzing_sample.py

Em Fri, Mar 01, 2019 at 05:19:00PM -0800, Tony Jones escreveu:
> Support both Python2 and Python3 in the event_analyzing_sample.py script
>
> There may be differences in the ordering of output lines due to
> differences in dictionary ordering etc. However the format within lines
> should be unchanged.
>
> The use of 'from __future__' implies the minimum supported Python2 version
> is now v2.6

Thanks, applied.

2019-03-06 21:18:52

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v2 3/7] perf script python: add Python3 support to check-perf-trace.py

Em Fri, Mar 01, 2019 at 05:18:59PM -0800, Tony Jones escreveu:
> Support both Python 2 and Python 3 in the check-perf-trace.py script.
>
> There may be differences in the ordering of output lines due to
> differences in dictionary ordering etc. However the format within lines
> should be unchanged.
>
> The use of from __future__ implies the minimum supported version of
> Python2 is now v2.6

Thanks, applied

2019-03-06 21:19:41

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v2 5/7] perf script python: add Python3 support to intel-pt-events.py

Em Wed, Mar 06, 2019 at 11:28:35AM +0200, Adrian Hunter escreveu:
> On 5/03/19 6:19 PM, Tony Jones wrote:
> > On 3/5/19 8:10 AM, Tony Jones wrote:
> >
> >>
> >> Sure enough, I managed to attach the wrong patch. Sorry. I'll attach revised (v3)
> >> to this thread as the change is trivial.
> >
> > From: Tony Jones <[email protected]>
> > Date: Tue, 05 Mar 2019 08:31:30 -0800
> > Subject: [PATCH v3] perf script python: add Python3 support to intel-pt-events.py
> >
> > Support both Python2 and Python3 in the intel-pt-events.py script
> >
> > There may be differences in the ordering of output lines due to
> > differences in dictionary ordering etc. However the format within lines
> > should be unchanged.
> >
> > The use of 'from __future__' implies the minimum supported Python2 version
> > is now v2.6
> >
> > Signed-off-by: Tony Jones <[email protected]>
> > Signed-off-by: Seeteena Thoufeek <[email protected]>
> > Cc: Adrian Hunter <[email protected]>
>
> Acked-by: Adrian Hunter <[email protected]>

Thanks, applied.

2019-03-06 21:20:01

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v2 7/7] perf script python: add printdate function to SQL exporters

Em Fri, Mar 01, 2019 at 05:19:03PM -0800, Tony Jones escreveu:
> Introduce a printdate function to eliminate the repetitive use of
> datetime.datetime.today() in the SQL exporting scripts.

Seems easy enough, but since Adrian provided the Acked-by to the other
patch, can this be done this time again?

Thanks,

- Arnaldo

> Signed-off-by: Tony Jones <[email protected]>
> Cc: Adrian Hunter <[email protected]>
> ---
> tools/perf/scripts/python/export-to-postgresql.py | 19 +++++++++++--------
> tools/perf/scripts/python/export-to-sqlite.py | 13 ++++++++-----
> 2 files changed, 19 insertions(+), 13 deletions(-)
>
> diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
> index 439bbbf1e036..515dc5506427 100644
> --- a/tools/perf/scripts/python/export-to-postgresql.py
> +++ b/tools/perf/scripts/python/export-to-postgresql.py
> @@ -249,6 +249,9 @@ perf_db_export_callchains = False
> def printerr(*args, **kw_args):
> print(*args, file=sys.stderr, **kw_args)
>
> +def printdate(*args, **kw_args):
> + print(datetime.datetime.today(), *args, sep=' ', **kw_args)
> +
> def usage():
> printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
> printerr("where: columns 'all' or 'branches'")
> @@ -287,7 +290,7 @@ def do_query(q, s):
> return
> raise Exception("Query failed: " + q.lastError().text())
>
> -print(datetime.datetime.today(), "Creating database...")
> +printdate("Creating database...")
>
> db = QSqlDatabase.addDatabase('QPSQL')
> query = QSqlQuery(db)
> @@ -580,7 +583,7 @@ if perf_db_export_calls:
> call_file = open_output_file("call_table.bin")
>
> def trace_begin():
> - print(datetime.datetime.today(), "Writing to intermediate files...")
> + printdate("Writing to intermediate files...")
> # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
> evsel_table(0, "unknown")
> machine_table(0, 0, "unknown")
> @@ -596,7 +599,7 @@ def trace_begin():
> unhandled_count = 0
>
> def trace_end():
> - print(datetime.datetime.today(), "Copying to database...")
> + printdate("Copying to database...")
> copy_output_file(evsel_file, "selected_events")
> copy_output_file(machine_file, "machines")
> copy_output_file(thread_file, "threads")
> @@ -611,7 +614,7 @@ def trace_end():
> if perf_db_export_calls:
> copy_output_file(call_file, "calls")
>
> - print(datetime.datetime.today(), "Removing intermediate files...")
> + printdate("Removing intermediate files...")
> remove_output_file(evsel_file)
> remove_output_file(machine_file)
> remove_output_file(thread_file)
> @@ -626,7 +629,7 @@ def trace_end():
> if perf_db_export_calls:
> remove_output_file(call_file)
> os.rmdir(output_dir_name)
> - print(datetime.datetime.today(), "Adding primary keys")
> + printdate("Adding primary keys")
> do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
> do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
> do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
> @@ -641,7 +644,7 @@ def trace_end():
> if perf_db_export_calls:
> do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
>
> - print(datetime.datetime.today(), "Adding foreign keys")
> + printdate("Adding foreign keys")
> do_query(query, 'ALTER TABLE threads '
> 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
> 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
> @@ -677,8 +680,8 @@ def trace_end():
> do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
>
> if (unhandled_count):
> - print(datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events")
> - print(datetime.datetime.today(), "Done")
> + printdate("Warning: ", unhandled_count, " unhandled events")
> + printdate("Done")
>
> def trace_unhandled(event_name, context, event_fields_dict):
> global unhandled_count
> diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
> index 3da338243aed..3b71902a5a21 100644
> --- a/tools/perf/scripts/python/export-to-sqlite.py
> +++ b/tools/perf/scripts/python/export-to-sqlite.py
> @@ -65,6 +65,9 @@ perf_db_export_callchains = False
> def printerr(*args, **keyword_args):
> print(*args, file=sys.stderr, **keyword_args)
>
> +def printdate(*args, **kw_args):
> + print(datetime.datetime.today(), *args, sep=' ', **kw_args)
> +
> def usage():
> printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]");
> printerr("where: columns 'all' or 'branches'");
> @@ -105,7 +108,7 @@ def do_query_(q):
> return
> raise Exception("Query failed: " + q.lastError().text())
>
> -print(datetime.datetime.today(), "Creating database ...")
> +printdate("Creating database ...")
>
> db_exists = False
> try:
> @@ -383,7 +386,7 @@ if perf_db_export_calls:
> call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
>
> def trace_begin():
> - print(datetime.datetime.today(), "Writing records...")
> + printdate("Writing records...")
> do_query(query, 'BEGIN TRANSACTION')
> # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
> evsel_table(0, "unknown")
> @@ -402,14 +405,14 @@ unhandled_count = 0
> def trace_end():
> do_query(query, 'END TRANSACTION')
>
> - print(datetime.datetime.today(), "Adding indexes")
> + printdate("Adding indexes")
> if perf_db_export_calls:
> do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
> do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
>
> if (unhandled_count):
> - print(datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events")
> - print(datetime.datetime.today(), "Done")
> + printdate("Warning: ", unhandled_count, " unhandled events")
> + printdate("Done")
>
> def trace_unhandled(event_name, context, event_fields_dict):
> global unhandled_count
> --
> 2.16.4

--

- Arnaldo

2019-03-07 18:52:31

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v2 6/7] perf script python: add Python3 support to sql scripts

Em Wed, Mar 06, 2019 at 08:32:42AM -0800, Tony Jones escreveu:
> On 3/6/19 1:26 AM, Adrian Hunter wrote:
> > On 2/03/19 3:19 AM, Tony Jones wrote:
> >> Support both Python2 and Python3 in the exported-sql-viewer.py,
> >> export-to-postgresql.py and export-to-sqlite.py scripts
> >>
> >> There may be differences in the ordering of output lines due to
> >> differences in dictionary ordering etc. However the format within lines
> >> should be unchanged.
> >>
> >> The use of 'from __future__' implies the minimum supported Python2 version
> >> is now v2.6
> >>
> >> Signed-off-by: Tony Jones <[email protected]>
> >> Signed-off-by: Seeteena Thoufeek <[email protected]>
> >> Cc: Adrian Hunter <[email protected]>
> >
> > Apart from one issue (see below), it looks good, thank you!
> >
> >> ---
> >> tools/perf/scripts/python/export-to-postgresql.py | 65 +++++++++++++++--------
> >> tools/perf/scripts/python/export-to-sqlite.py | 23 ++++----
> >> tools/perf/scripts/python/exported-sql-viewer.py | 42 ++++++++++-----
> >> 3 files changed, 84 insertions(+), 46 deletions(-)
> >>
> >> diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
> >> index 390a351d15ea..439bbbf1e036 100644
> >> --- a/tools/perf/scripts/python/export-to-postgresql.py
> >> +++ b/tools/perf/scripts/python/export-to-postgresql.py
> >> @@ -10,6 +10,8 @@
> >> # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
> >> # more details.
> >>
> >> +from __future__ import print_function
> >> +
> >> import os
> >> import sys
> >> import struct
> >> @@ -199,6 +201,16 @@ import datetime
> >>
> >> from PySide.QtSql import *
> >>
> >> +if sys.version_info < (3, 0):
> >> + def tobytes(str):
> >> + return str
> >> +else:
> >> + def tobytes(str):
> >> + # Use latin-1 (ISO-8859-1) so all code-points 0-255 will result
> >> + # in one byte (note utf-8 is 2 bytes for values > 128 and
> >> + # ascii is limited to values <= 128)
> >> + return bytes(str, "ISO-8859-1")
> >
> > Probably this should be the server_encoding, but python2 allowed UTF-8
> > so let's just use UTF-8 for now. That will also mean doing the conversion
> > before getting the len(), otherwise len() can be wrong.
>
> I'm not totally understanding what you're saying here. The rationale for
> using latin-1 and not UTF-8 was clearly expressed in the comment. Else you
> do indeed run into length issues.
>
> Would it be easier, since you have a) more familiarity with the code b) some
> specific issues I'm not fully understanding if you just took this patch and
> made the changes you want yourself. I doubt I'll ever use these scripts, my
> interest is purely in eliminating Python2 as a fixed requirement.

Adrian, can you please reply here? I'm not familiar with this tobytes()
python2/python3 difference, what do you mean about using
'server_encoding'? Where is that defined?

- Arnaldo

2019-03-08 09:50:08

by Adrian Hunter

[permalink] [raw]
Subject: Re: [PATCH v2 6/7] perf script python: add Python3 support to sql scripts

On 7/03/19 8:51 PM, Arnaldo Carvalho de Melo wrote:
> Em Wed, Mar 06, 2019 at 08:32:42AM -0800, Tony Jones escreveu:
>> On 3/6/19 1:26 AM, Adrian Hunter wrote:
>>> On 2/03/19 3:19 AM, Tony Jones wrote:
>>>> Support both Python2 and Python3 in the exported-sql-viewer.py,
>>>> export-to-postgresql.py and export-to-sqlite.py scripts
>>>>
>>>> There may be differences in the ordering of output lines due to
>>>> differences in dictionary ordering etc. However the format within lines
>>>> should be unchanged.
>>>>
>>>> The use of 'from __future__' implies the minimum supported Python2 version
>>>> is now v2.6
>>>>
>>>> Signed-off-by: Tony Jones <[email protected]>
>>>> Signed-off-by: Seeteena Thoufeek <[email protected]>
>>>> Cc: Adrian Hunter <[email protected]>
>>>
>>> Apart from one issue (see below), it looks good, thank you!
>>>
>>>> ---
>>>> tools/perf/scripts/python/export-to-postgresql.py | 65 +++++++++++++++--------
>>>> tools/perf/scripts/python/export-to-sqlite.py | 23 ++++----
>>>> tools/perf/scripts/python/exported-sql-viewer.py | 42 ++++++++++-----
>>>> 3 files changed, 84 insertions(+), 46 deletions(-)
>>>>
>>>> diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
>>>> index 390a351d15ea..439bbbf1e036 100644
>>>> --- a/tools/perf/scripts/python/export-to-postgresql.py
>>>> +++ b/tools/perf/scripts/python/export-to-postgresql.py
>>>> @@ -10,6 +10,8 @@
>>>> # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
>>>> # more details.
>>>>
>>>> +from __future__ import print_function
>>>> +
>>>> import os
>>>> import sys
>>>> import struct
>>>> @@ -199,6 +201,16 @@ import datetime
>>>>
>>>> from PySide.QtSql import *
>>>>
>>>> +if sys.version_info < (3, 0):
>>>> + def tobytes(str):
>>>> + return str
>>>> +else:
>>>> + def tobytes(str):
>>>> + # Use latin-1 (ISO-8859-1) so all code-points 0-255 will result
>>>> + # in one byte (note utf-8 is 2 bytes for values > 128 and
>>>> + # ascii is limited to values <= 128)
>>>> + return bytes(str, "ISO-8859-1")
>>>
>>> Probably this should be the server_encoding, but python2 allowed UTF-8
>>> so let's just use UTF-8 for now. That will also mean doing the conversion
>>> before getting the len(), otherwise len() can be wrong.
>>
>> I'm not totally understanding what you're saying here. The rationale for
>> using latin-1 and not UTF-8 was clearly expressed in the comment. Else you
>> do indeed run into length issues.
>>
>> Would it be easier, since you have a) more familiarity with the code b) some
>> specific issues I'm not fully understanding if you just took this patch and
>> made the changes you want yourself. I doubt I'll ever use these scripts, my
>> interest is purely in eliminating Python2 as a fixed requirement.
>
> Adrian, can you please reply here? I'm not familiar with this tobytes()
> python2/python3 difference, what do you mean about using
> 'server_encoding'? Where is that defined?

Under python 2 the character set was not changed, so UTF-8, for example,
would pass through unmodified.

Under python 3, the perf strings are converted to unicode because that
is what python 3 uses for strings.

So under python 3, the correct character set must be used when converting
back to a character encoding that postgresql expects.

client_encoding is a postgresql connection parameter.

server_encoding is a postgresql database parameter.

To keep things simple for now, I would prefer to hard code UTF-8 rather
than ISO-8859-1 because I think it is more future-proof. UTF-8 is a
superset of ISO-8859-1 but can have multi-byte characters, so the
conversion must be performed before calculating the output string length.

Ideally, the script would ask/tell the client or server what character
encoding to use, but hard coding will do for now.


This is what I would like:


diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 390a351d15ea..00ab972a2eba 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.

+from __future__ import print_function
+
import os
import sys
import struct
@@ -199,6 +201,18 @@ import datetime

from PySide.QtSql import *

+if sys.version_info < (3, 0):
+ def toserverstr(str):
+ return str
+ def toclientstr(str):
+ return str
+else:
+ # Assume UTF-8 server_encoding and client_encoding
+ def toserverstr(str):
+ return bytes(str, "UTF_8")
+ def toclientstr(str):
+ return bytes(str, "UTF_8")
+
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
@@ -234,12 +248,14 @@ perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False

+def printerr(*args, **kw_args):
+ print(*args, file=sys.stderr, **kw_args)

def usage():
- print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
- print >> sys.stderr, "where: columns 'all' or 'branches'"
- print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
- print >> sys.stderr, " callchains 'callchains' => create call_paths table"
+ printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
+ printerr("where: columns 'all' or 'branches'")
+ printerr(" calls 'calls' => create calls and call_paths table")
+ printerr(" callchains 'callchains' => create call_paths table")
raise Exception("Too few arguments")

if (len(sys.argv) < 2):
@@ -273,7 +289,7 @@ def do_query(q, s):
return
raise Exception("Query failed: " + q.lastError().text())

-print datetime.datetime.today(), "Creating database..."
+print(datetime.datetime.today(), "Creating database...")

db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
@@ -506,12 +522,12 @@ do_query(query, 'CREATE VIEW samples_view AS '
' FROM samples')


-file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
-file_trailer = "\377\377"
+file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
+file_trailer = b"\377\377"

def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
- file = open(path_name, "w+")
+ file = open(path_name, "wb+")
file.write(file_header)
return file

@@ -526,13 +542,13 @@ def copy_output_file_direct(file, table_name):

# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
- conn = PQconnectdb("dbname = " + dbname)
+ conn = PQconnectdb(toclientstr("dbname = " + dbname))
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
- res = PQexec(conn, sql)
+ res = PQexec(conn, toclientstr(sql))
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
@@ -566,7 +582,7 @@ if perf_db_export_calls:
call_file = open_output_file("call_table.bin")

def trace_begin():
- print datetime.datetime.today(), "Writing to intermediate files..."
+ print(datetime.datetime.today(), "Writing to intermediate files...")
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
@@ -582,7 +598,7 @@ def trace_begin():
unhandled_count = 0

def trace_end():
- print datetime.datetime.today(), "Copying to database..."
+ print(datetime.datetime.today(), "Copying to database...")
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
@@ -597,7 +613,7 @@ def trace_end():
if perf_db_export_calls:
copy_output_file(call_file, "calls")

- print datetime.datetime.today(), "Removing intermediate files..."
+ print(datetime.datetime.today(), "Removing intermediate files...")
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
@@ -612,7 +628,7 @@ def trace_end():
if perf_db_export_calls:
remove_output_file(call_file)
os.rmdir(output_dir_name)
- print datetime.datetime.today(), "Adding primary keys"
+ print(datetime.datetime.today(), "Adding primary keys")
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
@@ -627,7 +643,7 @@ def trace_end():
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')

- print datetime.datetime.today(), "Adding foreign keys"
+ print(datetime.datetime.today(), "Adding foreign keys")
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
@@ -663,8 +679,8 @@ def trace_end():
do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')

if (unhandled_count):
- print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
- print datetime.datetime.today(), "Done"
+ print(datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events")
+ print(datetime.datetime.today(), "Done")

def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
@@ -674,12 +690,14 @@ def sched__sched_switch(*x):
pass

def evsel_table(evsel_id, evsel_name, *x):
+ evsel_name = toserverstr(evsel_name)
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)

def machine_table(machine_id, pid, root_dir, *x):
+ root_dir = toserverstr(root_dir)
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
@@ -690,6 +708,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
thread_file.write(value)

def comm_table(comm_id, comm_str, *x):
+ comm_str = toserverstr(comm_str)
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
@@ -701,6 +720,9 @@ def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
comm_thread_file.write(value)

def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
+ short_name = toserverstr(short_name)
+ long_name = toserverstr(long_name)
+ build_id = toserverstr(build_id)
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
@@ -709,12 +731,14 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
dso_file.write(value)

def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
+ symbol_name = toserverstr(symbol_name)
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)

def branch_type_table(branch_type, name, *x):
+ name = toserverstr(name)
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)

2019-03-08 12:52:52

by Adrian Hunter

[permalink] [raw]
Subject: Re: [PATCH v2 7/7] perf script python: add printdate function to SQL exporters

On 6/03/19 11:13 PM, Arnaldo Carvalho de Melo wrote:
> Em Fri, Mar 01, 2019 at 05:19:03PM -0800, Tony Jones escreveu:
>> Introduce a printdate function to eliminate the repetitive use of
>> datetime.datetime.today() in the SQL exporting scripts.
>
> Seems easy enough, but since Adrian provided the Acked-by to the other
> patch, can this be done this time again?

Acked-by: Adrian Hunter <[email protected]>


2019-03-08 14:38:24

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v2 6/7] perf script python: add Python3 support to sql scripts

Em Fri, Mar 08, 2019 at 11:47:55AM +0200, Adrian Hunter escreveu:
> On 7/03/19 8:51 PM, Arnaldo Carvalho de Melo wrote:
> > Em Wed, Mar 06, 2019 at 08:32:42AM -0800, Tony Jones escreveu:
> >> On 3/6/19 1:26 AM, Adrian Hunter wrote:
> >>> On 2/03/19 3:19 AM, Tony Jones wrote:
> >>>> Support both Python2 and Python3 in the exported-sql-viewer.py,
> >>>> export-to-postgresql.py and export-to-sqlite.py scripts
> >>>>
> >>>> There may be differences in the ordering of output lines due to
> >>>> differences in dictionary ordering etc. However the format within lines
> >>>> should be unchanged.
> >>>>
> >>>> The use of 'from __future__' implies the minimum supported Python2 version
> >>>> is now v2.6
> >>>>
> >>>> Signed-off-by: Tony Jones <[email protected]>
> >>>> Signed-off-by: Seeteena Thoufeek <[email protected]>
> >>>> Cc: Adrian Hunter <[email protected]>
> >>>
> >>> Apart from one issue (see below), it looks good, thank you!
> >>>
> >>>> ---
> >>>> tools/perf/scripts/python/export-to-postgresql.py | 65 +++++++++++++++--------
> >>>> tools/perf/scripts/python/export-to-sqlite.py | 23 ++++----
> >>>> tools/perf/scripts/python/exported-sql-viewer.py | 42 ++++++++++-----
> >>>> 3 files changed, 84 insertions(+), 46 deletions(-)
> >>>>
> >>>> diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
> >>>> index 390a351d15ea..439bbbf1e036 100644
> >>>> --- a/tools/perf/scripts/python/export-to-postgresql.py
> >>>> +++ b/tools/perf/scripts/python/export-to-postgresql.py
> >>>> @@ -10,6 +10,8 @@
> >>>> # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
> >>>> # more details.
> >>>>
> >>>> +from __future__ import print_function
> >>>> +
> >>>> import os
> >>>> import sys
> >>>> import struct
> >>>> @@ -199,6 +201,16 @@ import datetime
> >>>>
> >>>> from PySide.QtSql import *
> >>>>
> >>>> +if sys.version_info < (3, 0):
> >>>> + def tobytes(str):
> >>>> + return str
> >>>> +else:
> >>>> + def tobytes(str):
> >>>> + # Use latin-1 (ISO-8859-1) so all code-points 0-255 will result
> >>>> + # in one byte (note utf-8 is 2 bytes for values > 128 and
> >>>> + # ascii is limited to values <= 128)
> >>>> + return bytes(str, "ISO-8859-1")
> >>>
> >>> Probably this should be the server_encoding, but python2 allowed UTF-8
> >>> so let's just use UTF-8 for now. That will also mean doing the conversion
> >>> before getting the len(), otherwise len() can be wrong.
> >>
> >> I'm not totally understanding what you're saying here. The rationale for
> >> using latin-1 and not UTF-8 was clearly expressed in the comment. Else you
> >> do indeed run into length issues.
> >>
> >> Would it be easier, since you have a) more familiarity with the code b) some
> >> specific issues I'm not fully understanding if you just took this patch and
> >> made the changes you want yourself. I doubt I'll ever use these scripts, my
> >> interest is purely in eliminating Python2 as a fixed requirement.
> >
> > Adrian, can you please reply here? I'm not familiar with this tobytes()
> > python2/python3 difference, what do you mean about using
> > 'server_encoding'? Where is that defined?
>
> Under python 2 the character set was not changed, so UTF-8, for example,
> would pass through unmodified.
>
> Under python 3, the perf strings are converted to unicode because that
> is what python 3 uses for strings.
>
> So under python 3, the correct character set must be used when converting
> back to a character encoding that postgresql expects.
>
> client_encoding is a postgresql connection parameter.
>
> server_encoding is a postgresql database parameter.
>
> To keep things simple for now, I would prefer to hard code UTF-8 rather
> than ISO-8859-1 because I think it is more future-proof. UTF-8 is a
> superset of ISO-8859-1 but can have multi-byte characters, so the
> conversion must be performed before calculating the output string length.
>
> Ideally, the script would ask/tell the client or server what character
> encoding to use, but hard coding will do for now.
>
>
> This is what I would like:

Tony, can you check this one so that I may process it? Would be nice to
fold Adrian's comments above into the end result, ok?

Thanks,

- Arnaldo

>
> diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
> index 390a351d15ea..00ab972a2eba 100644
> --- a/tools/perf/scripts/python/export-to-postgresql.py
> +++ b/tools/perf/scripts/python/export-to-postgresql.py
> @@ -10,6 +10,8 @@
> # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
> # more details.
>
> +from __future__ import print_function
> +
> import os
> import sys
> import struct
> @@ -199,6 +201,18 @@ import datetime
>
> from PySide.QtSql import *
>
> +if sys.version_info < (3, 0):
> + def toserverstr(str):
> + return str
> + def toclientstr(str):
> + return str
> +else:
> + # Assume UTF-8 server_encoding and client_encoding
> + def toserverstr(str):
> + return bytes(str, "UTF_8")
> + def toclientstr(str):
> + return bytes(str, "UTF_8")
> +
> # Need to access PostgreSQL C library directly to use COPY FROM STDIN
> from ctypes import *
> libpq = CDLL("libpq.so.5")
> @@ -234,12 +248,14 @@ perf_db_export_mode = True
> perf_db_export_calls = False
> perf_db_export_callchains = False
>
> +def printerr(*args, **kw_args):
> + print(*args, file=sys.stderr, **kw_args)
>
> def usage():
> - print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
> - print >> sys.stderr, "where: columns 'all' or 'branches'"
> - print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
> - print >> sys.stderr, " callchains 'callchains' => create call_paths table"
> + printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
> + printerr("where: columns 'all' or 'branches'")
> + printerr(" calls 'calls' => create calls and call_paths table")
> + printerr(" callchains 'callchains' => create call_paths table")
> raise Exception("Too few arguments")
>
> if (len(sys.argv) < 2):
> @@ -273,7 +289,7 @@ def do_query(q, s):
> return
> raise Exception("Query failed: " + q.lastError().text())
>
> -print datetime.datetime.today(), "Creating database..."
> +print(datetime.datetime.today(), "Creating database...")
>
> db = QSqlDatabase.addDatabase('QPSQL')
> query = QSqlQuery(db)
> @@ -506,12 +522,12 @@ do_query(query, 'CREATE VIEW samples_view AS '
> ' FROM samples')
>
>
> -file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
> -file_trailer = "\377\377"
> +file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
> +file_trailer = b"\377\377"
>
> def open_output_file(file_name):
> path_name = output_dir_name + "/" + file_name
> - file = open(path_name, "w+")
> + file = open(path_name, "wb+")
> file.write(file_header)
> return file
>
> @@ -526,13 +542,13 @@ def copy_output_file_direct(file, table_name):
>
> # Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
> def copy_output_file(file, table_name):
> - conn = PQconnectdb("dbname = " + dbname)
> + conn = PQconnectdb(toclientstr("dbname = " + dbname))
> if (PQstatus(conn)):
> raise Exception("COPY FROM STDIN PQconnectdb failed")
> file.write(file_trailer)
> file.seek(0)
> sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
> - res = PQexec(conn, sql)
> + res = PQexec(conn, toclientstr(sql))
> if (PQresultStatus(res) != 4):
> raise Exception("COPY FROM STDIN PQexec failed")
> data = file.read(65536)
> @@ -566,7 +582,7 @@ if perf_db_export_calls:
> call_file = open_output_file("call_table.bin")
>
> def trace_begin():
> - print datetime.datetime.today(), "Writing to intermediate files..."
> + print(datetime.datetime.today(), "Writing to intermediate files...")
> # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
> evsel_table(0, "unknown")
> machine_table(0, 0, "unknown")
> @@ -582,7 +598,7 @@ def trace_begin():
> unhandled_count = 0
>
> def trace_end():
> - print datetime.datetime.today(), "Copying to database..."
> + print(datetime.datetime.today(), "Copying to database...")
> copy_output_file(evsel_file, "selected_events")
> copy_output_file(machine_file, "machines")
> copy_output_file(thread_file, "threads")
> @@ -597,7 +613,7 @@ def trace_end():
> if perf_db_export_calls:
> copy_output_file(call_file, "calls")
>
> - print datetime.datetime.today(), "Removing intermediate files..."
> + print(datetime.datetime.today(), "Removing intermediate files...")
> remove_output_file(evsel_file)
> remove_output_file(machine_file)
> remove_output_file(thread_file)
> @@ -612,7 +628,7 @@ def trace_end():
> if perf_db_export_calls:
> remove_output_file(call_file)
> os.rmdir(output_dir_name)
> - print datetime.datetime.today(), "Adding primary keys"
> + print(datetime.datetime.today(), "Adding primary keys")
> do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
> do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
> do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
> @@ -627,7 +643,7 @@ def trace_end():
> if perf_db_export_calls:
> do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
>
> - print datetime.datetime.today(), "Adding foreign keys"
> + print(datetime.datetime.today(), "Adding foreign keys")
> do_query(query, 'ALTER TABLE threads '
> 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
> 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
> @@ -663,8 +679,8 @@ def trace_end():
> do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
>
> if (unhandled_count):
> - print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
> - print datetime.datetime.today(), "Done"
> + print(datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events")
> + print(datetime.datetime.today(), "Done")
>
> def trace_unhandled(event_name, context, event_fields_dict):
> global unhandled_count
> @@ -674,12 +690,14 @@ def sched__sched_switch(*x):
> pass
>
> def evsel_table(evsel_id, evsel_name, *x):
> + evsel_name = toserverstr(evsel_name)
> n = len(evsel_name)
> fmt = "!hiqi" + str(n) + "s"
> value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
> evsel_file.write(value)
>
> def machine_table(machine_id, pid, root_dir, *x):
> + root_dir = toserverstr(root_dir)
> n = len(root_dir)
> fmt = "!hiqiii" + str(n) + "s"
> value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
> @@ -690,6 +708,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
> thread_file.write(value)
>
> def comm_table(comm_id, comm_str, *x):
> + comm_str = toserverstr(comm_str)
> n = len(comm_str)
> fmt = "!hiqi" + str(n) + "s"
> value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
> @@ -701,6 +720,9 @@ def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
> comm_thread_file.write(value)
>
> def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
> + short_name = toserverstr(short_name)
> + long_name = toserverstr(long_name)
> + build_id = toserverstr(build_id)
> n1 = len(short_name)
> n2 = len(long_name)
> n3 = len(build_id)
> @@ -709,12 +731,14 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
> dso_file.write(value)
>
> def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
> + symbol_name = toserverstr(symbol_name)
> n = len(symbol_name)
> fmt = "!hiqiqiqiqiii" + str(n) + "s"
> value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
> symbol_file.write(value)
>
> def branch_type_table(branch_type, name, *x):
> + name = toserverstr(name)
> n = len(name)
> fmt = "!hiii" + str(n) + "s"
> value = struct.pack(fmt, 2, 4, branch_type, n, name)

--

- Arnaldo

Subject: [tip:perf/urgent] perf script python: Add Python3 support to futex-contention.py

Commit-ID: de2ec16bd438945813198d4de2339a396904c206
Gitweb: https://git.kernel.org/tip/de2ec16bd438945813198d4de2339a396904c206
Author: Tony Jones <[email protected]>
AuthorDate: Fri, 1 Mar 2019 17:18:58 -0800
Committer: Arnaldo Carvalho de Melo <[email protected]>
CommitDate: Wed, 6 Mar 2019 18:10:43 -0300

perf script python: Add Python3 support to futex-contention.py

Support both Python2 and Python3 in the futex-contention.py script

There may be differences in the ordering of output lines due to
differences in dictionary ordering etc. However the format within lines
should be unchanged.

The use of 'from __future__' implies the minimum supported Python2 version
is now v2.6

Signed-off-by: Tony Jones <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Seeteena Thoufeek <[email protected]>
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
---
tools/perf/scripts/python/futex-contention.py | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
index f221c62e0a10..0c4841acf75d 100644
--- a/tools/perf/scripts/python/futex-contention.py
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -10,6 +10,8 @@
#
# Measures futex contention

+from __future__ import print_function
+
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
@@ -33,18 +35,18 @@ def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,

def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, ret):
- if thread_blocktime.has_key(tid):
+ if tid in thread_blocktime:
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]

def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")

def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
- print "%s[%d] lock %x contended %d times, %d avg ns" % \
- (process_names[tid], tid, lock, count, avg)
+ print("%s[%d] lock %x contended %d times, %d avg ns" %
+ (process_names[tid], tid, lock, count, avg))


Subject: [tip:perf/urgent] perf script python: Remove mixed indentation

Commit-ID: b504d7f6876515b74c8e27a44ccdb22372616d97
Gitweb: https://git.kernel.org/tip/b504d7f6876515b74c8e27a44ccdb22372616d97
Author: Tony Jones <[email protected]>
AuthorDate: Fri, 1 Mar 2019 17:18:57 -0800
Committer: Arnaldo Carvalho de Melo <[email protected]>
CommitDate: Wed, 6 Mar 2019 18:09:14 -0300

perf script python: Remove mixed indentation

Remove mixed indentation in Python scripts. Revert to either all tabs
(most common form) or all spaces (4 or 8) depending on what was the
intent of the original commit. This is necessary to complete Python3
support as it will flag an error if it encounters mixed indentation.

Signed-off-by: Tony Jones <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
---
tools/perf/scripts/python/check-perf-trace.py | 65 +++++++++++-----------
tools/perf/scripts/python/compaction-times.py | 8 +--
.../perf/scripts/python/event_analyzing_sample.py | 6 +-
.../perf/scripts/python/failed-syscalls-by-pid.py | 38 ++++++-------
tools/perf/scripts/python/futex-contention.py | 2 +-
tools/perf/scripts/python/intel-pt-events.py | 32 +++++------
tools/perf/scripts/python/mem-phys-addr.py | 7 ++-
tools/perf/scripts/python/net_dropmonitor.py | 2 +-
tools/perf/scripts/python/netdev-times.py | 12 ++--
tools/perf/scripts/python/sched-migration.py | 6 +-
tools/perf/scripts/python/sctop.py | 13 +++--
tools/perf/scripts/python/stackcollapse.py | 2 +-
tools/perf/scripts/python/syscall-counts-by-pid.py | 47 ++++++++--------
tools/perf/scripts/python/syscall-counts.py | 31 +++++------
14 files changed, 136 insertions(+), 135 deletions(-)

diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
index 334599c6032c..f4838db3e518 100644
--- a/tools/perf/scripts/python/check-perf-trace.py
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -23,60 +23,59 @@ def trace_begin():
pass

def trace_end():
- print_unhandled()
+ print_unhandled()

def irq__softirq_entry(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, vec):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, vec):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)

- print_uncommon(context)
+ print_uncommon(context)

- print "vec=%s\n" % \
- (symbol_str("irq__softirq_entry", "vec", vec)),
+ print "vec=%s\n" % (symbol_str("irq__softirq_entry", "vec", vec)),

def kmem__kmalloc(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, call_site, ptr, bytes_req, bytes_alloc,
- gfp_flags):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, call_site, ptr, bytes_req, bytes_alloc,
+ gfp_flags):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)

- print_uncommon(context)
+ print_uncommon(context)

- print "call_site=%u, ptr=%u, bytes_req=%u, " \
+ print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
-
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),

def trace_unhandled(event_name, context, event_fields_dict):
- try:
- unhandled[event_name] += 1
- except TypeError:
- unhandled[event_name] = 1
+ try:
+ unhandled[event_name] += 1
+ except TypeError:
+ unhandled[event_name] = 1

def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
- (event_name, cpu, secs, nsecs, pid, comm),
+ (event_name, cpu, secs, nsecs, pid, comm),

# print trace fields not included in handler args
def print_uncommon(context):
- print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
- % (common_pc(context), trace_flag_str(common_flags(context)), \
- common_lock_depth(context))
+ print "common_preempt_count=%d, common_flags=%s, " \
+ "common_lock_depth=%d, " % \
+ (common_pc(context), trace_flag_str(common_flags(context)),
+ common_lock_depth(context))

def print_unhandled():
- keys = unhandled.keys()
- if not keys:
- return
+ keys = unhandled.keys()
+ if not keys:
+ return

- print "\nunhandled events:\n\n",
+ print "\nunhandled events:\n\n",

- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
+ print "%-40s %10s\n" % ("event", "count"),
+ print "%-40s %10s\n" % ("----------------------------------------", \
+ "-----------"),

- for event_name in keys:
- print "%-40s %10d\n" % (event_name, unhandled[event_name])
+ for event_name in keys:
+ print "%-40s %10d\n" % (event_name, unhandled[event_name])
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
index 239cb0568ec3..2560a042dc6f 100644
--- a/tools/perf/scripts/python/compaction-times.py
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -216,15 +216,15 @@ def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
pair(nr_migrated, nr_failed), None, None)

def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):

chead.increment_pending(common_pid,
None, pair(nr_scanned, nr_taken), None)

def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):

chead.increment_pending(common_pid,
None, None, pair(nr_scanned, nr_taken))
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
index 4e843b9864ec..2ec8915b74c5 100644
--- a/tools/perf/scripts/python/event_analyzing_sample.py
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -37,7 +37,7 @@ con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None

def trace_begin():
- print "In trace_begin:\n"
+ print "In trace_begin:\n"

#
# Will create several tables at the start, pebs_ll is for PEBS data with
@@ -102,7 +102,7 @@ def insert_db(event):
event.ip, event.status, event.dse, event.dla, event.lat))

def trace_end():
- print "In trace_end:\n"
+ print "In trace_end:\n"
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
@@ -187,4 +187,4 @@ def show_pebs_ll():
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
index 3648e8b986ec..310efe5e7e23 100644
--- a/tools/perf/scripts/python/failed-syscalls-by-pid.py
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -58,22 +58,22 @@ def syscalls__sys_exit(event_name, context, common_cpu,
raw_syscalls__sys_exit(**locals())

def print_error_totals():
- if for_comm is not None:
- print("\nsyscall errors for %s:\n" % (for_comm))
- else:
- print("\nsyscall errors:\n")
-
- print("%-30s %10s" % ("comm [pid]", "count"))
- print("%-30s %10s" % ("------------------------------", "----------"))
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print("\n%s [%d]" % (comm, pid))
- id_keys = syscalls[comm][pid].keys()
- for id in id_keys:
- print(" syscall: %-16s" % syscall_name(id))
- ret_keys = syscalls[comm][pid][id].keys()
- for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
- print(" err = %-20s %10d" % (strerror(ret), val))
+ if for_comm is not None:
+ print("\nsyscall errors for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall errors:\n")
+
+ print("%-30s %10s" % ("comm [pid]", "count"))
+ print("%-30s %10s" % ("------------------------------", "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id in id_keys:
+ print(" syscall: %-16s" % syscall_name(id))
+ ret_keys = syscalls[comm][pid][id].keys()
+ for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" err = %-20s %10d" % (strerror(ret), val))
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
index 0f5cf437b602..f221c62e0a10 100644
--- a/tools/perf/scripts/python/futex-contention.py
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -46,5 +46,5 @@ def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
- (process_names[tid], tid, lock, count, avg)
+ (process_names[tid], tid, lock, count, avg)

diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index b19172d673af..2177722f509e 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -85,22 +85,22 @@ def print_common_ip(sample, symbol, dso):
print "%16x %s (%s)" % (ip, symbol, dso)

def process_event(param_dict):
- event_attr = param_dict["attr"]
- sample = param_dict["sample"]
- raw_buf = param_dict["raw_buf"]
- comm = param_dict["comm"]
- name = param_dict["ev_name"]
-
- # Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
- dso = param_dict["dso"]
- else:
- dso = "[unknown]"
-
- if (param_dict.has_key("symbol")):
- symbol = param_dict["symbol"]
- else:
- symbol = "[unknown]"
+ event_attr = param_dict["attr"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
+ comm = param_dict["comm"]
+ name = param_dict["ev_name"]
+
+ # Symbol and dso info are not always resolved
+ if (param_dict.has_key("dso")):
+ dso = param_dict["dso"]
+ else:
+ dso = "[unknown]"
+
+ if (param_dict.has_key("symbol")):
+ symbol = param_dict["symbol"]
+ else:
+ symbol = "[unknown]"

if name == "ptwrite":
print_common_start(comm, sample, name)
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
index fb0bbcbfa0f0..1f332e72b9b0 100644
--- a/tools/perf/scripts/python/mem-phys-addr.py
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -44,12 +44,13 @@ def print_memory_type():
print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
print("%-40s %10s %10s\n" % ("----------------------------------------",
"-----------", "-----------"),
- end='');
+ end='');
total = sum(load_mem_type_cnt.values())
for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
key = lambda kv: (kv[1], kv[0]), reverse = True):
- print("%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
- end='')
+ print("%-40s %10d %10.1f%%\n" %
+ (mem_type, count, 100 * count / total),
+ end='')

def trace_begin():
parse_iomem()
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
index 212557a02c50..101059971738 100755
--- a/tools/perf/scripts/python/net_dropmonitor.py
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -7,7 +7,7 @@ import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
index 267bda49325d..ea0c8b90a783 100644
--- a/tools/perf/scripts/python/netdev-times.py
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -124,14 +124,16 @@ def print_receive(hunk):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print(PF_NAPI_POLL %
- (diff_msec(base_t, event['event_t']), event['dev']))
+ (diff_msec(base_t, event['event_t']),
+ event['dev']))
if i == len(event_list) - 1:
print("")
else:
print(PF_JOINT)
else:
print(PF_NET_RECV %
- (diff_msec(base_t, event['event_t']), event['skbaddr'],
+ (diff_msec(base_t, event['event_t']),
+ event['skbaddr'],
event['len']))
if 'comm' in event.keys():
print(PF_WJOINT)
@@ -256,7 +258,7 @@ def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, i
all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi,
- dev_name, work=None, budget=None):
+ dev_name, work=None, budget=None):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name, work, budget)
all_event_list.append(event_info)
@@ -353,7 +355,7 @@ def handle_irq_softirq_exit(event_info):
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
- 'irq_list':irq_list, 'event_list':event_list}
+ 'irq_list':irq_list, 'event_list':event_list}
# merge information realted to a NET_RX softirq
receive_hunk_list.append(rec_data)

@@ -390,7 +392,7 @@ def handle_netif_receive_skb(event_info):
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
- 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
+ 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
index 3984bf51f3c5..8196e3087c9e 100644
--- a/tools/perf/scripts/python/sched-migration.py
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -14,10 +14,10 @@ import sys

from collections import defaultdict
try:
- from UserList import UserList
+ from UserList import UserList
except ImportError:
- # Python 3: UserList moved to the collections package
- from collections import UserList
+ # Python 3: UserList moved to the collections package
+ from collections import UserList

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
index 987ffae7c8ca..6e0278dcb092 100644
--- a/tools/perf/scripts/python/sctop.py
+++ b/tools/perf/scripts/python/sctop.py
@@ -13,9 +13,9 @@ from __future__ import print_function
import os, sys, time

try:
- import thread
+ import thread
except ImportError:
- import _thread as thread
+ import _thread as thread

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -75,11 +75,12 @@ def print_syscall_totals(interval):

print("%-40s %10s" % ("event", "count"))
print("%-40s %10s" %
- ("----------------------------------------",
- "----------"))
+ ("----------------------------------------",
+ "----------"))

- for id, val in sorted(syscalls.items(), key = lambda kv: (kv[1], kv[0]), \
- reverse = True):
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]),
+ reverse = True):
try:
print("%-40s %10d" % (syscall_name(id), val))
except TypeError:
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py
index 5e703efaddcc..b1c4def1410a 100755
--- a/tools/perf/scripts/python/stackcollapse.py
+++ b/tools/perf/scripts/python/stackcollapse.py
@@ -27,7 +27,7 @@ from collections import defaultdict
from optparse import OptionParser, make_option

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
index 42782487b0e9..f254e40c6f0f 100644
--- a/tools/perf/scripts/python/syscall-counts-by-pid.py
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -39,11 +39,10 @@ def trace_end():
print_syscall_totals()

def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
-
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if (for_comm and common_comm != for_comm) or \
- (for_pid and common_pid != for_pid ):
+ (for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
@@ -51,26 +50,26 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[common_comm][common_pid][id] = 1

def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
raw_syscalls__sys_enter(**locals())

def print_syscall_totals():
- if for_comm is not None:
- print("\nsyscall events for %s:\n" % (for_comm))
- else:
- print("\nsyscall events by comm/pid:\n")
-
- print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
- print("%-40s %10s" % ("----------------------------------------",
- "----------"))
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print("\n%s [%d]" % (comm, pid))
- id_keys = syscalls[comm][pid].keys()
- for id, val in sorted(syscalls[comm][pid].items(), \
- key = lambda kv: (kv[1], kv[0]), reverse = True):
- print(" %-38s %10d" % (syscall_name(id), val))
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events by comm/pid:\n")
+
+ print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id, val in sorted(syscalls[comm][pid].items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" %-38s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
index 0ebd89cfd42c..8adb95ff1664 100644
--- a/tools/perf/scripts/python/syscall-counts.py
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -36,8 +36,8 @@ def trace_end():
print_syscall_totals()

def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
@@ -47,20 +47,19 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[id] = 1

def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm, id, args):
raw_syscalls__sys_enter(**locals())

def print_syscall_totals():
- if for_comm is not None:
- print("\nsyscall events for %s:\n" % (for_comm))
- else:
- print("\nsyscall events:\n")
-
- print("%-40s %10s" % ("event", "count"))
- print("%-40s %10s" % ("----------------------------------------",
- "-----------"))
-
- for id, val in sorted(syscalls.items(), key = lambda kv: (kv[1], kv[0]), \
- reverse = True):
- print("%-40s %10d" % (syscall_name(id), val))
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events:\n")
+
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
+
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d" % (syscall_name(id), val))

Subject: [tip:perf/urgent] perf script python: Add Python3 support to event_analyzing_sample.py

Commit-ID: c253c72e9d6723c8b078beb362f050059ef5de39
Gitweb: https://git.kernel.org/tip/c253c72e9d6723c8b078beb362f050059ef5de39
Author: Tony Jones <[email protected]>
AuthorDate: Fri, 1 Mar 2019 17:19:00 -0800
Committer: Arnaldo Carvalho de Melo <[email protected]>
CommitDate: Wed, 6 Mar 2019 18:11:11 -0300

perf script python: Add Python3 support to event_analyzing_sample.py

Support both Python2 and Python3 in the event_analyzing_sample.py script

There may be differences in the ordering of output lines due to
differences in dictionary ordering, etc. However, the format within lines
should be unchanged.

The use of 'from __future__' implies the minimum supported Python2 version
is now v2.6

Signed-off-by: Tony Jones <[email protected]>
Cc: Feng Tang <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Seeteena Thoufeek <[email protected]>
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
---
.../perf/scripts/python/event_analyzing_sample.py | 48 +++++++++++-----------
1 file changed, 25 insertions(+), 23 deletions(-)

diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
index 2ec8915b74c5..aa1e2cfa26a6 100644
--- a/tools/perf/scripts/python/event_analyzing_sample.py
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -15,6 +15,8 @@
# for a x86 HW PMU event: PEBS with load latency data.
#

+from __future__ import print_function
+
import os
import sys
import math
@@ -37,7 +39,7 @@ con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None

def trace_begin():
- print "In trace_begin:\n"
+ print("In trace_begin:\n")

#
# Will create several tables at the start, pebs_ll is for PEBS data with
@@ -76,12 +78,12 @@ def process_event(param_dict):
name = param_dict["ev_name"]

# Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
+ if ("dso" in param_dict):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"

- if (param_dict.has_key("symbol")):
+ if ("symbol" in param_dict):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
@@ -102,7 +104,7 @@ def insert_db(event):
event.ip, event.status, event.dse, event.dla, event.lat))

def trace_end():
- print "In trace_end:\n"
+ print("In trace_end:\n")
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
@@ -123,29 +125,29 @@ def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
- print "There is %d records in gen_events table" % t[0]
+ print("There is %d records in gen_events table" % t[0])
if t[0] == 0:
return

- print "Statistics about the general events grouped by thread/symbol/dso: \n"
+ print("Statistics about the general events grouped by thread/symbol/dso: \n")

# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))

# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

# Group by dso
- print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
+ print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74))
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
- print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%40s %8d %s" % (row[0], row[1], num2sym(row[1])))

#
# This function just shows the basic info, and we could do more with the
@@ -156,35 +158,35 @@ def show_pebs_ll():

count = con.execute("select count(*) from pebs_ll")
for t in count:
- print "There is %d records in pebs_ll table" % t[0]
+ print("There is %d records in pebs_ll table" % t[0])
if t[0] == 0:
return

- print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
+ print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n")

# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))

# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
- print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58))
for row in dseq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
- print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58))
for row in latq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print (' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))

Subject: [tip:perf/urgent] perf script python: Add Python3 support to intel-pt-events.py

Commit-ID: fdf2460c297f1bb2f3bd20b3b52903b267af9050
Gitweb: https://git.kernel.org/tip/fdf2460c297f1bb2f3bd20b3b52903b267af9050
Author: Tony Jones <[email protected]>
AuthorDate: Tue, 5 Mar 2019 08:19:02 -0800
Committer: Arnaldo Carvalho de Melo <[email protected]>
CommitDate: Wed, 6 Mar 2019 18:12:33 -0300

perf script python: Add Python3 support to intel-pt-events.py

Support both Python2 and Python3 in the intel-pt-events.py script

There may be differences in the ordering of output lines due to
differences in dictionary ordering, etc. However, the format within lines
should be unchanged.

The use of 'from __future__' implies the minimum supported Python2 version
is now v2.6

Signed-off-by: Tony Jones <[email protected]>
Acked-by: Adrian Hunter <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Seeteena Thoufeek <[email protected]>
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
---
tools/perf/scripts/python/intel-pt-events.py | 32 +++++++++++++++++-----------
1 file changed, 19 insertions(+), 13 deletions(-)

diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index 2177722f509e..a73847c8f548 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.

+from __future__ import print_function
+
import os
import sys
import struct
@@ -22,34 +24,34 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
#from Core import *

def trace_begin():
- print "Intel PT Power Events and PTWRITE"
+ print("Intel PT Power Events and PTWRITE")

def trace_end():
- print "End"
+ print("End")

def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))

def print_ptwrite(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
flags = data[0]
payload = data[1]
exact_ip = flags & 1
- print "IP: %u payload: %#x" % (exact_ip, payload),
+ print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')

def print_cbr(raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
f = (data[4] + 500) / 1000
p = ((cbr * 1000 / data[2]) + 5) / 10
- print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),
+ print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ')

def print_mwait(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
- print "hints: %#x extensions: %#x" % (hints, extensions),
+ print("hints: %#x extensions: %#x" % (hints, extensions), end=' ')

def print_pwre(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -57,13 +59,14 @@ def print_pwre(raw_buf):
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
- print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ end=' ')

def print_exstop(raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
- print "IP: %u" % (exact_ip),
+ print("IP: %u" % (exact_ip), end=' ')

def print_pwrx(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -71,18 +74,21 @@ def print_pwrx(raw_buf):
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
- print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
+ print("deepest cstate: %u last cstate: %u wake reason: %#x" %
+ (deepest_cstate, last_cstate, wake_reason), end=' ')

def print_common_start(comm, sample, name):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
- print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
+ (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ end=' ')

def print_common_ip(sample, symbol, dso):
ip = sample["ip"]
- print "%16x %s (%s)" % (ip, symbol, dso)
+ print("%16x %s (%s)" % (ip, symbol, dso))

def process_event(param_dict):
event_attr = param_dict["attr"]
@@ -92,12 +98,12 @@ def process_event(param_dict):
name = param_dict["ev_name"]

# Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
+ if "dso" in param_dict:
dso = param_dict["dso"]
else:
dso = "[unknown]"

- if (param_dict.has_key("symbol")):
+ if "symbol" in param_dict:
symbol = param_dict["symbol"]
else:
symbol = "[unknown]"

Subject: [tip:perf/urgent] perf script python: add Python3 support to check-perf-trace.py

Commit-ID: 57e604b16362273af6a517abaa6cd1133a7fc732
Gitweb: https://git.kernel.org/tip/57e604b16362273af6a517abaa6cd1133a7fc732
Author: Tony Jones <[email protected]>
AuthorDate: Fri, 1 Mar 2019 17:18:59 -0800
Committer: Arnaldo Carvalho de Melo <[email protected]>
CommitDate: Wed, 6 Mar 2019 18:10:46 -0300

perf script python: add Python3 support to check-perf-trace.py

Support both Python 2 and Python 3 in the check-perf-trace.py script.

There may be differences in the ordering of output lines due to
differences in dictionary ordering, etc. However, the format within lines
should be unchanged.

The use of from __future__ implies the minimum supported version of
Python2 is now v2.6

Signed-off-by: Tony Jones <[email protected]>
Cc: Tom Zanussi <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Seeteena Thoufeek <[email protected]>
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
---
tools/perf/scripts/python/check-perf-trace.py | 31 +++++++++++++++------------
1 file changed, 17 insertions(+), 14 deletions(-)

diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
index f4838db3e518..d2c22954800d 100644
--- a/tools/perf/scripts/python/check-perf-trace.py
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -7,6 +7,8 @@
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.

+from __future__ import print_function
+
import os
import sys

@@ -19,7 +21,7 @@ from perf_trace_context import *
unhandled = autodict()

def trace_begin():
- print "trace_begin"
+ print("trace_begin")
pass

def trace_end():
@@ -33,7 +35,7 @@ def irq__softirq_entry(event_name, context, common_cpu,

print_uncommon(context)

- print "vec=%s\n" % (symbol_str("irq__softirq_entry", "vec", vec)),
+ print("vec=%s" % (symbol_str("irq__softirq_entry", "vec", vec)))

def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
@@ -44,10 +46,10 @@ def kmem__kmalloc(event_name, context, common_cpu,

print_uncommon(context)

- print "call_site=%u, ptr=%u, bytes_req=%u, " \
- "bytes_alloc=%u, gfp_flags=%s\n" % \
+ print("call_site=%u, ptr=%u, bytes_req=%u, "
+ "bytes_alloc=%u, gfp_flags=%s" %
(call_site, ptr, bytes_req, bytes_alloc,
- flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
+ flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)))

def trace_unhandled(event_name, context, event_fields_dict):
try:
@@ -56,26 +58,27 @@ def trace_unhandled(event_name, context, event_fields_dict):
unhandled[event_name] = 1

def print_header(event_name, cpu, secs, nsecs, pid, comm):
- print "%-20s %5u %05u.%09u %8u %-20s " % \
+ print("%-20s %5u %05u.%09u %8u %-20s " %
(event_name, cpu, secs, nsecs, pid, comm),
+ end=' ')

# print trace fields not included in handler args
def print_uncommon(context):
- print "common_preempt_count=%d, common_flags=%s, " \
- "common_lock_depth=%d, " % \
+ print("common_preempt_count=%d, common_flags=%s, "
+ "common_lock_depth=%d, " %
(common_pc(context), trace_flag_str(common_flags(context)),
- common_lock_depth(context))
+ common_lock_depth(context)))

def print_unhandled():
keys = unhandled.keys()
if not keys:
return

- print "\nunhandled events:\n\n",
+ print("\nunhandled events:\n")

- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))

for event_name in keys:
- print "%-40s %10d\n" % (event_name, unhandled[event_name])
+ print("%-40s %10d\n" % (event_name, unhandled[event_name]))