2015-08-06 23:01:56

by Tony Jones

Subject: [PATCH] perf script/python: add new compaction-times script

This patch creates a new script (compaction-times) to report time
spent in mm compaction. Times are reported in nanoseconds (default) or
microseconds (-u). Option -p will break down times by process id;
-pv will further decompose by each compaction entry/exit.
Entries may be further filtered by pid, pid-range or comm (regex).

The script is useful when analysing workloads that compact memory.
The most common example will be THP allocations on systems with a
lot of uptime whose memory has become fragmented. This is an example
of using the script to analyse a thpscale run from mmtests, which
deliberately fragments memory and allocates THP in 4 separate
threads:

# Recording step, one of the following:
$ perf record -e 'compaction:mm_compaction_*' ./workload
$ perf script record compaction-times

# Reporting: basic
$ perf script report compaction-times
total: 15825912302ns (1416084 migrated 160674 failed)

# Reporting: Per task stall times
$ perf script report compaction-times -- -p
total: 15825912302ns (1416084 migrated 160674 failed)
5024[thpscale]: 2588338168ns (476189 migrated 32 failed)
5025[thpscale]: 2469205861ns (460767 migrated 7 failed)
5026[thpscale]: 8246032631ns (659 migrated 160621 failed)
5023[thpscale]: 2522335642ns (478469 migrated 14 failed)

# Reporting: Per-compaction attempt broken down by task
$ perf script report compaction-times -- -pv
total: 15825912302ns (1416084 migrated 160674 failed)
5024[thpscale]: 2588338168ns (476189 migrated 32 failed)
5024[thpscale].1: 3549721ns (352 migrated 0 failed)
5024[thpscale].2: 173091ns (32 migrated 0 failed)
5024[thpscale].3: 695548ns (160 migrated 0 failed)
5024[thpscale].4: 138561ns (32 migrated 0 failed)
5024[thpscale].5: 1123068ns (224 migrated 0 failed)
5024[thpscale].6: 163610ns (32 migrated 0 failed)
..... output continues ...
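
The positional filter argument accepts a single pid, a pid range or a comm
regex; for example (illustrative invocations only, reusing the pids from the
run above, output not shown):

$ perf script report compaction-times -- -p 5023-5026
$ perf script report compaction-times -- -p thpscale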

Signed-off-by: Tony Jones <[email protected]>
Cc: Mel Gorman <[email protected]>
---
.../scripts/python/bin/compaction-times-record | 2 +
.../scripts/python/bin/compaction-times-report | 4 +
tools/perf/scripts/python/compaction-times.py | 219 +++++++++++++++++++++
3 files changed, 225 insertions(+)
create mode 100644 tools/perf/scripts/python/bin/compaction-times-record
create mode 100644 tools/perf/scripts/python/bin/compaction-times-report
create mode 100644 tools/perf/scripts/python/compaction-times.py

diff --git a/tools/perf/scripts/python/bin/compaction-times-record b/tools/perf/scripts/python/bin/compaction-times-record
new file mode 100644
index 0000000..2b7fc42
--- /dev/null
+++ b/tools/perf/scripts/python/bin/compaction-times-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e compaction:mm_compaction_begin -e compaction:mm_compaction_end -e compaction:mm_compaction_migratepages $@
diff --git a/tools/perf/scripts/python/bin/compaction-times-report b/tools/perf/scripts/python/bin/compaction-times-report
new file mode 100644
index 0000000..65c5b64
--- /dev/null
+++ b/tools/perf/scripts/python/bin/compaction-times-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+#description: display time taken by mm compaction
+#args: [-p|-pv] [-u] [pid|pid-range|comm-regex]
+perf script -s "$PERF_EXEC_PATH"/scripts/python/compaction-times.py $@
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
new file mode 100644
index 0000000..961bc27
--- /dev/null
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -0,0 +1,219 @@
+# report time spent in compaction
+# Licensed under the terms of the GNU GPL License version 2
+
+# testing:
+# 'echo 1 > /proc/sys/vm/compact_memory' to force compaction of all zones
+
+import os
+import sys
+import re
+
+usage = "perf script -s compaction-times.py -- [-p|-pv] [-u] [pid|pid-range|comm-regex]"
+
+class pdisp:
+    DISP_DFL=0
+    DISP_PROC=1
+    DISP_PROC_VERBOSE=2
+
+class comm_filter:
+    def __init__(self, re):
+        self.re = re
+
+    def filter(self, pid, comm):
+        m=self.re.search(comm)
+        return m==None or m.group() == ""
+
+class pid_filter:
+    def __init__(self, low, high):
+        self.low = (0 if low == "" else int(low))
+        self.high = (0 if high == "" else int(high))
+
+    def filter(self, pid, comm):
+        return not (pid >= self.low and (self.high == 0 or pid <= self.high))
+
+def ns(sec, nsec):
+    return (sec * 1000000000) + nsec;
+
+def time(ns):
+    return "%dns" % ns if opt_ns else "%dus" % (round(ns, -3) / 1000)
+
+class cnode:
+    def __init__(self, ns=0, nmigrated=0, nfailed=0):
+        self.ns = ns
+        self.nmigrated = nmigrated
+        self.nfailed = nfailed
+
+    def complete(self, secs, nsecs):
+        self.ns = ns(secs, nsecs) - self.ns
+
+    def increment(self, nmigrated, nfailed):
+        self.nmigrated += nmigrated
+        self.nfailed += nfailed
+
+    def __str__(self):
+        return "%s (%d migrated %d failed)" % (time(self.ns), self.nmigrated, self.nfailed)
+
+class chead:
+    heads = {}
+    ns = 0
+    nmigrated = 0
+    nfailed = 0
+    fobj = None
+
+    @classmethod
+    def add_filter(cls, filter):
+        cls.fobj = filter
+
+    @classmethod
+    def create_pending(cls, pid, comm, start_secs, start_nsecs):
+        filtered=0
+        try:
+            head = cls.heads[pid]
+            filtered=head.is_filtered()
+        except KeyError:
+            if cls.fobj != None:
+                filtered=cls.fobj.filter(pid, comm)
+            head = cls.heads[pid] = chead(comm, pid, filtered)
+
+        if not filtered:
+            head.mark_pending(start_secs, start_nsecs)
+
+    @classmethod
+    def increment_pending(cls, pid, nmigrated, nfailed):
+        head = cls.heads[pid]
+        if not head.is_filtered():
+            if head.is_pending():
+                head.do_increment(nmigrated, nfailed)
+            else:
+                print "missing start compaction event for pid %d" % pid
+
+    @classmethod
+    def complete_pending(cls, pid, secs, nsecs):
+        head = cls.heads[pid]
+        if not head.is_filtered():
+            if head.is_pending():
+                head.make_complete(secs, nsecs)
+            else:
+                print "missing start compaction event for pid %d" % pid
+
+    @classmethod
+    def gen(cls):
+        if opt_proc != pdisp.DISP_DFL:
+            for i in cls.heads:
+                yield cls.heads[i]
+
+    @classmethod
+    def str(cls):
+        return "%s (%d migrated %d failed)" % (time(cls.ns), cls.nmigrated, cls.nfailed)
+
+    def __init__(self, comm, pid, filtered):
+        self.comm = comm
+        self.pid = pid
+        self.ns = 0
+        self.nmigrated = 0
+        self.nfailed = 0
+        self.filtered = filtered
+        self.list = []
+
+    def mark_pending(self, secs, nsecs):
+        self.pending = cnode(ns(secs, nsecs), 0, 0);
+
+    def do_increment(self, nmigrated, nfailed):
+        self.pending.increment(nmigrated, nfailed);
+
+    def make_complete(self, secs, nsecs):
+        self.pending.complete(secs, nsecs)
+        chead.ns += self.pending.ns
+        chead.nmigrated += self.pending.nmigrated
+        chead.nfailed += self.pending.nfailed
+        if opt_proc != pdisp.DISP_DFL:
+            self.ns += self.pending.ns
+            self.nmigrated += self.pending.nmigrated
+            self.nfailed += self.pending.nfailed
+            if opt_proc == pdisp.DISP_PROC_VERBOSE:
+                self.list.append(self.pending);
+        self.pending = None;
+
+    def enumerate(self):
+        if opt_proc == pdisp.DISP_PROC_VERBOSE and not self.is_filtered():
+            for i, pelem in enumerate(self.list):
+                print "%d[%s].%d: %s" % (self.pid, self.comm, i+1, pelem)
+
+    def is_pending(self):
+        return self.pending != None;
+
+    def is_filtered(self):
+        return self.filtered;
+
+    def display(self):
+        if not self.is_filtered():
+            print "%d[%s]: %s (%d migrated %d failed)" % (self.pid, self.comm, time(self.ns), self.nmigrated, self.nfailed)
+
+
+def trace_end():
+    print "total: %s" % chead.str()
+    for i in chead.gen():
+        i.display(),
+        i.enumerate();
+
+def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, nr_migrated, nr_failed):
+
+    chead.increment_pending(common_pid, nr_migrated, nr_failed);
+
+def compaction__mm_compaction_end(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, zone_start, migrate_start, free_start, zone_end,
+    sync, status):
+
+    chead.complete_pending(common_pid, common_secs, common_nsecs);
+
+def compaction__mm_compaction_begin(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, zone_start, migrate_start, free_start, zone_end,
+    sync):
+
+    chead.create_pending(common_pid, common_comm, common_secs, common_nsecs);
+
+comm_re=None
+pid_re=None
+pid_regex="^(\d*)-(\d*)$|^(\d*)$"
+
+opt_proc = pdisp.DISP_DFL
+opt_ns = True
+
+argc=len(sys.argv) - 1
+if argc >= 1:
+    pid_re = re.compile(pid_regex);
+
+    for i, opt in enumerate(sys.argv[1:]):
+        if opt[0] == "-":
+            if opt == "-p":
+                opt_proc = pdisp.DISP_PROC
+            elif opt == "-pv":
+                opt_proc = pdisp.DISP_PROC_VERBOSE
+            elif opt == '-u':
+                opt_ns = False
+            else:
+                print usage
+                exit(1)
+
+        elif i == argc - 1:
+            m=pid_re.search(opt)
+            if m != None and m.group() != "":
+                if m.group(3) != None:
+                    f=pid_filter(m.group(3), m.group(3))
+                else:
+                    f=pid_filter(m.group(1), m.group(2))
+            else:
+                try:
+                    comm_re=re.compile(opt)
+                except:
+                    print "invalid regex '%s'" % opt
+                    print usage
+                    exit(1)
+                f=comm_filter(comm_re)
+
+            chead.add_filter(f)
+
--
2.4.6


2015-08-11 09:13:42

by Vlastimil Babka

Subject: Re: [PATCH] perf script/python: add new compaction-times script

On 08/07/2015 01:01 AM, Tony Jones wrote:
> This patch creates a new script (compaction-times) to report time
> spent in mm compaction. Report times in nanoseconds (default) or
> microseconds (-u). Option -p will break down times by process id,
> -pv will further decompose by each compaction entry/exit.
> Entries may be further filtered by pid, pid-range or comm (regex).
>
> The script is useful when analysing workloads that compact memory.
> The most common example will be THP allocations on systems with a
> lot of uptime that has fragmented memory. This is an example of
> using the script to analyse a thpscale from mmtests which
> deliberately fragments memory and allocates THP in 4 separate
> threads
>
> # Recording step, one of the following;
> $ perf record -e 'compaction:mm_compaction_*' ./workload
> $ perf script record compaction-times
>
> # Reporting: basic
> $ perf script report compaction-times
> total: 15825912302ns (1416084 migrated 160674 failed)
>
> # Reporting: Per task stall times
> $ perf script report compaction-times -- -p
> total: 15825912302ns (1416084 migrated 160674 failed)
> 5024[thpscale]: 2588338168ns (476189 migrated 32 failed)
> 5025[thpscale]: 2469205861ns (460767 migrated 7 failed)
> 5026[thpscale]: 8246032631ns (659 migrated 160621 failed)
> 5023[thpscale]: 2522335642ns (478469 migrated 14 failed)
>
> # Reporting: Per-compaction attempt broken down by task
> $ perf script report compaction-times -- -pv
> total: 15825912302ns (1416084 migrated 160674 failed)
> 5024[thpscale]: 2588338168ns (476189 migrated 32 failed)
> 5024[thpscale].1: 3549721ns (352 migrated 0 failed)
> 5024[thpscale].2: 173091ns (32 migrated 0 failed)
> 5024[thpscale].3: 695548ns (160 migrated 0 failed)
> 5024[thpscale].4: 138561ns (32 migrated 0 failed)
> 5024[thpscale].5: 1123068ns (224 migrated 0 failed)
> 5024[thpscale].6: 163610ns (32 migrated 0 failed)
> ..... output continues ...
>
> Signed-off-by: Tony Jones <[email protected]>

Tested-by: Vlastimil Babka <[email protected]>

It would be nice if the script also reported the number of pages scanned
and isolated by the compaction migration and free scanners.
This would provide the same metrics as vmstat, but with the ability to
break them down per process, which is useful.

You would need to process two extra tracepoints:
mm_compaction_isolate_migratepages
mm_compaction_isolate_freepages

Note that vmstat lumps the pages isolated in either of those into a
shared counter COMPACTISOLATED, but for determining efficiency of each
of the scanners, it would be better if the script reported them separately.
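
The recording step would then need to capture those two events as well, for
example (a sketch; the wildcard record line quoted above already covers them):

$ perf record -e compaction:mm_compaction_begin \
        -e compaction:mm_compaction_end \
        -e compaction:mm_compaction_migratepages \
        -e compaction:mm_compaction_isolate_migratepages \
        -e compaction:mm_compaction_isolate_freepages ./workload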

Thanks,
Vlastimil

2015-08-17 19:48:54

by Tony Jones

Subject: [PATCH v2] perf script/python: add new compaction-times script

This patch creates a new script (compaction-times) to report time
spent in mm compaction. It is possible to report times in nanoseconds
(default) or microseconds (-u). The option -p will break down results
by process id; -pv will further decompose by each compaction entry/exit.
For each compaction entry/exit, what is reported is controlled by the
options -t (report only timing), -m (report migration stats), -ms
(report migration scanner stats) and -fs (report free scanner stats).
The default is to report all. Entries may be further filtered by pid,
pid-range or comm (regex).

The script is useful when analysing workloads that compact memory. The
most common example will be THP allocations on systems with a lot of
uptime whose memory has become fragmented. This is an example of using
the script to analyse a thpscale run from mmtests, which deliberately
fragments memory and allocates THP in 4 separate threads:

# Recording step, one of the following:
$ perf record -e 'compaction:mm_compaction_*' ./workload
$ perf script record compaction-times

# Reporting: basic
$ perf script report compaction-times
total: 2444505743ns migration: moved=357738 failed=39275
free_scanner: scanned=2705578 isolated=387875
migration_scanner: scanned=414426 isolated=397013

# Reporting: Per task stall times
$ perf script report compaction-times -- -t -p
total: 2444505743ns
6384[thpscale]: 740800017ns
6385[thpscale]: 274119512ns
6386[thpscale]: 832961337ns
6383[thpscale]: 596624877ns

# Reporting: Per-compaction attempts for task 6385
$ perf script report compaction-times -- -m -pv 6385
total: 274119512ns migration: moved=14893 failed=24285
6385[thpscale]: 274119512ns migration: moved=14893 failed=24285
6385[thpscale].1: 3033277ns migration: moved=511 failed=1
6385[thpscale].2: 9592094ns migration: moved=1524 failed=12
6385[thpscale].3: 2495587ns migration: moved=512 failed=0
6385[thpscale].4: 2561766ns migration: moved=512 failed=0
6385[thpscale].5: 2523521ns migration: moved=512 failed=0
..... output continues ...
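
The stat selectors can also be combined; for example, to report only the two
scanner counters broken down per task (illustrative invocation, output not
shown):

$ perf script report compaction-times -- -fs -ms -p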

Changes since v1:
- report stats for isolate_migratepages and isolate_freepages
(Vlastimil Babka)
- refactor code to achieve above
- add help text
- output to stdout/stderr explicitly

Signed-off-by: Tony Jones <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Vlastimil Babka <[email protected]>
---
.../scripts/python/bin/compaction-times-record | 2 +
.../scripts/python/bin/compaction-times-report | 4 +
tools/perf/scripts/python/compaction-times.py | 312 +++++++++++++++++++++
3 files changed, 318 insertions(+)
create mode 100644 tools/perf/scripts/python/bin/compaction-times-record
create mode 100644 tools/perf/scripts/python/bin/compaction-times-report
create mode 100644 tools/perf/scripts/python/compaction-times.py

diff --git a/tools/perf/scripts/python/bin/compaction-times-record b/tools/perf/scripts/python/bin/compaction-times-record
new file mode 100644
index 0000000..6edcd40
--- /dev/null
+++ b/tools/perf/scripts/python/bin/compaction-times-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e compaction:mm_compaction_begin -e compaction:mm_compaction_end -e compaction:mm_compaction_migratepages -e compaction:mm_compaction_isolate_migratepages -e compaction:mm_compaction_isolate_freepages $@
diff --git a/tools/perf/scripts/python/bin/compaction-times-report b/tools/perf/scripts/python/bin/compaction-times-report
new file mode 100644
index 0000000..3dc1389
--- /dev/null
+++ b/tools/perf/scripts/python/bin/compaction-times-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+#description: display time taken by mm compaction
+#args: [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]
+perf script -s "$PERF_EXEC_PATH"/scripts/python/compaction-times.py $@
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
new file mode 100644
index 0000000..3dad587
--- /dev/null
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -0,0 +1,312 @@
+# report time spent in compaction
+# Licensed under the terms of the GNU GPL License version 2
+
+# testing:
+# 'echo 1 > /proc/sys/vm/compact_memory' to force compaction of all zones
+
+import os
+import sys
+import re
+
+import signal
+signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+usage = "usage: perf script report compaction-times.py -- [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]\n"
+
+class popt:
+    DISP_DFL = 0
+    DISP_PROC = 1
+    DISP_PROC_VERBOSE=2
+
+class topt:
+    DISP_TIME = 0
+    DISP_MIG = 1
+    DISP_ISOLFREE = 2
+    DISP_ISOLMIG = 4
+    DISP_ALL = 7
+
+class comm_filter:
+    def __init__(self, re):
+        self.re = re
+
+    def filter(self, pid, comm):
+        m = self.re.search(comm)
+        return m == None or m.group() == ""
+
+class pid_filter:
+    def __init__(self, low, high):
+        self.low = (0 if low == "" else int(low))
+        self.high = (0 if high == "" else int(high))
+
+    def filter(self, pid, comm):
+        return not (pid >= self.low and (self.high == 0 or pid <= self.high))
+
+def set_type(t):
+    global opt_disp
+    opt_disp = (t if opt_disp == topt.DISP_ALL else opt_disp|t)
+
+def ns(sec, nsec):
+    return (sec * 1000000000) + nsec
+
+def time(ns):
+    return "%dns" % ns if opt_ns else "%dus" % (round(ns, -3) / 1000)
+
+class pair:
+    def __init__(self, aval, bval, alabel = None, blabel = None):
+        self.alabel = alabel
+        self.blabel = blabel
+        self.aval = aval
+        self.bval = bval
+
+    def __add__(self, rhs):
+        self.aval += rhs.aval
+        self.bval += rhs.bval
+        return self
+
+    def __str__(self):
+        return "%s=%d %s=%d" % (self.alabel, self.aval, self.blabel, self.bval)
+
+class cnode:
+    def __init__(self, ns):
+        self.ns = ns
+        self.migrated = pair(0, 0, "moved", "failed")
+        self.fscan = pair(0,0, "scanned", "isolated")
+        self.mscan = pair(0,0, "scanned", "isolated")
+
+    def __add__(self, rhs):
+        self.ns += rhs.ns
+        self.migrated += rhs.migrated
+        self.fscan += rhs.fscan
+        self.mscan += rhs.mscan
+        return self
+
+    def __str__(self):
+        prev = 0
+        s = "%s " % time(self.ns)
+        if (opt_disp & topt.DISP_MIG):
+            s += "migration: %s" % self.migrated
+            prev = 1
+        if (opt_disp & topt.DISP_ISOLFREE):
+            s += "%sfree_scanner: %s" % (" " if prev else "", self.fscan)
+            prev = 1
+        if (opt_disp & topt.DISP_ISOLMIG):
+            s += "%smigration_scanner: %s" % (" " if prev else "", self.mscan)
+        return s
+
+    def complete(self, secs, nsecs):
+        self.ns = ns(secs, nsecs) - self.ns
+
+    def increment(self, migrated, fscan, mscan):
+        if (migrated != None):
+            self.migrated += migrated
+        if (fscan != None):
+            self.fscan += fscan
+        if (mscan != None):
+            self.mscan += mscan
+
+
+class chead:
+    heads = {}
+    val = cnode(0);
+    fobj = None
+
+    @classmethod
+    def add_filter(cls, filter):
+        cls.fobj = filter
+
+    @classmethod
+    def create_pending(cls, pid, comm, start_secs, start_nsecs):
+        filtered = 0
+        try:
+            head = cls.heads[pid]
+            filtered = head.is_filtered()
+        except KeyError:
+            if cls.fobj != None:
+                filtered = cls.fobj.filter(pid, comm)
+            head = cls.heads[pid] = chead(comm, pid, filtered)
+
+        if not filtered:
+            head.mark_pending(start_secs, start_nsecs)
+
+    @classmethod
+    def increment_pending(cls, pid, migrated, fscan, mscan):
+        head = cls.heads[pid]
+        if not head.is_filtered():
+            if head.is_pending():
+                head.do_increment(migrated, fscan, mscan)
+            else:
+                sys.stderr.write("missing start compaction event for pid %d\n" % pid)
+
+    @classmethod
+    def complete_pending(cls, pid, secs, nsecs):
+        head = cls.heads[pid]
+        if not head.is_filtered():
+            if head.is_pending():
+                head.make_complete(secs, nsecs)
+            else:
+                sys.stderr.write("missing start compaction event for pid %d\n" % pid)
+
+    @classmethod
+    def gen(cls):
+        if opt_proc != popt.DISP_DFL:
+            for i in cls.heads:
+                yield cls.heads[i]
+
+    @classmethod
+    def str(cls):
+        return cls.val
+
+    def __init__(self, comm, pid, filtered):
+        self.comm = comm
+        self.pid = pid
+        self.val = cnode(0)
+        self.pending = None
+        self.filtered = filtered
+        self.list = []
+
+    def __add__(self, rhs):
+        self.ns += rhs.ns
+        self.val += rhs.val
+        return self
+
+    def mark_pending(self, secs, nsecs):
+        self.pending = cnode(ns(secs, nsecs))
+
+    def do_increment(self, migrated, fscan, mscan):
+        self.pending.increment(migrated, fscan, mscan)
+
+    def make_complete(self, secs, nsecs):
+        self.pending.complete(secs, nsecs)
+        chead.val += self.pending
+
+        if opt_proc != popt.DISP_DFL:
+            self.val += self.pending
+
+        if opt_proc == popt.DISP_PROC_VERBOSE:
+            self.list.append(self.pending)
+        self.pending = None
+
+    def enumerate(self):
+        if opt_proc == popt.DISP_PROC_VERBOSE and not self.is_filtered():
+            for i, pelem in enumerate(self.list):
+                sys.stdout.write("%d[%s].%d: %s\n" % (self.pid, self.comm, i+1, pelem))
+
+    def is_pending(self):
+        return self.pending != None
+
+    def is_filtered(self):
+        return self.filtered
+
+    def display(self):
+        if not self.is_filtered():
+            sys.stdout.write("%d[%s]: %s\n" % (self.pid, self.comm, self.val))
+
+
+def trace_end():
+    sys.stdout.write("total: %s\n" % chead.str())
+    for i in chead.gen():
+        i.display(),
+        i.enumerate()
+
+def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, nr_migrated, nr_failed):
+
+    chead.increment_pending(common_pid,
+        pair(nr_migrated, nr_failed), None, None)
+
+def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+
+    chead.increment_pending(common_pid,
+        None, pair(nr_scanned, nr_taken), None)
+
+def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+
+    chead.increment_pending(common_pid,
+        None, None, pair(nr_scanned, nr_taken))
+
+def compaction__mm_compaction_end(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, zone_start, migrate_start, free_start, zone_end,
+    sync, status):
+
+    chead.complete_pending(common_pid, common_secs, common_nsecs)
+
+def compaction__mm_compaction_begin(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, zone_start, migrate_start, free_start, zone_end,
+    sync):
+
+    chead.create_pending(common_pid, common_comm, common_secs, common_nsecs)
+
+def pr_help():
+    global usage
+
+    sys.stdout.write(usage)
+    sys.stdout.write("\n")
+    sys.stdout.write("-h display this help\n")
+    sys.stdout.write("-p display by process\n")
+    sys.stdout.write("-pv display by process (verbose)\n")
+    sys.stdout.write("-t display stall times only\n")
+    sys.stdout.write("-m display stats for migration\n")
+    sys.stdout.write("-fs display stats for free scanner\n")
+    sys.stdout.write("-ms display stats for migration scanner\n")
+    sys.stdout.write("-u display results in microseconds (default nanoseconds)\n")
+
+
+comm_re = None
+pid_re = None
+pid_regex = "^(\d*)-(\d*)$|^(\d*)$"
+
+opt_proc = popt.DISP_DFL
+opt_disp = topt.DISP_ALL
+
+opt_ns = True
+
+argc = len(sys.argv) - 1
+if argc >= 1:
+    pid_re = re.compile(pid_regex)
+
+    for i, opt in enumerate(sys.argv[1:]):
+        if opt[0] == "-":
+            if opt == "-h":
+                pr_help()
+                exit(0);
+            elif opt == "-p":
+                opt_proc = popt.DISP_PROC
+            elif opt == "-pv":
+                opt_proc = popt.DISP_PROC_VERBOSE
+            elif opt == '-u':
+                opt_ns = False
+            elif opt == "-t":
+                set_type(topt.DISP_TIME)
+            elif opt == "-m":
+                set_type(topt.DISP_MIG)
+            elif opt == "-fs":
+                set_type(topt.DISP_ISOLFREE)
+            elif opt == "-ms":
+                set_type(topt.DISP_ISOLMIG)
+            else:
+                sys.exit(usage)
+
+        elif i == argc - 1:
+            m = pid_re.search(opt)
+            if m != None and m.group() != "":
+                if m.group(3) != None:
+                    f = pid_filter(m.group(3), m.group(3))
+                else:
+                    f = pid_filter(m.group(1), m.group(2))
+            else:
+                try:
+                    comm_re=re.compile(opt)
+                except:
+                    sys.stderr.write("invalid regex '%s'" % opt)
+                    sys.exit(usage)
+                f = comm_filter(comm_re)
+
+            chead.add_filter(f)
+
--
2.4.6

2015-08-20 09:33:27

by Vlastimil Babka

Subject: Re: [PATCH v2] perf script/python: add new compaction-times script

On 08/17/2015 09:48 PM, Tony Jones wrote:
> This patch creates a new script (compaction-times) to report time
> spent in mm compaction. It is possible to report times in nanoseconds
> (default) or microseconds (-u). The option -p will break down results
> by process id, -pv will further decompose by each compaction entry/exit.
> For each compaction entry/exit what is reported is controlled by the
> options -t (report only timing), -m (report migration stats), -ms
> (report migration scanner stats) and -fs (report free scanner stats).
> The default is to report all. Entries may be further filtered by pid,
> pid-range or comm (regex).
>
> The script is useful when analysing workloads that compact memory. The
> most common example will be THP allocations on systems with a lot of
> uptime that has fragmented memory. This is an example of using the script
> to analyse a thpscale from mmtests which deliberately fragments memory
> and allocates THP in 4 separate threads
>
> # Recording step, one of the following;
> $ perf record -e 'compaction:mm_compaction_*' ./workload
> $ perf script record compaction-times
>
> # Reporting: basic
> total: 2444505743ns migration: moved=357738 failed=39275
> free_scanner: scanned=2705578 isolated=387875
> migration_scanner: scanned=414426 isolated=397013
>
> # Reporting: Per task stall times
> $ perf script report compaction-times -- -t -p
> total: 2444505743ns
> 6384[thpscale]: 740800017ns
> 6385[thpscale]: 274119512ns
> 6386[thpscale]: 832961337ns
> 6383[thpscale]: 596624877ns
>
> # Reporting: Per-compaction attempts for task 6385
> $ perf script report compaction-times -- -m -pv 6385
> total: 274119512ns migration: moved=14893 failed=24285
> 6385[thpscale]: 274119512ns migration: moved=14893 failed=24285
> 6385[thpscale].1: 3033277ns migration: moved=511 failed=1
> 6385[thpscale].2: 9592094ns migration: moved=1524 failed=12
> 6385[thpscale].3: 2495587ns migration: moved=512 failed=0
> 6385[thpscale].4: 2561766ns migration: moved=512 failed=0
> 6385[thpscale].5: 2523521ns migration: moved=512 failed=0
> ..... output continues ...
>
> Changes since v1:
> - report stats for isolate_migratepages and isolate_freepages
> (Vlastimil Babka)
> - refactor code to achieve above
> - add help text
> - output to stdout/stderr explicitly
>
> Signed-off-by: Tony Jones <[email protected]>
> Cc: Mel Gorman <[email protected]>
> Cc: Vlastimil Babka <[email protected]>

Acked-by: Vlastimil Babka <[email protected]>

Works for me, thanks. I'll leave it to perf experts to judge whether the
options and output formatting match what's common for perf scripts.

2015-08-20 19:42:49

by Arnaldo Carvalho de Melo

Subject: Re: [PATCH v2] perf script/python: add new compaction-times script

On Thu, Aug 20, 2015 at 11:33:24AM +0200, Vlastimil Babka wrote:
> On 08/17/2015 09:48 PM, Tony Jones wrote:
> >This patch creates a new script (compaction-times) to report time
> >spent in mm compaction. It is possible to report times in nanoseconds
> >(default) or microseconds (-u). The option -p will break down results
> >by process id, -pv will further decompose by each compaction entry/exit.
> >For each compaction entry/exit what is reported is controlled by the
> >options -t (report only timing), -m (report migration stats), -ms
> >(report migration scanner stats) and -fs (report free scanner stats).
> >The default is to report all. Entries may be further filtered by pid,
> >pid-range or comm (regex).
> >
> >The script is useful when analysing workloads that compact memory. The
> >most common example will be THP allocations on systems with a lot of
> >uptime that has fragmented memory. This is an example of using the script
> >to analyse a thpscale from mmtests which deliberately fragments memory
> >and allocates THP in 4 separate threads
> >
> > # Recording step, one of the following;
> > $ perf record -e 'compaction:mm_compaction_*' ./workload
> > $ perf script record compaction-times
> >
> > # Reporting: basic
> > total: 2444505743ns migration: moved=357738 failed=39275
> > free_scanner: scanned=2705578 isolated=387875
> > migration_scanner: scanned=414426 isolated=397013
> >
> > # Reporting: Per task stall times
> > $ perf script report compaction-times -- -t -p
> > total: 2444505743ns
> > 6384[thpscale]: 740800017ns
> > 6385[thpscale]: 274119512ns
> > 6386[thpscale]: 832961337ns
> > 6383[thpscale]: 596624877ns
> >
> > # Reporting: Per-compaction attempts for task 6385
> > $ perf script report compaction-times -- -m -pv 6385
> > total: 274119512ns migration: moved=14893 failed=24285
> > 6385[thpscale]: 274119512ns migration: moved=14893 failed=24285
> > 6385[thpscale].1: 3033277ns migration: moved=511 failed=1
> > 6385[thpscale].2: 9592094ns migration: moved=1524 failed=12
> > 6385[thpscale].3: 2495587ns migration: moved=512 failed=0
> > 6385[thpscale].4: 2561766ns migration: moved=512 failed=0
> > 6385[thpscale].5: 2523521ns migration: moved=512 failed=0
> > ..... output continues ...
> >
> >Changes since v1:
> >- report stats for isolate_migratepages and isolate_freepages
> > (Vlastimil Babka)
> >- refactor code to achieve above
> >- add help text
> >- output to stdout/stderr explicitly
> >
> >Signed-off-by: Tony Jones <[email protected]>
> >Cc: Mel Gorman <[email protected]>
> >Cc: Vlastimil Babka <[email protected]>
>
> Acked-by: Vlastimil Babka <[email protected]>
>
> Works for me, thanks. I'll leave it to perf experts to judge if the options
> and output formatting matches what's common for perf scripts.

Well, we don't have any firm set standard for outputting from scripts,
so if you did it inspired by existing scripts outputs, probably that is
good enough, having someone testing it is a plus, and in these cases, if
the "works for me" means you actually tested it, can we replace the
"Acked-by" with a "Tested-by"?

Ingo, others, comments on formatting?

- Arnaldo

2015-08-21 16:02:23

by Tony Jones

Subject: Re: [PATCH v2] perf script/python: add new compaction-times script

On 08/20/2015 12:42 PM, Arnaldo Carvalho de Melo wrote:

> Well, we don't have any firm set standard for outputting from scripts,
> so if you did it inspired by existing scripts outputs, probably that is
> good enough, having someone testing it is a plus, and in these cases, if
> the "works for me" means you actually tested it, can we replace the
> "Acked-by" with a "Tested-by"?

The output seemed consistent with the existing scripts in that, as you
point out, there really isn't much consistency. Taking a second look,
the only thing I can see is that scripts such as futex-contention and
failed-syscalls-by-pid output "comm *[pid]" whereas I'm outputting
"pid[comm]". That's certainly easy to change.

Yes, I believe Vlastimil tested v1 and v2, but I'll let him speak to
this and to whether his approval goes beyond testing (re: acked-by).

thanks

tony

2015-08-21 18:05:50

by Vlastimil Babka

Subject: Re: [PATCH v2] perf script/python: add new compaction-times script

On 20.8.2015 21:42, Arnaldo Carvalho de Melo wrote:
> On Thu, Aug 20, 2015 at 11:33:24AM +0200, Vlastimil Babka wrote:
>> On 08/17/2015 09:48 PM, Tony Jones wrote:
>>
>> Works for me, thanks. I'll leave it to perf experts to judge if the options
>> and output formatting matches what's common for perf scripts.
>
> Well, we don't have any firm set standard for outputting from scripts,
> so if you did it inspired by existing scripts outputs, probably that is
> good enough, having someone testing it is a plus, and in these cases, if
> the "works for me" means you actually tested it, can we replace the
> "Acked-by" with a "Tested-by"?

Yeah you can change to Tested-by. Thanks.

> Ingo, others, comments on formatting?
>
> - Arnaldo
>

Subject: [tip:perf/core] perf scripts python: Add new compaction-times script

Commit-ID: 84e5d89a777cca26cc513264752b439874702dd6
Gitweb: http://git.kernel.org/tip/84e5d89a777cca26cc513264752b439874702dd6
Author: Tony Jones <[email protected]>
AuthorDate: Mon, 17 Aug 2015 12:48:52 -0700
Committer: Arnaldo Carvalho de Melo <[email protected]>
CommitDate: Fri, 28 Aug 2015 14:53:40 -0300

perf scripts python: Add new compaction-times script

This patch creates a new script (compaction-times) to report time
spent in mm compaction. It is possible to report times in nanoseconds
(default) or microseconds (-u).

The option -p will break down results by process id; -pv will further
decompose by each compaction entry/exit.

For each compaction entry/exit what is reported is controlled by the
options:

-t report only timing
-m report migration stats
-ms report migration scanner stats
-fs report free scanner stats

The default is to report all.

Entries may be further filtered by pid, pid-range or comm (regex).

The script is useful when analysing workloads that compact memory. The
most common example will be THP allocations on systems with a lot of
uptime whose memory has become fragmented.

This is an example of using the script to analyse a thpscale run from
mmtests, which deliberately fragments memory and allocates THP in 4
separate threads:

# Recording step, one of the following:
$ perf record -e 'compaction:mm_compaction_*' ./workload
# or:
$ perf script record compaction-times

# Reporting: basic
$ perf script report compaction-times
total: 2444505743ns migration: moved=357738 failed=39275
free_scanner: scanned=2705578 isolated=387875
migration_scanner: scanned=414426 isolated=397013

# Reporting: Per task stall times
$ perf script report compaction-times -- -t -p
total: 2444505743ns
6384[thpscale]: 740800017ns
6385[thpscale]: 274119512ns
6386[thpscale]: 832961337ns
6383[thpscale]: 596624877ns

# Reporting: Per-compaction attempts for task 6385
$ perf script report compaction-times -- -m -pv 6385
total: 274119512ns migration: moved=14893 failed=24285
6385[thpscale]: 274119512ns migration: moved=14893 failed=24285
6385[thpscale].1: 3033277ns migration: moved=511 failed=1
6385[thpscale].2: 9592094ns migration: moved=1524 failed=12
6385[thpscale].3: 2495587ns migration: moved=512 failed=0
6385[thpscale].4: 2561766ns migration: moved=512 failed=0
6385[thpscale].5: 2523521ns migration: moved=512 failed=0
..... output continues ...

Changes since v1:
- report stats for isolate_migratepages and isolate_freepages
(Vlastimil Babka)
- refactor code to achieve above
- add help text
- output to stdout/stderr explicitly

Signed-off-by: Tony Jones <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
---
.../scripts/python/bin/compaction-times-record | 2 +
.../scripts/python/bin/compaction-times-report | 4 +
tools/perf/scripts/python/compaction-times.py | 311 +++++++++++++++++++++
3 files changed, 317 insertions(+)

diff --git a/tools/perf/scripts/python/bin/compaction-times-record b/tools/perf/scripts/python/bin/compaction-times-record
new file mode 100644
index 0000000..6edcd40
--- /dev/null
+++ b/tools/perf/scripts/python/bin/compaction-times-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e compaction:mm_compaction_begin -e compaction:mm_compaction_end -e compaction:mm_compaction_migratepages -e compaction:mm_compaction_isolate_migratepages -e compaction:mm_compaction_isolate_freepages $@
diff --git a/tools/perf/scripts/python/bin/compaction-times-report b/tools/perf/scripts/python/bin/compaction-times-report
new file mode 100644
index 0000000..3dc1389
--- /dev/null
+++ b/tools/perf/scripts/python/bin/compaction-times-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+#description: display time taken by mm compaction
+#args: [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]
+perf script -s "$PERF_EXEC_PATH"/scripts/python/compaction-times.py $@
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
new file mode 100644
index 0000000..239cb05
--- /dev/null
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -0,0 +1,311 @@
+# report time spent in compaction
+# Licensed under the terms of the GNU GPL License version 2
+
+# testing:
+# 'echo 1 > /proc/sys/vm/compact_memory' to force compaction of all zones
+
+import os
+import sys
+import re
+
+import signal
+signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+usage = "usage: perf script report compaction-times.py -- [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]\n"
+
+class popt:
+    DISP_DFL = 0
+    DISP_PROC = 1
+    DISP_PROC_VERBOSE=2
+
+class topt:
+    DISP_TIME = 0
+    DISP_MIG = 1
+    DISP_ISOLFREE = 2
+    DISP_ISOLMIG = 4
+    DISP_ALL = 7
+
+class comm_filter:
+    def __init__(self, re):
+        self.re = re
+
+    def filter(self, pid, comm):
+        m = self.re.search(comm)
+        return m == None or m.group() == ""
+
+class pid_filter:
+    def __init__(self, low, high):
+        self.low = (0 if low == "" else int(low))
+        self.high = (0 if high == "" else int(high))
+
+    def filter(self, pid, comm):
+        return not (pid >= self.low and (self.high == 0 or pid <= self.high))
+
+def set_type(t):
+    global opt_disp
+    opt_disp = (t if opt_disp == topt.DISP_ALL else opt_disp|t)
+
+def ns(sec, nsec):
+    return (sec * 1000000000) + nsec
+
+def time(ns):
+    return "%dns" % ns if opt_ns else "%dus" % (round(ns, -3) / 1000)
+
+class pair:
+    def __init__(self, aval, bval, alabel = None, blabel = None):
+        self.alabel = alabel
+        self.blabel = blabel
+        self.aval = aval
+        self.bval = bval
+
+    def __add__(self, rhs):
+        self.aval += rhs.aval
+        self.bval += rhs.bval
+        return self
+
+    def __str__(self):
+        return "%s=%d %s=%d" % (self.alabel, self.aval, self.blabel, self.bval)
+
+class cnode:
+    def __init__(self, ns):
+        self.ns = ns
+        self.migrated = pair(0, 0, "moved", "failed")
+        self.fscan = pair(0,0, "scanned", "isolated")
+        self.mscan = pair(0,0, "scanned", "isolated")
+
+    def __add__(self, rhs):
+        self.ns += rhs.ns
+        self.migrated += rhs.migrated
+        self.fscan += rhs.fscan
+        self.mscan += rhs.mscan
+        return self
+
+    def __str__(self):
+        prev = 0
+        s = "%s " % time(self.ns)
+        if (opt_disp & topt.DISP_MIG):
+            s += "migration: %s" % self.migrated
+            prev = 1
+        if (opt_disp & topt.DISP_ISOLFREE):
+            s += "%sfree_scanner: %s" % (" " if prev else "", self.fscan)
+            prev = 1
+        if (opt_disp & topt.DISP_ISOLMIG):
+            s += "%smigration_scanner: %s" % (" " if prev else "", self.mscan)
+        return s
+
+    def complete(self, secs, nsecs):
+        self.ns = ns(secs, nsecs) - self.ns
+
+    def increment(self, migrated, fscan, mscan):
+        if (migrated != None):
+            self.migrated += migrated
+        if (fscan != None):
+            self.fscan += fscan
+        if (mscan != None):
+            self.mscan += mscan
+
+
+class chead:
+    heads = {}
+    val = cnode(0);
+    fobj = None
+
+    @classmethod
+    def add_filter(cls, filter):
+        cls.fobj = filter
+
+    @classmethod
+    def create_pending(cls, pid, comm, start_secs, start_nsecs):
+        filtered = 0
+        try:
+            head = cls.heads[pid]
+            filtered = head.is_filtered()
+        except KeyError:
+            if cls.fobj != None:
+                filtered = cls.fobj.filter(pid, comm)
+            head = cls.heads[pid] = chead(comm, pid, filtered)
+
+        if not filtered:
+            head.mark_pending(start_secs, start_nsecs)
+
+    @classmethod
+    def increment_pending(cls, pid, migrated, fscan, mscan):
+        head = cls.heads[pid]
+        if not head.is_filtered():
+            if head.is_pending():
+                head.do_increment(migrated, fscan, mscan)
+            else:
+                sys.stderr.write("missing start compaction event for pid %d\n" % pid)
+
+    @classmethod
+    def complete_pending(cls, pid, secs, nsecs):
+        head = cls.heads[pid]
+        if not head.is_filtered():
+            if head.is_pending():
+                head.make_complete(secs, nsecs)
+            else:
+                sys.stderr.write("missing start compaction event for pid %d\n" % pid)
+
+    @classmethod
+    def gen(cls):
+        if opt_proc != popt.DISP_DFL:
+            for i in cls.heads:
+                yield cls.heads[i]
+
+    @classmethod
+    def str(cls):
+        return cls.val
+
+    def __init__(self, comm, pid, filtered):
+        self.comm = comm
+        self.pid = pid
+        self.val = cnode(0)
+        self.pending = None
+        self.filtered = filtered
+        self.list = []
+
+    def __add__(self, rhs):
+        self.ns += rhs.ns
+        self.val += rhs.val
+        return self
+
+    def mark_pending(self, secs, nsecs):
+        self.pending = cnode(ns(secs, nsecs))
+
+    def do_increment(self, migrated, fscan, mscan):
+        self.pending.increment(migrated, fscan, mscan)
+
+    def make_complete(self, secs, nsecs):
+        self.pending.complete(secs, nsecs)
+        chead.val += self.pending
+
+        if opt_proc != popt.DISP_DFL:
+            self.val += self.pending
+
+        if opt_proc == popt.DISP_PROC_VERBOSE:
+            self.list.append(self.pending)
+        self.pending = None
+
+    def enumerate(self):
+        if opt_proc == popt.DISP_PROC_VERBOSE and not self.is_filtered():
+            for i, pelem in enumerate(self.list):
+                sys.stdout.write("%d[%s].%d: %s\n" % (self.pid, self.comm, i+1, pelem))
+
+    def is_pending(self):
+        return self.pending != None
+
+    def is_filtered(self):
+        return self.filtered
+
+    def display(self):
+        if not self.is_filtered():
+            sys.stdout.write("%d[%s]: %s\n" % (self.pid, self.comm, self.val))
+
+
+def trace_end():
+    sys.stdout.write("total: %s\n" % chead.str())
+    for i in chead.gen():
+        i.display(),
+        i.enumerate()
+
+def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, nr_migrated, nr_failed):
+
+    chead.increment_pending(common_pid,
+        pair(nr_migrated, nr_failed), None, None)
+
+def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+
+    chead.increment_pending(common_pid,
+        None, pair(nr_scanned, nr_taken), None)
+
+def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+
+    chead.increment_pending(common_pid,
+        None, None, pair(nr_scanned, nr_taken))
+
+def compaction__mm_compaction_end(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, zone_start, migrate_start, free_start, zone_end,
+    sync, status):
+
+    chead.complete_pending(common_pid, common_secs, common_nsecs)
+
+def compaction__mm_compaction_begin(event_name, context, common_cpu,
+    common_secs, common_nsecs, common_pid, common_comm,
+    common_callchain, zone_start, migrate_start, free_start, zone_end,
+    sync):
+
+    chead.create_pending(common_pid, common_comm, common_secs, common_nsecs)
+
+def pr_help():
+    global usage
+
+    sys.stdout.write(usage)
+    sys.stdout.write("\n")
+    sys.stdout.write("-h display this help\n")
+    sys.stdout.write("-p display by process\n")
+    sys.stdout.write("-pv display by process (verbose)\n")
+    sys.stdout.write("-t display stall times only\n")
+    sys.stdout.write("-m display stats for migration\n")
+    sys.stdout.write("-fs display stats for free scanner\n")
+    sys.stdout.write("-ms display stats for migration scanner\n")
+    sys.stdout.write("-u display results in microseconds (default nanoseconds)\n")
+
+
+comm_re = None
+pid_re = None
+pid_regex = "^(\d*)-(\d*)$|^(\d*)$"
+
+opt_proc = popt.DISP_DFL
+opt_disp = topt.DISP_ALL
+
+opt_ns = True
+
+argc = len(sys.argv) - 1
+if argc >= 1:
+    pid_re = re.compile(pid_regex)
+
+    for i, opt in enumerate(sys.argv[1:]):
+        if opt[0] == "-":
+            if opt == "-h":
+                pr_help()
+                exit(0);
+            elif opt == "-p":
+                opt_proc = popt.DISP_PROC
+            elif opt == "-pv":
+                opt_proc = popt.DISP_PROC_VERBOSE
+            elif opt == '-u':
+                opt_ns = False
+            elif opt == "-t":
+                set_type(topt.DISP_TIME)
+            elif opt == "-m":
+                set_type(topt.DISP_MIG)
+            elif opt == "-fs":
+                set_type(topt.DISP_ISOLFREE)
+            elif opt == "-ms":
+                set_type(topt.DISP_ISOLMIG)
+            else:
+                sys.exit(usage)
+
+        elif i == argc - 1:
+            m = pid_re.search(opt)
+            if m != None and m.group() != "":
+                if m.group(3) != None:
+                    f = pid_filter(m.group(3), m.group(3))
+                else:
+                    f = pid_filter(m.group(1), m.group(2))
+            else:
+                try:
+                    comm_re=re.compile(opt)
+                except:
+                    sys.stderr.write("invalid regex '%s'" % opt)
+                    sys.exit(usage)
+                f = comm_filter(comm_re)
+
+            chead.add_filter(f)