2009-11-22 09:57:57

by Pekka Enberg

Subject: [PATCH] perf kmem: Add --sort hit and --sort frag

This patch adds support for "--sort hit" and "--sort frag" to the "perf kmem"
tool. The former was already mentioned in the help text, and the latter is
useful for finding call-sites that exhibit worst-case behavior for SLAB
allocators.
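
The frag_cmp() comparator added below calls a fragmentation() helper that
already exists in builtin-kmem.c but is not visible in the hunks. The
following standalone sketch, which is not part of the patch, illustrates the
ordering the new comparator imposes, assuming fragmentation() reports the
percentage of allocated bytes the caller did not actually request; the struct
layout and the sample call-sites are invented for illustration, and the real
tool keeps its alloc_stat entries in an rbtree rather than qsort()ing an
array.

#include <stdio.h>
#include <stdlib.h>

struct alloc_stat {
	unsigned long call_site;
	unsigned long bytes_req;
	unsigned long bytes_alloc;
	unsigned long hit;
};

/* Assumed semantics: percentage of allocated bytes the caller did not request. */
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	return 100.0 - (100.0 * n_req / n_alloc);
}

static int frag_cmp(const struct alloc_stat *l, const struct alloc_stat *r)
{
	double x = fragmentation(l->bytes_req, l->bytes_alloc);
	double y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

/* qsort() passes void pointers, so wrap the typed comparator. */
static int frag_cmp_qsort(const void *a, const void *b)
{
	return frag_cmp(a, b);
}

int main(void)
{
	struct alloc_stat stats[] = {
		{ 0xc0100000UL, 4096, 4096, 10 },	/* nothing wasted       */
		{ 0xc0200000UL,   24,   32, 50 },	/* 25% of bytes wasted  */
		{ 0xc0300000UL,  100,  256,  3 },	/* ~61% of bytes wasted */
	};
	size_t i, nr = sizeof(stats) / sizeof(stats[0]);

	qsort(stats, nr, sizeof(stats[0]), frag_cmp_qsort);

	for (i = 0; i < nr; i++)
		printf("call_site=%#lx frag=%5.2f%% hit=%lu\n",
		       stats[i].call_site,
		       fragmentation(stats[i].bytes_req, stats[i].bytes_alloc),
		       stats[i].hit);
	return 0;
}

With this ordering, call-sites that waste the largest share of their allocated
bytes compare greatest, which is what makes "--sort frag" useful for spotting
worst-case SLAB behavior.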

Cc: Li Zefan <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Eduard - Gabriel Munteanu <[email protected]>
Cc: [email protected] <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>
---
tools/perf/builtin-kmem.c | 29 ++++++++++++++++++++++++++++-
1 files changed, 28 insertions(+), 1 deletions(-)

diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index f315b05..4145049 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -443,6 +443,15 @@ static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
 	return 0;
 }
 
+static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
+{
+	if (l->hit < r->hit)
+		return -1;
+	else if (l->hit > r->hit)
+		return 1;
+	return 0;
+}
+
 static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
 {
 	if (l->bytes_alloc < r->bytes_alloc)
@@ -452,6 +461,20 @@ static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
 	return 0;
 }
 
+static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
+{
+	double x, y;
+
+	x = fragmentation(l->bytes_req, l->bytes_alloc);
+	y = fragmentation(r->bytes_req, r->bytes_alloc);
+
+	if (x < y)
+		return -1;
+	else if (x > y)
+		return 1;
+	return 0;
+}
+
 static int parse_sort_opt(const struct option *opt __used,
 			  const char *arg, int unset __used)
 {
@@ -464,8 +487,12 @@ static int parse_sort_opt(const struct option *opt __used,
 		sort_fn = ptr_cmp;
 	else if (strcmp(arg, "call_site") == 0)
 		sort_fn = callsite_cmp;
+	else if (strcmp(arg, "hit") == 0)
+		sort_fn = hit_cmp;
 	else if (strcmp(arg, "bytes") == 0)
 		sort_fn = bytes_cmp;
+	else if (strcmp(arg, "frag") == 0)
+		sort_fn = frag_cmp;
 	else
 		return -1;
 
@@ -517,7 +544,7 @@ static const struct option kmem_options[] = {
"stat selector, Pass 'alloc' or 'caller'.",
parse_stat_opt),
OPT_CALLBACK('s', "sort", NULL, "key",
- "sort by key: ptr, call_site, hit, bytes",
+ "sort by key: ptr, call_site, hit, bytes, frag",
parse_sort_opt),
OPT_CALLBACK('l', "line", NULL, "num",
"show n lins",
--
1.6.0.4


2009-11-22 10:26:31

by Pekka Enberg

Subject: [tip:perf/core] perf kmem: Add --sort hit and --sort frag

Commit-ID: f3ced7cdb24e7968a353d828955fa2daf4167e72
Gitweb: http://git.kernel.org/tip/f3ced7cdb24e7968a353d828955fa2daf4167e72
Author: Pekka Enberg <[email protected]>
AuthorDate: Sun, 22 Nov 2009 11:58:00 +0200
Committer: Ingo Molnar <[email protected]>
CommitDate: Sun, 22 Nov 2009 11:21:37 +0100

perf kmem: Add --sort hit and --sort frag

This patch adds support for "--sort hit" and "--sort frag" to
the "perf kmem" tool. The former was already mentioned in the
help text, and the latter is useful for finding call-sites that
exhibit worst-case behavior for SLAB allocators.

Signed-off-by: Pekka Enberg <[email protected]>
Cc: Li Zefan <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Eduard - Gabriel Munteanu <[email protected]>
Cc: [email protected] <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
tools/perf/builtin-kmem.c | 29 ++++++++++++++++++++++++++++-
1 files changed, 28 insertions(+), 1 deletions(-)

diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index f315b05..4145049 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -443,6 +443,15 @@ static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
 	return 0;
 }
 
+static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
+{
+	if (l->hit < r->hit)
+		return -1;
+	else if (l->hit > r->hit)
+		return 1;
+	return 0;
+}
+
 static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
 {
 	if (l->bytes_alloc < r->bytes_alloc)
@@ -452,6 +461,20 @@ static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
 	return 0;
 }
 
+static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
+{
+	double x, y;
+
+	x = fragmentation(l->bytes_req, l->bytes_alloc);
+	y = fragmentation(r->bytes_req, r->bytes_alloc);
+
+	if (x < y)
+		return -1;
+	else if (x > y)
+		return 1;
+	return 0;
+}
+
 static int parse_sort_opt(const struct option *opt __used,
 			  const char *arg, int unset __used)
 {
@@ -464,8 +487,12 @@ static int parse_sort_opt(const struct option *opt __used,
 		sort_fn = ptr_cmp;
 	else if (strcmp(arg, "call_site") == 0)
 		sort_fn = callsite_cmp;
+	else if (strcmp(arg, "hit") == 0)
+		sort_fn = hit_cmp;
 	else if (strcmp(arg, "bytes") == 0)
 		sort_fn = bytes_cmp;
+	else if (strcmp(arg, "frag") == 0)
+		sort_fn = frag_cmp;
 	else
 		return -1;
 
@@ -517,7 +544,7 @@ static const struct option kmem_options[] = {
"stat selector, Pass 'alloc' or 'caller'.",
parse_stat_opt),
OPT_CALLBACK('s', "sort", NULL, "key",
- "sort by key: ptr, call_site, hit, bytes",
+ "sort by key: ptr, call_site, hit, bytes, frag",
parse_sort_opt),
OPT_CALLBACK('l', "line", NULL, "num",
"show n lins",