2010-01-30 11:43:44

by Hitoshi Mitake

Subject: [PATCH 00/12 v2] perf lock: New subcommand "perf lock", for analyzing lock statistics

Adding new subcommand "perf lock" to perf.

I made this patch series on top of the
latest perf/core of tip (ef12a141306c90336a3a10d40213ecd98624d274),
so please apply this series to perf/core.

This is version 2 of the series.
It adds the previously missing util/include/linux/hash.h
and fixes a compile error in builtin-lock.c on 32-bit environments.

Sorry for my slow work, Frederic and Ingo.
I still have a lot of remaining ToDos,
but perf lock can finally provide minimal functionality for analyzing lock statistics.

This series requires one more reverting patch.
But that revert is a very temporary solution,
so I did not include it in this series.
I'll send it later as a reply to this cover letter, for testing only;
if you are interested in testing this series,
you need to apply it as well.

Hitoshi Mitake (11):
perf tools: Add __data_loc support
perf: Add util/include/linuxhash.h to include hash.h of kernel
lockdep: Add information of file and line where lock inited to struct
lockdep_map
lockdep: Add file and line to initialize sequence of spinlock
lockdep: Add file and line to initialize sequence of rwlock
lockdep: Add file and line to initialize sequence of rwsem
lockdep: Add file and line to initialize sequence of rwsem (x86)
lockdep: Add file and line to initialize sequence of mutex
lockdep: Fix the way to initialize class_mutex for information of
file and line
perf lock: Enhance information of lock trace events
perf lock: New subcommand "perf lock", for analyzing lock statistics

arch/x86/include/asm/rwsem.h | 9 +-
drivers/base/class.c | 3 +-
include/linux/lockdep.h | 20 +-
include/linux/mutex-debug.h | 2 +-
include/linux/mutex.h | 12 +-
include/linux/rwlock.h | 6 +-
include/linux/rwlock_types.h | 6 +-
include/linux/rwsem-spinlock.h | 11 +-
include/linux/spinlock.h | 6 +-
include/linux/spinlock_types.h | 6 +-
include/trace/events/lock.h | 49 ++-
kernel/lockdep.c | 10 +-
kernel/mutex-debug.c | 5 +-
kernel/mutex-debug.h | 3 +-
kernel/mutex.c | 5 +-
kernel/mutex.h | 2 +-
lib/rwsem-spinlock.c | 5 +-
lib/rwsem.c | 5 +-
lib/spinlock_debug.c | 12 +-
tools/perf/Makefile | 2 +
tools/perf/builtin-lock.c | 724 ++++++++++++++++++++++++++++++++++
tools/perf/builtin.h | 1 +
tools/perf/perf.c | 1 +
tools/perf/util/include/linux/hash.h | 5 +
tools/perf/util/trace-event-parse.c | 7 +
25 files changed, 870 insertions(+), 47 deletions(-)
create mode 100644 tools/perf/builtin-lock.c
create mode 100644 tools/perf/util/include/linux/hash.h


2010-01-30 11:43:52

by Hitoshi Mitake

Subject: [PATCH v2 11/11] perf lock: New subcommand "perf lock", for analyzing lock statistics

Adding new subcommand "perf lock" to perf.
I still have a lot of remaining ToDos,
but perf lock can now provide minimal functionality for analyzing lock statistics.

This patch contains some lines over 80 characters,
but I judged that breaking them up
would hurt readability.
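
For reference, the workflow implemented by this version is (subcommand names
as in this patch; 'prof' is the reporting subcommand):

  perf lock record sleep 1      # trace lock events system-wide under a workload
  perf lock prof                # aggregate per-lock statistics (default key: acquired)
  perf lock prof -k wait_total  # or sort by total wait time
  perf lock map                 # dump the address -> name/file:line mapping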

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
---
tools/perf/Makefile | 1 +
tools/perf/builtin-lock.c | 724 +++++++++++++++++++++++++++++++++++++++++++++
tools/perf/builtin.h | 1 +
tools/perf/perf.c | 1 +
4 files changed, 727 insertions(+), 0 deletions(-)
create mode 100644 tools/perf/builtin-lock.c

diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index b2bce1f..4296930 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -456,6 +456,7 @@ BUILTIN_OBJS += builtin-top.o
BUILTIN_OBJS += builtin-trace.o
BUILTIN_OBJS += builtin-probe.o
BUILTIN_OBJS += builtin-kmem.o
+BUILTIN_OBJS += builtin-lock.o

PERFLIBS = $(LIB_FILE)

diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
new file mode 100644
index 0000000..2b5f887
--- /dev/null
+++ b/tools/perf/builtin-lock.c
@@ -0,0 +1,724 @@
+#include "builtin.h"
+#include "perf.h"
+
+#include "util/util.h"
+#include "util/cache.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/header.h"
+
+#include "util/parse-options.h"
+#include "util/trace-event.h"
+
+#include "util/debug.h"
+#include "util/session.h"
+
+#include <sys/types.h>
+#include <sys/prctl.h>
+#include <semaphore.h>
+#include <pthread.h>
+#include <math.h>
+#include <limits.h>
+
+#include <linux/list.h>
+#include <linux/hash.h>
+
+/* based on kernel/lockdep.c */
+#define LOCKHASH_BITS 12
+#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
+
+static struct list_head lockhash_table[LOCKHASH_SIZE];
+
+#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
+#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
+
+#define LOCK_STATE_UNLOCKED 0 /* initial state */
+#define LOCK_STATE_LOCKED 1
+
+struct lock_stat {
+ struct list_head hash_entry;
+ struct rb_node rb; /* used for sorting */
+
+ /* FIXME: raw_field_value() returns unsigned long long,
+	 * so the address of lockdep_map has to be treated as 64-bit.
+	 * Is there a better solution? */
+ void *addr; /* address of lockdep_map, used as ID */
+ char *name; /* for strcpy(), we cannot use const */
+ char *file;
+ unsigned int line;
+
+ int state;
+ u64 prev_event_time; /* timestamp of previous event */
+
+ unsigned int nr_acquired;
+ unsigned int nr_acquire;
+ unsigned int nr_contended;
+ unsigned int nr_release;
+
+ /* these times are in nano sec. */
+ u64 wait_time_total;
+ u64 wait_time_min;
+ u64 wait_time_max;
+};
+
+/* build a simple key function: returns 1 if one is bigger than two */
+#define SINGLE_KEY(member) \
+ static int lock_stat_key_ ## member(struct lock_stat *one, \
+ struct lock_stat *two) \
+ { \
+ return one->member > two->member; \
+ }
+
+SINGLE_KEY(nr_acquired)
+SINGLE_KEY(nr_contended)
+SINGLE_KEY(wait_time_total)
+SINGLE_KEY(wait_time_min)
+SINGLE_KEY(wait_time_max)
+
+struct lock_key {
+ /*
+	 * name: the key name specified by the user;
+	 *       this should be simpler than the raw member name,
+ * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
+ */
+ const char *name;
+ int (*key)(struct lock_stat*, struct lock_stat*);
+};
+
+static const char *sort_key = "acquired";
+static int (*compare)(struct lock_stat *, struct lock_stat *);
+
+#define DEF_KEY_LOCK(name, fn_suffix) \
+ { #name, lock_stat_key_ ## fn_suffix }
+struct lock_key keys[] = {
+ DEF_KEY_LOCK(acquired, nr_acquired),
+ DEF_KEY_LOCK(contended, nr_contended),
+ DEF_KEY_LOCK(wait_total, wait_time_total),
+ DEF_KEY_LOCK(wait_min, wait_time_min),
+ DEF_KEY_LOCK(wait_max, wait_time_max),
+
+	/* more complicated comparisons should go here */
+
+ { NULL, NULL }
+};
+
+static void select_key(void)
+{
+ int i;
+
+ for (i = 0; keys[i].name; i++) {
+ if (!strcmp(keys[i].name, sort_key)) {
+ compare = keys[i].key;
+ return;
+ }
+ }
+
+ die("Unknown compare key:%s\n", sort_key);
+}
+
+static struct rb_root result; /* place to store sorted data */
+
+static void insert_to_result(struct lock_stat *st,
+ int (*bigger)(struct lock_stat *,
+ struct lock_stat *))
+{
+ struct rb_node **rb = &result.rb_node;
+ struct rb_node *parent = NULL;
+ struct lock_stat *p;
+
+ while (*rb) {
+ p = container_of(*rb, struct lock_stat, rb);
+ parent = *rb;
+
+ if (bigger(st, p))
+ rb = &(*rb)->rb_left;
+ else
+ rb = &(*rb)->rb_right;
+ }
+
+ rb_link_node(&st->rb, parent, rb);
+ rb_insert_color(&st->rb, &result);
+}
+
+/* returns the leftmost element of result, and erases it */
+static struct lock_stat *pop_from_result(void)
+{
+ struct rb_node *node = result.rb_node;
+
+ if (!node)
+ return NULL;
+
+ while (node->rb_left)
+ node = node->rb_left;
+
+ rb_erase(node, &result);
+ return container_of(node, struct lock_stat, rb);
+}
+
+static struct lock_stat *lock_stat_findnew(void *addr, const char *name,
+ const char *file, unsigned int line)
+{
+ struct list_head *entry = lockhashentry(addr);
+ struct lock_stat *ret, *new;
+
+ list_for_each_entry(ret, entry, hash_entry) {
+ if (ret->addr == addr)
+ return ret;
+ }
+
+ new = zalloc(sizeof(struct lock_stat));
+ if (!new)
+ goto alloc_failed;
+
+ new->addr = addr;
+ new->name = zalloc(sizeof(char) * strlen(name) + 1);
+ if (!new->name)
+ goto alloc_failed;
+ strcpy(new->name, name);
+ new->file = zalloc(sizeof(char) * strlen(file) + 1);
+ if (!new->file)
+ goto alloc_failed;
+ strcpy(new->file, file);
+ new->line = line;
+
+ /* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */
+ new->state = LOCK_STATE_UNLOCKED;
+ new->wait_time_min = ULLONG_MAX;
+
+ list_add(&new->hash_entry, entry);
+ return new;
+
+alloc_failed:
+ die("memory allocation failed\n");
+}
+
+static char const *input_name = "perf.data";
+
+static int profile_cpu = -1;
+
+struct raw_event_sample {
+ u32 size;
+ char data[0];
+};
+
+struct trace_acquire_event {
+ void *addr;
+ const char *name;
+ const char *file;
+ unsigned int line;
+};
+
+struct trace_acquired_event {
+ void *addr;
+ const char *name;
+ const char *file;
+ unsigned int line;
+};
+
+struct trace_contended_event {
+ void *addr;
+ const char *name;
+ const char *file;
+ unsigned int line;
+};
+
+struct trace_release_event {
+ void *addr;
+ const char *name;
+ const char *file;
+ unsigned int line;
+};
+
+struct trace_lock_handler {
+ void (*acquire_event)(struct trace_acquire_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*acquired_event)(struct trace_acquired_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*contended_event)(struct trace_contended_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*release_event)(struct trace_release_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+};
+
+static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct lock_stat *st;
+
+ st = lock_stat_findnew(acquire_event->addr, acquire_event->name,
+ acquire_event->file, acquire_event->line);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ break;
+ case LOCK_STATE_LOCKED:
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ st->prev_event_time = timestamp;
+}
+
+static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct lock_stat *st;
+
+ st = lock_stat_findnew(acquired_event->addr, acquired_event->name,
+ acquired_event->file, acquired_event->line);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ st->state = LOCK_STATE_LOCKED;
+ st->nr_acquired++;
+ break;
+ case LOCK_STATE_LOCKED:
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ st->prev_event_time = timestamp;
+}
+
+static void prof_lock_contended_event(struct trace_contended_event *contended_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct lock_stat *st;
+
+ st = lock_stat_findnew(contended_event->addr, contended_event->name,
+ contended_event->file, contended_event->line);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ break;
+ case LOCK_STATE_LOCKED:
+ st->nr_contended++;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ st->prev_event_time = timestamp;
+}
+
+static void prof_lock_release_event(struct trace_release_event *release_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct lock_stat *st;
+ u64 hold_time;
+
+ st = lock_stat_findnew(release_event->addr, release_event->name,
+ release_event->file, release_event->line);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ break;
+ case LOCK_STATE_LOCKED:
+ st->state = LOCK_STATE_UNLOCKED;
+ hold_time = timestamp - st->prev_event_time;
+
+ if (timestamp < st->prev_event_time) {
+			/* unfortunately, this can happen... */
+ goto end;
+ }
+
+ if (st->wait_time_min > hold_time)
+ st->wait_time_min = hold_time;
+ if (st->wait_time_max < hold_time)
+ st->wait_time_max = hold_time;
+ st->wait_time_total += hold_time;
+
+ st->nr_release++;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+end:
+ st->prev_event_time = timestamp;
+}
+
+/* lock oriented handlers */
+/* TODO: handlers for CPU oriented, thread oriented */
+static struct trace_lock_handler prof_lock_ops = {
+ .acquire_event = prof_lock_acquire_event,
+ .acquired_event = prof_lock_acquired_event,
+ .contended_event = prof_lock_contended_event,
+ .release_event = prof_lock_release_event,
+};
+
+static struct trace_lock_handler *trace_handler;
+
+static void
+process_lock_acquire_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_acquire_event acquire_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&acquire_event.addr, &tmp, sizeof(void *));
+ acquire_event.name = (char *)raw_field_ptr(event, "name", data);
+ acquire_event.file = (char *)raw_field_ptr(event, "file", data);
+ acquire_event.line =
+ (unsigned int)raw_field_value(event, "line", data);
+
+ if (trace_handler->acquire_event) {
+ trace_handler->acquire_event(&acquire_event,
+ event, cpu, timestamp, thread);
+ }
+}
+
+static void
+process_lock_acquired_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_acquired_event acquired_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&acquired_event.addr, &tmp, sizeof(void *));
+ acquired_event.name = (char *)raw_field_ptr(event, "name", data);
+ acquired_event.file = (char *)raw_field_ptr(event, "file", data);
+ acquired_event.line =
+ (unsigned int)raw_field_value(event, "line", data);
+
+	if (trace_handler->acquired_event) {
+ trace_handler->acquired_event(&acquired_event,
+ event, cpu, timestamp, thread);
+ }
+}
+
+static void
+process_lock_contended_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_contended_event contended_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&contended_event.addr, &tmp, sizeof(void *));
+ contended_event.name = (char *)raw_field_ptr(event, "name", data);
+ contended_event.file = (char *)raw_field_ptr(event, "file", data);
+ contended_event.line =
+ (unsigned int)raw_field_value(event, "line", data);
+
+	if (trace_handler->contended_event) {
+ trace_handler->contended_event(&contended_event,
+ event, cpu, timestamp, thread);
+ }
+}
+
+static void
+process_lock_release_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_release_event release_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&release_event.addr, &tmp, sizeof(void *));
+ release_event.name = (char *)raw_field_ptr(event, "name", data);
+ release_event.file = (char *)raw_field_ptr(event, "file", data);
+ release_event.line =
+ (unsigned int)raw_field_value(event, "line", data);
+
+	if (trace_handler->release_event) {
+ trace_handler->release_event(&release_event,
+ event, cpu, timestamp, thread);
+ }
+}
+
+static void
+process_raw_event(void *data, int cpu,
+ u64 timestamp, struct thread *thread)
+{
+ struct event *event;
+ int type;
+
+ type = trace_parse_common_type(data);
+ event = trace_find_event(type);
+
+ if (!strcmp(event->name, "lock_acquire"))
+ process_lock_acquire_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "lock_acquired"))
+ process_lock_acquired_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "lock_contended"))
+ process_lock_contended_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "lock_release"))
+ process_lock_release_event(data, event, cpu, timestamp, thread);
+}
+
+static int process_sample_event(event_t *event, struct perf_session *session)
+{
+ struct thread *thread;
+ struct sample_data data;
+
+ bzero(&data, sizeof(struct sample_data));
+ event__parse_sample(event, session->sample_type, &data);
+ thread = perf_session__findnew(session, data.pid);
+
+ /*
+ * FIXME: this causes warn on 32bit environment
+ * because of (void *)data.ip (type of data.ip is u64)
+ */
+/* dump_printf("(IP, %d): %d/%d: %p period: %llu\n", */
+/* event->header.misc, */
+/* data.pid, data.tid, (void *)data.ip, data.period); */
+
+ if (thread == NULL) {
+ pr_debug("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+
+ if (profile_cpu != -1 && profile_cpu != (int) data.cpu)
+ return 0;
+
+ process_raw_event(data.raw_data, data.cpu, data.time, thread);
+
+ return 0;
+}
+
+/* TODO: various way to print, coloring, nano or milli sec */
+static void print_result(void)
+{
+ struct lock_stat *st;
+ char cut_name[20];
+
+ printf("%18s ", "ID");
+ printf("%20s ", "Name");
+ printf("%10s ", "acquired");
+ printf("%10s ", "contended");
+
+ printf("%15s ", "total wait (ns)");
+ printf("%15s ", "max wait (ns)");
+ printf("%15s ", "min wait (ns)");
+
+ printf("\n\n");
+
+ while ((st = pop_from_result())) {
+ bzero(cut_name, 20);
+
+ printf("%p ", st->addr);
+
+ if (strlen(st->name) < 16) {
+ /* output raw name */
+ printf("%20s ", st->name);
+ } else {
+ strncpy(cut_name, st->name, 16);
+ cut_name[16] = '.';
+ cut_name[17] = '.';
+ cut_name[18] = '.';
+ cut_name[19] = '\0';
+ /* cut off name for saving output style */
+ printf("%20s ", cut_name);
+ }
+
+ printf("%10u ", st->nr_acquired);
+ printf("%10u ", st->nr_contended);
+
+ printf("%15llu ", st->wait_time_total);
+ printf("%15llu ", st->wait_time_max);
+ printf("%15llu ", st->wait_time_min == ULLONG_MAX ?
+ 0 : st->wait_time_min);
+ printf("\n");
+ }
+}
+
+static void dump_map(void)
+{
+ unsigned int i;
+ struct lock_stat *st;
+
+ for (i = 0; i < LOCKHASH_SIZE; i++) {
+ list_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ printf("%p: %s (src: %s, line: %u)\n",
+ st->addr, st->name, st->file, st->line);
+ }
+ }
+}
+
+static struct perf_event_ops eops = {
+ .sample = process_sample_event,
+ .comm = event__process_comm,
+};
+
+static struct perf_session *session;
+
+static int read_events(void)
+{
+ session = perf_session__new(input_name, O_RDONLY, 0);
+ if (!session)
+ die("Initializing perf session failed\n");
+
+ return perf_session__process_events(session, &eops);
+}
+
+static void sort_result(void)
+{
+ unsigned int i;
+ struct lock_stat *st;
+
+ for (i = 0; i < LOCKHASH_SIZE; i++) {
+ list_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ insert_to_result(st, compare);
+ }
+ }
+}
+
+static void __cmd_prof(void)
+{
+ setup_pager();
+ select_key();
+ read_events();
+ sort_result();
+ print_result();
+}
+
+static const char * const prof_usage[] = {
+	"perf lock prof [<options>]",
+ NULL
+};
+
+static const struct option prof_options[] = {
+ OPT_STRING('k', "key", &sort_key, "acquired",
+ "key for sorting"),
+ /* TODO: type */
+ OPT_END()
+};
+
+static const char * const lock_usage[] = {
+ "perf lock [<options>] {record|trace|prof}",
+ NULL
+};
+
+static const struct option lock_options[] = {
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_BOOLEAN('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_END()
+};
+
+static const char *record_args[] = {
+ "record",
+ "-a",
+ "-R",
+ "-M",
+ "-f",
+ "-m", "1024",
+ "-c", "1",
+ "-e", "lock:lock_acquire:r",
+ "-e", "lock:lock_acquired:r",
+ "-e", "lock:lock_contended:r",
+ "-e", "lock:lock_release:r",
+};
+
+static int __cmd_record(int argc, const char **argv)
+{
+ unsigned int rec_argc, i, j;
+ const char **rec_argv;
+
+ rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+
+ for (i = 0; i < ARRAY_SIZE(record_args); i++)
+ rec_argv[i] = strdup(record_args[i]);
+
+ for (j = 1; j < (unsigned int)argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ return cmd_record(i, rec_argv, NULL);
+}
+
+int cmd_lock(int argc, const char **argv, const char *prefix __used)
+{
+ unsigned int i;
+
+ symbol__init();
+ for (i = 0; i < LOCKHASH_SIZE; i++)
+ INIT_LIST_HEAD(lockhash_table + i);
+
+ argc = parse_options(argc, argv, lock_options, lock_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc)
+ usage_with_options(lock_usage, lock_options);
+
+ if (!strncmp(argv[0], "rec", 3)) {
+ return __cmd_record(argc, argv);
+ } else if (!strncmp(argv[0], "prof", 4)) {
+ trace_handler = &prof_lock_ops;
+ if (argc) {
+ argc = parse_options(argc, argv,
+ prof_options, prof_usage, 0);
+ if (argc)
+ usage_with_options(prof_usage, prof_options);
+ }
+ __cmd_prof();
+ } else if (!strcmp(argv[0], "trace")) {
+ /* Aliased to 'perf trace' */
+ return cmd_trace(argc, argv, prefix);
+ } else if (!strcmp(argv[0], "map")) {
+ /* recycling prof_lock_ops */
+ trace_handler = &prof_lock_ops;
+ setup_pager();
+ read_events();
+ dump_map();
+ } else {
+ usage_with_options(lock_usage, lock_options);
+ }
+
+ return 0;
+}
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index dee97cf..10fe49e 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -31,5 +31,6 @@ extern int cmd_trace(int argc, const char **argv, const char *prefix);
extern int cmd_version(int argc, const char **argv, const char *prefix);
extern int cmd_probe(int argc, const char **argv, const char *prefix);
extern int cmd_kmem(int argc, const char **argv, const char *prefix);
+extern int cmd_lock(int argc, const char **argv, const char *prefix);

#endif
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 05c861c..d5a29e6 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -302,6 +302,7 @@ static void handle_internal_command(int argc, const char **argv)
{ "sched", cmd_sched, 0 },
{ "probe", cmd_probe, 0 },
{ "kmem", cmd_kmem, 0 },
+ { "lock", cmd_lock, 0 },
};
unsigned int i;
static const char ext[] = STRIP_EXTENSION;
--
1.6.5.2

2010-01-30 11:43:50

by Hitoshi Mitake

Subject: [PATCH v2 05/11] lockdep: Add file and line to initialize sequence of rwlock

rwlock has two ways to be initialized:
1: the macro DEFINE_RWLOCK for statically defined locks
2: the function rwlock_init() for locks on dynamically allocated memory

This patch modifies both initialization sequences
to add __FILE__ and __LINE__ to the lockdep_map.
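
As an illustration (a sketch of the expansion, not part of the patch; the
file name and line number are hypothetical):

  /* my_driver.c */
  rwlock_t *lock = kmalloc(sizeof(*lock), GFP_KERNEL);
  rwlock_init(lock);
  /*
   * With CONFIG_DEBUG_SPINLOCK this now expands roughly to
   *   __rwlock_init(lock, "lock", &__key, "my_driver.c", 3);
   * so lock->dep_map.file/.line identify the initialization site.
   */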

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
---
include/linux/rwlock.h | 6 ++++--
include/linux/rwlock_types.h | 6 +++++-
lib/spinlock_debug.c | 6 ++++--
3 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 71e0b00..cdef180 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -16,12 +16,14 @@

#ifdef CONFIG_DEBUG_SPINLOCK
extern void __rwlock_init(rwlock_t *lock, const char *name,
- struct lock_class_key *key);
+ struct lock_class_key *key,
+ const char *file, unsigned int line);
# define rwlock_init(lock) \
do { \
static struct lock_class_key __key; \
\
- __rwlock_init((lock), #lock, &__key); \
+ __rwlock_init((lock), #lock, &__key, \
+ __FILE__, __LINE__); \
} while (0)
#else
# define rwlock_init(lock) \
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index bd31808..e5e70c4 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -25,7 +25,11 @@ typedef struct {
#define RWLOCK_MAGIC 0xdeaf1eed

#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { \
+ .name = #lockname, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }
#else
# define RW_DEP_MAP_INIT(lockname)
#endif
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 81fa789..1cdae8b 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -34,14 +34,16 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
EXPORT_SYMBOL(__raw_spin_lock_init);

void __rwlock_init(rwlock_t *lock, const char *name,
- struct lock_class_key *key)
+ struct lock_class_key *key,
+ const char *file, unsigned int line)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
- lockdep_init_map(&lock->dep_map, name, key, 0);
+ __lockdep_init_map(&lock->dep_map, name, key, 0,
+ file, line);
#endif
lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
lock->magic = RWLOCK_MAGIC;
--
1.6.5.2

2010-01-30 11:44:20

by Hitoshi Mitake

Subject: [PATCH v2 02/11] perf: Add util/include/linuxhash.h to include hash.h of kernel

linux/hash.h, the hash header of the kernel, is also useful for perf.

util/include/linux/hash.h includes the kernel's linux/hash.h, so we can
now use hash facilities (e.g. hash_long()) in perf.
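
For example, builtin-lock.c later in this series uses it like this
(condensed from that patch):

  #include <linux/hash.h>	/* resolved via util/include/linux/hash.h */

  #define LOCKHASH_BITS 12
  #define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)

  /* map a lockdep_map address to one of the 4096 hash buckets */
  #define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
  #define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))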

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
---
tools/perf/Makefile | 1 +
tools/perf/util/include/linux/hash.h | 5 +++++
2 files changed, 6 insertions(+), 0 deletions(-)
create mode 100644 tools/perf/util/include/linux/hash.h

diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 9b173e6..b2bce1f 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -333,6 +333,7 @@ LIB_FILE=libperf.a
LIB_H += ../../include/linux/perf_event.h
LIB_H += ../../include/linux/rbtree.h
LIB_H += ../../include/linux/list.h
+LIB_H += ../../include/linux/hash.h
LIB_H += ../../include/linux/stringify.h
LIB_H += util/include/linux/bitmap.h
LIB_H += util/include/linux/bitops.h
diff --git a/tools/perf/util/include/linux/hash.h b/tools/perf/util/include/linux/hash.h
new file mode 100644
index 0000000..201f573
--- /dev/null
+++ b/tools/perf/util/include/linux/hash.h
@@ -0,0 +1,5 @@
+#ifndef PERF_HASH_H
+#define PERF_HASH_H
+
+#include "../../../../include/linux/hash.h"
+#endif
--
1.6.5.2

2010-01-30 11:44:24

by Hitoshi Mitake

Subject: [PATCH v2 08/11] lockdep: Add file and line to initialize sequence of mutex

Like the other patches of this series, this patch adds
__FILE__ and __LINE__ to the lockdep_map of mutex.

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
---
include/linux/mutex-debug.h | 2 +-
include/linux/mutex.h | 12 +++++++++---
kernel/mutex-debug.c | 5 +++--
kernel/mutex-debug.h | 3 ++-
kernel/mutex.c | 5 +++--
kernel/mutex.h | 2 +-
6 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 731d77d..f86bf4e 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -15,7 +15,7 @@
do { \
static struct lock_class_key __key; \
\
- __mutex_init((mutex), #mutex, &__key); \
+ __mutex_init((mutex), #mutex, &__key, __FILE__, __LINE__); \
} while (0)

extern void mutex_destroy(struct mutex *lock);
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 878cab4..ce9082a 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -82,14 +82,19 @@ struct mutex_waiter {
do { \
static struct lock_class_key __key; \
\
- __mutex_init((mutex), #mutex, &__key); \
+ __mutex_init((mutex), #mutex, &__key, \
+ __FILE__, __LINE__); \
} while (0)
# define mutex_destroy(mutex) do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { .name = #lockname }
+ , .dep_map = { \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ .name = #lockname, \
+ }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
@@ -105,7 +110,8 @@ do { \
struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

extern void __mutex_init(struct mutex *lock, const char *name,
- struct lock_class_key *key);
+ struct lock_class_key *key,
+ const char *file, unsigned int line);

/**
* mutex_is_locked - is the mutex locked
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index ec815a9..4a1321b 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -81,14 +81,15 @@ void debug_mutex_unlock(struct mutex *lock)
}

void debug_mutex_init(struct mutex *lock, const char *name,
- struct lock_class_key *key)
+ struct lock_class_key *key,
+ const char *file, unsigned int line)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
- lockdep_init_map(&lock->dep_map, name, key, 0);
+ __lockdep_init_map(&lock->dep_map, name, key, 0, file, line);
#endif
lock->magic = lock;
}
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index 57d527a..154a909 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -25,7 +25,8 @@ extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct thread_info *ti);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
- struct lock_class_key *key);
+ struct lock_class_key *key,
+ const char *file, unsigned int line);

static inline void mutex_set_owner(struct mutex *lock)
{
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 632f04c..80e3876 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -46,14 +46,15 @@
* It is not allowed to initialize an already locked mutex.
*/
void
-__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
+__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key,
+ const char *file, unsigned int line)
{
atomic_set(&lock->count, 1);
spin_lock_init(&lock->wait_lock);
INIT_LIST_HEAD(&lock->wait_list);
mutex_clear_owner(lock);

- debug_mutex_init(lock, name, key);
+ debug_mutex_init(lock, name, key, file, line);
}

EXPORT_SYMBOL(__mutex_init);
diff --git a/kernel/mutex.h b/kernel/mutex.h
index 67578ca..81914e6 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -40,7 +40,7 @@ static inline void mutex_clear_owner(struct mutex *lock)
#define debug_mutex_free_waiter(waiter) do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
#define debug_mutex_unlock(lock) do { } while (0)
-#define debug_mutex_init(lock, name, key) do { } while (0)
+#define debug_mutex_init(lock, name, key, file, line) do { } while (0)

static inline void
debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
--
1.6.5.2

2010-01-30 11:44:38

by Hitoshi Mitake

Subject: [PATCH v2 09/11] lockdep: Fix the way to initialize class_mutex for information of file and line

This patch adds __FILE__ and __LINE__ to the arguments passed to
__mutex_init() called in __class_register().
This affects the lockdep_map of class_mutex in struct class_private,
and these will be used to make lock trace events more human friendly.

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Acked-by: Greg Kroah-Hartman <[email protected]>
---
drivers/base/class.c | 3 ++-
1 files changed, 2 insertions(+), 1 deletions(-)

diff --git a/drivers/base/class.c b/drivers/base/class.c
index 161746d..cc0630f 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -162,7 +162,8 @@ int __class_register(struct class *cls, struct lock_class_key *key)
klist_init(&cp->class_devices, klist_class_dev_get, klist_class_dev_put);
INIT_LIST_HEAD(&cp->class_interfaces);
kset_init(&cp->class_dirs);
- __mutex_init(&cp->class_mutex, "struct class mutex", key);
+ __mutex_init(&cp->class_mutex, "struct class mutex", key,
+ __FILE__, __LINE__);
error = kobject_set_name(&cp->class_subsys.kobj, "%s", cls->name);
if (error) {
kfree(cp);
--
1.6.5.2

2010-01-30 11:44:52

by Hitoshi Mitake

Subject: [PATCH v2 10/11] perf lock: Enhance information of lock trace events

Now lock trace events carry the address, __FILE__ and __LINE__ of their lockdep_map.
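
With these fields, a lock_acquired line in the trace output looks roughly
like this (the address, name, file and line below are illustrative):

  ffff8801176598f8 &rq->lock kernel/sched.c:651 (2103 ns)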

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
---
include/trace/events/lock.h | 49 ++++++++++++++++++++++++++++++++++--------
1 files changed, 39 insertions(+), 10 deletions(-)

diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index a870ba1..3eef226 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -20,16 +20,23 @@ TRACE_EVENT(lock_acquire,
TP_STRUCT__entry(
__field(unsigned int, flags)
__string(name, lock->name)
+ __field(void *, lockdep_addr)
+ __string(file, lock->file)
+ __field(unsigned int, line)
),

TP_fast_assign(
__entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
__assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
+ __assign_str(file, lock->file);
+ __entry->line = lock->line;
),

- TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "",
+ TP_printk("%p %s%s%s %s:%u", __entry->lockdep_addr,
+ (__entry->flags & 1) ? "try " : "",
(__entry->flags & 2) ? "read " : "",
- __get_str(name))
+ __get_str(name), __get_str(file), __entry->line)
);

TRACE_EVENT(lock_release,
@@ -40,13 +47,21 @@ TRACE_EVENT(lock_release,

TP_STRUCT__entry(
__string(name, lock->name)
+ __field(void *, lockdep_addr)
+ __string(file, lock->file)
+ __field(unsigned int, line)
),

TP_fast_assign(
__assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
+ __assign_str(file, lock->file);
+ __entry->line = lock->line;
),

- TP_printk("%s", __get_str(name))
+ TP_printk("%p %s %s:%u",
+ __entry->lockdep_addr, __get_str(name),
+ __get_str(file), __entry->line)
);

#ifdef CONFIG_LOCK_STAT
@@ -59,13 +74,21 @@ TRACE_EVENT(lock_contended,

TP_STRUCT__entry(
__string(name, lock->name)
+ __field(void *, lockdep_addr)
+ __string(file, lock->file)
+ __field(unsigned int, line)
),

TP_fast_assign(
__assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
+ __assign_str(file, lock->file);
+ __entry->line = lock->line;
),

- TP_printk("%s", __get_str(name))
+ TP_printk("%p %s %s:%u",
+ __entry->lockdep_addr, __get_str(name),
+ __get_str(file), __entry->line)
);

TRACE_EVENT(lock_acquired,
@@ -75,16 +98,22 @@ TRACE_EVENT(lock_acquired,

TP_STRUCT__entry(
__string(name, lock->name)
- __field(unsigned long, wait_usec)
- __field(unsigned long, wait_nsec_rem)
+ __field(s64, wait_nsec)
+ __field(void *, lockdep_addr)
+ __string(file, lock->file)
+ __field(unsigned int, line)
),
+
TP_fast_assign(
__assign_str(name, lock->name);
- __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC);
- __entry->wait_usec = (unsigned long) waittime;
+ __entry->wait_nsec = waittime;
+ __entry->lockdep_addr = lock;
+ __assign_str(file, lock->file);
+ __entry->line = lock->line;
),
- TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec,
- __entry->wait_nsec_rem)
+ TP_printk("%p %s %s:%u (%llu ns)", __entry->lockdep_addr,
+ __get_str(name), __get_str(file), __entry->line,
+ __entry->wait_nsec)
);

#endif
--
1.6.5.2

2010-01-30 11:45:00

by Hitoshi Mitake

Subject: [PATCH v2 07/11] lockdep: Add file and line to initialize sequence of rwsem (x86)

This patch adds __FILE__ and __LINE__ to the lockdep_map of rw_semaphore.
This patch only affects the x86 architecture-dependent rwsem.

If this patch is accepted, I'll prepare and send patches
adding __FILE__ and __LINE__ to the rw_semaphore of the other architectures
which provide an architecture-dependent rwsem.h.

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Thomas Gleixner <[email protected]>
---
arch/x86/include/asm/rwsem.h | 9 +++++++--
1 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index ca7517d..24cbe3a 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -74,7 +74,12 @@ struct rw_semaphore {
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+# define __RWSEM_DEP_MAP_INIT(lockname) \
+ , .dep_map = { \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ .name = #lockname, \
+ }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
@@ -96,7 +101,7 @@ extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
do { \
static struct lock_class_key __key; \
\
- __init_rwsem((sem), #sem, &__key); \
+ __init_rwsem((sem), #sem, &__key, __FILE__, __LINE__); \
} while (0)

/*
--
1.6.5.2

2010-01-30 11:45:43

by Hitoshi Mitake

Subject: [PATCH v2 01/11] perf tools: Add __data_loc support

This patch is required to test the next patch for perf lock.

In commit 064739bc4b3d7f424b2f25547e6611bcf0132415,
support for the "__data_loc" format modifier was added.

But when I wanted to parse the format of lock_acquired (or some
other event), raw_field_ptr() did not return the correct pointer.

So I modified raw_field_ptr() as in this patch. Now
raw_field_ptr() works well.
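
For background (my summary of the standard ftrace __data_loc encoding, not
something introduced by this patch): a __data_loc string field stores a
32-bit descriptor in the fixed part of the record, with the payload offset
in the low 16 bits and the payload length in the high 16 bits. The hunk
below returns the payload instead of the descriptor:

  int desc   = *(int *)(data + field->offset); /* 32-bit __data_loc descriptor */
  int offset = desc & 0xffff;                  /* payload offset within the record */
  int len    = (desc >> 16) & 0xffff;          /* payload length (unused here) */
  void *str  = data + offset;                  /* the actual string bytes */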

Signed-off-by: Hitoshi Mitake <[email protected]>
Acked-by: Frederic Weisbecker <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Tom Zanussi <[email protected]>
Cc: Steven Rostedt <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
tools/perf/util/trace-event-parse.c | 7 +++++++
1 files changed, 7 insertions(+), 0 deletions(-)

diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index c5c32be..13aea80 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -1925,6 +1925,13 @@ void *raw_field_ptr(struct event *event, const char *name, void *data)
if (!field)
return NULL;

+ if (field->flags & FIELD_IS_STRING) {
+ int offset;
+ offset = *(int *)(data + field->offset);
+ offset &= 0xffff;
+ return data + offset;
+ }
+
return data + field->offset;
}

--
1.6.5.2

2010-01-30 11:45:49

by Hitoshi Mitake

Subject: [PATCH v2 04/11] lockdep: Add file and line to initialize sequence of spinlock

spinlock has two ways to be initialized:
1: the macro DEFINE_SPINLOCK for statically defined locks
2: the function spin_lock_init() for locks on dynamically allocated memory

This patch modifies both initialization sequences
to add __FILE__ and __LINE__ to the lockdep_map.
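
As an illustration of the static path (a sketch of the expansion, not part
of the patch; the file name and line number are hypothetical):

  /* foo.c */
  DEFINE_SPINLOCK(foo_lock);
  /*
   * With CONFIG_DEBUG_LOCK_ALLOC, SPIN_DEP_MAP_INIT(foo_lock) now yields
   * roughly:
   *   .dep_map = { .name = "foo_lock", .file = "foo.c", .line = 2 }
   * because __FILE__/__LINE__ are expanded where the lock is defined.
   */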

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
---
include/linux/spinlock.h | 6 ++++--
include/linux/spinlock_types.h | 6 +++++-
lib/spinlock_debug.c | 6 ++++--
3 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 8608821..f8b8363 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -90,12 +90,14 @@

#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
- struct lock_class_key *key);
+ struct lock_class_key *key,
+ const char *file, unsigned int line);
# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
- __raw_spin_lock_init((lock), #lock, &__key); \
+ __raw_spin_lock_init((lock), #lock, &__key, \
+ __FILE__, __LINE__); \
} while (0)

#else
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 851b778..6f2d558 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -36,7 +36,11 @@ typedef struct raw_spinlock {
#define SPINLOCK_OWNER_INIT ((void *)-1L)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { \
+ .name = #lockname, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }
#else
# define SPIN_DEP_MAP_INIT(lockname)
#endif
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 4755b98..81fa789 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -14,14 +14,16 @@
#include <linux/module.h>

void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
- struct lock_class_key *key)
+ struct lock_class_key *key,
+ const char *file, unsigned int line)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
- lockdep_init_map(&lock->dep_map, name, key, 0);
+ __lockdep_init_map(&lock->dep_map, name, key, 0,
+ file, line);
#endif
lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
--
1.6.5.2

2010-01-30 11:46:06

by Hitoshi Mitake

Subject: [PATCH v2 06/11] lockdep: Add file and line to initialize sequence of rwsem

Like the other patches of this series, this patch adds
__FILE__ and __LINE__ to the lockdep_map of rw_semaphore.
This patch only affects the architecture-independent rwsem.

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
---
include/linux/rwsem-spinlock.h | 11 ++++++++---
lib/rwsem-spinlock.c | 5 +++--
lib/rwsem.c | 5 +++--
3 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index bdfcc25..53f68e6 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -38,7 +38,11 @@ struct rw_semaphore {
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ .name = #lockname, \
+ }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
@@ -51,13 +55,14 @@ struct rw_semaphore {
struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
+ struct lock_class_key *key,
+ const char *file, unsigned int line);

#define init_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
- __init_rwsem((sem), #sem, &__key); \
+ __init_rwsem((sem), #sem, &__key, __FILE__, __LINE__); \
} while (0)

extern void __down_read(struct rw_semaphore *sem);
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index ccf95bf..4aceeb4 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -34,14 +34,15 @@ EXPORT_SYMBOL(rwsem_is_locked);
* initialise the semaphore
*/
void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key)
+ struct lock_class_key *key,
+ const char *file, unsigned int line)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held semaphore:
*/
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
- lockdep_init_map(&sem->dep_map, name, key, 0);
+ __lockdep_init_map(&sem->dep_map, name, key, 0, file, line);
#endif
sem->activity = 0;
spin_lock_init(&sem->wait_lock);
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 3e3365e..3f8d5cd 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -12,14 +12,15 @@
* Initialize an rwsem:
*/
void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key)
+ struct lock_class_key *key,
+ const char *file, unsigned int line)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Make sure we are not reinitializing a held semaphore:
*/
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
- lockdep_init_map(&sem->dep_map, name, key, 0);
+ __lockdep_init_map(&sem->dep_map, name, key, 0, file, line);
#endif
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
--
1.6.5.2

2010-01-30 11:46:24

by Hitoshi Mitake

Subject: [PATCH v2 03/11] lockdep: Add information of file and line where lock inited to struct lockdep_map

The __FILE__ and __LINE__ where lock instances are initialized are useful
information for profiling lock behaviour.
This patch adds them to struct lockdep_map.

The main impact of this patch is turning lockdep_init_map() into a macro.
The former lockdep_init_map() was renamed to __lockdep_init_map(),
and lockdep_init_map() is now a simple wrapper
that passes __FILE__ and __LINE__ to __lockdep_init_map().
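
The macro is needed because __FILE__ and __LINE__ are expanded at the call
site; a plain function would always report kernel/lockdep.c itself. A sketch
(the file name and line number are hypothetical):

  /* foo.c */
  lockdep_init_map(&lock->dep_map, "my_lock", &key, 0);
  /* expands to:
   *   __lockdep_init_map(&lock->dep_map, "my_lock", &key, 0, "foo.c", 2);
   */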

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
---
include/linux/lockdep.h | 20 ++++++++++++++++----
kernel/lockdep.c | 10 +++++++---
2 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 9ccf0e2..a631afa 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -134,6 +134,8 @@ struct lockdep_map {
struct lock_class_key *key;
struct lock_class *class_cache;
const char *name;
+ const char *file;
+ unsigned int line;
#ifdef CONFIG_LOCK_STAT
int cpu;
unsigned long ip;
@@ -241,15 +243,25 @@ extern void lockdep_on(void);
* to lockdep:
*/

-extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass);
+extern void __lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass,
+ const char *file, unsigned int line);
+
+/*
+ * There are many direct calls to __lockdep_init_map() (the former lockdep_init_map()),
+ * and these lack __FILE__ and __LINE__.
+ * The current lockdep_init_map() is a wrapper that provides them.
+ */
+#define lockdep_init_map(lock, name, key, subclass) \
+ __lockdep_init_map(lock, name, key, subclass, __FILE__, __LINE__)

/*
* To initialize a lockdep_map statically use this macro.
* Note that _name must not be NULL.
*/
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
- { .name = (_name), .key = (void *)(_key), }
+ { .name = (_name), .key = (void *)(_key), \
+ .file = __FILE__, .line = __LINE__}

/*
* Reinitialize a lock key - for cases where there is special locking or
@@ -342,7 +354,7 @@ static inline void lockdep_on(void)
# define lockdep_trace_alloc(g) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
-# define lockdep_init_map(lock, name, key, sub) \
+# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 5feaddc..f0f6dfd 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2681,8 +2681,9 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
/*
* Initialize a lock instance's lock-class mapping info:
*/
-void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass)
+void __lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass,
+ const char *file, unsigned int line)
{
lock->class_cache = NULL;
#ifdef CONFIG_LOCK_STAT
@@ -2713,8 +2714,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,

if (subclass)
register_lock_class(lock, subclass, 1);
+
+ lock->file = file;
+ lock->line = line;
}
-EXPORT_SYMBOL_GPL(lockdep_init_map);
+EXPORT_SYMBOL_GPL(__lockdep_init_map);

/*
* This gets called for every mutex_lock*()/spin_lock*() operation.
--
1.6.5.2

2010-01-30 11:55:52

by Hitoshi Mitake

Subject: Revert "perf record: Intercept all events"

ONLY FOR TEST USE!!!

This reverts commit f5a2c3dce03621b55f84496f58adc2d1a87ca16f.

This patch is required for making "perf lock rec" work.
Commit f5a2c3dce0 changed write_event() of builtin-record.c,
and the changed write_event() sometimes doesn't stop with perf lock rec.

I'm researching what makes write_event() loop infinitely,
and noticed that there are some events with event_t->header.size == 0.
But the details of the problem,
like what kind of events these are, aren't clear yet...
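
My reading of how the reverted loop gets stuck (a sketch, assuming such a
zero-sized event is reached in the buffer):

  do {
          ...
          ev = ((void *)ev) + ev->header.size; /* size == 0: ev never advances */
          processed_size += ev->header.size;   /* adds 0, never reaches size */
  } while (processed_size < size);             /* so the loop never terminates */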

*** I cannot sign this! ***
---
tools/perf/builtin-record.c | 28 ++++++++++------------------
1 files changed, 10 insertions(+), 18 deletions(-)

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 7bb9ca1..614fa9a 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -113,24 +113,16 @@ static void write_output(void *buf, size_t size)

static void write_event(event_t *buf, size_t size)
{
- size_t processed_size = buf->header.size;
- event_t *ev = buf;
-
- do {
- /*
- * Add it to the list of DSOs, so that when we finish this
- * record session we can pick the available build-ids.
- */
- if (ev->header.type == PERF_RECORD_MMAP) {
- struct list_head *head = &dsos__user;
- if (ev->header.misc == 1)
- head = &dsos__kernel;
- __dsos__findnew(head, ev->mmap.filename);
- }
-
- ev = ((void *)ev) + ev->header.size;
- processed_size += ev->header.size;
- } while (processed_size < size);
+ /*
+ * Add it to the list of DSOs, so that when we finish this
+ * record session we can pick the available build-ids.
+ */
+ if (buf->header.type == PERF_RECORD_MMAP) {
+ struct list_head *head = &dsos__user;
+ if (buf->mmap.header.misc == 1)
+ head = &dsos__kernel;
+ __dsos__findnew(head, buf->mmap.filename);
+ }

write_output(buf, size);
}
--
1.6.5.2

2010-01-31 08:30:26

by Ingo Molnar

Subject: Re: [PATCH 00/12 v2] perf lock: New subcommand "perf lock", for analyzing lock statistics


FYI, i've applied a file/line-less version of 'perf lock' to perf/core today.

The basic workflow is the usual:

perf lock record sleep 1 # or some other command
perf lock report # or 'perf lock trace'

[ I think we can do all the things that file/line can do with a less intrusive
(and more standard) call-site-IP based approach. For now we can key off the
names of the locks, that's coarser but also informative and allows us to
progress.

I've renamed 'perf lock prof' to 'perf lock report' - which is more in line
with other perf tools. ]

The tool clearly needs more work at the moment: i have tried perf lock on a
16-CPU box, and it was very slow, while it didn't really record all that many
events to justify the slowdown. A simple:

perf lock record sleep 1

makes the system very slow and requires a Ctrl-C to stop:

# time perf lock record sleep 1
^C[ perf record: Woken up 0 times to write data ]
[ perf record: Captured and wrote 5.204 MB perf.data (~227374 samples) ]

real 0m11.941s
user 0m0.020s
sys 0m11.661s

(The kernel config i used with that is attached.)

My suspicion is that the overhead of CONFIG_LOCK_STAT based tracing is way too
high at the moment, and needs to be reduced. I have removed the '-R' option
from perf lock record (which it got from perf sched where it makes sense but
here it's not really needed and -R further increases overhead), but that has
not solved the slowdown.

'top' shows this kind of messy picture of a high-overhead system:

PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
15003 root 20 0 28900 1116 784 R 102.2 0.0 0:14.23 perf
14981 mingo 20 0 15068 1396 880 R 85.7 0.0 0:11.67 top
15036 nobody 30 10 120m 22m 3224 R 74.4 0.1 0:04.47 cc1
15030 nobody 30 10 125m 27m 2744 R 64.6 0.1 0:03.88 cc1
20 root 20 0 0 0 0 R 53.4 0.0 0:15.04 ksoftirqd/8
7646 nobody 30 10 23488 720 264 S 51.9 0.0 0:04.96 distccd
43 root 20 0 0 0 0 R 50.2 0.0 0:06.26 events/8
15037 nobody 30 10 15696 4248 876 S 39.3 0.0 0:02.36 as
2891 nobody 30 10 23488 764 264 R 24.8 0.0 0:03.26 distccd

A couple of other details i noticed:

- 'perf lock' does not show up in the list of commands if one types 'perf'.
You can fix this by adding it to command-list.txt (see the sketch after
this list).

- i think we should add a reference to 'perf lock' in the config
LOCK_STAT entry's help text in lib/Kconfig.debug - so that people can see
what tool to use with lock statistics.

- perf report should be used to further reduce the overhead of
CONFIG_LOCK_STAT. If we want people to use this to tune for performance, we
want it to be exceptionally fast. Both the perf-lock-running and
perf-lock-not-running cases should be optimized. (Perhaps dynamic patching
techniques should be considered as well.)

- we waste 30 characters for the 'ID' column in perf lock report, which is
all hex, while no human would ever really read it, and would rarely rely
on it as well. It should be removed and only added back on option request
or so.
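
For the command-list.txt item above, the entry would presumably follow the
existing ones in that file (the category below is my guess):

  perf-lock			mainporcelain common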

Thanks,

Ingo


Attachments:
config (77.95 kB)

2010-01-31 08:31:55

by Hitoshi Mitake

Subject: [tip:perf/core] perf tools: Add __data_loc support

Commit-ID: 86d8d29634de4464d568e7c335c0da6cba64e8ab
Gitweb: http://git.kernel.org/tip/86d8d29634de4464d568e7c335c0da6cba64e8ab
Author: Hitoshi Mitake <[email protected]>
AuthorDate: Sat, 30 Jan 2010 20:43:23 +0900
Committer: Ingo Molnar <[email protected]>
CommitDate: Sun, 31 Jan 2010 08:27:52 +0100

perf tools: Add __data_loc support

This patch is required to test the next patch for perf lock.

In commit 064739bc4b3d7f424b2f25547e6611bcf0132415,
support for the "__data_loc" format modifier was added.

But when I wanted to parse the format of lock_acquired (or some
other event), raw_field_ptr() did not return the correct pointer.

So I modified raw_field_ptr() as in this patch. Now
raw_field_ptr() works well.

Signed-off-by: Hitoshi Mitake <[email protected]>
Acked-by: Frederic Weisbecker <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Tom Zanussi <[email protected]>
Cc: Steven Rostedt <[email protected]>
LKML-Reference: <[email protected]>
[ v3: fixed minor stylistic detail ]
Signed-off-by: Ingo Molnar <[email protected]>
---
tools/perf/util/trace-event-parse.c | 9 +++++++++
1 files changed, 9 insertions(+), 0 deletions(-)

diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index c5c32be..c4b3cb8 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -1925,6 +1925,15 @@ void *raw_field_ptr(struct event *event, const char *name, void *data)
if (!field)
return NULL;

+ if (field->flags & FIELD_IS_STRING) {
+ int offset;
+
+ offset = *(int *)(data + field->offset);
+ offset &= 0xffff;
+
+ return data + offset;
+ }
+
return data + field->offset;
}

2010-01-31 08:32:26

by Hitoshi Mitake

Subject: [tip:perf/core] perf lock: Enhance information of lock trace events

Commit-ID: c965be10ca3cb0bdd04016c852764afaf8e647c8
Gitweb: http://git.kernel.org/tip/c965be10ca3cb0bdd04016c852764afaf8e647c8
Author: Hitoshi Mitake <[email protected]>
AuthorDate: Sat, 30 Jan 2010 20:43:32 +0900
Committer: Ingo Molnar <[email protected]>
CommitDate: Sun, 31 Jan 2010 09:08:23 +0100

perf lock: Enhance information of lock trace events

Add wait time and lock identification details.

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
LKML-Reference: <[email protected]>
[ removed the file/line bits as we can do that better via IPs ]
Signed-off-by: Ingo Molnar <[email protected]>
---
include/trace/events/lock.h | 29 ++++++++++++++++++++---------
1 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index a870ba1..5c1dcfc 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -20,14 +20,17 @@ TRACE_EVENT(lock_acquire,
TP_STRUCT__entry(
__field(unsigned int, flags)
__string(name, lock->name)
+ __field(void *, lockdep_addr)
),

TP_fast_assign(
__entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
__assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
),

- TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "",
+ TP_printk("%p %s%s%s", __entry->lockdep_addr,
+ (__entry->flags & 1) ? "try " : "",
(__entry->flags & 2) ? "read " : "",
__get_str(name))
);
@@ -40,13 +43,16 @@ TRACE_EVENT(lock_release,

TP_STRUCT__entry(
__string(name, lock->name)
+ __field(void *, lockdep_addr)
),

TP_fast_assign(
__assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
),

- TP_printk("%s", __get_str(name))
+ TP_printk("%p %s",
+ __entry->lockdep_addr, __get_str(name))
);

#ifdef CONFIG_LOCK_STAT
@@ -59,13 +65,16 @@ TRACE_EVENT(lock_contended,

TP_STRUCT__entry(
__string(name, lock->name)
+ __field(void *, lockdep_addr)
),

TP_fast_assign(
__assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
),

- TP_printk("%s", __get_str(name))
+ TP_printk("%p %s",
+ __entry->lockdep_addr, __get_str(name))
);

TRACE_EVENT(lock_acquired,
@@ -75,16 +84,18 @@ TRACE_EVENT(lock_acquired,

TP_STRUCT__entry(
__string(name, lock->name)
- __field(unsigned long, wait_usec)
- __field(unsigned long, wait_nsec_rem)
+ __field(s64, wait_nsec)
+ __field(void *, lockdep_addr)
),
+
TP_fast_assign(
__assign_str(name, lock->name);
- __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC);
- __entry->wait_usec = (unsigned long) waittime;
+ __entry->wait_nsec = waittime;
+ __entry->lockdep_addr = lock;
),
- TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec,
- __entry->wait_nsec_rem)
+ TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
+ __get_str(name),
+ __entry->wait_nsec)
);

#endif
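
With this change every lock event carries the lockdep_map address as a
stable ID, and lock_acquired reports raw nanoseconds instead of a
pre-split usec/nsec pair. Illustrative (not captured) trace lines,
following the TP_printk formats above and assuming a contended
&rq->lock at a made-up address:

	lock_acquire:   0xffff8800059d6b58 &rq->lock
	lock_contended: 0xffff8800059d6b58 &rq->lock
	lock_acquired:  0xffff8800059d6b58 &rq->lock (3192 ns)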

2010-01-31 08:32:35

by Hitoshi Mitake

[permalink] [raw]
Subject: [tip:perf/core] perf lock: Introduce new tool "perf lock", for analyzing lock statistics

Commit-ID: 9b5e350c7a46a471d5b452836dbafe9aeaeca435
Gitweb: http://git.kernel.org/tip/9b5e350c7a46a471d5b452836dbafe9aeaeca435
Author: Hitoshi Mitake <[email protected]>
AuthorDate: Sat, 30 Jan 2010 20:43:33 +0900
Committer: Ingo Molnar <[email protected]>
CommitDate: Sun, 31 Jan 2010 09:08:26 +0100

perf lock: Introduce new tool "perf lock", for analyzing lock statistics

Adding new subcommand "perf lock" to perf.

I have a lot of remaining ToDos, but for now perf lock can
already provide minimal functionality for analyzing lock
statistics.

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
tools/perf/Makefile | 1 +
tools/perf/builtin-lock.c | 724 +++++++++++++++++++++++++++++++++++++++++++++
tools/perf/builtin.h | 1 +
tools/perf/perf.c | 1 +
4 files changed, 727 insertions(+), 0 deletions(-)

diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index b2bce1f..4296930 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -456,6 +456,7 @@ BUILTIN_OBJS += builtin-top.o
BUILTIN_OBJS += builtin-trace.o
BUILTIN_OBJS += builtin-probe.o
BUILTIN_OBJS += builtin-kmem.o
+BUILTIN_OBJS += builtin-lock.o

PERFLIBS = $(LIB_FILE)

diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
new file mode 100644
index 0000000..2b5f887
--- /dev/null
+++ b/tools/perf/builtin-lock.c
@@ -0,0 +1,724 @@
+#include "builtin.h"
+#include "perf.h"
+
+#include "util/util.h"
+#include "util/cache.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/header.h"
+
+#include "util/parse-options.h"
+#include "util/trace-event.h"
+
+#include "util/debug.h"
+#include "util/session.h"
+
+#include <sys/types.h>
+#include <sys/prctl.h>
+#include <semaphore.h>
+#include <pthread.h>
+#include <math.h>
+#include <limits.h>
+
+#include <linux/list.h>
+#include <linux/hash.h>
+
+/* based on kernel/lockdep.c */
+#define LOCKHASH_BITS 12
+#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
+
+static struct list_head lockhash_table[LOCKHASH_SIZE];
+
+#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
+#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
+
+#define LOCK_STATE_UNLOCKED 0 /* initial state */
+#define LOCK_STATE_LOCKED 1
+
+struct lock_stat {
+ struct list_head hash_entry;
+ struct rb_node rb; /* used for sorting */
+
+ /* FIXME: raw_field_value() returns unsigned long long,
+ * so the address of lockdep_map should be treated as 64bit.
+ * Is there a better solution? */
+ void *addr; /* address of lockdep_map, used as ID */
+ char *name; /* for strcpy(), we cannot use const */
+ char *file;
+ unsigned int line;
+
+ int state;
+ u64 prev_event_time; /* timestamp of previous event */
+
+ unsigned int nr_acquired;
+ unsigned int nr_acquire;
+ unsigned int nr_contended;
+ unsigned int nr_release;
+
+ /* these times are in nano sec. */
+ u64 wait_time_total;
+ u64 wait_time_min;
+ u64 wait_time_max;
+};
+
+/* build a simple key function: returns whether one is bigger than two */
+#define SINGLE_KEY(member) \
+ static int lock_stat_key_ ## member(struct lock_stat *one, \
+ struct lock_stat *two) \
+ { \
+ return one->member > two->member; \
+ }
+
+SINGLE_KEY(nr_acquired)
+SINGLE_KEY(nr_contended)
+SINGLE_KEY(wait_time_total)
+SINGLE_KEY(wait_time_min)
+SINGLE_KEY(wait_time_max)
+
+struct lock_key {
+ /*
+ * name: the value specified by the user;
+ * this should be simpler than the raw member name,
+ * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
+ */
+ const char *name;
+ int (*key)(struct lock_stat*, struct lock_stat*);
+};
+
+static const char *sort_key = "acquired";
+static int (*compare)(struct lock_stat *, struct lock_stat *);
+
+#define DEF_KEY_LOCK(name, fn_suffix) \
+ { #name, lock_stat_key_ ## fn_suffix }
+struct lock_key keys[] = {
+ DEF_KEY_LOCK(acquired, nr_acquired),
+ DEF_KEY_LOCK(contended, nr_contended),
+ DEF_KEY_LOCK(wait_total, wait_time_total),
+ DEF_KEY_LOCK(wait_min, wait_time_min),
+ DEF_KEY_LOCK(wait_max, wait_time_max),
+
+ /* more complicated comparisons should go here */
+
+ { NULL, NULL }
+};
+
+static void select_key(void)
+{
+ int i;
+
+ for (i = 0; keys[i].name; i++) {
+ if (!strcmp(keys[i].name, sort_key)) {
+ compare = keys[i].key;
+ return;
+ }
+ }
+
+ die("Unknown compare key:%s\n", sort_key);
+}
+
+static struct rb_root result; /* place to store sorted data */
+
+static void insert_to_result(struct lock_stat *st,
+ int (*bigger)(struct lock_stat *,
+ struct lock_stat *))
+{
+ struct rb_node **rb = &result.rb_node;
+ struct rb_node *parent = NULL;
+ struct lock_stat *p;
+
+ while (*rb) {
+ p = container_of(*rb, struct lock_stat, rb);
+ parent = *rb;
+
+ if (bigger(st, p))
+ rb = &(*rb)->rb_left;
+ else
+ rb = &(*rb)->rb_right;
+ }
+
+ rb_link_node(&st->rb, parent, rb);
+ rb_insert_color(&st->rb, &result);
+}
+
+/* returns the leftmost element of result, and erases it */
+static struct lock_stat *pop_from_result(void)
+{
+ struct rb_node *node = result.rb_node;
+
+ if (!node)
+ return NULL;
+
+ while (node->rb_left)
+ node = node->rb_left;
+
+ rb_erase(node, &result);
+ return container_of(node, struct lock_stat, rb);
+}
+
+static struct lock_stat *lock_stat_findnew(void *addr, const char *name,
+ const char *file, unsigned int line)
+{
+ struct list_head *entry = lockhashentry(addr);
+ struct lock_stat *ret, *new;
+
+ list_for_each_entry(ret, entry, hash_entry) {
+ if (ret->addr == addr)
+ return ret;
+ }
+
+ new = zalloc(sizeof(struct lock_stat));
+ if (!new)
+ goto alloc_failed;
+
+ new->addr = addr;
+ new->name = zalloc(sizeof(char) * strlen(name) + 1);
+ if (!new->name)
+ goto alloc_failed;
+ strcpy(new->name, name);
+ new->file = zalloc(sizeof(char) * strlen(file) + 1);
+ if (!new->file)
+ goto alloc_failed;
+ strcpy(new->file, file);
+ new->line = line;
+
+ /* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */
+ new->state = LOCK_STATE_UNLOCKED;
+ new->wait_time_min = ULLONG_MAX;
+
+ list_add(&new->hash_entry, entry);
+ return new;
+
+alloc_failed:
+ die("memory allocation failed\n");
+}
+
+static char const *input_name = "perf.data";
+
+static int profile_cpu = -1;
+
+struct raw_event_sample {
+ u32 size;
+ char data[0];
+};
+
+struct trace_acquire_event {
+ void *addr;
+ const char *name;
+ const char *file;
+ unsigned int line;
+};
+
+struct trace_acquired_event {
+ void *addr;
+ const char *name;
+ const char *file;
+ unsigned int line;
+};
+
+struct trace_contended_event {
+ void *addr;
+ const char *name;
+ const char *file;
+ unsigned int line;
+};
+
+struct trace_release_event {
+ void *addr;
+ const char *name;
+ const char *file;
+ unsigned int line;
+};
+
+struct trace_lock_handler {
+ void (*acquire_event)(struct trace_acquire_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*acquired_event)(struct trace_acquired_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*contended_event)(struct trace_contended_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*release_event)(struct trace_release_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+};
+
+static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct lock_stat *st;
+
+ st = lock_stat_findnew(acquire_event->addr, acquire_event->name,
+ acquire_event->file, acquire_event->line);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ break;
+ case LOCK_STATE_LOCKED:
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ st->prev_event_time = timestamp;
+}
+
+static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct lock_stat *st;
+
+ st = lock_stat_findnew(acquired_event->addr, acquired_event->name,
+ acquired_event->file, acquired_event->line);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ st->state = LOCK_STATE_LOCKED;
+ st->nr_acquired++;
+ break;
+ case LOCK_STATE_LOCKED:
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ st->prev_event_time = timestamp;
+}
+
+static void prof_lock_contended_event(struct trace_contended_event *contended_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct lock_stat *st;
+
+ st = lock_stat_findnew(contended_event->addr, contended_event->name,
+ contended_event->file, contended_event->line);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ break;
+ case LOCK_STATE_LOCKED:
+ st->nr_contended++;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ st->prev_event_time = timestamp;
+}
+
+static void prof_lock_release_event(struct trace_release_event *release_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct lock_stat *st;
+ u64 hold_time;
+
+ st = lock_stat_findnew(release_event->addr, release_event->name,
+ release_event->file, release_event->line);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ break;
+ case LOCK_STATE_LOCKED:
+ st->state = LOCK_STATE_UNLOCKED;
+ hold_time = timestamp - st->prev_event_time;
+
+ if (timestamp < st->prev_event_time) {
+ /* unfortunately, this can happen... */
+ goto end;
+ }
+
+ if (st->wait_time_min > hold_time)
+ st->wait_time_min = hold_time;
+ if (st->wait_time_max < hold_time)
+ st->wait_time_max = hold_time;
+ st->wait_time_total += hold_time;
+
+ st->nr_release++;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+end:
+ st->prev_event_time = timestamp;
+}
+
+/* lock oriented handlers */
+/* TODO: handlers for CPU oriented, thread oriented */
+static struct trace_lock_handler prof_lock_ops = {
+ .acquire_event = prof_lock_acquire_event,
+ .acquired_event = prof_lock_acquired_event,
+ .contended_event = prof_lock_contended_event,
+ .release_event = prof_lock_release_event,
+};
+
+static struct trace_lock_handler *trace_handler;
+
+static void
+process_lock_acquire_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_acquire_event acquire_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&acquire_event.addr, &tmp, sizeof(void *));
+ acquire_event.name = (char *)raw_field_ptr(event, "name", data);
+ acquire_event.file = (char *)raw_field_ptr(event, "file", data);
+ acquire_event.line =
+ (unsigned int)raw_field_value(event, "line", data);
+
+ if (trace_handler->acquire_event) {
+ trace_handler->acquire_event(&acquire_event,
+ event, cpu, timestamp, thread);
+ }
+}
+
+static void
+process_lock_acquired_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_acquired_event acquired_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&acquired_event.addr, &tmp, sizeof(void *));
+ acquired_event.name = (char *)raw_field_ptr(event, "name", data);
+ acquired_event.file = (char *)raw_field_ptr(event, "file", data);
+ acquired_event.line =
+ (unsigned int)raw_field_value(event, "line", data);
+
+ if (trace_handler->acquire_event) {
+ trace_handler->acquired_event(&acquired_event,
+ event, cpu, timestamp, thread);
+ }
+}
+
+static void
+process_lock_contended_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_contended_event contended_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&contended_event.addr, &tmp, sizeof(void *));
+ contended_event.name = (char *)raw_field_ptr(event, "name", data);
+ contended_event.file = (char *)raw_field_ptr(event, "file", data);
+ contended_event.line =
+ (unsigned int)raw_field_value(event, "line", data);
+
+ if (trace_handler->acquire_event) {
+ trace_handler->contended_event(&contended_event,
+ event, cpu, timestamp, thread);
+ }
+}
+
+static void
+process_lock_release_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_release_event release_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&release_event.addr, &tmp, sizeof(void *));
+ release_event.name = (char *)raw_field_ptr(event, "name", data);
+ release_event.file = (char *)raw_field_ptr(event, "file", data);
+ release_event.line =
+ (unsigned int)raw_field_value(event, "line", data);
+
+ if (trace_handler->acquire_event) {
+ trace_handler->release_event(&release_event,
+ event, cpu, timestamp, thread);
+ }
+}
+
+static void
+process_raw_event(void *data, int cpu,
+ u64 timestamp, struct thread *thread)
+{
+ struct event *event;
+ int type;
+
+ type = trace_parse_common_type(data);
+ event = trace_find_event(type);
+
+ if (!strcmp(event->name, "lock_acquire"))
+ process_lock_acquire_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "lock_acquired"))
+ process_lock_acquired_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "lock_contended"))
+ process_lock_contended_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "lock_release"))
+ process_lock_release_event(data, event, cpu, timestamp, thread);
+}
+
+static int process_sample_event(event_t *event, struct perf_session *session)
+{
+ struct thread *thread;
+ struct sample_data data;
+
+ bzero(&data, sizeof(struct sample_data));
+ event__parse_sample(event, session->sample_type, &data);
+ thread = perf_session__findnew(session, data.pid);
+
+ /*
+ * FIXME: this causes warn on 32bit environment
+ * because of (void *)data.ip (type of data.ip is u64)
+ */
+/* dump_printf("(IP, %d): %d/%d: %p period: %llu\n", */
+/* event->header.misc, */
+/* data.pid, data.tid, (void *)data.ip, data.period); */
+
+ if (thread == NULL) {
+ pr_debug("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+
+ if (profile_cpu != -1 && profile_cpu != (int) data.cpu)
+ return 0;
+
+ process_raw_event(data.raw_data, data.cpu, data.time, thread);
+
+ return 0;
+}
+
+/* TODO: various way to print, coloring, nano or milli sec */
+static void print_result(void)
+{
+ struct lock_stat *st;
+ char cut_name[20];
+
+ printf("%18s ", "ID");
+ printf("%20s ", "Name");
+ printf("%10s ", "acquired");
+ printf("%10s ", "contended");
+
+ printf("%15s ", "total wait (ns)");
+ printf("%15s ", "max wait (ns)");
+ printf("%15s ", "min wait (ns)");
+
+ printf("\n\n");
+
+ while ((st = pop_from_result())) {
+ bzero(cut_name, 20);
+
+ printf("%p ", st->addr);
+
+ if (strlen(st->name) < 16) {
+ /* output raw name */
+ printf("%20s ", st->name);
+ } else {
+ strncpy(cut_name, st->name, 16);
+ cut_name[16] = '.';
+ cut_name[17] = '.';
+ cut_name[18] = '.';
+ cut_name[19] = '\0';
+ /* cut off name for saving output style */
+ printf("%20s ", cut_name);
+ }
+
+ printf("%10u ", st->nr_acquired);
+ printf("%10u ", st->nr_contended);
+
+ printf("%15llu ", st->wait_time_total);
+ printf("%15llu ", st->wait_time_max);
+ printf("%15llu ", st->wait_time_min == ULLONG_MAX ?
+ 0 : st->wait_time_min);
+ printf("\n");
+ }
+}
+
+static void dump_map(void)
+{
+ unsigned int i;
+ struct lock_stat *st;
+
+ for (i = 0; i < LOCKHASH_SIZE; i++) {
+ list_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ printf("%p: %s (src: %s, line: %u)\n",
+ st->addr, st->name, st->file, st->line);
+ }
+ }
+}
+
+static struct perf_event_ops eops = {
+ .sample = process_sample_event,
+ .comm = event__process_comm,
+};
+
+static struct perf_session *session;
+
+static int read_events(void)
+{
+ session = perf_session__new(input_name, O_RDONLY, 0);
+ if (!session)
+ die("Initializing perf session failed\n");
+
+ return perf_session__process_events(session, &eops);
+}
+
+static void sort_result(void)
+{
+ unsigned int i;
+ struct lock_stat *st;
+
+ for (i = 0; i < LOCKHASH_SIZE; i++) {
+ list_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ insert_to_result(st, compare);
+ }
+ }
+}
+
+static void __cmd_prof(void)
+{
+ setup_pager();
+ select_key();
+ read_events();
+ sort_result();
+ print_result();
+}
+
+static const char * const prof_usage[] = {
+ "perf sched prof [<options>]",
+ NULL
+};
+
+static const struct option prof_options[] = {
+ OPT_STRING('k', "key", &sort_key, "acquired",
+ "key for sorting"),
+ /* TODO: type */
+ OPT_END()
+};
+
+static const char * const lock_usage[] = {
+ "perf lock [<options>] {record|trace|prof}",
+ NULL
+};
+
+static const struct option lock_options[] = {
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_BOOLEAN('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_END()
+};
+
+static const char *record_args[] = {
+ "record",
+ "-a",
+ "-R",
+ "-M",
+ "-f",
+ "-m", "1024",
+ "-c", "1",
+ "-e", "lock:lock_acquire:r",
+ "-e", "lock:lock_acquired:r",
+ "-e", "lock:lock_contended:r",
+ "-e", "lock:lock_release:r",
+};
+
+static int __cmd_record(int argc, const char **argv)
+{
+ unsigned int rec_argc, i, j;
+ const char **rec_argv;
+
+ rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+
+ for (i = 0; i < ARRAY_SIZE(record_args); i++)
+ rec_argv[i] = strdup(record_args[i]);
+
+ for (j = 1; j < (unsigned int)argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ return cmd_record(i, rec_argv, NULL);
+}
+
+int cmd_lock(int argc, const char **argv, const char *prefix __used)
+{
+ unsigned int i;
+
+ symbol__init();
+ for (i = 0; i < LOCKHASH_SIZE; i++)
+ INIT_LIST_HEAD(lockhash_table + i);
+
+ argc = parse_options(argc, argv, lock_options, lock_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc)
+ usage_with_options(lock_usage, lock_options);
+
+ if (!strncmp(argv[0], "rec", 3)) {
+ return __cmd_record(argc, argv);
+ } else if (!strncmp(argv[0], "prof", 4)) {
+ trace_handler = &prof_lock_ops;
+ if (argc) {
+ argc = parse_options(argc, argv,
+ prof_options, prof_usage, 0);
+ if (argc)
+ usage_with_options(prof_usage, prof_options);
+ }
+ __cmd_prof();
+ } else if (!strcmp(argv[0], "trace")) {
+ /* Aliased to 'perf trace' */
+ return cmd_trace(argc, argv, prefix);
+ } else if (!strcmp(argv[0], "map")) {
+ /* recycling prof_lock_ops */
+ trace_handler = &prof_lock_ops;
+ setup_pager();
+ read_events();
+ dump_map();
+ } else {
+ usage_with_options(lock_usage, lock_options);
+ }
+
+ return 0;
+}
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index dee97cf..10fe49e 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -31,5 +31,6 @@ extern int cmd_trace(int argc, const char **argv, const char *prefix);
extern int cmd_version(int argc, const char **argv, const char *prefix);
extern int cmd_probe(int argc, const char **argv, const char *prefix);
extern int cmd_kmem(int argc, const char **argv, const char *prefix);
+extern int cmd_lock(int argc, const char **argv, const char *prefix);

#endif
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 109b89b..57cb107 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -303,6 +303,7 @@ static void handle_internal_command(int argc, const char **argv)
{ "sched", cmd_sched, 0 },
{ "probe", cmd_probe, 0 },
{ "kmem", cmd_kmem, 0 },
+ { "lock", cmd_lock, 0 },
};
unsigned int i;
static const char ext[] = STRIP_EXTENSION;
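
A typical session with the new subcommand, as wired up above (output
elided; the subcommand and option names are the ones this patch
defines):

	# perf lock record sleep 1	# system-wide trace, writes perf.data
	# perf lock prof		# sorted by times acquired (default key)
	# perf lock prof -k wait_total	# sort by total wait time instead
	# perf lock map			# dump address -> name/file/line
	# perf lock trace		# raw events, aliased to 'perf trace'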

2010-01-31 08:32:22

by Hitoshi Mitake

[permalink] [raw]
Subject: [tip:perf/core] perf: Add util/include/linuxhash.h to include hash.h of kernel

Commit-ID: 18e97e06b5fb2d7f6cf272ca07d26d8247db8723
Gitweb: http://git.kernel.org/tip/18e97e06b5fb2d7f6cf272ca07d26d8247db8723
Author: Hitoshi Mitake <[email protected]>
AuthorDate: Sat, 30 Jan 2010 20:43:24 +0900
Committer: Ingo Molnar <[email protected]>
CommitDate: Sun, 31 Jan 2010 08:27:53 +0100

perf: Add util/include/linuxhash.h to include hash.h of kernel

linux/hash.h, the kernel's hash header, is also useful for perf.

util/include/linux/hash.h includes linux/hash.h, so we can use
hash facilities (e.g. hash_long()) in perf now.

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
tools/perf/Makefile | 1 +
tools/perf/util/include/linux/hash.h | 5 +++++
2 files changed, 6 insertions(+), 0 deletions(-)

diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 9b173e6..b2bce1f 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -333,6 +333,7 @@ LIB_FILE=libperf.a
LIB_H += ../../include/linux/perf_event.h
LIB_H += ../../include/linux/rbtree.h
LIB_H += ../../include/linux/list.h
+LIB_H += ../../include/linux/hash.h
LIB_H += ../../include/linux/stringify.h
LIB_H += util/include/linux/bitmap.h
LIB_H += util/include/linux/bitops.h
diff --git a/tools/perf/util/include/linux/hash.h b/tools/perf/util/include/linux/hash.h
new file mode 100644
index 0000000..201f573
--- /dev/null
+++ b/tools/perf/util/include/linux/hash.h
@@ -0,0 +1,5 @@
+#include "../../../../include/linux/hash.h"
+
+#ifndef PERF_HASH_H
+#define PERF_HASH_H
+#endif
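
The point of carrying linux/hash.h is helpers such as hash_long().
A minimal sketch of the open-hashing scheme builtin-lock.c builds on
top of it (this mirrors the LOCKHASH macros in that file, trimmed for
illustration):

#include <linux/list.h>
#include <linux/hash.h>

#define LOCKHASH_BITS	12
#define LOCKHASH_SIZE	(1UL << LOCKHASH_BITS)

static struct list_head lockhash_table[LOCKHASH_SIZE];

/* map a lockdep_map address to one of the 4096 buckets */
static struct list_head *lock_bucket(void *addr)
{
	return lockhash_table + hash_long((unsigned long)addr, LOCKHASH_BITS);
}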

2010-01-31 08:33:13

by Ingo Molnar

[permalink] [raw]
Subject: [tip:perf/core] perf lock: Clean up various details

Commit-ID: 59f411b62c9282891274e721fea29026b0eda3cc
Gitweb: http://git.kernel.org/tip/59f411b62c9282891274e721fea29026b0eda3cc
Author: Ingo Molnar <[email protected]>
AuthorDate: Sun, 31 Jan 2010 08:27:58 +0100
Committer: Ingo Molnar <[email protected]>
CommitDate: Sun, 31 Jan 2010 09:08:27 +0100

perf lock: Clean up various details

Fix up a few small stylistic details:

- use consistent vertical spacing/alignment
- remove line80 artifacts
- group some global variables better
- remove dead code

Plus rename 'prof' to 'report' to make it more in line with other
tools, and remove the line/file keying as we really want to use
IPs like the other tools do.

Signed-off-by: Ingo Molnar <[email protected]>
Cc: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
tools/perf/builtin-lock.c | 210 ++++++++++++++++++---------------------------
1 files changed, 82 insertions(+), 128 deletions(-)

diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 2b5f887..fb9ab2a 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -32,37 +32,37 @@ static struct list_head lockhash_table[LOCKHASH_SIZE];
#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))

-#define LOCK_STATE_UNLOCKED 0 /* initial state */
-#define LOCK_STATE_LOCKED 1
+#define LOCK_STATE_UNLOCKED 0 /* initial state */
+#define LOCK_STATE_LOCKED 1

struct lock_stat {
- struct list_head hash_entry;
- struct rb_node rb; /* used for sorting */
+ struct list_head hash_entry;
+ struct rb_node rb; /* used for sorting */

- /* FIXME: raw_field_value() returns unsigned long long,
+ /*
+ * FIXME: raw_field_value() returns unsigned long long,
* so the address of lockdep_map should be treated as 64bit.
- * Is there a better solution? */
- void *addr; /* address of lockdep_map, used as ID */
- char *name; /* for strcpy(), we cannot use const */
- char *file;
- unsigned int line;
+ * Is there a better solution?
+ */
+ void *addr; /* address of lockdep_map, used as ID */
+ char *name; /* for strcpy(), we cannot use const */

- int state;
- u64 prev_event_time; /* timestamp of previous event */
+ int state;
+ u64 prev_event_time; /* timestamp of previous event */

- unsigned int nr_acquired;
- unsigned int nr_acquire;
- unsigned int nr_contended;
- unsigned int nr_release;
+ unsigned int nr_acquired;
+ unsigned int nr_acquire;
+ unsigned int nr_contended;
+ unsigned int nr_release;

/* these times are in nano sec. */
- u64 wait_time_total;
- u64 wait_time_min;
- u64 wait_time_max;
+ u64 wait_time_total;
+ u64 wait_time_min;
+ u64 wait_time_max;
};

/* build a simple key function: returns whether one is bigger than two */
-#define SINGLE_KEY(member) \
+#define SINGLE_KEY(member) \
static int lock_stat_key_ ## member(struct lock_stat *one, \
struct lock_stat *two) \
{ \
@@ -81,12 +81,15 @@ struct lock_key {
* this should be simpler than the raw member name,
* e.g. nr_acquired -> acquired, wait_time_total -> wait_total
*/
- const char *name;
- int (*key)(struct lock_stat*, struct lock_stat*);
+ const char *name;
+ int (*key)(struct lock_stat*, struct lock_stat*);
};

-static const char *sort_key = "acquired";
-static int (*compare)(struct lock_stat *, struct lock_stat *);
+static const char *sort_key = "acquired";
+
+static int (*compare)(struct lock_stat *, struct lock_stat *);
+
+static struct rb_root result; /* place to store sorted data */

#define DEF_KEY_LOCK(name, fn_suffix) \
{ #name, lock_stat_key_ ## fn_suffix }
@@ -116,11 +119,8 @@ static void select_key(void)
die("Unknown compare key:%s\n", sort_key);
}

-static struct rb_root result; /* place to store sorted data */
-
static void insert_to_result(struct lock_stat *st,
- int (*bigger)(struct lock_stat *,
- struct lock_stat *))
+ int (*bigger)(struct lock_stat *, struct lock_stat *))
{
struct rb_node **rb = &result.rb_node;
struct rb_node *parent = NULL;
@@ -155,8 +155,7 @@ static struct lock_stat *pop_from_result(void)
return container_of(node, struct lock_stat, rb);
}

-static struct lock_stat *lock_stat_findnew(void *addr, const char *name,
- const char *file, unsigned int line)
+static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
struct list_head *entry = lockhashentry(addr);
struct lock_stat *ret, *new;
@@ -175,11 +174,6 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name,
if (!new->name)
goto alloc_failed;
strcpy(new->name, name);
- new->file = zalloc(sizeof(char) * strlen(file) + 1);
- if (!new->file)
- goto alloc_failed;
- strcpy(new->file, file);
- new->line = line;

/* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */
new->state = LOCK_STATE_UNLOCKED;
@@ -197,36 +191,28 @@ static char const *input_name = "perf.data";
static int profile_cpu = -1;

struct raw_event_sample {
- u32 size;
- char data[0];
+ u32 size;
+ char data[0];
};

struct trace_acquire_event {
- void *addr;
- const char *name;
- const char *file;
- unsigned int line;
+ void *addr;
+ const char *name;
};

struct trace_acquired_event {
- void *addr;
- const char *name;
- const char *file;
- unsigned int line;
+ void *addr;
+ const char *name;
};

struct trace_contended_event {
- void *addr;
- const char *name;
- const char *file;
- unsigned int line;
+ void *addr;
+ const char *name;
};

struct trace_release_event {
- void *addr;
- const char *name;
- const char *file;
- unsigned int line;
+ void *addr;
+ const char *name;
};

struct trace_lock_handler {
@@ -255,7 +241,8 @@ struct trace_lock_handler {
struct thread *thread);
};

-static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event,
+static void
+report_lock_acquire_event(struct trace_acquire_event *acquire_event,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -263,8 +250,7 @@ static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event,
{
struct lock_stat *st;

- st = lock_stat_findnew(acquire_event->addr, acquire_event->name,
- acquire_event->file, acquire_event->line);
+ st = lock_stat_findnew(acquire_event->addr, acquire_event->name);

switch (st->state) {
case LOCK_STATE_UNLOCKED:
@@ -279,7 +265,8 @@ static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event,
st->prev_event_time = timestamp;
}

-static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event,
+static void
+report_lock_acquired_event(struct trace_acquired_event *acquired_event,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -287,8 +274,7 @@ static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event
{
struct lock_stat *st;

- st = lock_stat_findnew(acquired_event->addr, acquired_event->name,
- acquired_event->file, acquired_event->line);
+ st = lock_stat_findnew(acquired_event->addr, acquired_event->name);

switch (st->state) {
case LOCK_STATE_UNLOCKED:
@@ -305,7 +291,8 @@ static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event
st->prev_event_time = timestamp;
}

-static void prof_lock_contended_event(struct trace_contended_event *contended_event,
+static void
+report_lock_contended_event(struct trace_contended_event *contended_event,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -313,8 +300,7 @@ static void prof_lock_contended_event(struct trace_contended_event *contended_ev
{
struct lock_stat *st;

- st = lock_stat_findnew(contended_event->addr, contended_event->name,
- contended_event->file, contended_event->line);
+ st = lock_stat_findnew(contended_event->addr, contended_event->name);

switch (st->state) {
case LOCK_STATE_UNLOCKED:
@@ -330,7 +316,8 @@ static void prof_lock_contended_event(struct trace_contended_event *contended_ev
st->prev_event_time = timestamp;
}

-static void prof_lock_release_event(struct trace_release_event *release_event,
+static void
+report_lock_release_event(struct trace_release_event *release_event,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -339,8 +326,7 @@ static void prof_lock_release_event(struct trace_release_event *release_event,
struct lock_stat *st;
u64 hold_time;

- st = lock_stat_findnew(release_event->addr, release_event->name,
- release_event->file, release_event->line);
+ st = lock_stat_findnew(release_event->addr, release_event->name);

switch (st->state) {
case LOCK_STATE_UNLOCKED:
@@ -373,11 +359,11 @@ end:

/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
-static struct trace_lock_handler prof_lock_ops = {
- .acquire_event = prof_lock_acquire_event,
- .acquired_event = prof_lock_acquired_event,
- .contended_event = prof_lock_contended_event,
- .release_event = prof_lock_release_event,
+static struct trace_lock_handler report_lock_ops = {
+ .acquire_event = report_lock_acquire_event,
+ .acquired_event = report_lock_acquired_event,
+ .contended_event = report_lock_contended_event,
+ .release_event = report_lock_release_event,
};

static struct trace_lock_handler *trace_handler;
@@ -395,14 +381,9 @@ process_lock_acquire_event(void *data,
tmp = raw_field_value(event, "lockdep_addr", data);
memcpy(&acquire_event.addr, &tmp, sizeof(void *));
acquire_event.name = (char *)raw_field_ptr(event, "name", data);
- acquire_event.file = (char *)raw_field_ptr(event, "file", data);
- acquire_event.line =
- (unsigned int)raw_field_value(event, "line", data);

- if (trace_handler->acquire_event) {
- trace_handler->acquire_event(&acquire_event,
- event, cpu, timestamp, thread);
- }
+ if (trace_handler->acquire_event)
+ trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
}

static void
@@ -418,14 +399,9 @@ process_lock_acquired_event(void *data,
tmp = raw_field_value(event, "lockdep_addr", data);
memcpy(&acquired_event.addr, &tmp, sizeof(void *));
acquired_event.name = (char *)raw_field_ptr(event, "name", data);
- acquired_event.file = (char *)raw_field_ptr(event, "file", data);
- acquired_event.line =
- (unsigned int)raw_field_value(event, "line", data);

- if (trace_handler->acquire_event) {
- trace_handler->acquired_event(&acquired_event,
- event, cpu, timestamp, thread);
- }
+ if (trace_handler->acquire_event)
+ trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
}

static void
@@ -441,14 +417,9 @@ process_lock_contended_event(void *data,
tmp = raw_field_value(event, "lockdep_addr", data);
memcpy(&contended_event.addr, &tmp, sizeof(void *));
contended_event.name = (char *)raw_field_ptr(event, "name", data);
- contended_event.file = (char *)raw_field_ptr(event, "file", data);
- contended_event.line =
- (unsigned int)raw_field_value(event, "line", data);

- if (trace_handler->acquire_event) {
- trace_handler->contended_event(&contended_event,
- event, cpu, timestamp, thread);
- }
+ if (trace_handler->acquire_event)
+ trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
}

static void
@@ -464,14 +435,9 @@ process_lock_release_event(void *data,
tmp = raw_field_value(event, "lockdep_addr", data);
memcpy(&release_event.addr, &tmp, sizeof(void *));
release_event.name = (char *)raw_field_ptr(event, "name", data);
- release_event.file = (char *)raw_field_ptr(event, "file", data);
- release_event.line =
- (unsigned int)raw_field_value(event, "line", data);

- if (trace_handler->acquire_event) {
- trace_handler->release_event(&release_event,
- event, cpu, timestamp, thread);
- }
+ if (trace_handler->acquire_event)
+ trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
}

static void
@@ -503,14 +469,6 @@ static int process_sample_event(event_t *event, struct perf_session *session)
event__parse_sample(event, session->sample_type, &data);
thread = perf_session__findnew(session, data.pid);

- /*
- * FIXME: this causes warn on 32bit environment
- * because of (void *)data.ip (type of data.ip is u64)
- */
-/* dump_printf("(IP, %d): %d/%d: %p period: %llu\n", */
-/* event->header.misc, */
-/* data.pid, data.tid, (void *)data.ip, data.period); */
-
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
@@ -580,15 +538,14 @@ static void dump_map(void)

for (i = 0; i < LOCKHASH_SIZE; i++) {
list_for_each_entry(st, &lockhash_table[i], hash_entry) {
- printf("%p: %s (src: %s, line: %u)\n",
- st->addr, st->name, st->file, st->line);
+ printf("%p: %s\n", st->addr, st->name);
}
}
}

static struct perf_event_ops eops = {
- .sample = process_sample_event,
- .comm = event__process_comm,
+ .sample = process_sample_event,
+ .comm = event__process_comm,
};

static struct perf_session *session;
@@ -614,7 +571,7 @@ static void sort_result(void)
}
}

-static void __cmd_prof(void)
+static void __cmd_report(void)
{
setup_pager();
select_key();
@@ -623,12 +580,12 @@ static void __cmd_prof(void)
print_result();
}

-static const char * const prof_usage[] = {
- "perf sched prof [<options>]",
+static const char * const report_usage[] = {
+ "perf lock report [<options>]",
NULL
};

-static const struct option prof_options[] = {
+static const struct option report_options[] = {
OPT_STRING('k', "key", &sort_key, "acquired",
"key for sorting"),
/* TODO: type */
@@ -636,17 +593,14 @@ static const struct option prof_options[] = {
};

static const char * const lock_usage[] = {
- "perf lock [<options>] {record|trace|prof}",
+ "perf lock [<options>] {record|trace|report}",
NULL
};

static const struct option lock_options[] = {
- OPT_STRING('i', "input", &input_name, "file",
- "input file name"),
- OPT_BOOLEAN('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
+ OPT_STRING('i', "input", &input_name, "file", "input file name"),
+ OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
OPT_END()
};

@@ -698,21 +652,21 @@ int cmd_lock(int argc, const char **argv, const char *prefix __used)

if (!strncmp(argv[0], "rec", 3)) {
return __cmd_record(argc, argv);
- } else if (!strncmp(argv[0], "prof", 4)) {
- trace_handler = &prof_lock_ops;
+ } else if (!strncmp(argv[0], "report", 6)) {
+ trace_handler = &report_lock_ops;
if (argc) {
argc = parse_options(argc, argv,
- prof_options, prof_usage, 0);
+ report_options, report_usage, 0);
if (argc)
- usage_with_options(prof_usage, prof_options);
+ usage_with_options(report_usage, report_options);
}
- __cmd_prof();
+ __cmd_report();
} else if (!strcmp(argv[0], "trace")) {
/* Aliased to 'perf trace' */
return cmd_trace(argc, argv, prefix);
} else if (!strcmp(argv[0], "map")) {
- /* recycling prof_lock_ops */
- trace_handler = &prof_lock_ops;
+ /* recycling report_lock_ops */
+ trace_handler = &report_lock_ops;
setup_pager();
read_events();
dump_map();
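
The sorting machinery is unchanged by this cleanup: SINGLE_KEY() stamps
out one comparator per statistic and the keys[] table maps a -k name to
it. A stand-alone sketch of the pattern (struct lock_stat trimmed to
two fields for illustration):

struct lock_stat {
	unsigned int		nr_acquired;
	unsigned long long	wait_time_total;
};

#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					    struct lock_stat *two)	\
	{								\
		return one->member > two->member;			\
	}

SINGLE_KEY(nr_acquired)
SINGLE_KEY(wait_time_total)

struct lock_key {
	const char *name;	/* the -k value the user passes */
	int (*key)(struct lock_stat *, struct lock_stat *);
};

static struct lock_key keys[] = {
	{ "acquired",	lock_stat_key_nr_acquired },
	{ "wait_total",	lock_stat_key_wait_time_total },
	{ NULL,		NULL },
};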

2010-01-31 20:31:11

by Frederic Weisbecker

[permalink] [raw]
Subject: Re: [PATCH 00/12 v2] perf lock: New subcommand "perf lock", for analyzing lock statistics

On Sun, Jan 31, 2010 at 09:29:53AM +0100, Ingo Molnar wrote:
>
> FYI, i've applied a file/line-less version of 'perf lock' to perf/core today.
>
> The basic workflow is the usual:
>
> perf lock record sleep 1 # or some other command
> perf lock report # or 'perf lock trace'
>
> [ I think we can do all the things that file/line can do with a less intrusive
> (and more standard) call-site-IP based approach. For now we can key off the
> names of the locks, that's coarser but also informative and allows us to
> progress.
>
> I've renamed 'perf lock prof' to 'perf lock report' - which is more in line
> with other perf tools. ]
>
> The tool clearly needs more work at the moment: i have tried perf lock on a 16
> > cpus box, and it was very slow, while it didn't really record all that many
> events to justify the slowdown. A simple:
>
> perf lock record sleep 1
>
> makes the system very slow and requires a Ctrl-C to stop:
>
> # time perf lock record sleep 1
> ^C[ perf record: Woken up 0 times to write data ]
> [ perf record: Captured and wrote 5.204 MB perf.data (~227374 samples) ]
>
> real 0m11.941s
> user 0m0.020s
> sys 0m11.661s
>
> > (The kernel config i used with that is attached.)
>
> My suspicion is that the overhead of CONFIG_LOCK_STAT based tracing is way too
> high at the moment, and needs to be reduced. I have removed the '-R' option
> from perf lock record (which it got from perf sched where it makes sense but
> here it's not really needed and -R further increases overhead), but that has
> not solved the slowdown.



Hmm, -R is mandatory if you want the raw sample events; otherwise the
event is just a counter.

Maybe you meant -M? Sure, -M is going to be a noticeable overhead
on 16 cores.

Anyway, I'm looking closely into improving the lock events to
reduce all this overhead. I'll create a lock_init event so
that we can gather the heavy information there (especially
the name of the lock).

Also, using TRACE_EVENT_FN lets us register a callback when
a tracepoint gets registered; I'm going to try to synthesize
the missing lock_init() events here.
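
(For context: TRACE_EVENT_FN is the TRACE_EVENT variant that also takes
registration hooks, so code can run when a tracepoint is first enabled
or finally disabled. A rough sketch of its shape; lock_init is the
event Frederic proposes, sketched hypothetically here, and
lock_event_reg()/lock_event_unreg() are made-up callback names:

TRACE_EVENT_FN(lock_init,

	TP_PROTO(struct lockdep_map *lock),

	TP_ARGS(lock),

	TP_STRUCT__entry(
		__string(name, lock->name)
		__field(void *, addr)
	),

	TP_fast_assign(
		__assign_str(name, lock->name);
		__entry->addr = lock;
	),

	TP_printk("%p %s", __entry->addr, __get_str(name)),

	lock_event_reg,		/* runs when the tracepoint is enabled */
	lock_event_unreg	/* runs when it is disabled */
);
)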

2010-02-01 07:28:27

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 00/12 v2] perf lock: New subcommand "perf lock", for analyzing lock statistics


* Frederic Weisbecker <[email protected]> wrote:

> On Sun, Jan 31, 2010 at 09:29:53AM +0100, Ingo Molnar wrote:
> >
> > FYI, i've applied a file/line-less version of 'perf lock' to perf/core today.
> >
> > The basic workflow is the usual:
> >
> > perf lock record sleep 1 # or some other command
> > perf lock report # or 'perf lock trace'
> >
> > [ I think we can do all the things that file/line can do with a less intrusive
> > (and more standard) call-site-IP based approach. For now we can key off the
> > names of the locks, that's coarser but also informative and allows us to
> > progress.
> >
> > I've renamed 'perf lock prof' to 'perf lock report' - which is more in line
> > with other perf tools. ]
> >
> > The tool clearly needs more work at the moment: i have tried perf lock on a 16
> > cpus box, and it was very slow, while it didn't really record all that many
> > events to justify the slowdown. A simple:
> >
> > perf lock record sleep 1
> >
> > makes the system very slow and requires a Ctrl-C to stop:
> >
> > # time perf lock record sleep 1
> > ^C[ perf record: Woken up 0 times to write data ]
> > [ perf record: Captured and wrote 5.204 MB perf.data (~227374 samples) ]
> >
> > real 0m11.941s
> > user 0m0.020s
> > sys 0m11.661s
> >
> > (The kernel config i used with that is attached.)
> >
> > My suspicion is that the overhead of CONFIG_LOCK_STAT based tracing is way too
> > high at the moment, and needs to be reduced. I have removed the '-R' option
> > from perf lock record (which it got from perf sched where it makes sense but
> > here it's not really needed and -R further increases overhead), but that has
> > not solved the slowdown.
>
>
>
> Hmm, -R is mandatory if you want the raw sample events; otherwise the
> event is just a counter.
>
> Maybe you meant -M? Sure, -M is going to be a noticeable overhead
> on 16 cores.

Yeah, i meant and tested -M.

> Anyway, I'm looking closely into improving the lock events to
> reduce all this overhead. I'll create a lock_init event so
> that we can gather the heavy information there (especially
> the name of the lock).
>
> Also, using TRACE_EVENT_FN lets us register a callback when
> a tracepoint gets registered; I'm going to try to synthesize
> the missing lock_init() events here.

Ok, thanks for looking into this!

Ingo

2010-02-04 07:04:16

by Hitoshi Mitake

[permalink] [raw]
Subject: Re: [PATCH 00/12 v2] perf lock: New subcommand "perf lock", for analyzing lock statistics

On 2010-01-31 17:29, Ingo Molnar wrote:
>
> FYI, i've applied a file/line-less version of 'perf lock' to perf/core today.
>
> The basic workflow is the usual:
>
> perf lock record sleep 1 # or some other command
> perf lock report # or 'perf lock trace'
>
> [ I think we can do all the things that file/line can do with a less intrusive
> (and more standard) call-site-IP based approach. For now we can key off the
> names of the locks, that's coarser but also informative and allows us to
> progress.
>
> I've renamed 'perf lock prof' to 'perf lock report' - which is more in line
> with other perf tools. ]
>
> The tool clearly needs more work at the moment: i have tried perf lock on a 16
> cpus box, and it was very slow, while it didn't really record all that many
> events to justify the slowdown. A simple:
>
> perf lock record sleep 1
>
> makes the system very slow and requires a Ctrl-C to stop:
>
> # time perf lock record sleep 1
> ^C[ perf record: Woken up 0 times to write data ]
> [ perf record: Captured and wrote 5.204 MB perf.data (~227374 samples) ]
>
> real 0m11.941s
> user 0m0.020s
> sys 0m11.661s
>
> (The kernel config i used with that is attached.)
>
> My suspicion is that the overhead of CONFIG_LOCK_STAT based tracing is way too
> high at the moment, and needs to be reduced. I have removed the '-R' option
> from perf lock record (which it got from perf sched where it makes sense but
> here it's not really needed and -R further increases overhead), but that has
> not solved the slowdown.
>
> 'top' shows this kind of messy picture of a high-overhead system:
>
> PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
> 15003 root 20 0 28900 1116 784 R 102.2 0.0 0:14.23 perf
> 14981 mingo 20 0 15068 1396 880 R 85.7 0.0 0:11.67 top
> 15036 nobody 30 10 120m 22m 3224 R 74.4 0.1 0:04.47 cc1
> 15030 nobody 30 10 125m 27m 2744 R 64.6 0.1 0:03.88 cc1
> 20 root 20 0 0 0 0 R 53.4 0.0 0:15.04 ksoftirqd/8
> 7646 nobody 30 10 23488 720 264 S 51.9 0.0 0:04.96 distccd
> 43 root 20 0 0 0 0 R 50.2 0.0 0:06.26 events/8
> 15037 nobody 30 10 15696 4248 876 S 39.3 0.0 0:02.36 as
> 2891 nobody 30 10 23488 764 264 R 24.8 0.0 0:03.26 distccd

Yeah, overhead is the main problem of perf lock now.
I'll work on it with Frederic's new patches; these are awesome.

>
> A couple of other details i noticed:
>
> - 'perf lock' does not show up in the list of commands if one types 'perf'.
> You can fix this by adding it to command-list.txt.
>
> - i think we should add a reference to 'perf lock' in the config
> LOCK_STAT entry's help text in lib/Kconfig.debug - so that people can see
> what tool to use with lock statistics.

Thanks, I've fixed these two points and prepared a patch.
I'll send it later.

>
> - perf report should be used to further reduce the overhead of
> CONFIG_LOCK_STAT. If we want people to use this to tune for performance, we
> want it to be exceptionally fast. Both the perf-lock-running and
> perf-lock-not-running cases should be optimized. (Perhaps dynamic patching
> techniques should be considered as well.)
>
> - we waste 30 characters for the 'ID' column in perf lock report, which is in
> all hexa, while no human would ever really read it, and would rarely rely
> on it as well. Should be removed and only added back on option request or
> so.

Yes, the lockdep address is an optional thing. It should be removed.

Thanks,
Hitoshi

2010-02-04 07:08:20

by Hitoshi Mitake

[permalink] [raw]
Subject: [PATCH] perf lock: Fix and add misc documentally things

I forgot to add the 'perf lock' line to command-list.txt,
so perf users could not find perf lock when they typed 'perf'.

Fixing command-list.txt requires a document
(tools/perf/Documentation/perf-lock.txt).
But perf lock is too much under construction to write a stable
document, so this is something like a pseudo document for now.

And I wrote a description of perf lock in the help section of
CONFIG_LOCK_STAT; this will guide users of lock trace events.

Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Signed-off-by: Hitoshi Mitake <[email protected]>
---
lib/Kconfig.debug | 6 ++++++
tools/perf/Documentation/perf-lock.txt | 29 +++++++++++++++++++++++++++++
tools/perf/command-list.txt | 1 +
3 files changed, 36 insertions(+), 0 deletions(-)
create mode 100644 tools/perf/Documentation/perf-lock.txt

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 25c3ed5..994d58c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -520,6 +520,12 @@ config LOCK_STAT

For more details, see Documentation/lockstat.txt

+ You can analyze lock events with "perf lock", a subcommand of perf.
+ If you want to use "perf lock", you need to turn on CONFIG_FTRACE.
+
+ CONFIG_LOCK_STAT defines the "contended" and "acquired" lock events.
+ (CONFIG_LOCKDEP defines the "acquire" and "release" events.)
+
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
new file mode 100644
index 0000000..77e6c74
--- /dev/null
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -0,0 +1,29 @@
+perf-lock(1)
+============
+
+NAME
+----
+perf-lock - Analyze lock events
+
+SYNOPSIS
+--------
+[verse]
+'perf lock' {record|report|trace}
+
+DESCRIPTION
+-----------
+You can analyze various lock behaviours
+and statistics with this 'perf lock' command.
+
+ 'perf lock record <command>' records lock events
+ between the start and end of <command>. This command
+ produces the file "perf.data", which contains the
+ tracing results of the lock events.
+
+ 'perf lock trace' shows raw lock events.
+
+ 'perf lock report' reports statistical data.
+
+SEE ALSO
+--------
+linkperf:perf[1]
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index 9afcff2..db6ee94 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -18,3 +18,4 @@ perf-top mainporcelain common
perf-trace mainporcelain common
perf-probe mainporcelain common
perf-kmem mainporcelain common
+perf-lock mainporcelain common
--
1.6.5.2

2010-02-28 08:58:25

by Hitoshi Mitake

[permalink] [raw]
Subject: [tip:perf/core] perf lock: Fix and add misc documentally things

Commit-ID: 84c6f88fc8265d7a712d7d6ed8fc1a878dfc84d1
Gitweb: http://git.kernel.org/tip/84c6f88fc8265d7a712d7d6ed8fc1a878dfc84d1
Author: Hitoshi Mitake <[email protected]>
AuthorDate: Thu, 4 Feb 2010 16:08:15 +0900
Committer: Frederic Weisbecker <[email protected]>
CommitDate: Sat, 27 Feb 2010 17:05:22 +0100

perf lock: Fix and add misc documentally things

I forgot to add the 'perf lock' line to command-list.txt,
so perf users could not find perf lock when they typed 'perf'.

Fixing command-list.txt requires a document
(tools/perf/Documentation/perf-lock.txt).
But perf lock is too much "under construction" to write a
stable document, so this is something like a pseudo document for now.

And I wrote a description of perf lock in the help section of
CONFIG_LOCK_STAT; this will guide users of lock trace events.

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Frederic Weisbecker <[email protected]>
---
lib/Kconfig.debug | 6 ++++++
tools/perf/Documentation/perf-lock.txt | 29 +++++++++++++++++++++++++++++
tools/perf/command-list.txt | 1 +
3 files changed, 36 insertions(+), 0 deletions(-)

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 25c3ed5..65f964e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -520,6 +520,12 @@ config LOCK_STAT

For more details, see Documentation/lockstat.txt

+ You can analyze lock events with "perf lock", a subcommand of perf.
+ If you want to use "perf lock", you need to turn on CONFIG_EVENT_TRACING.
+
+ CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
+ (CONFIG_LOCKDEP defines "acquire" and "release" events.)
+
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
new file mode 100644
index 0000000..b317102
--- /dev/null
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -0,0 +1,29 @@
+perf-lock(1)
+============
+
+NAME
+----
+perf-lock - Analyze lock events
+
+SYNOPSIS
+--------
+[verse]
+'perf lock' {record|report|trace}
+
+DESCRIPTION
+-----------
+You can analyze various lock behaviours
+and statistics with this 'perf lock' command.
+
+ 'perf lock record <command>' records lock events
+ between the start and end of <command>. This command
+ produces the file "perf.data", which contains the
+ tracing results of the lock events.
+
+ 'perf lock trace' shows raw lock events.
+
+ 'perf lock report' reports statistical data.
+
+SEE ALSO
+--------
+linkperf:perf[1]
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index 9afcff2..db6ee94 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -18,3 +18,4 @@ perf-top mainporcelain common
perf-trace mainporcelain common
perf-probe mainporcelain common
perf-kmem mainporcelain common
+perf-lock mainporcelain common