2009-12-20 02:57:51

by Hitoshi Mitake

Subject: [PATCH] perf lock: Implement basic sorting function of perf lock

Now perf lock can sort its results by an attribute
specified by the user.

The basic strategy for calculating and sorting is:
1) Gather the stat information of each lock instance into a hash list,
and calculate its attributes with a state machine.
2) Pop each stat from the hash list and push it into an rbtree,
keyed by the attribute the user specified (e.g. total wait time, contended count).
3) Pop the leftmost node of the rbtree, print it, and iterate
until the rbtree becomes empty.
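
In code terms the flow boils down to this (condensed from __cmd_prof()
in the patch below):

	select_key();	/* pick the comparator that matches --key */
	read_events();	/* fill lockhash_table, driving the state machine */
	sort_result();	/* move each lock_stat from the hash list into the rbtree */
	print_result();	/* pop_from_result() until the tree is empty */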

Example of use:
| % perf lock prof # the default sort key is the acquired count
| ID Name acquired contended total wait (ns) max wait (ns) min wait (ns)
|
| 0xffff8800b93e5348 key 96300 14384 108261953 11630 0
| 0xffff8800a46b78e8 &sem->wait_lock 6663 0 13123757 12145 1324
| 0xffff8800a46b7d98 &inode->i_data.t... 6312 0 28060235 21045 1379
| 0xffff8800a46b7920 &(&ip->i_lock)->... 3330 0 15588899 3930812 1465
| 0xffff8800a46b7e70 &inode->i_data.p... 3161 0 11336333 1110293 1358
| 0xffff8800bf002858 &parent->list_lo... 1992 117 4344197 8475 19
| ...
|
| % perf lock prof --key wait_total # which lock is waited for longest in total?
| ID Name acquired contended total wait (ns) max wait (ns) min wait (ns)
|
| 0xffff880058d1ee28 &event->child_mu... 1 0 4698426795 4698426795 4698426795
| 0xffff88005b9556f8 &mm->mmap_sem 168 0 210647186 33994947 1432
| 0xffff8800b93e5348 key 96300 14384 108261953 11630 0
| 0xffff8800a46b7d98 &inode->i_data.t... 6312 0 28060235 21045 1379
| 0xffffffff817bb398 dcache_lock 845 0 26993039 4087405 1337
| 0xffff8800a46b7c98 &sb->s_type->i_m... 61 0 18145694 6341240 2926
| ...

This patch also implements the "map" subcommand.
Some lock instance names are too long,
so such names (over 16 chars) are cut off in the output.
(e.g. "&inode->i_data.tree_lock" -> "&inode->i_data.t...")
"map" dumps the table of correspondences between
ID (address of lockdep_map) and name.
The user can grep the output of "map" by ID
to determine the full name of the lock being examined.
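
For example, to recover the full name of the first lock in the prof
output above, something like this should work:

| % perf lock map | grep 0xffff8800b93e5348
| 0xffff8800b93e5348: key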

I still have a lot of TODOs and FIXMEs,
but it's a good time to push this into perf/lock of the tip tree.
Could you apply it, Ingo?

Signed-off-by: Hitoshi Mitake <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
---
tools/perf/builtin-lock.c | 437 +++++++++++++++++++++++++++++++++++++--------
1 files changed, 358 insertions(+), 79 deletions(-)

diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index ca898c8..0c36abd 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -15,24 +15,43 @@

#include <sys/types.h>
#include <sys/prctl.h>
-
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
-
#include <limits.h>

-/*
- * FIXME and TODO:
- * At least, hash table should be used here.
- * Ideally, numeric ID for each lock instance is desired.
- */
+#include <linux/list.h>
+#include <linux/hash.h>
+
+/* based on kernel/lockdep.c */
+#define MAX_LOCKDEP_KEYS_BITS 13
+#define LOCKHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
+#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
+
+static struct list_head lockhash_table[LOCKHASH_SIZE];
+
+#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
+#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
+
+#define LOCK_STATE_UNLOCKED 0 /* initial state */
+#define LOCK_STATE_LOCKED 1
+
struct lock_stat {
- char *name; /* for strcpy(), we cannot use const */
+ struct list_head hash_entry;
+ struct rb_node rb; /* used for sorting */
+
+ /* FIXME: raw_field_value() returns unsigned long long,
+	 * so the address of lockdep_map has to be dealt with as 64 bit.
+	 * Is there a better solution? */
+ void *addr; /* address of lockdep_map, used as ID */
+ char *name; /* for strcpy(), we cannot use const */
+ int state;
+ u64 prev_event_time; /* timestamp of previous event */

unsigned int nr_acquired;
+ unsigned int nr_acquire;
unsigned int nr_contended;
- /* nr_contended - nr_acquired == immediate availability */
+ unsigned int nr_release;

/* these times are in nano sec. */
u64 wait_time_total;
@@ -40,46 +59,131 @@ struct lock_stat {
u64 wait_time_max;
};

-static struct lock_stat lock_stat_sentinel = {
- .name = NULL,
+/* build a simple key function: returns whether one is bigger than two */
+#define SINGLE_KEY(member) \
+ static int lock_stat_key_ ## member(struct lock_stat *one, \
+ struct lock_stat *two) \
+ { \
+ if (one->member > two->member) \
+ return 1; \
+ else \
+ return 0; \
+ }
+
+SINGLE_KEY(nr_acquired)
+SINGLE_KEY(nr_contended)
+SINGLE_KEY(wait_time_total)
+SINGLE_KEY(wait_time_min)
+SINGLE_KEY(wait_time_max)
+
+struct lock_key {
+ /*
+	 * name: the value the user specifies
+	 * this should be simpler than the raw name of the member
+ * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
+ */
+ const char *name;
+ int (*key)(struct lock_stat*, struct lock_stat*);
};

-static struct lock_stat **lock_stats;
+static const char *sort_key = "acquired";
+static int (*compare)(struct lock_stat *, struct lock_stat *);

-static void lock_stats_init(void)
+#define DEF_KEY_LOCK(name, fn_suffix) \
+ { #name, lock_stat_key_ ## fn_suffix }
+struct lock_key keys[] = {
+ DEF_KEY_LOCK(acquired, nr_acquired),
+ DEF_KEY_LOCK(contended, nr_contended),
+ DEF_KEY_LOCK(wait_total, wait_time_total),
+ DEF_KEY_LOCK(wait_min, wait_time_min),
+ DEF_KEY_LOCK(wait_max, wait_time_max),
+
+	/* extra, more complicated comparisons should go here */
+
+ { NULL, NULL }
+};
+
+static void select_key(void)
{
- lock_stats = zalloc(sizeof(struct lock_stat *));
- if (!lock_stats)
- die("memory allocation failed\n");
- lock_stats[0] = zalloc(sizeof(struct lock_stat));
- if (!lock_stats[0])
- die("memory allocation failed\n");
- lock_stats[0] = &lock_stat_sentinel;
+ int i;
+
+ for (i = 0; keys[i].name; i++) {
+ if (!strcmp(keys[i].name, sort_key)) {
+ compare = keys[i].key;
+ return;
+ }
+ }
+
+ die("Unknown compare key:%s\n", sort_key);
}

-static struct lock_stat *lock_stat_findnew(const char *name)
+static struct rb_root result; /* place to store sorted data */
+
+static void insert_to_result(struct lock_stat *st,
+ int (*bigger)(struct lock_stat *,
+ struct lock_stat *))
{
- int i, new;
+ struct rb_node **rb = &result.rb_node;
+ struct rb_node *parent = NULL;
+ struct lock_stat *p;
+
+ while (*rb) {
+ p = container_of(*rb, struct lock_stat, rb);
+ parent = *rb;
+
+ if (bigger(st, p))
+ rb = &(*rb)->rb_left;
+ else
+ rb = &(*rb)->rb_right;
+ }

- for (i = 0; lock_stats[i]->name; i++) {
- if (!strcmp(lock_stats[i]->name, name))
- return lock_stats[i];
+ rb_link_node(&st->rb, parent, rb);
+ rb_insert_color(&st->rb, &result);
+}
+
+/* returns the leftmost element of result, and erases it */
+static struct lock_stat *pop_from_result(void)
+{
+ struct rb_node *node = result.rb_node;
+
+ if (!node)
+ return NULL;
+
+ while (node->rb_left)
+ node = node->rb_left;
+
+ rb_erase(node, &result);
+ return container_of(node, struct lock_stat, rb);
+}
+
+static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
+{
+ struct list_head *entry = lockhashentry(addr);
+ struct lock_stat *ret, *new;
+
+ list_for_each_entry(ret, entry, hash_entry) {
+ if (ret->addr == addr)
+ return ret;
}

- new = i;
- lock_stats = xrealloc(lock_stats,
- sizeof(struct lock_stats *) * (i + 2));
- lock_stats[new] = calloc(sizeof(struct lock_stat), 1);
- if (!lock_stats[new])
- die("memory allocation failed\n");
- lock_stats[new]->name = calloc(sizeof(char), strlen(name) + 1);
- if (!lock_stats[new]->name)
- die("memory allocation failed\n");
- strcpy(lock_stats[new]->name, name);
- lock_stats[new]->wait_time_min = ULLONG_MAX;
-
- lock_stats[i + 1] = &lock_stat_sentinel;
- return lock_stats[new];
+ new = zalloc(sizeof(struct lock_stat));
+ if (!new)
+ goto alloc_failed;
+
+ new->addr = addr;
+ new->name = zalloc(sizeof(char) * strlen(name) + 1);
+ if (!new->name)
+ goto alloc_failed;
+ strcpy(new->name, name);
+ /* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */
+ new->state = LOCK_STATE_UNLOCKED;
+ new->wait_time_min = ULLONG_MAX;
+
+ list_add(&new->hash_entry, entry);
+ return new;
+
+alloc_failed:
+ die("memory allocation failed\n");
}

static char const *input_name = "perf.data";
@@ -95,19 +199,22 @@ struct raw_event_sample {
};

struct trace_acquire_event {
+ void *addr;
const char *name;
};

struct trace_acquired_event {
+ void *addr;
const char *name;
- u64 wait_time; /* in nano sec. */
};

struct trace_contended_event {
+ void *addr;
const char *name;
};

struct trace_release_event {
+ void *addr;
const char *name;
};

@@ -137,52 +244,116 @@ struct trace_lock_handler {
struct thread *thread);
};

-static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event __used,
+static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event,
struct event *__event __used,
int cpu __used,
- u64 timestamp __used,
+ u64 timestamp,
struct thread *thread __used)
{
+ struct lock_stat *st;
+
+ st = lock_stat_findnew(acquire_event->addr, acquire_event->name);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ break;
+ case LOCK_STATE_LOCKED:
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ st->prev_event_time = timestamp;
}

static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event,
struct event *__event __used,
int cpu __used,
- u64 timestamp __used,
+ u64 timestamp,
struct thread *thread __used)
{
struct lock_stat *st;

- st = lock_stat_findnew(acquired_event->name);
- st->nr_acquired++;
-
- if (acquired_event->wait_time != 0) {
- st->wait_time_total += acquired_event->wait_time;
- if (st->wait_time_max < acquired_event->wait_time)
- st->wait_time_max = acquired_event->wait_time;
- else if (st->wait_time_min > acquired_event->wait_time)
- st->wait_time_min = acquired_event->wait_time;
+ st = lock_stat_findnew(acquired_event->addr, acquired_event->name);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ st->state = LOCK_STATE_LOCKED;
+ st->nr_acquired++;
+ break;
+ case LOCK_STATE_LOCKED:
+ break;
+ default:
+ BUG_ON(1);
+ break;
}
+
+ st->prev_event_time = timestamp;
}

static void prof_lock_contended_event(struct trace_contended_event *contended_event,
struct event *__event __used,
int cpu __used,
- u64 timestamp __used,
+ u64 timestamp,
struct thread *thread __used)
{
struct lock_stat *st;

- st = lock_stat_findnew(contended_event->name);
- st->nr_contended++;
+ st = lock_stat_findnew(contended_event->addr, contended_event->name);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ break;
+ case LOCK_STATE_LOCKED:
+ st->nr_contended++;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ st->prev_event_time = timestamp;
}

-static void prof_lock_release_event(struct trace_release_event *release_event __used,
+static void prof_lock_release_event(struct trace_release_event *release_event,
struct event *__event __used,
int cpu __used,
- u64 timestamp __used,
+ u64 timestamp,
struct thread *thread __used)
{
+ struct lock_stat *st;
+ u64 hold_time;
+
+ st = lock_stat_findnew(release_event->addr, release_event->name);
+
+ switch (st->state) {
+ case LOCK_STATE_UNLOCKED:
+ break;
+ case LOCK_STATE_LOCKED:
+ st->state = LOCK_STATE_UNLOCKED;
+		if (timestamp < st->prev_event_time) {
+			/* unfortunately, this can happen... */
+			goto end;
+		}
+
+		hold_time = timestamp - st->prev_event_time;
+
+ if (st->wait_time_min > hold_time)
+ st->wait_time_min = hold_time;
+ if (st->wait_time_max < hold_time)
+ st->wait_time_max = hold_time;
+ st->wait_time_total += hold_time;
+
+ st->nr_release++;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+end:
+ st->prev_event_time = timestamp;
}

/* lock oriented handlers */
@@ -203,6 +374,17 @@ process_lock_acquire_event(struct raw_event_sample *raw __used,
u64 timestamp __used,
struct thread *thread __used)
{
+ struct trace_acquire_event acquire_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", raw->data);
+ acquire_event.addr = (void *)tmp;
+ acquire_event.name = (char *)raw_field_ptr(event, "name", raw->data);
+
+ if (trace_handler->acquire_event) {
+ trace_handler->acquire_event(&acquire_event,
+ event, cpu, timestamp, thread);
+ }
}

static void
@@ -213,14 +395,11 @@ process_lock_acquired_event(struct raw_event_sample *raw __used,
struct thread *thread __used)
{
struct trace_acquired_event acquired_event;
+ u64 tmp; /* this is required for casting... */

+ tmp = raw_field_value(event, "lockdep_addr", raw->data);
+ acquired_event.addr = (void *)tmp;
acquired_event.name = (char *)raw_field_ptr(event, "name", raw->data);
- acquired_event.wait_time =
- (unsigned long)raw_field_value(event, "wait_usec", raw->data)
- * 1000;
- acquired_event.wait_time +=
- (unsigned long)raw_field_value(event,
- "wait_nsec_rem", raw->data);

if (trace_handler->acquire_event) {
trace_handler->acquired_event(&acquired_event,
@@ -235,6 +414,17 @@ process_lock_contended_event(struct raw_event_sample *raw __used,
u64 timestamp __used,
struct thread *thread __used)
{
+ struct trace_contended_event contended_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", raw->data);
+ contended_event.addr = (void *)tmp;
+ contended_event.name = (char *)raw_field_ptr(event, "name", raw->data);
+
+	if (trace_handler->contended_event) {
+ trace_handler->contended_event(&contended_event,
+ event, cpu, timestamp, thread);
+ }
}

static void
@@ -244,6 +434,17 @@ process_lock_release_event(struct raw_event_sample *raw __used,
u64 timestamp __used,
struct thread *thread __used)
{
+ struct trace_release_event release_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", raw->data);
+ release_event.addr = (void *)tmp;
+ release_event.name = (char *)raw_field_ptr(event, "name", raw->data);
+
+	if (trace_handler->release_event) {
+ trace_handler->release_event(&release_event,
+ event, cpu, timestamp, thread);
+ }
}

static void
@@ -333,24 +534,61 @@ static int sample_type_check(u64 type)
return 0;
}

-/* TODO: various way to print */
+/* TODO: various way to print, coloring, nano or milli sec */
static void print_result(void)
{
- int i;
+ struct lock_stat *st;
+ char cut_name[20];
+
+ printf("%18s ", "ID");
+ printf("%20s ", "Name");
+ printf("%10s ", "acquired");
+ printf("%10s ", "contended");
+
+ printf("%15s ", "total wait (ns)");
+ printf("%15s ", "max wait (ns)");
+ printf("%15s ", "min wait (ns)");
+
+ printf("\n\n");
+
+ while ((st = pop_from_result())) {
+ bzero(cut_name, 20);
+
+ printf("%p ", st->addr);
+
+ if (strlen(st->name) < 16) {
+ /* output raw name */
+ printf("%20s ", st->name);
+ } else {
+ strncpy(cut_name, st->name, 16);
+ cut_name[16] = '.';
+ cut_name[17] = '.';
+ cut_name[18] = '.';
+ cut_name[19] = '\0';
+ /* cut off name for saving output style */
+ printf("%20s ", cut_name);
+ }
+
+ printf("%10u ", st->nr_acquired);
+ printf("%10u ", st->nr_contended);
+
+ printf("%15llu ", st->wait_time_total);
+ printf("%15llu ", st->wait_time_max);
+ printf("%15llu ", st->wait_time_min == ULLONG_MAX ?
+ 0 : st->wait_time_min);
+ printf("\n");
+ }
+}
+
+static void dump_map(void)
+{
+ unsigned int i;
+ struct lock_stat *st;

- /* FIXME: formatting output is very rough */
- printf("\n ------------------------------------------------------------------------------------------\n");
- printf(" Lock | Acquired | Max wait ns | Min wait ns | Total wait ns |\n");
- printf(" --------------------------------------------------------------------------------------------\n");
-
- for (i = 0; lock_stats[i]->name; i++) {
- printf("%32s %13u %13llu %13llu %15llu\n",
- lock_stats[i]->name,
- lock_stats[i]->nr_acquired,
- lock_stats[i]->wait_time_max,
- lock_stats[i]->wait_time_min == ULLONG_MAX ?
- 0 : lock_stats[i]->wait_time_min,
- lock_stats[i]->wait_time_total);
+ for (i = 0; i < LOCKHASH_SIZE; i++) {
+ list_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ printf("%p: %s\n", st->addr, st->name);
+ }
}
}

@@ -368,14 +606,39 @@ static int read_events(void)
&event__cwdlen, &event__cwd);
}

+static void sort_result(void)
+{
+ unsigned int i;
+ struct lock_stat *st;
+
+ for (i = 0; i < LOCKHASH_SIZE; i++) {
+ list_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ insert_to_result(st, compare);
+ }
+ }
+}
+
static void __cmd_prof(void)
{
setup_pager();
- lock_stats_init();
+ select_key();
read_events();
+ sort_result();
print_result();
}

+static const char * const prof_usage[] = {
+	"perf lock prof [<options>]",
+ NULL
+};
+
+static const struct option prof_options[] = {
+ OPT_STRING('k', "key", &sort_key, "acquired",
+ "key for sorting"),
+ /* TODO: type */
+ OPT_END()
+};
+
static const char * const lock_usage[] = {
"perf lock [<options>] {record|trace|prof}",
NULL
@@ -426,7 +689,11 @@ static int __cmd_record(int argc, const char **argv)

int cmd_lock(int argc, const char **argv, const char *prefix __used)
{
+ unsigned int i;
+
symbol__init(0);
+ for (i = 0; i < LOCKHASH_SIZE; i++)
+ INIT_LIST_HEAD(lockhash_table + i);

argc = parse_options(argc, argv, lock_options, lock_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
@@ -437,10 +704,22 @@ int cmd_lock(int argc, const char **argv, const char *prefix __used)
return __cmd_record(argc, argv);
} else if (!strncmp(argv[0], "prof", 4)) {
trace_handler = &prof_lock_ops;
+ if (argc) {
+ argc = parse_options(argc, argv,
+ prof_options, prof_usage, 0);
+ if (argc)
+ usage_with_options(prof_usage, prof_options);
+ }
__cmd_prof();
} else if (!strcmp(argv[0], "trace")) {
/* Aliased to 'perf trace' */
return cmd_trace(argc, argv, prefix);
+ } else if (!strcmp(argv[0], "map")) {
+ /* recycling prof_lock_ops */
+ trace_handler = &prof_lock_ops;
+ setup_pager();
+ read_events();
+ dump_map();
} else {
usage_with_options(lock_usage, lock_options);
}
--
1.6.5.2


2009-12-28 10:17:46

by Peter Zijlstra

Subject: Re: [PATCH] perf lock: Implement basic sorting function of perf lock

On Sun, 2009-12-20 at 11:56 +0900, Hitoshi Mitake wrote:
> +/* based on kernel/lockdep.c */
> +#define MAX_LOCKDEP_KEYS_BITS 13
> +#define LOCKHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
> +#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)

Since you're hashing something fundamentally different than lockdep does
I don't think it makes sense to carry either the name or the size along.
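
i.e. perf lock could just pick a size of its own, something like
(names hypothetical):

#define PERF_LOCKHASH_BITS	12
#define PERF_LOCKHASH_SIZE	(1UL << PERF_LOCKHASH_BITS)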

2009-12-28 10:58:47

by Peter Zijlstra

Subject: Re: [PATCH] perf lock: Implement basic sorting function of perf lock

On Sun, 2009-12-20 at 11:56 +0900, Hitoshi Mitake wrote:
> +#define SINGLE_KEY(member) \
> + static int lock_stat_key_ ## member(struct lock_stat *one, \
> + struct lock_stat *two) \
> + { \
> + if (one->member > two->member) \
> + return 1; \
> + else \
> + return 0; \
> + }

Seems to me:

return one->member > two->member;

Should suffice, no?

[ I usually call the arguments to a comparator (l)eft/(r)ight or a/b,
but I guess one/two works too :-) ]

2009-12-28 11:01:49

by Peter Zijlstra

Subject: Re: [PATCH] perf lock: Implement basic sorting function of perf lock

On Sun, 2009-12-20 at 11:56 +0900, Hitoshi Mitake wrote:
> +static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
> +{
> + struct list_head *entry = lockhashentry(addr);
> + struct lock_stat *ret, *new;


Right, so you use a hash table to match lock instances, I suppose that's
faster than an rb-tree?

Do you also remove instances after a match to keep the hash relatively
small?

2010-01-08 08:02:44

by Hitoshi Mitake

Subject: Re: [PATCH] perf lock: Implement basic sorting function of perf lock

On 2009年12月28日 19:17, Peter Zijlstra wrote:
> On Sun, 2009-12-20 at 11:56 +0900, Hitoshi Mitake wrote:
>> +/* based on kernel/lockdep.c */
>> +#define MAX_LOCKDEP_KEYS_BITS 13
>> +#define LOCKHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
>> +#define LOCKHASH_SIZE (1UL<< LOCKHASH_BITS)
>
> Since you're hashing something fundamentally different than lockdep does
> I don't think it makes sense to carry either the name or the size along.
>
>

Do you mean that MAX_LOCKDEP_KEYS_BITS is unnecessary?
# Sorry, I'm not good at English...

2010-01-08 08:04:29

by Hitoshi Mitake

Subject: Re: [PATCH] perf lock: Implement basic sorting function of perf lock

On 2009年12月28日 19:58, Peter Zijlstra wrote:
> On Sun, 2009-12-20 at 11:56 +0900, Hitoshi Mitake wrote:
>> +#define SINGLE_KEY(member) \
>> + static int lock_stat_key_ ## member(struct lock_stat *one, \
>> + struct lock_stat *two) \
>> + { \
>> + if (one->member> two->member) \
>> + return 1; \
>> + else \
>> + return 0; \
>> + }
>
> Seems to me:
>
> return one->member> two->member;
>
> Should suffice, no?
>
> [ I usually call the arguments to a comparator (l)eft/(r)ight or a/b,
> but I guess one/two works too :-) ]
>
>

Yeah, your way is far smarter than mine :)
The body of this function can be a single line; I'll fix it.
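
Something like this, I suppose (untested):

#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					    struct lock_stat *two)	\
	{								\
		return one->member > two->member;			\
	}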

2010-01-08 08:34:06

by Hitoshi Mitake

Subject: Re: [PATCH] perf lock: Implement basic sorting function of perf lock

On 2009年12月28日 20:01, Peter Zijlstra wrote:
> On Sun, 2009-12-20 at 11:56 +0900, Hitoshi Mitake wrote:
>> +static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
>> +{
>> + struct list_head *entry = lockhashentry(addr);
>> + struct lock_stat *ret, *new;
>
>
> Right, so you use a hash table to match lock instances, I suppose that's
> faster than an rb-tree?

Maybe. But it is hard to determine which data structure is better.
This is a histogram from perf.data produced by "perf bench sched messaging",

(left column is the chain length, right column is the number of hash
chains with that length)
0: 3333
1: 153
2: 91
3: 114
4: 96
5: 75
6: 61
7: 43
8: 29
9: 32
10: 23
11: 15
12: 9
13: 5
14: 7
15: 3
16: 1
17: 3
18: 0
19: 3 # 19 means "longer than 19"

The total number of lock instances is 3459,
so if perf lock used an rb-tree to store the info of lock instances,
the average cost of accessing a stored node would be about lg 4096 == 12
comparisons. And as the histogram above shows,
relatively few hash chains are longer than 12.
So I think a hash list is a better structure than an rb-tree
for storing the info of lock instances.
(Sorting the result is another issue, of course.)
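
For reference, a quick hack to gather such a histogram from the patch's
hash table could look like this (not part of the patch, untested):

static void dump_hash_histogram(void)
{
	unsigned int i, len, hist[20] = { 0 };
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		len = 0;
		list_for_each_entry(st, &lockhash_table[i], hash_entry)
			len++;
		/* last bucket collects every chain of length 19 or more */
		hist[len < 19 ? len : 19]++;
	}

	for (i = 0; i < 20; i++)
		printf("%u: %u\n", i, hist[i]);
}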

>
> Do you also remove instances after a match to keep the hash relatively
> small?
>
>

No, I don't.
In the last phase of profiling,
perf lock builds an rb-tree sorted by the key the user specified.

static void sort_result(void)
{
unsigned int i;
struct lock_stat *st;

for (i = 0; i < LOCKHASH_SIZE; i++) {
list_for_each_entry(st, &lockhash_table[i], hash_entry) {
insert_to_result(st, compare); # insert to rb-tree
}
}
}

As this code shows,
the completed hash list is walked only once.
So removing instances to keep the hash list relatively small
is not required, I think.