Per $subject, this patch set only supports the LIVE kernel. It adds
support infrastructure for path discovery, load address lookup, and
symbol generation for live kernel modules.
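For reference, a module's section load addresses come straight from
sysfs; below is a minimal standalone sketch of that lookup (not part of
the patches, and the module/section names are only examples):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Example path only; any loaded module and section will do. */
	const char *path = "/sys/module/snd_hda_intel/sections/.text";
	char buf[64];
	unsigned long long vma;
	FILE *file = fopen(path, "r");

	if (file == NULL)
		return EXIT_FAILURE;
	if (fgets(buf, sizeof(buf), file) == NULL) {
		fclose(file);
		return EXIT_FAILURE;
	}
	fclose(file);

	/* The file holds a single line such as "0xffffffffa0123000". */
	vma = strtoull(buf, NULL, 16);
	printf(".text of snd_hda_intel is loaded at %#llx\n", vma);
	return EXIT_SUCCESS;
}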
TODO includes resurrecting live annotation in perf top, and supporting
annotation and report generation for modules other than live ones. As
the patch set sits, perf top can generate symbols from live binaries,
but there's no live annotation capability yet.
patch1: perf_counter tools: Make symbol loading consistently return number of loaded symbols.
patch2: perf_counter tools: Add infrastructure to support loading of kernel module symbols
patch3: perf_counter tools: connect module support infrastructure to symbol loading infrastructure
patch4: perf_counter tools: Enable kernel module symbol loading in tools
Comments and suggestions most welcome.
-Mike
perf_counter tools: Make symbol loading consistently return number of loaded symbols.
Signed-off-by: Mike Galbraith <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
LKML-Reference: <new-submission>
---
tools/perf/builtin-annotate.c | 2 +-
tools/perf/builtin-report.c | 2 +-
tools/perf/builtin-top.c | 2 +-
tools/perf/util/symbol.c | 9 ++++++---
4 files changed, 9 insertions(+), 6 deletions(-)
Index: linux-2.6/tools/perf/builtin-annotate.c
===================================================================
--- linux-2.6.orig/tools/perf/builtin-annotate.c
+++ linux-2.6/tools/perf/builtin-annotate.c
@@ -172,7 +172,7 @@ static int load_kernel(void)
return -1;
err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
- if (err) {
+ if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
} else
Index: linux-2.6/tools/perf/builtin-report.c
===================================================================
--- linux-2.6.orig/tools/perf/builtin-report.c
+++ linux-2.6/tools/perf/builtin-report.c
@@ -189,7 +189,7 @@ static int load_kernel(void)
return -1;
err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
- if (err) {
+ if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
} else
Index: linux-2.6/tools/perf/builtin-top.c
===================================================================
--- linux-2.6.orig/tools/perf/builtin-top.c
+++ linux-2.6/tools/perf/builtin-top.c
@@ -364,7 +364,7 @@ static int parse_symbols(void)
if (kernel_dso == NULL)
return -1;
- if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) != 0)
+ if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) <= 0)
goto out_delete_dso;
node = rb_first(&kernel_dso->syms);
Index: linux-2.6/tools/perf/util/symbol.c
===================================================================
--- linux-2.6.orig/tools/perf/util/symbol.c
+++ linux-2.6/tools/perf/util/symbol.c
@@ -146,6 +146,7 @@ static int dso__load_kallsyms(struct dso
char *line = NULL;
size_t n;
FILE *file = fopen("/proc/kallsyms", "r");
+ int count = 0;
if (file == NULL)
goto out_failure;
@@ -188,8 +189,10 @@ static int dso__load_kallsyms(struct dso
if (filter && filter(self, sym))
symbol__delete(sym, self->sym_priv_size);
- else
+ else {
dso__insert_symbol(self, sym);
+ count++;
+ }
}
/*
@@ -212,7 +215,7 @@ static int dso__load_kallsyms(struct dso
free(line);
fclose(file);
- return 0;
+ return count;
out_delete_line:
free(line);
@@ -639,7 +642,7 @@ int dso__load_kernel(struct dso *self, c
if (vmlinux)
err = dso__load_vmlinux(self, vmlinux, filter, verbose);
- if (err < 0)
+ if (err <= 0)
err = dso__load_kallsyms(self, filter, verbose);
return err;
perf_counter tools: Add infrastructure to support loading of kernel module symbols.
Add infrastructure for module path discovery and section load addresses.
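Path discovery reads /lib/modules/`uname -r`/modules.dep, and the
canonical module name is derived from each path entry. A minimal
standalone sketch of that derivation (not part of the patch; the sample
modules.dep entry is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A modules.dep line starts with the module path, then ':' and deps. */
	char line[] = "/lib/modules/2.6.31/kernel/sound/pci/hda/snd-hda-intel.ko: ";
	char name[64];
	char *path, *base, *dot, *p;

	path = strtok(line, ":");		/* keep only the path part */
	base = strrchr(path, '/');		/* basename: "snd-hda-intel.ko" */
	base = base ? base + 1 : path;
	snprintf(name, sizeof(name), "%s", base);

	dot = strchr(name, '.');		/* strip the ".ko" suffix */
	if (dot)
		*dot = '\0';
	for (p = name; *p; p++)			/* sysfs names use '_', not '-' */
		if (*p == '-')
			*p = '_';

	printf("path: %s\nname: %s\n", path, name);	/* -> snd_hda_intel */
	return 0;
}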
Signed-off-by: Mike Galbraith <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
LKML-Reference: <new-submission>
---
tools/perf/Makefile | 2
tools/perf/util/module.c | 502 +++++++++++++++++++++++++++++++++++++++++++++++
tools/perf/util/module.h | 53 ++++
3 files changed, 557 insertions(+)
Index: linux-2.6/tools/perf/Makefile
===================================================================
--- linux-2.6.orig/tools/perf/Makefile
+++ linux-2.6/tools/perf/Makefile
@@ -306,6 +306,7 @@ LIB_H += util/strlist.h
LIB_H += util/run-command.h
LIB_H += util/sigchain.h
LIB_H += util/symbol.h
+LIB_H += util/module.h
LIB_H += util/color.h
LIB_OBJS += util/abspath.o
@@ -329,6 +330,7 @@ LIB_OBJS += util/usage.o
LIB_OBJS += util/wrapper.o
LIB_OBJS += util/sigchain.o
LIB_OBJS += util/symbol.o
+LIB_OBJS += util/module.o
LIB_OBJS += util/color.o
LIB_OBJS += util/pager.o
LIB_OBJS += util/header.o
Index: linux-2.6/tools/perf/util/module.h
===================================================================
--- /dev/null
+++ linux-2.6/tools/perf/util/module.h
@@ -0,0 +1,53 @@
+#ifndef _PERF_MODULE_
+#define _PERF_MODULE_ 1
+
+#include <linux/types.h>
+#include "../types.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+struct section {
+ struct rb_node rb_node;
+ u64 hash;
+ u64 vma;
+ char *name;
+ char *path;
+};
+
+struct sec_dso {
+ struct list_head node;
+ struct rb_root secs;
+ struct section *(*find_section)(struct sec_dso *, const char *name);
+ char name[0];
+};
+
+struct module {
+ struct rb_node rb_node;
+ u64 hash;
+ char *name;
+ char *path;
+ struct sec_dso *sections;
+ int active;
+};
+
+struct mod_dso {
+ struct list_head node;
+ struct rb_root mods;
+ struct module *(*find_module)(struct mod_dso *, const char *name);
+ char name[0];
+};
+
+struct sec_dso *sec_dso__new_dso(const char *name);
+void sec_dso__delete_sections(struct sec_dso *self);
+void sec_dso__delete_self(struct sec_dso *self);
+size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp);
+struct section *sec_dso__find_section(struct sec_dso *self, const char *name);
+
+struct mod_dso *mod_dso__new_dso(const char *name);
+void mod_dso__delete_modules(struct mod_dso *self);
+void mod_dso__delete_self(struct mod_dso *self);
+size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp);
+struct module *mod_dso__find_module(struct mod_dso *self, const char *name);
+int mod_dso__load_modules(struct mod_dso *dso);
+
+#endif /* _PERF_MODULE_ */
Index: linux-2.6/tools/perf/util/module.c
===================================================================
--- /dev/null
+++ linux-2.6/tools/perf/util/module.c
@@ -0,0 +1,502 @@
+#include "util.h"
+#include "../perf.h"
+#include "string.h"
+#include "module.h"
+
+#include <libelf.h>
+#include <gelf.h>
+#include <elf.h>
+#include <dirent.h>
+#include <sys/utsname.h>
+
+static unsigned int crc32(const char *p, unsigned int len)
+{
+ int i;
+ unsigned int crc = 0;
+ while (len--) {
+ crc ^= *p++;
+ for (i = 0; i < 8; i++)
+ crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+ }
+ return crc;
+}
+
+/* module section methods */
+
+struct sec_dso *sec_dso__new_dso(const char *name)
+{
+ struct sec_dso *self = malloc(sizeof(*self) + strlen(name) + 1);
+
+ if (self != NULL) {
+ strcpy(self->name, name);
+ self->secs = RB_ROOT;
+ self->find_section = sec_dso__find_section;
+ }
+
+ return self;
+}
+
+static void sec_dso__delete_section(struct section *self)
+{
+ free(((void *)self));
+}
+
+void sec_dso__delete_sections(struct sec_dso *self)
+{
+ struct section *pos;
+ struct rb_node *next = rb_first(&self->secs);
+
+ while (next) {
+ pos = rb_entry(next, struct section, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, &self->secs);
+ sec_dso__delete_section(pos);
+ }
+}
+
+void sec_dso__delete_self(struct sec_dso *self)
+{
+ sec_dso__delete_sections(self);
+ free(self);
+}
+
+static void sec_dso__insert_section(struct sec_dso *self, struct section *sec)
+{
+ struct rb_node **p = &self->secs.rb_node;
+ struct rb_node *parent = NULL;
+ const u64 hash = sec->hash;
+ struct section *s;
+
+ while (*p != NULL) {
+ parent = *p;
+ s = rb_entry(parent, struct section, rb_node);
+ if (hash < s->hash)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&sec->rb_node, parent, p);
+ rb_insert_color(&sec->rb_node, &self->secs);
+}
+
+struct section *sec_dso__find_section(struct sec_dso *self, const char *name)
+{
+ struct rb_node *n;
+ u64 hash;
+ int len;
+
+ if (self == NULL)
+ return NULL;
+
+ len = strlen(name);
+ hash = crc32(name, len);
+
+ n = self->secs.rb_node;
+
+ while (n) {
+ struct section *s = rb_entry(n, struct section, rb_node);
+
+ if (hash < s->hash)
+ n = n->rb_left;
+ else if (hash > s->hash)
+ n = n->rb_right;
+ else {
+ if (!strcmp(name, s->name))
+ return s;
+ else
+ n = rb_next(&s->rb_node);
+ }
+ }
+
+ return NULL;
+}
+
+static size_t sec_dso__fprintf_section(struct section *self, FILE *fp)
+{
+ return fprintf(fp, "name:%s vma:%llx path:%s\n",
+ self->name, self->vma, self->path);
+}
+
+size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp)
+{
+ size_t ret = fprintf(fp, "dso: %s\n", self->name);
+
+ struct rb_node *nd;
+ for (nd = rb_first(&self->secs); nd; nd = rb_next(nd)) {
+ struct section *pos = rb_entry(nd, struct section, rb_node);
+ ret += sec_dso__fprintf_section(pos, fp);
+ }
+
+ return ret;
+}
+
+static struct section *section__new(const char *name, const char *path)
+{
+ struct section *self = calloc(1, sizeof(*self));
+
+ if (!self)
+ goto out_failure;
+
+ self->name = calloc(1, strlen(name) + 1);
+ if (!self->name)
+ goto out_failure;
+
+ self->path = calloc(1, strlen(path) + 1);
+ if (!self->path)
+ goto out_failure;
+
+ strcpy(self->name, name);
+ strcpy(self->path, path);
+ self->hash = crc32(self->name, strlen(name));
+
+ return self;
+
+out_failure:
+ if (self) {
+ if (self->name)
+ free (self->name);
+ if (self->path)
+ free (self->path);
+ free (self);
+ }
+
+ return NULL;
+}
+
+/* module methods */
+
+struct mod_dso *mod_dso__new_dso(const char *name)
+{
+ struct mod_dso *self = malloc(sizeof(*self) + strlen(name) + 1);
+
+ if (self != NULL) {
+ strcpy(self->name, name);
+ self->mods = RB_ROOT;
+ self->find_module = mod_dso__find_module;
+ }
+
+ return self;
+}
+
+static void mod_dso__delete_module(struct module *self)
+{
+ free(((void *)self));
+}
+
+void mod_dso__delete_modules(struct mod_dso *self)
+{
+ struct module *pos;
+ struct rb_node *next = rb_first(&self->mods);
+
+ while (next) {
+ pos = rb_entry(next, struct module, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, &self->mods);
+ mod_dso__delete_module(pos);
+ }
+}
+
+void mod_dso__delete_self(struct mod_dso *self)
+{
+ mod_dso__delete_modules(self);
+ free(self);
+}
+
+static void mod_dso__insert_module(struct mod_dso *self, struct module *mod)
+{
+ struct rb_node **p = &self->mods.rb_node;
+ struct rb_node *parent = NULL;
+ const u64 hash = mod->hash;
+ struct module *m;
+
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct module, rb_node);
+ if (hash < m->hash)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&mod->rb_node, parent, p);
+ rb_insert_color(&mod->rb_node, &self->mods);
+}
+
+struct module *mod_dso__find_module(struct mod_dso *self, const char *name)
+{
+ struct rb_node *n;
+ u64 hash;
+ int len;
+
+ if (self == NULL)
+ return NULL;
+
+ len = strlen(name);
+ hash = crc32(name, len);
+
+ n = self->mods.rb_node;
+
+ while (n) {
+ struct module *m = rb_entry(n, struct module, rb_node);
+
+ if (hash < m->hash)
+ n = n->rb_left;
+ else if (hash > m->hash)
+ n = n->rb_right;
+ else {
+ if (!strcmp(name, m->name))
+ return m;
+ else
+ n = rb_next(&m->rb_node);
+ }
+ }
+
+ return NULL;
+}
+
+static size_t mod_dso__fprintf_module(struct module *self, FILE *fp)
+{
+ return fprintf(fp, "name:%s path:%s\n", self->name, self->path);
+}
+
+size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp)
+{
+ size_t ret = fprintf(fp, "dso: %s\n", self->name);
+
+ struct rb_node *nd;
+ for (nd = rb_first(&self->mods); nd; nd = rb_next(nd)) {
+ struct module *pos = rb_entry(nd, struct module, rb_node);
+ ret += mod_dso__fprintf_module(pos, fp);
+ }
+
+ return ret;
+}
+
+static struct module *module__new(const char *name, const char *path)
+{
+ struct module *self = calloc(1, sizeof(*self));
+
+ if (!self)
+ goto out_failure;
+
+ self->name = calloc(1, strlen(name) + 1);
+ if (!self->name)
+ goto out_failure;
+
+ self->path = calloc(1, strlen(path) + 1);
+ if (!self->path)
+ goto out_failure;
+
+ strcpy(self->name, name);
+ strcpy(self->path, path);
+ self->hash = crc32(self->name, strlen(name));
+
+ return self;
+
+out_failure:
+ if (self) {
+ if (self->name)
+ free (self->name);
+ if (self->path)
+ free (self->path);
+ free (self);
+ }
+
+ return NULL;
+}
+
+static int mod_dso__load_sections(struct module *mod)
+{
+ struct dirent *entry;
+ DIR *dir;
+ size_t n;
+ int count = 0, path_len;
+ char *line = NULL;
+ char *dir_path;
+
+ path_len = strlen("/sys/module/");
+ path_len += strlen(mod->name);
+ path_len += strlen("/sections/");
+
+ dir_path = calloc(1, path_len + 1);
+ if (dir_path == NULL)
+ goto out_failure;
+
+ strcat(dir_path, "/sys/module/");
+ strcat(dir_path, mod->name);
+ strcat(dir_path, "/sections/");
+
+ dir = opendir(dir_path);
+ if (dir == NULL)
+ goto out_free;
+
+ while ((entry = readdir(dir))) {
+ struct section *section;
+ FILE *file;
+ char *path, *vma;
+ int line_len;
+
+ if (!strcmp(".", entry->d_name) || !strcmp("..", entry->d_name))
+ continue;
+
+ path = calloc(1, path_len + strlen(entry->d_name) + 1);
+ if (path == NULL)
+ break;
+ strcat(path, dir_path);
+ strcat(path, entry->d_name);
+
+ file = fopen(path, "r");
+ if (file == NULL) {
+ free(path);
+ break;
+ }
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0) {
+ free(path);
+ fclose(file);
+ break;
+ }
+
+ if (!line) {
+ free(path);
+ fclose(file);
+ break;
+ }
+
+ line[--line_len] = '\0'; /* \n */
+
+ vma = strstr(line, "0x");
+ if (!vma) {
+ free(path);
+ fclose(file);
+ break;
+ }
+ vma += 2;
+
+ section = section__new(entry->d_name, path);
+ if (!section) {
+ fprintf(stderr, "load_sections: allocation error\n");
+ free(path);
+ fclose(file);
+ break;
+ }
+
+ hex2u64(vma, &section->vma);
+ sec_dso__insert_section(mod->sections, section);
+
+ free(path);
+ fclose(file);
+ count++;
+ }
+
+ closedir(dir);
+ free(line);
+ free(dir_path);
+
+ return count;
+
+out_free:
+ free(dir_path);
+
+out_failure:
+ return count;
+}
+
+static int mod_dso__load_module_paths(struct mod_dso *self)
+{
+ struct utsname uts;
+ char *line = NULL;
+ size_t n;
+ FILE *file;
+ int count = 0, len;
+ char *path;
+
+ if(uname(&uts) < 0)
+ goto out_failure;
+
+ len = strlen("/lib/modules/");
+ len += strlen(uts.release);
+ len += strlen("/modules.dep");
+
+ path = calloc(1, len);
+ if (path == NULL)
+ goto out_failure;
+
+ strcat(path, "/lib/modules/");
+ strcat(path, uts.release);
+ strcat(path, "/modules.dep");
+
+ file = fopen(path, "r");
+ free(path);
+ if (file == NULL)
+ goto out_failure;
+
+ while (!feof(file)) {
+ struct module *module;
+ int line_len, len;
+ char *path, *name, *tmp;
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0)
+ break;
+
+ if (!line)
+ goto out_failure;
+
+ line[--line_len] = '\0'; /* \n */
+
+ path = strtok(line, ":");
+ if (!path)
+ goto out_failure;
+
+ name = strdup(path);
+ name = tmp = strtok(name, "/");
+ while(tmp) {
+ tmp = strtok(NULL, "/");
+ if (tmp)
+ name = tmp;
+ }
+ name = strsep(&name, ".");
+
+ /* Replace '-' with '_' in damn sound modules */
+ for (len = strlen(name); len; len--)
+ if (*(name+len) == '-')
+ *(name+len) = '_';
+
+ module = module__new(name, path);
+ if (!module) {
+ fprintf(stderr, "load_module_paths: allocation error\n");
+ goto out_failure;
+ }
+ mod_dso__insert_module(self, module);
+
+ module->sections = sec_dso__new_dso("sections");
+ if (!module->sections) {
+ fprintf(stderr, "load_module_paths: allocation error\n");
+ goto out_failure;
+ }
+
+ module->active = mod_dso__load_sections(module);
+
+ if (module->active > 0)
+ count++;
+ }
+
+ free(line);
+ fclose(file);
+
+ return count;
+
+out_failure:
+ return -1;
+}
+
+int mod_dso__load_modules(struct mod_dso *dso)
+{
+ int err;
+
+ err = mod_dso__load_module_paths(dso);
+
+ return err;
+}
+
perf_counter tools: connect module support infrastructure to symbol loading infrastructure.
Signed-off-by: Mike Galbraith <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
LKML-Reference: <new-submission>
---
tools/perf/builtin-annotate.c | 2
tools/perf/builtin-report.c | 2
tools/perf/builtin-top.c | 2
tools/perf/util/symbol.c | 159 +++++++++++++++++++++++++++++++++++++++---
tools/perf/util/symbol.h | 5 +
5 files changed, 156 insertions(+), 14 deletions(-)
Index: linux-2.6/tools/perf/util/symbol.h
===================================================================
--- linux-2.6.orig/tools/perf/util/symbol.h
+++ linux-2.6/tools/perf/util/symbol.h
@@ -5,6 +5,7 @@
#include "types.h"
#include <linux/list.h>
#include <linux/rbtree.h>
+#include "module.h"
struct symbol {
struct rb_node rb_node;
@@ -13,6 +14,7 @@ struct symbol {
u64 obj_start;
u64 hist_sum;
u64 *hist;
+ struct module *module;
void *priv;
char name[0];
};
@@ -41,7 +43,8 @@ static inline void *dso__sym_priv(struct
struct symbol *dso__find_symbol(struct dso *self, u64 ip);
int dso__load_kernel(struct dso *self, const char *vmlinux,
- symbol_filter_t filter, int verbose);
+ symbol_filter_t filter, int verbose, int modules);
+int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose);
int dso__load(struct dso *self, symbol_filter_t filter, int verbose);
size_t dso__fprintf(struct dso *self, FILE *fp);
Index: linux-2.6/tools/perf/util/symbol.c
===================================================================
--- linux-2.6.orig/tools/perf/util/symbol.c
+++ linux-2.6/tools/perf/util/symbol.c
@@ -35,7 +35,7 @@ static struct symbol *symbol__new(u64 st
self = ((void *)self) + priv_size;
}
self->start = start;
- self->end = start + len - 1;
+ self->end = len ? start + len - 1 : start;
memcpy(self->name, name, namelen);
return self;
@@ -48,8 +48,12 @@ static void symbol__delete(struct symbol
static size_t symbol__fprintf(struct symbol *self, FILE *fp)
{
- return fprintf(fp, " %llx-%llx %s\n",
+ if (!self->module)
+ return fprintf(fp, " %llx-%llx %s\n",
self->start, self->end, self->name);
+ else
+ return fprintf(fp, " %llx-%llx %s \t[%s]\n",
+ self->start, self->end, self->name, self->module->name);
}
struct dso *dso__new(const char *name, unsigned int sym_priv_size)
@@ -310,6 +314,26 @@ static inline int elf_sym__is_function(c
sym->st_size != 0;
}
+static inline int elf_sym__is_label(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_NOTYPE &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF &&
+ sym->st_shndx != SHN_ABS;
+}
+
+static inline const char *elf_sec__name(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return secstrs->d_buf + shdr->sh_name;
+}
+
+static inline int elf_sec__is_text(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
+}
+
static inline const char *elf_sym__name(const GElf_Sym *sym,
const Elf_Data *symstrs)
{
@@ -451,9 +475,9 @@ static int dso__synthesize_plt_symbols(s
}
static int dso__load_sym(struct dso *self, int fd, const char *name,
- symbol_filter_t filter, int verbose)
+ symbol_filter_t filter, int verbose, struct module *mod)
{
- Elf_Data *symstrs;
+ Elf_Data *symstrs, *secstrs;
uint32_t nr_syms;
int err = -1;
uint32_t index;
@@ -461,7 +485,7 @@ static int dso__load_sym(struct dso *sel
GElf_Shdr shdr;
Elf_Data *syms;
GElf_Sym sym;
- Elf_Scn *sec, *sec_dynsym;
+ Elf_Scn *sec, *sec_dynsym, *sec_strndx;
Elf *elf;
size_t dynsym_idx;
int nr = 0;
@@ -520,6 +544,14 @@ static int dso__load_sym(struct dso *sel
if (symstrs == NULL)
goto out_elf_end;
+ sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
+ if (sec_strndx == NULL)
+ goto out_elf_end;
+
+ secstrs = elf_getdata(sec_strndx, NULL);
+ if (symstrs == NULL)
+ goto out_elf_end;
+
nr_syms = shdr.sh_size / shdr.sh_entsize;
memset(&sym, 0, sizeof(sym));
@@ -529,8 +561,11 @@ static int dso__load_sym(struct dso *sel
elf_symtab__for_each_symbol(syms, nr_syms, index, sym) {
struct symbol *f;
u64 obj_start;
+ struct section *section = NULL;
+ int is_label = elf_sym__is_label(&sym);
+ const char *section_name;
- if (!elf_sym__is_function(&sym))
+ if (!is_label && !elf_sym__is_function(&sym))
continue;
sec = elf_getscn(elf, sym.st_shndx);
@@ -538,6 +573,11 @@ static int dso__load_sym(struct dso *sel
goto out_elf_end;
gelf_getshdr(sec, &shdr);
+
+ if (is_label && !elf_sec__is_text(&shdr, secstrs))
+ continue;
+
+ section_name = elf_sec__name(&shdr, secstrs);
obj_start = sym.st_value;
if (self->prelinked) {
@@ -548,6 +588,17 @@ static int dso__load_sym(struct dso *sel
sym.st_value -= shdr.sh_addr - shdr.sh_offset;
}
+ if (mod) {
+ section = mod->sections->find_section(mod->sections, section_name);
+ if (section)
+ sym.st_value += section->vma;
+ else {
+ fprintf(stderr, "dso__load_sym() module %s lookup of %s failed\n",
+ mod->name, section_name);
+ goto out_elf_end;
+ }
+ }
+
f = symbol__new(sym.st_value, sym.st_size,
elf_sym__name(&sym, symstrs),
self->sym_priv_size, obj_start, verbose);
@@ -557,6 +608,7 @@ static int dso__load_sym(struct dso *sel
if (filter && filter(self, f))
symbol__delete(f, self->sym_priv_size);
else {
+ f->module = mod;
dso__insert_symbol(self, f);
nr++;
}
@@ -606,7 +658,7 @@ more:
fd = open(name, O_RDONLY);
} while (fd < 0);
- ret = dso__load_sym(self, fd, name, filter, verbose);
+ ret = dso__load_sym(self, fd, name, filter, verbose, NULL);
close(fd);
/*
@@ -620,6 +672,86 @@ out:
return ret;
}
+static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *name,
+ symbol_filter_t filter, int verbose)
+{
+ struct module *mod = mod_dso__find_module(mods, name);
+ int err = 0, fd;
+
+ if (mod == NULL || !mod->active)
+ return err;
+
+ fd = open(mod->path, O_RDONLY);
+
+ if (fd < 0)
+ return err;
+
+ err = dso__load_sym(self, fd, name, filter, verbose, mod);
+ close(fd);
+
+ return err;
+}
+
+int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose)
+{
+ struct mod_dso *mods = mod_dso__new_dso("modules");
+ struct module *pos;
+ struct rb_node *next;
+ int err;
+
+ err = mod_dso__load_modules(mods);
+
+ if (err <= 0)
+ return err;
+
+ /*
+ * Iterate over modules, and load active symbols.
+ */
+ next = rb_first(&mods->mods);
+ while (next) {
+ pos = rb_entry(next, struct module, rb_node);
+ err = dso__load_module(self, mods, pos->name, filter, verbose);
+
+ if (err < 0)
+ break;
+
+ next = rb_next(&pos->rb_node);
+ }
+
+ if (err < 0) {
+ mod_dso__delete_modules(mods);
+ mod_dso__delete_self(mods);
+ }
+
+ return err;
+}
+
+static inline void dso__fill_symbol_holes(struct dso *self)
+{
+ struct symbol *prev = NULL;
+ struct rb_node *nd;
+
+ for (nd = rb_last(&self->syms); nd; nd = rb_prev(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+
+ if (prev) {
+ u64 hole = 0;
+ int alias = pos->start == prev->start;
+
+ if (!alias)
+ hole = prev->start - pos->end - 1;
+
+ if (hole || alias) {
+ if (alias)
+ pos->end = prev->end;
+ else if (hole)
+ pos->end = prev->start - 1;
+ }
+ }
+ prev = pos;
+ }
+}
+
static int dso__load_vmlinux(struct dso *self, const char *vmlinux,
symbol_filter_t filter, int verbose)
{
@@ -628,19 +760,26 @@ static int dso__load_vmlinux(struct dso
if (fd < 0)
return -1;
- err = dso__load_sym(self, fd, vmlinux, filter, verbose);
+ err = dso__load_sym(self, fd, vmlinux, filter, verbose, NULL);
+
+ if (err > 0)
+ dso__fill_symbol_holes(self);
+
close(fd);
return err;
}
int dso__load_kernel(struct dso *self, const char *vmlinux,
- symbol_filter_t filter, int verbose)
+ symbol_filter_t filter, int verbose, int modules)
{
int err = -1;
- if (vmlinux)
+ if (vmlinux) {
err = dso__load_vmlinux(self, vmlinux, filter, verbose);
+ if (err > 0 && modules)
+ err = dso__load_modules(self, filter, verbose);
+ }
if (err <= 0)
err = dso__load_kallsyms(self, filter, verbose);
Index: linux-2.6/tools/perf/builtin-annotate.c
===================================================================
--- linux-2.6.orig/tools/perf/builtin-annotate.c
+++ linux-2.6/tools/perf/builtin-annotate.c
@@ -171,7 +171,7 @@ static int load_kernel(void)
if (!kernel_dso)
return -1;
- err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
+ err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, 0);
if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
Index: linux-2.6/tools/perf/builtin-report.c
===================================================================
--- linux-2.6.orig/tools/perf/builtin-report.c
+++ linux-2.6/tools/perf/builtin-report.c
@@ -188,7 +188,7 @@ static int load_kernel(void)
if (!kernel_dso)
return -1;
- err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
+ err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, 0);
if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
Index: linux-2.6/tools/perf/builtin-top.c
===================================================================
--- linux-2.6.orig/tools/perf/builtin-top.c
+++ linux-2.6/tools/perf/builtin-top.c
@@ -364,7 +364,7 @@ static int parse_symbols(void)
if (kernel_dso == NULL)
return -1;
- if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) <= 0)
+ if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1, 0) <= 0)
goto out_delete_dso;
node = rb_first(&kernel_dso->syms);
perf_counter tools: Enable kernel module symbol loading in tools.
Signed-off-by: Mike Galbraith <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
LKML-Reference: <new-submission>
---
tools/perf/builtin-annotate.c | 25 ++++++++++++++++++++-----
tools/perf/builtin-report.c | 9 ++++++++-
tools/perf/builtin-top.c | 12 ++++++++++--
3 files changed, 38 insertions(+), 8 deletions(-)
Index: linux-2.6/tools/perf/builtin-annotate.c
===================================================================
--- linux-2.6.orig/tools/perf/builtin-annotate.c
+++ linux-2.6/tools/perf/builtin-annotate.c
@@ -43,6 +43,10 @@ static int dump_trace = 0;
static int verbose;
+static int modules;
+
+static int full_paths;
+
static int print_line;
static unsigned long page_size;
@@ -171,7 +175,7 @@ static int load_kernel(void)
if (!kernel_dso)
return -1;
- err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, 0);
+ err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
@@ -1268,19 +1272,25 @@ static void print_summary(char *filename
static void annotate_sym(struct dso *dso, struct symbol *sym)
{
- char *filename = dso->name;
+ char *filename = dso->name, *d_filename;
u64 start, end, len;
char command[PATH_MAX*2];
FILE *file;
if (!filename)
return;
- if (dso == kernel_dso)
+ if (sym->module)
+ filename = sym->module->path;
+ else if (dso == kernel_dso)
filename = vmlinux;
start = sym->obj_start;
if (!start)
start = sym->start;
+ if (full_paths)
+ d_filename = filename;
+ else
+ d_filename = basename(filename);
end = start + sym->end - sym->start + 1;
len = sym->end - sym->start;
@@ -1291,13 +1301,14 @@ static void annotate_sym(struct dso *dso
}
printf("\n\n------------------------------------------------\n");
- printf(" Percent | Source code & Disassembly of %s\n", filename);
+ printf(" Percent | Source code & Disassembly of %s\n", d_filename);
printf("------------------------------------------------\n");
if (verbose >= 2)
printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name);
- sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", (u64)start, (u64)end, filename);
+ sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s",
+ (u64)start, (u64)end, filename, filename);
if (verbose >= 3)
printf("doing: %s\n", command);
@@ -1472,8 +1483,12 @@ static const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
+ OPT_BOOLEAN('m', "modules", &modules,
+ "load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_BOOLEAN('l', "print-line", &print_line,
"print matching source lines (may be slow)"),
+ OPT_BOOLEAN('P', "full-paths", &full_paths,
+ "Don't shorten the displayed pathnames"),
OPT_END()
};
Index: linux-2.6/tools/perf/builtin-report.c
===================================================================
--- linux-2.6.orig/tools/perf/builtin-report.c
+++ linux-2.6/tools/perf/builtin-report.c
@@ -46,6 +46,8 @@ static int dump_trace = 0;
static int verbose;
#define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
+static int modules;
+
static int full_paths;
static unsigned long page_size;
@@ -188,7 +190,7 @@ static int load_kernel(void)
if (!kernel_dso)
return -1;
- err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, 0);
+ err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
@@ -648,6 +650,9 @@ sort__sym_print(FILE *fp, struct hist_en
ret += fprintf(fp, "[%c] %s",
self->dso == kernel_dso ? 'k' :
self->dso == hypervisor_dso ? 'h' : '.', self->sym->name);
+
+ if (self->sym->module)
+ ret += fprintf(fp, "\t[%s]", self->sym->module->name);
} else {
ret += fprintf(fp, "%#016llx", (u64)self->ip);
}
@@ -1710,6 +1715,8 @@ static const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
+ OPT_BOOLEAN('m', "modules", &modules,
+ "load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN('P', "full-paths", &full_paths,
Index: linux-2.6/tools/perf/builtin-top.c
===================================================================
--- linux-2.6.orig/tools/perf/builtin-top.c
+++ linux-2.6/tools/perf/builtin-top.c
@@ -66,6 +66,7 @@ static unsigned int page_size;
static unsigned int mmap_pages = 16;
static int freq = 0;
static int verbose = 0;
+static char *vmlinux = NULL;
static char *sym_filter;
static unsigned long filter_start;
@@ -265,7 +266,10 @@ static void print_sym_table(void)
printf("%9.1f %10ld - ", syme->weight, syme->snap_count);
color_fprintf(stdout, color, "%4.1f%%", pcnt);
- printf(" - %016llx : %s\n", sym->start, sym->name);
+ printf(" - %016llx : %s", sym->start, sym->name);
+ if (sym->module)
+ printf("\t[%s]", sym->module->name);
+ printf("\n");
}
}
@@ -359,12 +363,13 @@ static int parse_symbols(void)
{
struct rb_node *node;
struct symbol *sym;
+ int modules = vmlinux ? 1 : 0;
kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
if (kernel_dso == NULL)
return -1;
- if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1, 0) <= 0)
+ if (dso__load_kernel(kernel_dso, vmlinux, symbol_filter, verbose, modules) <= 0)
goto out_delete_dso;
node = rb_first(&kernel_dso->syms);
@@ -680,6 +685,7 @@ static const struct option options[] = {
"system-wide collection from all CPUs"),
OPT_INTEGER('C', "CPU", &profile_cpu,
"CPU to profile on"),
+ OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
OPT_INTEGER('m', "mmap-pages", &mmap_pages,
"number of mmap data pages"),
OPT_INTEGER('r', "realtime", &realtime_prio,
@@ -709,6 +715,8 @@ int cmd_top(int argc, const char **argv,
{
int counter;
+ symbol__init();
+
page_size = sysconf(_SC_PAGE_SIZE);
argc = parse_options(argc, argv, options, top_usage, 0);
* Mike Galbraith <[email protected]> wrote:
> Per $subject, this patch set only supports the LIVE kernel.
> It adds support infrastructure for path discovery, load address
> lookup, and symbol generation for live kernel modules.
>
> TODO includes resurrecting live annotation in perf top, and
> supporting annotation and report generation for modules other
> than live ones. As the patch set sits, perf top can generate
> symbols from live binaries, but there's no live annotation
> capability yet.
>
> patch1: perf_counter tools: Make symbol loading consistently return number of loaded symbols.
> patch2: perf_counter tools: Add infrastructure to support loading of kernel module symbols
> patch3: perf_counter tools: connect module support infrastructure to symbol loading infrastructure
> patch4: perf_counter tools: Enable kernel module symbol loading in tools
>
> Comments and suggestions most welcome.
Looks very nice! I've applied it with a few minor stylistic fixlets
and a tad more verbose changelogs.
I'm wondering about the next step: couldn't we somehow guess at the
position of the vmlinux too, validate somehow that it corresponds to
the kernel we are running - and then use it automatically and by
default?
Plus, offline analysis would be nice as well I suspect - being able
to look at profiles on a different box?
Ingo
Commit-ID: 9974f496782b7612e36a143bedda858f1cb953d4
Gitweb: http://git.kernel.org/tip/9974f496782b7612e36a143bedda858f1cb953d4
Author: Mike Galbraith <[email protected]>
AuthorDate: Thu, 2 Jul 2009 08:05:58 +0200
Committer: Ingo Molnar <[email protected]>
CommitDate: Thu, 2 Jul 2009 08:42:20 +0200
perf_counter tools: Make symbol loading consistently return number of loaded symbols
perf_counter tools: Make symbol loading consistently return number of loaded symbols.
Signed-off-by: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
tools/perf/builtin-annotate.c | 2 +-
tools/perf/builtin-report.c | 2 +-
tools/perf/builtin-top.c | 2 +-
tools/perf/util/symbol.c | 9 ++++++---
4 files changed, 9 insertions(+), 6 deletions(-)
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 132de8b..3becc8a 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -172,7 +172,7 @@ static int load_kernel(void)
return -1;
err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
- if (err) {
+ if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
} else
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index be1b758..58d1612 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -189,7 +189,7 @@ static int load_kernel(void)
return -1;
err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
- if (err) {
+ if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
} else
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index cdc74cf..97fde1d 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -364,7 +364,7 @@ static int parse_symbols(void)
if (kernel_dso == NULL)
return -1;
- if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) != 0)
+ if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) <= 0)
goto out_delete_dso;
node = rb_first(&kernel_dso->syms);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 78c2efd..c077b6a 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -146,6 +146,7 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verb
char *line = NULL;
size_t n;
FILE *file = fopen("/proc/kallsyms", "r");
+ int count = 0;
if (file == NULL)
goto out_failure;
@@ -188,8 +189,10 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verb
if (filter && filter(self, sym))
symbol__delete(sym, self->sym_priv_size);
- else
+ else {
dso__insert_symbol(self, sym);
+ count++;
+ }
}
/*
@@ -212,7 +215,7 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verb
free(line);
fclose(file);
- return 0;
+ return count;
out_delete_line:
free(line);
@@ -639,7 +642,7 @@ int dso__load_kernel(struct dso *self, const char *vmlinux,
if (vmlinux)
err = dso__load_vmlinux(self, vmlinux, filter, verbose);
- if (err < 0)
+ if (err <= 0)
err = dso__load_kallsyms(self, filter, verbose);
return err;
Commit-ID: 208b4b4a59351011b7f212e273f2b7bc47a9c482
Gitweb: http://git.kernel.org/tip/208b4b4a59351011b7f212e273f2b7bc47a9c482
Author: Mike Galbraith <[email protected]>
AuthorDate: Thu, 2 Jul 2009 08:07:10 +0200
Committer: Ingo Molnar <[email protected]>
CommitDate: Thu, 2 Jul 2009 08:42:20 +0200
perf_counter tools: Add infrastructure to support loading of kernel module symbols
Add infrastructure for module path discovery and section load addresses.
Signed-off-by: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
tools/perf/Makefile | 2 +
tools/perf/util/module.c | 509 ++++++++++++++++++++++++++++++++++++++++++++++
tools/perf/util/module.h | 53 +++++
3 files changed, 564 insertions(+), 0 deletions(-)
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 8f7fd1b..7822b3d 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -306,6 +306,7 @@ LIB_H += util/strlist.h
LIB_H += util/run-command.h
LIB_H += util/sigchain.h
LIB_H += util/symbol.h
+LIB_H += util/module.h
LIB_H += util/color.h
LIB_OBJS += util/abspath.o
@@ -329,6 +330,7 @@ LIB_OBJS += util/usage.o
LIB_OBJS += util/wrapper.o
LIB_OBJS += util/sigchain.o
LIB_OBJS += util/symbol.o
+LIB_OBJS += util/module.o
LIB_OBJS += util/color.o
LIB_OBJS += util/pager.o
LIB_OBJS += util/header.o
diff --git a/tools/perf/util/module.c b/tools/perf/util/module.c
new file mode 100644
index 0000000..ddabe92
--- /dev/null
+++ b/tools/perf/util/module.c
@@ -0,0 +1,509 @@
+#include "util.h"
+#include "../perf.h"
+#include "string.h"
+#include "module.h"
+
+#include <libelf.h>
+#include <gelf.h>
+#include <elf.h>
+#include <dirent.h>
+#include <sys/utsname.h>
+
+static unsigned int crc32(const char *p, unsigned int len)
+{
+ int i;
+ unsigned int crc = 0;
+
+ while (len--) {
+ crc ^= *p++;
+ for (i = 0; i < 8; i++)
+ crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+ }
+ return crc;
+}
+
+/* module section methods */
+
+struct sec_dso *sec_dso__new_dso(const char *name)
+{
+ struct sec_dso *self = malloc(sizeof(*self) + strlen(name) + 1);
+
+ if (self != NULL) {
+ strcpy(self->name, name);
+ self->secs = RB_ROOT;
+ self->find_section = sec_dso__find_section;
+ }
+
+ return self;
+}
+
+static void sec_dso__delete_section(struct section *self)
+{
+ free(((void *)self));
+}
+
+void sec_dso__delete_sections(struct sec_dso *self)
+{
+ struct section *pos;
+ struct rb_node *next = rb_first(&self->secs);
+
+ while (next) {
+ pos = rb_entry(next, struct section, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, &self->secs);
+ sec_dso__delete_section(pos);
+ }
+}
+
+void sec_dso__delete_self(struct sec_dso *self)
+{
+ sec_dso__delete_sections(self);
+ free(self);
+}
+
+static void sec_dso__insert_section(struct sec_dso *self, struct section *sec)
+{
+ struct rb_node **p = &self->secs.rb_node;
+ struct rb_node *parent = NULL;
+ const u64 hash = sec->hash;
+ struct section *s;
+
+ while (*p != NULL) {
+ parent = *p;
+ s = rb_entry(parent, struct section, rb_node);
+ if (hash < s->hash)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&sec->rb_node, parent, p);
+ rb_insert_color(&sec->rb_node, &self->secs);
+}
+
+struct section *sec_dso__find_section(struct sec_dso *self, const char *name)
+{
+ struct rb_node *n;
+ u64 hash;
+ int len;
+
+ if (self == NULL)
+ return NULL;
+
+ len = strlen(name);
+ hash = crc32(name, len);
+
+ n = self->secs.rb_node;
+
+ while (n) {
+ struct section *s = rb_entry(n, struct section, rb_node);
+
+ if (hash < s->hash)
+ n = n->rb_left;
+ else if (hash > s->hash)
+ n = n->rb_right;
+ else {
+ if (!strcmp(name, s->name))
+ return s;
+ else
+ n = rb_next(&s->rb_node);
+ }
+ }
+
+ return NULL;
+}
+
+static size_t sec_dso__fprintf_section(struct section *self, FILE *fp)
+{
+ return fprintf(fp, "name:%s vma:%llx path:%s\n",
+ self->name, self->vma, self->path);
+}
+
+size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp)
+{
+ size_t ret = fprintf(fp, "dso: %s\n", self->name);
+
+ struct rb_node *nd;
+ for (nd = rb_first(&self->secs); nd; nd = rb_next(nd)) {
+ struct section *pos = rb_entry(nd, struct section, rb_node);
+ ret += sec_dso__fprintf_section(pos, fp);
+ }
+
+ return ret;
+}
+
+static struct section *section__new(const char *name, const char *path)
+{
+ struct section *self = calloc(1, sizeof(*self));
+
+ if (!self)
+ goto out_failure;
+
+ self->name = calloc(1, strlen(name) + 1);
+ if (!self->name)
+ goto out_failure;
+
+ self->path = calloc(1, strlen(path) + 1);
+ if (!self->path)
+ goto out_failure;
+
+ strcpy(self->name, name);
+ strcpy(self->path, path);
+ self->hash = crc32(self->name, strlen(name));
+
+ return self;
+
+out_failure:
+ if (self) {
+ if (self->name)
+ free(self->name);
+ if (self->path)
+ free(self->path);
+ free(self);
+ }
+
+ return NULL;
+}
+
+/* module methods */
+
+struct mod_dso *mod_dso__new_dso(const char *name)
+{
+ struct mod_dso *self = malloc(sizeof(*self) + strlen(name) + 1);
+
+ if (self != NULL) {
+ strcpy(self->name, name);
+ self->mods = RB_ROOT;
+ self->find_module = mod_dso__find_module;
+ }
+
+ return self;
+}
+
+static void mod_dso__delete_module(struct module *self)
+{
+ free(((void *)self));
+}
+
+void mod_dso__delete_modules(struct mod_dso *self)
+{
+ struct module *pos;
+ struct rb_node *next = rb_first(&self->mods);
+
+ while (next) {
+ pos = rb_entry(next, struct module, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, &self->mods);
+ mod_dso__delete_module(pos);
+ }
+}
+
+void mod_dso__delete_self(struct mod_dso *self)
+{
+ mod_dso__delete_modules(self);
+ free(self);
+}
+
+static void mod_dso__insert_module(struct mod_dso *self, struct module *mod)
+{
+ struct rb_node **p = &self->mods.rb_node;
+ struct rb_node *parent = NULL;
+ const u64 hash = mod->hash;
+ struct module *m;
+
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct module, rb_node);
+ if (hash < m->hash)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&mod->rb_node, parent, p);
+ rb_insert_color(&mod->rb_node, &self->mods);
+}
+
+struct module *mod_dso__find_module(struct mod_dso *self, const char *name)
+{
+ struct rb_node *n;
+ u64 hash;
+ int len;
+
+ if (self == NULL)
+ return NULL;
+
+ len = strlen(name);
+ hash = crc32(name, len);
+
+ n = self->mods.rb_node;
+
+ while (n) {
+ struct module *m = rb_entry(n, struct module, rb_node);
+
+ if (hash < m->hash)
+ n = n->rb_left;
+ else if (hash > m->hash)
+ n = n->rb_right;
+ else {
+ if (!strcmp(name, m->name))
+ return m;
+ else
+ n = rb_next(&m->rb_node);
+ }
+ }
+
+ return NULL;
+}
+
+static size_t mod_dso__fprintf_module(struct module *self, FILE *fp)
+{
+ return fprintf(fp, "name:%s path:%s\n", self->name, self->path);
+}
+
+size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp)
+{
+ struct rb_node *nd;
+ size_t ret;
+
+ ret = fprintf(fp, "dso: %s\n", self->name);
+
+ for (nd = rb_first(&self->mods); nd; nd = rb_next(nd)) {
+ struct module *pos = rb_entry(nd, struct module, rb_node);
+
+ ret += mod_dso__fprintf_module(pos, fp);
+ }
+
+ return ret;
+}
+
+static struct module *module__new(const char *name, const char *path)
+{
+ struct module *self = calloc(1, sizeof(*self));
+
+ if (!self)
+ goto out_failure;
+
+ self->name = calloc(1, strlen(name) + 1);
+ if (!self->name)
+ goto out_failure;
+
+ self->path = calloc(1, strlen(path) + 1);
+ if (!self->path)
+ goto out_failure;
+
+ strcpy(self->name, name);
+ strcpy(self->path, path);
+ self->hash = crc32(self->name, strlen(name));
+
+ return self;
+
+out_failure:
+ if (self) {
+ if (self->name)
+ free(self->name);
+ if (self->path)
+ free(self->path);
+ free(self);
+ }
+
+ return NULL;
+}
+
+static int mod_dso__load_sections(struct module *mod)
+{
+ int count = 0, path_len;
+ struct dirent *entry;
+ char *line = NULL;
+ char *dir_path;
+ DIR *dir;
+ size_t n;
+
+ path_len = strlen("/sys/module/");
+ path_len += strlen(mod->name);
+ path_len += strlen("/sections/");
+
+ dir_path = calloc(1, path_len + 1);
+ if (dir_path == NULL)
+ goto out_failure;
+
+ strcat(dir_path, "/sys/module/");
+ strcat(dir_path, mod->name);
+ strcat(dir_path, "/sections/");
+
+ dir = opendir(dir_path);
+ if (dir == NULL)
+ goto out_free;
+
+ while ((entry = readdir(dir))) {
+ struct section *section;
+ char *path, *vma;
+ int line_len;
+ FILE *file;
+
+ if (!strcmp(".", entry->d_name) || !strcmp("..", entry->d_name))
+ continue;
+
+ path = calloc(1, path_len + strlen(entry->d_name) + 1);
+ if (path == NULL)
+ break;
+ strcat(path, dir_path);
+ strcat(path, entry->d_name);
+
+ file = fopen(path, "r");
+ if (file == NULL) {
+ free(path);
+ break;
+ }
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0) {
+ free(path);
+ fclose(file);
+ break;
+ }
+
+ if (!line) {
+ free(path);
+ fclose(file);
+ break;
+ }
+
+ line[--line_len] = '\0'; /* \n */
+
+ vma = strstr(line, "0x");
+ if (!vma) {
+ free(path);
+ fclose(file);
+ break;
+ }
+ vma += 2;
+
+ section = section__new(entry->d_name, path);
+ if (!section) {
+ fprintf(stderr, "load_sections: allocation error\n");
+ free(path);
+ fclose(file);
+ break;
+ }
+
+ hex2u64(vma, &section->vma);
+ sec_dso__insert_section(mod->sections, section);
+
+ free(path);
+ fclose(file);
+ count++;
+ }
+
+ closedir(dir);
+ free(line);
+ free(dir_path);
+
+ return count;
+
+out_free:
+ free(dir_path);
+
+out_failure:
+ return count;
+}
+
+static int mod_dso__load_module_paths(struct mod_dso *self)
+{
+ struct utsname uts;
+ int count = 0, len;
+ char *line = NULL;
+ FILE *file;
+ char *path;
+ size_t n;
+
+ if (uname(&uts) < 0)
+ goto out_failure;
+
+ len = strlen("/lib/modules/");
+ len += strlen(uts.release);
+ len += strlen("/modules.dep");
+
+ path = calloc(1, len);
+ if (path == NULL)
+ goto out_failure;
+
+ strcat(path, "/lib/modules/");
+ strcat(path, uts.release);
+ strcat(path, "/modules.dep");
+
+ file = fopen(path, "r");
+ free(path);
+ if (file == NULL)
+ goto out_failure;
+
+ while (!feof(file)) {
+ char *path, *name, *tmp;
+ struct module *module;
+ int line_len, len;
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0)
+ break;
+
+ if (!line)
+ goto out_failure;
+
+ line[--line_len] = '\0'; /* \n */
+
+ path = strtok(line, ":");
+ if (!path)
+ goto out_failure;
+
+ name = strdup(path);
+ name = strtok(name, "/");
+
+ tmp = name;
+
+ while (tmp) {
+ tmp = strtok(NULL, "/");
+ if (tmp)
+ name = tmp;
+ }
+ name = strsep(&name, ".");
+
+ /* Quirk: replace '-' with '_' in sound modules */
+ for (len = strlen(name); len; len--) {
+ if (*(name+len) == '-')
+ *(name+len) = '_';
+ }
+
+ module = module__new(name, path);
+ if (!module) {
+ fprintf(stderr, "load_module_paths: allocation error\n");
+ goto out_failure;
+ }
+ mod_dso__insert_module(self, module);
+
+ module->sections = sec_dso__new_dso("sections");
+ if (!module->sections) {
+ fprintf(stderr, "load_module_paths: allocation error\n");
+ goto out_failure;
+ }
+
+ module->active = mod_dso__load_sections(module);
+
+ if (module->active > 0)
+ count++;
+ }
+
+ free(line);
+ fclose(file);
+
+ return count;
+
+out_failure:
+ return -1;
+}
+
+int mod_dso__load_modules(struct mod_dso *dso)
+{
+ int err;
+
+ err = mod_dso__load_module_paths(dso);
+
+ return err;
+}
diff --git a/tools/perf/util/module.h b/tools/perf/util/module.h
new file mode 100644
index 0000000..8a592ef
--- /dev/null
+++ b/tools/perf/util/module.h
@@ -0,0 +1,53 @@
+#ifndef _PERF_MODULE_
+#define _PERF_MODULE_ 1
+
+#include <linux/types.h>
+#include "../types.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+struct section {
+ struct rb_node rb_node;
+ u64 hash;
+ u64 vma;
+ char *name;
+ char *path;
+};
+
+struct sec_dso {
+ struct list_head node;
+ struct rb_root secs;
+ struct section *(*find_section)(struct sec_dso *, const char *name);
+ char name[0];
+};
+
+struct module {
+ struct rb_node rb_node;
+ u64 hash;
+ char *name;
+ char *path;
+ struct sec_dso *sections;
+ int active;
+};
+
+struct mod_dso {
+ struct list_head node;
+ struct rb_root mods;
+ struct module *(*find_module)(struct mod_dso *, const char *name);
+ char name[0];
+};
+
+struct sec_dso *sec_dso__new_dso(const char *name);
+void sec_dso__delete_sections(struct sec_dso *self);
+void sec_dso__delete_self(struct sec_dso *self);
+size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp);
+struct section *sec_dso__find_section(struct sec_dso *self, const char *name);
+
+struct mod_dso *mod_dso__new_dso(const char *name);
+void mod_dso__delete_modules(struct mod_dso *self);
+void mod_dso__delete_self(struct mod_dso *self);
+size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp);
+struct module *mod_dso__find_module(struct mod_dso *self, const char *name);
+int mod_dso__load_modules(struct mod_dso *dso);
+
+#endif /* _PERF_MODULE_ */
Commit-ID: 429764873cf3fc3e73142872a674bb27cda589c1
Gitweb: http://git.kernel.org/tip/429764873cf3fc3e73142872a674bb27cda589c1
Author: Mike Galbraith <[email protected]>
AuthorDate: Thu, 2 Jul 2009 08:09:46 +0200
Committer: Ingo Molnar <[email protected]>
CommitDate: Thu, 2 Jul 2009 08:42:21 +0200
perf_counter tools: Enable kernel module symbol loading in tools
Add the -m/--modules option to perf report and perf annotate,
which enables live module symbol/image loading. To be used
with -k/--vmlinux.
(Also give perf annotate a -P/--full-paths option.)
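Typical usage (the vmlinux path below is just a placeholder; point it
at the image that matches the running kernel):

  perf report -m -k /path/to/vmlinux
  perf annotate -m -P -k /path/to/vmlinux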
Signed-off-by: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
tools/perf/builtin-annotate.c | 25 ++++++++++++++++++++-----
tools/perf/builtin-report.c | 9 ++++++++-
tools/perf/builtin-top.c | 12 ++++++++++--
3 files changed, 38 insertions(+), 8 deletions(-)
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 8820568..08ea6c5 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -43,6 +43,10 @@ static int dump_trace = 0;
static int verbose;
+static int modules;
+
+static int full_paths;
+
static int print_line;
static unsigned long page_size;
@@ -171,7 +175,7 @@ static int load_kernel(void)
if (!kernel_dso)
return -1;
- err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, 0);
+ err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
@@ -1268,19 +1272,25 @@ static void print_summary(char *filename)
static void annotate_sym(struct dso *dso, struct symbol *sym)
{
- char *filename = dso->name;
+ char *filename = dso->name, *d_filename;
u64 start, end, len;
char command[PATH_MAX*2];
FILE *file;
if (!filename)
return;
- if (dso == kernel_dso)
+ if (sym->module)
+ filename = sym->module->path;
+ else if (dso == kernel_dso)
filename = vmlinux;
start = sym->obj_start;
if (!start)
start = sym->start;
+ if (full_paths)
+ d_filename = filename;
+ else
+ d_filename = basename(filename);
end = start + sym->end - sym->start + 1;
len = sym->end - sym->start;
@@ -1291,13 +1301,14 @@ static void annotate_sym(struct dso *dso, struct symbol *sym)
}
printf("\n\n------------------------------------------------\n");
- printf(" Percent | Source code & Disassembly of %s\n", filename);
+ printf(" Percent | Source code & Disassembly of %s\n", d_filename);
printf("------------------------------------------------\n");
if (verbose >= 2)
printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name);
- sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", (u64)start, (u64)end, filename);
+ sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s",
+ (u64)start, (u64)end, filename, filename);
if (verbose >= 3)
printf("doing: %s\n", command);
@@ -1472,8 +1483,12 @@ static const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
+ OPT_BOOLEAN('m', "modules", &modules,
+ "load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_BOOLEAN('l', "print-line", &print_line,
"print matching source lines (may be slow)"),
+ OPT_BOOLEAN('P', "full-paths", &full_paths,
+ "Don't shorten the displayed pathnames"),
OPT_END()
};
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 38d136f..b44476c 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -46,6 +46,8 @@ static int dump_trace = 0;
static int verbose;
#define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
+static int modules;
+
static int full_paths;
static unsigned long page_size;
@@ -188,7 +190,7 @@ static int load_kernel(void)
if (!kernel_dso)
return -1;
- err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, 0);
+ err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
@@ -648,6 +650,9 @@ sort__sym_print(FILE *fp, struct hist_entry *self)
ret += fprintf(fp, "[%c] %s",
self->dso == kernel_dso ? 'k' :
self->dso == hypervisor_dso ? 'h' : '.', self->sym->name);
+
+ if (self->sym->module)
+ ret += fprintf(fp, "\t[%s]", self->sym->module->name);
} else {
ret += fprintf(fp, "%#016llx", (u64)self->ip);
}
@@ -1710,6 +1715,8 @@ static const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
+ OPT_BOOLEAN('m', "modules", &modules,
+ "load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN('P', "full-paths", &full_paths,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 9bb25fc..aa044ea 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -66,6 +66,7 @@ static unsigned int page_size;
static unsigned int mmap_pages = 16;
static int freq = 0;
static int verbose = 0;
+static char *vmlinux = NULL;
static char *sym_filter;
static unsigned long filter_start;
@@ -265,7 +266,10 @@ static void print_sym_table(void)
printf("%9.1f %10ld - ", syme->weight, syme->snap_count);
color_fprintf(stdout, color, "%4.1f%%", pcnt);
- printf(" - %016llx : %s\n", sym->start, sym->name);
+ printf(" - %016llx : %s", sym->start, sym->name);
+ if (sym->module)
+ printf("\t[%s]", sym->module->name);
+ printf("\n");
}
}
@@ -359,12 +363,13 @@ static int parse_symbols(void)
{
struct rb_node *node;
struct symbol *sym;
+ int modules = vmlinux ? 1 : 0;
kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
if (kernel_dso == NULL)
return -1;
- if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1, 0) <= 0)
+ if (dso__load_kernel(kernel_dso, vmlinux, symbol_filter, verbose, modules) <= 0)
goto out_delete_dso;
node = rb_first(&kernel_dso->syms);
@@ -680,6 +685,7 @@ static const struct option options[] = {
"system-wide collection from all CPUs"),
OPT_INTEGER('C', "CPU", &profile_cpu,
"CPU to profile on"),
+ OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
OPT_INTEGER('m', "mmap-pages", &mmap_pages,
"number of mmap data pages"),
OPT_INTEGER('r', "realtime", &realtime_prio,
@@ -709,6 +715,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
{
int counter;
+ symbol__init();
+
page_size = sysconf(_SC_PAGE_SIZE);
argc = parse_options(argc, argv, options, top_usage, 0);
Commit-ID: 6cfcc53ed4f3ecb9319e73a03f34f1eddcb644dd
Gitweb: http://git.kernel.org/tip/6cfcc53ed4f3ecb9319e73a03f34f1eddcb644dd
Author: Mike Galbraith <[email protected]>
AuthorDate: Thu, 2 Jul 2009 08:08:36 +0200
Committer: Ingo Molnar <[email protected]>
CommitDate: Thu, 2 Jul 2009 08:42:21 +0200
perf_counter tools: Connect module support infrastructure to symbol loading infrastructure
Signed-off-by: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
tools/perf/builtin-annotate.c | 2 +-
tools/perf/builtin-report.c | 2 +-
tools/perf/builtin-top.c | 2 +-
tools/perf/util/symbol.c | 159 ++++++++++++++++++++++++++++++++++++++---
tools/perf/util/symbol.h | 5 +-
5 files changed, 156 insertions(+), 14 deletions(-)
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 3becc8a..8820568 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -171,7 +171,7 @@ static int load_kernel(void)
if (!kernel_dso)
return -1;
- err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
+ err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, 0);
if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 58d1612..38d136f 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -188,7 +188,7 @@ static int load_kernel(void)
if (!kernel_dso)
return -1;
- err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
+ err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, 0);
if (err <= 0) {
dso__delete(kernel_dso);
kernel_dso = NULL;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 97fde1d..9bb25fc 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -364,7 +364,7 @@ static int parse_symbols(void)
if (kernel_dso == NULL)
return -1;
- if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1) <= 0)
+ if (dso__load_kernel(kernel_dso, NULL, symbol_filter, 1, 0) <= 0)
goto out_delete_dso;
node = rb_first(&kernel_dso->syms);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index c077b6a..98a1311 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -35,7 +35,7 @@ static struct symbol *symbol__new(u64 start, u64 len,
self = ((void *)self) + priv_size;
}
self->start = start;
- self->end = start + len - 1;
+ self->end = len ? start + len - 1 : start;
memcpy(self->name, name, namelen);
return self;
@@ -48,8 +48,12 @@ static void symbol__delete(struct symbol *self, unsigned int priv_size)
static size_t symbol__fprintf(struct symbol *self, FILE *fp)
{
- return fprintf(fp, " %llx-%llx %s\n",
+ if (!self->module)
+ return fprintf(fp, " %llx-%llx %s\n",
self->start, self->end, self->name);
+ else
+ return fprintf(fp, " %llx-%llx %s \t[%s]\n",
+ self->start, self->end, self->name, self->module->name);
}
struct dso *dso__new(const char *name, unsigned int sym_priv_size)
@@ -310,6 +314,26 @@ static inline int elf_sym__is_function(const GElf_Sym *sym)
sym->st_size != 0;
}
+static inline int elf_sym__is_label(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_NOTYPE &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF &&
+ sym->st_shndx != SHN_ABS;
+}
+
+static inline const char *elf_sec__name(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return secstrs->d_buf + shdr->sh_name;
+}
+
+static inline int elf_sec__is_text(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
+}
+
static inline const char *elf_sym__name(const GElf_Sym *sym,
const Elf_Data *symstrs)
{
@@ -451,9 +475,9 @@ static int dso__synthesize_plt_symbols(struct dso *self, Elf *elf,
}
static int dso__load_sym(struct dso *self, int fd, const char *name,
- symbol_filter_t filter, int verbose)
+ symbol_filter_t filter, int verbose, struct module *mod)
{
- Elf_Data *symstrs;
+ Elf_Data *symstrs, *secstrs;
uint32_t nr_syms;
int err = -1;
uint32_t index;
@@ -461,7 +485,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
GElf_Shdr shdr;
Elf_Data *syms;
GElf_Sym sym;
- Elf_Scn *sec, *sec_dynsym;
+ Elf_Scn *sec, *sec_dynsym, *sec_strndx;
Elf *elf;
size_t dynsym_idx;
int nr = 0;
@@ -520,6 +544,14 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
if (symstrs == NULL)
goto out_elf_end;
+ sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
+ if (sec_strndx == NULL)
+ goto out_elf_end;
+
+ secstrs = elf_getdata(sec_strndx, NULL);
+ if (secstrs == NULL)
+ goto out_elf_end;
+
nr_syms = shdr.sh_size / shdr.sh_entsize;
memset(&sym, 0, sizeof(sym));
@@ -529,8 +561,11 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
elf_symtab__for_each_symbol(syms, nr_syms, index, sym) {
struct symbol *f;
u64 obj_start;
+ struct section *section = NULL;
+ int is_label = elf_sym__is_label(&sym);
+ const char *section_name;
- if (!elf_sym__is_function(&sym))
+ if (!is_label && !elf_sym__is_function(&sym))
continue;
sec = elf_getscn(elf, sym.st_shndx);
@@ -538,6 +573,11 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
goto out_elf_end;
gelf_getshdr(sec, &shdr);
+
+ if (is_label && !elf_sec__is_text(&shdr, secstrs))
+ continue;
+
+ section_name = elf_sec__name(&shdr, secstrs);
obj_start = sym.st_value;
if (self->prelinked) {
@@ -548,6 +588,17 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
sym.st_value -= shdr.sh_addr - shdr.sh_offset;
}
+ if (mod) {
+ section = mod->sections->find_section(mod->sections, section_name);
+ if (section)
+ sym.st_value += section->vma;
+ else {
+ fprintf(stderr, "dso__load_sym() module %s lookup of %s failed\n",
+ mod->name, section_name);
+ goto out_elf_end;
+ }
+ }
+
f = symbol__new(sym.st_value, sym.st_size,
elf_sym__name(&sym, symstrs),
self->sym_priv_size, obj_start, verbose);
@@ -557,6 +608,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
if (filter && filter(self, f))
symbol__delete(f, self->sym_priv_size);
else {
+ f->module = mod;
dso__insert_symbol(self, f);
nr++;
}
@@ -606,7 +658,7 @@ more:
fd = open(name, O_RDONLY);
} while (fd < 0);
- ret = dso__load_sym(self, fd, name, filter, verbose);
+ ret = dso__load_sym(self, fd, name, filter, verbose, NULL);
close(fd);
/*
@@ -620,6 +672,86 @@ out:
return ret;
}
+static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *name,
+ symbol_filter_t filter, int verbose)
+{
+ struct module *mod = mod_dso__find_module(mods, name);
+ int err = 0, fd;
+
+ if (mod == NULL || !mod->active)
+ return err;
+
+ fd = open(mod->path, O_RDONLY);
+
+ if (fd < 0)
+ return err;
+
+ err = dso__load_sym(self, fd, name, filter, verbose, mod);
+ close(fd);
+
+ return err;
+}
+
+int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose)
+{
+ struct mod_dso *mods = mod_dso__new_dso("modules");
+ struct module *pos;
+ struct rb_node *next;
+ int err;
+
+ err = mod_dso__load_modules(mods);
+
+ if (err <= 0)
+ return err;
+
+ /*
+ * Iterate over modules, and load active symbols.
+ */
+ next = rb_first(&mods->mods);
+ while (next) {
+ pos = rb_entry(next, struct module, rb_node);
+ err = dso__load_module(self, mods, pos->name, filter, verbose);
+
+ if (err < 0)
+ break;
+
+ next = rb_next(&pos->rb_node);
+ }
+
+ if (err < 0) {
+ mod_dso__delete_modules(mods);
+ mod_dso__delete_self(mods);
+ }
+
+ return err;
+}
+
+static inline void dso__fill_symbol_holes(struct dso *self)
+{
+ struct symbol *prev = NULL;
+ struct rb_node *nd;
+
+ for (nd = rb_last(&self->syms); nd; nd = rb_prev(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+
+ if (prev) {
+ u64 hole = 0;
+ int alias = pos->start == prev->start;
+
+ if (!alias)
+ hole = prev->start - pos->end - 1;
+
+ if (hole || alias) {
+ if (alias)
+ pos->end = prev->end;
+ else if (hole)
+ pos->end = prev->start - 1;
+ }
+ }
+ prev = pos;
+ }
+}
+
static int dso__load_vmlinux(struct dso *self, const char *vmlinux,
symbol_filter_t filter, int verbose)
{
@@ -628,19 +760,26 @@ static int dso__load_vmlinux(struct dso *self, const char *vmlinux,
if (fd < 0)
return -1;
- err = dso__load_sym(self, fd, vmlinux, filter, verbose);
+ err = dso__load_sym(self, fd, vmlinux, filter, verbose, NULL);
+
+ if (err > 0)
+ dso__fill_symbol_holes(self);
+
close(fd);
return err;
}
int dso__load_kernel(struct dso *self, const char *vmlinux,
- symbol_filter_t filter, int verbose)
+ symbol_filter_t filter, int verbose, int modules)
{
int err = -1;
- if (vmlinux)
+ if (vmlinux) {
err = dso__load_vmlinux(self, vmlinux, filter, verbose);
+ if (err > 0 && modules)
+ err = dso__load_modules(self, filter, verbose);
+ }
if (err <= 0)
err = dso__load_kallsyms(self, filter, verbose);
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 65a8449..4e141a3 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -5,6 +5,7 @@
#include "types.h"
#include <linux/list.h>
#include <linux/rbtree.h>
+#include "module.h"
struct symbol {
struct rb_node rb_node;
@@ -13,6 +14,7 @@ struct symbol {
u64 obj_start;
u64 hist_sum;
u64 *hist;
+ struct module *module;
void *priv;
char name[0];
};
@@ -41,7 +43,8 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym)
struct symbol *dso__find_symbol(struct dso *self, u64 ip);
int dso__load_kernel(struct dso *self, const char *vmlinux,
- symbol_filter_t filter, int verbose);
+ symbol_filter_t filter, int verbose, int modules);
+int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose);
int dso__load(struct dso *self, symbol_filter_t filter, int verbose);
size_t dso__fprintf(struct dso *self, FILE *fp);
On Thu, 2009-07-02 at 08:47 +0200, Ingo Molnar wrote:
> * Mike Galbraith <[email protected]> wrote:
>
> > Per $subject, this patch set only supports for the LIVE kernel.
> > It adds support infrastructure for path discovery, load address
> > lookup, and symbol generation of live kernel modules.
> >
> > TODO includes resurrection of live annotation in perf top, and
> > support for annotation and report generation of other than live
> > modules. As the patch set sits, Perf top can generate symbols
> > from live binaries, but there's no live annotation capability yet.
> >
> > patch1: perf_counter tools: Make symbol loading consistently return number of loaded symbols.
> > patch2: perf_counter tools: Add infrastructure to support loading of kernel module symbols
> > patch3: perf_counter tools: connect module support infrastructure to symbol loading infrastructure
> > patch4: perf_counter tools: Enable kernel module symbol loading in tools
> >
> > Comments and suggestions most welcome.
>
> Looks very nice! I've applied it with a few minor stylistic fixlets
> and a tad more verbose changelogs.
Thanks!
(sorry about changelogs, I did stare at them, nothing spiffy happened)
> I'm wondering about the next step: couldnt we somehow guess at the
> position of the vmlinux too, validate somehow that it corresponds to
> the kernel we are running - and then use it automatically and by
> default?
I don't know of a way to discover where the image lives. Been pondering
that very thing, along with idiot-proofing.
> Plus, offline analysis would be nice as well i suspect - being able
> to look at profiles on a different box?
Yes, that's high on my TODO. I've been pondering a perf archive tool
that would package everything that's needed to do analysis on a
different box. One big problem, though, is that while you can easily
package vmlinux and modules, what about all the userland binaries? A
large perf.data and/or the debug-info binaries can easily make transport
impractical.
After I resurrect (well, try) live annotation in top, I'll fiddle with
offline kernel analysis.
-Mike
* Mike Galbraith <[email protected]> wrote:
> On Thu, 2009-07-02 at 08:47 +0200, Ingo Molnar wrote:
> > * Mike Galbraith <[email protected]> wrote:
> >
> > > Per $subject, this patch set only supports for the LIVE kernel.
> > > It adds support infrastructure for path discovery, load address
> > > lookup, and symbol generation of live kernel modules.
> > >
> > > TODO includes resurrection of live annotation in perf top, and
> > > support for annotation and report generation of other than live
> > > modules. As the patch set sits, Perf top can generate symbols
> > > from live binaries, but there's no live annotation capability yet.
> > >
> > > patch1: perf_counter tools: Make symbol loading consistently return number of loaded symbols.
> > > patch2: perf_counter tools: Add infrastructure to support loading of kernel module symbols
> > > patch3: perf_counter tools: connect module support infrastructure to symbol loading infrastructure
> > > patch4: perf_counter tools: Enable kernel module symbol loading in tools
> > >
> > > Comments and suggestions most welcome.
> >
> > Looks very nice! I've applied it with a few minor stylistic fixlets
> > and a tad more verbose changelogs.
>
> Thanks!
>
> (sorry about changelogs, I did stare at them, nothing spiffy
> happened)
[ We want to be verbose in changelogs generally - i.e. it's not a
problem at all to tell a boring story about what happens in the
patch. To _you_ it certainly looks boring - to others it's a
useful summary that sets their mind-set before looking at the
patch. ]
> > I'm wondering about the next step: couldnt we somehow guess at
> > the position of the vmlinux too, validate somehow that it
> > corresponds to the kernel we are running - and then use it
> > automatically and by default?
>
> I don't know of a way to discover where the image lives. Been
> pondering that very thing, along with idiot-proofing.
There are two main use cases:
- distro kernels. Here the vmlinux and module path varies but
should be discoverable with a finite list of trial-and-error paths.
- 'make install modules_install' builds of kernel developers. Here
the vmlinux and the source tree might be anywhere. A small trick
might help: we could expose the build position of the kernel
source tree via a new /proc/kernel-buildpath special file, which
contains the vmlinux filename plus an MD5 sum (or CRC32) for good
measure.
Note that /proc/kernel-buildpath might also help the distro case: a
distro could set it so that it points at the correct location of a
debuginfo rpm/deb install.
I.e. /proc/kernel-buildpath and the MD5 could solve both use cases.
Other tools could make use of it too.
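For illustration, a rough sketch of the tool-side lookup such a file
would enable. The file, its one-line "<path> <md5>" format and the
helper below are all assumptions taken from this proposal - nothing
like it exists yet:

    #include <stdio.h>

    /*
     * Hypothetical consumer of the proposed /proc/kernel-buildpath.
     * Assumes a single line of the form "<vmlinux-path> <md5sum>" and
     * that the caller passes buffers of at least 4096 and 33 bytes.
     */
    static int find_kernel_image(char *path, char *md5)
    {
            FILE *file = fopen("/proc/kernel-buildpath", "r");
            int ret = -1;

            if (file == NULL)
                    return -1;      /* old kernel: fall back to path guessing */

            /* bounded conversions so the caller's buffers can't overflow */
            if (fscanf(file, "%4095s %32s", path, md5) == 2)
                    ret = 0;

            fclose(file);
            return ret;
    }

The caller would then md5sum the candidate vmlinux and only trust its
symbols if the sums match.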
A second, more complex possibility would be to expose the kernel
image itself plus the module images as well. This has limitations
though: debuginfo won't be embedded, and symbols are in
/proc/kallsyms (which we do parse).
The advantage is that it's all readily available in memory (just not
exposed), plus it would show the _real_ instructions - the
post-paravirt-fixup, post-ftrace-fixup and other dynamic patching
results.
To expose that we'd have to create some sort of special "kernel
image directory" within debugfs that has files like:
/debug/kimage/vmlinux
/debug/kimage/modules/
/debug/kimage/modules/snd_hda_intel.ko
/debug/kimage/modules/firewire_core.ko
Debugfs is quite easy to use and if we don't make it too fancy (no
separate module directories for example) it would be doable without
too much fuss.
It would be assembly-only annotations, without debuginfo.
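For illustration, a rough kernel-side sketch of the simplest variant -
exposing the running kernel's text as a single debugfs blob. The
directory layout and naming are assumptions from the idea above, and
module images would need a similar hook at module load time:

    #include <linux/debugfs.h>
    #include <linux/errno.h>
    #include <linux/init.h>
    #include <asm/sections.h>

    static struct debugfs_blob_wrapper kimage_blob;

    static int __init kimage_debugfs_init(void)
    {
            /* read-only /sys/kernel/debug/kimage/vmlinux */
            struct dentry *dir = debugfs_create_dir("kimage", NULL);

            if (!dir)
                    return -ENOMEM;

            kimage_blob.data = _text;
            kimage_blob.size = _etext - _text;

            if (!debugfs_create_blob("vmlinux", 0400, dir, &kimage_blob))
                    return -ENOMEM;

            return 0;
    }
    late_initcall(kimage_debugfs_init);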
> > Plus, offline analysis would be nice as well i suspect - being
> > able to look at profiles on a different box?
>
> Yes, that's high on my TODO. I've been pondering a perf archive
> tool that would package everything that's needed to do analysis on
> a different box. One big problem though, is that while you can
> easily package vmlinux and modules, what about all the userland
> binaries? A large perf.data and/or debug info binaries can easily
> make transport impractical enough.
I wouldn't worry about size too much, at least initially.
[ If it ever becomes a big issue then we could do a separate 'perf
compress' pass which could do a 'specific'/sparse snapshot of
affected binaries: i.e. pre-parse the data file, pick out all the
RIPs that matter and check which binaries relate to them, and then
read and pack those bits only. ]
Plus we could use Git's zlib smarts to compress the data file on the
fly as well, during data capture. It's very easy to generate a gig
or two of data currently.
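For illustration only, a minimal sketch of compressing a captured chunk
with plain zlib before it is written out. The helper name is made up and
perf does not do anything like this today:

    #include <string.h>
    #include <unistd.h>
    #include <zlib.h>

    /* Deflate one captured buffer and append it to the output fd. */
    static ssize_t write_compressed(int fd, const void *buf, size_t len)
    {
            unsigned char out[16384];
            z_stream zs;
            ssize_t written = 0;

            memset(&zs, 0, sizeof(zs));
            if (deflateInit(&zs, Z_DEFAULT_COMPRESSION) != Z_OK)
                    return -1;

            zs.next_in = (Bytef *)buf;
            zs.avail_in = len;

            do {    /* standard zlib drain loop: flush until output fits */
                    zs.next_out = out;
                    zs.avail_out = sizeof(out);

                    if (deflate(&zs, Z_FINISH) == Z_STREAM_ERROR) {
                            deflateEnd(&zs);
                            return -1;
                    }
                    written += write(fd, out, sizeof(out) - zs.avail_out);
            } while (zs.avail_out == 0);

            deflateEnd(&zs);
            return written;
    }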
> After I resurrect (well, try) live annotation in top, I'll fiddle
> with offline kernel analysis.
Ok :-)
Btw, another thing: we are thinking about making -F 1000 (1 KHz
auto-freq sampling) the default for perf top and perf record. This
way we'd always gather enough data (and never too much or too little
data), regardless of the intensity of the workload. Have you played
with -F before, what's your general experience about it? It's
particularly useful for 'rare' and highly fluctuating events like
cache-misses.
Maybe 1 KHz is a bit too low - Oprofile defaults to a 100000-cycle
interval, which is about 10 KHz on a 1GHz box and 30 KHz
on a 3GHz box. Perhaps 10 KHz is a better default?
Ingo
On Thu, 2009-07-02 at 09:42 +0200, Ingo Molnar wrote:
(squirrels suggestions away for later reference)
> Btw, another thing: we are thinking about making -F 1000 (1 KHz
> auto-freq sampling) the default for perf top and perf record. This
> way we'd always gather enough data (and never too much or too little
> data), regardless of the intensity of the workload. Have you played
> with -F before, what's your general experience about it? It's
> particularly useful for 'rare' and highly fluctuating events like
> cache-misses.
>
> Maybe 1KHz is a bit too low - Oprofile defaults to 100000 cycles
> interval by default which is about 10 KHz on a 1GHz box and 30 KHz
> on a 3GHz box. Perhaps 10 KHz is a better default?
My default usage is 1000Hz to keep overhead low. Works fine for me.
-Mike
On Thu, 2009-07-02 at 09:17 +0200, Mike Galbraith wrote:
> After I resurrect (well, try) live annotation in top...
Random thought wrt live top annotation: instead of resurrecting it in
the previous form, which could be a bit intrusive display-space-wise,
what do you think of this idea?
Provide a kbd input snapshot trigger which builds a perf record (hard?)
compatible file for the symbols being displayed. Start a background
task to annotate the lot, stuffing the annotate output into an output file.
Better ideas highly welcome.
-Mike
On Thu, 2009-07-02 at 10:42 +0200, Mike Galbraith wrote:
> Provide a kbd input snapshot trigger which builds a perf record (hard?)
> compatible file for the symbols being displayed. Start a background
> task to annotate the lot, stuffing annotate output into an output file.
(P.S. I'm thinking of a dirt-simple performance-issue reporting method)
On Thu, 2009-07-02 at 09:17 +0200, Mike Galbraith wrote:
> I've been pondering a perf archive tool
> that would package everything that's needed to do analysis on a
> different box. One big problem though, is that while you can easily
> package vmlinux and modules, what about all the userland binaries? A
> large perf.data and/or debug info binaries can easily make transport
> impractical enough.
I would simply extend the current file header with another section in
which we do a structured storage of the data structures we currently
build in perf-report. That is, the dso and symbol bits.
If we then run perf-report on a file containing such a section we read
that data instead of trying to locate them the regular way.
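Purely as a sketch of that idea, the appended section could carry
records along these lines. None of these structures exist in the tools
today; the names and fields are assumptions, using the u64/u32 types
from tools/perf/util/types.h:

    struct perf_file_symtab {
            u64     size;           /* total size of this section in bytes */
            u64     nr_dsos;        /* followed by nr_dsos perf_file_dso records */
    };

    struct perf_file_dso {
            u64     nr_syms;        /* followed by nr_syms perf_file_sym records */
            u32     name_len;
            char    name[0];        /* NUL-terminated dso name */
    };

    struct perf_file_sym {
            u64     start;
            u64     end;
            u32     name_len;
            char    name[0];        /* NUL-terminated symbol name */
    };

When such a section is present, perf report would build its dso and
symbol trees from these records instead of opening the binaries.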
On Thu, 2009-07-02 at 14:10 +0200, Peter Zijlstra wrote:
> On Thu, 2009-07-02 at 09:17 +0200, Mike Galbraith wrote:
>
> > I've been pondering a perf archive tool
> > that would package everything that's needed to do analysis on a
> > different box. One big problem though, is that while you can easily
> > package vmlinux and modules, what about all the userland binaries? A
> > large perf.data and/or debug info binaries can easily make transport
> > impractical enough.
>
> I would simply extend the current file header with another section in
> which we do a structured storage of the data structures we currently
> build in perf-report. That is, the dso and symbol bits.
>
> If we then run perf-report on a file containing such a section we read
> that data instead of trying to locate them the regular way.
That's a good idea.
If uname doesn't match the uname stored at record time, you're not live, so
tools require an exportable perf.data. If you're not live and not on
the same host, annotate requires binaries appended via an export tool
with --sym-filter -k -u -% whatever capability.
-Mike
* Mike Galbraith <[email protected]> wrote:
> On Thu, 2009-07-02 at 14:10 +0200, Peter Zijlstra wrote:
> > On Thu, 2009-07-02 at 09:17 +0200, Mike Galbraith wrote:
> >
> > > I've been pondering a perf archive tool
> > > that would package everything that's needed to do analysis on a
> > > different box. One big problem though, is that while you can easily
> > > package vmlinux and modules, what about all the userland binaries? A
> > > large perf.data and/or debug info binaries can easily make transport
> > > impractical enough.
> >
> > I would simply extend the current file header with another section in
> > which we do a structured storage of the data structures we currently
> > build in perf-report. That is, the dso and symbol bits.
> >
> > If we then run perf-report on a file containing such a section we read
> > that data instead of trying to locate them the regular way.
>
> That's a good idea.
>
> If uname doesn't match stored record time uname, you're not live,
> so tools require an exportable perf.data. If you're not live and
> not on the same host, annotate requires binaries appended via an
> export tool with --sym-filter -k -u -% whatever capability.
'perf export' could be a nice shortcut to convert a local perf.data
into an off-line analysable body of data.
Ingo
* Mike Galbraith <[email protected]> wrote:
> On Thu, 2009-07-02 at 09:42 +0200, Ingo Molnar wrote:
>
> (squirrels suggestions away for later reference)
>
> > Btw, another thing: we are thinking about making -F 1000 (1 KHz
> > auto-freq sampling) the default for perf top and perf record. This
> > way we'd always gather enough data (and never too much or too little
> > data), regardless of the intensity of the workload. Have you played
> > with -F before, what's your general experience about it? It's
> > particularly useful for 'rare' and highly fluctuating events like
> > cache-misses.
> >
> > Maybe 1KHz is a bit too low - Oprofile defaults to 100000 cycles
> > interval by default which is about 10 KHz on a 1GHz box and 30
> > KHz on a 3GHz box. Perhaps 10 KHz is a better default?
>
> My default usage is 1000Hz to keep overhead low. Works fine for
> me.
ah, so you use -F by default?
I still think 10 KHz would be better - especially for really short
runs like 'perf record -f -g ./git gc'. Since Oprofile samples at 26
KHz by default, we must not go to too low a frequency, otherwise
people might get a 'hm, the perf profiles are worse than the
Oprofile ones' first impression ...
We could perhaps add a freq=1000 switch to .perfconfig - we have
Git's util/config.c facility - it's just unused right now ;-)
[ Look at the Git sources about how config.c is used. ]
Ingo
* Mike Galbraith <[email protected]> wrote:
> On Thu, 2009-07-02 at 09:17 +0200, Mike Galbraith wrote:
>
> > After I resurrect (well, try) live annotation in top...
>
> Random thought wrt live top annotation: instead of resurrecting in
> the previous form, which could be a bit intrusive display space
> wise, what do you think of this idea?
>
> Provide a kbd input snapshot trigger which builds a perf record
> (hard?) compatible file for the symbols being displayed. Start a
> background task to annotate the lot, stuffing annotate output into
> an output file.
>
> Better ideas highly welcome.
Hm, is there really a performance problem?
We need to calculate and cache the objdump annotation output once,
but after that it should be pretty fast as we just display updated
counts with the same lines over and over again. No repeated objdump
runs are needed.
Ingo
On Fri, 2009-07-03 at 09:24 +0200, Ingo Molnar wrote:
> * Mike Galbraith <[email protected]> wrote:
>
> > On Thu, 2009-07-02 at 14:10 +0200, Peter Zijlstra wrote:
> > > On Thu, 2009-07-02 at 09:17 +0200, Mike Galbraith wrote:
> > >
> > > > I've been pondering a perf archive tool
> > > > that would package everything that's needed to do analysis on a
> > > > different box. One big problem though, is that while you can easily
> > > > package vmlinux and modules, what about all the userland binaries? A
> > > > large perf.data and/or debug info binaries can easily make transport
> > > > impractical enough.
> > >
> > > I would simply extend the current file header with another section in
> > > which we do a structured storage of the data structures we currently
> > > build in perf-report. That is, the dso and symbol bits.
> > >
> > > If we then run perf-report on a file containing such a section we read
> > > that data instead of trying to locate them the regular way.
> >
> > That's a good idea.
> >
> > If uname doesn't match stored record time uname, you're not live,
> > so tools require an exportable perf.data. If you're not live and
> > not on the same host, annotate requires binaries appended via an
> > export tool with --sym-filter -k -u -% whatever capability.
>
> 'perf export' could be a nice shortcut to convert a local perf.data
> into a off-line analysable body of data.
>
I think it should work both ways:
perf export and perf import
--
JSR
http://userweb.kernel.org/~jaswinder/
On Fri, 2009-07-03 at 09:27 +0200, Ingo Molnar wrote:
> * Mike Galbraith <[email protected]> wrote:
>
> > On Thu, 2009-07-02 at 09:42 +0200, Ingo Molnar wrote:
> >
> > (squirrels suggestions away for later reference)
> >
> > > Btw, another thing: we are thinking about making -F 1000 (1 KHz
> > > auto-freq sampling) the default for perf top and perf record. This
> > > way we'd always gather enough data (and never too much or too little
> > > data), regardless of the intensity of the workload. Have you played
> > > with -F before, what's your general experience about it? It's
> > > particularly useful for 'rare' and highly fluctuating events like
> > > cache-misses.
> > >
> > > Maybe 1KHz is a bit too low - Oprofile defaults to 100000 cycles
> > > interval by default which is about 10 KHz on a 1GHz box and 30
> > > KHz on a 3GHz box. Perhaps 10 KHz is a better default?
> >
> > My default usage is 1000Hz to keep overhead low. Works fine for
> > me.
>
> ah, so you use -F by default?
Yes.
> I still think 10 KHz would be better - especially for really short
> runs like 'perf record -f -g ./git gc'. Since Oprofile samples at 26
> KHz by default, we must not go to a too low frequency, otherwise
> people might get a 'hm, the perf profiles are worse than the
> Oprofile ones' first impression ...
>
> We could perhaps add a freq=1000 switch to .perfconfig - we have
> Git's util/config.c facility - it's just unused right now ;-)
>
> [ Look at the Git sources about how config.c is used. ]
Yeah, that's a good idea... everyone can roll their own defaults.
For me, it doesn't matter what the default is, I always specify.
-Mike
On Fri, 2009-07-03 at 09:29 +0200, Ingo Molnar wrote:
> * Mike Galbraith <[email protected]> wrote:
>
> > On Thu, 2009-07-02 at 09:17 +0200, Mike Galbraith wrote:
> >
> > > After I resurrect (well, try) live annotation in top...
> >
> > Random thought wrt live top annotation: instead of resurrecting in
> > the previous form, which could be a bit intrusive display space
> > wise, what do you think of this idea?
> >
> > Provide a kbd input snapshot trigger which builds a perf record
> > (hard?) compatible file for the symbols being displayed. Start a
> > background task to annotate the lot, stuffing annotate output into
> > an output file.
> >
> > Better ideas highly welcome.
>
> Hm, is there really a performance problem?
Sort of..
> We need to calculate and cache the objdump annotation output once,
> but after that it should be pretty fast as we just display updated
> counts with the same lines over and over again. No repeated objdump
> runs are needed.
But active files follow symbols, which change on the fly.
Besides, as mentioned previously, while displayed annotation was very
cool, it took a lot of display space. For me, top with the ability to
emit bic-disposable mini-reports would be my primary perf tools usage.
I'd only use the big brothers when I needed their power/detail.
-Mike
* Mike Galbraith <[email protected]> wrote:
> > We need to calculate and cache the objdump annotation output
> > once, but after that it should be pretty fast as we just display
> > updated counts with the same lines over and over again. No
> > repeated objdump runs are needed.
>
> But active files follow symbols, which change on the fly.
>
> Besides, as mentioned previously, while displayed annotation was
> very cool, it took a lot of display space. For me, top with the
> ability to emit bic-disposable mini-reports would be my primary
> perf tools usage. I'd only use big brothers when I needed their
> power/detail.
Ok, then how about putting some sort of interactivity into perf top?
Up and down arrows would allow walking the histogram, and
hitting enter on a symbol would show the annotated function? It
would be way cool and more usable and more flexible than some
side-channel for mini-reports.
PowerTop has a lot of good text interactivity code that might be
reused (assuming it's under a kernel-compatible license?).
There's also the 'tig' tool - an interactive tool to walk Git
trees/commits. If it's under a compatible license that would be a
nice place to look for clues too - it has a very mature and
well-thought-out TUI in my opinion.
Ingo
On Fri, 2009-07-03 at 10:15 +0200, Ingo Molnar wrote:
> * Mike Galbraith <[email protected]> wrote:
>
> > > We need to calculate and cache the objdump annotation output
> > > once, but after that it should be pretty fast as we just display
> > > updated counts with the same lines over and over again. No
> > > repeated objdump runs are needed.
> >
> > But active files follow symbols, which change on the fly.
> >
> > Besides, as mentioned previously, while displayed annotation was
> > very cool, it took a lot of display space. For me, top with the
> > ability to emit bic-disposable mini-reports would be my primary
> > perf tools usage. I'd only use big brothers when I needed their
> > power/detail.
>
> Ok, then how about putting some sort of interactivity into perf top?
>
> Up and down arrows would allow the walking of the histogram, and
> hitting enter on a symbol would show the annotated function? It
> would be way cool and more usable and more flexible than some
> side-channel for mini-reports.
>
> PowerTop has a lot of good text interactivity code that might be
> reused. (assuming it's under a kernel compatible license?)
>
> There's also the 'tig' tool - an interactive tool to walk Git
> trees/commits. If it's under a compatible license that would be a
> nice place to look for clues too - it has a very mature and
> well-thought-out TUI in my opinion.
Cool. I love free samples to bend/spindle/mutilate :)
-Mike
On Fri, Jul 03, 2009 at 09:17:39AM +0200, Mike Galbraith wrote:
> On Thu, 2009-07-02 at 14:10 +0200, Peter Zijlstra wrote:
> > On Thu, 2009-07-02 at 09:17 +0200, Mike Galbraith wrote:
> >
> > > I've been pondering a perf archive tool
> > > that would package everything that's needed to do analysis on a
> > > different box. One big problem though, is that while you can easily
> > > package vmlinux and modules, what about all the userland binaries? A
> > > large perf.data and/or debug info binaries can easily make transport
> > > impractical enough.
> >
> > I would simply extend the current file header with another section in
> > which we do a structured storage of the data structures we currently
> > build in perf-report. That is, the dso and symbol bits.
> >
> > If we then run perf-report on a file containing such a section we read
> > that data instead of trying to locate them the regular way.
>
> That's a good idea.
>
> If uname doesn't match stored record time uname, you're not live, so
> tools require an exportable perf.data. If you're not live and not on
> the same host, annotate requires binaries appended via an export tool
> with --sym-filter -k -u -% whatever capability.
>
> -Mike
Also, that would make it easier to implement a perf compare tool.
A perf compare may have several uses, including:
(1) comparing different workloads with the same executable.
(2) comparing different executable versions for the same workload
(3) (1) + (2) ?
For (2), having self-contained record files as operands would allow
comparisons based on symbols, which is pretty useful when you have to
compare two different vmlinux images (or whatever binary executable).
* Frederic Weisbecker <[email protected]> wrote:
> On Fri, Jul 03, 2009 at 09:17:39AM +0200, Mike Galbraith wrote:
> > On Thu, 2009-07-02 at 14:10 +0200, Peter Zijlstra wrote:
> > > On Thu, 2009-07-02 at 09:17 +0200, Mike Galbraith wrote:
> > >
> > > > I've been pondering a perf archive tool
> > > > that would package everything that's needed to do analysis on a
> > > > different box. One big problem though, is that while you can easily
> > > > package vmlinux and modules, what about all the userland binaries? A
> > > > large perf.data and/or debug info binaries can easily make transport
> > > > impractical enough.
> > >
> > > I would simply extend the current file header with another section in
> > > which we do a structured storage of the data structures we currently
> > > build in perf-report. That is, the dso and symbol bits.
> > >
> > > If we then run perf-report on a file containing such a section we read
> > > that data instead of trying to locate them the regular way.
> >
> > That's a good idea.
> >
> > If uname doesn't match stored record time uname, you're not live, so
> > tools require an exportable perf.data. If you're not live and not on
> > the same host, annotate requires binaries appended via an export tool
> > with --sym-filter -k -u -% whatever capability.
> >
> > -Mike
>
>
> Also that would make easier the implementation of a perf compare
> thing. A perf compare may have several uses, including:
>
> (1) comparing different workloads with a same executable.
> (2) comparing different executable versions for a same workload
> (3) (1) + (2) ?
>
> For the (2), having self contained record files as operands would
> let comparisons based on symbols, pretty useful when you have to
> compare two different vmlinux (or whatever binary executable).
very good points.
Ingo
On Fri, Jul 03, 2009 at 10:28:44AM +0200, Mike Galbraith wrote:
> On Fri, 2009-07-03 at 10:15 +0200, Ingo Molnar wrote:
> > * Mike Galbraith <[email protected]> wrote:
> >
> > > > We need to calculate and cache the objdump annotation output
> > > > once, but after that it should be pretty fast as we just display
> > > > updated counts with the same lines over and over again. No
> > > > repeated objdump runs are needed.
> > >
> > > But active files follow symbols, which change on the fly.
> > >
> > > Besides, as mentioned previously, while displayed annotation was
> > > very cool, it took a lot of display space. For me, top with the
> > > ability to emit bic-disposable mini-reports would be my primary
> > > perf tools usage. I'd only use big brothers when I needed their
> > > power/detail.
> >
> > Ok, then how about putting some sort of interactivity into perf top?
> >
> > Up and down arrows would allow the walking of the histogram, and
> > hitting enter on a symbol would show the annotated function? It
> > would be way cool and more usable and more flexible than some
> > side-channel for mini-reports.
> >
> > PowerTop has a lot of good text interactivity code that might be
> > reused. (assuming it's under a kernel compatible license?)
> >
> > There's also the 'tig' tool - an interactive tool to walk Git
> > trees/commits. If it's under a compatible license that would be a
> > nice place to look for clues too - it has a very mature and
> > well-thought-out TUI in my opinion.
>
> Cool. I love free samples to bend/spindle/mutilate :)
>
> -Mike
The same interactivity could also apply to perf report, by dynamically
expanding callchains, linking to more details with annotate, etc...