To speed up cpu to node lookups, add a perf_env__numa_node() function
that, on the first lookup, builds a cpu-indexed array holding the numa
node of each stored cpu.
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Jiri Olsa <[email protected]>
---
tools/perf/util/env.c | 35 +++++++++++++++++++++++++++++++++++
tools/perf/util/env.h | 6 ++++++
2 files changed, 41 insertions(+)

diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 3baca06786fb..6385961e45df 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -179,6 +179,7 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->sibling_threads);
zfree(&env->pmu_mappings);
zfree(&env->cpu);
+ zfree(&env->numa_map);

for (i = 0; i < env->nr_numa_nodes; i++)
perf_cpu_map__put(env->numa_nodes[i].map);
@@ -338,3 +339,37 @@ const char *perf_env__arch(struct perf_env *env)

return normalize_arch(arch_name);
}
+
+
+int perf_env__numa_node(struct perf_env *env, int cpu)
+{
+ if (!env->nr_numa_map) {
+ struct numa_node *nn;
+ int i, nr = 0;
+
+ for (i = 0; i < env->nr_numa_nodes; i++) {
+ nn = &env->numa_nodes[i];
+ nr = max(nr, perf_cpu_map__max(nn->map));
+ }
+
+ nr++;
+ env->numa_map = zalloc(nr * sizeof(int));
+ if (!env->numa_map)
+ return -1;
+
+ for (i = 0; i < nr; i++)
+ env->numa_map[i] = -1;
+
+ env->nr_numa_map = nr;
+
+ for (i = 0; i < env->nr_numa_nodes; i++) {
+ int tmp, j;
+
+ nn = &env->numa_nodes[i];
+ perf_cpu_map__for_each_cpu(j, tmp, nn->map)
+ env->numa_map[j] = i;
+ }
+ }
+
+ return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
+}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d8e083d42610..777008f8007a 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -86,6 +86,10 @@ struct perf_env {
struct rb_root btfs;
u32 btfs_cnt;
} bpf_progs;
+
+ /* For fast cpu to numa node lookup via perf_env__numa_node */
+ int *numa_map;
+ int nr_numa_map;
};

enum perf_compress_type {
@@ -118,4 +122,6 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id);
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+
+int perf_env__numa_node(struct perf_env *env, int cpu);
#endif /* __PERF_ENV_H */
--
2.21.0
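
As an illustration of how the new helper could be consumed, here is a
hypothetical caller sketch; count_cpus_per_node() is invented for this
example and is not part of the patch or the perf tree, and per_node is
assumed to hold at least env->nr_numa_nodes entries:

/*
 * Hypothetical example, not part of the patch: count how many of the
 * cpus in 'cpus' sit on each numa node described by 'env'.
 */
static void count_cpus_per_node(struct perf_env *env,
				struct perf_cpu_map *cpus, int *per_node)
{
	int cpu, idx;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		int node = perf_env__numa_node(env, cpu);

		/* -1 means the cpu is not covered by any node map. */
		if (node >= 0 && node < env->nr_numa_nodes)
			per_node[node]++;
	}
}
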
On Mon, Sep 02, 2019 at 02:12:54PM +0200, Jiri Olsa wrote:
> To speed up cpu to node lookups, add a perf_env__numa_node() function
> that, on the first lookup, builds a cpu-indexed array holding the numa
> node of each stored cpu.
>
> Link: http://lkml.kernel.org/n/[email protected]
> Signed-off-by: Jiri Olsa <[email protected]>
> ---
> tools/perf/util/env.c | 35 +++++++++++++++++++++++++++++++++++
> tools/perf/util/env.h | 6 ++++++
> 2 files changed, 41 insertions(+)
>
> diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
> index 3baca06786fb..6385961e45df 100644
> --- a/tools/perf/util/env.c
> +++ b/tools/perf/util/env.c
> @@ -179,6 +179,7 @@ void perf_env__exit(struct perf_env *env)
> zfree(&env->sibling_threads);
> zfree(&env->pmu_mappings);
> zfree(&env->cpu);
> + zfree(&env->numa_map);
>
> for (i = 0; i < env->nr_numa_nodes; i++)
> perf_cpu_map__put(env->numa_nodes[i].map);
> @@ -338,3 +339,37 @@ const char *perf_env__arch(struct perf_env *env)
>
> return normalize_arch(arch_name);
> }
> +
> +
> +int perf_env__numa_node(struct perf_env *env, int cpu)
> +{
> + if (!env->nr_numa_map) {
> + struct numa_node *nn;
> + int i, nr = 0;
> +
> + for (i = 0; i < env->nr_numa_nodes; i++) {
> + nn = &env->numa_nodes[i];
> + nr = max(nr, perf_cpu_map__max(nn->map));
> + }
> +
> + nr++;
> + env->numa_map = zalloc(nr * sizeof(int));
Why do you use zalloc()...
> + if (!env->numa_map)
> + return -1;
Only to right after allocating it set all entries to -1?
That zalloc() should be downgraded to a plain malloc(), right?
The setting to -1 is because we may have holes in the array, right? I
think this deserves a comment here as well.
> + for (i = 0; i < nr; i++)
> + env->numa_map[i] = -1;
--
- Arnaldo
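
For illustration only (this is a sketch of the change being suggested,
not the actual follow-up patch): the allocation could be downgraded to
a plain malloc() and gain a comment explaining the holes, e.g.:

 	nr++;
-	env->numa_map = zalloc(nr * sizeof(int));
+
+	/*
+	 * The map can have holes: cpus that appear in no node map keep
+	 * the -1 set below, so a plain malloc() is enough here.
+	 */
+	env->numa_map = malloc(nr * sizeof(int));
 	if (!env->numa_map)
 		return -1;
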
On Mon, Sep 02, 2019 at 10:57:10AM -0300, Arnaldo Carvalho de Melo wrote:

SNIP

> > + env->numa_map = zalloc(nr * sizeof(int));
>
> Why do you use zalloc()...
>
> > + if (!env->numa_map)
> > + return -1;
>
> Only to right after allocating it set all entries to -1?
>
> That zalloc() should be downgraded to a plain malloc(), right?
>
> The setting to -1 is because we may have holes in the array, right? I
> think this deserves a comment here as well.
yea, I added that later on and missed the zalloc above ;-)
I'll send new version
thanks,
jirka
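
For readers outside the perf tree, the underlying technique is easy to
reproduce in isolation. The toy program below is a self-contained
sketch (all names such as demo_node and demo_numa_node are invented for
the demo, none of this is perf code): a cpu-indexed array is built
lazily on first use, sized by the highest cpu number seen, with -1
marking cpus that belong to no node, which is what makes later lookups
O(1).

#include <stdio.h>
#include <stdlib.h>

struct demo_node {
	int node;		/* numa node number */
	const int *cpus;	/* cpus belonging to this node */
	int nr_cpus;
};

static int *numa_map;		/* cpu -> node, -1 for holes */
static int nr_numa_map;

static int demo_numa_node(const struct demo_node *nodes, int nr_nodes, int cpu)
{
	if (!nr_numa_map) {
		int i, j, nr = 0;

		/* Size the map by the highest cpu number present. */
		for (i = 0; i < nr_nodes; i++)
			for (j = 0; j < nodes[i].nr_cpus; j++)
				if (nodes[i].cpus[j] > nr)
					nr = nodes[i].cpus[j];
		nr++;

		numa_map = malloc(nr * sizeof(int));
		if (!numa_map)
			return -1;

		/* -1 marks holes: cpus listed in no node. */
		for (i = 0; i < nr; i++)
			numa_map[i] = -1;
		nr_numa_map = nr;

		for (i = 0; i < nr_nodes; i++)
			for (j = 0; j < nodes[i].nr_cpus; j++)
				numa_map[nodes[i].cpus[j]] = nodes[i].node;
	}

	return cpu >= 0 && cpu < nr_numa_map ? numa_map[cpu] : -1;
}

int main(void)
{
	const int node0_cpus[] = { 0, 1 };
	const int node1_cpus[] = { 4, 5 };
	const struct demo_node nodes[] = {
		{ .node = 0, .cpus = node0_cpus, .nr_cpus = 2 },
		{ .node = 1, .cpus = node1_cpus, .nr_cpus = 2 },
	};

	printf("cpu 1 -> node %d\n", demo_numa_node(nodes, 2, 1));  /* 0 */
	printf("cpu 3 -> node %d\n", demo_numa_node(nodes, 2, 3));  /* -1 (hole) */
	printf("cpu 5 -> node %d\n", demo_numa_node(nodes, 2, 5));  /* 1 */

	free(numa_map);
	return 0;
}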