To speed up cpu to node lookups, add a perf_env__numa_node()
function which, on the first lookup, creates a cpu-indexed array
holding the numa node for each stored cpu.
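A caller then resolves a cpu with a plain array lookup, for example
(a rough sketch; sample_node() and the include paths are illustrative,
not part of this patch):

#include "util/env.h"    /* perf_env__numa_node() */
#include "util/debug.h"  /* pr_debug() */

/* Hypothetical caller: resolve a sample's cpu to its numa node. */
static int sample_node(struct perf_env *env, int sample_cpu)
{
	int node = perf_env__numa_node(env, sample_cpu);

	if (node == -1)
		pr_debug("cpu %d not in recorded topology\n", sample_cpu);

	return node;
}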
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Jiri Olsa <[email protected]>
---
tools/perf/util/env.c | 40 ++++++++++++++++++++++++++++++++++++++++
tools/perf/util/env.h | 6 ++++++
2 files changed, 46 insertions(+)
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 3baca06786fb..ee53e89a9535 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -179,6 +179,7 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->sibling_threads);
zfree(&env->pmu_mappings);
zfree(&env->cpu);
+ zfree(&env->numa_map);
for (i = 0; i < env->nr_numa_nodes; i++)
perf_cpu_map__put(env->numa_nodes[i].map);
@@ -338,3 +339,42 @@ const char *perf_env__arch(struct perf_env *env)
return normalize_arch(arch_name);
}
+
+
+int perf_env__numa_node(struct perf_env *env, int cpu)
+{
+ if (!env->nr_numa_map) {
+ struct numa_node *nn;
+ int i, nr = 0;
+
+ for (i = 0; i < env->nr_numa_nodes; i++) {
+ nn = &env->numa_nodes[i];
+ nr = max(nr, perf_cpu_map__max(nn->map));
+ }
+
+ nr++;
+
+ /*
+ * We initialize the numa_map array to prepare
+ * it for missing cpus, which return node -1.
+ */
+ env->numa_map = malloc(nr * sizeof(int));
+ if (!env->numa_map)
+ return -1;
+
+ for (i = 0; i < nr; i++)
+ env->numa_map[i] = -1;
+
+ env->nr_numa_map = nr;
+
+ for (i = 0; i < env->nr_numa_nodes; i++) {
+ int tmp, j;
+
+ nn = &env->numa_nodes[i];
+ perf_cpu_map__for_each_cpu(j, tmp, nn->map)
+ env->numa_map[j] = i;
+ }
+ }
+
+ return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
+}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d8e083d42610..777008f8007a 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -86,6 +86,10 @@ struct perf_env {
struct rb_root btfs;
u32 btfs_cnt;
} bpf_progs;
+
+ /* For fast cpu to numa node lookup via perf_env__numa_node */
+ int *numa_map;
+ int nr_numa_map;
};
enum perf_compress_type {
@@ -118,4 +122,6 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id);
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+
+int perf_env__numa_node(struct perf_env *env, int cpu);
#endif /* __PERF_ENV_H */
--
2.21.0
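The lazy build-on-first-call scheme above can also be shown
self-contained; a minimal sketch, assuming a hardcoded two-node
topology in place of perf's recorded struct numa_node entries (all
names below are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in topology: the cpus belonging to each node, -1 terminated. */
static const int node0_cpus[] = { 0, 1, 2, 3, -1 };
static const int node1_cpus[] = { 4, 5, 6, 7, -1 };
static const int *node_cpus[] = { node0_cpus, node1_cpus };
#define NR_NODES 2

static int *numa_map;
static int nr_numa_map;

/* Same shape as perf_env__numa_node(): on first use, size the map
 * by the highest cpu number plus one, default every slot to -1 so
 * missing cpus resolve to node -1, then fill in the known cpus. */
static int numa_node(int cpu)
{
	if (!nr_numa_map) {
		int i, j, nr = 0;

		for (i = 0; i < NR_NODES; i++)
			for (j = 0; node_cpus[i][j] != -1; j++)
				if (node_cpus[i][j] > nr)
					nr = node_cpus[i][j];
		nr++;

		numa_map = malloc(nr * sizeof(int));
		if (!numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			numa_map[i] = -1;
		nr_numa_map = nr;

		for (i = 0; i < NR_NODES; i++)
			for (j = 0; node_cpus[i][j] != -1; j++)
				numa_map[node_cpus[i][j]] = i;
	}

	return cpu >= 0 && cpu < nr_numa_map ? numa_map[cpu] : -1;
}

int main(void)
{
	printf("cpu 5  -> node %d\n", numa_node(5));	/* 1 */
	printf("cpu 42 -> node %d\n", numa_node(42));	/* -1 */
	return 0;
}

Sizing the map by the maximum cpu number rather than the cpu count
keeps lookups correct for sparse or offline cpu numbering, at the
cost of a few unused -1 slots.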
The following commit has been merged into the perf/core branch of tip:
Commit-ID: 389799a7a1e86c55f38897e679762efadcc9dedd
Gitweb: https://git.kernel.org/tip/389799a7a1e86c55f38897e679762efadcc9dedd
Author: Jiri Olsa <[email protected]>
AuthorDate: Thu, 29 Aug 2019 13:31:48 +02:00
Committer: Arnaldo Carvalho de Melo <[email protected]>
CommitterDate: Wed, 06 Nov 2019 15:49:39 -03:00
perf env: Add perf_env__numa_node()
To speed up cpu to node lookups, add perf_env__numa_node(), which on
the first lookup creates a cpu-indexed array holding the numa node
for each stored cpu.
Signed-off-by: Jiri Olsa <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Alexey Budankov <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Joe Mario <[email protected]>
Cc: Kan Liang <[email protected]>
Cc: Michael Petlan <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>