2019-10-07 12:56:17

by Jiri Olsa

Subject: [PATCH 28/36] libperf: Move mmap allocation to perf_evlist__mmap_ops::get

Move the allocation of the mmap array into perf_evlist__mmap_ops::get,
so the mmap allocation is centralized.

Also move the nr_mmaps setup into perf_evlist__mmap_ops(), so it is
centralized and shared by both the perf and libperf mmap code.
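
To illustrate the lazy-allocation pattern the ::get callback follows after
this change, here is a minimal standalone sketch. The struct names and the
alloc helper below are simplified stand-ins, not the real libperf types;
only the control flow mirrors the callbacks changed in the hunks below.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the libperf types (not the real API). */
struct mmap_stub { int idx; };

struct evlist_stub {
	int nr_mmaps;
	struct mmap_stub *mmap;      /* non-overwrite maps, allocated lazily */
	struct mmap_stub *mmap_ovw;  /* overwrite maps, allocated lazily     */
};

/* Allocate nr_mmaps entries; mirrors perf_evlist__alloc_mmap() minus the
 * nr_mmaps computation, which now happens up front in the caller. */
static struct mmap_stub *alloc_maps(struct evlist_stub *evlist)
{
	return calloc(evlist->nr_mmaps, sizeof(struct mmap_stub));
}

/* The ::get callback pattern after this patch: pick the right array,
 * allocate it on first use, then hand back the idx-th map. */
static struct mmap_stub *get_cb(struct evlist_stub *evlist, int overwrite, int idx)
{
	struct mmap_stub **slot = overwrite ? &evlist->mmap_ovw : &evlist->mmap;

	if (!*slot) {
		*slot = alloc_maps(evlist);
		if (!*slot)
			return NULL;
	}
	return &(*slot)[idx];
}

int main(void)
{
	struct evlist_stub evlist = { .nr_mmaps = 4 };

	/* First call allocates the array, later calls reuse it. */
	struct mmap_stub *a = get_cb(&evlist, 0, 0);
	struct mmap_stub *b = get_cb(&evlist, 0, 3);

	printf("same array: %d\n", a && b && (b - a == 3));

	free(evlist.mmap);
	free(evlist.mmap_ovw);
	return 0;
}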

Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Jiri Olsa <[email protected]>
---
tools/perf/lib/evlist.c | 42 ++++++++++++++++++++++++----------------
tools/perf/util/evlist.c | 24 +++++++++--------------
2 files changed, 34 insertions(+), 32 deletions(-)

diff --git a/tools/perf/lib/evlist.c b/tools/perf/lib/evlist.c
index 4e85d9bb92bf..61ed27526e08 100644
--- a/tools/perf/lib/evlist.c
+++ b/tools/perf/lib/evlist.c
@@ -338,10 +338,6 @@ static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, boo
int i;
struct perf_mmap *map;

- evlist->nr_mmaps = perf_cpu_map__nr(evlist->cpus);
- if (perf_cpu_map__empty(evlist->cpus))
- evlist->nr_mmaps = perf_thread_map__nr(evlist->threads);
-
map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
if (!map)
return NULL;
@@ -384,18 +380,22 @@ static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
- struct perf_mmap *map = &evlist->mmap[idx];
+ struct perf_mmap *maps;

- if (overwrite) {
- if (!evlist->mmap_ovw) {
- evlist->mmap_ovw = perf_evlist__alloc_mmap(evlist, true);
- if (!evlist->mmap_ovw)
- return NULL;
- }
- map = &evlist->mmap_ovw[idx];
+ maps = overwrite ? evlist->mmap_ovw : evlist->mmap;
+
+ if (!maps) {
+ maps = perf_evlist__alloc_mmap(evlist, overwrite);
+ if (!maps)
+ return NULL;
+
+ if (overwrite)
+ evlist->mmap_ovw = maps;
+ else
+ evlist->mmap = maps;
}

- return map;
+ return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
@@ -556,6 +556,17 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
return -1;
}

+static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
+{
+ int nr_mmaps;
+
+ nr_mmaps = perf_cpu_map__nr(evlist->cpus);
+ if (perf_cpu_map__empty(evlist->cpus))
+ nr_mmaps = perf_thread_map__nr(evlist->threads);
+
+ return nr_mmaps;
+}
+
int perf_evlist__mmap_ops(struct perf_evlist *evlist,
struct perf_evlist_mmap_ops *ops,
struct perf_mmap_param *mp)
@@ -567,10 +578,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
if (!ops || !ops->get || !ops->mmap)
return -EINVAL;

- if (!evlist->mmap)
- evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
- if (!evlist->mmap)
- return -ENOMEM;
+ evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

perf_evlist__for_each_entry(evlist, evsel) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 469a5476ef1a..b58ef3147163 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -574,9 +574,6 @@ static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
int i;
struct mmap *map;

- evlist->core.nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
- if (perf_cpu_map__empty(evlist->core.cpus))
- evlist->core.nr_mmaps = perf_thread_map__nr(evlist->core.threads);
map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
if (!map)
return NULL;
@@ -614,19 +611,21 @@ static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
struct evlist *evlist = container_of(_evlist, struct evlist, core);
- struct mmap *maps = evlist->mmap;
+ struct mmap *maps;

- if (overwrite) {
- maps = evlist->overwrite_mmap;
+ maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

- if (!maps) {
- maps = evlist__alloc_mmap(evlist, true);
- if (!maps)
- return NULL;
+ if (!maps) {
+ maps = evlist__alloc_mmap(evlist, overwrite);
+ if (!maps)
+ return NULL;

+ if (overwrite) {
evlist->overwrite_mmap = maps;
if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
+ } else {
+ evlist->mmap = maps;
}
}

@@ -787,11 +786,6 @@ int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
.mmap = perf_evlist__mmap_cb_mmap,
};

- if (!evlist->mmap)
- evlist->mmap = evlist__alloc_mmap(evlist, false);
- if (!evlist->mmap)
- return -ENOMEM;
-
evlist->core.mmap_len = evlist__mmap_size(pages);
pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
mp.core.mask = evlist->core.mmap_len - page_size - 1;
--
2.21.0
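
To make the shared nr_mmaps rule concrete (one mmap per CPU, unless the
CPU map is empty and we monitor per thread), here is a tiny standalone
sketch of the decision that perf_evlist__nr_mmaps() now centralizes. The
counter functions are dummies standing in for the real perf_cpu_map and
perf_thread_map helpers, so treat the exact return values as assumptions.

#include <stdio.h>

/* Dummy stand-ins for perf_cpu_map__nr()/__empty() and
 * perf_thread_map__nr(); the real helpers live in libperf. */
static int cpu_map_nr(int nr_cpus)       { return nr_cpus ? nr_cpus : 1; }
static int cpu_map_empty(int nr_cpus)    { return nr_cpus == 0; }
static int thread_map_nr(int nr_threads) { return nr_threads; }

/* One mmap per CPU; for a per-thread ("any CPU") session, one per thread. */
static int nr_mmaps(int nr_cpus, int nr_threads)
{
	int nr = cpu_map_nr(nr_cpus);

	if (cpu_map_empty(nr_cpus))
		nr = thread_map_nr(nr_threads);
	return nr;
}

int main(void)
{
	printf("system-wide, 8 CPUs: %d\n", nr_mmaps(8, 1));   /* 8 */
	printf("per-thread, 4 threads: %d\n", nr_mmaps(0, 4)); /* 4 */
	return 0;
}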