2021-06-30 15:56:55

by Bayduraev, Alexey V

[permalink] [raw]
Subject: [PATCH v8 01/22] perf record: Introduce thread affinity and mmap masks

Introduce affinity and mmap thread masks. Thread affinity mask
defines cpus that a thread is allowed to run on. Thread maps
mask defines mmap data buffers the thread serves to stream
profiling data from.

Acked-by: Andi Kleen <[email protected]>
Acked-by: Namhyung Kim <[email protected]>
Signed-off-by: Alexey Bayduraev <[email protected]>
---
tools/perf/builtin-record.c | 127 ++++++++++++++++++++++++++++++++++++
1 file changed, 127 insertions(+)

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 71efe6573ee7..31b3a515abc1 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -87,6 +87,11 @@ struct switch_output {
int cur_file;
};

+struct thread_mask {
+ struct mmap_cpu_mask maps;
+ struct mmap_cpu_mask affinity;
+};
+
struct record {
struct perf_tool tool;
struct record_opts opts;
@@ -111,6 +116,8 @@ struct record {
unsigned long long samples;
struct mmap_cpu_mask affinity_mask;
unsigned long output_max_size; /* = 0: unlimited */
+ struct thread_mask *thread_masks;
+ int nr_threads;
};

static volatile int done;
@@ -2216,6 +2223,55 @@ static int record__parse_affinity(const struct option *opt, const char *str, int
return 0;
}

+static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits)
+{
+ mask->nbits = nr_bits;
+ mask->bits = bitmap_alloc(mask->nbits);
+ if (!mask->bits) {
+ pr_err("Failed to allocate mmap_cpu mask\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
+{
+ bitmap_free(mask->bits);
+ mask->nbits = 0;
+}
+
+static void record__thread_mask_clear(struct thread_mask *mask)
+{
+ bitmap_zero(mask->maps.bits, mask->maps.nbits);
+ bitmap_zero(mask->affinity.bits, mask->affinity.nbits);
+}
+
+static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
+{
+ int ret;
+
+ ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
+ if (ret) {
+ mask->affinity.bits = NULL;
+ return ret;
+ }
+
+ ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
+ if (ret) {
+ record__mmap_cpu_mask_free(&mask->maps);
+ mask->maps.bits = NULL;
+ }
+
+ return ret;
+}
+
+static void record__thread_mask_free(struct thread_mask *mask)
+{
+ record__mmap_cpu_mask_free(&mask->maps);
+ record__mmap_cpu_mask_free(&mask->affinity);
+}
+
static int parse_output_max_size(const struct option *opt,
const char *str, int unset)
{
@@ -2664,6 +2720,70 @@ static struct option __record_options[] = {

struct option *record_options = __record_options;

+static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
+{
+ int c;
+
+ for (c = 0; c < cpus->nr; c++)
+ set_bit(cpus->map[c], mask->bits);
+}
+
+static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
+{
+ int t, ret;
+
+ rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
+ if (!rec->thread_masks) {
+ pr_err("Failed to allocate thread masks\n");
+ return -ENOMEM;
+ }
+
+ for (t = 0; t < nr_threads; t++) {
+ ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
+ if (ret)
+ return ret;
+ record__thread_mask_clear(&rec->thread_masks[t]);
+ }
+
+ return 0;
+}
+static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
+{
+ int ret;
+
+ ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu());
+ if (ret)
+ return ret;
+
+ record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
+
+ rec->nr_threads = 1;
+
+ return 0;
+}
+
+static int record__init_thread_masks(struct record *rec)
+{
+ struct perf_cpu_map *cpus = rec->evlist->core.cpus;
+
+ return record__init_thread_default_masks(rec, cpus);
+}
+
+static int record__fini_thread_masks(struct record *rec)
+{
+ int t;
+
+ if (rec->thread_masks)
+ for (t = 0; t < rec->nr_threads; t++)
+ record__thread_mask_free(&rec->thread_masks[t]);
+
+ zfree(&rec->thread_masks);
+
+ rec->nr_threads = 0;
+
+ return 0;
+}
+
int cmd_record(int argc, const char **argv)
{
int err;
@@ -2912,6 +3032,12 @@ int cmd_record(int argc, const char **argv)
goto out;
}

+ err = record__init_thread_masks(rec);
+ if (err) {
+ pr_err("record__init_thread_masks failed, error %d\n", err);
+ goto out;
+ }
+
if (rec->opts.nr_cblocks > nr_cblocks_max)
rec->opts.nr_cblocks = nr_cblocks_max;
pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
@@ -2930,6 +3056,7 @@ int cmd_record(int argc, const char **argv)
symbol__exit();
auxtrace_record__free(rec->itr);
out_opts:
+ record__fini_thread_masks(rec);
evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
return err;
}
--
2.19.0


2021-06-30 16:19:34

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v8 01/22] perf record: Introduce thread affinity and mmap masks

Em Wed, Jun 30, 2021 at 06:54:40PM +0300, Alexey Bayduraev escreveu:
> Introduce affinity and mmap thread masks. Thread affinity mask
> defines cpus that a thread is allowed to run on. Thread maps
> mask defines mmap data buffers the thread serves to stream
> profiling data from.

At this point we would probably be better off renaming 'struct mmap_cpu_mask'
to 'struct cpumask' since it is not only for 'mmap's, right?

In fact probably would be better to adopt linux/cpumask.h from the
kernel sources, i.e.:

typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

some more comments below

> Acked-by: Andi Kleen <[email protected]>
> Acked-by: Namhyung Kim <[email protected]>
> Signed-off-by: Alexey Bayduraev <[email protected]>
> ---
> tools/perf/builtin-record.c | 127 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 127 insertions(+)
>
> diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
> index 71efe6573ee7..31b3a515abc1 100644
> --- a/tools/perf/builtin-record.c
> +++ b/tools/perf/builtin-record.c
> @@ -87,6 +87,11 @@ struct switch_output {
> int cur_file;
> };
>
> +struct thread_mask {
> + struct mmap_cpu_mask maps;
> + struct mmap_cpu_mask affinity;
> +};
> +
> struct record {
> struct perf_tool tool;
> struct record_opts opts;
> @@ -111,6 +116,8 @@ struct record {
> unsigned long long samples;
> struct mmap_cpu_mask affinity_mask;
> unsigned long output_max_size; /* = 0: unlimited */
> + struct thread_mask *thread_masks;
> + int nr_threads;

can you move 'nr_threads' to before 'thread_masks'?

> };
>
> static volatile int done;
> @@ -2216,6 +2223,55 @@ static int record__parse_affinity(const struct option *opt, const char *str, int
> return 0;
> }
>
> +static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits)
> +{
> + mask->nbits = nr_bits;
> + mask->bits = bitmap_alloc(mask->nbits);
> + if (!mask->bits) {
> + pr_err("Failed to allocate mmap_cpu mask\n");
> + return -ENOMEM;
> + }
> +
> + return 0;
> +}
> +
> +static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
> +{
> + bitmap_free(mask->bits);
> + mask->nbits = 0;

Please use NULL, as 'mask->nbits' is a pointer.

> +}
> +
> +static void record__thread_mask_clear(struct thread_mask *mask)
> +{
> + bitmap_zero(mask->maps.bits, mask->maps.nbits);
> + bitmap_zero(mask->affinity.bits, mask->affinity.nbits);
> +}
> +
> +static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
> +{
> + int ret;
> +
> + ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);


please combine such decl + assign into one line, i.e.:

int ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);

> + if (ret) {
> + mask->affinity.bits = NULL;
> + return ret;
> + }
> +
> + ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
> + if (ret) {
> + record__mmap_cpu_mask_free(&mask->maps);
> + mask->maps.bits = NULL;
> + }
> +
> + return ret;
> +}
> +
> +static void record__thread_mask_free(struct thread_mask *mask)
> +{
> + record__mmap_cpu_mask_free(&mask->maps);
> + record__mmap_cpu_mask_free(&mask->affinity);
> +}
> +
> static int parse_output_max_size(const struct option *opt,
> const char *str, int unset)
> {
> @@ -2664,6 +2720,70 @@ static struct option __record_options[] = {
>
> struct option *record_options = __record_options;
>
> +static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
> +{
> + int c;
> +
> + for (c = 0; c < cpus->nr; c++)
> + set_bit(cpus->map[c], mask->bits);
> +}
> +
> +static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
> +{
> + int t, ret;
> +
> + rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
> + if (!rec->thread_masks) {
> + pr_err("Failed to allocate thread masks\n");
> + return -ENOMEM;
> + }
> +
> + for (t = 0; t < nr_threads; t++) {
> + ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);

Usually when we don't manage to allocate all that we need we go on and
free the partially allocated resources.

> + if (ret)
> + return ret;
> + record__thread_mask_clear(&rec->thread_masks[t]);
> + }
> +
> + return 0;
> +}
> +static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
> +{
> + int ret;
> +
> + ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu());

ditto

> + if (ret)
> + return ret;
> +
> + record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
> +
> + rec->nr_threads = 1;
> +
> + return 0;
> +}
> +
> +static int record__init_thread_masks(struct record *rec)
> +{
> + struct perf_cpu_map *cpus = rec->evlist->core.cpus;
> +
> + return record__init_thread_default_masks(rec, cpus);
> +}
> +
> +static int record__fini_thread_masks(struct record *rec)
> +{
> + int t;
> +
> + if (rec->thread_masks)
> + for (t = 0; t < rec->nr_threads; t++)
> + record__thread_mask_free(&rec->thread_masks[t]);
> +
> + zfree(&rec->thread_masks);
> +
> + rec->nr_threads = 0;
> +
> + return 0;
> +}
> +
> int cmd_record(int argc, const char **argv)
> {
> int err;
> @@ -2912,6 +3032,12 @@ int cmd_record(int argc, const char **argv)
> goto out;
> }
>
> + err = record__init_thread_masks(rec);
> + if (err) {
> + pr_err("record__init_thread_masks failed, error %d\n", err);
> + goto out;
> + }
> +
> if (rec->opts.nr_cblocks > nr_cblocks_max)
> rec->opts.nr_cblocks = nr_cblocks_max;
> pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
> @@ -2930,6 +3056,7 @@ int cmd_record(int argc, const char **argv)
> symbol__exit();
> auxtrace_record__free(rec->itr);
> out_opts:
> + record__fini_thread_masks(rec);
> evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
> return err;
> }
> --
> 2.19.0
>

--

- Arnaldo

2021-06-30 16:29:26

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v8 01/22] perf record: Introduce thread affinity and mmap masks

Em Wed, Jun 30, 2021 at 01:17:13PM -0300, Arnaldo Carvalho de Melo escreveu:
> Em Wed, Jun 30, 2021 at 06:54:40PM +0300, Alexey Bayduraev escreveu:
> > Introduce affinity and mmap thread masks. Thread affinity mask
> > defines cpus that a thread is allowed to run on. Thread maps
> > mask defines mmap data buffers the thread serves to stream
> > profiling data from.
>
> At this point we would probably be better off renaming 'struct mmap_cpu_mask'
> to 'struct cpumask' since it is not only for 'mmap's, right?
>
> In fact probably would be better to adopt linux/cpumask.h from the
> kernel sources, i.e.:
>
> typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

Note, this patchset is already 22 patches long and is at v8, so we can
do this as a next step.

I'll continue with the review

> some more comments below
>
> > Acked-by: Andi Kleen <[email protected]>
> > Acked-by: Namhyung Kim <[email protected]>
> > Signed-off-by: Alexey Bayduraev <[email protected]>
> > ---
> > tools/perf/builtin-record.c | 127 ++++++++++++++++++++++++++++++++++++
> > 1 file changed, 127 insertions(+)
> >
> > diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
> > index 71efe6573ee7..31b3a515abc1 100644
> > --- a/tools/perf/builtin-record.c
> > +++ b/tools/perf/builtin-record.c
> > @@ -87,6 +87,11 @@ struct switch_output {
> > int cur_file;
> > };
> >
> > +struct thread_mask {
> > + struct mmap_cpu_mask maps;
> > + struct mmap_cpu_mask affinity;
> > +};
> > +
> > struct record {
> > struct perf_tool tool;
> > struct record_opts opts;
> > @@ -111,6 +116,8 @@ struct record {
> > unsigned long long samples;
> > struct mmap_cpu_mask affinity_mask;
> > unsigned long output_max_size; /* = 0: unlimited */
> > + struct thread_mask *thread_masks;
> > + int nr_threads;
>
> can you move 'nr_threads' to before 'thread_masks'?
>
> > };
> >
> > static volatile int done;
> > @@ -2216,6 +2223,55 @@ static int record__parse_affinity(const struct option *opt, const char *str, int
> > return 0;
> > }
> >
> > +static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits)
> > +{
> > + mask->nbits = nr_bits;
> > + mask->bits = bitmap_alloc(mask->nbits);
> > + if (!mask->bits) {
> > + pr_err("Failed to allocate mmap_cpu mask\n");
> > + return -ENOMEM;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
> > +{
> > + bitmap_free(mask->bits);
> > + mask->nbits = 0;
>
> Please use NULL, as 'mask->nbits' is a pointer.
>
> > +}
> > +
> > +static void record__thread_mask_clear(struct thread_mask *mask)
> > +{
> > + bitmap_zero(mask->maps.bits, mask->maps.nbits);
> > + bitmap_zero(mask->affinity.bits, mask->affinity.nbits);
> > +}
> > +
> > +static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
> > +{
> > + int ret;
> > +
> > + ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
>
>
> please combine such decl + assign into one line, i.e.:
>
> int ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
>
> > + if (ret) {
> > + mask->affinity.bits = NULL;
> > + return ret;
> > + }
> > +
> > + ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
> > + if (ret) {
> > + record__mmap_cpu_mask_free(&mask->maps);
> > + mask->maps.bits = NULL;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +static void record__thread_mask_free(struct thread_mask *mask)
> > +{
> > + record__mmap_cpu_mask_free(&mask->maps);
> > + record__mmap_cpu_mask_free(&mask->affinity);
> > +}
> > +
> > static int parse_output_max_size(const struct option *opt,
> > const char *str, int unset)
> > {
> > @@ -2664,6 +2720,70 @@ static struct option __record_options[] = {
> >
> > struct option *record_options = __record_options;
> >
> > +static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
> > +{
> > + int c;
> > +
> > + for (c = 0; c < cpus->nr; c++)
> > + set_bit(cpus->map[c], mask->bits);
> > +}
> > +
> > +static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
> > +{
> > + int t, ret;
> > +
> > + rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
> > + if (!rec->thread_masks) {
> > + pr_err("Failed to allocate thread masks\n");
> > + return -ENOMEM;
> > + }
> > +
> > + for (t = 0; t < nr_threads; t++) {
> > + ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
>
> Usually when we don't manage to allocate all that we need we go on and
> free the partially allocated resources.
>
> > + if (ret)
> > + return ret;
> > + record__thread_mask_clear(&rec->thread_masks[t]);
> > + }
> > +
> > + return 0;
> > +}
> > +static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
> > +{
> > + int ret;
> > +
> > + ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu());
>
> ditto
>
> > + if (ret)
> > + return ret;
> > +
> > + record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
> > +
> > + rec->nr_threads = 1;
> > +
> > + return 0;
> > +}
> > +
> > +static int record__init_thread_masks(struct record *rec)
> > +{
> > + struct perf_cpu_map *cpus = rec->evlist->core.cpus;
> > +
> > + return record__init_thread_default_masks(rec, cpus);
> > +}
> > +
> > +static int record__fini_thread_masks(struct record *rec)
> > +{
> > + int t;
> > +
> > + if (rec->thread_masks)
> > + for (t = 0; t < rec->nr_threads; t++)
> > + record__thread_mask_free(&rec->thread_masks[t]);
> > +
> > + zfree(&rec->thread_masks);
> > +
> > + rec->nr_threads = 0;
> > +
> > + return 0;
> > +}
> > +
> > int cmd_record(int argc, const char **argv)
> > {
> > int err;
> > @@ -2912,6 +3032,12 @@ int cmd_record(int argc, const char **argv)
> > goto out;
> > }
> >
> > + err = record__init_thread_masks(rec);
> > + if (err) {
> > + pr_err("record__init_thread_masks failed, error %d\n", err);
> > + goto out;
> > + }
> > +
> > if (rec->opts.nr_cblocks > nr_cblocks_max)
> > rec->opts.nr_cblocks = nr_cblocks_max;
> > pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
> > @@ -2930,6 +3056,7 @@ int cmd_record(int argc, const char **argv)
> > symbol__exit();
> > auxtrace_record__free(rec->itr);
> > out_opts:
> > + record__fini_thread_masks(rec);
> > evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
> > return err;
> > }
> > --
> > 2.19.0
> >
>
> --
>
> - Arnaldo

--

- Arnaldo

2021-07-01 13:07:57

by Bayduraev, Alexey V

[permalink] [raw]
Subject: Re: [PATCH v8 01/22] perf record: Introduce thread affinity and mmap masks

Hi,

On 30.06.2021 19:17, Arnaldo Carvalho de Melo wrote:
> Em Wed, Jun 30, 2021 at 06:54:40PM +0300, Alexey Bayduraev escreveu:
[SNIP]
>> +static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
>> +{
>> + bitmap_free(mask->bits);
>> + mask->nbits = 0;
>
> Please use NULL, as 'mask->nbits' is a pointer.

In perf/util/mmap.h "nbits" is size_t:

struct mmap_cpu_mask {
unsigned long *bits;
size_t nbits;
};

Regards,
Alexey

>
>> +}
>> +
>> +static void record__thread_mask_clear(struct thread_mask *mask)
>> +{
>> + bitmap_zero(mask->maps.bits, mask->maps.nbits);
>> + bitmap_zero(mask->affinity.bits, mask->affinity.nbits);
>> +}
>> +
>> +static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
>> +{
>> + int ret;
>> +
>> + ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
>
>
> please combine such decl + assign into one line, i.e.:
>
> int ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
>
>> + if (ret) {
>> + mask->affinity.bits = NULL;
>> + return ret;
>> + }
>> +
>> + ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
>> + if (ret) {
>> + record__mmap_cpu_mask_free(&mask->maps);
>> + mask->maps.bits = NULL;
>> + }
>> +
>> + return ret;
>> +}
>> +
>> +static void record__thread_mask_free(struct thread_mask *mask)
>> +{
>> + record__mmap_cpu_mask_free(&mask->maps);
>> + record__mmap_cpu_mask_free(&mask->affinity);
>> +}
>> +
>> static int parse_output_max_size(const struct option *opt,
>> const char *str, int unset)
>> {
>> @@ -2664,6 +2720,70 @@ static struct option __record_options[] = {
>>
>> struct option *record_options = __record_options;
>>
>> +static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
>> +{
>> + int c;
>> +
>> + for (c = 0; c < cpus->nr; c++)
>> + set_bit(cpus->map[c], mask->bits);
>> +}
>> +
>> +static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
>> +{
>> + int t, ret;
>> +
>> + rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
>> + if (!rec->thread_masks) {
>> + pr_err("Failed to allocate thread masks\n");
>> + return -ENOMEM;
>> + }
>> +
>> + for (t = 0; t < nr_threads; t++) {
>> + ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
>
> Usually when we don't manage to allocate all that we need we go on and
> free the partially allocated resources.
>
>> + if (ret)
>> + return ret;
>> + record__thread_mask_clear(&rec->thread_masks[t]);
>> + }
>> +
>> + return 0;
>> +}
>> +static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
>> +{
>> + int ret;
>> +
>> + ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu());
>
> ditto
>
>> + if (ret)
>> + return ret;
>> +
>> + record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
>> +
>> + rec->nr_threads = 1;
>> +
>> + return 0;
>> +}
>> +
>> +static int record__init_thread_masks(struct record *rec)
>> +{
>> + struct perf_cpu_map *cpus = rec->evlist->core.cpus;
>> +
>> + return record__init_thread_default_masks(rec, cpus);
>> +}
>> +
>> +static int record__fini_thread_masks(struct record *rec)
>> +{
>> + int t;
>> +
>> + if (rec->thread_masks)
>> + for (t = 0; t < rec->nr_threads; t++)
>> + record__thread_mask_free(&rec->thread_masks[t]);
>> +
>> + zfree(&rec->thread_masks);
>> +
>> + rec->nr_threads = 0;
>> +
>> + return 0;
>> +}
>> +
>> int cmd_record(int argc, const char **argv)
>> {
>> int err;
>> @@ -2912,6 +3032,12 @@ int cmd_record(int argc, const char **argv)
>> goto out;
>> }
>>
>> + err = record__init_thread_masks(rec);
>> + if (err) {
>> + pr_err("record__init_thread_masks failed, error %d\n", err);
>> + goto out;
>> + }
>> +
>> if (rec->opts.nr_cblocks > nr_cblocks_max)
>> rec->opts.nr_cblocks = nr_cblocks_max;
>> pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
>> @@ -2930,6 +3056,7 @@ int cmd_record(int argc, const char **argv)
>> symbol__exit();
>> auxtrace_record__free(rec->itr);
>> out_opts:
>> + record__fini_thread_masks(rec);
>> evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
>> return err;
>> }
>> --
>> 2.19.0
>>
>

2021-07-01 14:24:19

by Arnaldo Carvalho de Melo

[permalink] [raw]
Subject: Re: [PATCH v8 01/22] perf record: Introduce thread affinity and mmap masks

Em Thu, Jul 01, 2021 at 04:05:09PM +0300, Bayduraev, Alexey V escreveu:
> Hi,
>
> On 30.06.2021 19:17, Arnaldo Carvalho de Melo wrote:
> > Em Wed, Jun 30, 2021 at 06:54:40PM +0300, Alexey Bayduraev escreveu:
> [SNIP]
> >> +static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
> >> +{
> >> + bitmap_free(mask->bits);
> >> + mask->nbits = 0;
> >
> > Please use NULL, as 'mask->nbits' is a pointer.
>
> In perf/util/mmap.h "nbits" is size_t:
>
> struct mmap_cpu_mask {
> unsigned long *bits;
> size_t nbits;
> };

My bad, I saw the pattern of:

*_free(mask->bits);
mask->bits = 0;

Nevermind :-)

- Arnaldo