Subject: [PATCH v2 3/3] perf record: adapt affinity to machines with #CPUs > 1K
From: Alexey Budankov
To: Arnaldo Carvalho de Melo
Cc: Jiri Olsa, Namhyung Kim, Alexander Shishkin, Peter Zijlstra,
    Ingo Molnar, Andi Kleen, linux-kernel
Organization: Intel Corp.
Message-ID: <69bd0062-0f9e-889b-b7ef-0d97d257569b@linux.intel.com>
Date: Mon, 25 Nov 2019 09:08:57 +0300

Use the struct mmap_cpu_mask type for the tool's thread and mmap data buffers
to overcome the current 1024 CPU mask size limitation of the cpu_set_t type.

glibc's cpu_set_t type has an internal mask size limit of 1024 CPUs. Moving to
the struct mmap_cpu_mask type lifts that limit. The tools bitmap API is used
to manipulate objects of struct mmap_cpu_mask type.

Reported-by: Andi Kleen
Signed-off-by: Alexey Budankov
---
Changes in v2:
- returned -1 from perf_mmap__setup_affinity_mask()
- moved debug printing under -vv option
- implemented releasing of masks using bitmap_free()
---
 tools/perf/builtin-record.c | 30 ++++++++++++++++++++++++------
 tools/perf/util/mmap.c      | 31 +++++++++++++++++++++++++------
 tools/perf/util/mmap.h      |  2 +-
 3 files changed, 50 insertions(+), 13 deletions(-)

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index b95c000c1ed9..f38d9de8183f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -62,6 +62,7 @@
 #include
 #include
 #include
+#include <linux/bitmap.h>
 
 struct switch_output {
 	bool		 enabled;
@@ -93,7 +94,7 @@ struct record {
 	bool			timestamp_boundary;
 	struct switch_output	switch_output;
 	unsigned long long	samples;
-	cpu_set_t		affinity_mask;
+	struct mmap_cpu_mask	affinity_mask;
 	unsigned long		output_max_size;	/* = 0: unlimited */
 };
 
@@ -951,13 +952,21 @@ static struct perf_event_header finished_round_event = {
 	.type = PERF_RECORD_FINISHED_ROUND,
 };
 
+#define MASK_SIZE 1023
 static void record__adjust_affinity(struct record *rec, struct mmap *map)
 {
+	char mask[MASK_SIZE + 1] = {0};
+
 	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
-	    !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
-		CPU_ZERO(&rec->affinity_mask);
-		CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
-		sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
+	    !bitmap_equal(rec->affinity_mask.bits, map->affinity_mask.bits,
+			  rec->affinity_mask.nbits)) {
+		bitmap_zero(rec->affinity_mask.bits, rec->affinity_mask.nbits);
+		bitmap_or(rec->affinity_mask.bits, rec->affinity_mask.bits,
+			  map->affinity_mask.bits, rec->affinity_mask.nbits);
+		sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&rec->affinity_mask),
+				  (cpu_set_t *)rec->affinity_mask.bits);
+		bitmap_scnprintf(rec->affinity_mask.bits, rec->affinity_mask.nbits, mask, MASK_SIZE);
+		pr_debug2("thread mask[%ld]: %s\n", rec->affinity_mask.nbits, mask);
 	}
 }
 
@@ -2389,7 +2398,6 @@ int cmd_record(int argc, const char **argv)
 # undef REASON
 #endif
 
-	CPU_ZERO(&rec->affinity_mask);
 	rec->opts.affinity = PERF_AFFINITY_SYS;
 
 	rec->evlist = evlist__new();
@@ -2455,6 +2463,14 @@ int cmd_record(int argc, const char **argv)
 
 	symbol__init(NULL);
 
+	rec->affinity_mask.nbits = cpu__max_cpu();
+	rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
+	if (!rec->affinity_mask.bits) {
+		pr_err("Failed to allocate thread mask for %ld cpus\n", rec->affinity_mask.nbits);
+		return -ENOMEM;
+	}
+	pr_debug2("thread mask[%ld]: empty\n", rec->affinity_mask.nbits);
+
 	err = record__auxtrace_init(rec);
 	if (err)
 		goto out;
@@ -2569,6 +2585,8 @@ int cmd_record(int argc, const char **argv)
 
 	err = __cmd_record(&record, argc, argv);
 out:
+	if (rec->affinity_mask.bits)
+		bitmap_free(rec->affinity_mask.bits);
 	evlist__delete(rec->evlist);
 	symbol__exit();
 	auxtrace_record__free(rec->itr);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 063d1b93c53d..3b097ae7f5fd 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -23,6 +23,7 @@
 #include "mmap.h"
 #include "../perf.h"
 #include  /* page_size */
+#include <linux/bitmap.h>
 
 size_t mmap__mmap_len(struct mmap *map)
 {
@@ -207,6 +208,9 @@ static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
 
 void mmap__munmap(struct mmap *map)
 {
+	if (map->affinity_mask.bits)
+		bitmap_free(map->affinity_mask.bits);
+
 	perf_mmap__aio_munmap(map);
 	if (map->data != NULL) {
 		munmap(map->data, mmap__mmap_len(map));
@@ -215,7 +219,7 @@ void mmap__munmap(struct mmap *map)
 	auxtrace_mmap__munmap(&map->auxtrace_mmap);
 }
 
-static void build_node_mask(int node, cpu_set_t *mask)
+static void build_node_mask(int node, struct mmap_cpu_mask *mask)
 {
 	int c, cpu, nr_cpus;
 	const struct perf_cpu_map *cpu_map = NULL;
@@ -228,28 +232,43 @@ static void build_node_mask(int node, cpu_set_t *mask)
 	for (c = 0; c < nr_cpus; c++) {
 		cpu = cpu_map->map[c]; /* map c index to online cpu index */
 		if (cpu__get_node(cpu) == node)
-			CPU_SET(cpu, mask);
+			set_bit(cpu, mask->bits);
 	}
 }
 
-static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
+static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
 {
-	CPU_ZERO(&map->affinity_mask);
+	map->affinity_mask.nbits = cpu__max_cpu();
+	map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
+	if (!map->affinity_mask.bits)
+		return -1;
+
 	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
 		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
 	else if (mp->affinity == PERF_AFFINITY_CPU)
-		CPU_SET(map->core.cpu, &map->affinity_mask);
+		set_bit(map->core.cpu, map->affinity_mask.bits);
+
+	return 0;
 }
 
+#define MASK_SIZE 1023
 int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
 {
+	char mask[MASK_SIZE + 1] = {0};
+
 	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
 		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
 			  errno);
 		return -1;
 	}
 
-	perf_mmap__setup_affinity_mask(map, mp);
+	if (perf_mmap__setup_affinity_mask(map, mp)) {
+		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
+			  errno);
+		return -1;
+	}
+
+	bitmap_scnprintf(map->affinity_mask.bits, map->affinity_mask.nbits, mask, MASK_SIZE);
+	pr_debug2("%p: mmap mask[%ld]: %s\n", map, map->affinity_mask.nbits, mask);
 
 	map->core.flush = mp->flush;
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index 786c235d6062..65f9b04c45e4 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -40,7 +40,7 @@ struct mmap {
 		int		 nr_cblocks;
 	} aio;
 #endif
-	cpu_set_t	affinity_mask;
+	struct mmap_cpu_mask	affinity_mask;
 	void		*data;
 	int		comp_level;
 };
-- 
2.20.1
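
As an aside for readers outside of perf internals: the core technique in the
patch above — replacing the fixed 1024-bit cpu_set_t with a bitmap sized at
runtime and handing its raw storage to sched_setaffinity() with a byte length
derived from the bit count — can be shown in a few lines of plain C. The
sketch below is illustrative only and is not part of the patch; its open-coded
bit setting stands in for the tools bitmap API (bitmap_alloc(), set_bit(),
MMAP_CPU_MASK_BYTES()) used in the patch.

/* Standalone sketch, not part of the patch; perf uses the tools bitmap API. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

int main(void)
{
	/* Size the mask from the actual CPU count, which may exceed 1024. */
	size_t nbits = (size_t)sysconf(_SC_NPROCESSORS_CONF);
	size_t nlongs = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long *bits = calloc(nlongs, sizeof(unsigned long));
	int cpu = 0;	/* pin the thread to CPU 0 just for demonstration */

	if (!bits) {
		fprintf(stderr, "failed to allocate mask for %zu cpus\n", nbits);
		return EXIT_FAILURE;
	}

	/* Equivalent of set_bit(cpu, mask->bits) in the patch. */
	bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);

	/*
	 * sched_setaffinity() takes a size in bytes, so a mask wider than
	 * glibc's 1024-bit cpu_set_t can be passed to the kernel directly,
	 * mirroring the MMAP_CPU_MASK_BYTES() size and the cast used in
	 * record__adjust_affinity().
	 */
	if (sched_setaffinity(0, nlongs * sizeof(unsigned long), (cpu_set_t *)bits))
		perror("sched_setaffinity");

	free(bits);
	return 0;
}

glibc also offers CPU_ALLOC()/CPU_ALLOC_SIZE() for dynamically sized CPU sets;
the patch uses the tools bitmap API instead, presumably so the mask integrates
with the struct mmap_cpu_mask helpers introduced earlier in this series.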