From: Alexandru Elisei
To: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org
Cc: mark.rutland@arm.com, sumit.garg@linaro.org, maz@kernel.org,
	swboyd@chromium.org, catalin.marinas@arm.com, will@kernel.org,
	Julien Thierry, Julien Thierry, Will Deacon
Subject: [PATCH v7 6/7] arm_pmu: Introduce pmu_irq_ops
Date: Thu, 24 Sep 2020 12:07:05 +0100
Message-Id: <20200924110706.254996-7-alexandru.elisei@arm.com>
X-Mailer: git-send-email 2.28.0
In-Reply-To: <20200924110706.254996-1-alexandru.elisei@arm.com>
References: <20200924110706.254996-1-alexandru.elisei@arm.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

From: Julien Thierry

Currently the PMU interrupt can either be a normal irq or a percpu irq.
Supporting NMI will introduce two cases for each existing one. It becomes
a mess of 'if's when managing the interrupt.

Define sets of callbacks for operations commonly done on the interrupt. The
appropriate set of callbacks is selected at interrupt request time and
simplifies interrupt enabling/disabling and freeing.

Cc: Julien Thierry
Cc: Will Deacon
Cc: Mark Rutland
Signed-off-by: Julien Thierry
Tested-by: Sumit Garg (Developerbox)
Signed-off-by: Alexandru Elisei
---
 drivers/perf/arm_pmu.c | 90 ++++++++++++++++++++++++++++++++++--------
 1 file changed, 74 insertions(+), 16 deletions(-)

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index df352b334ea7..a770726e98d4 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -26,8 +26,46 @@
 
 #include
 
+static int armpmu_count_irq_users(const int irq);
+
+struct pmu_irq_ops {
+	void (*enable_pmuirq)(unsigned int irq);
+	void (*disable_pmuirq)(unsigned int irq);
+	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
+};
+
+static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
+{
+	free_irq(irq, per_cpu_ptr(devid, cpu));
+}
+
+static const struct pmu_irq_ops pmuirq_ops = {
+	.enable_pmuirq = enable_irq,
+	.disable_pmuirq = disable_irq_nosync,
+	.free_pmuirq = armpmu_free_pmuirq
+};
+
+static void armpmu_enable_percpu_pmuirq(unsigned int irq)
+{
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
+				      void __percpu *devid)
+{
+	if (armpmu_count_irq_users(irq) == 1)
+		free_percpu_irq(irq, devid);
+}
+
+static const struct pmu_irq_ops percpu_pmuirq_ops = {
+	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
+	.disable_pmuirq = disable_percpu_irq,
+	.free_pmuirq = armpmu_free_percpu_pmuirq
+};
+
 static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
 static DEFINE_PER_CPU(int, cpu_irq);
+static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
 
 static inline u64 arm_pmu_event_max_period(struct perf_event *event)
 {
@@ -544,6 +582,23 @@ static int armpmu_count_irq_users(const int irq)
 	return count;
 }
 
+static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
+{
+	const struct pmu_irq_ops *ops = NULL;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(cpu_irq, cpu) != irq)
+			continue;
+
+		ops = per_cpu(cpu_irq_ops, cpu);
+		if (ops)
+			break;
+	}
+
+	return ops;
+}
+
 void armpmu_free_irq(int irq, int cpu)
 {
 	if (per_cpu(cpu_irq, cpu) == 0)
@@ -551,18 +606,18 @@ void armpmu_free_irq(int irq, int cpu)
 	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
 		return;
 
-	if (!irq_is_percpu_devid(irq))
-		free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
-	else if (armpmu_count_irq_users(irq) == 1)
-		free_percpu_irq(irq, &cpu_armpmu);
+	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
 
 	per_cpu(cpu_irq, cpu) = 0;
+	per_cpu(cpu_irq_ops, cpu) = NULL;
 }
 
 int armpmu_request_irq(int irq, int cpu)
 {
 	int err = 0;
 	const irq_handler_t handler = armpmu_dispatch_irq;
+	const struct pmu_irq_ops *irq_ops;
+
 	if (!irq)
 		return 0;
 
@@ -584,15 +639,26 @@ int armpmu_request_irq(int irq, int cpu)
 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 		err = request_irq(irq, handler, irq_flags, "arm-pmu",
 				  per_cpu_ptr(&cpu_armpmu, cpu));
+
+		irq_ops = &pmuirq_ops;
 	} else if (armpmu_count_irq_users(irq) == 0) {
 		err = request_percpu_irq(irq, handler, "arm-pmu",
 					 &cpu_armpmu);
+
+		irq_ops = &percpu_pmuirq_ops;
+	} else {
+		/* Per cpudevid irq was already requested by another CPU */
+		irq_ops = armpmu_find_irq_ops(irq);
+
+		if (WARN_ON(!irq_ops))
+			err = -EINVAL;
 	}
 
 	if (err)
 		goto err_out;
 
 	per_cpu(cpu_irq, cpu) = irq;
+	per_cpu(cpu_irq_ops, cpu) = irq_ops;
 	return 0;
 
 err_out:
@@ -625,12 +691,8 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
 	per_cpu(cpu_armpmu, cpu) = pmu;
 
 	irq = armpmu_get_cpu_irq(pmu, cpu);
-	if (irq) {
-		if (irq_is_percpu_devid(irq))
-			enable_percpu_irq(irq, IRQ_TYPE_NONE);
-		else
-			enable_irq(irq);
-	}
+	if (irq)
+		per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
 
 	return 0;
 }
@@ -644,12 +706,8 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
 		return 0;
 
 	irq = armpmu_get_cpu_irq(pmu, cpu);
-	if (irq) {
-		if (irq_is_percpu_devid(irq))
-			disable_percpu_irq(irq);
-		else
-			disable_irq_nosync(irq);
-	}
+	if (irq)
+		per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
 
 	per_cpu(cpu_armpmu, cpu) = NULL;
 
-- 
2.28.0
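
For readers new to the pattern, below is a minimal, self-contained userspace
sketch (not part of the patch) of the idea the diff applies: select a set of
callbacks once, when the interrupt is requested, and call through that set
everywhere else instead of re-checking the interrupt type at every site. The
struct, function and variable names here are illustrative stand-ins, not the
kernel APIs used above.

#include <stdio.h>

/* Mirrors the role of struct pmu_irq_ops, with simplified operations. */
struct irq_ops {
	void (*enable)(unsigned int irq);
	void (*disable)(unsigned int irq);
};

/* Stand-ins for enable_irq()/disable_irq_nosync() on a normal irq. */
static void enable_normal(unsigned int irq)  { printf("enable normal irq %u\n", irq); }
static void disable_normal(unsigned int irq) { printf("disable normal irq %u\n", irq); }

/* Stand-ins for enable_percpu_irq()/disable_percpu_irq() on a percpu irq. */
static void enable_percpu(unsigned int irq)  { printf("enable percpu irq %u\n", irq); }
static void disable_percpu(unsigned int irq) { printf("disable percpu irq %u\n", irq); }

static const struct irq_ops normal_ops = { enable_normal, disable_normal };
static const struct irq_ops percpu_ops = { enable_percpu, disable_percpu };

/* Plays the role of per_cpu(cpu_irq_ops, cpu): set once at request time. */
static const struct irq_ops *selected_ops;

static void request_pmu_irq(unsigned int irq, int is_percpu)
{
	/* The only place that needs to know which kind of irq this is. */
	selected_ops = is_percpu ? &percpu_ops : &normal_ops;
}

int main(void)
{
	request_pmu_irq(23, 0);
	selected_ops->enable(23);	/* no per-irq-type branches at the call sites */
	selected_ops->disable(23);
	return 0;
}

With this structure, adding another interrupt flavour (as the NMI support later
in the series does) means adding another ops table rather than another branch
at every enable/disable/free site.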