From: Vikas Shivappa <vikas.shivappa@linux.intel.com>
To: vikas.shivappa@intel.com
Cc: linux-kernel@vger.kernel.org, x86@kernel.org, hpa@zytor.com,
	tglx@linutronix.de, mingo@kernel.org, tj@kernel.org,
	peterz@infradead.org, matt.fleming@intel.com, will.auld@intel.com,
	glenn.p.williamson@intel.com, kanaka.d.juvva@intel.com,
	vikas.shivappa@linux.intel.com
Subject: [PATCH 8/9] x86/intel_rdt: Hot cpu support for Cache Allocation
Date: Thu, 6 Aug 2015 14:55:16 -0700
Message-Id: <1438898117-3692-9-git-send-email-vikas.shivappa@linux.intel.com>
In-Reply-To: <1438898117-3692-1-git-send-email-vikas.shivappa@linux.intel.com>
References: <1438898117-3692-1-git-send-email-vikas.shivappa@linux.intel.com>

This patch adds hot cpu support for Intel Cache Allocation. Support
includes updating the cache bitmask MSRs IA32_L3_QOS_n when a new CPU
package comes online. The IA32_L3_QOS_n MSRs are one per Class of
Service on each CPU package; the new package's MSRs are synchronized
with the values already programmed on the existing packages. The
software cache for the per-cpu IA32_PQR_ASSOC MSR is also reset during
hot cpu notifications.

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
---
 arch/x86/kernel/cpu/intel_rdt.c | 95 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 90 insertions(+), 5 deletions(-)
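The registration in intel_rdt_late_init() below is bracketed by
cpu_notifier_register_begin()/cpu_notifier_register_done(), so no CPU can
come or go between the initial for_each_online_cpu() walk and the notifier
registration; a CPU onlined in that window would otherwise be missed by
both. A minimal, self-contained sketch of that idiom against the 2015-era
notifier API (the example_* names are illustrative and not part of this
patch):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int example_cpu_notifier(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		pr_info("cpu %u came online\n", cpu);
		break;
	case CPU_DOWN_PREPARE:
		pr_info("cpu %u is going down\n", cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static int __init example_init(void)
{
	int cpu;

	/* Hold off hotplug until cpu_notifier_register_done(). */
	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		pr_info("cpu %u already online\n", cpu);

	/* Double-underscore variant: caller already holds the lock. */
	__hotcpu_notifier(example_cpu_notifier, 0);

	cpu_notifier_register_done();

	return 0;
}
late_initcall(example_init);

Note that __hotcpu_notifier() (rather than hotcpu_notifier()) is the form
to use inside the begin()/done() section, since the plain variant would
try to take the same registration lock again.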
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 06cba8da..f151200 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <linux/cpu.h>
 #include

 /*
@@ -40,6 +41,11 @@ struct static_key __read_mostly rdt_enable_key = STATIC_KEY_INIT_FALSE;
  * Mask of CPUs for writing CBM values. We only need one CPU per-socket.
  */
 static cpumask_t rdt_cpumask;
+/*
+ * Temporary cpumask used during hot cpu notification handling. The usage
+ * is serialized by hot cpu locks.
+ */
+static cpumask_t tmp_cpumask;

 #define rdt_for_each_child(pos_css, parent_ir)		\
 	css_for_each_child((pos_css), &(parent_ir)->css)
@@ -311,13 +317,86 @@ out:
 	return err;
 }

-static inline void rdt_cpumask_update(int cpu)
+static inline bool rdt_cpumask_update(int cpu)
 {
-	static cpumask_t tmp;
-
-	cpumask_and(&tmp, &rdt_cpumask, topology_core_cpumask(cpu));
-	if (cpumask_empty(&tmp))
+	cpumask_and(&tmp_cpumask, &rdt_cpumask, topology_core_cpumask(cpu));
+	if (cpumask_empty(&tmp_cpumask)) {
 		cpumask_set_cpu(cpu, &rdt_cpumask);
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * cbm_update_msrs() - Updates all the existing IA32_L3_MASK_n MSRs,
+ * one per CLOSid except IA32_L3_MASK_0, on the current package.
+ */
+static void cbm_update_msrs(void *info)
+{
+	int maxid = boot_cpu_data.x86_cache_max_closid;
+	unsigned int i;
+
+	/*
+	 * At cpu reset, all bits of IA32_L3_MASK_n are set.
+	 * The index starts from one as there is no need
+	 * to update IA32_L3_MASK_0, which belongs to the root cgroup
+	 * whose cache mask is always all 1s.
+	 */
+	for (i = 1; i < maxid; i++) {
+		if (cctable[i].clos_refcnt)
+			cbm_cpu_update((void *)(unsigned long)i);
+	}
+}
+
+static inline void intel_rdt_cpu_start(int cpu)
+{
+	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
+
+	state->closid = 0;
+	mutex_lock(&rdt_group_mutex);
+	if (rdt_cpumask_update(cpu))
+		smp_call_function_single(cpu, cbm_update_msrs, NULL, 1);
+	mutex_unlock(&rdt_group_mutex);
+}
+
+static void intel_rdt_cpu_exit(unsigned int cpu)
+{
+	int i;
+
+	mutex_lock(&rdt_group_mutex);
+	if (!cpumask_test_and_clear_cpu(cpu, &rdt_cpumask)) {
+		mutex_unlock(&rdt_group_mutex);
+		return;
+	}
+
+	cpumask_and(&tmp_cpumask, topology_core_cpumask(cpu), cpu_online_mask);
+	cpumask_clear_cpu(cpu, &tmp_cpumask);
+	i = cpumask_any(&tmp_cpumask);
+
+	if (i < nr_cpu_ids)
+		cpumask_set_cpu(i, &rdt_cpumask);
+	mutex_unlock(&rdt_group_mutex);
+}
+
+static int intel_rdt_cpu_notifier(struct notifier_block *nb,
+				  unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE:
+		intel_rdt_cpu_start(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+		intel_rdt_cpu_exit(cpu);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
 }

 static int __init intel_rdt_late_init(void)
@@ -355,9 +434,15 @@ static int __init intel_rdt_late_init(void)
 	cct->l3_cbm = (1ULL << max_cbm_len) - 1;
 	cct->clos_refcnt = 1;

+	cpu_notifier_register_begin();
+
 	for_each_online_cpu(i)
 		rdt_cpumask_update(i);

+	__hotcpu_notifier(intel_rdt_cpu_notifier, 0);
+
+	cpu_notifier_register_done();
+
 	static_key_slow_inc(&rdt_enable_key);
 	pr_info("Intel cache allocation enabled\n");
 out_err:
--
1.9.1
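cbm_update_msrs() above iterates cbm_cpu_update(), which is introduced
earlier in this series; the per-CLOSid synchronization ultimately reduces
to one MSR write executed on the newly onlined package. A rough,
self-contained sketch of that idea, assuming the IA32_L3_QOS_MASK_0 base
of 0xc90 documented in the Intel SDM and a simplified cctable layout (a
plausible reconstruction, not the series' exact code):

#include <linux/types.h>
#include <asm/msr.h>

/* IA32_L3_QOS_MASK_0; the mask MSRs are contiguous, one per CLOSid. */
#define L3_QOS_MASK_BASE	0xc90
#define CBM_FROM_INDEX(x)	(L3_QOS_MASK_BASE + (x))

/* Simplified stand-in for the table defined earlier in the series. */
struct clos_cbm_table {
	unsigned long l3_cbm;		/* cache bitmask for this CLOSid */
	unsigned int clos_refcnt;	/* users of this CLOSid */
};
static struct clos_cbm_table *cctable;

/* Runs on one CPU of the target package via smp_call_function_single(). */
static void cbm_cpu_update(void *info)
{
	u32 closid = (u32)(unsigned long)info;

	wrmsrl(CBM_FROM_INDEX(closid), cctable[closid].l3_cbm);
}

Because the mask MSRs are per-package rather than per-logical-CPU,
intel_rdt_cpu_start() only needs the smp_call_function_single() when
rdt_cpumask_update() reports that the package was not already represented
in rdt_cpumask.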