From: Vikas Shivappa <vikas.shivappa@linux.intel.com>
To: vikas.shivappa@intel.com, x86@kernel.org, linux-kernel@vger.kernel.org
Cc: hpa@zytor.com, tglx@linutronix.de, mingo@kernel.org, peterz@infradead.org,
	ravi.v.shankar@intel.com, tony.luck@intel.com, fenghua.yu@intel.com,
	h.peter.anvin@intel.com
Subject: [PATCH 4/8] x86/intel_rdt/mba: Add MBA structures and initialize MBA
Date: Mon, 3 Apr 2017 14:57:28 -0700
Message-Id: <1491256652-18729-5-git-send-email-vikas.shivappa@linux.intel.com>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1491256652-18729-1-git-send-email-vikas.shivappa@linux.intel.com>
References: <1491256652-18729-1-git-send-email-vikas.shivappa@linux.intel.com>

The MBA feature details, such as the minimum bandwidth supported and the
bandwidth granularity, are obtained by executing CPUID with EAX=10H,
ECX=3.

Set up and initialize the MBA-specific extensions to data structures
like the global list of RDT resources, the RDT resource structure and
the RDT domain structure.

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
---
 arch/x86/include/asm/intel_rdt.h |  80 +++++++++++++--------
 arch/x86/kernel/cpu/intel_rdt.c  | 151 ++++++++++++++++++++++++++++++++++++---
 2 files changed, 190 insertions(+), 41 deletions(-)
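[Reviewer note, not part of the patch to be applied: a minimal user-space
sketch of how the CPUID.(EAX=10H, ECX=3) sub-leaf consumed by
rdt_get_mem_config() below decodes. Bit positions follow the SDM MBA
enumeration; the sketch assumes an MBA-capable CPU and does no CPUID
capability checking.]

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID.(EAX=10H, ECX=3): MBA enumeration sub-leaf */
		__cpuid_count(0x10, 3, eax, ebx, ecx, edx);

		/* EAX[11:0]: maximum throttling (delay) value, minus one */
		printf("max_delay:    %u\n", (eax & 0xfff) + 1);
		/* ECX bit 2: delay values are on a linear scale */
		printf("delay_linear: %u\n", !!(ecx & 0x4));
		/* EDX[15:0]: maximum CLOSID, minus one */
		printf("num_closid:   %u\n", (edx & 0xffff) + 1);
		return 0;
	}

[On a linear-scale part this might report, e.g., max_delay 90 and
delay_linear 1, which rdt_get_mem_config() turns into min_bw = bw_gran = 10.]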
diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
index 4c94f18..285cdeb 100644
--- a/arch/x86/include/asm/intel_rdt.h
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -12,6 +12,7 @@
 #define IA32_L3_QOS_CFG		0xc81
 #define IA32_L3_CBM_BASE	0xc90
 #define IA32_L2_CBM_BASE	0xd10
+#define IA32_MBA_THRTL_BASE	0xd50
 
 #define L3_QOS_CDP_ENABLE	0x01ULL
 
@@ -69,6 +70,36 @@ struct rftype {
 };
 
 /**
+ * struct rdt_domain - group of cpus sharing an RDT resource
+ * @list:	all instances of this resource
+ * @id:		unique id for this instance
+ * @cpu_mask:	which cpus share this resource
+ * @ctrl_val:	array of cache or mem ctrl values (indexed by CLOSID)
+ * @new_ctrl:	new ctrl value to be loaded
+ * @have_new_ctrl:	did user provide new_ctrl for this domain
+ */
+struct rdt_domain {
+	struct list_head	list;
+	int			id;
+	struct cpumask		cpu_mask;
+	u32			*ctrl_val;
+	u32			new_ctrl;
+	bool			have_new_ctrl;
+};
+
+/**
+ * struct msr_param - set a range of MSRs from a domain
+ * @res:	The resource to use
+ * @low:	Beginning index from base MSR
+ * @high:	End index
+ */
+struct msr_param {
+	struct rdt_resource	*res;
+	int			low;
+	int			high;
+};
+
+/**
  * struct rdt_resource - attributes of an RDT resource
  * @enabled:		Is this feature enabled on this machine
  * @capable:		Is this feature available on this machine
@@ -78,6 +109,16 @@ struct rftype {
  * @data_width:		Character width of data when displaying
  * @min_cbm_bits:	Minimum number of consecutive bits to be set
  *			in a cache bit mask
+ * @msr_update:		Function pointer to update QOS MSRs
+ * @max_delay:		Max throttle delay. Delay is the hardware
+ *			understandable value for memory bandwidth.
+ * @min_bw:		Minimum memory bandwidth percentage user
+ *			can request
+ * @bw_gran:		Granularity at which the memory bandwidth
+ *			is allocated
+ * @delay_linear:	True if memory b/w delay is in linear scale
+ * @mb_map:		Mapping of memory b/w percentage to
+ *			memory b/w delay values
  * @domains:		All domains for this resource
  * @msr_base:		Base MSR address for CBMs
  * @cache_level:	Which cache level defines scope of this domain
@@ -94,6 +135,14 @@ struct rdt_resource {
 	int			min_cbm_bits;
 	u32			default_ctrl;
 	int			data_width;
+	void (*msr_update)	(struct rdt_domain *d, struct msr_param *m,
+				 struct rdt_resource *r);
+	u32			max_delay;
+	u32			min_bw;
+	u32			bw_gran;
+	u32			delay_linear;
+	u32			*mb_map;
+
 	struct list_head	domains;
 	int			msr_base;
 	int			cache_level;
@@ -101,36 +150,6 @@ struct rdt_resource {
 	int			cbm_idx_offset;
 };
 
-/**
- * struct rdt_domain - group of cpus sharing an RDT resource
- * @list:	all instances of this resource
- * @id:		unique id for this instance
- * @cpu_mask:	which cpus share this resource
- * @ctrl_val:	array of cache or mem ctrl values (indexed by CLOSID)
- * @new_ctrl:	new ctrl value to be loaded
- * @have_new_ctrl:	did user provide new_ctrl for this domain
- */
-struct rdt_domain {
-	struct list_head	list;
-	int			id;
-	struct cpumask		cpu_mask;
-	u32			*ctrl_val;
-	u32			new_ctrl;
-	bool			have_new_ctrl;
-};
-
-/**
- * struct msr_param - set a range of MSRs from a domain
- * @res:	The resource to use
- * @low:	Beginning index from base MSR
- * @high:	End index
- */
-struct msr_param {
-	struct rdt_resource	*res;
-	int			low;
-	int			high;
-};
-
 extern struct mutex rdtgroup_mutex;
 
 extern struct rdt_resource rdt_resources_all[];
@@ -144,6 +163,7 @@ enum {
 	RDT_RESOURCE_L3DATA,
 	RDT_RESOURCE_L3CODE,
 	RDT_RESOURCE_L2,
+	RDT_RESOURCE_MBA,
 
 	/* Must be the last */
 	RDT_NUM_RESOURCES,
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index c4cf2e8..be272b9 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -32,6 +32,9 @@
 #include <asm/intel-family.h>
 #include <asm/intel_rdt.h>
 
+#define MAX_MBA_BW	100u
+#define MBA_IS_LINEAR	0x4
+
 /* Mutex to protect rdtgroup access. */
 DEFINE_MUTEX(rdtgroup_mutex);
 
@@ -45,11 +48,17 @@
  */
 int max_name_width, max_data_width;
 
+static void
+mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
+static void
+cqm_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
+
 struct rdt_resource rdt_resources_all[] = {
 	{
 		.name		= "L3",
 		.domains	= domain_init(RDT_RESOURCE_L3),
 		.msr_base	= IA32_L3_CBM_BASE,
+		.msr_update	= cqm_wrmsr,
 		.min_cbm_bits	= 1,
 		.cache_level	= 3,
 		.cbm_idx_multi	= 1,
@@ -59,6 +68,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.name		= "L3DATA",
 		.domains	= domain_init(RDT_RESOURCE_L3DATA),
 		.msr_base	= IA32_L3_CBM_BASE,
+		.msr_update	= cqm_wrmsr,
 		.min_cbm_bits	= 1,
 		.cache_level	= 3,
 		.cbm_idx_multi	= 2,
@@ -68,6 +78,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.name		= "L3CODE",
 		.domains	= domain_init(RDT_RESOURCE_L3CODE),
 		.msr_base	= IA32_L3_CBM_BASE,
+		.msr_update	= cqm_wrmsr,
 		.min_cbm_bits	= 1,
 		.cache_level	= 3,
 		.cbm_idx_multi	= 2,
@@ -77,11 +88,21 @@ struct rdt_resource rdt_resources_all[] = {
 		.name		= "L2",
 		.domains	= domain_init(RDT_RESOURCE_L2),
 		.msr_base	= IA32_L2_CBM_BASE,
+		.msr_update	= cqm_wrmsr,
 		.min_cbm_bits	= 1,
 		.cache_level	= 2,
 		.cbm_idx_multi	= 1,
 		.cbm_idx_offset	= 0
 	},
+	{
+		.name		= "MB",
+		.domains	= domain_init(RDT_RESOURCE_MBA),
+		.msr_base	= IA32_MBA_THRTL_BASE,
+		.msr_update	= mba_wrmsr,
+		.cache_level	= 3,
+		.cbm_idx_multi	= 1,
+		.cbm_idx_offset	= 0
+	},
 };
 
 static int cbm_idx(struct rdt_resource *r, int closid)
@@ -136,6 +157,53 @@ static inline bool cache_alloc_hsw_probe(void)
 	return false;
 }
 
+/*
+ * rdt_get_mb_table() - get a mapping between the bandwidth (b/w)
+ * percentage values exposed to the user interface and the h/w
+ * understandable delay values.
+ *
+ * The non-linear delay values have a granularity of powers of two,
+ * and the h/w does not guarantee a curve for configured delay
+ * values vs. actual b/w enforced.
+ * Hence we need a mapping that is pre-calibrated so the user can
+ * express the memory b/w as a percentage value.
+ */
+static inline bool rdt_get_mb_table(struct rdt_resource *r)
+{
+	/*
+	 * There are no Intel SKUs as of now to support non-linear delay.
+	 */
+	pr_info("MBA b/w map not implemented for cpu:%d, model:%d\n",
+		boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+	return false;
+}
+
+static bool rdt_get_mem_config(struct rdt_resource *r)
+{
+	union cpuid_0x10_3_eax eax;
+	union cpuid_0x10_x_edx edx;
+	u32 ebx, ecx;
+
+	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
+	r->num_closid = edx.split.cos_max + 1;
+	r->max_delay = eax.split.max_delay + 1;
+	r->default_ctrl = MAX_MBA_BW;
+	if (ecx & MBA_IS_LINEAR) {
+		r->delay_linear = true;
+		r->min_bw = MAX_MBA_BW - r->max_delay;
+		r->bw_gran = MAX_MBA_BW - r->max_delay;
+	} else {
+		if (!rdt_get_mb_table(r))
+			return false;
+	}
+	r->data_width = 3;
+
+	r->capable = true;
+	r->enabled = true;
+
+	return true;
+}
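+
+/*
+ * [Editor's illustration, not from the original patch: on a linear-scale
+ * part that enumerates a maximum throttling value of 90, the above gives
+ * min_bw = bw_gran = 100 - 90 = 10, i.e. the user requests bandwidth in
+ * 10% steps from 10% to 100%.]
+ */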
+
 static void rdt_get_cache_config(int idx, struct rdt_resource *r)
 {
 	union cpuid_0x10_1_eax eax;
@@ -212,7 +280,8 @@ static inline bool get_rdt_resources(void)
 		ret = true;
 	}
 
-	if (boot_cpu_has(X86_FEATURE_MBA))
+	if (boot_cpu_has(X86_FEATURE_MBA) &&
+	    rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
 		ret = true;
 
 	rdt_init_padding();
@@ -233,6 +302,47 @@ static int get_cache_id(int cpu, int level)
 	return -1;
 }
 
+/*
+ * Map the memory b/w percentage value to delay values
+ * that can be written to QOS_MSRs.
+ * There are currently no SKUs which support non-linear delay values.
+ */
+static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
+{
+	if (r->delay_linear)
+		return MAX_MBA_BW - bw;
+
+	WARN_ONCE(1, "Non-linear delay-bw map not supported but queried\n");
+	return r->default_ctrl;
+}
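+
+/*
+ * [Editor's illustration, not from the original patch: with a linear
+ * delay scale, a request for 30% bandwidth maps to a throttle value of
+ * MAX_MBA_BW - 30 = 70 written to the QOS MSR, and 100% maps to 0,
+ * i.e. no throttling.]
+ */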
+
+static void
+mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+{
+	int i;
+
+	for (i = m->low; i < m->high; i++) {
+		int idx = cbm_idx(r, i);
+
+		/*
+		 * Write the delay value for mba.
+		 */
+		wrmsrl(r->msr_base + idx, delay_bw_map(d->ctrl_val[i], r));
+	}
+}
+
+static void
+cqm_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+{
+	int i;
+
+	for (i = m->low; i < m->high; i++) {
+		int idx = cbm_idx(r, i);
+
+		wrmsrl(r->msr_base + idx, d->ctrl_val[i]);
+	}
+}
+
 void rdt_ctrl_update(void *arg)
 {
 	struct msr_param *m = (struct msr_param *)arg;
@@ -291,6 +401,33 @@ static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
 	return NULL;
 }
 
+static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
+{
+	struct msr_param m;
+	u32 *dc;
+	int i;
+
+	dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
+	if (!dc)
+		return -ENOMEM;
+
+	d->ctrl_val = dc;
+
+	/*
+	 * Initialize the Control MSRs to having no control.
+	 * For Cache Allocation: Set all bits in cbm
+	 * For Memory Allocation: Set b/w requested to 100
+	 */
+	for (i = 0; i < r->num_closid; i++, dc++)
+		*dc = r->default_ctrl;
+
+	m.low = 0;
+	m.high = r->num_closid;
+	r->msr_update(d, &m, r);
+
+	return 0;
+}
+
 /*
  * domain_add_cpu - Add a cpu to a resource's domain list.
  *
@@ -306,7 +443,7 @@ static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
  */
 static void domain_add_cpu(int cpu, struct rdt_resource *r)
 {
-	int i, id = get_cache_id(cpu, r->cache_level);
+	int id = get_cache_id(cpu, r->cache_level);
 	struct list_head *add_pos = NULL;
 	struct rdt_domain *d;
 
@@ -327,19 +464,11 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 
 	d->id = id;
 
-	d->ctrl_val = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
-	if (!d->ctrl_val) {
+	if (domain_setup_ctrlval(r, d)) {
 		kfree(d);
 		return;
 	}
 
-	for (i = 0; i < r->num_closid; i++) {
-		int idx = cbm_idx(r, i);
-
-		d->ctrl_val[i] = r->default_ctrl;
-		wrmsrl(r->msr_base + idx, d->ctrl_val[i]);
-	}
-
 	cpumask_set_cpu(cpu, &d->cpu_mask);
 	list_add_tail(&d->list, add_pos);
 }
-- 
1.9.1