From: Vikas Shivappa <vikas.shivappa@linux.intel.com>
To: vikas.shivappa@intel.com
Cc: vikas.shivappa@linux.intel.com, x86@kernel.org, linux-kernel@vger.kernel.org,
	hpa@zytor.com, tglx@linutronix.de, mingo@kernel.org, peterz@infradead.org,
	tj@kernel.org, matt.fleming@intel.com, will.auld@intel.com,
	kanaka.d.juvva@intel.com, glenn.p.williamson@intel.com, mtosatti@redhat.com
Subject: [PATCH 5/6] x86/intel_rdt: Class of service management for code data prioritization
Date: Sun, 23 Aug 2015 15:46:41 -0700
Message-Id: <1440370002-30618-6-git-send-email-vikas.shivappa@linux.intel.com>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1440370002-30618-1-git-send-email-vikas.shivappa@linux.intel.com>
References: <1440370002-30618-1-git-send-email-vikas.shivappa@linux.intel.com>

Add support to manage the CLOSid (class of service id) for code data
prioritization (CDP). This covers allocating and freeing CLOSids as well
as taking and dropping references on them with closid_get and closid_put.

During mount, if the mode is switched between CDP and cache allocation
only, all CLOSids are freed. When a new cgroup is created, it inherits
its parent's CLOSid under CDP, just as it does under cache allocation.
---
 arch/x86/kernel/cpu/intel_rdt.c | 127 +++++++++++++++++++++++++---------------
 1 file changed, 81 insertions(+), 46 deletions(-)

diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 155ac51..285db1e 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -166,6 +166,85 @@ static void closcbm_map_dump(void)
 	}
 }
 
+static void closid_map_init(void)
+{
+	u32 maxid = boot_cpu_data.x86_cache_max_closid;
+
+	bitmap_zero(rdtss_info.closmap, maxid);
+}
+
+static inline void closid_get(u32 closid)
+{
+	lockdep_assert_held(&rdt_group_mutex);
+
+	if (!rdtss_info.cdp_enable)
+		cat_cm_map[closid].clos_refcnt++;
+	else
+		cdp_cm_map[closid].clos_refcnt++;
+}
+
+static int closid_alloc(struct intel_rdt *ir)
+{
+	u32 maxid;
+	u32 id;
+
+	lockdep_assert_held(&rdt_group_mutex);
+
+	maxid = boot_cpu_data.x86_cache_max_closid;
+	id = find_next_zero_bit(rdtss_info.closmap, maxid, 0);
+	if (id == maxid)
+		return -ENOSPC;
+
+	set_bit(id, rdtss_info.closmap);
+	closid_get(id);
+	ir->closid = id;
+
+	return 0;
+}
+
+static inline void closid_free(u32 closid)
+{
+	clear_bit(closid, rdtss_info.closmap);
+	if (!rdtss_info.cdp_enable) {
+		cat_cm_map[closid].cache_mask = 0;
+	} else {
+		cdp_cm_map[closid].dcache_mask = 0;
+		cdp_cm_map[closid].icache_mask = 0;
+	}
+}
+
+static inline void closid_cat_put(u32 closid)
+{
+	struct cat_clos_mask_map *ccm = &cat_cm_map[closid];
+
+	lockdep_assert_held(&rdt_group_mutex);
+	if (WARN_ON(!ccm->clos_refcnt))
+		return;
+
+	if (!--ccm->clos_refcnt)
+		closid_free(closid);
+}
+
+static inline void closid_cdp_put(u32 closid)
+{
+	struct cdp_clos_mask_map *ccm = &cdp_cm_map[closid];
+
+	lockdep_assert_held(&rdt_group_mutex);
+	if (WARN_ON(!ccm->clos_refcnt))
+		return;
+
+	if (!--ccm->clos_refcnt)
+		closid_free(closid);
+}
+
+static inline void closid_put(u32 closid)
+{
+	if (!rdtss_info.cdp_enable)
+		closid_cat_put(closid);
+	else
+		closid_cdp_put(closid);
+}
+
 static void cdp_cm_map_reset(int maxid, unsigned long max_cbm_mask)
 {
 	size_t sizeb;
@@ -266,6 +345,8 @@ static void rdt_css_mount(void* info)
 	else
 		cdp_disable();
 
+	closid_map_init();
+
 	rdtss_info.cdp_enable = enable_cdp;
 	mutex_unlock(&rdt_group_mutex);
 }
@@ -288,52 +369,6 @@ static inline void rdt_cdp_init(int cdp_maxid, unsigned long max_cbm_mask)
 	rdtss_info.cdp_supported = true;
 }
 
-static inline void closid_get(u32 closid)
-{
-	struct cat_clos_mask_map *ccm = &cat_cm_map[closid];
-
-	lockdep_assert_held(&rdt_group_mutex);
-
-	ccm->clos_refcnt++;
-}
-
-static int closid_alloc(struct intel_rdt *ir)
-{
-	u32 maxid;
-	u32 id;
-
-	lockdep_assert_held(&rdt_group_mutex);
-
-	maxid = boot_cpu_data.x86_cache_max_closid;
-	id = find_next_zero_bit(rdtss_info.closmap, maxid, 0);
-	if (id == maxid)
-		return -ENOSPC;
-
-	set_bit(id, rdtss_info.closmap);
-	closid_get(id);
-	ir->closid = id;
-
-	return 0;
-}
-
-static inline void closid_free(u32 closid)
-{
-	clear_bit(closid, rdtss_info.closmap);
-	cat_cm_map[closid].cache_mask = 0;
-}
-
-static inline void closid_put(u32 closid)
-{
-	struct cat_clos_mask_map *ccm = &cat_cm_map[closid];
-
-	lockdep_assert_held(&rdt_group_mutex);
-	if (WARN_ON(!ccm->clos_refcnt))
-		return;
-
-	if (!--ccm->clos_refcnt)
-		closid_free(closid);
-}
-
 void __intel_rdt_sched_in(void)
 {
 	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
-- 
1.9.1
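
[Editor's note, not part of the patch] For readers following the CLOSid
bookkeeping above, here is a minimal, self-contained user-space sketch of the
same bitmap-plus-refcount lifecycle (closid_alloc -> closid_get -> closid_put
-> closid_free). Every name and size in it is a simplified stand-in, not the
kernel's: the real code keeps the bitmap in rdtss_info.closmap, keeps the
refcounts and masks in cat_cm_map[]/cdp_cm_map[] depending on cdp_enable, and
does all of this under rdt_group_mutex.

/*
 * Simplified CLOSid lifecycle sketch: a bitmap tracks which ids are in use,
 * and a per-id refcount decides when an id goes back to the bitmap.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_CLOSID 16			/* stand-in for x86_cache_max_closid */

static unsigned long closmap;		/* bit i set => CLOSid i is allocated */
static unsigned int clos_refcnt[MAX_CLOSID];

static void closid_get(unsigned int closid)
{
	clos_refcnt[closid]++;
}

static void closid_free(unsigned int closid)
{
	closmap &= ~(1UL << closid);	/* clear_bit() equivalent */
}

static void closid_put(unsigned int closid)
{
	if (!clos_refcnt[closid])
		return;			/* WARN_ON() in the kernel version */
	if (!--clos_refcnt[closid])
		closid_free(closid);
}

static int closid_alloc(unsigned int *out)
{
	unsigned int id;

	/* find_next_zero_bit() equivalent: first free id, if any */
	for (id = 0; id < MAX_CLOSID; id++)
		if (!(closmap & (1UL << id)))
			break;
	if (id == MAX_CLOSID)
		return -ENOSPC;

	closmap |= 1UL << id;		/* set_bit() equivalent */
	closid_get(id);
	*out = id;
	return 0;
}

int main(void)
{
	unsigned int id;

	if (!closid_alloc(&id)) {
		closid_get(id);		/* e.g. a child cgroup inheriting the CLOSid */
		closid_put(id);		/* child goes away: refcount 2 -> 1 */
		closid_put(id);		/* last user: CLOSid returns to the bitmap */
		printf("closid %u allocated and released\n", id);
	}
	return 0;
}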