From: Keith Busch <keith.busch@intel.com>
To: linux-kernel@vger.kernel.org, linux-acpi@vger.kernel.org, Rafael Wysocki
Cc: Dan Williams, Dave Hansen, Brice Goglin, Keith Busch
Subject: [PATCHv2 1/2] hmat: Register memory-side cache after parsing
Date: Wed, 15 May 2019 15:54:43 -0600
Message-Id: <20190515215444.22256-1-keith.busch@intel.com>

Instead of registering the hmat cache attributes in line with parsing
the table, save the attributes in the memory target and register them
after parsing completes. This will make it easier to register the
attributes later when hot add is supported.

Signed-off-by: Keith Busch <keith.busch@intel.com>
---
v1 -> v2:

  Fixed handling of multi-level caches and of targets with no cache;
  v1 incorrectly assumed a level 1 cache always existed (Brice).
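Not part of the patch, but for readers who want the pattern in
isolation: the parser now only records each cache's attributes on a
per-target list, and a separate pass registers everything once the
whole table has been parsed. Below is a minimal standalone sketch of
that shape in plain userspace C; the names (parse_cache,
register_target, and so on) are illustrative only, and a hand-rolled
singly linked list stands in for the kernel's <linux/list.h> API used
in the diff.

	#include <stdio.h>
	#include <stdlib.h>

	struct cache_attrs {
		unsigned int level;
		unsigned long long size;
	};

	/* Plays the role of struct target_cache: one node per cache level. */
	struct cache_entry {
		struct cache_attrs attrs;
		struct cache_entry *next;
	};

	/* Plays the role of struct memory_target: owns its parsed caches. */
	struct target {
		struct cache_entry *caches;
	};

	/* Parse phase: record the attributes, register nothing yet. */
	static int parse_cache(struct target *t, unsigned int level,
			       unsigned long long size)
	{
		struct cache_entry *e = calloc(1, sizeof(*e));

		if (!e)
			return -1;
		e->attrs.level = level;
		e->attrs.size = size;
		e->next = t->caches;
		t->caches = e;
		return 0;
	}

	/*
	 * Register phase: runs after parsing completes, so the same walk
	 * can be repeated later for a target that only appears on hot add.
	 */
	static void register_target(struct target *t)
	{
		struct cache_entry *e;

		for (e = t->caches; e; e = e->next)
			printf("register L%u cache: %llu bytes\n",
			       e->attrs.level, e->attrs.size);
	}

	static void free_target(struct target *t)
	{
		struct cache_entry *e, *next;

		for (e = t->caches; e; e = next) {
			next = e->next;
			free(e);
		}
		t->caches = NULL;
	}

	int main(void)
	{
		struct target t = { .caches = NULL };

		/* "Parse" two cache levels for one memory target. */
		parse_cache(&t, 1, 1ULL << 30);
		parse_cache(&t, 2, 4ULL << 30);

		/* Register only after all table entries have been seen. */
		register_target(&t);
		free_target(&t);
		return 0;
	}

The kernel version keeps the same split but additionally skips offline
nodes in hmat_register_target(), which is what makes the deferred
registration useful for hot add.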
 drivers/acpi/hmat/hmat.c | 70 +++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 55 insertions(+), 15 deletions(-)

diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
index 96b7d39a97c6..bf23c9a27958 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/hmat/hmat.c
@@ -36,11 +36,17 @@ enum locality_types {
 
 static struct memory_locality *localities_types[4];
 
+struct target_cache {
+	struct list_head node;
+	struct node_cache_attrs cache_attrs;
+};
+
 struct memory_target {
 	struct list_head node;
 	unsigned int memory_pxm;
 	unsigned int processor_pxm;
 	struct node_hmem_attrs hmem_attrs;
+	struct list_head caches;
 };
 
 struct memory_initiator {
@@ -110,6 +116,7 @@ static __init void alloc_memory_target(unsigned int mem_pxm)
 	target->memory_pxm = mem_pxm;
 	target->processor_pxm = PXM_INVAL;
 	list_add_tail(&target->node, &targets);
+	INIT_LIST_HEAD(&target->caches);
 }
 
 static __init const char *hmat_data_type(u8 type)
@@ -314,7 +321,8 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
 				   const unsigned long end)
 {
 	struct acpi_hmat_cache *cache = (void *)header;
-	struct node_cache_attrs cache_attrs;
+	struct memory_target *target;
+	struct target_cache *tcache;
 	u32 attrs;
 
 	if (cache->header.length < sizeof(*cache)) {
@@ -328,37 +336,47 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
 		cache->memory_PD, cache->cache_size, attrs,
 		cache->number_of_SMBIOShandles);
 
-	cache_attrs.size = cache->cache_size;
-	cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
-	cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
+	target = find_mem_target(cache->memory_PD);
+	if (!target)
+		return 0;
+
+	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
+	if (!tcache) {
+		pr_notice_once("Failed to allocate HMAT cache info\n");
+		return 0;
+	}
+
+	tcache->cache_attrs.size = cache->cache_size;
+	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
+	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
 
 	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
 	case ACPI_HMAT_CA_DIRECT_MAPPED:
-		cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
 		break;
 	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
-		cache_attrs.indexing = NODE_CACHE_INDEXED;
+		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
 		break;
 	case ACPI_HMAT_CA_NONE:
 	default:
-		cache_attrs.indexing = NODE_CACHE_OTHER;
+		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
 		break;
 	}
 
 	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
 	case ACPI_HMAT_CP_WB:
-		cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
+		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
 		break;
 	case ACPI_HMAT_CP_WT:
-		cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
+		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
 		break;
 	case ACPI_HMAT_CP_NONE:
 	default:
-		cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
+		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
 		break;
 	}
+	list_add_tail(&tcache->node, &target->caches);
 
-	node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs);
 	return 0;
 }
 
@@ -577,20 +595,37 @@ static __init void hmat_register_target_initiators(struct memory_target *target)
 	}
 }
 
+static __init void hmat_register_target_cache(struct memory_target *target)
+{
+	unsigned mem_nid = pxm_to_node(target->memory_pxm);
+	struct target_cache *tcache;
+
+	list_for_each_entry(tcache, &target->caches, node)
+		node_add_cache(mem_nid, &tcache->cache_attrs);
+}
+
 static __init void hmat_register_target_perf(struct memory_target *target)
 {
 	unsigned mem_nid = pxm_to_node(target->memory_pxm);
 	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
 }
 
+static __init void hmat_register_target(struct memory_target *target)
+{
+	if (!node_online(pxm_to_node(target->memory_pxm)))
+		return;
+
+	hmat_register_target_initiators(target);
+	hmat_register_target_cache(target);
+	hmat_register_target_perf(target);
+}
+
 static __init void hmat_register_targets(void)
 {
 	struct memory_target *target;
 
-	list_for_each_entry(target, &targets, node) {
-		hmat_register_target_initiators(target);
-		hmat_register_target_perf(target);
-	}
+	list_for_each_entry(target, &targets, node)
+		hmat_register_target(target);
 }
 
 static __init void hmat_free_structures(void)
@@ -598,8 +633,13 @@ static __init void hmat_free_structures(void)
 	struct memory_target *target, *tnext;
 	struct memory_locality *loc, *lnext;
 	struct memory_initiator *initiator, *inext;
+	struct target_cache *tcache, *cnext;
 
 	list_for_each_entry_safe(target, tnext, &targets, node) {
+		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
+			list_del(&tcache->node);
+			kfree(tcache);
+		}
 		list_del(&target->node);
 		kfree(target);
 	}
-- 
2.14.4