From: Jacob Shin
To: X86-ML
Cc: LKML, "H. Peter Anvin", Yinghai Lu, Tejun Heo, Dave Young, Chao Wang,
 Vivek Goyal, Andreas Herrmann, Borislav Petkov, Jacob Shin
Subject: [PATCH 4/6] x86: Only direct map addresses that are marked as E820_RAM
Date: Wed, 29 Aug 2012 14:04:04 -0500
Message-ID: <1346267046-6724-5-git-send-email-jacob.shin@amd.com>
In-Reply-To: <1346267046-6724-1-git-send-email-jacob.shin@amd.com>
References: <1346267046-6724-1-git-send-email-jacob.shin@amd.com>
X-Mailer: git-send-email 1.7.9.5

Currently direct mappings are created for [ 0 to max_low_pfn<<PAGE_SHIFT )
and [ 4GB to max_pfn<<PAGE_SHIFT ), which may include regions that are not
backed by actual DRAM. That is fine for memory holes below 4GB, which can
be marked as UC by fixed/variable range MTRRs, but memory holes at high
addresses cannot be covered that way.

Instead, iterate through the E820 memory map and create direct mappings
only for regions marked as E820_RAM, keeping track of the resulting pfn
ranges in pfn_mapped[] (a stand-alone model of this bookkeeping follows
the patch below). Depending on the alignment of E820 ranges, this may
possibly result in using smaller size (i.e. 4K instead of 2M or 1G)
page tables.
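As a rough illustration of the new policy (a stand-alone sketch, not part
of the patch; the fake e820 table and all names in it are made up for the
example), the loop below mirrors how init_direct_mapping() skips non-RAM
entries and clamps RAM entries against the always-mapped ISA window:

#include <stdio.h>
#include <stdint.h>

#define ISA_END 0x100000ULL     /* stands in for ISA_END_ADDRESS (1MB) */

enum type { RAM, RESERVED };

struct entry {                  /* simplified stand-in for struct e820entry */
        uint64_t addr, size;
        enum type type;
};

int main(void)
{
        /* fake memory map: low RAM, a hole, RAM up to 3.5GB, high RAM */
        struct entry map[] = {
                { 0x0ULL,         0x9fc00ULL,     RAM },
                { 0x9fc00ULL,     0x60400ULL,     RESERVED },
                { 0x100000ULL,    0xdff00000ULL,  RAM },
                { 0xe0000000ULL,  0x20000000ULL,  RESERVED },
                { 0x100000000ULL, 0x100000000ULL, RAM },
        };
        int i;

        /* the ISA range is always mapped, as in the patch */
        printf("map [%#llx-%#llx)\n", 0ULL, (unsigned long long)ISA_END);

        for (i = 0; i < (int)(sizeof(map) / sizeof(map[0])); i++) {
                uint64_t start = map[i].addr;
                uint64_t end = map[i].addr + map[i].size;

                if (map[i].type != RAM)  /* only map RAM entries */
                        continue;
                if (end <= ISA_END)      /* already covered by the ISA map */
                        continue;
                if (start < ISA_END)     /* clamp to avoid remapping */
                        start = ISA_END;

                printf("map [%#llx-%#llx)\n",
                       (unsigned long long)start, (unsigned long long)end);
        }
        return 0;
}

Note that the reserved hole at [ 0xe0000000 - 0x100000000 ) never gets a
mapping, which is exactly the behavior this patch is after.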
---
 arch/x86/include/asm/page_types.h |    9 ++++
 arch/x86/kernel/setup.c           |  100 +++++++++++++++++++++++++++++++------
 arch/x86/mm/init.c                |    2 +
 arch/x86/mm/init_64.c             |    6 +--
 4 files changed, 99 insertions(+), 18 deletions(-)

diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index e21fdd1..409047a 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -3,6 +3,7 @@
 
 #include <linux/const.h>
 #include <linux/types.h>
+#include <asm/e820.h>
 
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT      12
@@ -40,12 +41,20 @@
 #endif  /* CONFIG_X86_64 */
 
 #ifndef __ASSEMBLY__
+#include <linux/range.h>
 
 extern int devmem_is_allowed(unsigned long pagenr);
 
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
 
+extern struct range pfn_mapped[E820_X_MAX];
+extern int nr_pfn_mapped;
+
+extern void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn);
+extern bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
+extern bool pfn_is_mapped(unsigned long pfn);
+
 static inline phys_addr_t get_max_mapped(void)
 {
         return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d6e8c03..a2e392e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -115,13 +115,47 @@
 #include <asm/prom.h>
 
 /*
- * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
- * The direct mapping extends to max_pfn_mapped, so that we can directly access
- * apertures, ACPI and other tables without having to play with fixmaps.
+ * max_low_pfn_mapped: highest direct mapped pfn under 4GB
+ * max_pfn_mapped: highest direct mapped pfn over 4GB
+ *
+ * The direct mapping only covers E820_RAM regions, so the ranges and gaps are
+ * represented by pfn_mapped
  */
 unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
 
+struct range pfn_mapped[E820_X_MAX];
+int nr_pfn_mapped;
+
+void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
+{
+        nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX,
+                                             nr_pfn_mapped, start_pfn, end_pfn);
+        nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX);
+
+        max_pfn_mapped = max(max_pfn_mapped, end_pfn);
+
+        if (end_pfn <= (1UL << (32 - PAGE_SHIFT)))
+                max_low_pfn_mapped = max(max_low_pfn_mapped, end_pfn);
+}
+
+bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
+{
+        int i;
+
+        for (i = 0; i < nr_pfn_mapped; i++)
+                if ((start_pfn >= pfn_mapped[i].start) &&
+                    (end_pfn <= pfn_mapped[i].end))
+                        return true;
+
+        return false;
+}
+
+bool pfn_is_mapped(unsigned long pfn)
+{
+        return pfn_range_is_mapped(pfn, pfn + 1);
+}
+
 #ifdef CONFIG_DMI
 RESERVE_BRK(dmi_alloc, 65536);
 #endif
@@ -296,6 +330,54 @@ static void __init cleanup_highmap(void)
 }
 #endif
 
+/*
+ * Iterate through E820 memory map and create direct mappings for only E820_RAM
+ * regions. We cannot simply create direct mappings for all pfns from
+ * [0 to max_low_pfn) and [4GB to max_pfn) because of possible memory holes in
+ * high addresses that cannot be marked as UC by fixed/variable range MTRRs.
+ * Depending on the alignment of E820 ranges, this may possibly result in using
+ * smaller size (i.e. 4K instead of 2M or 1G) page tables.
+ */
+static void __init init_direct_mapping(void)
+{
+        int i;
+
+        /* the ISA range is always mapped regardless of memory holes */
+        init_memory_mapping(0, ISA_END_ADDRESS);
+
+        for (i = 0; i < e820.nr_map; i++) {
+                struct e820entry *ei = &e820.map[i];
+                u64 start = ei->addr;
+                u64 end = ei->addr + ei->size;
+
+                /* we only map E820_RAM */
+                if (ei->type != E820_RAM)
+                        continue;
+
+                if (end <= ISA_END_ADDRESS)
+                        continue;
+
+                if (start < ISA_END_ADDRESS)
+                        start = ISA_END_ADDRESS;
+#ifdef CONFIG_X86_32
+                /* on 32 bit, we only map up to max_low_pfn */
+                if ((start >> PAGE_SHIFT) >= max_low_pfn)
+                        continue;
+
+                if ((end >> PAGE_SHIFT) > max_low_pfn)
+                        end = max_low_pfn << PAGE_SHIFT;
+#endif
+                init_memory_mapping(start, end);
+        }
+
+#ifdef CONFIG_X86_64
+        if (max_pfn > max_low_pfn) {
+                /* can we preserve max_low_pfn? */
+                max_low_pfn = max_pfn;
+        }
+#endif
+}
+
 static void __init reserve_brk(void)
 {
         if (_brk_end > _brk_start)
@@ -914,18 +996,8 @@ void __init setup_arch(char **cmdline_p)
 
         init_gbpages();
         probe_page_size_mask();
 
-        /* max_pfn_mapped is updated here */
-        max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
-        max_pfn_mapped = max_low_pfn_mapped;
-#ifdef CONFIG_X86_64
-        if (max_pfn > max_low_pfn) {
-                max_pfn_mapped = init_memory_mapping(1UL<<32,
-                                                     max_pfn<<PAGE_SHIFT);
-                /* can we preseve max_low_pfn ?*/
-                max_low_pfn = max_pfn;
-        }
-#endif
+        init_direct_mapping();
 
         memblock.current_limit = get_max_mapped();
         dma_contiguous_reserve(0);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -301,6 +301,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
         if (!after_bootmem)
                 early_memtest(start, end);
 
+        add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
+
         return ret >> PAGE_SHIFT;
 }
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2b6b4a3..ab558eb 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -657,13 +657,11 @@ int arch_add_memory(int nid, u64 start, u64 size)
 {
         struct pglist_data *pgdat = NODE_DATA(nid);
         struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
-        unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
+        unsigned long start_pfn = start >> PAGE_SHIFT;
         unsigned long nr_pages = size >> PAGE_SHIFT;
         int ret;
 
-        last_mapped_pfn = init_memory_mapping(start, start + size);
-        if (last_mapped_pfn > max_pfn_mapped)
-                max_pfn_mapped = last_mapped_pfn;
+        init_memory_mapping(start, start + size);
 
         ret = __add_pages(nid, zone, start_pfn, nr_pages);
         WARN_ON_ONCE(ret);
-- 
1.7.9.5
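For readers who want to experiment with the pfn_mapped bookkeeping that the
setup.c hunk introduces, here is a stand-alone user-space model of
add_pfn_range_mapped()/pfn_range_is_mapped(). It substitutes a simple
in-place merge for the kernel's add_range_with_merge()/clean_sort_range()
helpers, so treat it as a sketch of the idea rather than the patch itself:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };

static struct range mapped[16];
static int nr_mapped;

/* record [start_pfn, end_pfn) as mapped, merging overlapping entries */
static void add_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
        int i, j;

        for (i = 0; i < nr_mapped; i++) {
                if (start_pfn > mapped[i].end || end_pfn < mapped[i].start)
                        continue;       /* disjoint, keep looking */
                /* overlapping or adjacent: absorb the entry and drop it */
                if (mapped[i].start < start_pfn)
                        start_pfn = mapped[i].start;
                if (mapped[i].end > end_pfn)
                        end_pfn = mapped[i].end;
                for (j = i; j < nr_mapped - 1; j++)
                        mapped[j] = mapped[j + 1];
                nr_mapped--;
                i--;            /* re-check the slot we just compacted */
        }
        mapped[nr_mapped].start = start_pfn;
        mapped[nr_mapped].end = end_pfn;
        nr_mapped++;
}

/* true only if the whole range lies inside one mapped entry */
static bool range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
        int i;

        for (i = 0; i < nr_mapped; i++)
                if (start_pfn >= mapped[i].start && end_pfn <= mapped[i].end)
                        return true;
        return false;
}

int main(void)
{
        add_mapped(0, 0x100);           /* ISA range */
        add_mapped(0x100, 0xe0000);     /* low RAM, merges with ISA range */
        add_mapped(0x100000, 0x200000); /* high RAM beyond a hole */

        printf("%d\n", range_is_mapped(0x50, 0x1000));      /* 1 */
        printf("%d\n", range_is_mapped(0xd0000, 0x120000)); /* 0: spans hole */
        return 0;
}

The second query fails because the hole between pfn 0xe0000 and 0x100000 was
never added, matching how the kernel would now refuse to treat such a range
as direct mapped.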