2009-06-22 14:39:52

by Pekka Enberg

[permalink] [raw]
Subject: [PATCH] x86: move init_gbpages() to setup_arch()

From: Pekka Enberg <[email protected]>

The init_gbpages() function is called from the init_memory_mapping() function
only when the 'after_bootmem' flag is not yet set. init_memory_mapping() has
two call-sites: setup_arch() and mem_init() via pci_iommu_alloc().

Therefore, it's safe to move the call to init_gbpages() to setup_arch(),
because setup_arch() always runs before mem_init().

Cc: Yinghai Lu <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>
---
arch/x86/kernel/setup.c | 16 ++++++++++++++++
arch/x86/mm/init.c | 17 -----------------
2 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index be5ae80..de2cab1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -289,6 +289,20 @@ void * __init extend_brk(size_t size, size_t align)
return ret;
}

+#ifdef CONFIG_X86_64
+static void __init init_gbpages(void)
+{
+ if (direct_gbpages && cpu_has_gbpages)
+ printk(KERN_INFO "Using GB pages for direct mapping\n");
+ else
+ direct_gbpages = 0;
+}
+#else
+static inline void init_gbpages(void)
+{
+}
+#endif
+
static void __init reserve_brk(void)
{
if (_brk_end > _brk_start)
@@ -871,6 +885,8 @@ void __init setup_arch(char **cmdline_p)

reserve_brk();

+ init_gbpages();
+
/* max_pfn_mapped is updated here */
max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
max_pfn_mapped = max_low_pfn_mapped;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index f53b57e..47ce9a2 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -177,20 +177,6 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
return nr_range;
}

-#ifdef CONFIG_X86_64
-static void __init init_gbpages(void)
-{
- if (direct_gbpages && cpu_has_gbpages)
- printk(KERN_INFO "Using GB pages for direct mapping\n");
- else
- direct_gbpages = 0;
-}
-#else
-static inline void init_gbpages(void)
-{
-}
-#endif
-
/*
* Setup the direct mapping of the physical memory at PAGE_OFFSET.
* This runs before bootmem is initialized and gets pages directly from
@@ -210,9 +196,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,

printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);

- if (!after_bootmem)
- init_gbpages();
-
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
/*
* For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
--
1.6.0.4


2009-06-22 15:47:33

by Yinghai Lu

[permalink] [raw]
Subject: Re: [PATCH] x86: move init_gbpages() to setup_arch()

Pekka J Enberg wrote:
> From: Pekka Enberg <[email protected]>
>
> The init_gbpages() function is conditionally called from init_memory_mapping()
> function. There are two call-sites where this 'after_bootmem' condition can be
> true: setup_arch() and mem_init() via pci_iommu_alloc().
>
> Therefore, it's safe to move the call to init_gbpages() to setup_arch() as it's
> always called before mem_init().
>
> Cc: Yinghai Lu <[email protected]>
> Signed-off-by: Pekka Enberg <[email protected]>
> ---
> arch/x86/kernel/setup.c | 16 ++++++++++++++++
> arch/x86/mm/init.c | 17 -----------------
> 2 files changed, 16 insertions(+), 17 deletions(-)
>
> diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
> index be5ae80..de2cab1 100644
> --- a/arch/x86/kernel/setup.c
> +++ b/arch/x86/kernel/setup.c
> @@ -289,6 +289,20 @@ void * __init extend_brk(size_t size, size_t align)
> return ret;
> }
>
> +#ifdef CONFIG_X86_64
> +static void __init init_gbpages(void)
> +{
> + if (direct_gbpages && cpu_has_gbpages)
> + printk(KERN_INFO "Using GB pages for direct mapping\n");
> + else
> + direct_gbpages = 0;
> +}
> +#else
> +static inline void init_gbpages(void)
> +{
> +}
> +#endif
> +
> static void __init reserve_brk(void)
> {
> if (_brk_end > _brk_start)
> @@ -871,6 +885,8 @@ void __init setup_arch(char **cmdline_p)
>
> reserve_brk();
>
> + init_gbpages();
> +
> /* max_pfn_mapped is updated here */
> max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
> max_pfn_mapped = max_low_pfn_mapped;
> diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
> index f53b57e..47ce9a2 100644
> --- a/arch/x86/mm/init.c
> +++ b/arch/x86/mm/init.c
> @@ -177,20 +177,6 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
> return nr_range;
> }
>
> -#ifdef CONFIG_X86_64
> -static void __init init_gbpages(void)
> -{
> - if (direct_gbpages && cpu_has_gbpages)
> - printk(KERN_INFO "Using GB pages for direct mapping\n");
> - else
> - direct_gbpages = 0;
> -}
> -#else
> -static inline void init_gbpages(void)
> -{
> -}
> -#endif
> -
> /*
> * Setup the direct mapping of the physical memory at PAGE_OFFSET.
> * This runs before bootmem is initialized and gets pages directly from
> @@ -210,9 +196,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
>
> printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
>
> - if (!after_bootmem)
> - init_gbpages();
> -
> #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
> /*
> * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.


Acked-by: Yinghai Lu <[email protected]>

YH

2009-06-23 08:52:36

by Pekka Enberg

[permalink] [raw]
Subject: [tip:x86/urgent] x86: Move init_gbpages() to setup_arch()

Commit-ID: 854c879f5abf309ebd378bea1ee41acf4ddf7194
Gitweb: http://git.kernel.org/tip/854c879f5abf309ebd378bea1ee41acf4ddf7194
Author: Pekka J Enberg <[email protected]>
AuthorDate: Mon, 22 Jun 2009 17:39:41 +0300
Committer: Ingo Molnar <[email protected]>
CommitDate: Tue, 23 Jun 2009 10:33:32 +0200

x86: Move init_gbpages() to setup_arch()

The init_gbpages() function is called from the
init_memory_mapping() function only when the 'after_bootmem'
flag is not yet set. init_memory_mapping() has two call-sites:
setup_arch() and mem_init() via pci_iommu_alloc().

Therefore, it's safe to move the call to init_gbpages() to
setup_arch(), because setup_arch() always runs before mem_init().

This removes an after_bootmem use - paving the way to remove
all uses of that state variable.

Signed-off-by: Pekka Enberg <[email protected]>
Acked-by: Yinghai Lu <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>


---
arch/x86/kernel/setup.c | 16 ++++++++++++++++
arch/x86/mm/init.c | 17 -----------------
2 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index be5ae80..de2cab1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -289,6 +289,20 @@ void * __init extend_brk(size_t size, size_t align)
return ret;
}

+#ifdef CONFIG_X86_64
+static void __init init_gbpages(void)
+{
+ if (direct_gbpages && cpu_has_gbpages)
+ printk(KERN_INFO "Using GB pages for direct mapping\n");
+ else
+ direct_gbpages = 0;
+}
+#else
+static inline void init_gbpages(void)
+{
+}
+#endif
+
static void __init reserve_brk(void)
{
if (_brk_end > _brk_start)
@@ -871,6 +885,8 @@ void __init setup_arch(char **cmdline_p)

reserve_brk();

+ init_gbpages();
+
/* max_pfn_mapped is updated here */
max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
max_pfn_mapped = max_low_pfn_mapped;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index f53b57e..47ce9a2 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -177,20 +177,6 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
return nr_range;
}

-#ifdef CONFIG_X86_64
-static void __init init_gbpages(void)
-{
- if (direct_gbpages && cpu_has_gbpages)
- printk(KERN_INFO "Using GB pages for direct mapping\n");
- else
- direct_gbpages = 0;
-}
-#else
-static inline void init_gbpages(void)
-{
-}
-#endif
-
/*
* Setup the direct mapping of the physical memory at PAGE_OFFSET.
* This runs before bootmem is initialized and gets pages directly from
@@ -210,9 +196,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,

printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);

- if (!after_bootmem)
- init_gbpages();
-
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
/*
* For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.