2010-02-24 15:04:50

by Pekka Enberg

Subject: [RFT/PATCH] x86: Unify kernel_physical_mapping_init() API

From: Pekka Enberg <[email protected]>

This patch changes the 32-bit version of kernel_physical_mapping_init() to
return the last mapped address like the 64-bit one so that we can unify the
call-site in init_memory_mapping().

Cc: Yinghai Lu <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>
---
Note: I have only tested this on VirtualBox which is why I tagged the patch as
RFT.

arch/x86/mm/init.c | 7 -------
arch/x86/mm/init_32.c | 8 +++++---
2 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index d406c52..e71c5cb 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -266,16 +266,9 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	if (!after_bootmem)
 		find_early_table_space(end, use_pse, use_gbpages);
 
-#ifdef CONFIG_X86_32
-	for (i = 0; i < nr_range; i++)
-		kernel_physical_mapping_init(mr[i].start, mr[i].end,
-					     mr[i].page_size_mask);
-	ret = end;
-#else /* CONFIG_X86_64 */
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
 						   mr[i].page_size_mask);
-#endif
 
 #ifdef CONFIG_X86_32
 	early_ioremap_page_table_range_init();
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 9a0c258..2226f2c 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -241,6 +241,7 @@ kernel_physical_mapping_init(unsigned long start,
 			     unsigned long page_size_mask)
 {
 	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
+	unsigned long last_map_addr = end;
 	unsigned long start_pfn, end_pfn;
 	pgd_t *pgd_base = swapper_pg_dir;
 	int pgd_idx, pmd_idx, pte_ofs;
@@ -341,9 +342,10 @@ repeat:
 					prot = PAGE_KERNEL_EXEC;
 
 				pages_4k++;
-				if (mapping_iter == 1)
+				if (mapping_iter == 1) {
 					set_pte(pte, pfn_pte(pfn, init_prot));
-				else
+					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
+				} else
 					set_pte(pte, pfn_pte(pfn, prot));
 			}
 		}
@@ -368,7 +370,7 @@ repeat:
 		mapping_iter = 2;
 		goto repeat;
 	}
-	return 0;
+	return last_map_addr;
 }
 
 pte_t *kmap_pte;
--
1.5.6.4


2010-02-25 23:52:16

by Pekka Enberg

Subject: [tip:x86/mm] x86, mm: Unify kernel_physical_mapping_init() API

Commit-ID: c1fd1b43831fa20c91cdd461342af8edf2e87c2f
Gitweb: http://git.kernel.org/tip/c1fd1b43831fa20c91cdd461342af8edf2e87c2f
Author: Pekka Enberg <[email protected]>
AuthorDate: Wed, 24 Feb 2010 17:04:47 +0200
Committer: H. Peter Anvin <[email protected]>
CommitDate: Thu, 25 Feb 2010 15:15:21 -0800

x86, mm: Unify kernel_physical_mapping_init() API

This patch changes the 32-bit version of kernel_physical_mapping_init() to
return the last mapped address like the 64-bit one so that we can unify the
call-site in init_memory_mapping().

Cc: Yinghai Lu <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: H. Peter Anvin <[email protected]>
---
arch/x86/mm/init.c | 7 -------
arch/x86/mm/init_32.c | 8 +++++---
2 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index d406c52..e71c5cb 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -266,16 +266,9 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	if (!after_bootmem)
 		find_early_table_space(end, use_pse, use_gbpages);
 
-#ifdef CONFIG_X86_32
-	for (i = 0; i < nr_range; i++)
-		kernel_physical_mapping_init(mr[i].start, mr[i].end,
-					     mr[i].page_size_mask);
-	ret = end;
-#else /* CONFIG_X86_64 */
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
 						   mr[i].page_size_mask);
-#endif
 
 #ifdef CONFIG_X86_32
 	early_ioremap_page_table_range_init();
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 9a0c258..2226f2c 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -241,6 +241,7 @@ kernel_physical_mapping_init(unsigned long start,
 			     unsigned long page_size_mask)
 {
 	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
+	unsigned long last_map_addr = end;
 	unsigned long start_pfn, end_pfn;
 	pgd_t *pgd_base = swapper_pg_dir;
 	int pgd_idx, pmd_idx, pte_ofs;
@@ -341,9 +342,10 @@ repeat:
 					prot = PAGE_KERNEL_EXEC;
 
 				pages_4k++;
-				if (mapping_iter == 1)
+				if (mapping_iter == 1) {
 					set_pte(pte, pfn_pte(pfn, init_prot));
-				else
+					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
+				} else
 					set_pte(pte, pfn_pte(pfn, prot));
 			}
 		}
@@ -368,7 +370,7 @@ repeat:
 		mapping_iter = 2;
 		goto repeat;
 	}
-	return 0;
+	return last_map_addr;
}
 
 pte_t *kmap_pte;

2010-02-26 00:50:16

by Kamezawa Hiroyuki

Subject: Re: [RFT/PATCH] x86: Unify kernel_physical_mapping_init() API

On Wed, 24 Feb 2010 17:04:47 +0200 (EET)
Pekka J Enberg <[email protected]> wrote:

> From: Pekka Enberg <[email protected]>
>
> This patch changes the 32-bit version of kernel_physical_mapping_init() to
> return the last mapped address like the 64-bit one so that we can unify the
> call-site in init_memory_mapping().
>

I'm sorry if I'm not following the logic correctly..


> Cc: Yinghai Lu <[email protected]>
> Cc: KAMEZAWA Hiroyuki <[email protected]>
> Signed-off-by: Pekka Enberg <[email protected]>
> ---
> Note: I have only tested this on VirtualBox which is why I tagged the patch as
> RFT.
>
> arch/x86/mm/init.c | 7 -------
> arch/x86/mm/init_32.c | 8 +++++---
> 2 files changed, 5 insertions(+), 10 deletions(-)
>
> diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
> index d406c52..e71c5cb 100644
> --- a/arch/x86/mm/init.c
> +++ b/arch/x86/mm/init.c
> @@ -266,16 +266,9 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
> if (!after_bootmem)
> find_early_table_space(end, use_pse, use_gbpages);
>
> -#ifdef CONFIG_X86_32
> - for (i = 0; i < nr_range; i++)
> - kernel_physical_mapping_init(mr[i].start, mr[i].end,
> - mr[i].page_size_mask);
> - ret = end;
> -#else /* CONFIG_X86_64 */
> for (i = 0; i < nr_range; i++)
> ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
> mr[i].page_size_mask);
> -#endif
>
> #ifdef CONFIG_X86_32
> early_ioremap_page_table_range_init();
> diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
> index 9a0c258..2226f2c 100644
> --- a/arch/x86/mm/init_32.c
> +++ b/arch/x86/mm/init_32.c
> @@ -241,6 +241,7 @@ kernel_physical_mapping_init(unsigned long start,
> unsigned long page_size_mask)
> {
> int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
> + unsigned long last_map_addr = end;
> unsigned long start_pfn, end_pfn;
> pgd_t *pgd_base = swapper_pg_dir;
> int pgd_idx, pmd_idx, pte_ofs;
> @@ -341,9 +342,10 @@ repeat:
> prot = PAGE_KERNEL_EXEC;
>
> pages_4k++;
> - if (mapping_iter == 1)
> + if (mapping_iter == 1) {
> set_pte(pte, pfn_pte(pfn, init_prot));
> - else
> + last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
> + } else
> set_pte(pte, pfn_pte(pfn, prot));
> }
> }

Don't we need to update last_map_addr after pages_2m++; ?

And... from the logic,
	unsigned long last_map_addr = end;
seems to remain true all the way to the return.

So, isn't just returning "end" good enough?

Thanks,
-Kame

> @@ -368,7 +370,7 @@ repeat:
> mapping_iter = 2;
> goto repeat;
> }
> - return 0;
> + return last_map_addr;
> }
>
> pte_t *kmap_pte;
> --
> 1.5.6.4
>
>

2010-02-26 13:37:27

by Pekka Enberg

Subject: Re: [RFT/PATCH] x86: Unify kernel_physical_mapping_init() API

Hi Hiroyuki-san,

KAMEZAWA Hiroyuki wrote:
> On Wed, 24 Feb 2010 17:04:47 +0200 (EET)
> Pekka J Enberg <[email protected]> wrote:
>
>> From: Pekka Enberg <[email protected]>
>>
>> This patch changes the 32-bit version of kernel_physical_mapping_init() to
>> return the last mapped address like the 64-bit one so that we can unify the
>> call-site in init_memory_mapping().
>>
>
> I'm sorry if I'm not following the logic correctly..

OK. I tried to make it match what x86-64 does.

>> Cc: Yinghai Lu <[email protected]>
>> Cc: KAMEZAWA Hiroyuki <[email protected]>
>> Signed-off-by: Pekka Enberg <[email protected]>
>> ---
>> Note: I have only tested this on VirtualBox which is why I tagged the patch as
>> RFT.
>>
>> arch/x86/mm/init.c | 7 -------
>> arch/x86/mm/init_32.c | 8 +++++---
>> 2 files changed, 5 insertions(+), 10 deletions(-)
>>
>> diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
>> index d406c52..e71c5cb 100644
>> --- a/arch/x86/mm/init.c
>> +++ b/arch/x86/mm/init.c
>> @@ -266,16 +266,9 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
>> if (!after_bootmem)
>> find_early_table_space(end, use_pse, use_gbpages);
>>
>> -#ifdef CONFIG_X86_32
>> - for (i = 0; i < nr_range; i++)
>> - kernel_physical_mapping_init(mr[i].start, mr[i].end,
>> - mr[i].page_size_mask);
>> - ret = end;
>> -#else /* CONFIG_X86_64 */
>> for (i = 0; i < nr_range; i++)
>> ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
>> mr[i].page_size_mask);
>> -#endif
>>
>> #ifdef CONFIG_X86_32
>> early_ioremap_page_table_range_init();
>> diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
>> index 9a0c258..2226f2c 100644
>> --- a/arch/x86/mm/init_32.c
>> +++ b/arch/x86/mm/init_32.c
>> @@ -241,6 +241,7 @@ kernel_physical_mapping_init(unsigned long start,
>> unsigned long page_size_mask)
>> {
>> int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
>> + unsigned long last_map_addr = end;
>> unsigned long start_pfn, end_pfn;
>> pgd_t *pgd_base = swapper_pg_dir;
>> int pgd_idx, pmd_idx, pte_ofs;
>> @@ -341,9 +342,10 @@ repeat:
>> prot = PAGE_KERNEL_EXEC;
>>
>> pages_4k++;
>> - if (mapping_iter == 1)
>> + if (mapping_iter == 1) {
>> set_pte(pte, pfn_pte(pfn, init_prot));
>> - else
>> + last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
>> + } else
>> set_pte(pte, pfn_pte(pfn, prot));
>> }
>> }
>
> Don't we need to update last_map_addr after pages_2m++; ?

Yeah, I missed that part. It probably works fine in practice, because for
2M pages we effectively still return "end". However, to fix it properly,
I'm not completely sure what "last_map_addr" should be. I am thinking

	last_map_addr = (addr & PMD_MASK) + PMD_SIZE;

might do the trick, but I'm not that familiar with the code. Ideas?
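
As a concrete illustration of the arithmetic being discussed, here is a
small stand-alone sketch (not kernel code; the constants, the PMD-aligned
pfn and the use of the physical base address are assumptions for a
PAE-style 2M page). It only shows that the pfn-based form used in the 4k
branch of the patch and the PMD-rounding form suggested above agree on
where a 2M mapping ends:

#include <stdio.h>

/* Assumed constants for a PAE-style 32-bit setup with 2M large pages. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PTRS_PER_PTE	512	/* 4k pages per 2M mapping */

int main(void)
{
	unsigned long pfn  = 0x40000;		/* hypothetical PMD-aligned pfn (1 GiB) */
	unsigned long phys = pfn << PAGE_SHIFT;	/* physical base of the 2M mapping */

	/* 4k branch in the patch: end of the single page just mapped. */
	unsigned long last_4k = (pfn << PAGE_SHIFT) + PAGE_SIZE;

	/* Rounding form suggested above, applied to the physical base. */
	unsigned long last_2m_round = (phys & PMD_MASK) + PMD_SIZE;

	/* Equivalent pfn-based form for a PMD-aligned pfn. */
	unsigned long last_2m_pfn = (pfn + PTRS_PER_PTE) << PAGE_SHIFT;

	printf("4k mapping ends at %#lx\n", last_4k);
	printf("2M mapping ends at %#lx (rounded) / %#lx (pfn-based)\n",
	       last_2m_round, last_2m_pfn);
	return 0;
}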

> And... from the logic,
> 	unsigned long last_map_addr = end;
> seems to remain true all the way to the return.
>
> So, isn't just returning "end" good enough?

Might be, but like I said, I tried to make it match x86-64 so that we
could eventually try to unify those bits as well.

Pekka
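
As a closing aside, here is a rough stand-alone mock of the calling
convention the two implementations converge on after this patch. The
struct, the range values and the trivial function body are made up purely
for illustration; only the loop mirrors the unified call-site shown in the
diff above:

#include <stdio.h>

/* Hypothetical stand-in for the kernel's per-range bookkeeping. */
struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned long page_size_mask;
};

/*
 * Mock of the unified convention: take (start, end, page_size_mask) and
 * report back the last mapped physical address. The real 32-bit and
 * 64-bit implementations do the actual page-table setup, of course.
 */
static unsigned long kernel_physical_mapping_init(unsigned long start,
						  unsigned long end,
						  unsigned long page_size_mask)
{
	(void)start;
	(void)page_size_mask;
	return end;
}

int main(void)
{
	/* Made-up ranges, just to drive the loop. */
	struct map_range mr[] = {
		{ 0x00000000UL, 0x00200000UL, 0UL },
		{ 0x00200000UL, 0x38000000UL, 1UL },
	};
	int i, nr_range = 2;
	unsigned long ret = 0;

	/* Same loop for 32-bit and 64-bit once both return the last mapped address. */
	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);

	printf("last mapped address: %#lx\n", ret);
	return 0;
}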