No functional change, just cleanup:
1. remove redundant comments that already appear above
2. comment improvements (an illustrative sketch follows this list):
"aligned to a 2M boundary"
-->
"aligned up to CONFIG_PHYSICAL_ALIGN boundary"
3. typo fix: uncompression --> decompression
4. indentation fix in linker script: spaces -> tab
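For illustration only (not part of the patch): a minimal C sketch of the
"align up" computation the updated comment refers to, assuming the alignment
is a power of two. The constants and names below are hypothetical stand-ins
for the real CONFIG_PHYSICAL_ALIGN and LOAD_PHYSICAL_ADDR macros; head_64.S
does the equivalent in assembly.

  #include <stdint.h>

  /* Hypothetical stand-ins for CONFIG_PHYSICAL_ALIGN and LOAD_PHYSICAL_ADDR. */
  #define PHYS_ALIGN      0x200000UL
  #define LOAD_PHYS_ADDR  0x1000000UL

  /* Round addr up to the next multiple of align (align must be a power of two). */
  uint64_t align_up(uint64_t addr, uint64_t align)
  {
          return (addr + align - 1) & ~(align - 1);
  }

  /*
   * What the updated comment describes: a relocatable kernel is decompressed
   * at its load address aligned up; otherwise LOAD_PHYSICAL_ADDR is used.
   */
  uint64_t decompressed_kernel_start(uint64_t load_addr, int relocatable)
  {
          if (relocatable)
                  return align_up(load_addr, PHYS_ALIGN);
          return LOAD_PHYS_ADDR;
  }

The add-and-mask pattern in align_up() is the usual way to round an address up
to a power-of-two boundary.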
Signed-off-by: Cao jin <[email protected]>
---
arch/x86/boot/compressed/head_64.S | 13 +++++--------
arch/x86/boot/compressed/vmlinux.lds.S | 4 ++--
2 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 64037895b085..58f6a467f1fa 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -90,9 +90,6 @@ ENTRY(startup_32)
jnz no_longmode
/*
- * Compute the delta between where we were compiled to run at
- * and where the code will actually run at.
- *
* %ebp contains the address we are loaded at by the boot loader and %ebx
* contains the address where we should move the kernel image temporarily
* for safe in-place decompression.
@@ -272,12 +269,12 @@ ENTRY(startup_64)
/*
* Compute the decompressed kernel start address. It is where
- * we were loaded at aligned to a 2M boundary. %rbp contains the
- * decompressed kernel start address.
+ * we were loaded at aligned up to CONFIG_PHYSICAL_ALIGN boundary.
+ * %rbp contains the decompressed kernel start address.
*
* If it is a relocatable kernel then decompress and run the kernel
- * from load address aligned to 2MB addr, otherwise decompress and
- * run the kernel from LOAD_PHYSICAL_ADDR
+ * from load address aligned up to CONFIG_PHYSICAL_ALIGN, otherwise
+ * decompress and run the kernel from LOAD_PHYSICAL_ADDR
*
* We cannot rely on the calculation done in 32-bit mode, since we
* may have been invoked via the 64-bit entry point.
@@ -525,7 +522,7 @@ relocated:
*/
pushq %rsi /* Save the real mode argument */
movq %rsi, %rdi /* real mode address */
- leaq boot_heap(%rip), %rsi /* malloc area for uncompression */
+ leaq boot_heap(%rip), %rsi /* malloc area for decompression */
leaq input_data(%rip), %rdx /* input_data */
movl $z_input_len, %ecx /* input_len */
movq %rbp, %r8 /* output target address */
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index f491bbde8493..c07c8aba0755 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -64,8 +64,8 @@ SECTIONS
_ebss = .;
}
#ifdef CONFIG_X86_64
- . = ALIGN(PAGE_SIZE);
- .pgtable : {
+ . = ALIGN(PAGE_SIZE);
+ .pgtable : {
_pgtable = . ;
*(.pgtable)
_epgtable = . ;
--
2.17.0
Hi,
Is there anything wrong with this patch?
--
Sincerely,
Cao jin
On 1/4/19 8:04 PM, Cao jin wrote:
> No functional change, just cleanup:
> 1. remove redundant comments that already appear above
> 2. comment improvements:
> "aligned to a 2M boundary"
> -->
> "aligned up to CONFIG_PHYSICAL_ALIGN boundary"
I finally see why my understanding behind item 2 was inaccurate. Sorry for the noise.
> 3. typo fix: uncompression --> decompression
> 4. indentation fix in linker script: spaces -> tab
>
I will drop the other 3 items unless they are wanted.
--
Sincerely,
Cao jin