Add a new helper reloc_kernel_entry() to jump back to the start of the
new kernel. After we put the new kernel in a randomized place, we can use
this new helper to enter the kernel and begin relocating again.
Signed-off-by: Jason Yan <[email protected]>
Cc: Diana Craciun <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Nicholas Piggin <[email protected]>
Cc: Kees Cook <[email protected]>
---
arch/powerpc/kernel/head_fsl_booke.S | 16 ++++++++++++++++
arch/powerpc/mm/mmu_decl.h | 1 +
2 files changed, 17 insertions(+)

diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index a57d44638031..ce40f96dae20 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -1144,6 +1144,22 @@ _GLOBAL(create_tlb_entry)
sync
blr

+/*
+ * Return to the start of the relocated kernel and run again
+ * r3 - virtual address of fdt
+ * r4 - entry of the kernel
+ */
+_GLOBAL(reloc_kernel_entry)
+ mfmsr r7
+ li r8,(MSR_IS | MSR_DS)
+ andc r7,r7,r8
+
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r7
+ isync
+ sync
+ rfi
+
/*
* Create a tlb entry with the same effective and physical address as
* the tlb entry used by the current running code. But set the TS to 1.
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d7737cf97cee..dae8e9177574 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -143,6 +143,7 @@ extern void adjust_total_lowmem(void);
extern int switch_to_as1(void);
extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu);
extern void create_tlb_entry(phys_addr_t phys, unsigned long virt, int entry);
+extern void reloc_kernel_entry(void *fdt, int addr);
#endif
extern void loadcam_entry(unsigned int index);
extern void loadcam_multi(int first_idx, int num, int tmp_idx);
--
2.17.2
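
For readers following the series, here is a rough sketch of how an
early-boot KASLR path might combine this helper with the existing
create_tlb_entry() visible in the mmu_decl.h context above. The function
kaslr_jump_to_new_kernel() and its parameters are hypothetical names used
purely for illustration; the actual caller is not part of this patch.

#include <linux/init.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/cacheflush.h>

/* Prototypes as declared in arch/powerpc/mm/mmu_decl.h. */
void create_tlb_entry(phys_addr_t phys, unsigned long virt, int entry);
void reloc_kernel_entry(void *fdt, int addr);

extern char _stext[], _end[];

/*
 * Illustrative only: once a random location (new_phys/new_virt) has been
 * chosen, map it, copy the kernel image there, and jump to its start so
 * the new copy can apply its relocations again.
 */
static void __init kaslr_jump_to_new_kernel(void *dt_ptr,
					    phys_addr_t new_phys,
					    unsigned long new_virt)
{
	unsigned long kernel_sz = (unsigned long)_end - (unsigned long)_stext;

	create_tlb_entry(new_phys, new_virt, 1);	/* map the target */
	memcpy((void *)new_virt, _stext, kernel_sz);	/* copy the image */
	flush_icache_range(new_virt, new_virt + kernel_sz);

	reloc_kernel_entry(dt_ptr, new_virt);		/* does not return */
}

Note that reloc_kernel_entry() never returns: it clears MSR_IS/MSR_DS in
SRR1 and rfi's to the entry address in r4, with r3 still holding the fdt
pointer for the new kernel.
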
On 17/07/2019 at 10:06, Jason Yan wrote:
> Add a new helper reloc_kernel_entry() to jump back to the start of the
> new kernel. After we put the new kernel in a randomized place, we can use
> this new helper to enter the kernel and begin relocating again.
>
> Signed-off-by: Jason Yan <[email protected]>
> Cc: Diana Craciun <[email protected]>
> Cc: Michael Ellerman <[email protected]>
> Cc: Christophe Leroy <[email protected]>
> Cc: Benjamin Herrenschmidt <[email protected]>
> Cc: Paul Mackerras <[email protected]>
> Cc: Nicholas Piggin <[email protected]>
> Cc: Kees Cook <[email protected]>
> ---
> arch/powerpc/kernel/head_fsl_booke.S | 16 ++++++++++++++++
> arch/powerpc/mm/mmu_decl.h | 1 +
> 2 files changed, 17 insertions(+)
>
> diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
> index a57d44638031..ce40f96dae20 100644
> --- a/arch/powerpc/kernel/head_fsl_booke.S
> +++ b/arch/powerpc/kernel/head_fsl_booke.S
> @@ -1144,6 +1144,22 @@ _GLOBAL(create_tlb_entry)
> sync
> blr
>
> +/*
> + * Return to the start of the relocated kernel and run again
> + * r3 - virtual address of fdt
> + * r4 - entry of the kernel
> + */
> +_GLOBAL(reloc_kernel_entry)
> + mfmsr r7
> + li r8,(MSR_IS | MSR_DS)
> + andc r7,r7,r8
Instead of the li/andc, what about the following:
rlwinm r7, r7, 0, ~(MSR_IS | MSR_DS)
> +
> + mtspr SPRN_SRR0,r4
> + mtspr SPRN_SRR1,r7
> + isync
> + sync
> + rfi
Are the isync/sync really necessary? AFAIK, rfi is context synchronising.
> +
> /*
> * Create a tlb entry with the same effective and physical address as
> * the tlb entry used by the current running code. But set the TS to 1.
> diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
> index d7737cf97cee..dae8e9177574 100644
> --- a/arch/powerpc/mm/mmu_decl.h
> +++ b/arch/powerpc/mm/mmu_decl.h
> @@ -143,6 +143,7 @@ extern void adjust_total_lowmem(void);
> extern int switch_to_as1(void);
> extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu);
> extern void create_tlb_entry(phys_addr_t phys, unsigned long virt, int entry);
> +extern void reloc_kernel_entry(void *fdt, int addr);
No new 'extern' please, see
https://openpower.xyz/job/snowpatch/job/snowpatch-linux-checkpatch/8125//artifact/linux/checkpatch.log
> #endif
> extern void loadcam_entry(unsigned int index);
> extern void loadcam_multi(int first_idx, int num, int tmp_idx);
>
Christophe
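
If the checkpatch feedback above is addressed in a respin, the new
declaration in mmu_decl.h would presumably just drop the extern keyword,
e.g. (a sketch of a possible v2, not the actual patch):

	void reloc_kernel_entry(void *fdt, int addr);
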
On 2019/7/29 19:08, Christophe Leroy wrote:
>
>
> On 17/07/2019 at 10:06, Jason Yan wrote:
>> Add a new helper reloc_kernel_entry() to jump back to the start of the
>> new kernel. After we put the new kernel in a randomized place, we can use
>> this new helper to enter the kernel and begin relocating again.
>>
>> Signed-off-by: Jason Yan <[email protected]>
>> Cc: Diana Craciun <[email protected]>
>> Cc: Michael Ellerman <[email protected]>
>> Cc: Christophe Leroy <[email protected]>
>> Cc: Benjamin Herrenschmidt <[email protected]>
>> Cc: Paul Mackerras <[email protected]>
>> Cc: Nicholas Piggin <[email protected]>
>> Cc: Kees Cook <[email protected]>
>> ---
>> arch/powerpc/kernel/head_fsl_booke.S | 16 ++++++++++++++++
>> arch/powerpc/mm/mmu_decl.h | 1 +
>> 2 files changed, 17 insertions(+)
>>
>> diff --git a/arch/powerpc/kernel/head_fsl_booke.S
>> b/arch/powerpc/kernel/head_fsl_booke.S
>> index a57d44638031..ce40f96dae20 100644
>> --- a/arch/powerpc/kernel/head_fsl_booke.S
>> +++ b/arch/powerpc/kernel/head_fsl_booke.S
>> @@ -1144,6 +1144,22 @@ _GLOBAL(create_tlb_entry)
>> sync
>> blr
>> +/*
>> + * Return to the start of the relocated kernel and run again
>> + * r3 - virtual address of fdt
>> + * r4 - entry of the kernel
>> + */
>> +_GLOBAL(reloc_kernel_entry)
>> + mfmsr r7
>> + li r8,(MSR_IS | MSR_DS)
>> + andc r7,r7,r8
>
> Instead of the li/andc, what about the following:
>
> rlwinm r7, r7, 0, ~(MSR_IS | MSR_DS)
>
Good idea.
>> +
>> + mtspr SPRN_SRR0,r4
>> + mtspr SPRN_SRR1,r7
>> + isync
>> + sync
>> + rfi
>
> Are the isync/sync really necessary? AFAIK, rfi is context synchronising.
>
I have seen some code with a sync before rfi, so I'm not sure. I will check
this and drop the isync/sync if they are indeed unnecessary.
Thanks.
>> +
>> /*
>> * Create a tlb entry with the same effective and physical address as
>> * the tlb entry used by the current running code. But set the TS to 1.
>> diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
>> index d7737cf97cee..dae8e9177574 100644
>> --- a/arch/powerpc/mm/mmu_decl.h
>> +++ b/arch/powerpc/mm/mmu_decl.h
>> @@ -143,6 +143,7 @@ extern void adjust_total_lowmem(void);
>> extern int switch_to_as1(void);
>> extern void restore_to_as0(int esel, int offset, void *dt_ptr, int
>> bootcpu);
>> extern void create_tlb_entry(phys_addr_t phys, unsigned long virt,
>> int entry);
>> +extern void reloc_kernel_entry(void *fdt, int addr);
>
> No new 'extern' please, see
> https://openpower.xyz/job/snowpatch/job/snowpatch-linux-checkpatch/8125//artifact/linux/checkpatch.log
>
>
>
>> #endif
>> extern void loadcam_entry(unsigned int index);
>> extern void loadcam_multi(int first_idx, int num, int tmp_idx);
>>
>
> Christophe
>
>