Provide kaslr_offset() to get the kernel offset when KASLR is enabled.
An error may occur before update_kaslr_offset() runs, so place the call
at the end of the offset branch.
Fixes: a307a4ce9ecd ("MIPS: Loongson64: Add KASLR support")
Reported-by: kernel test robot <[email protected]>
Signed-off-by: Jinyang He <[email protected]>
---
arch/mips/include/asm/page.h | 6 ++++++
arch/mips/kernel/relocate.c | 10 ++++++++++
arch/mips/kernel/setup.c | 3 +++
3 files changed, 19 insertions(+)
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 6a77bc4..74082e3 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -255,6 +255,12 @@ extern bool __virt_addr_valid(const volatile void *kaddr);
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
+extern unsigned long __kaslr_offset;
+static inline unsigned long kaslr_offset(void)
+{
+ return __kaslr_offset;
+}
+
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index c643c81..95abb9c 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -300,6 +300,13 @@ static inline int __init relocation_addr_valid(void *loc_new)
return 1;
}
+static inline void __init update_kaslr_offset(unsigned long *addr, long offset)
+{
+ unsigned long *new_addr = (unsigned long *)RELOCATED(addr);
+
+ *new_addr = (unsigned long)offset;
+}
+
#if defined(CONFIG_USE_OF)
void __weak *plat_get_fdt(void)
{
@@ -410,6 +417,9 @@ void *__init relocate_kernel(void)
/* Return the new kernel's entry point */
kernel_entry = RELOCATED(start_kernel);
+
+ /* Errors may occur before this point, so update the offset last */
+ update_kaslr_offset(&__kaslr_offset, offset);
}
out:
return kernel_entry;
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 7e1f8e2..31ba343 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -84,6 +84,9 @@ static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };
+unsigned long __kaslr_offset __ro_after_init;
+EXPORT_SYMBOL(__kaslr_offset);
+
static void *detect_magic __initdata = detect_memory_region;
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
--
2.1.0
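For context, the point of exposing kaslr_offset() in asm/page.h is that
generic code can query the randomization offset without knowing any MIPS
internals. Below is a minimal sketch of such a consumer, a panic notifier
that reports the offset, loosely modeled on what other architectures do.
The function and notifier names are hypothetical and not part of this
patch.

	/* Sketch only: hypothetical consumer of kaslr_offset(). */
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <asm/page.h>

	static int report_kaslr_offset(struct notifier_block *nb,
				       unsigned long event, void *data)
	{
		if (kaslr_offset() > 0)
			pr_emerg("Kernel Offset: 0x%lx\n", kaslr_offset());
		else
			pr_emerg("Kernel Offset: disabled\n");
		return NOTIFY_DONE;
	}

	static struct notifier_block kaslr_panic_nb = {
		.notifier_call = report_kaslr_offset,
	};

	static int __init register_kaslr_notifier(void)
	{
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kaslr_panic_nb);
		return 0;
	}
	device_initcall(register_kaslr_notifier);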
The VMLINUX_LOAD_ADDRESS macro has type (unsigned long long) on 32-bit
kernels but (unsigned long) on 64-bit kernels. Although there is no error
here, avoid using it to calculate the kaslr offset. Also, what we may need
here is the address of __kaslr_offset rather than (void *)offset.
Signed-off-by: Jinyang He <[email protected]>
---
arch/mips/kernel/relocate.c | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index 95abb9c..52018a3 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -430,13 +430,9 @@ void *__init relocate_kernel(void)
*/
static void show_kernel_relocation(const char *level)
{
- unsigned long offset;
-
- offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
-
- if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) {
+ if (__kaslr_offset > 0) {
printk(level);
- pr_cont("Kernel relocated by 0x%pK\n", (void *)offset);
+ pr_cont("Kernel relocated by 0x%pK\n", &__kaslr_offset);
pr_cont(" .text @ 0x%pK\n", _text);
pr_cont(" .data @ 0x%pK\n", _sdata);
pr_cont(" .bss @ 0x%pK\n", __bss_start);
--
2.1.0
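To make the type concern above concrete, here is a standalone illustration
in plain C. The load address below is made up; it only stands in for the
32-bit kernel's (unsigned long long) VMLINUX_LOAD_ADDRESS.

	#include <stdio.h>

	#define LOAD_ADDR 0x80100000ULL	/* hypothetical, unsigned long long */

	int main(void)
	{
		unsigned long text = 0x80300000UL;	/* pretend relocated _text */

		/*
		 * The subtraction is performed in 64 bits and truncated on
		 * assignment. The value happens to be right, but it relies on
		 * implicit conversions and invites printk format mismatches.
		 * Reading back a value recorded once, as the patch does with
		 * __kaslr_offset, sidesteps all of this.
		 */
		unsigned long offset = text - LOAD_ADDR;

		printf("offset = 0x%lx\n", offset);
		return 0;
	}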
On Wed, Feb 03, 2021 at 06:35:10PM +0800, Jinyang He wrote:
> The VMLINUX_LOAD_ADDRESS macro has type (unsigned long long) on 32-bit
> kernels but (unsigned long) on 64-bit kernels. Although there is no error
> here, avoid using it to calculate the kaslr offset. Also, what we may need
> here is the address of __kaslr_offset rather than (void *)offset.
>
> Signed-off-by: Jinyang He <[email protected]>
> ---
> arch/mips/kernel/relocate.c | 8 ++------
> 1 file changed, 2 insertions(+), 6 deletions(-)
>
> diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
> index 95abb9c..52018a3 100644
> --- a/arch/mips/kernel/relocate.c
> +++ b/arch/mips/kernel/relocate.c
> @@ -430,13 +430,9 @@ void *__init relocate_kernel(void)
> */
> static void show_kernel_relocation(const char *level)
> {
> - unsigned long offset;
> -
> - offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
> -
> - if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) {
> + if (__kaslr_offset > 0) {
> printk(level);
> - pr_cont("Kernel relocated by 0x%pK\n", (void *)offset);
> + pr_cont("Kernel relocated by 0x%pK\n", &__kaslr_offset);
are you sure? I would have expected (void *)__kaslr_offset here.
Thomas.
--
Crap can work. Given enough thrust pigs will fly, but it's not necessarily a
good idea. [ RFC1925, 2.3 ]
On 02/04/2021 11:36 PM, Thomas Bogendoerfer wrote:
> On Wed, Feb 03, 2021 at 06:35:10PM +0800, Jinyang He wrote:
>> The VMLINUX_LOAD_ADDRESS macro has type (unsigned long long) on 32-bit
>> kernels but (unsigned long) on 64-bit kernels. Although there is no error
>> here, avoid using it to calculate the kaslr offset. Also, what we may need
>> here is the address of __kaslr_offset rather than (void *)offset.
>>
>> Signed-off-by: Jinyang He <[email protected]>
>> ---
>> arch/mips/kernel/relocate.c | 8 ++------
>> 1 file changed, 2 insertions(+), 6 deletions(-)
>>
>> diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
>> index 95abb9c..52018a3 100644
>> --- a/arch/mips/kernel/relocate.c
>> +++ b/arch/mips/kernel/relocate.c
>> @@ -430,13 +430,9 @@ void *__init relocate_kernel(void)
>> */
>> static void show_kernel_relocation(const char *level)
>> {
>> - unsigned long offset;
>> -
>> - offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
>> -
>> - if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) {
>> + if (__kaslr_offset > 0) {
>> printk(level);
>> - pr_cont("Kernel relocated by 0x%pK\n", (void *)offset);
>> + pr_cont("Kernel relocated by 0x%pK\n", &__kaslr_offset);
> are you sure? I would have expected (void *)__kaslr_offset here.
>
> Thomas.
>
It is my fault. I misunderstood the meaning of 'by' and '@' here.
Thank you for pointing out the mistake. I'll send v3 later.
Thanks,
Jinyang
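(For reference, following Thomas's comment, the corrected line in v3 would
presumably read as below. This is a sketch based on the review, not the
actual v3 patch.)

	pr_cont("Kernel relocated by 0x%pK\n", (void *)__kaslr_offset);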