2018-06-05 09:34:56

by Jun Yao

Subject: [PATCH v2 0/3] arm64/mm: migrate swapper_pg_dir

Version 2 changes:
* Move __pa_swapper_pg_dir to the .mmuoff.data.read section [1]
* Fix a problem with CONFIG_ARM64_SW_TTBR0_PAN [2]
* Update the comment about __enable_mmu()

[v1] https://www.spinics.net/lists/kernel/msg2819351.html

[1] https://www.spinics.net/lists/kernel/msg2819594.html
[2] https://www.spinics.net/lists/kernel/msg2819455.html

Jun Yao (3):
arm64/mm: pass swapper_pg_dir as an argument to __enable_mmu()
arm64/mm: introduce variable to save new swapper_pg_dir address
arm64/mm: migrate swapper_pg_dir and tramp_pg_dir

arch/arm64/include/asm/mmu_context.h | 6 +-
arch/arm64/include/asm/pgtable.h | 2 +
arch/arm64/kernel/cpufeature.c | 2 +-
arch/arm64/kernel/head.S | 6 +-
arch/arm64/kernel/hibernate.c | 2 +-
arch/arm64/kernel/sleep.S | 1 +
arch/arm64/mm/kasan_init.c | 6 +-
arch/arm64/mm/mmu.c | 83 ++++++++++++++++++----------
8 files changed, 69 insertions(+), 39 deletions(-)

--
2.17.0



2018-06-05 09:35:01

by Jun Yao

Subject: [PATCH v2 3/3] arm64/mm: migrate swapper_pg_dir and tramp_pg_dir

Migrate swapper_pg_dir and tramp_pg_dir to pages allocated in
paging_init(), so that their virtual addresses no longer correlate
with the kernel image's address.
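
To sketch where the tables end up after this patch (illustrative
only; the names and expressions are the ones used in the diff below,
and the single-page mem_size == PAGE_SIZE case is omitted):

	/* Condensed view of the new paging_init() flow; not part of the patch. */
	phys_addr_t mem_size = __pa_symbol(swapper_pg_dir) + PAGE_SIZE
			       - (__pa_symbol(idmap_pg_dir) + IDMAP_DIR_SIZE);
	phys_addr_t pgd_phys = memblock_alloc(mem_size, PAGE_SIZE);

	new_tramp_pg_dir    = __va(pgd_phys);                  /* start of block */
	__pa_swapper_pg_dir = pgd_phys + mem_size - PAGE_SIZE; /* last page      */
	new_swapper_pg_dir  = __va(__pa_swapper_pg_dir);
	cpu_replace_ttbr1(__pa_swapper_pg_dir);                /* switch TTBR1   */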

Signed-off-by: Jun Yao <[email protected]>
---
arch/arm64/mm/mmu.c | 75 ++++++++++++++++++++++++++++-----------------
1 file changed, 47 insertions(+), 28 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index fcb425b0fcab..3ffcff2d7dcd 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -58,6 +58,9 @@ EXPORT_SYMBOL(kimage_voffset);
volatile phys_addr_t __section(".mmuoff.data.read")
__pa_swapper_pg_dir;
pgd_t *new_swapper_pg_dir = swapper_pg_dir;
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+pgd_t *new_tramp_pg_dir;
+#endif

/*
* Empty_zero_page is a special page that is used for zero-initialized data
@@ -81,19 +84,14 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
}
EXPORT_SYMBOL(phys_mem_access_prot);

-static phys_addr_t __init early_pgtable_alloc(void)
+static void __init clear_page_phys(phys_addr_t phys)
{
- phys_addr_t phys;
- void *ptr;
-
- phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-
/*
* The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
* slot will be free, so we can (ab)use the FIX_PTE slot to initialise
* any level of table.
*/
- ptr = pte_set_fixmap(phys);
+ void *ptr = pte_set_fixmap(phys);

memset(ptr, 0, PAGE_SIZE);

@@ -102,6 +100,14 @@ static phys_addr_t __init early_pgtable_alloc(void)
* table walker
*/
pte_clear_fixmap();
+}
+
+static phys_addr_t __init early_pgtable_alloc(void)
+{
+ phys_addr_t phys;
+
+ phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ clear_page_phys(phys);

return phys;
}
@@ -555,6 +561,10 @@ static int __init map_entry_trampoline(void)
__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
prot, pgd_pgtable_alloc, 0);

+ memcpy(new_tramp_pg_dir, tramp_pg_dir, PGD_SIZE);
+ memblock_free(__pa_symbol(tramp_pg_dir),
+ __pa_symbol(swapper_pg_dir) - __pa_symbol(tramp_pg_dir));
+
/* Map both the text and data into the kernel page table */
__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
@@ -632,38 +642,47 @@ static void __init map_kernel(pgd_t *pgdp)
*/
void __init paging_init(void)
{
- phys_addr_t pgd_phys = early_pgtable_alloc();
- pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
+ phys_addr_t pgd_phys;
+ pgd_t *pgdp;
+ phys_addr_t mem_size;
+
+ mem_size = __pa_symbol(swapper_pg_dir) + PAGE_SIZE
+ - (__pa_symbol(idmap_pg_dir) + IDMAP_DIR_SIZE);
+
+ if (mem_size == PAGE_SIZE) {
+ pgd_phys = early_pgtable_alloc();
+ __pa_swapper_pg_dir = pgd_phys;
+ } else {
+ phys_addr_t p;
+
+ pgd_phys = memblock_alloc(mem_size, PAGE_SIZE);
+
+ for (p = pgd_phys; p < pgd_phys + mem_size; p += PAGE_SIZE)
+ clear_page_phys(p);
+
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ new_tramp_pg_dir = __va(pgd_phys);
+ #endif
+ __pa_swapper_pg_dir = pgd_phys + mem_size - PAGE_SIZE;
+ }

- __pa_swapper_pg_dir = __pa_symbol(swapper_pg_dir);
__flush_dcache_area((void *)&__pa_swapper_pg_dir,
sizeof(__pa_swapper_pg_dir));

+ new_swapper_pg_dir = __va(__pa_swapper_pg_dir);
+
+ pgdp = pgd_set_fixmap(__pa_swapper_pg_dir);
+
map_kernel(pgdp);
map_mem(pgdp);

- /*
- * We want to reuse the original swapper_pg_dir so we don't have to
- * communicate the new address to non-coherent secondaries in
- * secondary_entry, and so cpu_switch_mm can generate the address with
- * adrp+add rather than a load from some global variable.
- *
- * To do this we need to go via a temporary pgd.
- */
- cpu_replace_ttbr1(pgd_phys);
- memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
cpu_replace_ttbr1(__pa_swapper_pg_dir);
+ init_mm.pgd = new_swapper_pg_dir;

pgd_clear_fixmap();
- memblock_free(pgd_phys, PAGE_SIZE);

- /*
- * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
- * allocated with it.
- */
- memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
- __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir)
- - PAGE_SIZE);
+ memblock_free(__pa_symbol(swapper_pg_dir),
+ __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir));
}

/*
--
2.17.0


2018-06-05 09:35:15

by Jun Yao

Subject: [PATCH v2 2/3] arm64/mm: introduce variable to save new swapper_pg_dir address

In preparation for migrating swapper_pg_dir, introduce
new_swapper_pg_dir to record the current virtual address of
swapper_pg_dir.
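
For example (both lines are taken from this patch and from patch 3/3
in this series), users of the linker symbol switch to the new pointer
so they keep working once the tables move:

	/* kasan_init(): copy from wherever swapper_pg_dir currently lives */
	memcpy(tmp_pg_dir, new_swapper_pg_dir, sizeof(tmp_pg_dir));

	/* paging_init(), after the migration in patch 3/3 */
	init_mm.pgd = new_swapper_pg_dir;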

Signed-off-by: Jun Yao <[email protected]>
---
arch/arm64/include/asm/mmu_context.h | 2 +-
arch/arm64/include/asm/pgtable.h | 1 +
arch/arm64/mm/kasan_init.c | 2 +-
arch/arm64/mm/mmu.c | 1 +
4 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 3eddb871f251..481c2d16adeb 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -57,7 +57,7 @@ static inline void cpu_set_reserved_ttbr0(void)

static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
- BUG_ON(pgd == swapper_pg_dir);
+ BUG_ON(pgd == new_swapper_pg_dir);
cpu_set_reserved_ttbr0();
cpu_do_switch_mm(virt_to_phys(pgd),mm);
}
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 519ab5581b08..2bda899dcf22 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -723,6 +723,7 @@ extern pgd_t swapper_pg_end[];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern volatile phys_addr_t __pa_swapper_pg_dir;
+extern pgd_t *new_swapper_pg_dir;

/*
* Encode and decode a swap entry:
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index dd4f28c19165..08bcaae4725e 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -197,7 +197,7 @@ void __init kasan_init(void)
* tmp_pg_dir used to keep early shadow mapped until full shadow
* setup will be finished.
*/
- memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
+ memcpy(tmp_pg_dir, new_swapper_pg_dir, sizeof(tmp_pg_dir));
dsb(ishst);
cpu_replace_ttbr1(__pa_symbol(tmp_pg_dir));

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c7df2f0d2e85..fcb425b0fcab 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -57,6 +57,7 @@ EXPORT_SYMBOL(kimage_voffset);

volatile phys_addr_t __section(".mmuoff.data.read")
__pa_swapper_pg_dir;
+pgd_t *new_swapper_pg_dir = swapper_pg_dir;

/*
* Empty_zero_page is a special page that is used for zero-initialized data
--
2.17.0


2018-06-05 09:35:59

by Jun Yao

Subject: [PATCH v2 1/3] arm64/mm: pass swapper_pg_dir as an argument to __enable_mmu()

Introduce __pa_swapper_pg_dir to record the physical address of
swapper_pg_dir, and pass it to __enable_mmu() as an argument.
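
The visible effect on callers (illustrative; both lines appear in the
kasan_init.c hunk below) is that cpu_replace_ttbr1() now takes a
physical address, so the linear-map alias is no longer needed:

	/* before: pass a virtual alias, convert inside the helper */
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* after: pass the physical address directly */
	cpu_replace_ttbr1(__pa_swapper_pg_dir);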

Signed-off-by: Jun Yao <[email protected]>
---
arch/arm64/include/asm/mmu_context.h | 4 +---
arch/arm64/include/asm/pgtable.h | 1 +
arch/arm64/kernel/cpufeature.c | 2 +-
arch/arm64/kernel/head.S | 6 ++++--
arch/arm64/kernel/hibernate.c | 2 +-
arch/arm64/kernel/sleep.S | 1 +
arch/arm64/mm/kasan_init.c | 4 ++--
arch/arm64/mm/mmu.c | 11 +++++++++--
8 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 39ec0b8a689e..3eddb871f251 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -141,14 +141,12 @@ static inline void cpu_install_idmap(void)
* Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
* avoiding the possibility of conflicting TLB entries being allocated.
*/
-static inline void cpu_replace_ttbr1(pgd_t *pgdp)
+static inline void cpu_replace_ttbr1(phys_addr_t pgd_phys)
{
typedef void (ttbr_replace_func)(phys_addr_t);
extern ttbr_replace_func idmap_cpu_replace_ttbr1;
ttbr_replace_func *replace_phys;

- phys_addr_t pgd_phys = virt_to_phys(pgdp);
-
replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

cpu_install_idmap();
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7c4c8f318ba9..519ab5581b08 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -722,6 +722,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t swapper_pg_end[];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
+extern volatile phys_addr_t __pa_swapper_pg_dir;

/*
* Encode and decode a swap entry:
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d2856b129097..e3d76a9dd67a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -917,7 +917,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);

cpu_install_idmap();
- remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
+ remap_fn(cpu, num_online_cpus(), __pa_swapper_pg_dir);
cpu_uninstall_idmap();

if (!cpu)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b0853069702f..2e871b1cb75f 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -706,6 +706,7 @@ secondary_startup:
* Common entry point for secondary CPUs.
*/
bl __cpu_setup // initialise processor
+ ldr_l x26, __pa_swapper_pg_dir
bl __enable_mmu
ldr x8, =__secondary_switched
br x8
@@ -748,6 +749,7 @@ ENDPROC(__secondary_switched)
* Enable the MMU.
*
* x0 = SCTLR_EL1 value for turning on the MMU.
+ * x26 = TTBR1 value for turning on the MMU.
*
* Returns to the caller via x30/lr. This requires the caller to be covered
* by the .idmap.text section.
@@ -762,9 +764,8 @@ ENTRY(__enable_mmu)
b.ne __no_granule_support
update_early_cpu_boot_status 0, x1, x2
adrp x1, idmap_pg_dir
- adrp x2, swapper_pg_dir
phys_to_ttbr x3, x1
- phys_to_ttbr x4, x2
+ phys_to_ttbr x4, x26
msr ttbr0_el1, x3 // load TTBR0
msr ttbr1_el1, x4 // load TTBR1
isb
@@ -823,6 +824,7 @@ __primary_switch:
mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value
#endif

+ adrp x26, swapper_pg_dir
bl __enable_mmu
#ifdef CONFIG_RELOCATABLE
bl __relocate_kernel
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 6b2686d54411..0a0a0ca19f9b 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -125,7 +125,7 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
return -EOVERFLOW;

arch_hdr_invariants(&hdr->invariants);
- hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir);
+ hdr->ttbr1_el1 = __pa_swapper_pg_dir;
hdr->reenter_kernel = _cpu_resume;

/* We can't use __hyp_get_vectors() because kvm may still be loaded */
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index bebec8ef9372..03854c329449 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,6 +101,7 @@ ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
+ ldr_l x26, __pa_swapper_pg_dir
bl __enable_mmu
ldr x8, =_cpu_resume
br x8
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 12145874c02b..dd4f28c19165 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -199,7 +199,7 @@ void __init kasan_init(void)
*/
memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
dsb(ishst);
- cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
+ cpu_replace_ttbr1(__pa_symbol(tmp_pg_dir));

clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

@@ -236,7 +236,7 @@ void __init kasan_init(void)
pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

memset(kasan_zero_page, 0, PAGE_SIZE);
- cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+ cpu_replace_ttbr1(__pa_swapper_pg_dir);

/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 2dbb2c9f1ec1..c7df2f0d2e85 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -55,6 +55,9 @@ u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

+volatile phys_addr_t __section(".mmuoff.data.read")
+__pa_swapper_pg_dir;
+
/*
* Empty_zero_page is a special page that is used for zero-initialized data
* and COW.
@@ -631,6 +634,10 @@ void __init paging_init(void)
phys_addr_t pgd_phys = early_pgtable_alloc();
pgd_t *pgdp = pgd_set_fixmap(pgd_phys);

+ __pa_swapper_pg_dir = __pa_symbol(swapper_pg_dir);
+ __flush_dcache_area((void *)&__pa_swapper_pg_dir,
+ sizeof(__pa_swapper_pg_dir));
+
map_kernel(pgdp);
map_mem(pgdp);

@@ -642,9 +649,9 @@ void __init paging_init(void)
*
* To do this we need to go via a temporary pgd.
*/
- cpu_replace_ttbr1(__va(pgd_phys));
+ cpu_replace_ttbr1(pgd_phys);
memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
- cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+ cpu_replace_ttbr1(__pa_swapper_pg_dir);

pgd_clear_fixmap();
memblock_free(pgd_phys, PAGE_SIZE);
--
2.17.0


2018-06-05 21:57:34

by Laura Abbott

Subject: Re: [PATCH v2 1/3] arm64/mm: pass swapper_pg_dir as an argument to __enable_mmu()

On 06/05/2018 02:33 AM, Jun Yao wrote:
> Introduce __pa_swapper_pg_dir to record the physical address of
> swapper_pg_dir, and pass it to __enable_mmu() as an argument.
>
> Signed-off-by: Jun Yao <[email protected]>
> ---
> [...]
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 2dbb2c9f1ec1..c7df2f0d2e85 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -55,6 +55,9 @@ u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
> u64 kimage_voffset __ro_after_init;
> EXPORT_SYMBOL(kimage_voffset);
>
> +volatile phys_addr_t __section(".mmuoff.data.read")
> +__pa_swapper_pg_dir;
> +
> /*
> * Empty_zero_page is a special page that is used for zero-initialized data
> * and COW.
> @@ -631,6 +634,10 @@ void __init paging_init(void)
> phys_addr_t pgd_phys = early_pgtable_alloc();
> pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
>
> + __pa_swapper_pg_dir = __pa_symbol(swapper_pg_dir);
> + __flush_dcache_area((void *)&__pa_swapper_pg_dir,
> + sizeof(__pa_swapper_pg_dir));
> +

Can you add a comment here explaining why the flush is needed and
also why the variable is placed in this particular section?
A brief summary of https://www.spinics.net/lists/kernel/msg2819594.html
would be fine.
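Something along these lines, perhaps (the wording is only a
suggestion):

	/*
	 * __pa_swapper_pg_dir is read in __enable_mmu() by secondary CPUs
	 * and by the resume path while the MMU (and hence the cache) is
	 * still off, so it lives in .mmuoff.data.read and has to be
	 * cleaned to the PoC once it has been written with the MMU on.
	 */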

Thanks,
Laura
