The first patch makes it possible to have a single definition of the
address conversion macros for all kernel types.
The following patches bring small cleanups to mm/init.c and the last
patch makes the size of the DTB early mapping consistent between 32-bit
and 64-bit kernels.
Alexandre Ghiti (5):
riscv: Introduce va_kernel_pa_offset for 32-bit kernel
riscv: Get rid of map_size parameter to create_kernel_page_table
riscv: Use __maybe_unused instead of #ifdefs around variable
declarations
riscv: Simplify BUILTIN_DTB device tree mapping handling
riscv: Move early fdt mapping creation in its own function
arch/riscv/include/asm/page.h | 15 +----
arch/riscv/mm/init.c | 121 +++++++++++++++-------------------
2 files changed, 54 insertions(+), 82 deletions(-)
--
2.30.2
va_kernel_pa_offset was only used for 64-bit kernels, since on 32-bit
the kernel mapping lies in the linear mapping and only the offset
between PAGE_OFFSET and the kernel load address is needed.
But this distinction complicates the code with #ifdefs, and in
particular requires a separate definition of the address conversion
macros.
Simplify the code by defining this variable for both 32-bit and 64-bit
kernels.
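For illustration, here is a minimal user-space sketch (not kernel code;
the addresses below are assumed values) of why defining
va_kernel_pa_offset unconditionally is harmless on 32-bit: there the
kernel mapping virtual base equals PAGE_OFFSET, so both offsets
coincide and the single set of conversion macros stays correct.

#include <stdio.h>

#define PAGE_OFFSET 0xc0000000UL	/* assumed 32-bit linear mapping base */

int main(void)
{
	/*
	 * Assumed values mirroring struct kernel_mapping on a 32-bit
	 * kernel, where the kernel sits at the start of the linear mapping.
	 */
	unsigned long virt_addr = PAGE_OFFSET;	/* kernel mapping virtual base */
	unsigned long phys_addr = 0x80000000UL;	/* kernel load address */

	unsigned long va_pa_offset = PAGE_OFFSET - phys_addr;
	unsigned long va_kernel_pa_offset = virt_addr - phys_addr;

	/* Both offsets are identical on 32-bit, so the unified macros hold. */
	printf("va_pa_offset        = %#lx\n", va_pa_offset);
	printf("va_kernel_pa_offset = %#lx\n", va_kernel_pa_offset);
	return 0;
}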
Signed-off-by: Alexandre Ghiti <[email protected]>
---
arch/riscv/include/asm/page.h | 15 ++-------------
arch/riscv/mm/init.c | 3 ---
2 files changed, 2 insertions(+), 16 deletions(-)
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 10dc063868f6..adf5b4671684 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -91,10 +91,8 @@ struct kernel_mapping {
uintptr_t size;
/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset;
-#ifdef CONFIG_64BIT
/* Offset between kernel mapping virtual address and kernel load address */
unsigned long va_kernel_pa_offset;
-#endif
unsigned long va_kernel_xip_pa_offset;
#ifdef CONFIG_XIP_KERNEL
uintptr_t xiprom;
@@ -105,11 +103,11 @@ struct kernel_mapping {
extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;
-#ifdef CONFIG_64BIT
#define is_kernel_mapping(x) \
((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
+
#define is_linear_mapping(x) \
- ((x) >= PAGE_OFFSET && (x) < kernel_map.virt_addr)
+ ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < kernel_map.virt_addr))
#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#define kernel_mapping_pa_to_va(y) ({ \
@@ -133,15 +131,6 @@ extern phys_addr_t phys_ram_base;
is_linear_mapping(_x) ? \
linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
})
-#else
-#define is_kernel_mapping(x) \
- ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
-#define is_linear_mapping(x) \
- ((x) >= PAGE_OFFSET)
-
-#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + kernel_map.va_pa_offset))
-#define __va_to_pa_nodebug(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
-#endif /* CONFIG_64BIT */
#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 4ebe7e19c2b8..9668867e2702 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -559,11 +559,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif
-
kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
-#ifdef CONFIG_64BIT
kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
-#endif
pfn_base = PFN_DOWN(kernel_map.phys_addr);
--
2.30.2
__PAGETABLE_PMD_FOLDED indicates a 2-level page table, which is only
used by 32-bit kernels, so there is no need to check for CONFIG_64BIT
inside #ifndef __PAGETABLE_PMD_FOLDED blocks, and vice versa.
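Schematically, the redundancy being removed looks like this (a
compilable sketch of the nesting pattern, not the exact file contents):

/*
 * Inside #ifndef __PAGETABLE_PMD_FOLDED, a riscv kernel is necessarily
 * 64-bit, so the inner CONFIG_64BIT test can never disagree with the
 * outer one.
 */
#ifndef __PAGETABLE_PMD_FOLDED		/* 3-level page table: 64-bit only */
# ifdef CONFIG_64BIT			/* redundant: always true here */
static const int three_level_pt = 1;
# endif
#else					/* 2-level page table: 32-bit only */
static const int three_level_pt = 0;	/* a CONFIG_64BIT branch would be dead */
#endif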
Signed-off-by: Alexandre Ghiti <[email protected]>
---
arch/riscv/mm/init.c | 12 ++----------
1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 6b29c5791555..1b30ad7e3ebc 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -614,18 +614,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
-#ifdef CONFIG_64BIT
/*
* __va can't be used since it would return a linear mapping address
* whereas dtb_early_va will be used before setup_vm_final installs
* the linear mapping.
*/
dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
-#else
- dtb_early_va = __va(dtb_pa);
-#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
-#else
+#else /* __PAGETABLE_PMD_FOLDED */
#ifndef CONFIG_BUILTIN_DTB
/* Create two consecutive PGD mappings for FDT early scan */
pa = dtb_pa & ~(PGDIR_SIZE - 1);
@@ -635,13 +631,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
-#ifdef CONFIG_64BIT
- dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
-#else
dtb_early_va = __va(dtb_pa);
-#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
-#endif
+#endif /* __PAGETABLE_PMD_FOLDED */
dtb_early_pa = dtb_pa;
/*
--
2.30.2
The kernel must always be mapped using PMD_SIZE, and this is already
the case; this patch simply simplifies create_kernel_page_table by
removing the now-redundant map_size parameter.
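As a stand-alone sketch of what the fixed granularity means
(illustrative assumed values; PMD_SIZE is 2MB on riscv64 with Sv39),
the mapping loop simply walks the kernel VA range in PMD_SIZE steps:

#include <stdio.h>

#define PMD_SIZE (2UL << 20)	/* 2MB, assumed Sv39 value */

int main(void)
{
	unsigned long virt_addr = 0x80000000UL;	/* assumed kernel VA base */
	unsigned long size = 16UL << 20;	/* assumed 16MB kernel image */
	unsigned long va, end_va = virt_addr + size;
	unsigned int n = 0;

	/*
	 * One create_pgd_mapping() call per PMD_SIZE step, as in
	 * create_kernel_page_table() after this patch.
	 */
	for (va = virt_addr; va < end_va; va += PMD_SIZE)
		n++;

	printf("%u PMD-sized mappings\n", n);	/* prints 8 */
	return 0;
}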
Signed-off-by: Alexandre Ghiti <[email protected]>
---
arch/riscv/mm/init.c | 30 +++++++++++-------------------
1 file changed, 11 insertions(+), 19 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 9668867e2702..e35df3e1c9a3 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -501,36 +501,35 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
#endif
#ifdef CONFIG_XIP_KERNEL
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+static void __init create_kernel_page_table(pgd_t *pgdir,
__always_unused bool early)
{
uintptr_t va, end_va;
/* Map the flash resident part */
end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
- for (va = kernel_map.virt_addr; va < end_va; va += map_size)
+ for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.xiprom + (va - kernel_map.virt_addr),
- map_size, PAGE_KERNEL_EXEC);
+ PMD_SIZE, PAGE_KERNEL_EXEC);
/* Map the data in RAM */
end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
- for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += map_size)
+ for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
- map_size, PAGE_KERNEL);
+ PMD_SIZE, PAGE_KERNEL);
}
#else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
- bool early)
+static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
{
uintptr_t va, end_va;
end_va = kernel_map.virt_addr + kernel_map.size;
- for (va = kernel_map.virt_addr; va < end_va; va += map_size)
+ for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.phys_addr + (va - kernel_map.virt_addr),
- map_size,
+ PMD_SIZE,
early ?
PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
@@ -539,7 +538,6 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
uintptr_t __maybe_unused pa;
- uintptr_t map_size;
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif
@@ -564,15 +562,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
pfn_base = PFN_DOWN(kernel_map.phys_addr);
- /*
- * Enforce boot alignment requirements of RV32 and
- * RV64 by only allowing PMD or PGD mappings.
- */
- map_size = PMD_SIZE;
-
/* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
- BUG_ON((kernel_map.phys_addr % map_size) != 0);
+ BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
@@ -609,7 +601,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
* us to reach paging_init(). We map all memory banks later
* in setup_vm_final() below.
*/
- create_kernel_page_table(early_pg_dir, map_size, true);
+ create_kernel_page_table(early_pg_dir, true);
#ifndef __PAGETABLE_PMD_FOLDED
/* Setup early PMD for DTB */
@@ -725,7 +717,7 @@ static void __init setup_vm_final(void)
#ifdef CONFIG_64BIT
/* Map the kernel */
- create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
+ create_kernel_page_table(swapper_pg_dir, false);
#endif
/* Clear fixmap PTE and PMD mappings */
--
2.30.2
The code that handles the early fdt mapping is hard to read, and the
size of the mapping it creates differs between kernels:
- for 64-bit, 2 PMD entries are used, which amounts to a 4MB mapping
- for 32-bit, 2 PGDIR entries are used, which amounts to an 8MB mapping
So keep using 2 PMD entries for 64-bit and use only one PGD entry for
32-bit, which is enough to cover 4MB. Move that logic into a new
function called create_fdt_early_page_table, which uses the same naming
convention as create_kernel_page_table.
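For reference, the size arithmetic behind those numbers, assuming Sv39
for 64-bit (2MB per PMD entry) and Sv32 for 32-bit (4MB per PGD entry):

#include <stdio.h>

#define PMD_SIZE_SV39	(2UL << 20)	/* 2MB per PMD entry, 64-bit */
#define PGDIR_SIZE_SV32	(4UL << 20)	/* 4MB per PGD entry, 32-bit */

int main(void)
{
	/* before this patch: 2 PMD entries vs 2 PGD entries */
	printf("before: 64-bit %luMB, 32-bit %luMB\n",
	       (2 * PMD_SIZE_SV39) >> 20, (2 * PGDIR_SIZE_SV32) >> 20);
	/* after this patch:  2 PMD entries vs 1 PGD entry */
	printf("after:  64-bit %luMB, 32-bit %luMB\n",
	       (2 * PMD_SIZE_SV39) >> 20, (1 * PGDIR_SIZE_SV32) >> 20);
	return 0;
}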
Signed-off-by: Alexandre Ghiti <[email protected]>
---
arch/riscv/mm/init.c | 76 +++++++++++++++++++++++---------------------
1 file changed, 40 insertions(+), 36 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 1b30ad7e3ebc..e2c8f188ad95 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -228,6 +228,7 @@ pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
@@ -308,7 +309,6 @@ static void __init create_pte_mapping(pte_t *ptep,
static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
-static pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
@@ -394,6 +394,7 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next fixmap_pte
+#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
#endif
void __init create_pgd_mapping(pgd_t *pgdp,
@@ -535,9 +536,44 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
}
#endif
+/*
+ * Setup a 4MB mapping that encompasses the device tree: for 64-bit kernel,
+ * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
+ * entry.
+ */
+static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
+{
+#ifndef CONFIG_BUILTIN_DTB
+ uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
+
+ create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
+ IS_ENABLED(CONFIG_64BIT) ? (uintptr_t)early_dtb_pmd : pa,
+ PGDIR_SIZE,
+ IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);
+
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
+ pa, PMD_SIZE, PAGE_KERNEL);
+ create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
+ pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
+ }
+
+ dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
+#else
+ /*
+ * For 64-bit kernel, __va can't be used since it would return a linear
+ * mapping address whereas dtb_early_va will be used before
+ * setup_vm_final installs the linear mapping. For 32-bit kernel, as the
+ * kernel is mapped in the linear mapping, that makes no difference.
+ */
+ dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
+#endif
+
+ dtb_early_pa = dtb_pa;
+}
+
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
- uintptr_t __maybe_unused pa;
pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
kernel_map.virt_addr = KERNEL_LINK_ADDR;
@@ -601,40 +637,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
*/
create_kernel_page_table(early_pg_dir, true);
-#ifndef __PAGETABLE_PMD_FOLDED
- /* Setup early PMD for DTB */
- create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
- (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
-#ifndef CONFIG_BUILTIN_DTB
- /* Create two consecutive PMD mappings for FDT early scan */
- pa = dtb_pa & ~(PMD_SIZE - 1);
- create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
- pa, PMD_SIZE, PAGE_KERNEL);
- create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
- pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
- dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
-#else /* CONFIG_BUILTIN_DTB */
- /*
- * __va can't be used since it would return a linear mapping address
- * whereas dtb_early_va will be used before setup_vm_final installs
- * the linear mapping.
- */
- dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
-#endif /* CONFIG_BUILTIN_DTB */
-#else /* __PAGETABLE_PMD_FOLDED */
-#ifndef CONFIG_BUILTIN_DTB
- /* Create two consecutive PGD mappings for FDT early scan */
- pa = dtb_pa & ~(PGDIR_SIZE - 1);
- create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
- pa, PGDIR_SIZE, PAGE_KERNEL);
- create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
- pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
- dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
-#else /* CONFIG_BUILTIN_DTB */
- dtb_early_va = __va(dtb_pa);
-#endif /* CONFIG_BUILTIN_DTB */
-#endif /* __PAGETABLE_PMD_FOLDED */
- dtb_early_pa = dtb_pa;
+ /* Setup early mapping for FDT early scan */
+ create_fdt_early_page_table(early_pg_dir, dtb_pa);
/*
* Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap
--
2.30.2
On Fri, 23 Jul 2021 06:01:23 PDT (-0700), [email protected] wrote:
> The first patch makes it possible to have a single definition of the
> address conversion macros for all kernel types.
>
> The following patches bring small cleanups to mm/init.c and the last
> patch makes the size of the DTB early mapping consistent between 32-bit
> and 64-bit kernels.
>
> Alexandre Ghiti (5):
> riscv: Introduce va_kernel_pa_offset for 32-bit kernel
> riscv: Get rid of map_size parameter to create_kernel_page_table
> riscv: Use __maybe_unused instead of #ifdefs around variable
> declarations
> riscv: Simplify BUILTIN_DTB device tree mapping handling
> riscv: Move early fdt mapping creation in its own function
>
> arch/riscv/include/asm/page.h | 15 +----
> arch/riscv/mm/init.c | 121 +++++++++++++++-------------------
> 2 files changed, 54 insertions(+), 82 deletions(-)
Thanks, these are on for-next. There were some minor merge conflicts,
let me know if I missed anything.