To support NOMMU and XIP, arch/riscv/mm/init.c has become much more
complex due to lots of #ifdefs. This hurts not only code readability and
compile coverage, but may also introduce bugs; for example, I believe one
recently fixed bug[1] was caused by this issue during a merge.
This series tries to clean up unnecessary #ifdefs as much as possible.
Further cleanups may need to refactor the XIP code as Alexandre's patch
does.
[1] http://lists.infradead.org/pipermail/linux-riscv/2021-December/010607.html
Jisheng Zhang (5):
riscv: mm: init: remove unnecessary "#ifdef CONFIG_CRASH_DUMP"
riscv: mm: init: try IS_ENABLED(CONFIG_64BIT) instead of #ifdef
riscv: mm: init: remove _pt_ops and use pt_ops directly
riscv: mm: init: try IS_ENABLED(CONFIG_XIP_KERNEL) instead of #ifdef
riscv: mm: init: try best to remove #ifdef CONFIG_XIP_KERNEL usage
arch/riscv/mm/init.c | 71 +++++++++++++++++---------------------------
1 file changed, 28 insertions(+), 43 deletions(-)
--
2.34.1
is_kdump_kernel() returns false in the !CRASH_DUMP case, so we don't
need the #ifdef CONFIG_CRASH_DUMP guard around the is_kdump_kernel() check.
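For reference, a simplified sketch of the helper's two flavors (the shape
below is an assumption about include/linux/crash_dump.h, not verbatim
kernel code):

	#ifdef CONFIG_CRASH_DUMP
	static inline bool is_kdump_kernel(void)
	{
		/* true only when the capture kernel was given elfcorehdr= */
		return elfcorehdr_addr != ELFCORE_ADDR_MAX;
	}
	#else
	static inline bool is_kdump_kernel(void)
	{
		return false;
	}
	#endif

Either way the call compiles, so the guard around it is redundant.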
Signed-off-by: Jisheng Zhang <[email protected]>
---
arch/riscv/mm/init.c | 2 --
1 file changed, 2 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 3c0649dba4ff..745f26a3b02e 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -790,12 +790,10 @@ static void __init reserve_crashkernel(void)
* since it doesn't make much sense and we have limited memory
* resources.
*/
-#ifdef CONFIG_CRASH_DUMP
if (is_kdump_kernel()) {
pr_info("crashkernel: ignoring reservation request\n");
return;
}
-#endif
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base);
--
2.34.1
Try our best to replace conditional compilation using
"#ifdef CONFIG_64BIT" with checks for "IS_ENABLED(CONFIG_64BIT)", to
simplify the code and to increase compile coverage.
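As a reminder of why this increases compile coverage: IS_ENABLED() is an
ordinary C expression, so both branches are always parsed and type-checked,
and the compiler discards the dead one. A minimal sketch (the helper names
below are hypothetical):

	/* Both helpers must compile in every config; only one call
	 * survives dead-code elimination. */
	if (IS_ENABLED(CONFIG_64BIT))
		print_kernel_mapping();		/* hypothetical */
	else
		limit_lowmem_top();		/* hypothetical */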
Signed-off-by: Jisheng Zhang <[email protected]>
---
arch/riscv/mm/init.c | 38 +++++++++++++++++---------------------
1 file changed, 17 insertions(+), 21 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 745f26a3b02e..bd445ac778a8 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -102,10 +102,9 @@ static void __init print_vm_layout(void)
(unsigned long)VMALLOC_END);
print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
(unsigned long)high_memory);
-#ifdef CONFIG_64BIT
- print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
- (unsigned long)ADDRESS_SPACE_END);
-#endif
+ if (IS_ENABLED(CONFIG_64BIT))
+ print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
+ (unsigned long)ADDRESS_SPACE_END);
}
#else
static void print_vm_layout(void) { }
@@ -172,17 +171,16 @@ static void __init setup_bootmem(void)
memblock_enforce_memory_limit(memory_limit);
- /*
- * Reserve from the start of the kernel to the end of the kernel
- */
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
/*
* Make sure we align the reservation on PMD_SIZE since we will
* map the kernel in the linear mapping as read-only: we do not want
* any allocation to happen between _end and the next pmd aligned page.
*/
- vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
-#endif
+ if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+ vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
+ /*
+ * Reserve from the start of the kernel to the end of the kernel
+ */
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
@@ -190,7 +188,6 @@ static void __init setup_bootmem(void)
#ifndef CONFIG_XIP_KERNEL
phys_ram_base = memblock_start_of_DRAM();
#endif
-#ifndef CONFIG_64BIT
/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
@@ -200,10 +197,11 @@ static void __init setup_bootmem(void)
* address space is occupied by the kernel mapping then this check must
* be done as soon as the kernel mapping base address is determined.
*/
- max_mapped_addr = __pa(~(ulong)0);
- if (max_mapped_addr == (phys_ram_end - 1))
- memblock_set_current_limit(max_mapped_addr - 4096);
-#endif
+ if (!IS_ENABLED(CONFIG_64BIT)) {
+ max_mapped_addr = __pa(~(ulong)0);
+ if (max_mapped_addr == (phys_ram_end - 1))
+ memblock_set_current_limit(max_mapped_addr - 4096);
+ }
min_low_pfn = PFN_UP(phys_ram_base);
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
@@ -616,13 +614,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
-#ifdef CONFIG_64BIT
/*
* The last 4K bytes of the addressable memory can not be mapped because
* of IS_ERR_VALUE macro.
*/
- BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
-#endif
+ if (IS_ENABLED(CONFIG_64BIT))
+ BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
@@ -735,10 +732,9 @@ static void __init setup_vm_final(void)
}
}
-#ifdef CONFIG_64BIT
/* Map the kernel */
- create_kernel_page_table(swapper_pg_dir, false);
-#endif
+ if (IS_ENABLED(CONFIG_64BIT))
+ create_kernel_page_table(swapper_pg_dir, false);
/* Clear fixmap PTE and PMD mappings */
clear_fixmap(FIX_PTE);
--
2.34.1
Except "pt_ops", other global vars when CONFIG_XIP_KERNEL=y is defined
as below:
|foo_type foo;
|#ifdef CONFIG_XIP_KERNEL
|#define foo (*(foo_type *)XIP_FIXUP(&foo))
|#endif
Follow the same approach for pt_ops to unify the style and to simplify the code.
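Note the macro may safely share the variable's name: the C preprocessor
does not re-expand a macro inside its own replacement list, so "&pt_ops"
in the macro body still names the variable. The result looks like:

	static struct pt_alloc_ops pt_ops __initdata;
	#ifdef CONFIG_XIP_KERNEL
	#define pt_ops	(*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
	#endif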
Signed-off-by: Jisheng Zhang <[email protected]>
---
arch/riscv/mm/init.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index bd445ac778a8..4d4fcd7ef1a9 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -227,12 +227,10 @@ static void __init setup_bootmem(void)
}
#ifdef CONFIG_MMU
-static struct pt_alloc_ops _pt_ops __initdata;
+static struct pt_alloc_ops pt_ops __initdata;
#ifdef CONFIG_XIP_KERNEL
-#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops))
-#else
-#define pt_ops _pt_ops
+#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
#endif
unsigned long riscv_pfn_base __ro_after_init;
--
2.34.1
Try our best to replace conditional compilation using
"#ifdef CONFIG_XIP_KERNEL" with checks for "IS_ENABLED(CONFIG_XIP_KERNEL)",
to simplify the code and to increase compile coverage.
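One prerequisite worth noting: unlike #ifdef, both branches of an
IS_ENABLED() check must compile in every configuration, so every symbol
referenced in either branch (_sdata, _start, ...) has to be visible
unconditionally. For example, the converted vmlinux_start selection:

	if (IS_ENABLED(CONFIG_XIP_KERNEL))
		vmlinux_start = __pa_symbol(&_sdata);
	else
		vmlinux_start = __pa_symbol(&_start);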
Signed-off-by: Jisheng Zhang <[email protected]>
---
arch/riscv/mm/init.c | 16 +++++++---------
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 4d4fcd7ef1a9..4a9e3f429042 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -161,13 +161,13 @@ early_param("mem", early_mem);
static void __init setup_bootmem(void)
{
phys_addr_t vmlinux_end = __pa_symbol(&_end);
- phys_addr_t vmlinux_start = __pa_symbol(&_start);
phys_addr_t __maybe_unused max_mapped_addr;
- phys_addr_t phys_ram_end;
+ phys_addr_t phys_ram_end, vmlinux_start;
-#ifdef CONFIG_XIP_KERNEL
- vmlinux_start = __pa_symbol(&_sdata);
-#endif
+ if (IS_ENABLED(CONFIG_XIP_KERNEL))
+ vmlinux_start = __pa_symbol(&_sdata);
+ else
+ vmlinux_start = __pa_symbol(&_start);
memblock_enforce_memory_limit(memory_limit);
@@ -183,11 +183,9 @@ static void __init setup_bootmem(void)
*/
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
-
phys_ram_end = memblock_end_of_DRAM();
-#ifndef CONFIG_XIP_KERNEL
- phys_ram_base = memblock_start_of_DRAM();
-#endif
+ if (!IS_ENABLED(CONFIG_XIP_KERNEL))
+ phys_ram_base = memblock_start_of_DRAM();
/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
--
2.34.1
Currently, the #ifdef CONFIG_XIP_KERNEL usage can be divided into the
following three types:
The first one is for functions/declarations only used in XIP case.
The second one is for the XIP_FIXUP case, something like below:
|foo_type foo;
|#ifdef CONFIG_XIP_KERNEL
|#define foo (*(foo_type *)XIP_FIXUP(&foo))
|#endif
Usually it's better to keep the foo macro next to the foo variable. But
when several such foos are defined adjacently, we can save some #ifdef
CONFIG_XIP_KERNEL occurrences by grouping them together.
The third one is for different XIP-specific implementations; usually this
is an #ifdef...#else...#endif case.
This patch moves the pt_ops macro into an adjacent #ifdef CONFIG_XIP_KERNEL
block and groups the first-type declarations into one such block.
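For the third type, a hedged sketch of the shape (the real init.c
functions differ in detail):

	#ifdef CONFIG_XIP_KERNEL
	static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
	{
		/* map the XIP text from flash and the data from RAM */
	}
	#else
	static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
	{
		/* map the whole kernel image from RAM */
	}
	#endif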
Signed-off-by: Jisheng Zhang <[email protected]>
---
arch/riscv/mm/init.c | 11 +++--------
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 4a9e3f429042..aeae7d6b2fee 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -40,10 +40,6 @@ EXPORT_SYMBOL(kernel_map);
phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);
-#ifdef CONFIG_XIP_KERNEL
-extern char _xiprom[], _exiprom[], __data_loc;
-#endif
-
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
@@ -227,10 +223,6 @@ static void __init setup_bootmem(void)
#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops __initdata;
-#ifdef CONFIG_XIP_KERNEL
-#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
-#endif
-
unsigned long riscv_pfn_base __ro_after_init;
EXPORT_SYMBOL(riscv_pfn_base);
@@ -242,6 +234,7 @@ pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
+#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
#define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte))
#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir))
@@ -445,6 +438,8 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
}
#ifdef CONFIG_XIP_KERNEL
+extern char _xiprom[], _exiprom[], __data_loc;
+
/* called from head.S with MMU off */
asmlinkage void __init __copy_data(void)
{
--
2.34.1
On 12/3/21 06:03, Jisheng Zhang wrote:
> is_kdump_kernel() returns false in the !CRASH_DUMP case, so we don't
> need the #ifdef CONFIG_CRASH_DUMP guard around the is_kdump_kernel() check.
>
> Signed-off-by: Jisheng Zhang <[email protected]>
> ---
> arch/riscv/mm/init.c | 2 --
> 1 file changed, 2 deletions(-)
>
> diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> index 3c0649dba4ff..745f26a3b02e 100644
> --- a/arch/riscv/mm/init.c
> +++ b/arch/riscv/mm/init.c
> @@ -790,12 +790,10 @@ static void __init reserve_crashkernel(void)
> * since it doesn't make much sense and we have limited memory
> * resources.
> */
> -#ifdef CONFIG_CRASH_DUMP
> if (is_kdump_kernel()) {
> pr_info("crashkernel: ignoring reservation request\n");
> return;
> }
> -#endif
>
> ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
> &crash_size, &crash_base);
Reviewed-by: Alexandre Ghiti <[email protected]>
Thanks,
Alex
On 12/3/21 06:03, Jisheng Zhang wrote:
> Try our best to replace conditional compilation using
> "#ifdef CONFIG_64BIT" with checks for "IS_ENABLED(CONFIG_64BIT)", to
> simplify the code and to increase compile coverage.
>
> Signed-off-by: Jisheng Zhang <[email protected]>
> ---
> arch/riscv/mm/init.c | 38 +++++++++++++++++---------------------
> 1 file changed, 17 insertions(+), 21 deletions(-)
>
> diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> index 745f26a3b02e..bd445ac778a8 100644
> --- a/arch/riscv/mm/init.c
> +++ b/arch/riscv/mm/init.c
> @@ -102,10 +102,9 @@ static void __init print_vm_layout(void)
> (unsigned long)VMALLOC_END);
> print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
> (unsigned long)high_memory);
> -#ifdef CONFIG_64BIT
> - print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
> - (unsigned long)ADDRESS_SPACE_END);
> -#endif
> + if (IS_ENABLED(CONFIG_64BIT))
> + print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
> + (unsigned long)ADDRESS_SPACE_END);
> }
> #else
> static void print_vm_layout(void) { }
> @@ -172,17 +171,16 @@ static void __init setup_bootmem(void)
>
> memblock_enforce_memory_limit(memory_limit);
>
> - /*
> - * Reserve from the start of the kernel to the end of the kernel
> - */
> -#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
> /*
> * Make sure we align the reservation on PMD_SIZE since we will
> * map the kernel in the linear mapping as read-only: we do not want
> * any allocation to happen between _end and the next pmd aligned page.
> */
> - vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
> -#endif
> + if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
> + vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
> + /*
> + * Reserve from the start of the kernel to the end of the kernel
> + */
> memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
>
>
> @@ -190,7 +188,6 @@ static void __init setup_bootmem(void)
> #ifndef CONFIG_XIP_KERNEL
> phys_ram_base = memblock_start_of_DRAM();
> #endif
> -#ifndef CONFIG_64BIT
> /*
> * memblock allocator is not aware of the fact that last 4K bytes of
> * the addressable memory can not be mapped because of IS_ERR_VALUE
> @@ -200,10 +197,11 @@ static void __init setup_bootmem(void)
> * address space is occupied by the kernel mapping then this check must
> * be done as soon as the kernel mapping base address is determined.
> */
> - max_mapped_addr = __pa(~(ulong)0);
> - if (max_mapped_addr == (phys_ram_end - 1))
> - memblock_set_current_limit(max_mapped_addr - 4096);
> -#endif
> + if (!IS_ENABLED(CONFIG_64BIT)) {
> + max_mapped_addr = __pa(~(ulong)0);
> + if (max_mapped_addr == (phys_ram_end - 1))
> + memblock_set_current_limit(max_mapped_addr - 4096);
> + }
>
> min_low_pfn = PFN_UP(phys_ram_base);
> max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
> @@ -616,13 +614,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
> BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
> BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
>
> -#ifdef CONFIG_64BIT
> /*
> * The last 4K bytes of the addressable memory can not be mapped because
> * of IS_ERR_VALUE macro.
> */
> - BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
> -#endif
> + if (IS_ENABLED(CONFIG_64BIT))
> + BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
For this one, I think we can just get rid of the condition since this is
true for every kernel actually.
>
> pt_ops.alloc_pte = alloc_pte_early;
> pt_ops.get_pte_virt = get_pte_virt_early;
> @@ -735,10 +732,9 @@ static void __init setup_vm_final(void)
> }
> }
>
> -#ifdef CONFIG_64BIT
> /* Map the kernel */
> - create_kernel_page_table(swapper_pg_dir, false);
> -#endif
> + if (IS_ENABLED(CONFIG_64BIT))
> + create_kernel_page_table(swapper_pg_dir, false);
Wouldn't it be better to introduce a create_kernel_page_table function
that does nothing for !CONFIG_64BIT?
>
> /* Clear fixmap PTE and PMD mappings */
> clear_fixmap(FIX_PTE);
On 12/3/21 06:03, Jisheng Zhang wrote:
> Try our best to replace conditional compilation using
> "#ifdef CONFIG_64BIT" with checks for "IS_ENABLED(CONFIG_64BIT)", to
> simplify the code and to increase compile coverage.
>
> Signed-off-by: Jisheng Zhang <[email protected]>
> ---
> arch/riscv/mm/init.c | 38 +++++++++++++++++---------------------
> 1 file changed, 17 insertions(+), 21 deletions(-)
>
> diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> index 745f26a3b02e..bd445ac778a8 100644
> --- a/arch/riscv/mm/init.c
> +++ b/arch/riscv/mm/init.c
> @@ -102,10 +102,9 @@ static void __init print_vm_layout(void)
> (unsigned long)VMALLOC_END);
> print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
> (unsigned long)high_memory);
> -#ifdef CONFIG_64BIT
> - print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
> - (unsigned long)ADDRESS_SPACE_END);
> -#endif
> + if (IS_ENABLED(CONFIG_64BIT))
> + print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
> + (unsigned long)ADDRESS_SPACE_END);
> }
> #else
> static void print_vm_layout(void) { }
> @@ -172,17 +171,16 @@ static void __init setup_bootmem(void)
>
> memblock_enforce_memory_limit(memory_limit);
>
> - /*
> - * Reserve from the start of the kernel to the end of the kernel
> - */
> -#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
> /*
> * Make sure we align the reservation on PMD_SIZE since we will
> * map the kernel in the linear mapping as read-only: we do not want
> * any allocation to happen between _end and the next pmd aligned page.
> */
> - vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
> -#endif
> + if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
> + vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
> + /*
> + * Reserve from the start of the kernel to the end of the kernel
> + */
> memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
>
>
> @@ -190,7 +188,6 @@ static void __init setup_bootmem(void)
> #ifndef CONFIG_XIP_KERNEL
> phys_ram_base = memblock_start_of_DRAM();
> #endif
> -#ifndef CONFIG_64BIT
> /*
> * memblock allocator is not aware of the fact that last 4K bytes of
> * the addressable memory can not be mapped because of IS_ERR_VALUE
> @@ -200,10 +197,11 @@ static void __init setup_bootmem(void)
> * address space is occupied by the kernel mapping then this check must
> * be done as soon as the kernel mapping base address is determined.
> */
> - max_mapped_addr = __pa(~(ulong)0);
> - if (max_mapped_addr == (phys_ram_end - 1))
> - memblock_set_current_limit(max_mapped_addr - 4096);
> -#endif
> + if (!IS_ENABLED(CONFIG_64BIT)) {
> + max_mapped_addr = __pa(~(ulong)0);
> + if (max_mapped_addr == (phys_ram_end - 1))
> + memblock_set_current_limit(max_mapped_addr - 4096);
> + }
>
And remove the __maybe_unused from the max_mapped_addr declaration.
> min_low_pfn = PFN_UP(phys_ram_base);
> max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
> @@ -616,13 +614,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
> BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
> BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
>
> -#ifdef CONFIG_64BIT
> /*
> * The last 4K bytes of the addressable memory can not be mapped because
> * of IS_ERR_VALUE macro.
> */
> - BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
> -#endif
> + if (IS_ENABLED(CONFIG_64BIT))
> + BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
>
> pt_ops.alloc_pte = alloc_pte_early;
> pt_ops.get_pte_virt = get_pte_virt_early;
> @@ -735,10 +732,9 @@ static void __init setup_vm_final(void)
> }
> }
>
> -#ifdef CONFIG_64BIT
> /* Map the kernel */
> - create_kernel_page_table(swapper_pg_dir, false);
> -#endif
> + if (IS_ENABLED(CONFIG_64BIT))
> + create_kernel_page_table(swapper_pg_dir, false);
>
> /* Clear fixmap PTE and PMD mappings */
> clear_fixmap(FIX_PTE);
On 12/3/21 06:03, Jisheng Zhang wrote:
> Try our best to replace conditional compilation using
> "#ifdef CONFIG_XIP_KERNEL" with checks for "IS_ENABLED(CONFIG_XIP_KERNEL)",
> to simplify the code and to increase compile coverage.
>
> Signed-off-by: Jisheng Zhang <[email protected]>
> ---
> arch/riscv/mm/init.c | 16 +++++++---------
> 1 file changed, 7 insertions(+), 9 deletions(-)
>
> diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> index 4d4fcd7ef1a9..4a9e3f429042 100644
> --- a/arch/riscv/mm/init.c
> +++ b/arch/riscv/mm/init.c
> @@ -161,13 +161,13 @@ early_param("mem", early_mem);
> static void __init setup_bootmem(void)
> {
> phys_addr_t vmlinux_end = __pa_symbol(&_end);
> - phys_addr_t vmlinux_start = __pa_symbol(&_start);
> phys_addr_t __maybe_unused max_mapped_addr;
> - phys_addr_t phys_ram_end;
> + phys_addr_t phys_ram_end, vmlinux_start;
>
> -#ifdef CONFIG_XIP_KERNEL
> - vmlinux_start = __pa_symbol(&_sdata);
> -#endif
> + if (IS_ENABLED(CONFIG_XIP_KERNEL))
> + vmlinux_start = __pa_symbol(&_sdata);
> + else
> + vmlinux_start = __pa_symbol(&_start);
>
> memblock_enforce_memory_limit(memory_limit);
>
> @@ -183,11 +183,9 @@ static void __init setup_bootmem(void)
> */
> memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
>
> -
> phys_ram_end = memblock_end_of_DRAM();
> -#ifndef CONFIG_XIP_KERNEL
> - phys_ram_base = memblock_start_of_DRAM();
> -#endif
> + if (!IS_ENABLED(CONFIG_XIP_KERNEL))
> + phys_ram_base = memblock_start_of_DRAM();
> /*
> * memblock allocator is not aware of the fact that last 4K bytes of
> * the addressable memory can not be mapped because of IS_ERR_VALUE
Reviewed-by: Alexandre Ghiti <[email protected]>
Thanks,
Alex
On 12/3/21 06:03, Jisheng Zhang wrote:
> Currently, the #ifdef CONFIG_XIP_KERNEL usage can be divided into the
> following three types:
>
> The first one is for functions/declarations only used in XIP case.
>
> The second one is for the XIP_FIXUP case, something like below:
> |foo_type foo;
> |#ifdef CONFIG_XIP_KERNEL
> |#define foo (*(foo_type *)XIP_FIXUP(&foo))
> |#endif
>
> Usually it's better to keep the foo macro next to the foo variable. But
> when several such foos are defined adjacently, we can save some #ifdef
> CONFIG_XIP_KERNEL occurrences by grouping them together.
>
> The third one is for different XIP-specific implementations; usually this
> is an #ifdef...#else...#endif case.
>
> This patch moves the pt_ops macro into an adjacent #ifdef CONFIG_XIP_KERNEL
> block and groups the first-type declarations into one such block.
>
> Signed-off-by: Jisheng Zhang <[email protected]>
> ---
> arch/riscv/mm/init.c | 11 +++--------
> 1 file changed, 3 insertions(+), 8 deletions(-)
>
> diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> index 4a9e3f429042..aeae7d6b2fee 100644
> --- a/arch/riscv/mm/init.c
> +++ b/arch/riscv/mm/init.c
> @@ -40,10 +40,6 @@ EXPORT_SYMBOL(kernel_map);
> phys_addr_t phys_ram_base __ro_after_init;
> EXPORT_SYMBOL(phys_ram_base);
>
> -#ifdef CONFIG_XIP_KERNEL
> -extern char _xiprom[], _exiprom[], __data_loc;
> -#endif
> -
> unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
> __page_aligned_bss;
> EXPORT_SYMBOL(empty_zero_page);
> @@ -227,10 +223,6 @@ static void __init setup_bootmem(void)
> #ifdef CONFIG_MMU
> static struct pt_alloc_ops pt_ops __initdata;
>
> -#ifdef CONFIG_XIP_KERNEL
> -#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
> -#endif
> -
> unsigned long riscv_pfn_base __ro_after_init;
> EXPORT_SYMBOL(riscv_pfn_base);
>
> @@ -242,6 +234,7 @@ pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
> static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
>
> #ifdef CONFIG_XIP_KERNEL
> +#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
> #define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
> #define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte))
> #define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir))
> @@ -445,6 +438,8 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
> }
>
> #ifdef CONFIG_XIP_KERNEL
> +extern char _xiprom[], _exiprom[], __data_loc;
> +
> /* called from head.S with MMU off */
> asmlinkage void __init __copy_data(void)
> {
Reviewed-by: Alexandre Ghiti <[email protected]>
Thanks,
Alex
On 12/3/21 06:03, Jisheng Zhang wrote:
> Except "pt_ops", other global vars when CONFIG_XIP_KERNEL=y is defined
> as below:
>
> |foo_type foo;
> |#ifdef CONFIG_XIP_KERNEL
> |#define foo (*(foo_type *)XIP_FIXUP(&foo))
> |#endif
>
> Follow the same approach for pt_ops to unify the style and to simplify the code.
_dtb_early_pa and _dtb_early_va have the same 'issue' too. I thought
there was a reason for those variables to be declared this way but I
can't find it :)
>
> Signed-off-by: Jisheng Zhang <[email protected]>
> ---
> arch/riscv/mm/init.c | 6 ++----
> 1 file changed, 2 insertions(+), 4 deletions(-)
>
> diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> index bd445ac778a8..4d4fcd7ef1a9 100644
> --- a/arch/riscv/mm/init.c
> +++ b/arch/riscv/mm/init.c
> @@ -227,12 +227,10 @@ static void __init setup_bootmem(void)
> }
>
> #ifdef CONFIG_MMU
> -static struct pt_alloc_ops _pt_ops __initdata;
> +static struct pt_alloc_ops pt_ops __initdata;
>
> #ifdef CONFIG_XIP_KERNEL
> -#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops))
> -#else
> -#define pt_ops _pt_ops
> +#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
> #endif
>
> unsigned long riscv_pfn_base __ro_after_init;
On Fri, 3 Dec 2021 09:57:35 +0100
Alexandre ghiti <[email protected]> wrote:
> On 12/3/21 06:03, Jisheng Zhang wrote:
> > Except "pt_ops", other global vars when CONFIG_XIP_KERNEL=y is defined
> > as below:
> >
> > |foo_type foo;
> > |#ifdef CONFIG_XIP_KERNEL
> > |#define foo (*(foo_type *)XIP_FIXUP(&foo))
> > |#endif
> >
> > Follow the same approach for pt_ops to unify the style and to simplify the code.
>
>
> _dtb_early_pa and _dtb_early_va have the same 'issue' too. I thought
> there was a reason for those variables to be declared this way but I
> can't find it :)
Hi Alexandre
I think I know the reason: dtb_early_pa|va are used not only in init.c
but also in other source files, so they are not static vars. If they
were defined in the unified style, the compiler would emit errors.
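A hedged sketch of the failure mode, assuming the fixup macro were moved
into a shared header so that the other files could see it:

	/* In a shared header: */
	extern void *dtb_early_va;
	#define dtb_early_va (*(void **)XIP_FIXUP(&dtb_early_va))

	/*
	 * init.c includes that header, so its definition line
	 *	void *dtb_early_va;
	 * expands through the macro into an invalid declarator and fails
	 * to build. Keeping the macro local to init.c instead would leave
	 * the other files accessing the un-fixed-up address.
	 */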
Thanks
>
>
> >
> > Signed-off-by: Jisheng Zhang <[email protected]>
> > ---
> > arch/riscv/mm/init.c | 6 ++----
> > 1 file changed, 2 insertions(+), 4 deletions(-)
> >
> > diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> > index bd445ac778a8..4d4fcd7ef1a9 100644
> > --- a/arch/riscv/mm/init.c
> > +++ b/arch/riscv/mm/init.c
> > @@ -227,12 +227,10 @@ static void __init setup_bootmem(void)
> > }
> >
> > #ifdef CONFIG_MMU
> > -static struct pt_alloc_ops _pt_ops __initdata;
> > +static struct pt_alloc_ops pt_ops __initdata;
> >
> > #ifdef CONFIG_XIP_KERNEL
> > -#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops))
> > -#else
> > -#define pt_ops _pt_ops
> > +#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
> > #endif
> >
> > unsigned long riscv_pfn_base __ro_after_init;
>
> _______________________________________________
> linux-riscv mailing list
> [email protected]
> http://lists.infradead.org/mailman/listinfo/linux-riscv
On Fri, 3 Dec 2021 09:33:12 +0100
Alexandre ghiti <[email protected]> wrote:
> On 12/3/21 06:03, Jisheng Zhang wrote:
> > Try our best to replace conditional compilation using
> > "#ifdef CONFIG_64BIT" with checks for "IS_ENABLED(CONFIG_64BIT)", to
> > simplify the code and to increase compile coverage.
> >
> > Signed-off-by: Jisheng Zhang <[email protected]>
> > ---
> > arch/riscv/mm/init.c | 38 +++++++++++++++++---------------------
> > 1 file changed, 17 insertions(+), 21 deletions(-)
> >
> > diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> > index 745f26a3b02e..bd445ac778a8 100644
> > --- a/arch/riscv/mm/init.c
> > +++ b/arch/riscv/mm/init.c
> > @@ -102,10 +102,9 @@ static void __init print_vm_layout(void)
> > (unsigned long)VMALLOC_END);
> > print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
> > (unsigned long)high_memory);
> > -#ifdef CONFIG_64BIT
> > - print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
> > - (unsigned long)ADDRESS_SPACE_END);
> > -#endif
> > + if (IS_ENABLED(CONFIG_64BIT))
> > + print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
> > + (unsigned long)ADDRESS_SPACE_END);
> > }
> > #else
> > static void print_vm_layout(void) { }
> > @@ -172,17 +171,16 @@ static void __init setup_bootmem(void)
> >
> > memblock_enforce_memory_limit(memory_limit);
> >
> > - /*
> > - * Reserve from the start of the kernel to the end of the kernel
> > - */
> > -#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
> > /*
> > * Make sure we align the reservation on PMD_SIZE since we will
> > * map the kernel in the linear mapping as read-only: we do not want
> > * any allocation to happen between _end and the next pmd aligned page.
> > */
> > - vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
> > -#endif
> > + if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
> > + vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
> > + /*
> > + * Reserve from the start of the kernel to the end of the kernel
> > + */
> > memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
> >
> >
> > @@ -190,7 +188,6 @@ static void __init setup_bootmem(void)
> > #ifndef CONFIG_XIP_KERNEL
> > phys_ram_base = memblock_start_of_DRAM();
> > #endif
> > -#ifndef CONFIG_64BIT
> > /*
> > * memblock allocator is not aware of the fact that last 4K bytes of
> > * the addressable memory can not be mapped because of IS_ERR_VALUE
> > @@ -200,10 +197,11 @@ static void __init setup_bootmem(void)
> > * address space is occupied by the kernel mapping then this check must
> > * be done as soon as the kernel mapping base address is determined.
> > */
> > - max_mapped_addr = __pa(~(ulong)0);
> > - if (max_mapped_addr == (phys_ram_end - 1))
> > - memblock_set_current_limit(max_mapped_addr - 4096);
> > -#endif
> > + if (!IS_ENABLED(CONFIG_64BIT)) {
> > + max_mapped_addr = __pa(~(ulong)0);
> > + if (max_mapped_addr == (phys_ram_end - 1))
> > + memblock_set_current_limit(max_mapped_addr - 4096);
> > + }
> >
> > min_low_pfn = PFN_UP(phys_ram_base);
> > max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
> > @@ -616,13 +614,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
> > BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
> > BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
> >
> > -#ifdef CONFIG_64BIT
> > /*
> > * The last 4K bytes of the addressable memory can not be mapped because
> > * of IS_ERR_VALUE macro.
> > */
> > - BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
> > -#endif
> > + if (IS_ENABLED(CONFIG_64BIT))
> > + BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
>
>
> For this one, I think we can just get rid of the condition since this is
> true for every kernel actually.
Thanks for pointing this out. Addressed in v2.
>
>
> >
> > pt_ops.alloc_pte = alloc_pte_early;
> > pt_ops.get_pte_virt = get_pte_virt_early;
> > @@ -735,10 +732,9 @@ static void __init setup_vm_final(void)
> > }
> > }
> >
> > -#ifdef CONFIG_64BIT
> > /* Map the kernel */
> > - create_kernel_page_table(swapper_pg_dir, false);
> > -#endif
> > + if (IS_ENABLED(CONFIG_64BIT))
> > + create_kernel_page_table(swapper_pg_dir, false);
>
>
> Wouldn't it be better to introduce a create_kernel_page_table function
> that does nothing for !CONFIG_64BIT?
>
If so, we would have something like (a sketch using the two-argument
signature seen at the call site):

	#ifdef CONFIG_64BIT
	static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
	{
		/* ... build the kernel mapping ... */
	}
	#else
	static void __init create_kernel_page_table(pgd_t *pgdir, bool early) {}
	#endif

Since we already have different create_kernel_page_table() versions for
XIP and !XIP, the code would become even more complex.
Thanks for your code review