2021-06-03 08:30:09

by Alexandre Ghiti

[permalink] [raw]
Subject: [PATCH v3 0/3] riscv: Map the kernel with correct permissions the first time

The kernel permissions are fixed after the kernel page table is created:
fix that by mapping the kernel 'correctly' the first time.

Patch 1 is a cleanup patch on which the next patches are based, though it is
not strictly necessary for this patchset.

Patch 2 introduces a new helper to set kernel mapping permissions while
avoiding all the casts when using set_memory_* API.

Patch 3 is the bulk of this work and deals with mapping the kernel with
the right permissions.

Changes in v3:
* Add a patch that factorizes kernel address conversions
* Add a helper called set_kernel_memory in its own patch, as suggested by
Christoph
* Prefer IS_ENABLED over #ifdef, as suggested by Christoph
* Split overly long lines, as suggested by Christoph
* Simplify kernel mapping by mapping ALL text as readonly and taking advantage
of already present code that enables write for init text before
free_initmem_default.

Changes in v2:
* Rebased on top of for-next (and "riscv: mm: fix build errors caused by
mk_pmd()")
* Get rid of protect_kernel_linear_mapping_text_rodata as suggested by
Jisheng
* Improve code in general compared to previous RFC

Alexandre Ghiti (3):
riscv: Factorize xip and !xip kernel address conversion macros
riscv: Introduce set_kernel_memory helper
riscv: Map the kernel with correct permissions the first time

arch/riscv/include/asm/page.h | 27 ++++----
arch/riscv/include/asm/pgtable.h | 2 +
arch/riscv/include/asm/sections.h | 17 +++++
arch/riscv/include/asm/set_memory.h | 13 ++--
arch/riscv/kernel/setup.c | 11 +--
arch/riscv/mm/init.c | 102 ++++++++++++----------------
arch/riscv/mm/pageattr.c | 10 +++
7 files changed, 95 insertions(+), 87 deletions(-)

--
2.30.2


2021-06-03 08:31:03

by Alexandre Ghiti

[permalink] [raw]
Subject: [PATCH v3 1/3] riscv: Factorize xip and !xip kernel address conversion macros

To simplify the kernel address conversion code, make the definitions of
kernel_mapping_pa_to_va and kernel_mapping_va_to_pa compatible for both xip
and !xip kernels by defining XIP_OFFSET to 0 in !xip kernels.

Signed-off-by: Alexandre Ghiti <[email protected]>
---
arch/riscv/include/asm/page.h | 14 +++-----------
arch/riscv/include/asm/pgtable.h | 2 ++
2 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 6a7761c86ec2..6e004d8fda4d 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -93,9 +93,7 @@ extern unsigned long va_pa_offset;
#ifdef CONFIG_64BIT
extern unsigned long va_kernel_pa_offset;
#endif
-#ifdef CONFIG_XIP_KERNEL
extern unsigned long va_kernel_xip_pa_offset;
-#endif
extern unsigned long pfn_base;
#define ARCH_PFN_OFFSET (pfn_base)
#else
@@ -103,6 +101,7 @@ extern unsigned long pfn_base;
#ifdef CONFIG_64BIT
#define va_kernel_pa_offset 0
#endif
+#define va_kernel_xip_pa_offset 0
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */

@@ -110,29 +109,22 @@ extern unsigned long kernel_virt_addr;

#ifdef CONFIG_64BIT
#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
-#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y) ({ \
unsigned long _y = y; \
(_y >= CONFIG_PHYS_RAM_BASE) ? \
(void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
(void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
})
-#else
-#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
-#endif
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)

#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
-#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y) ({ \
unsigned long _y = y; \
(_y < kernel_virt_addr + XIP_OFFSET) ? \
((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
})
-#else
-#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
-#endif
+
#define __va_to_pa_nodebug(x) ({ \
unsigned long _x = x; \
(_x < kernel_virt_addr) ? \
@@ -141,7 +133,7 @@ extern unsigned long kernel_virt_addr;
#else
#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
-#endif
+#endif /* CONFIG_64BIT */

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index bde8ce3bfe7c..d98e931a31e5 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -77,6 +77,8 @@

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET SZ_8M
+#else
+#define XIP_OFFSET 0
#endif

#ifndef __ASSEMBLY__
--
2.30.2

2021-06-03 08:33:51

by Alexandre Ghiti

[permalink] [raw]
Subject: [PATCH v3 2/3] riscv: Introduce set_kernel_memory helper

This helper should be used for setting permissions on the kernel
mapping as it takes pointers as arguments and thus avoids the explicit casts
to unsigned long needed for the set_memory_* API.

Suggested-by: Christoph Hellwig <[email protected]>
Signed-off-by: Alexandre Ghiti <[email protected]>
---
arch/riscv/include/asm/set_memory.h | 5 +++++
arch/riscv/mm/pageattr.c | 10 ++++++++++
2 files changed, 15 insertions(+)

diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 086f757e8ba3..7a411fed9e0e 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -16,6 +16,7 @@ int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_rw_nx(unsigned long addr, int numpages);
+int set_kernel_memory(char *start, char *end, int (*set_memory)(unsigned long, int));
void protect_kernel_text_data(void);
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
@@ -24,6 +25,10 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
static inline void protect_kernel_text_data(void) {}
static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
+static inline int set_kernel_memory(char *start, char *end, int (*set_memory)(unsigned long, int))
+{
+ return 0;
+}
#endif

#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 5e49e4b4a4cc..c47ac6a432ac 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -156,6 +156,16 @@ int set_memory_nx(unsigned long addr, int numpages)
return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

+int set_kernel_memory(char *startp, char *endp,
+ int (*set_memory)(unsigned long start, int num_pages))
+{
+ unsigned long start = (unsigned long)startp;
+ unsigned long end = (unsigned long)endp;
+ int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;
+
+ return set_memory(start, num_pages);
+}
+
int set_direct_map_invalid_noflush(struct page *page)
{
int ret;
--
2.30.2

2021-06-03 08:34:57

by Alexandre Ghiti

[permalink] [raw]
Subject: [PATCH v3 3/3] riscv: Map the kernel with correct permissions the first time

For 64b kernels, we map the entire kernel with write and execute permissions
and afterwards remove writability from text and executability from data.

For 32b kernels, the kernel mapping resides in the linear mapping, so we
map the entire linear mapping as writable and executable and afterwards we
remove those properties from unused memory and from the kernel mapping as
described above.

Change this behavior to directly map the kernel with correct permissions
and avoid going through the whole mapping to fix the permissions.

At the same time, this fixes an issue introduced by commit 2bfc6cd81bd1
("riscv: Move kernel mapping outside of linear mapping") as reported
here https://github.com/starfive-tech/linux/issues/17.

Signed-off-by: Alexandre Ghiti <[email protected]>
---
arch/riscv/include/asm/page.h | 13 +++-
arch/riscv/include/asm/sections.h | 17 +++++
arch/riscv/include/asm/set_memory.h | 8 ---
arch/riscv/kernel/setup.c | 11 +--
arch/riscv/mm/init.c | 102 ++++++++++++----------------
5 files changed, 75 insertions(+), 76 deletions(-)

diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 6e004d8fda4d..349e4f9874cc 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -95,6 +95,7 @@ extern unsigned long va_kernel_pa_offset;
#endif
extern unsigned long va_kernel_xip_pa_offset;
extern unsigned long pfn_base;
+extern uintptr_t load_sz;
#define ARCH_PFN_OFFSET (pfn_base)
#else
#define va_pa_offset 0
@@ -108,6 +109,11 @@ extern unsigned long pfn_base;
extern unsigned long kernel_virt_addr;

#ifdef CONFIG_64BIT
+#define is_kernel_mapping(x) \
+ ((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x) \
+ ((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
+
#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
#define kernel_mapping_pa_to_va(y) ({ \
unsigned long _y = y; \
@@ -127,10 +133,15 @@ extern unsigned long kernel_virt_addr;

#define __va_to_pa_nodebug(x) ({ \
unsigned long _x = x; \
- (_x < kernel_virt_addr) ? \
+ is_linear_mapping(_x) ? \
linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
})
#else
+#define is_kernel_mapping(x) \
+ ((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x) \
+ ((x) >= PAGE_OFFSET)
+
#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
#endif /* CONFIG_64BIT */
diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
index 8a303fb1ee3b..32336e8a17cb 100644
--- a/arch/riscv/include/asm/sections.h
+++ b/arch/riscv/include/asm/sections.h
@@ -6,6 +6,7 @@
#define __ASM_SECTIONS_H

#include <asm-generic/sections.h>
+#include <linux/mm.h>

extern char _start[];
extern char _start_kernel[];
@@ -13,4 +14,20 @@ extern char __init_data_begin[], __init_data_end[];
extern char __init_text_begin[], __init_text_end[];
extern char __alt_start[], __alt_end[];

+static inline bool is_va_kernel_text(uintptr_t va)
+{
+ uintptr_t start = (uintptr_t)_start;
+ uintptr_t end = (uintptr_t)__init_data_begin;
+
+ return va >= start && va < end;
+}
+
+static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
+{
+ uintptr_t start = (uintptr_t)lm_alias(_start);
+ uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
+
+ return va >= start && va < end;
+}
+
#endif /* __ASM_SECTIONS_H */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 7a411fed9e0e..c0b41ed218e1 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -17,13 +17,11 @@ int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_rw_nx(unsigned long addr, int numpages);
int set_kernel_memory(char *start, char *end, int (*set_memory)(unsigned long, int));
-void protect_kernel_text_data(void);
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
-static inline void protect_kernel_text_data(void) {}
static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
static inline int set_kernel_memory(char *start, char *end, int (*set_memory)(unsigned long, int))
{
@@ -31,12 +29,6 @@ static inline int set_kernel_memory(char *start, char *end, int (*set_memory)(un
}
#endif

-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void);
-#else
-static inline void protect_kernel_linear_mapping_text_rodata(void) {}
-#endif
-
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 4db4d0b5911f..b3d0895ce5f7 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -290,11 +290,6 @@ void __init setup_arch(char **cmdline_p)
init_resources();
sbi_init();

- if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
- protect_kernel_text_data();
- protect_kernel_linear_mapping_text_rodata();
- }
-
#ifdef CONFIG_SWIOTLB
swiotlb_init(1);
#endif
@@ -333,11 +328,9 @@ subsys_initcall(topology_init);

void free_initmem(void)
{
- unsigned long init_begin = (unsigned long)__init_begin;
- unsigned long init_end = (unsigned long)__init_end;
-
if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
- set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+ set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
+ IS_ENABLED(CONFIG_64BIT) ? set_memory_rw : set_memory_rw_nx);

free_initmem_default(POISON_FREE_INITMEM);
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 2d80088f33d5..6b70c345cfc4 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -425,6 +425,42 @@ asmlinkage void __init __copy_data(void)
}
#endif

+#ifdef CONFIG_STRICT_KERNEL_RWX
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+ if (is_va_kernel_text(va))
+ return PAGE_KERNEL_READ_EXEC;
+
+ /*
+ * In 64b kernel, the kernel mapping is outside the linear mapping so we
+ * must protect its linear mapping alias from being executed and written.
+ * And rodata section is marked readonly in mark_rodata_ro.
+ */
+ if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
+ return PAGE_KERNEL_READ;
+
+ return PAGE_KERNEL;
+}
+
+void mark_rodata_ro(void)
+{
+ set_kernel_memory(__start_rodata, _data, set_memory_ro);
+ if (IS_ENABLED(CONFIG_64BIT))
+ set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
+ set_memory_ro);
+
+ debug_checkwx();
+}
+#else
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+ if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
+ return PAGE_KERNEL;
+
+ return PAGE_KERNEL_EXEC;
+}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
/*
* setup_vm() is called from head.S with MMU-off.
*
@@ -454,7 +490,8 @@ uintptr_t xiprom, xiprom_sz;
#define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
#define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom)))

-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+ __always_unused bool early)
{
uintptr_t va, end_va;

@@ -473,7 +510,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
map_size, PAGE_KERNEL);
}
#else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size, bool early)
{
uintptr_t va, end_va;

@@ -481,7 +518,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
for (va = kernel_virt_addr; va < end_va; va += map_size)
create_pgd_mapping(pgdir, va,
load_pa + (va - kernel_virt_addr),
- map_size, PAGE_KERNEL_EXEC);
+ map_size, early ? PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
#endif

@@ -558,7 +595,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
* us to reach paging_init(). We map all memory banks later
* in setup_vm_final() below.
*/
- create_kernel_page_table(early_pg_dir, map_size);
+ create_kernel_page_table(early_pg_dir, map_size, true);

#ifndef __PAGETABLE_PMD_FOLDED
/* Setup early PMD for DTB */
@@ -634,22 +671,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#endif
}

-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void protect_kernel_linear_mapping_text_rodata(void)
-{
- unsigned long text_start = (unsigned long)lm_alias(_start);
- unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
- unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
- unsigned long data_start = (unsigned long)lm_alias(_data);
-
- set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
- set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-
- set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
- set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-}
-#endif
-
static void __init setup_vm_final(void)
{
uintptr_t va, map_size;
@@ -682,21 +703,15 @@ static void __init setup_vm_final(void)
map_size = best_map_size(start, end - start);
for (pa = start; pa < end; pa += map_size) {
va = (uintptr_t)__va(pa);
- create_pgd_mapping(swapper_pg_dir, va, pa,
- map_size,
-#ifdef CONFIG_64BIT
- PAGE_KERNEL
-#else
- PAGE_KERNEL_EXEC
-#endif
- );

+ create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
+ pgprot_from_va(va));
}
}

#ifdef CONFIG_64BIT
/* Map the kernel */
- create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+ create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
#endif

/* Clear fixmap PTE and PMD mappings */
@@ -727,35 +742,6 @@ static inline void setup_vm_final(void)
}
#endif /* CONFIG_MMU */

-#ifdef CONFIG_STRICT_KERNEL_RWX
-void __init protect_kernel_text_data(void)
-{
- unsigned long text_start = (unsigned long)_start;
- unsigned long init_text_start = (unsigned long)__init_text_begin;
- unsigned long init_data_start = (unsigned long)__init_data_begin;
- unsigned long rodata_start = (unsigned long)__start_rodata;
- unsigned long data_start = (unsigned long)_data;
- unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
-
- set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
- set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
- set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
- /* rodata section is marked readonly in mark_rodata_ro */
- set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
- set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
- unsigned long rodata_start = (unsigned long)__start_rodata;
- unsigned long data_start = (unsigned long)_data;
-
- set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-
- debug_checkwx();
-}
-#endif
-
#ifdef CONFIG_KEXEC_CORE
/*
* reserve_crashkernel() - reserves memory for crash kernel
--
2.30.2

2021-06-03 11:40:37

by Anup Patel

[permalink] [raw]
Subject: Re: [PATCH v3 2/3] riscv: Introduce set_kernel_memory helper

On Thu, Jun 3, 2021 at 2:00 PM Alexandre Ghiti <[email protected]> wrote:
>
> This helper should be used for setting permissions to the kernel
> mapping as it takes pointers as arguments and then avoids explicit cast
> to unsigned long needed for the set_memory_* API.
>
> Suggested-by: Christoph Hellwig <[email protected]>
> Signed-off-by: Alexandre Ghiti <[email protected]>

Looks good to me.

Reviewed-by: Anup Patel <[email protected]>

Regards,
Anup

> ---
> arch/riscv/include/asm/set_memory.h | 5 +++++
> arch/riscv/mm/pageattr.c | 10 ++++++++++
> 2 files changed, 15 insertions(+)
>
> diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
> index 086f757e8ba3..7a411fed9e0e 100644
> --- a/arch/riscv/include/asm/set_memory.h
> +++ b/arch/riscv/include/asm/set_memory.h
> @@ -16,6 +16,7 @@ int set_memory_rw(unsigned long addr, int numpages);
> int set_memory_x(unsigned long addr, int numpages);
> int set_memory_nx(unsigned long addr, int numpages);
> int set_memory_rw_nx(unsigned long addr, int numpages);
> +int set_kernel_memory(char *start, char *end, int (*set_memory)(unsigned long, int));
> void protect_kernel_text_data(void);
> #else
> static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
> @@ -24,6 +25,10 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
> static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
> static inline void protect_kernel_text_data(void) {}
> static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
> +static inline int set_kernel_memory(char *start, char *end, int (*set_memory)(unsigned long, int))
> +{
> + return 0;
> +}
> #endif
>
> #if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
> diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
> index 5e49e4b4a4cc..c47ac6a432ac 100644
> --- a/arch/riscv/mm/pageattr.c
> +++ b/arch/riscv/mm/pageattr.c
> @@ -156,6 +156,16 @@ int set_memory_nx(unsigned long addr, int numpages)
> return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
> }
>
> +int set_kernel_memory(char *startp, char *endp,
> + int (*set_memory)(unsigned long start, int num_pages))
> +{
> + unsigned long start = (unsigned long)startp;
> + unsigned long end = (unsigned long)endp;
> + int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;
> +
> + return set_memory(start, num_pages);
> +}
> +
> int set_direct_map_invalid_noflush(struct page *page)
> {
> int ret;
> --
> 2.30.2
>

2021-06-03 11:42:24

by Anup Patel

[permalink] [raw]
Subject: Re: [PATCH v3 1/3] riscv: Factorize xip and !xip kernel address conversion macros

On Thu, Jun 3, 2021 at 1:59 PM Alexandre Ghiti <[email protected]> wrote:
>
> To simplify the kernel address conversion code, make the same definition of
> kernel_mapping_pa_to_va and kernel_mapping_va_to_pa compatible for both xip
> and !xip kernel by defining XIP_OFFSET to 0 in !xip kernel.
>
> Signed-off-by: Alexandre Ghiti <[email protected]>

I think subject of the patch should be:
"Simplify address conversion macros for xip and !xip kernel"

Otherwise it looks good to me.

Reviewed-by: Anup Patel <[email protected]>

Regards,
Anup

> ---
> arch/riscv/include/asm/page.h | 14 +++-----------
> arch/riscv/include/asm/pgtable.h | 2 ++
> 2 files changed, 5 insertions(+), 11 deletions(-)
>
> diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
> index 6a7761c86ec2..6e004d8fda4d 100644
> --- a/arch/riscv/include/asm/page.h
> +++ b/arch/riscv/include/asm/page.h
> @@ -93,9 +93,7 @@ extern unsigned long va_pa_offset;
> #ifdef CONFIG_64BIT
> extern unsigned long va_kernel_pa_offset;
> #endif
> -#ifdef CONFIG_XIP_KERNEL
> extern unsigned long va_kernel_xip_pa_offset;
> -#endif
> extern unsigned long pfn_base;
> #define ARCH_PFN_OFFSET (pfn_base)
> #else
> @@ -103,6 +101,7 @@ extern unsigned long pfn_base;
> #ifdef CONFIG_64BIT
> #define va_kernel_pa_offset 0
> #endif
> +#define va_kernel_xip_pa_offset 0
> #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
> #endif /* CONFIG_MMU */
>
> @@ -110,29 +109,22 @@ extern unsigned long kernel_virt_addr;
>
> #ifdef CONFIG_64BIT
> #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
> -#ifdef CONFIG_XIP_KERNEL
> #define kernel_mapping_pa_to_va(y) ({ \
> unsigned long _y = y; \
> (_y >= CONFIG_PHYS_RAM_BASE) ? \
> (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
> (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
> })
> -#else
> -#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
> -#endif
> #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
>
> #define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
> -#ifdef CONFIG_XIP_KERNEL
> #define kernel_mapping_va_to_pa(y) ({ \
> unsigned long _y = y; \
> (_y < kernel_virt_addr + XIP_OFFSET) ? \
> ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
> ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
> })
> -#else
> -#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
> -#endif
> +
> #define __va_to_pa_nodebug(x) ({ \
> unsigned long _x = x; \
> (_x < kernel_virt_addr) ? \
> @@ -141,7 +133,7 @@ extern unsigned long kernel_virt_addr;
> #else
> #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
> #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
> -#endif
> +#endif /* CONFIG_64BIT */
>
> #ifdef CONFIG_DEBUG_VIRTUAL
> extern phys_addr_t __virt_to_phys(unsigned long x);
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index bde8ce3bfe7c..d98e931a31e5 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -77,6 +77,8 @@
>
> #ifdef CONFIG_XIP_KERNEL
> #define XIP_OFFSET SZ_8M
> +#else
> +#define XIP_OFFSET 0
> #endif
>
> #ifndef __ASSEMBLY__
> --
> 2.30.2
>

2021-06-03 11:48:55

by Anup Patel

[permalink] [raw]
Subject: Re: [PATCH v3 3/3] riscv: Map the kernel with correct permissions the first time

On Thu, Jun 3, 2021 at 2:01 PM Alexandre Ghiti <[email protected]> wrote:
>
> For 64b kernels, we map all the kernel with write and execute permissions
> and afterwards remove writability from text and executability from data.
>
> For 32b kernels, the kernel mapping resides in the linear mapping, so we
> map all the linear mapping as writable and executable and afterwards we
> remove those properties for unused memory and kernel mapping as
> described above.
>
> Change this behavior to directly map the kernel with correct permissions
> and avoid going through the whole mapping to fix the permissions.
>
> At the same time, this fixes an issue introduced by commit 2bfc6cd81bd1
> ("riscv: Move kernel mapping outside of linear mapping") as reported
> here https://github.com/starfive-tech/linux/issues/17.
>
> Signed-off-by: Alexandre Ghiti <[email protected]>

Looks good to me.

Reviewed-by: Anup Patel <[email protected]>

Regards,
Anup

> ---
> arch/riscv/include/asm/page.h | 13 +++-
> arch/riscv/include/asm/sections.h | 17 +++++
> arch/riscv/include/asm/set_memory.h | 8 ---
> arch/riscv/kernel/setup.c | 11 +--
> arch/riscv/mm/init.c | 102 ++++++++++++----------------
> 5 files changed, 75 insertions(+), 76 deletions(-)
>
> diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
> index 6e004d8fda4d..349e4f9874cc 100644
> --- a/arch/riscv/include/asm/page.h
> +++ b/arch/riscv/include/asm/page.h
> @@ -95,6 +95,7 @@ extern unsigned long va_kernel_pa_offset;
> #endif
> extern unsigned long va_kernel_xip_pa_offset;
> extern unsigned long pfn_base;
> +extern uintptr_t load_sz;
> #define ARCH_PFN_OFFSET (pfn_base)
> #else
> #define va_pa_offset 0
> @@ -108,6 +109,11 @@ extern unsigned long pfn_base;
> extern unsigned long kernel_virt_addr;
>
> #ifdef CONFIG_64BIT
> +#define is_kernel_mapping(x) \
> + ((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
> +#define is_linear_mapping(x) \
> + ((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
> +
> #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
> #define kernel_mapping_pa_to_va(y) ({ \
> unsigned long _y = y; \
> @@ -127,10 +133,15 @@ extern unsigned long kernel_virt_addr;
>
> #define __va_to_pa_nodebug(x) ({ \
> unsigned long _x = x; \
> - (_x < kernel_virt_addr) ? \
> + is_linear_mapping(_x) ? \
> linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
> })
> #else
> +#define is_kernel_mapping(x) \
> + ((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
> +#define is_linear_mapping(x) \
> + ((x) >= PAGE_OFFSET)
> +
> #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
> #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
> #endif /* CONFIG_64BIT */
> diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
> index 8a303fb1ee3b..32336e8a17cb 100644
> --- a/arch/riscv/include/asm/sections.h
> +++ b/arch/riscv/include/asm/sections.h
> @@ -6,6 +6,7 @@
> #define __ASM_SECTIONS_H
>
> #include <asm-generic/sections.h>
> +#include <linux/mm.h>
>
> extern char _start[];
> extern char _start_kernel[];
> @@ -13,4 +14,20 @@ extern char __init_data_begin[], __init_data_end[];
> extern char __init_text_begin[], __init_text_end[];
> extern char __alt_start[], __alt_end[];
>
> +static inline bool is_va_kernel_text(uintptr_t va)
> +{
> + uintptr_t start = (uintptr_t)_start;
> + uintptr_t end = (uintptr_t)__init_data_begin;
> +
> + return va >= start && va < end;
> +}
> +
> +static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
> +{
> + uintptr_t start = (uintptr_t)lm_alias(_start);
> + uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
> +
> + return va >= start && va < end;
> +}
> +
> #endif /* __ASM_SECTIONS_H */
> diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
> index 7a411fed9e0e..c0b41ed218e1 100644
> --- a/arch/riscv/include/asm/set_memory.h
> +++ b/arch/riscv/include/asm/set_memory.h
> @@ -17,13 +17,11 @@ int set_memory_x(unsigned long addr, int numpages);
> int set_memory_nx(unsigned long addr, int numpages);
> int set_memory_rw_nx(unsigned long addr, int numpages);
> int set_kernel_memory(char *start, char *end, int (*set_memory)(unsigned long, int));
> -void protect_kernel_text_data(void);
> #else
> static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
> static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
> static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
> static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
> -static inline void protect_kernel_text_data(void) {}
> static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
> static inline int set_kernel_memory(char *start, char *end, int (*set_memory)(unsigned long, int))
> {
> @@ -31,12 +29,6 @@ static inline int set_kernel_memory(char *start, char *end, int (*set_memory)(un
> }
> #endif
>
> -#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
> -void protect_kernel_linear_mapping_text_rodata(void);
> -#else
> -static inline void protect_kernel_linear_mapping_text_rodata(void) {}
> -#endif
> -
> int set_direct_map_invalid_noflush(struct page *page);
> int set_direct_map_default_noflush(struct page *page);
> bool kernel_page_present(struct page *page);
> diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
> index 4db4d0b5911f..b3d0895ce5f7 100644
> --- a/arch/riscv/kernel/setup.c
> +++ b/arch/riscv/kernel/setup.c
> @@ -290,11 +290,6 @@ void __init setup_arch(char **cmdline_p)
> init_resources();
> sbi_init();
>
> - if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
> - protect_kernel_text_data();
> - protect_kernel_linear_mapping_text_rodata();
> - }
> -
> #ifdef CONFIG_SWIOTLB
> swiotlb_init(1);
> #endif
> @@ -333,11 +328,9 @@ subsys_initcall(topology_init);
>
> void free_initmem(void)
> {
> - unsigned long init_begin = (unsigned long)__init_begin;
> - unsigned long init_end = (unsigned long)__init_end;
> -
> if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
> - set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
> + set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
> + IS_ENABLED(CONFIG_64BIT) ? set_memory_rw : set_memory_rw_nx);
>
> free_initmem_default(POISON_FREE_INITMEM);
> }
> diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> index 2d80088f33d5..6b70c345cfc4 100644
> --- a/arch/riscv/mm/init.c
> +++ b/arch/riscv/mm/init.c
> @@ -425,6 +425,42 @@ asmlinkage void __init __copy_data(void)
> }
> #endif
>
> +#ifdef CONFIG_STRICT_KERNEL_RWX
> +static __init pgprot_t pgprot_from_va(uintptr_t va)
> +{
> + if (is_va_kernel_text(va))
> + return PAGE_KERNEL_READ_EXEC;
> +
> + /*
> + * In 64b kernel, the kernel mapping is outside the linear mapping so we
> + * must protect its linear mapping alias from being executed and written.
> + * And rodata section is marked readonly in mark_rodata_ro.
> + */
> + if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
> + return PAGE_KERNEL_READ;
> +
> + return PAGE_KERNEL;
> +}
> +
> +void mark_rodata_ro(void)
> +{
> + set_kernel_memory(__start_rodata, _data, set_memory_ro);
> + if (IS_ENABLED(CONFIG_64BIT))
> + set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
> + set_memory_ro);
> +
> + debug_checkwx();
> +}
> +#else
> +static __init pgprot_t pgprot_from_va(uintptr_t va)
> +{
> + if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
> + return PAGE_KERNEL;
> +
> + return PAGE_KERNEL_EXEC;
> +}
> +#endif /* CONFIG_STRICT_KERNEL_RWX */
> +
> /*
> * setup_vm() is called from head.S with MMU-off.
> *
> @@ -454,7 +490,8 @@ uintptr_t xiprom, xiprom_sz;
> #define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
> #define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom)))
>
> -static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
> +static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
> + __always_unused bool early)
> {
> uintptr_t va, end_va;
>
> @@ -473,7 +510,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
> map_size, PAGE_KERNEL);
> }
> #else
> -static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
> +static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size, bool early)
> {
> uintptr_t va, end_va;
>
> @@ -481,7 +518,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
> for (va = kernel_virt_addr; va < end_va; va += map_size)
> create_pgd_mapping(pgdir, va,
> load_pa + (va - kernel_virt_addr),
> - map_size, PAGE_KERNEL_EXEC);
> + map_size, early ? PAGE_KERNEL_EXEC : pgprot_from_va(va));
> }
> #endif
>
> @@ -558,7 +595,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
> * us to reach paging_init(). We map all memory banks later
> * in setup_vm_final() below.
> */
> - create_kernel_page_table(early_pg_dir, map_size);
> + create_kernel_page_table(early_pg_dir, map_size, true);
>
> #ifndef __PAGETABLE_PMD_FOLDED
> /* Setup early PMD for DTB */
> @@ -634,22 +671,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
> #endif
> }
>
> -#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
> -void protect_kernel_linear_mapping_text_rodata(void)
> -{
> - unsigned long text_start = (unsigned long)lm_alias(_start);
> - unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
> - unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
> - unsigned long data_start = (unsigned long)lm_alias(_data);
> -
> - set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
> - set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
> -
> - set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
> - set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
> -}
> -#endif
> -
> static void __init setup_vm_final(void)
> {
> uintptr_t va, map_size;
> @@ -682,21 +703,15 @@ static void __init setup_vm_final(void)
> map_size = best_map_size(start, end - start);
> for (pa = start; pa < end; pa += map_size) {
> va = (uintptr_t)__va(pa);
> - create_pgd_mapping(swapper_pg_dir, va, pa,
> - map_size,
> -#ifdef CONFIG_64BIT
> - PAGE_KERNEL
> -#else
> - PAGE_KERNEL_EXEC
> -#endif
> - );
>
> + create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
> + pgprot_from_va(va));
> }
> }
>
> #ifdef CONFIG_64BIT
> /* Map the kernel */
> - create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
> + create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
> #endif
>
> /* Clear fixmap PTE and PMD mappings */
> @@ -727,35 +742,6 @@ static inline void setup_vm_final(void)
> }
> #endif /* CONFIG_MMU */
>
> -#ifdef CONFIG_STRICT_KERNEL_RWX
> -void __init protect_kernel_text_data(void)
> -{
> - unsigned long text_start = (unsigned long)_start;
> - unsigned long init_text_start = (unsigned long)__init_text_begin;
> - unsigned long init_data_start = (unsigned long)__init_data_begin;
> - unsigned long rodata_start = (unsigned long)__start_rodata;
> - unsigned long data_start = (unsigned long)_data;
> - unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
> -
> - set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
> - set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
> - set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
> - /* rodata section is marked readonly in mark_rodata_ro */
> - set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
> - set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
> -}
> -
> -void mark_rodata_ro(void)
> -{
> - unsigned long rodata_start = (unsigned long)__start_rodata;
> - unsigned long data_start = (unsigned long)_data;
> -
> - set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
> -
> - debug_checkwx();
> -}
> -#endif
> -
> #ifdef CONFIG_KEXEC_CORE
> /*
> * reserve_crashkernel() - reserves memory for crash kernel
> --
> 2.30.2
>

2021-06-03 12:38:22

by Jisheng Zhang

[permalink] [raw]
Subject: Re: [PATCH v3 1/3] riscv: Factorize xip and !xip kernel address conversion macros

On Thu, 3 Jun 2021 10:27:47 +0200
Alexandre Ghiti <[email protected]> wrote:

> To simplify the kernel address conversion code, make the same definition of
> kernel_mapping_pa_to_va and kernel_mapping_va_to_pa compatible for both xip
> and !xip kernel by defining XIP_OFFSET to 0 in !xip kernel.
>
> Signed-off-by: Alexandre Ghiti <[email protected]>
> ---
> arch/riscv/include/asm/page.h | 14 +++-----------
> arch/riscv/include/asm/pgtable.h | 2 ++
> 2 files changed, 5 insertions(+), 11 deletions(-)
>
> diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
> index 6a7761c86ec2..6e004d8fda4d 100644
> --- a/arch/riscv/include/asm/page.h
> +++ b/arch/riscv/include/asm/page.h
> @@ -93,9 +93,7 @@ extern unsigned long va_pa_offset;
> #ifdef CONFIG_64BIT
> extern unsigned long va_kernel_pa_offset;
> #endif
> -#ifdef CONFIG_XIP_KERNEL
> extern unsigned long va_kernel_xip_pa_offset;
> -#endif
> extern unsigned long pfn_base;
> #define ARCH_PFN_OFFSET (pfn_base)
> #else
> @@ -103,6 +101,7 @@ extern unsigned long pfn_base;
> #ifdef CONFIG_64BIT
> #define va_kernel_pa_offset 0
> #endif
> +#define va_kernel_xip_pa_offset 0
> #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
> #endif /* CONFIG_MMU */
>
> @@ -110,29 +109,22 @@ extern unsigned long kernel_virt_addr;
>
> #ifdef CONFIG_64BIT
> #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
> -#ifdef CONFIG_XIP_KERNEL
> #define kernel_mapping_pa_to_va(y) ({ \
> unsigned long _y = y; \
> (_y >= CONFIG_PHYS_RAM_BASE) ?

This CONFIG_PHYS_RAM_BASE is only available for XIP, could result in a
compiler error for !XIP?

I'm also concerned with the unnecessary overhead of kernel_mapping_pa_to_va()
for the !XIP case: there's an "if" condition branch, and an extra symbol: va_kernel_xip_pa_offset

> (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
> (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
> })
> -#else
> -#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
> -#endif
> #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
>
> #define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
> -#ifdef CONFIG_XIP_KERNEL
> #define kernel_mapping_va_to_pa(y) ({ \
> unsigned long _y = y; \
> (_y < kernel_virt_addr + XIP_OFFSET) ? \
> ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
> ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
> })

Similar as kernel_mapping_pa_to_va(), an overhead of "if" condition branch
for !XIP and extra va_kernel_xip_pa_offset symbol.

> -#else
> -#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
> -#endif
> +
> #define __va_to_pa_nodebug(x) ({ \
> unsigned long _x = x; \
> (_x < kernel_virt_addr) ? \
> @@ -141,7 +133,7 @@ extern unsigned long kernel_virt_addr;
> #else
> #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
> #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
> -#endif
> +#endif /* CONFIG_64BIT */
>
> #ifdef CONFIG_DEBUG_VIRTUAL
> extern phys_addr_t __virt_to_phys(unsigned long x);
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index bde8ce3bfe7c..d98e931a31e5 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -77,6 +77,8 @@
>
> #ifdef CONFIG_XIP_KERNEL
> #define XIP_OFFSET SZ_8M
> +#else
> +#define XIP_OFFSET 0
> #endif
>
> #ifndef __ASSEMBLY__


2021-06-03 12:58:15

by Jisheng Zhang

[permalink] [raw]
Subject: Re: [PATCH v3 1/3] riscv: Factorize xip and !xip kernel address conversion macros

On Thu, 3 Jun 2021 20:27:48 +0800
Jisheng Zhang <[email protected]> wrote:

> On Thu, 3 Jun 2021 10:27:47 +0200
> Alexandre Ghiti <[email protected]> wrote:
>
> > To simplify the kernel address conversion code, make the same definition of
> > kernel_mapping_pa_to_va and kernel_mapping_va_to_pa compatible for both xip
> > and !xip kernel by defining XIP_OFFSET to 0 in !xip kernel.
> >
> > Signed-off-by: Alexandre Ghiti <[email protected]>
> > ---
> > arch/riscv/include/asm/page.h | 14 +++-----------
> > arch/riscv/include/asm/pgtable.h | 2 ++
> > 2 files changed, 5 insertions(+), 11 deletions(-)
> >
> > diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
> > index 6a7761c86ec2..6e004d8fda4d 100644
> > --- a/arch/riscv/include/asm/page.h
> > +++ b/arch/riscv/include/asm/page.h
> > @@ -93,9 +93,7 @@ extern unsigned long va_pa_offset;
> > #ifdef CONFIG_64BIT
> > extern unsigned long va_kernel_pa_offset;
> > #endif
> > -#ifdef CONFIG_XIP_KERNEL
> > extern unsigned long va_kernel_xip_pa_offset;
> > -#endif
> > extern unsigned long pfn_base;
> > #define ARCH_PFN_OFFSET (pfn_base)
> > #else
> > @@ -103,6 +101,7 @@ extern unsigned long pfn_base;
> > #ifdef CONFIG_64BIT
> > #define va_kernel_pa_offset 0
> > #endif
> > +#define va_kernel_xip_pa_offset 0
> > #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
> > #endif /* CONFIG_MMU */
> >
> > @@ -110,29 +109,22 @@ extern unsigned long kernel_virt_addr;
> >
> > #ifdef CONFIG_64BIT
> > #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
> > -#ifdef CONFIG_XIP_KERNEL
> > #define kernel_mapping_pa_to_va(y) ({ \
> > unsigned long _y = y; \
> > (_y >= CONFIG_PHYS_RAM_BASE) ?
>
> This CONFIG_PHYS_RAM_BASE is only available for XIP, could result in a
> compiler error for !XIP?
>
> I'm also concerned with the unecessary overhead of kernel_mapping_pa_to_va()
> for !XIP case, there's a "if" condition branch, and extra symbol: va_kernel_xip_pa_offset

Err, I just found this symbol always exists no matter whether XIP is enabled or not.
I will send out a patch for this clean up

>
> > (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
> > (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
> > })
> > -#else
> > -#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
> > -#endif
> > #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
> >
> > #define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
> > -#ifdef CONFIG_XIP_KERNEL
> > #define kernel_mapping_va_to_pa(y) ({ \
> > unsigned long _y = y; \
> > (_y < kernel_virt_addr + XIP_OFFSET) ? \
> > ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
> > ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
> > })
>
> Similar as kernel_mapping_pa_to_va(), an overhead of "if" condition branch
> for !XIP and extra va_kernel_xip_pa_offset symbol.
>
> > -#else
> > -#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
> > -#endif
> > +
> > #define __va_to_pa_nodebug(x) ({ \
> > unsigned long _x = x; \
> > (_x < kernel_virt_addr) ? \
> > @@ -141,7 +133,7 @@ extern unsigned long kernel_virt_addr;
> > #else
> > #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
> > #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
> > -#endif
> > +#endif /* CONFIG_64BIT */
> >
> > #ifdef CONFIG_DEBUG_VIRTUAL
> > extern phys_addr_t __virt_to_phys(unsigned long x);
> > diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> > index bde8ce3bfe7c..d98e931a31e5 100644
> > --- a/arch/riscv/include/asm/pgtable.h
> > +++ b/arch/riscv/include/asm/pgtable.h
> > @@ -77,6 +77,8 @@
> >
> > #ifdef CONFIG_XIP_KERNEL
> > #define XIP_OFFSET SZ_8M
> > +#else
> > +#define XIP_OFFSET 0
> > #endif
> >
> > #ifndef __ASSEMBLY__
>
>


2021-06-03 13:00:55

by Alexandre Ghiti

[permalink] [raw]
Subject: Re: [PATCH v3 1/3] riscv: Factorize xip and !xip kernel address conversion macros

Hi Jisheng,

Le 3/06/2021 à 14:27, Jisheng Zhang a écrit :
> On Thu, 3 Jun 2021 10:27:47 +0200
> Alexandre Ghiti <[email protected]> wrote:
>
>> To simplify the kernel address conversion code, make the same definition of
>> kernel_mapping_pa_to_va and kernel_mapping_va_to_pa compatible for both xip
>> and !xip kernel by defining XIP_OFFSET to 0 in !xip kernel.
>>
>> Signed-off-by: Alexandre Ghiti <[email protected]>
>> ---
>> arch/riscv/include/asm/page.h | 14 +++-----------
>> arch/riscv/include/asm/pgtable.h | 2 ++
>> 2 files changed, 5 insertions(+), 11 deletions(-)
>>
>> diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
>> index 6a7761c86ec2..6e004d8fda4d 100644
>> --- a/arch/riscv/include/asm/page.h
>> +++ b/arch/riscv/include/asm/page.h
>> @@ -93,9 +93,7 @@ extern unsigned long va_pa_offset;
>> #ifdef CONFIG_64BIT
>> extern unsigned long va_kernel_pa_offset;
>> #endif
>> -#ifdef CONFIG_XIP_KERNEL
>> extern unsigned long va_kernel_xip_pa_offset;
>> -#endif
>> extern unsigned long pfn_base;
>> #define ARCH_PFN_OFFSET (pfn_base)
>> #else
>> @@ -103,6 +101,7 @@ extern unsigned long pfn_base;
>> #ifdef CONFIG_64BIT
>> #define va_kernel_pa_offset 0
>> #endif
>> +#define va_kernel_xip_pa_offset 0
>> #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
>> #endif /* CONFIG_MMU */
>>
>> @@ -110,29 +109,22 @@ extern unsigned long kernel_virt_addr;
>>
>> #ifdef CONFIG_64BIT
>> #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
>> -#ifdef CONFIG_XIP_KERNEL
>> #define kernel_mapping_pa_to_va(y) ({ \
>> unsigned long _y = y; \
>> (_y >= CONFIG_PHYS_RAM_BASE) ?
>
> This CONFIG_PHYS_RAM_BASE is only available for XIP, could result in a
> compiler error for !XIP?

You're right, I have this patch in my branch and forgot to squash it.

>
> I'm also concerned with the unecessary overhead of kernel_mapping_pa_to_va()
> for !XIP case, there's a "if" condition branch, and extra symbol: va_kernel_xip_pa_offset

I understand your concerns even if I don't find that the overhead is
that important here, I prefer the readability improvement. I can always
add unlikely/likely builtin to improve things or completely remove this
patch if others agree with you.

Thanks,

Alex

>
>> (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
>> (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
>> })
>> -#else
>> -#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
>> -#endif
>> #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
>>
>> #define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
>> -#ifdef CONFIG_XIP_KERNEL
>> #define kernel_mapping_va_to_pa(y) ({ \
>> unsigned long _y = y; \
>> (_y < kernel_virt_addr + XIP_OFFSET) ? \
>> ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
>> ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
>> })
>
> Similar as kernel_mapping_pa_to_va(), an overhead of "if" condition branch
> for !XIP and extra va_kernel_xip_pa_offset symbol.
>
>> -#else
>> -#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
>> -#endif
>> +
>> #define __va_to_pa_nodebug(x) ({ \
>> unsigned long _x = x; \
>> (_x < kernel_virt_addr) ? \
>> @@ -141,7 +133,7 @@ extern unsigned long kernel_virt_addr;
>> #else
>> #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
>> #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
>> -#endif
>> +#endif /* CONFIG_64BIT */
>>
>> #ifdef CONFIG_DEBUG_VIRTUAL
>> extern phys_addr_t __virt_to_phys(unsigned long x);
>> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
>> index bde8ce3bfe7c..d98e931a31e5 100644
>> --- a/arch/riscv/include/asm/pgtable.h
>> +++ b/arch/riscv/include/asm/pgtable.h
>> @@ -77,6 +77,8 @@
>>
>> #ifdef CONFIG_XIP_KERNEL
>> #define XIP_OFFSET SZ_8M
>> +#else
>> +#define XIP_OFFSET 0
>> #endif
>>
>> #ifndef __ASSEMBLY__
>
>
>
> _______________________________________________
> linux-riscv mailing list
> [email protected]
> http://lists.infradead.org/mailman/listinfo/linux-riscv
>

2021-06-03 13:21:38

by Anup Patel

[permalink] [raw]
Subject: Re: [PATCH v3 1/3] riscv: Factorize xip and !xip kernel address conversion macros

On Thu, Jun 3, 2021 at 6:27 PM Alex Ghiti <[email protected]> wrote:
>
> Hi Jisheng,
>
> Le 3/06/2021 à 14:27, Jisheng Zhang a écrit :
> > On Thu, 3 Jun 2021 10:27:47 +0200
> > Alexandre Ghiti <[email protected]> wrote:
> >
> >> To simplify the kernel address conversion code, make the same definition of
> >> kernel_mapping_pa_to_va and kernel_mapping_va_to_pa compatible for both xip
> >> and !xip kernel by defining XIP_OFFSET to 0 in !xip kernel.
> >>
> >> Signed-off-by: Alexandre Ghiti <[email protected]>
> >> ---
> >> arch/riscv/include/asm/page.h | 14 +++-----------
> >> arch/riscv/include/asm/pgtable.h | 2 ++
> >> 2 files changed, 5 insertions(+), 11 deletions(-)
> >>
> >> diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
> >> index 6a7761c86ec2..6e004d8fda4d 100644
> >> --- a/arch/riscv/include/asm/page.h
> >> +++ b/arch/riscv/include/asm/page.h
> >> @@ -93,9 +93,7 @@ extern unsigned long va_pa_offset;
> >> #ifdef CONFIG_64BIT
> >> extern unsigned long va_kernel_pa_offset;
> >> #endif
> >> -#ifdef CONFIG_XIP_KERNEL
> >> extern unsigned long va_kernel_xip_pa_offset;
> >> -#endif
> >> extern unsigned long pfn_base;
> >> #define ARCH_PFN_OFFSET (pfn_base)
> >> #else
> >> @@ -103,6 +101,7 @@ extern unsigned long pfn_base;
> >> #ifdef CONFIG_64BIT
> >> #define va_kernel_pa_offset 0
> >> #endif
> >> +#define va_kernel_xip_pa_offset 0
> >> #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
> >> #endif /* CONFIG_MMU */
> >>
> >> @@ -110,29 +109,22 @@ extern unsigned long kernel_virt_addr;
> >>
> >> #ifdef CONFIG_64BIT
> >> #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
> >> -#ifdef CONFIG_XIP_KERNEL
> >> #define kernel_mapping_pa_to_va(y) ({ \
> >> unsigned long _y = y; \
> >> (_y >= CONFIG_PHYS_RAM_BASE) ?
> >
> > This CONFIG_PHYS_RAM_BASE is only available for XIP, could result in a
> > compiler error for !XIP?
>
> You're right, I have this patch in my branch and forgot to squash it.
>
> >
> > I'm also concerned with the unecessary overhead of kernel_mapping_pa_to_va()
> > for !XIP case, there's a "if" condition branch, and extra symbol: va_kernel_xip_pa_offset
>
> I understand your concerns even if I don't find that the overhead is
> that important here, I prefer the readability improvement. I can always
> add unlikely/likely builtin to improve things or completely remove this
> patch if others agree with you.

I would also prefer readable code for long-term maintainability. Currently,
the nested "#ifdefs" are increasing causing developers to easily break
untested combinations.

Regards,
Anup

>
> Thanks,
>
> Alex
>
> >
> >> (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
> >> (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
> >> })
> >> -#else
> >> -#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
> >> -#endif
> >> #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
> >>
> >> #define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
> >> -#ifdef CONFIG_XIP_KERNEL
> >> #define kernel_mapping_va_to_pa(y) ({ \
> >> unsigned long _y = y; \
> >> (_y < kernel_virt_addr + XIP_OFFSET) ? \
> >> ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
> >> ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
> >> })
> >
> > Similar as kernel_mapping_pa_to_va(), an overhead of "if" condition branch
> > for !XIP and extra va_kernel_xip_pa_offset symbol.
> >
> >> -#else
> >> -#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
> >> -#endif
> >> +
> >> #define __va_to_pa_nodebug(x) ({ \
> >> unsigned long _x = x; \
> >> (_x < kernel_virt_addr) ? \
> >> @@ -141,7 +133,7 @@ extern unsigned long kernel_virt_addr;
> >> #else
> >> #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
> >> #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
> >> -#endif
> >> +#endif /* CONFIG_64BIT */
> >>
> >> #ifdef CONFIG_DEBUG_VIRTUAL
> >> extern phys_addr_t __virt_to_phys(unsigned long x);
> >> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> >> index bde8ce3bfe7c..d98e931a31e5 100644
> >> --- a/arch/riscv/include/asm/pgtable.h
> >> +++ b/arch/riscv/include/asm/pgtable.h
> >> @@ -77,6 +77,8 @@
> >>
> >> #ifdef CONFIG_XIP_KERNEL
> >> #define XIP_OFFSET SZ_8M
> >> +#else
> >> +#define XIP_OFFSET 0
> >> #endif
> >>
> >> #ifndef __ASSEMBLY__
> >
> >
> >
> > _______________________________________________
> > linux-riscv mailing list
> > [email protected]
> > http://lists.infradead.org/mailman/listinfo/linux-riscv
> >

2021-06-03 14:01:08

by Jisheng Zhang

[permalink] [raw]
Subject: Re: [PATCH v3 1/3] riscv: Factorize xip and !xip kernel address conversion macros

On Thu, 3 Jun 2021 18:46:47 +0530
Anup Patel <[email protected]> wrote:

> On Thu, Jun 3, 2021 at 6:27 PM Alex Ghiti <[email protected]> wrote:
> >
> > Hi Jisheng,

Hi,

> >
> > Le 3/06/2021 à 14:27, Jisheng Zhang a écrit :
> > > On Thu, 3 Jun 2021 10:27:47 +0200
> > > Alexandre Ghiti <[email protected]> wrote:
> > >
> > >> To simplify the kernel address conversion code, make the same definition of
> > >> kernel_mapping_pa_to_va and kernel_mapping_va_to_pa compatible for both xip
> > >> and !xip kernel by defining XIP_OFFSET to 0 in !xip kernel.
> > >>
> > >> Signed-off-by: Alexandre Ghiti <[email protected]>
> > >> ---
> > >> arch/riscv/include/asm/page.h | 14 +++-----------
> > >> arch/riscv/include/asm/pgtable.h | 2 ++
> > >> 2 files changed, 5 insertions(+), 11 deletions(-)
> > >>
> > >> diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
> > >> index 6a7761c86ec2..6e004d8fda4d 100644
> > >> --- a/arch/riscv/include/asm/page.h
> > >> +++ b/arch/riscv/include/asm/page.h
> > >> @@ -93,9 +93,7 @@ extern unsigned long va_pa_offset;
> > >> #ifdef CONFIG_64BIT
> > >> extern unsigned long va_kernel_pa_offset;
> > >> #endif
> > >> -#ifdef CONFIG_XIP_KERNEL
> > >> extern unsigned long va_kernel_xip_pa_offset;
> > >> -#endif
> > >> extern unsigned long pfn_base;
> > >> #define ARCH_PFN_OFFSET (pfn_base)
> > >> #else
> > >> @@ -103,6 +101,7 @@ extern unsigned long pfn_base;
> > >> #ifdef CONFIG_64BIT
> > >> #define va_kernel_pa_offset 0
> > >> #endif
> > >> +#define va_kernel_xip_pa_offset 0
> > >> #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
> > >> #endif /* CONFIG_MMU */
> > >>
> > >> @@ -110,29 +109,22 @@ extern unsigned long kernel_virt_addr;
> > >>
> > >> #ifdef CONFIG_64BIT
> > >> #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
> > >> -#ifdef CONFIG_XIP_KERNEL
> > >> #define kernel_mapping_pa_to_va(y) ({ \
> > >> unsigned long _y = y; \
> > >> (_y >= CONFIG_PHYS_RAM_BASE) ?
> > >
> > > This CONFIG_PHYS_RAM_BASE is only available for XIP, could result in a
> > > compiler error for !XIP?
> >
> > You're right, I have this patch in my branch and forgot to squash it
> >
> > >
> > > I'm also concerned with the unecessary overhead of kernel_mapping_pa_to_va()
> > > for !XIP case, there's a "if" condition branch, and extra symbol: va_kernel_xip_pa_offset
> >
> > I understand your concerns even if I don't find that the overhead is
> > that important here, I prefer the readability improvement. I can always

For readability, we still can avoid introducing va_kernel_xip_pa_offset
symbol by simply define va_kernel_xip_pa_offset as 0 if XIP as you did
for XIP_OFFSET

PS: this may need a preparation patch:
http://lists.infradead.org/pipermail/linux-riscv/2021-June/006802.html

> > add unlikely/likely builtin to improve things or completely remove this
> > patch if others agree with you.
>
> I would also prefer readable code for long-term maintainability. Currently,
> the nested "#ifdefs" are increasing causing developers to easily break
> untested combinations.
>
> Regards,
> Anup
>
> >
> > Thanks,
> >
> > Alex
> >
> > >
> > >> (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
> > >> (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
> > >> })
> > >> -#else
> > >> -#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
> > >> -#endif
> > >> #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
> > >>
> > >> #define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
> > >> -#ifdef CONFIG_XIP_KERNEL
> > >> #define kernel_mapping_va_to_pa(y) ({ \
> > >> unsigned long _y = y; \
> > >> (_y < kernel_virt_addr + XIP_OFFSET) ? \
> > >> ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
> > >> ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
> > >> })
> > >
> > > Similar as kernel_mapping_pa_to_va(), an overhead of "if" condition branch
> > > for !XIP and extra va_kernel_xip_pa_offset symbol.
> > >
> > >> -#else
> > >> -#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
> > >> -#endif
> > >> +
> > >> #define __va_to_pa_nodebug(x) ({ \
> > >> unsigned long _x = x; \
> > >> (_x < kernel_virt_addr) ? \
> > >> @@ -141,7 +133,7 @@ extern unsigned long kernel_virt_addr;
> > >> #else
> > >> #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
> > >> #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
> > >> -#endif
> > >> +#endif /* CONFIG_64BIT */
> > >>
> > >> #ifdef CONFIG_DEBUG_VIRTUAL
> > >> extern phys_addr_t __virt_to_phys(unsigned long x);
> > >> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> > >> index bde8ce3bfe7c..d98e931a31e5 100644
> > >> --- a/arch/riscv/include/asm/pgtable.h
> > >> +++ b/arch/riscv/include/asm/pgtable.h
> > >> @@ -77,6 +77,8 @@
> > >>
> > >> #ifdef CONFIG_XIP_KERNEL
> > >> #define XIP_OFFSET SZ_8M
> > >> +#else
> > >> +#define XIP_OFFSET 0
> > >> #endif
> > >>
> > >> #ifndef __ASSEMBLY__
> > >
> > >
> > >
> > > _______________________________________________
> > > linux-riscv mailing list
> > > [email protected]
> > > http://lists.infradead.org/mailman/listinfo/linux-riscv
> > >


2021-06-03 15:08:59

by Alexandre Ghiti

[permalink] [raw]
Subject: Re: [PATCH v3 1/3] riscv: Factorize xip and !xip kernel address conversion macros

Le 3/06/2021 à 15:53, Jisheng Zhang a écrit :
> On Thu, 3 Jun 2021 18:46:47 +0530
> Anup Patel <[email protected]> wrote:
>
>> On Thu, Jun 3, 2021 at 6:27 PM Alex Ghiti <[email protected]> wrote:
>>>
>>> Hi Jisheng,
>
> Hi,
>
>>>
>>> Le 3/06/2021 à 14:27, Jisheng Zhang a écrit :
>>>> On Thu, 3 Jun 2021 10:27:47 +0200
>>>> Alexandre Ghiti <[email protected]> wrote:
>>>>
>>>>> To simplify the kernel address conversion code, make the same definition of
>>>>> kernel_mapping_pa_to_va and kernel_mapping_va_to_pa compatible for both xip
>>>>> and !xip kernel by defining XIP_OFFSET to 0 in !xip kernel.
>>>>>
>>>>> Signed-off-by: Alexandre Ghiti <[email protected]>
>>>>> ---
>>>>> arch/riscv/include/asm/page.h | 14 +++-----------
>>>>> arch/riscv/include/asm/pgtable.h | 2 ++
>>>>> 2 files changed, 5 insertions(+), 11 deletions(-)
>>>>>
>>>>> diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
>>>>> index 6a7761c86ec2..6e004d8fda4d 100644
>>>>> --- a/arch/riscv/include/asm/page.h
>>>>> +++ b/arch/riscv/include/asm/page.h
>>>>> @@ -93,9 +93,7 @@ extern unsigned long va_pa_offset;
>>>>> #ifdef CONFIG_64BIT
>>>>> extern unsigned long va_kernel_pa_offset;
>>>>> #endif
>>>>> -#ifdef CONFIG_XIP_KERNEL
>>>>> extern unsigned long va_kernel_xip_pa_offset;
>>>>> -#endif
>>>>> extern unsigned long pfn_base;
>>>>> #define ARCH_PFN_OFFSET (pfn_base)
>>>>> #else
>>>>> @@ -103,6 +101,7 @@ extern unsigned long pfn_base;
>>>>> #ifdef CONFIG_64BIT
>>>>> #define va_kernel_pa_offset 0
>>>>> #endif
>>>>> +#define va_kernel_xip_pa_offset 0
>>>>> #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
>>>>> #endif /* CONFIG_MMU */
>>>>>
>>>>> @@ -110,29 +109,22 @@ extern unsigned long kernel_virt_addr;
>>>>>
>>>>> #ifdef CONFIG_64BIT
>>>>> #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
>>>>> -#ifdef CONFIG_XIP_KERNEL
>>>>> #define kernel_mapping_pa_to_va(y) ({ \
>>>>> unsigned long _y = y; \
>>>>> (_y >= CONFIG_PHYS_RAM_BASE) ?
>>>>
>>>> This CONFIG_PHYS_RAM_BASE is only available for XIP, could result in a
>>>> compiler error for !XIP?
>>>
>>> You're right, I have this patch in my branch and forgot to squash it
>>>
>>>>
>>>> I'm also concerned with the unecessary overhead of kernel_mapping_pa_to_va()
>>>> for !XIP case, there's a "if" condition branch, and extra symbol: va_kernel_xip_pa_offset
>>>
>>> I understand your concerns even if I don't find that the overhead is
>>> that important here, I prefer the readability improvement. I can always
>
> For readability, we still can avoid introducing va_kernel_xip_pa_offset
> symbol by simply define va_kernel_xip_pa_offset as 0 if XIP as you did
> for XIP_OFFSET
>
> PS: this may need a preparation patch:
> http://lists.infradead.org/pipermail/linux-riscv/2021-June/006802.html

IIUC, that won't improve readability, just avoid to allocate
va_kernel_xip_pa_offset in !XIP kernel right?

>
>>> add unlikely/likely builtin to improve things or completely remove this
>>> patch if others agree with you.
>>
>> I would also prefer readable code for long-term maintainability. Currently,
>> the nested "#ifdefs" are increasing causing developers to easily break
>> untested combinations.
>>
>> Regards,
>> Anup
>>
>>>
>>> Thanks,
>>>
>>> Alex
>>>
>>>>
>>>>> (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
>>>>> (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
>>>>> })
>>>>> -#else
>>>>> -#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
>>>>> -#endif
>>>>> #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
>>>>>
>>>>> #define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
>>>>> -#ifdef CONFIG_XIP_KERNEL
>>>>> #define kernel_mapping_va_to_pa(y) ({ \
>>>>> unsigned long _y = y; \
>>>>> (_y < kernel_virt_addr + XIP_OFFSET) ? \
>>>>> ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
>>>>> ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
>>>>> })
>>>>
>>>> Similar as kernel_mapping_pa_to_va(), an overhead of "if" condition branch
>>>> for !XIP and extra va_kernel_xip_pa_offset symbol.
>>>>
>>>>> -#else
>>>>> -#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
>>>>> -#endif
>>>>> +
>>>>> #define __va_to_pa_nodebug(x) ({ \
>>>>> unsigned long _x = x; \
>>>>> (_x < kernel_virt_addr) ? \
>>>>> @@ -141,7 +133,7 @@ extern unsigned long kernel_virt_addr;
>>>>> #else
>>>>> #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
>>>>> #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
>>>>> -#endif
>>>>> +#endif /* CONFIG_64BIT */
>>>>>
>>>>> #ifdef CONFIG_DEBUG_VIRTUAL
>>>>> extern phys_addr_t __virt_to_phys(unsigned long x);
>>>>> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
>>>>> index bde8ce3bfe7c..d98e931a31e5 100644
>>>>> --- a/arch/riscv/include/asm/pgtable.h
>>>>> +++ b/arch/riscv/include/asm/pgtable.h
>>>>> @@ -77,6 +77,8 @@
>>>>>
>>>>> #ifdef CONFIG_XIP_KERNEL
>>>>> #define XIP_OFFSET SZ_8M
>>>>> +#else
>>>>> +#define XIP_OFFSET 0
>>>>> #endif
>>>>>
>>>>> #ifndef __ASSEMBLY__
>>>>
>>>>
>>>>
>>>> _______________________________________________
>>>> linux-riscv mailing list
>>>> [email protected]
>>>> http://lists.infradead.org/mailman/listinfo/linux-riscv
>>>>
>
>
>
> _______________________________________________
> linux-riscv mailing list
> [email protected]
> http://lists.infradead.org/mailman/listinfo/linux-riscv
>

2021-06-03 15:24:34

by Jisheng Zhang

[permalink] [raw]
Subject: Re: [PATCH v3 1/3] riscv: Factorize xip and !xip kernel address conversion macros

On Thu, 3 Jun 2021 17:06:39 +0200
Alex Ghiti <[email protected]> wrote:

> Le 3/06/2021 à 15:53, Jisheng Zhang a écrit :
> > On Thu, 3 Jun 2021 18:46:47 +0530
> > Anup Patel <[email protected]> wrote:
> >
> >> On Thu, Jun 3, 2021 at 6:27 PM Alex Ghiti <[email protected]> wrote:
> >>>
> >>> Hi Jisheng,
> >
> > Hi,
> >
> >>>
> >>> Le 3/06/2021 à 14:27, Jisheng Zhang a écrit :
> >>>> On Thu, 3 Jun 2021 10:27:47 +0200
> >>>> Alexandre Ghiti <[email protected]> wrote:
> >>>>
> >>>>> To simplify the kernel address conversion code, make the same definition of
> >>>>> kernel_mapping_pa_to_va and kernel_mapping_va_to_pa compatible for both xip
> >>>>> and !xip kernel by defining XIP_OFFSET to 0 in !xip kernel.
> >>>>>
> >>>>> Signed-off-by: Alexandre Ghiti <[email protected]>
> >>>>> ---
> >>>>> arch/riscv/include/asm/page.h | 14 +++-----------
> >>>>> arch/riscv/include/asm/pgtable.h | 2 ++
> >>>>> 2 files changed, 5 insertions(+), 11 deletions(-)
> >>>>>
> >>>>> diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
> >>>>> index 6a7761c86ec2..6e004d8fda4d 100644
> >>>>> --- a/arch/riscv/include/asm/page.h
> >>>>> +++ b/arch/riscv/include/asm/page.h
> >>>>> @@ -93,9 +93,7 @@ extern unsigned long va_pa_offset;
> >>>>> #ifdef CONFIG_64BIT
> >>>>> extern unsigned long va_kernel_pa_offset;
> >>>>> #endif
> >>>>> -#ifdef CONFIG_XIP_KERNEL
> >>>>> extern unsigned long va_kernel_xip_pa_offset;
> >>>>> -#endif
> >>>>> extern unsigned long pfn_base;
> >>>>> #define ARCH_PFN_OFFSET (pfn_base)
> >>>>> #else
> >>>>> @@ -103,6 +101,7 @@ extern unsigned long pfn_base;
> >>>>> #ifdef CONFIG_64BIT
> >>>>> #define va_kernel_pa_offset 0
> >>>>> #endif
> >>>>> +#define va_kernel_xip_pa_offset 0
> >>>>> #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
> >>>>> #endif /* CONFIG_MMU */
> >>>>>
> >>>>> @@ -110,29 +109,22 @@ extern unsigned long kernel_virt_addr;
> >>>>>
> >>>>> #ifdef CONFIG_64BIT
> >>>>> #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
> >>>>> -#ifdef CONFIG_XIP_KERNEL
> >>>>> #define kernel_mapping_pa_to_va(y) ({ \
> >>>>> unsigned long _y = y; \
> >>>>> (_y >= CONFIG_PHYS_RAM_BASE) ?
> >>>>
> >>>> This CONFIG_PHYS_RAM_BASE is only available for XIP, could result in a
> >>>> compiler error for !XIP?
> >>>
> >>> You're right, I have this patch in my branch and forgot to squash it
> >>>
> >>>>
> >>>> I'm also concerned with the unecessary overhead of kernel_mapping_pa_to_va()
> >>>> for !XIP case, there's a "if" condition branch, and extra symbol: va_kernel_xip_pa_offset
> >>>
> >>> I understand your concerns even if I don't find that the overhead is
> >>> that important here, I prefer the readability improvement. I can always
> >
> > For readability, we still can avoid introducing va_kernel_xip_pa_offset
> > symbol by simply define va_kernel_xip_pa_offset as 0 if XIP as you did
> > for XIP_OFFSET
> >
> > PS: this may need a preparation patch:
> > http://lists.infradead.org/pipermail/linux-riscv/2021-June/006802.html
>
> IIUC, that won't improve readability, just avoid to allocate
> va_kernel_xip_pa_offset in !XIP kernel right?

I mean we can still improve code readability while avoiding the
va_kernel_xip_pa_offset symbol for the !XIP case. It could probably be implemented
as you did for XIP_OFFSET:

#ifdef CONFIG_XIP_KERNEL
extern unsigned long va_kernel_xip_pa_offset;
#else
#define va_kernel_xip_pa_offset 0
#endif

But since currently va_kernel_xip_pa_offset always exists no matter whether XIP
is enabled or not, you may need the preparation patch to clean up, otherwise
there may be compiler error.
>
> >
> >>> add unlikely/likely builtin to improve things or completely remove this
> >>> patch if others agree with you.
> >>
> >> I would also prefer readable code for long-term maintainability. Currently,
> >> the nested "#ifdefs" are increasing, causing developers to easily break
> >> untested combinations.
> >>
> >> Regards,
> >> Anup
> >>
> >>>
> >>> Thanks,
> >>>
> >>> Alex
> >>>
> >>>>
> >>>>> (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
> >>>>> (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
> >>>>> })
> >>>>> -#else
> >>>>> -#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
> >>>>> -#endif
> >>>>> #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
> >>>>>
> >>>>> #define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
> >>>>> -#ifdef CONFIG_XIP_KERNEL
> >>>>> #define kernel_mapping_va_to_pa(y) ({ \
> >>>>> unsigned long _y = y; \
> >>>>> (_y < kernel_virt_addr + XIP_OFFSET) ? \
> >>>>> ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
> >>>>> ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
> >>>>> })
> >>>>
> >>>> Similar as kernel_mapping_pa_to_va(), an overhead of "if" condition branch
> >>>> for !XIP and extra va_kernel_xip_pa_offset symbol.
> >>>>
> >>>>> -#else
> >>>>> -#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
> >>>>> -#endif
> >>>>> +
> >>>>> #define __va_to_pa_nodebug(x) ({ \
> >>>>> unsigned long _x = x; \
> >>>>> (_x < kernel_virt_addr) ? \
> >>>>> @@ -141,7 +133,7 @@ extern unsigned long kernel_virt_addr;
> >>>>> #else
> >>>>> #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
> >>>>> #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
> >>>>> -#endif
> >>>>> +#endif /* CONFIG_64BIT */
> >>>>>
> >>>>> #ifdef CONFIG_DEBUG_VIRTUAL
> >>>>> extern phys_addr_t __virt_to_phys(unsigned long x);
> >>>>> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> >>>>> index bde8ce3bfe7c..d98e931a31e5 100644
> >>>>> --- a/arch/riscv/include/asm/pgtable.h
> >>>>> +++ b/arch/riscv/include/asm/pgtable.h
> >>>>> @@ -77,6 +77,8 @@
> >>>>>
> >>>>> #ifdef CONFIG_XIP_KERNEL
> >>>>> #define XIP_OFFSET SZ_8M
> >>>>> +#else
> >>>>> +#define XIP_OFFSET 0
> >>>>> #endif
> >>>>>
> >>>>> #ifndef __ASSEMBLY__
> >>>>
> >>>>
> >>>>
> >>>> _______________________________________________
> >>>> linux-riscv mailing list
> >>>> [email protected]
> >>>> http://lists.infradead.org/mailman/listinfo/linux-riscv
> >>>>
> >
> >
> >
> > _______________________________________________
> > linux-riscv mailing list
> > [email protected]
> > http://lists.infradead.org/mailman/listinfo/linux-riscv
> >


2021-06-03 15:54:55

by Vitaly Wool

[permalink] [raw]
Subject: Re: [PATCH v3 1/3] riscv: Factorize xip and !xip kernel address conversion macros

On Thu, Jun 3, 2021, 14:57 Jisheng Zhang <[email protected]> wrote:
>
> On Thu, 3 Jun 2021 20:27:48 +0800
> Jisheng Zhang <[email protected]> wrote:
>
> > On Thu, 3 Jun 2021 10:27:47 +0200
> > Alexandre Ghiti <[email protected]> wrote:
> >
> > > To simplify the kernel address conversion code, make the same definition of
> > > kernel_mapping_pa_to_va and kernel_mapping_va_to_pa compatible for both xip
> > > and !xip kernel by defining XIP_OFFSET to 0 in !xip kernel.
> > >
> > > Signed-off-by: Alexandre Ghiti <[email protected]>
> > > ---
> > > arch/riscv/include/asm/page.h | 14 +++-----------
> > > arch/riscv/include/asm/pgtable.h | 2 ++
> > > 2 files changed, 5 insertions(+), 11 deletions(-)
> > >
> > > diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
> > > index 6a7761c86ec2..6e004d8fda4d 100644
> > > --- a/arch/riscv/include/asm/page.h
> > > +++ b/arch/riscv/include/asm/page.h
> > > @@ -93,9 +93,7 @@ extern unsigned long va_pa_offset;
> > > #ifdef CONFIG_64BIT
> > > extern unsigned long va_kernel_pa_offset;
> > > #endif
> > > -#ifdef CONFIG_XIP_KERNEL
> > > extern unsigned long va_kernel_xip_pa_offset;
> > > -#endif
> > > extern unsigned long pfn_base;
> > > #define ARCH_PFN_OFFSET (pfn_base)
> > > #else
> > > @@ -103,6 +101,7 @@ extern unsigned long pfn_base;
> > > #ifdef CONFIG_64BIT
> > > #define va_kernel_pa_offset 0
> > > #endif
> > > +#define va_kernel_xip_pa_offset 0
> > > #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
> > > #endif /* CONFIG_MMU */
> > >
> > > @@ -110,29 +109,22 @@ extern unsigned long kernel_virt_addr;
> > >
> > > #ifdef CONFIG_64BIT
> > > #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
> > > -#ifdef CONFIG_XIP_KERNEL
> > > #define kernel_mapping_pa_to_va(y) ({ \
> > > unsigned long _y = y; \
> > > (_y >= CONFIG_PHYS_RAM_BASE) ?
> >
> > This CONFIG_PHYS_RAM_BASE is only available for XIP, could result in a
> > compiler error for !XIP?
> >
> > I'm also concerned with the unnecessary overhead of kernel_mapping_pa_to_va()
> > for !XIP case, there's a "if" condition branch, and extra symbol: va_kernel_xip_pa_offset
>
> Err, I just found this symbol always exists no matter XIP is enabled or not.
> I will send out a patch for this clean up

What cleanup?

Best regards,
Vitaly

>
> >
> > > (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
> > > (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
> > > })
> > > -#else
> > > -#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
> > > -#endif
> > > #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
> > >
> > > #define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
> > > -#ifdef CONFIG_XIP_KERNEL
> > > #define kernel_mapping_va_to_pa(y) ({ \
> > > unsigned long _y = y; \
> > > (_y < kernel_virt_addr + XIP_OFFSET) ? \
> > > ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
> > > ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
> > > })
> >
> > Similar as kernel_mapping_pa_to_va(), an overhead of "if" condition branch
> > for !XIP and extra va_kernel_xip_pa_offset symbol.
> >
> > > -#else
> > > -#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
> > > -#endif
> > > +
> > > #define __va_to_pa_nodebug(x) ({ \
> > > unsigned long _x = x; \
> > > (_x < kernel_virt_addr) ? \
> > > @@ -141,7 +133,7 @@ extern unsigned long kernel_virt_addr;
> > > #else
> > > #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
> > > #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
> > > -#endif
> > > +#endif /* CONFIG_64BIT */
> > >
> > > #ifdef CONFIG_DEBUG_VIRTUAL
> > > extern phys_addr_t __virt_to_phys(unsigned long x);
> > > diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> > > index bde8ce3bfe7c..d98e931a31e5 100644
> > > --- a/arch/riscv/include/asm/pgtable.h
> > > +++ b/arch/riscv/include/asm/pgtable.h
> > > @@ -77,6 +77,8 @@
> > >
> > > #ifdef CONFIG_XIP_KERNEL
> > > #define XIP_OFFSET SZ_8M
> > > +#else
> > > +#define XIP_OFFSET 0
> > > #endif
> > >
> > > #ifndef __ASSEMBLY__
> >
> >
>
>
>
> _______________________________________________
> linux-riscv mailing list
> [email protected]
> http://lists.infradead.org/mailman/listinfo/linux-riscv

2021-06-03 15:57:42

by Jisheng Zhang

[permalink] [raw]
Subject: Re: [PATCH v3 1/3] riscv: Factorize xip and !xip kernel address conversion macros

On Thu, 3 Jun 2021 17:51:25 +0200
Vitaly Wool <[email protected]> wrote:

> On Thu, Jun 3, 2021, 14:57 Jisheng Zhang <[email protected]> wrote:
> >
> > On Thu, 3 Jun 2021 20:27:48 +0800
> > Jisheng Zhang <[email protected]> wrote:
> >
> > > On Thu, 3 Jun 2021 10:27:47 +0200
> > > Alexandre Ghiti <[email protected]> wrote:
> > >
> > > > To simplify the kernel address conversion code, make the same definition of
> > > > kernel_mapping_pa_to_va and kernel_mapping_va_to_pa compatible for both xip
> > > > and !xip kernel by defining XIP_OFFSET to 0 in !xip kernel.
> > > >
> > > > Signed-off-by: Alexandre Ghiti <[email protected]>
> > > > ---
> > > > arch/riscv/include/asm/page.h | 14 +++-----------
> > > > arch/riscv/include/asm/pgtable.h | 2 ++
> > > > 2 files changed, 5 insertions(+), 11 deletions(-)
> > > >
> > > > diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
> > > > index 6a7761c86ec2..6e004d8fda4d 100644
> > > > --- a/arch/riscv/include/asm/page.h
> > > > +++ b/arch/riscv/include/asm/page.h
> > > > @@ -93,9 +93,7 @@ extern unsigned long va_pa_offset;
> > > > #ifdef CONFIG_64BIT
> > > > extern unsigned long va_kernel_pa_offset;
> > > > #endif
> > > > -#ifdef CONFIG_XIP_KERNEL
> > > > extern unsigned long va_kernel_xip_pa_offset;
> > > > -#endif
> > > > extern unsigned long pfn_base;
> > > > #define ARCH_PFN_OFFSET (pfn_base)
> > > > #else
> > > > @@ -103,6 +101,7 @@ extern unsigned long pfn_base;
> > > > #ifdef CONFIG_64BIT
> > > > #define va_kernel_pa_offset 0
> > > > #endif
> > > > +#define va_kernel_xip_pa_offset 0
> > > > #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
> > > > #endif /* CONFIG_MMU */
> > > >
> > > > @@ -110,29 +109,22 @@ extern unsigned long kernel_virt_addr;
> > > >
> > > > #ifdef CONFIG_64BIT
> > > > #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
> > > > -#ifdef CONFIG_XIP_KERNEL
> > > > #define kernel_mapping_pa_to_va(y) ({ \
> > > > unsigned long _y = y; \
> > > > (_y >= CONFIG_PHYS_RAM_BASE) ?
> > >
> > > This CONFIG_PHYS_RAM_BASE is only available for XIP, could result in a
> > > compiler error for !XIP?
> > >
> > > I'm also concerned with the unnecessary overhead of kernel_mapping_pa_to_va()
> > > for !XIP case, there's a "if" condition branch, and extra symbol: va_kernel_xip_pa_offset
> >
> > Err, I just found this symbol always exists no matter XIP is enabled or not.
> > I will send out a patch for this clean up
>
> What cleanup?
>

http://lists.infradead.org/pipermail/linux-riscv/2021-June/006802.html