Hi all,
This series fixes support for loading the kernel into XKPHYS space.
It is derived from "MIPS: use virtual addresses from xkphys for MIPS64" [1].
Boot tested on Boston and QEMU with the load address set to 0xa800000090000000.
A QEMU patch is on the way.
For EyeQ5's memory layout, I think you just need to write the devicetree
memory node as:
	memory@0 {
		device_type = "memory";
		reg = <0x0 0x08000000 0x0 0x08000000
		       0x8 0x08000000 0x0 0x78000000>;
	};
Then set the kernel load address to somewhere in RAM, and everything should work.
I'm a little confused that in the EyeQ5 enablement patch you set the
load address to:
> +else
> +load-$(CONFIG_MIPS_GENERIC) += 0xa800000080100000
> +endif
Where there is no memory available.
I guess you might want to set it to 0xa800000800100000?
Though I would suggest setting it to 0xa800000808000000, to avoid
collisions with low memory and reserved memory.
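(For reference, the arithmetic here: assuming the usual XKPHYS
cacheable-coherent base of 0xa800000000000000, the load address is just
that base plus the physical load address, e.g.

	0xa800000000000000 + 0x0000000808000000 = 0xa800000808000000

which is the start of the second memory range in the node above.)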
Gregory and Vladimir, do let me know if I missed anything.
Thanks
- Jiaxun
[1]: https://lore.kernel.org/lkml/[email protected]/
Jiaxun Yang (10):
MIPS: Export higher/highest relocation functions in uasm
MIPS: spaces: Define a couple of handy macros
MIPS: genex: Fix except_vec_vi for kernel in XKPHYS
MIPS: Fix set_uncached_handler for ebase in XKPHYS
MIPS: Refactor mips_cps_core_entry implementation
MIPS: Allow kernel base to be set from Kconfig for all platforms
MIPS: traps: Handle CPU with non standard vint offset
MIPS: Avoid unnecessary reservation of exception space
MIPS: traps: Enhance memblock ebase allocation process
MIPS: Get rid of CONFIG_NO_EXCEPT_FILL
arch/mips/Kconfig | 27 ++--
arch/mips/include/asm/addrspace.h | 5 +
arch/mips/include/asm/mach-generic/spaces.h | 5 +-
arch/mips/include/asm/mips-cm.h | 1 +
arch/mips/include/asm/smp-cps.h | 4 +-
arch/mips/include/asm/traps.h | 1 -
arch/mips/include/asm/uasm.h | 2 +
arch/mips/kernel/cps-vec.S | 110 +++++--------
arch/mips/kernel/cpu-probe.c | 5 -
arch/mips/kernel/cpu-r3k-probe.c | 2 -
arch/mips/kernel/genex.S | 19 ++-
arch/mips/kernel/head.S | 7 +-
arch/mips/kernel/smp-cps.c | 167 +++++++++++++++++---
arch/mips/kernel/traps.c | 85 +++++++---
arch/mips/mm/uasm.c | 6 +-
15 files changed, 293 insertions(+), 153 deletions(-)
--
2.34.1
Use the {highest, higher, hi, lo} immediate loading sequence to load
the 64-bit jump address of the handler when the kernel is loaded
into XKPHYS.
Co-developed-by: Vladimir Kondratiev <[email protected]>
Signed-off-by: Vladimir Kondratiev <[email protected]>
Co-developed-by: Gregory CLEMENT <[email protected]>
Signed-off-by: Gregory CLEMENT <[email protected]>
Signed-off-by: Jiaxun Yang <[email protected]>
---
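As a side note, here is a small standalone userspace sketch (illustration
only, not kernel code; the address is made up) showing that the patched
lui/daddiu/dsll sequence in genex.S below, filled with the standard
%highest/%higher/%hi/%lo relocation values, reconstructs the full 64-bit
handler address:

	#include <stdint.h>
	#include <stdio.h>

	/* Sign-extend a 16-bit immediate, as lui/daddiu do. */
	static int64_t simm16(uint64_t v)
	{
		return (int16_t)(v & 0xffff);
	}

	int main(void)
	{
		uint64_t addr = 0xa800000090001234ULL; /* made-up handler address */

		/* Standard MIPS %highest/%higher/%hi/%lo rounding. */
		int64_t highest = simm16((addr + 0x800080008000ULL) >> 48);
		int64_t higher  = simm16((addr + 0x80008000ULL) >> 32);
		int64_t hi      = simm16((addr + 0x8000ULL) >> 16);
		int64_t lo      = simm16(addr);

		/* What the patched sequence leaves in v0. */
		uint64_t v0;
		v0  = (uint64_t)highest << 16;	/* lui    v0, %highest(addr) */
		v0 += (uint64_t)higher;		/* daddiu v0, %higher(addr)  */
		v0 <<= 16;			/* dsll   v0, 16             */
		v0 += (uint64_t)hi;		/* daddiu v0, %hi(addr)      */
		v0 <<= 16;			/* dsll   v0, 16             */
		v0 += (uint64_t)lo;		/* daddiu v0, %lo(addr)      */

		printf("match: %d\n", v0 == addr);	/* prints "match: 1" */
		return 0;
	}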
arch/mips/kernel/genex.S | 19 +++++++++++++++----
arch/mips/kernel/traps.c | 34 ++++++++++++++++++++++++----------
2 files changed, 39 insertions(+), 14 deletions(-)
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index b6de8e88c1bd..fd765ad9ecac 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -272,11 +272,22 @@ NESTED(except_vec_vi, 0, sp)
.set push
.set noreorder
PTR_LA v1, except_vec_vi_handler
-FEXPORT(except_vec_vi_lui)
- lui v0, 0 /* Patched */
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+FEXPORT(except_vec_vi_hi)
+ lui v0, 0 /* Patched */
+#else
+FEXPORT(except_vec_vi_highest)
+ lui v0, 0 /* Patched */
+FEXPORT(except_vec_vi_higher)
+ daddiu v0, 0 /* Patched */
+ dsll v0, 16
+FEXPORT(except_vec_vi_hi)
+ daddiu v0, 0 /* Patched */
+ dsll v0, 16
+#endif
jr v1
-FEXPORT(except_vec_vi_ori)
- ori v0, 0 /* Patched */
+FEXPORT(except_vec_vi_lo)
+ PTR_ADDIU v0, 0 /* Patched */
.set pop
END(except_vec_vi)
EXPORT(except_vec_vi_end)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 246c6a6b0261..60c513c51684 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2091,18 +2091,26 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
* If no shadow set is selected then use the default handler
* that does normal register saving and standard interrupt exit
*/
- extern const u8 except_vec_vi[], except_vec_vi_lui[];
- extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
+ extern const u8 except_vec_vi[], except_vec_vi_hi[];
+ extern const u8 except_vec_vi_lo[], except_vec_vi_end[];
+#if defined(CONFIG_64BIT) && !defined(KBUILD_64BIT_SYM32)
+ extern const u8 except_vec_vi_highest[], except_vec_vi_higher[];
+#endif
extern const u8 rollback_except_vec_vi[];
const u8 *vec_start = using_rollback_handler() ?
rollback_except_vec_vi : except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
- const int lui_offset = except_vec_vi_lui - vec_start + 2;
- const int ori_offset = except_vec_vi_ori - vec_start + 2;
+ const int imm_offset = 2;
#else
- const int lui_offset = except_vec_vi_lui - vec_start;
- const int ori_offset = except_vec_vi_ori - vec_start;
+ const int imm_offset = 0;
+#endif
+#if defined(CONFIG_64BIT) && !defined(KBUILD_64BIT_SYM32)
+ const int highest_offset = except_vec_vi_highest - vec_start + imm_offset;
+ const int higher_offset = except_vec_vi_higher - vec_start + imm_offset;
#endif
+ const int hi_offset = except_vec_vi_hi - vec_start + imm_offset;
+ const int lo_offset = except_vec_vi_lo - vec_start + imm_offset;
+
const int handler_len = except_vec_vi_end - vec_start;
if (handler_len > VECTORSPACING) {
@@ -2119,10 +2127,16 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
#else
handler_len);
#endif
- h = (u16 *)(b + lui_offset);
- *h = (handler >> 16) & 0xffff;
- h = (u16 *)(b + ori_offset);
- *h = (handler & 0xffff);
+#if defined(CONFIG_64BIT) && !defined(KBUILD_64BIT_SYM32)
+ h = (u16 *)(b + highest_offset);
+ *h = uasm_rel_highest(handler);
+ h = (u16 *)(b + higher_offset);
+ *h = uasm_rel_higher(handler);
+#endif
+ h = (u16 *)(b + hi_offset);
+ *h = uasm_rel_hi(handler);
+ h = (u16 *)(b + lo_offset);
+ *h = uasm_rel_lo(handler);
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+handler_len));
}
--
2.34.1
ebase may be in XKPHYS if memblock is unable to allocate memory
within the KSEG0 physical range.
To map ebase into uncached space, just convert it back to a physical
address and then use the platform's TO_UNCAC helper to create the
mapping.
Co-developed-by: Vladimir Kondratiev <[email protected]>
Signed-off-by: Vladimir Kondratiev <[email protected]>
Co-developed-by: Gregory CLEMENT <[email protected]>
Signed-off-by: Gregory CLEMENT <[email protected]>
Signed-off-by: Jiaxun Yang <[email protected]>
---
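As a concrete example (values assuming the generic 64-bit spaces.h
layout, where the uncached XKPHYS base is 0x9000000000000000): with
ebase = 0xa800000090000000, CKSEG1ADDR(ebase) would yield
0xffffffffb0000000, i.e. the physical address truncated to 29 bits,
while TO_UNCAC(__pa(ebase)) yields 0x9000000090000000, the correct
uncached alias.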
arch/mips/kernel/traps.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 60c513c51684..230728d76d11 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2346,7 +2346,7 @@ static const char panic_null_cerr[] =
void set_uncached_handler(unsigned long offset, void *addr,
unsigned long size)
{
- unsigned long uncached_ebase = CKSEG1ADDR(ebase);
+ unsigned long uncached_ebase = TO_UNCAC(__pa(ebase));
if (!addr)
panic(panic_null_cerr);
--
2.34.1
Try to allocate from KSEG0-accessible space first; only if we really
can't allocate any memory from KSEG0, and we are sure that ebase in a
higher segment is supported, give it another go without the range
restriction.
This maximizes the chance of having ebase in KSEG0.
Signed-off-by: Jiaxun Yang <[email protected]>
---
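Note on the change below: the first attempt is restricted to physical
addresses below KSEGX_SIZE (assumed here to be the usual 512 MiB segment
size), i.e. the range that KSEG0/KSEG1 can map directly; only when that
range is exhausted, and the CPU can actually take ebase above it (EVA,
or a writable WG bit in CP0 EBase), do we fall back to an unrestricted
allocation.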
arch/mips/kernel/traps.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index b6e94654f621..68f1dd54cde1 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2399,7 +2399,12 @@ void __init trap_init(void)
memblock_reserve(ebase_pa, vec_size);
} else {
vec_size = max(vec_size, PAGE_SIZE);
- ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
+ ebase_pa = memblock_phys_alloc_range(vec_size, 1 << fls(vec_size),
+ 0x0, KSEGX_SIZE - 1);
+
+ if (!ebase_pa && (IS_ENABLED(CONFIG_EVA) || cpu_has_ebase_wg))
+ ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
+
if (!ebase_pa)
panic("%s: Failed to allocate %lu bytes align=0x%x\n",
__func__, vec_size, 1 << fls(vec_size));
--
2.34.1
NO_EXCEPT_FILL is used to indicate a platform that does not need to
reserve ebase memory at the start of the kernel image.
This is true for all R2+ platforms, as they allocate ebase memory on
the fly, and also for any platform that does not load the kernel at
the start of physical memory.
Get rid of this Kconfig symbol by using macros to detect the
conditions above.
Signed-off-by: Jiaxun Yang <[email protected]>
---
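With the change below, a pre-R2 kernel linked at KSEG0/CKSEG0 (i.e. at
physical address 0, on top of the fixed legacy exception vectors) still
gets the 0x400-byte fill, while R2+ kernels and kernels linked anywhere
else, XKPHYS included, drop it.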
arch/mips/Kconfig | 9 ---------
arch/mips/kernel/head.S | 7 +++++--
2 files changed, 5 insertions(+), 11 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index bfedc8b48a81..e928ebc2cd1f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -149,7 +149,6 @@ config MIPS_GENERIC_KERNEL
select MIPS_CPU_SCACHE
select MIPS_GIC
select MIPS_L1_CACHE_SHIFT_7
- select NO_EXCEPT_FILL
select PCI_DRIVERS_GENERIC
select SMP_UP if SMP
select SWAP_IO_SPACE
@@ -210,7 +209,6 @@ config AR7
select CEVT_R4K
select CSRC_R4K
select IRQ_MIPS_CPU
- select NO_EXCEPT_FILL
select SWAP_IO_SPACE
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_EARLY_PRINTK
@@ -265,7 +263,6 @@ config BMIPS_GENERIC
select ARCH_HAS_RESET_CONTROLLER
select ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
select BOOT_RAW
- select NO_EXCEPT_FILL
select USE_OF
select CEVT_R4K
select CSRC_R4K
@@ -309,7 +306,6 @@ config BCM47XX
select HAVE_PCI
select IRQ_MIPS_CPU
select SYS_HAS_CPU_MIPS32_R1
- select NO_EXCEPT_FILL
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_MIPS16
@@ -445,7 +441,6 @@ config LANTIQ
select IRQ_MIPS_CPU
select CEVT_R4K
select CSRC_R4K
- select NO_EXCEPT_FILL
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
select SYS_SUPPORTS_BIG_ENDIAN
@@ -494,7 +489,6 @@ config MACH_LOONGSON64
select ISA
select I8259
select IRQ_MIPS_CPU
- select NO_EXCEPT_FILL
select NR_CPUS_DEFAULT_64
select USE_GENERIC_EARLY_PRINTK_8250
select PCI_DRIVERS_GENERIC
@@ -1177,9 +1171,6 @@ config PCI_GT64XXX_PCI0
config PCI_XTALK_BRIDGE
bool
-config NO_EXCEPT_FILL
- bool
-
config MIPS_SPRAM
bool
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index b825ed4476c7..4af53b1628f5 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -59,10 +59,13 @@
#endif
.endm
-#ifndef CONFIG_NO_EXCEPT_FILL
+#if (MIPS_ISA_REV < 2) && \
+ ((VMLINUX_LOAD_ADDRESS == KSEG0) || \
+ (VMLINUX_LOAD_ADDRESS == CKSEG0))
/*
* Reserved space for exception handlers.
- * Necessary for machines which link their kernels at KSEG0.
+ * Necessary for machines which link their kernels at KSEG0
+ * and incapable of moving ebase.
*/
.fill 0x400
#endif
--
2.34.1
There are some platforms in the wild for which the generic load
address won't work due to their memory layout.
Allow PHYSICAL_START to be overridden from Kconfig and introduce a
PHYSICAL_START_BOOL symbol, as powerpc does.
Signed-off-by: Jiaxun Yang <[email protected]>
---
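As a usage example, a board whose RAM starts in high memory (such as the
EyeQ5 layout discussed in the cover letter) could then set something
like the following in its defconfig (the value is only illustrative):

	CONFIG_PHYSICAL_START_BOOL=y
	CONFIG_PHYSICAL_START=0xa800000808000000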
arch/mips/Kconfig | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index bc8421859006..bfedc8b48a81 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2884,12 +2884,22 @@ config ARCH_SUPPORTS_KEXEC
config ARCH_SUPPORTS_CRASH_DUMP
def_bool y
+config PHYSICAL_START_BOOL
+ bool "Set physical address where the kernel is loaded"
+ default y if CRASH_DUMP
+ help
+ This gives the CKSEG0, KSEG0 or XKPHYS address where the kernel
+ is loaded.
+
+ Say N here unless you know what you are doing.
+
config PHYSICAL_START
- hex "Physical address where the kernel is loaded"
- default "0xffffffff84000000"
- depends on CRASH_DUMP
+ hex "Physical address where the kernel is loaded" if PHYSICAL_START_BOOL
+ default "0xffffffff84000000" if CRASH_DUMP
+ default "0xffffffff80100000"
help
- This gives the CKSEG0 or KSEG0 address where the kernel is loaded.
+ This gives the CKSEG0, KSEG0 or XKPHYS address where the kernel
+ is loaded.
If you plan to use kernel for capturing the crash dump change
this value to start of the reserved region (the "X" value as
specified in the "crashkernel=YM@XM" command line boot parameter
--
2.34.1
Nowadays we allocate the exception base from memblock for MIPS R2-R6
processors, so we don't need to reserve exception space at the start
of memory for them.
For older processors the reservation is moved to trap_init(), where
we know the exact size we need. Also add a sanity check to detect a
possible overlap with the kernel.
Signed-off-by: Jiaxun Yang <[email protected]>
---
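For the common case of a kernel linked at CKSEG0 + 0x100000,
__pa_symbol(_stext) is 0x100000, which is well above ebase_pa + vec_size
(vec_size is a few KiB at most on these CPUs), so the new pr_err below
should only fire for kernels that are genuinely linked on top of the
legacy vectors.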
arch/mips/include/asm/traps.h | 1 -
arch/mips/kernel/cpu-probe.c | 5 -----
arch/mips/kernel/cpu-r3k-probe.c | 2 --
arch/mips/kernel/traps.c | 12 +++++++-----
4 files changed, 7 insertions(+), 13 deletions(-)
diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
index 15cde638b407..d3dddd1c083a 100644
--- a/arch/mips/include/asm/traps.h
+++ b/arch/mips/include/asm/traps.h
@@ -24,7 +24,6 @@ extern void (*board_ebase_setup)(void);
extern void (*board_cache_error_setup)(void);
extern int register_nmi_notifier(struct notifier_block *nb);
-extern void reserve_exception_space(phys_addr_t addr, unsigned long size);
extern char except_vec_nmi[];
#define VECTORSPACING 0x100 /* for EI/VI mode */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b406d8bfb15a..54e8b0fd4a2a 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1570,7 +1570,6 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_BMIPS3300;
__cpu_name[cpu] = "Broadcom BMIPS3300";
set_elf_platform(cpu, "bmips3300");
- reserve_exception_space(0x400, VECTORSPACING * 64);
break;
case PRID_IMP_BMIPS43XX: {
int rev = c->processor_id & PRID_REV_MASK;
@@ -1581,7 +1580,6 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "Broadcom BMIPS4380";
set_elf_platform(cpu, "bmips4380");
c->options |= MIPS_CPU_RIXI;
- reserve_exception_space(0x400, VECTORSPACING * 64);
} else {
c->cputype = CPU_BMIPS4350;
__cpu_name[cpu] = "Broadcom BMIPS4350";
@@ -1598,7 +1596,6 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "Broadcom BMIPS5000";
set_elf_platform(cpu, "bmips5000");
c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI;
- reserve_exception_space(0x1000, VECTORSPACING * 64);
break;
}
}
@@ -1996,8 +1993,6 @@ void cpu_probe(void)
if (cpu == 0)
__ua_limit = ~((1ull << cpu_vmbits) - 1);
#endif
-
- reserve_exception_space(0, 0x1000);
}
void cpu_report(void)
diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
index be93469c0e0e..05410b743e57 100644
--- a/arch/mips/kernel/cpu-r3k-probe.c
+++ b/arch/mips/kernel/cpu-r3k-probe.c
@@ -137,8 +137,6 @@ void cpu_probe(void)
cpu_set_fpu_opts(c);
else
cpu_set_nofpu_opts(c);
-
- reserve_exception_space(0, 0x400);
}
void cpu_report(void)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 651c9ec6265a..b6e94654f621 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2007,10 +2007,6 @@ unsigned long exception_handlers[32];
static unsigned long vi_vecbase;
unsigned long vi_handlers[64];
-void reserve_exception_space(phys_addr_t addr, unsigned long size)
-{
- memblock_reserve(addr, size);
-}
void __init *set_except_vector(int n, void *addr)
{
@@ -2394,7 +2390,13 @@ void __init trap_init(void)
}
if (!cpu_has_mips_r2_r6) {
- ebase = CAC_BASE;
+ ebase_pa = 0x0;
+ ebase = CKSEG0ADDR(ebase_pa);
+
+ if (__pa_symbol(_stext) < (ebase_pa + vec_size))
+ pr_err("Insufficient space for exception vectors\n");
+
+ memblock_reserve(ebase_pa, vec_size);
} else {
vec_size = max(vec_size, PAGE_SIZE);
ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
--
2.34.1