Hello,
this series rearranges the unaligned exception handler and adds a
load/store exception handler that allows transparent 1- and 2-byte-wide
reads from memory attached to the instruction bus of an xtensa core.
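
For illustration, here is a minimal sketch (not from the series; the
array name and placement are hypothetical) of the kind of access that
currently faults and that this series fixes up transparently:

    /* Assume .rodata is linked into FLASH/IRAM on the instruction
     * bus.  A byte read compiles to l8ui, which that bus rejects with
     * a load/store error exception; the new handler emulates it.
     */
    static const char greeting[] = "hello";

    char first_byte(void)
    {
            return greeting[0];     /* l8ui: fixed up by the handler */
    }
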
Max Filippov (4):
xtensa: move early_trap_init from kasan_early_init to init_arch
xtensa: always install slow handler for unaligned access exception
xtensa: rearrange unaligned exception handler
xtensa: add load/store exception handler
arch/xtensa/Kconfig | 12 ++
arch/xtensa/include/asm/traps.h | 7 +
arch/xtensa/kernel/align.S | 256 ++++++++++++++++++++++----------
arch/xtensa/kernel/setup.c | 7 +
arch/xtensa/kernel/traps.c | 27 +++-
arch/xtensa/mm/kasan_init.c | 2 -
6 files changed, 221 insertions(+), 90 deletions(-)
--
2.30.2
[PATCH 3/4] xtensa: rearrange unaligned exception handler

- extract the initialization part of the exception handler into a
  separate function.
- use a single label for invalid instructions instead of two separate
  labels, one for loads and one for stores.
- use the sext instruction for sign extension when available (a C
  sketch of the two equivalent forms follows this list).
- store SAR on the stack instead of in a0.
- replace numeric labels for load and store writeback with .Lload_w and
  .Lstore_w respectively.
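
Not part of the patch, just a C model of the sign-extension change
mentioned above:

    #include <stdint.h>

    /* With XCHAL_HAVE_SEXT the handler sign-extends the loaded
     * halfword with a single "sext a3, a3, 15"; otherwise it uses a
     * shift pair.  Both are equivalent to the following (which relies
     * on arithmetic right shift of signed values, as the srai opcode
     * does):
     */
    static inline int32_t sext16(uint32_t v)
    {
            return (int32_t)(v << 16) >> 16;    /* slli 16 + srai 16 */
    }
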
Signed-off-by: Max Filippov <[email protected]>
---
arch/xtensa/kernel/align.S | 171 +++++++++++++++++++------------------
1 file changed, 89 insertions(+), 82 deletions(-)
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index d062c732ef18..bcbd7962a684 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -157,58 +157,7 @@
.literal_position
ENTRY(fast_unaligned)
- /* Note: We don't expect the address to be aligned on a word
- * boundary. After all, the processor generated that exception
- * and it would be a hardware fault.
- */
-
- /* Save some working register */
-
- s32i a4, a2, PT_AREG4
- s32i a5, a2, PT_AREG5
- s32i a6, a2, PT_AREG6
- s32i a7, a2, PT_AREG7
- s32i a8, a2, PT_AREG8
-
- rsr a0, depc
- s32i a0, a2, PT_AREG2
- s32i a3, a2, PT_AREG3
-
- rsr a3, excsave1
- movi a4, fast_unaligned_fixup
- s32i a4, a3, EXC_TABLE_FIXUP
-
- /* Keep value of SAR in a0 */
-
- rsr a0, sar
- rsr a8, excvaddr # load unaligned memory address
-
- /* Now, identify one of the following load/store instructions.
- *
- * The only possible danger of a double exception on the
- * following l32i instructions is kernel code in vmalloc
- * memory. The processor was just executing at the EPC_1
- * address, and indeed, already fetched the instruction. That
- * guarantees a TLB mapping, which hasn't been replaced by
- * this unaligned exception handler that uses only static TLB
- * mappings. However, high-level interrupt handlers might
- * modify TLB entries, so for the generic case, we register a
- * TABLE_FIXUP handler here, too.
- */
-
- /* a3...a6 saved on stack, a2 = SP */
-
- /* Extract the instruction that caused the unaligned access. */
-
- rsr a7, epc1 # load exception address
- movi a3, ~3
- and a3, a3, a7 # mask lower bits
-
- l32i a4, a3, 0 # load 2 words
- l32i a5, a3, 4
-
- __ssa8 a7
- __src_b a4, a4, a5 # a4 has the instruction
+ call0 .Lsave_and_load_instruction
/* Analyze the instruction (load or store?). */
@@ -249,7 +198,7 @@ ENTRY(fast_unaligned)
addi a7, a7, 2 # increment PC (assume 16-bit insn)
extui a5, a4, INSN_OP0, 4
- _beqi a5, OP0_L32I_N, 1f # l32i.n: jump
+ _beqi a5, OP0_L32I_N, .Lload_w# l32i.n: jump
addi a7, a7, 1
#else
@@ -257,21 +206,24 @@ ENTRY(fast_unaligned)
#endif
extui a5, a4, INSN_OP1, 4
- _beqi a5, OP1_L32I, 1f # l32i: jump
+ _beqi a5, OP1_L32I, .Lload_w # l32i: jump
extui a3, a3, 0, 16 # extract lower 16 bits
- _beqi a5, OP1_L16UI, 1f
+ _beqi a5, OP1_L16UI, .Lload_w
addi a5, a5, -OP1_L16SI
- _bnez a5, .Linvalid_instruction_load
+ _bnez a5, .Linvalid_instruction
/* sign extend value */
-
+#if XCHAL_HAVE_SEXT
+ sext a3, a3, 15
+#else
slli a3, a3, 16
srai a3, a3, 16
+#endif
/* Set target register. */
-1:
+.Lload_w:
extui a4, a4, INSN_T, 4 # extract target register
movi a5, .Lload_table
addx8 a4, a4, a5
@@ -297,28 +249,27 @@ ENTRY(fast_unaligned)
mov a15, a3 ; _j .Lexit; .align 8
.Lstore_table:
- l32i a3, a2, PT_AREG0; _j 1f; .align 8
- mov a3, a1; _j 1f; .align 8 # fishy??
- l32i a3, a2, PT_AREG2; _j 1f; .align 8
- l32i a3, a2, PT_AREG3; _j 1f; .align 8
- l32i a3, a2, PT_AREG4; _j 1f; .align 8
- l32i a3, a2, PT_AREG5; _j 1f; .align 8
- l32i a3, a2, PT_AREG6; _j 1f; .align 8
- l32i a3, a2, PT_AREG7; _j 1f; .align 8
- l32i a3, a2, PT_AREG8; _j 1f; .align 8
- mov a3, a9 ; _j 1f; .align 8
- mov a3, a10 ; _j 1f; .align 8
- mov a3, a11 ; _j 1f; .align 8
- mov a3, a12 ; _j 1f; .align 8
- mov a3, a13 ; _j 1f; .align 8
- mov a3, a14 ; _j 1f; .align 8
- mov a3, a15 ; _j 1f; .align 8
+ l32i a3, a2, PT_AREG0; _j .Lstore_w; .align 8
+ mov a3, a1; _j .Lstore_w; .align 8 # fishy??
+ l32i a3, a2, PT_AREG2; _j .Lstore_w; .align 8
+ l32i a3, a2, PT_AREG3; _j .Lstore_w; .align 8
+ l32i a3, a2, PT_AREG4; _j .Lstore_w; .align 8
+ l32i a3, a2, PT_AREG5; _j .Lstore_w; .align 8
+ l32i a3, a2, PT_AREG6; _j .Lstore_w; .align 8
+ l32i a3, a2, PT_AREG7; _j .Lstore_w; .align 8
+ l32i a3, a2, PT_AREG8; _j .Lstore_w; .align 8
+ mov a3, a9 ; _j .Lstore_w; .align 8
+ mov a3, a10 ; _j .Lstore_w; .align 8
+ mov a3, a11 ; _j .Lstore_w; .align 8
+ mov a3, a12 ; _j .Lstore_w; .align 8
+ mov a3, a13 ; _j .Lstore_w; .align 8
+ mov a3, a14 ; _j .Lstore_w; .align 8
+ mov a3, a15 ; _j .Lstore_w; .align 8
/* We cannot handle this exception. */
.extern _kernel_exception
-.Linvalid_instruction_load:
-.Linvalid_instruction_store:
+.Linvalid_instruction:
movi a4, 0
rsr a3, excsave1
@@ -326,6 +277,7 @@ ENTRY(fast_unaligned)
/* Restore a4...a8 and SAR, set SP, and jump to default exception. */
+ l32i a0, a2, PT_SAR
l32i a8, a2, PT_AREG8
l32i a7, a2, PT_AREG7
l32i a6, a2, PT_AREG6
@@ -343,8 +295,8 @@ ENTRY(fast_unaligned)
2: movi a0, _user_exception
jx a0
-1: # a7: instruction pointer, a4: instruction, a3: value
-
+ # a7: instruction pointer, a4: instruction, a3: value
+.Lstore_w:
movi a6, 0 # mask: ffffffff:00000000
#if XCHAL_HAVE_DENSITY
@@ -361,7 +313,7 @@ ENTRY(fast_unaligned)
extui a5, a4, INSN_OP1, 4 # extract OP1
_beqi a5, OP1_S32I, 1f # jump if 32 bit store
- _bnei a5, OP1_S16I, .Linvalid_instruction_store
+ _bnei a5, OP1_S16I, .Linvalid_instruction
movi a5, -1
__extl a3, a3 # get 16-bit value
@@ -434,6 +386,7 @@ ENTRY(fast_unaligned)
/* Restore working register */
+ l32i a0, a2, PT_SAR
l32i a8, a2, PT_AREG8
l32i a7, a2, PT_AREG7
l32i a6, a2, PT_AREG6
@@ -448,6 +401,59 @@ ENTRY(fast_unaligned)
l32i a2, a2, PT_AREG2
rfe
+ .align 4
+.Lsave_and_load_instruction:
+
+ /* Save some working register */
+
+ s32i a3, a2, PT_AREG3
+ s32i a4, a2, PT_AREG4
+ s32i a5, a2, PT_AREG5
+ s32i a6, a2, PT_AREG6
+ s32i a7, a2, PT_AREG7
+ s32i a8, a2, PT_AREG8
+
+ rsr a4, depc
+ s32i a4, a2, PT_AREG2
+
+ rsr a5, sar
+ s32i a5, a2, PT_SAR
+
+ rsr a3, excsave1
+ movi a4, fast_unaligned_fixup
+ s32i a4, a3, EXC_TABLE_FIXUP
+
+ rsr a8, excvaddr # load unaligned memory address
+
+ /* Now, identify one of the following load/store instructions.
+ *
+ * The only possible danger of a double exception on the
+ * following l32i instructions is kernel code in vmalloc
+ * memory. The processor was just executing at the EPC_1
+ * address, and indeed, already fetched the instruction. That
+ * guarantees a TLB mapping, which hasn't been replaced by
+ * this unaligned exception handler that uses only static TLB
+ * mappings. However, high-level interrupt handlers might
+ * modify TLB entries, so for the generic case, we register a
+ * TABLE_FIXUP handler here, too.
+ */
+
+ /* a3...a6 saved on stack, a2 = SP */
+
+ /* Extract the instruction that caused the unaligned access. */
+
+ rsr a7, epc1 # load exception address
+ movi a3, ~3
+ and a3, a3, a7 # mask lower bits
+
+ l32i a4, a3, 0 # load 2 words
+ l32i a5, a3, 4
+
+ __ssa8 a7
+ __src_b a4, a4, a5 # a4 has the instruction
+
+ ret
+
ENDPROC(fast_unaligned)
ENTRY(fast_unaligned_fixup)
@@ -459,10 +465,11 @@ ENTRY(fast_unaligned_fixup)
l32i a7, a2, PT_AREG7
l32i a6, a2, PT_AREG6
l32i a5, a2, PT_AREG5
- l32i a4, a2, PT_AREG4
+ l32i a4, a2, PT_SAR
l32i a0, a2, PT_AREG2
- xsr a0, depc # restore depc and a0
- wsr a0, sar
+ wsr a4, sar
+ wsr a0, depc # restore depc and a0
+ l32i a4, a2, PT_AREG4
rsr a0, exccause
s32i a0, a2, PT_DEPC # mark as a regular exception
--
2.30.2
[PATCH 4/4] xtensa: add load/store exception handler

Memory attached to the instruction bus of the xtensa CPU is only
accessible with a limited subset of opcodes. Other opcodes generate an
exception with the load/store error cause code. This property
complicates the use of such systems. Provide a handler that recognizes
and transparently fixes such exceptions. The following opcodes are
recognized when used outside of FLIX bundles: l32i, l32i.n, l16ui,
l16si, l8ui.
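
A rough C model of the fixup the fast handler performs (a sketch only,
assuming a little-endian core; on big-endian cores the shift direction
differs, which the __src_b macro takes care of in the real code):

    #include <stdint.h>

    /* Narrow loads are emulated with two aligned 32-bit loads, the
     * only kind the instruction bus accepts, followed by a funnel
     * shift that extracts the addressed byte or halfword (l16si
     * additionally sign-extends the result afterwards).
     */
    static uint32_t emulate_narrow_load(uintptr_t addr, unsigned int size)
    {
            uint32_t lo = *(volatile uint32_t *)(addr & ~(uintptr_t)3);
            uint32_t hi = *(volatile uint32_t *)((addr & ~(uintptr_t)3) + 4);
            uint64_t window = ((uint64_t)hi << 32) | lo;
            uint32_t value = window >> ((addr & 3) * 8);

            return size == 1 ? value & 0xff : value & 0xffff;
    }
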
Signed-off-by: Max Filippov <[email protected]>
---
arch/xtensa/Kconfig | 12 ++++
arch/xtensa/include/asm/traps.h | 5 ++
arch/xtensa/kernel/align.S | 109 ++++++++++++++++++++++++++++----
arch/xtensa/kernel/setup.c | 3 +-
arch/xtensa/kernel/traps.c | 21 +++++-
5 files changed, 136 insertions(+), 14 deletions(-)
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index b1189f085a68..c9cb0cc78ebb 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -209,6 +209,18 @@ config XTENSA_UNALIGNED_USER
Say Y here to enable unaligned memory access in user space.
+config XTENSA_LOAD_STORE
+ bool "Load/store exception handler for memory only readable with l32"
+ help
+ The Xtensa architecture only allows reading memory attached to its
+ instruction bus with l32r and l32i instructions, all other
+ instructions raise an exception with the LoadStoreErrorCause code.
+ This makes it hard to use some configurations, e.g. store string
+ literals in FLASH memory attached to the instruction bus.
+
+ Say Y here to enable exception handler that allows transparent
+ byte and 2-byte access to memory attached to instruction bus.
+
config HAVE_SMP
bool "System Supports SMP (MX)"
depends on XTENSA_VARIANT_CUSTOM
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index acffb02f8760..212c3b9ff407 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -47,6 +47,7 @@ __init trap_set_handler(int cause, xtensa_exception_handler *handler);
asmlinkage void fast_illegal_instruction_user(void);
asmlinkage void fast_syscall_user(void);
asmlinkage void fast_alloca(void);
+asmlinkage void fast_load_store(void);
asmlinkage void fast_unaligned(void);
asmlinkage void fast_second_level_miss(void);
asmlinkage void fast_store_prohibited(void);
@@ -64,6 +65,10 @@ void do_unhandled(struct pt_regs *regs);
static inline void __init early_trap_init(void)
{
static struct exc_table init_exc_table __initdata = {
+#ifdef CONFIG_XTENSA_LOAD_STORE
+ .fast_kernel_handler[EXCCAUSE_LOAD_STORE_ERROR] =
+ fast_load_store,
+#endif
#ifdef CONFIG_MMU
.fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
fast_second_level_miss,
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index bcbd7962a684..20d6b4961001 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -22,7 +22,17 @@
#include <asm/asmmacro.h>
#include <asm/processor.h>
-#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || defined CONFIG_XTENSA_LOAD_STORE
+#define LOAD_EXCEPTION_HANDLER
+#endif
+
+#if XCHAL_UNALIGNED_STORE_EXCEPTION || defined LOAD_EXCEPTION_HANDLER
+#define ANY_EXCEPTION_HANDLER
+#endif
+
+#if XCHAL_HAVE_WINDOWED
+#define UNALIGNED_USER_EXCEPTION
+#endif
/* First-level exception handler for unaligned exceptions.
*
@@ -58,10 +68,6 @@
* BE shift left / mask 0 0 X X
*/
-#if XCHAL_HAVE_WINDOWED
-#define UNALIGNED_USER_EXCEPTION
-#endif
-
#if XCHAL_HAVE_BE
#define HWORD_START 16
@@ -103,7 +109,7 @@
*
* 23 0
* -----------------------------
- * res 0000 0010
+ * L8UI xxxx xxxx 0000 ssss tttt 0010
* L16UI xxxx xxxx 0001 ssss tttt 0010
* L32I xxxx xxxx 0010 ssss tttt 0010
* XXX 0011 ssss tttt 0010
@@ -128,9 +134,11 @@
#define OP0_L32I_N 0x8 /* load immediate narrow */
#define OP0_S32I_N 0x9 /* store immediate narrow */
+#define OP0_LSAI 0x2 /* load/store */
#define OP1_SI_MASK 0x4 /* OP1 bit set for stores */
#define OP1_SI_BIT 2 /* OP1 bit number for stores */
+#define OP1_L8UI 0x0
#define OP1_L32I 0x2
#define OP1_L16UI 0x1
#define OP1_L16SI 0x9
@@ -155,8 +163,73 @@
*/
.literal_position
+#ifdef CONFIG_XTENSA_LOAD_STORE
+ENTRY(fast_load_store)
+
+ call0 .Lsave_and_load_instruction
+
+ /* Analyze the instruction (load or store?). */
+
+ extui a0, a4, INSN_OP0, 4 # get insn.op0 nibble
+
+#if XCHAL_HAVE_DENSITY
+ _beqi a0, OP0_L32I_N, 1f # L32I.N, jump
+#endif
+ bnei a0, OP0_LSAI, .Linvalid_instruction
+ /* 'store indicator bit' set, jump */
+ bbsi.l a4, OP1_SI_BIT + INSN_OP1, .Linvalid_instruction
+
+1:
+ movi a3, ~3
+ and a3, a3, a8 # align memory address
+
+ __ssa8 a8
+
+#ifdef CONFIG_MMU
+ /* l32e can't be used here even when it's available. */
+ /* TODO access_ok(a3) could be used here */
+ j .Linvalid_instruction
+#endif
+ l32i a5, a3, 0
+ l32i a6, a3, 4
+ __src_b a3, a5, a6 # a3 has the data word
+
+#if XCHAL_HAVE_DENSITY
+ addi a7, a7, 2 # increment PC (assume 16-bit insn)
+ _beqi a0, OP0_L32I_N, .Lload_w# l32i.n: jump
+ addi a7, a7, 1
+#else
+ addi a7, a7, 3
+#endif
+
+ extui a5, a4, INSN_OP1, 4
+ _beqi a5, OP1_L32I, .Lload_w
+ bnei a5, OP1_L8UI, .Lload16
+ extui a3, a3, 0, 8
+ j .Lload_w
+
+ENDPROC(fast_load_store)
+#endif
+
+/*
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+#ifdef ANY_EXCEPTION_HANDLER
ENTRY(fast_unaligned)
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+
call0 .Lsave_and_load_instruction
/* Analyze the instruction (load or store?). */
@@ -171,12 +244,17 @@ ENTRY(fast_unaligned)
/* 'store indicator bit' not set, jump */
_bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload
+#endif
+#if XCHAL_UNALIGNED_STORE_EXCEPTION
+
/* Store: Jump to table entry to get the value in the source register.*/
.Lstore:movi a5, .Lstore_table # table
extui a6, a4, INSN_T, 4 # get source register
addx8 a5, a6, a5
jx a5 # jump into table
+#endif
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION
/* Load: Load memory address. */
@@ -207,7 +285,9 @@ ENTRY(fast_unaligned)
extui a5, a4, INSN_OP1, 4
_beqi a5, OP1_L32I, .Lload_w # l32i: jump
-
+#endif
+#ifdef LOAD_EXCEPTION_HANDLER
+.Lload16:
extui a3, a3, 0, 16 # extract lower 16 bits
_beqi a5, OP1_L16UI, .Lload_w
addi a5, a5, -OP1_L16SI
@@ -247,7 +327,8 @@ ENTRY(fast_unaligned)
mov a13, a3 ; _j .Lexit; .align 8
mov a14, a3 ; _j .Lexit; .align 8
mov a15, a3 ; _j .Lexit; .align 8
-
+#endif
+#if XCHAL_UNALIGNED_STORE_EXCEPTION
.Lstore_table:
l32i a3, a2, PT_AREG0; _j .Lstore_w; .align 8
mov a3, a1; _j .Lstore_w; .align 8 # fishy??
@@ -265,7 +346,9 @@ ENTRY(fast_unaligned)
mov a3, a13 ; _j .Lstore_w; .align 8
mov a3, a14 ; _j .Lstore_w; .align 8
mov a3, a15 ; _j .Lstore_w; .align 8
+#endif
+#ifdef ANY_EXCEPTION_HANDLER
/* We cannot handle this exception. */
.extern _kernel_exception
@@ -294,6 +377,8 @@ ENTRY(fast_unaligned)
2: movi a0, _user_exception
jx a0
+#endif
+#if XCHAL_UNALIGNED_STORE_EXCEPTION
# a7: instruction pointer, a4: instruction, a3: value
.Lstore_w:
@@ -358,7 +443,8 @@ ENTRY(fast_unaligned)
#else
s32i a6, a4, 4
#endif
-
+#endif
+#ifdef ANY_EXCEPTION_HANDLER
.Lexit:
#if XCHAL_HAVE_LOOPS
rsr a4, lend # check if we reached LEND
@@ -453,7 +539,7 @@ ENTRY(fast_unaligned)
__src_b a4, a4, a5 # a4 has the instruction
ret
-
+#endif
ENDPROC(fast_unaligned)
ENTRY(fast_unaligned_fixup)
@@ -490,5 +576,4 @@ ENTRY(fast_unaligned_fixup)
jx a0
ENDPROC(fast_unaligned_fixup)
-
-#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
+#endif
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 5c0c0fcac144..08d9cb5cbd41 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -245,7 +245,8 @@ void __init init_arch(bp_tag_t *bp_start)
{
/* Initialize basic exception handling if configuration may need it */
- if (IS_ENABLED(CONFIG_KASAN))
+ if (IS_ENABLED(CONFIG_KASAN) ||
+ IS_ENABLED(CONFIG_XTENSA_LOAD_STORE))
early_trap_init();
/* Initialize MMU. */
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index f447262468c5..5fc19aa5ba5d 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -54,6 +54,9 @@ static void do_interrupt(struct pt_regs *regs);
#if XTENSA_FAKE_NMI
static void do_nmi(struct pt_regs *regs);
#endif
+#ifdef CONFIG_XTENSA_LOAD_STORE
+static void do_load_store(struct pt_regs *regs);
+#endif
static void do_unaligned_user(struct pt_regs *regs);
static void do_multihit(struct pt_regs *regs);
#if XTENSA_HAVE_COPROCESSORS
@@ -89,7 +92,10 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
{ EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL, 0, system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
-/* EXCCAUSE_LOAD_STORE_ERROR unhandled*/
+#ifdef CONFIG_XTENSA_LOAD_STORE
+{ EXCCAUSE_LOAD_STORE_ERROR, USER|KRNL, fast_load_store },
+{ EXCCAUSE_LOAD_STORE_ERROR, 0, do_load_store },
+#endif
{ EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
#ifdef SUPPORT_WINDOWED
{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
@@ -354,6 +360,19 @@ static void do_div0(struct pt_regs *regs)
force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc);
}
+#ifdef CONFIG_XTENSA_LOAD_STORE
+static void do_load_store(struct pt_regs *regs)
+{
+ __die_if_kernel("Unhandled load/store exception in kernel",
+ regs, SIGKILL);
+
+ pr_info_ratelimited("Load/store error to %08lx in '%s' (pid = %d, pc = %#010lx)\n",
+ regs->excvaddr, current->comm,
+ task_pid_nr(current), regs->pc);
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void *)regs->excvaddr);
+}
+#endif
+
/*
* Handle unaligned memory accesses from user space. Kill task.
*
--
2.30.2
[PATCH 1/4] xtensa: move early_trap_init from kasan_early_init to init_arch

There may be other users of the early traps besides KASAN. Move the
early_trap_init call from kasan_early_init to init_arch. Protect the
init_exc_table initializer with an ifdef to make sure it builds on
noMMU configurations.
Signed-off-by: Max Filippov <[email protected]>
---
arch/xtensa/include/asm/traps.h | 2 ++
arch/xtensa/kernel/setup.c | 6 ++++++
arch/xtensa/mm/kasan_init.c | 2 --
3 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 6f74ccc0c7ea..acffb02f8760 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -64,8 +64,10 @@ void do_unhandled(struct pt_regs *regs);
static inline void __init early_trap_init(void)
{
static struct exc_table init_exc_table __initdata = {
+#ifdef CONFIG_MMU
.fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
fast_second_level_miss,
+#endif
};
xtensa_set_sr(&init_exc_table, excsave1);
}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 94aafa19771b..5c0c0fcac144 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -47,6 +47,7 @@
#include <asm/smp.h>
#include <asm/sysmem.h>
#include <asm/timex.h>
+#include <asm/traps.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
@@ -242,6 +243,11 @@ void __init early_init_devtree(void *params)
void __init init_arch(bp_tag_t *bp_start)
{
+ /* Initialize basic exception handling if configuration may need it */
+
+ if (IS_ENABLED(CONFIG_KASAN))
+ early_trap_init();
+
/* Initialize MMU. */
init_mmu();
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index 1fef24db2ff6..f00d122aa806 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
-#include <asm/traps.h>
void __init kasan_early_init(void)
{
@@ -31,7 +30,6 @@ void __init kasan_early_init(void)
BUG_ON(!pmd_none(*pmd));
set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
}
- early_trap_init();
}
static void __init populate(void *start, void *end)
--
2.30.2
[PATCH 2/4] xtensa: always install slow handler for unaligned access exception

Currently the slow handler for the unaligned access exception is not
installed when the CPU has hardware support for unaligned access.
However, some opcodes (e.g. l32ai, s32ri, s32c1i) still raise the
unaligned access exception even on such CPUs. In that case the faulting
process receives SIGILL instead of SIGBUS and a diagnostic entry in the
kernel log.
Always install the slow handler for the unaligned access exception to
fix that.
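
For illustration only (not from the patch): one way a process can hit
this path even on a core with hardware unaligned support, since the
atomic opcodes always require natural alignment (whether the compiler
emits l32ai/s32ri or an s32c1i loop here is toolchain-dependent):

    #include <stdatomic.h>

    char buf[8];

    void misaligned_atomic(void)
    {
            /* deliberately misaligned pointer; undefined behavior in
             * C, used only to provoke the exception
             */
            _Atomic int *p = (_Atomic int *)(buf + 1);

            atomic_store(p, 1);     /* raises the unaligned access
                                     * exception; with this patch the
                                     * process gets SIGBUS and a kernel
                                     * log entry instead of SIGILL
                                     */
    }
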
Signed-off-by: Max Filippov <[email protected]>
---
arch/xtensa/kernel/traps.c | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index a92c8593d4f1..f447262468c5 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -54,9 +54,7 @@ static void do_interrupt(struct pt_regs *regs);
#if XTENSA_FAKE_NMI
static void do_nmi(struct pt_regs *regs);
#endif
-#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
static void do_unaligned_user(struct pt_regs *regs);
-#endif
static void do_multihit(struct pt_regs *regs);
#if XTENSA_HAVE_COPROCESSORS
static void do_coprocessor(struct pt_regs *regs);
@@ -102,9 +100,9 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
#ifdef CONFIG_XTENSA_UNALIGNED_USER
{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
#endif
-{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
#endif
+{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
#ifdef CONFIG_MMU
{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
@@ -363,7 +361,6 @@ static void do_div0(struct pt_regs *regs)
* accesses causes from user space.
*/
-#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
static void do_unaligned_user(struct pt_regs *regs)
{
__die_if_kernel("Unhandled unaligned exception in kernel",
@@ -375,7 +372,6 @@ static void do_unaligned_user(struct pt_regs *regs)
task_pid_nr(current), regs->pc);
force_sig_fault(SIGBUS, BUS_ADRALN, (void *) regs->excvaddr);
}
-#endif
#if XTENSA_HAVE_COPROCESSORS
static void do_coprocessor(struct pt_regs *regs)
--
2.30.2