Inspired by arm64's implementation -- commit 70918779aec9
("arm64: entry: Enable random_kstack_offset support")

Add support for kernel stack offset randomization while handling
syscalls; by default the offset is limited by KSTACK_OFFSET_MAX()
(i.e. 10 bits).

In order to avoid triggering stack canaries (due to __builtin_alloca)
and slowing down the entry path, use the __no_stack_protector attribute
to disable the stack protector for do_trap_ecall_u() at the function
level.
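For reference, the generic helpers used here boil down to roughly the
following (a simplified sketch of include/linux/randomize_kstack.h; the
static key check and the per-cpu variable definitions are elided, so
treat it as an approximation rather than the exact code):

	/* Simplified sketch only -- not the exact generic implementation. */
	#define KSTACK_OFFSET_MAX(x)	((x) & 0x3FF)	/* cap the offset at 10 bits */

	/* Before the syscall handler runs: grow the frame by the chosen offset. */
	#define add_random_kstack_offset() do {				\
		u32 offset = raw_cpu_read(kstack_offset);		\
		/* The alloca() is what would trip the stack protector. */ \
		u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset));	\
		asm volatile("" :: "r"(ptr) : "memory");		\
	} while (0)

	/* After the syscall handler: mix new entropy in for the next entry. */
	#define choose_random_kstack_offset(rand) do {			\
		u32 offset = raw_cpu_read(kstack_offset);		\
		offset = ror32(offset, 5) ^ (rand);			\
		raw_cpu_write(kstack_offset, offset);			\
	} while (0)

The __builtin_alloca() above is why do_trap_ecall_u() needs
__no_stack_protector: the variable-size allocation would otherwise make
the compiler emit a stack canary for the function.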
Signed-off-by: Song Shuai <[email protected]>
---
Tested with the randomize_kstack_offset=y cmdline; lkdtm/stack-entropy.sh
showed an appropriate stack offset instead of zero.
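As a rough sketch of the entropy math in the comment added to
do_trap_ecall_u() below (0x3FF is the mask applied by KSTACK_OFFSET_MAX(),
16 bytes the stack alignment mentioned in that comment):

	u16 rand    = 0xffff;		/* get_random_u16(): up to 16 bits    */
	u32 limited = rand & 0x3ff;	/* KSTACK_OFFSET_MAX(): keep 10 bits  */
	u32 applied = limited & ~0xfUL;	/* 16-byte alignment drops bits [3:0] */
	/* Only SP bits [9:4] end up varying: 2^6 = 64 offsets, 6 bits of entropy. */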
---
arch/riscv/Kconfig | 1 +
arch/riscv/kernel/traps.c | 18 +++++++++++++++++-
2 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index d607ab0f7c6d..0e843de33f0c 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -100,6 +100,7 @@ config RISCV
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+ select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 19807c4d3805..3f869b2d47c3 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -6,6 +6,7 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/randomize_kstack.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
@@ -296,9 +297,11 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
}
}
-asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
+asmlinkage __visible __trap_section __no_stack_protector
+void do_trap_ecall_u(struct pt_regs *regs)
{
if (user_mode(regs)) {
+
long syscall = regs->a7;
regs->epc += 4;
@@ -308,10 +311,23 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
syscall = syscall_enter_from_user_mode(regs, syscall);
+ add_random_kstack_offset();
+
if (syscall >= 0 && syscall < NR_syscalls)
syscall_handler(regs, syscall);
else if (syscall != -1)
regs->a0 = -ENOSYS;
+ /*
+ * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+ * so the maximum stack offset is 1k bytes (10 bits).
+ *
+ * The actual entropy will be further reduced by the compiler when
+ * applying stack alignment constraints: 16-byte (i.e. 4-bit) aligned
+ * for RV32I or RV64I.
+ *
+ * The resulting 6 bits of entropy is seen in SP[9:4].
+ */
+ choose_random_kstack_offset(get_random_u16());
syscall_exit_to_user_mode(regs);
} else {
--
2.20.1
On 11/1/23 15:44, Song Shuai wrote:
> Inspired by arm64's implementation -- commit 70918779aec9
> ("arm64: entry: Enable random_kstack_offset support")
>
> Add support for kernel stack offset randomization while handling
> syscalls; by default the offset is limited by KSTACK_OFFSET_MAX()
> (i.e. 10 bits).
>
> In order to avoid triggering stack canaries (due to __builtin_alloca)
> and slowing down the entry path, use the __no_stack_protector attribute
> to disable the stack protector for do_trap_ecall_u() at the function
> level.
>
> Signed-off-by: Song Shuai <[email protected]>
> ---
> Tested with the randomize_kstack_offset=y cmdline; lkdtm/stack-entropy.sh
> showed an appropriate stack offset instead of zero.
> ---
> arch/riscv/Kconfig | 1 +
> arch/riscv/kernel/traps.c | 18 +++++++++++++++++-
> 2 files changed, 18 insertions(+), 1 deletion(-)
>
> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> index d607ab0f7c6d..0e843de33f0c 100644
> --- a/arch/riscv/Kconfig
> +++ b/arch/riscv/Kconfig
> @@ -100,6 +100,7 @@ config RISCV
> select HAVE_ARCH_KGDB_QXFER_PKT
> select HAVE_ARCH_MMAP_RND_BITS if MMU
> select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
> + select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
> select HAVE_ARCH_SECCOMP_FILTER
> select HAVE_ARCH_THREAD_STRUCT_WHITELIST
> select HAVE_ARCH_TRACEHOOK
> diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
> index 19807c4d3805..3f869b2d47c3 100644
> --- a/arch/riscv/kernel/traps.c
> +++ b/arch/riscv/kernel/traps.c
> @@ -6,6 +6,7 @@
> #include <linux/cpu.h>
> #include <linux/kernel.h>
> #include <linux/init.h>
> +#include <linux/randomize_kstack.h>
> #include <linux/sched.h>
> #include <linux/sched/debug.h>
> #include <linux/sched/signal.h>
> @@ -296,9 +297,11 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
> }
> }
>
> -asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
> +asmlinkage __visible __trap_section __no_stack_protector
> +void do_trap_ecall_u(struct pt_regs *regs)
> {
> if (user_mode(regs)) {
> +
Stray blank line added here.
> long syscall = regs->a7;
>
> regs->epc += 4;
> @@ -308,10 +311,23 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
>
> syscall = syscall_enter_from_user_mode(regs, syscall);
>
> + add_random_kstack_offset();
> +
> if (syscall >= 0 && syscall < NR_syscalls)
> syscall_handler(regs, syscall);
> else if (syscall != -1)
> regs->a0 = -ENOSYS;
> + /*
> + * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
> + * so the maximum stack offset is 1k bytes (10 bits).
> + *
> + * The actual entropy will be further reduced by the compiler when
> + * applying stack alignment constraints: 16-byte (i.e. 4-bit) aligned
> + * for RV32I or RV64I.
> + *
> + * The resulting 6 bits of entropy is seen in SP[9:4].
> + */
> + choose_random_kstack_offset(get_random_u16());
>
> syscall_exit_to_user_mode(regs);
> } else {
--
Damien Le Moal
Western Digital Research