Hi,
Not much change on the core side, but:

* Use the latest arm64 static call implementation by Ard
* Rebase against latest tip:sched/core
git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks.git
preempt/arm-v2
HEAD: 5700542b609d9ab640210d7dd93621c7967688e6
Thanks,
Frederic
---
Frederic Weisbecker (3):
sched/preempt: Prepare for supporting !CONFIG_GENERIC_ENTRY dynamic preemption
arm64: Implement IRQ exit preemption static call for dynamic preemption
arm64: Implement HAVE_PREEMPT_DYNAMIC
Ard Biesheuvel (1):
arm64: implement support for static call trampolines
arch/Kconfig                         |  1 -
arch/arm64/Kconfig                   |  2 ++
arch/arm64/include/asm/preempt.h     | 23 +++++++++++++++-
arch/arm64/include/asm/static_call.h | 25 ++++++++++++++++++
arch/arm64/kernel/entry-common.c     | 15 ++++++++---
arch/arm64/kernel/patching.c         | 51 +++++++++++++++++++++++++++++++++---
arch/arm64/kernel/vmlinux.lds.S      |  1 +
include/linux/entry-common.h         |  3 ++-
kernel/sched/core.c                  |  6 +++--
9 files changed, 116 insertions(+), 11 deletions(-)
arm64: Implement IRQ exit preemption static call for dynamic preemption

arm64 doesn't support generic entry yet, so the architecture's own IRQ
exit preemption path needs to be exposed through the relevant static
call.
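A rough sketch of what this enables, for readers following the series:
sched_dynamic_update() can then retarget the call at boot according to the
preempt= parameter. The preempt_dynamic_none branch below is an assumption
based on the existing GENERIC_ENTRY behaviour, not something this patch adds:

	switch (mode) {
	case preempt_dynamic_full:
		static_call_update(irqentry_exit_cond_resched,
				   __irqentry_exit_cond_resched_func);
		break;
	case preempt_dynamic_none:
		/* assumed: patches the trampoline to a bare return */
		static_call_update(irqentry_exit_cond_resched, NULL);
		break;
	}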
Signed-off-by: Frederic Weisbecker <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Quentin Perret <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: James Morse <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: David Laight <[email protected]>
---
arch/arm64/include/asm/preempt.h |  7 +++++++
arch/arm64/kernel/entry-common.c | 15 ++++++++++++---
2 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index e83f0982b99c..4fbbe644532f 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -3,6 +3,7 @@
#define __ASM_PREEMPT_H
#include <linux/thread_info.h>
+#include <linux/static_call_types.h>
#define PREEMPT_NEED_RESCHED BIT(32)
#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
@@ -86,4 +87,10 @@ void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */
+#ifdef CONFIG_PREEMPT_DYNAMIC
+void arm64_preempt_schedule_irq(void);
+#define __irqentry_exit_cond_resched_func arm64_preempt_schedule_irq
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, __irqentry_exit_cond_resched_func);
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
#endif /* __ASM_PREEMPT_H */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 32f9796c4ffe..f1c739dd874d 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>
+#include <linux/static_call.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
@@ -235,7 +236,7 @@ static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
exit_to_kernel_mode(regs);
}
-static void __sched arm64_preempt_schedule_irq(void)
+void __sched arm64_preempt_schedule_irq(void)
{
lockdep_assert_irqs_disabled();
@@ -259,6 +260,9 @@ static void __sched arm64_preempt_schedule_irq(void)
if (system_capabilities_finalized())
preempt_schedule_irq();
}
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_CALL(irqentry_exit_cond_resched, arm64_preempt_schedule_irq);
+#endif
static void do_interrupt_handler(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
@@ -446,8 +450,13 @@ static void noinstr el1_interrupt(struct pt_regs *regs,
* preempt_count().
*/
if (IS_ENABLED(CONFIG_PREEMPTION) &&
- READ_ONCE(current_thread_info()->preempt_count) == 0)
- arm64_preempt_schedule_irq();
+ READ_ONCE(current_thread_info()->preempt_count) == 0) {
+#ifdef CONFIG_PREEMPT_DYNAMIC
+ static_call(irqentry_exit_cond_resched)();
+#else
+ arm64_preempt_schedule_irq();
+#endif
+ }
exit_el1_irq_or_nmi(regs);
}
--
2.25.1
sched/preempt: Prepare for supporting !CONFIG_GENERIC_ENTRY dynamic preemption

There is no need to force dynamic preemption to depend on the generic
entry code. The latter is convenient but not mandatory. An architecture
that doesn't support it just needs to provide a static call on its
kernel IRQ exit preemption path.
Prepare the preempt dynamic code to handle that.
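Concretely, an architecture without generic entry is expected to provide
something along these lines in its asm/preempt.h (a sketch modelled on the
arm64 patch in this series; arch_irq_exit_preempt is a hypothetical name):

	#ifdef CONFIG_PREEMPT_DYNAMIC
	void arch_irq_exit_preempt(void);
	#define __irqentry_exit_cond_resched_func arch_irq_exit_preempt
	DECLARE_STATIC_CALL(irqentry_exit_cond_resched,
			    __irqentry_exit_cond_resched_func);
	#endif

and to invoke static_call(irqentry_exit_cond_resched)() from its kernel IRQ
exit path.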
Signed-off-by: Frederic Weisbecker <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Quentin Perret <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: James Morse <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: David Laight <[email protected]>
---
arch/Kconfig                 | 1 -
include/linux/entry-common.h | 3 ++-
kernel/sched/core.c          | 6 ++++--
3 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 8df1c7102643..9af493999d43 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1255,7 +1255,6 @@ config HAVE_STATIC_CALL_INLINE
config HAVE_PREEMPT_DYNAMIC
bool
depends on HAVE_STATIC_CALL
- depends on GENERIC_ENTRY
help
Select this if the architecture support boot time preempt setting
on top of static calls. It is strongly advised to support inline
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index 2e2b8d6140ed..81166bbc0f22 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -456,7 +456,8 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
*/
void irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
-DECLARE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+#define __irqentry_exit_cond_resched_func irqentry_exit_cond_resched
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, __irqentry_exit_cond_resched_func);
#endif
/**
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f2611b9cf503..355722abcd79 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6510,7 +6510,9 @@ EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
#ifdef CONFIG_PREEMPT_DYNAMIC
+#ifdef CONFIG_GENERIC_ENTRY
#include <linux/entry-common.h>
+#endif
/*
* SC:cond_resched
@@ -6575,7 +6577,7 @@ void sched_dynamic_update(int mode)
static_call_update(might_resched, __cond_resched);
static_call_update(preempt_schedule, __preempt_schedule_func);
static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
- static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+ static_call_update(irqentry_exit_cond_resched, __irqentry_exit_cond_resched_func);
switch (mode) {
case preempt_dynamic_none:
@@ -6601,7 +6603,7 @@ void sched_dynamic_update(int mode)
static_call_update(might_resched, (void *)&__static_call_return0);
static_call_update(preempt_schedule, __preempt_schedule_func);
static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
- static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+ static_call_update(irqentry_exit_cond_resched, __irqentry_exit_cond_resched_func);
pr_info("Dynamic Preempt: full\n");
break;
}
--
2.25.1
From: Ard Biesheuvel <[email protected]>
arm64: implement support for static call trampolines

Implement arm64 support for the 'unoptimized' static call variety, which
routes all calls through a single trampoline that is patched to perform a
tail call to the selected function.
It is expected that the direct branch instruction will be able to cover
the common case. However, given that static call targets may be located
in modules loaded out of direct branching range, we need a fallback path
that loads the address into R16 and uses a branch-to-register (BR)
instruction to perform an indirect call.
Unlike on x86, there is no pressing need on arm64 to avoid indirect
calls at all cost, but hiding it from the compiler as is done here does
have some benefits:
- the literal is located in .text, which gives us the same robustness
advantage that code patching does;
- no performance hit on CFI enabled Clang builds that decorate compiler
emitted indirect calls with branch target validity checks.
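For context, a minimal sketch of the generic static call API that these
trampolines back (my_hook, default_hook and module_hook are made-up names):

	DEFINE_STATIC_CALL(my_hook, default_hook);	/* emits one trampoline */

	void caller(void)
	{
		static_call(my_hook)();	/* direct branch into the trampoline */
	}

	/* later, possibly to a target out of direct branch range: */
	static_call_update(my_hook, &module_hook);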
Signed-off-by: Ard Biesheuvel <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Quentin Perret <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: James Morse <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: David Laight <[email protected]>
Signed-off-by: Frederic Weisbecker <[email protected]>
---
arch/arm64/Kconfig                   |  1 +
arch/arm64/include/asm/static_call.h | 25 ++++++++++++++++++
arch/arm64/kernel/patching.c         | 51 ++++++++++++++++++++++++++--
arch/arm64/kernel/vmlinux.lds.S      |  1 +
4 files changed, 75 insertions(+), 3 deletions(-)
create mode 100644 arch/arm64/include/asm/static_call.h
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d13677f4731d..34b175b1e247 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -192,6 +192,7 @@ config ARM64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_STATIC_CALL
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUTEX_CMPXCHG if FUTEX
select MMU_GATHER_RCU_TABLE_FREE
diff --git a/arch/arm64/include/asm/static_call.h b/arch/arm64/include/asm/static_call.h
new file mode 100644
index 000000000000..4871374d584b
--- /dev/null
+++ b/arch/arm64/include/asm/static_call.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STATIC_CALL_H
+#define _ASM_STATIC_CALL_H
+
+#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insn) \
+ asm(" .pushsection .static_call.text, \"ax\" \n" \
+ " .align 4 \n" \
+ " .globl " STATIC_CALL_TRAMP_STR(name) " \n" \
+ "0: .quad 0x0 \n" \
+ STATIC_CALL_TRAMP_STR(name) ": \n" \
+ " hint 34 /* BTI C */ \n" \
+ insn " \n" \
+ " ldr x16, 0b \n" \
+ " cbz x16, 1f \n" \
+ " br x16 \n" \
+ "1: ret \n" \
+ " .popsection \n")
+
+#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
+ __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "b " #func)
+
+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
+ __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret")
+
+#endif /* _ASM_STATIC_CALL_H */
diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
index 771f543464e0..f98127d92e1f 100644
--- a/arch/arm64/kernel/patching.c
+++ b/arch/arm64/kernel/patching.c
@@ -66,7 +66,7 @@ int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
return ret;
}
-static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
+static int __kprobes __aarch64_insn_write(void *addr, void *insn, int size)
{
void *waddr = addr;
unsigned long flags = 0;
@@ -75,7 +75,7 @@ static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
raw_spin_lock_irqsave(&patch_lock, flags);
waddr = patch_map(addr, FIX_TEXT_POKE0);
- ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
+ ret = copy_to_kernel_nofault(waddr, insn, size);
patch_unmap(FIX_TEXT_POKE0);
raw_spin_unlock_irqrestore(&patch_lock, flags);
@@ -85,7 +85,52 @@ static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
- return __aarch64_insn_write(addr, cpu_to_le32(insn));
+ __le32 i = cpu_to_le32(insn);
+
+ return __aarch64_insn_write(addr, &i, AARCH64_INSN_SIZE);
+}
+
+void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
+{
+ /*
+ * -0x8 <literal>
+ * 0x0 bti c <--- trampoline entry point
+ * 0x4 <branch or nop>
+ * 0x8 ldr x16, <literal>
+ * 0xc cbz x16, 20
+ * 0x10 br x16
+ * 0x14 ret
+ */
+ struct {
+ u64 literal;
+ __le32 insn[2];
+ } insns;
+ u32 insn;
+ int ret;
+
+ insn = aarch64_insn_gen_hint(AARCH64_INSN_HINT_BTIC);
+ insns.literal = (u64)func;
+ insns.insn[0] = cpu_to_le32(insn);
+
+ if (!func) {
+ insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_LR,
+ AARCH64_INSN_BRANCH_RETURN);
+ } else {
+ insn = aarch64_insn_gen_branch_imm((u64)tramp + 4, (u64)func,
+ AARCH64_INSN_BRANCH_NOLINK);
+
+ /*
+ * Use a NOP if the branch target is out of range, and rely on
+ * the indirect call instead.
+ */
+ if (insn == AARCH64_BREAK_FAULT)
+ insn = aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
+ }
+ insns.insn[1] = cpu_to_le32(insn);
+
+ ret = __aarch64_insn_write(tramp - 8, &insns, sizeof(insns));
+ if (!WARN_ON(ret))
+ caches_clean_inval_pou((u64)tramp - 8, sizeof(insns));
}
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index f6b1a88245db..ceb35c35192c 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -161,6 +161,7 @@ SECTIONS
IDMAP_TEXT
HIBERNATE_TEXT
TRAMP_TEXT
+ STATIC_CALL_TEXT
*(.fixup)
*(.gnu.warning)
. = ALIGN(16);
--
2.25.1
arm64: Implement HAVE_PREEMPT_DYNAMIC

Provide the static calls for the common preemption points and report
arm64's ability to support dynamic preemption.
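The net effect at the preemption points (sketch; this is roughly the
generic preempt_enable() from linux/preempt.h, unchanged by this patch,
whose __preempt_schedule() now expands to a static_call()):

	#define preempt_enable()					\
	do {								\
		barrier();						\
		if (unlikely(preempt_count_dec_and_test()))		\
			__preempt_schedule();				\
	} while (0)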
Signed-off-by: Frederic Weisbecker <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Quentin Perret <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: James Morse <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: David Laight <[email protected]>
---
arch/arm64/Kconfig               |  1 +
arch/arm64/include/asm/preempt.h | 20 +++++++++++++++++---
2 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 34b175b1e247..24915ca7d6d5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -191,6 +191,7 @@ config ARM64
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_DYNAMIC
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_STATIC_CALL
select HAVE_FUNCTION_ARG_ACCESS_API
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 4fbbe644532f..69d1cc491d3b 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -82,15 +82,29 @@ static inline bool should_resched(int preempt_offset)
#ifdef CONFIG_PREEMPTION
void preempt_schedule(void);
-#define __preempt_schedule() preempt_schedule()
void preempt_schedule_notrace(void);
-#define __preempt_schedule_notrace() preempt_schedule_notrace()
-#endif /* CONFIG_PREEMPTION */
#ifdef CONFIG_PREEMPT_DYNAMIC
+
+#define __preempt_schedule_func preempt_schedule
+DECLARE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
+#define __preempt_schedule() static_call(preempt_schedule)()
+
+#define __preempt_schedule_notrace_func preempt_schedule_notrace
+DECLARE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
+#define __preempt_schedule_notrace() static_call(preempt_schedule_notrace)()
+
void arm64_preempt_schedule_irq(void);
#define __irqentry_exit_cond_resched_func arm64_preempt_schedule_irq
DECLARE_STATIC_CALL(irqentry_exit_cond_resched, __irqentry_exit_cond_resched_func);
+
+#else /* !CONFIG_PREEMPT_DYNAMIC */
+
+#define __preempt_schedule() preempt_schedule()
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
#endif /* CONFIG_PREEMPT_DYNAMIC */
+#endif /* CONFIG_PREEMPTION */
+
#endif /* __ASM_PREEMPT_H */
--
2.25.1
On Mon, 25 Oct 2021 at 15:57, Peter Zijlstra <[email protected]> wrote:
>
> On Mon, Oct 25, 2021 at 02:21:00PM +0200, Frederic Weisbecker wrote:
>
> > +#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insn) \
> > + asm(" .pushsection .static_call.text, \"ax\" \n" \
> > + " .align 4 \n" \
> > + " .globl " STATIC_CALL_TRAMP_STR(name) " \n" \
> > + "0: .quad 0x0 \n" \
> > + STATIC_CALL_TRAMP_STR(name) ": \n" \
> > + " hint 34 /* BTI C */ \n" \
> > + insn " \n" \
> > + " ldr x16, 0b \n" \
> > + " cbz x16, 1f \n" \
> > + " br x16 \n" \
> > + "1: ret \n" \
> > + " .popsection \n")
>
> > +void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
> > +{
> > + /*
> > + * -0x8 <literal>
> > + * 0x0 bti c <--- trampoline entry point
> > + * 0x4 <branch or nop>
> > + * 0x8 ldr x16, <literal>
> > + * 0xc cbz x16, 20
> > + * 0x10 br x16
> > + * 0x14 ret
> > + */
> > + struct {
> > + u64 literal;
> > + __le32 insn[2];
> > + } insns;
> > + u32 insn;
> > + int ret;
> > +
> > + insn = aarch64_insn_gen_hint(AARCH64_INSN_HINT_BTIC);
> > + insns.literal = (u64)func;
> > + insns.insn[0] = cpu_to_le32(insn);
> > +
> > + if (!func) {
> > + insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_LR,
> > + AARCH64_INSN_BRANCH_RETURN);
> > + } else {
> > + insn = aarch64_insn_gen_branch_imm((u64)tramp + 4, (u64)func,
> > + AARCH64_INSN_BRANCH_NOLINK);
> > +
> > + /*
> > + * Use a NOP if the branch target is out of range, and rely on
> > + * the indirect call instead.
> > + */
> > + if (insn == AARCH64_BREAK_FAULT)
> > + insn = aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
> > + }
> > + insns.insn[1] = cpu_to_le32(insn);
> > +
> > + ret = __aarch64_insn_write(tramp - 8, &insns, sizeof(insns));
>
> OK, that's pretty magical...
>
> So you're writing the literal and the two instructions with 2 u64
> stores. Relying on alignment to guarantee both are in a single page and
> that copy_to_kernel_nofault() selects u64 writes.
>
To be honest, it just seemed tidier and less likely to produce weird
corner cases to put the literal and the patched insn in the smallest
possible power-of-2 aligned window, as it ensures that the D-side view
is always consistent.
However, the actual fetch of the instruction could still produce a
stale value before the cache maintenance completes.
> By unconditionally writing the literal, you avoid there ever being a
> stale value, which in turn avoids there being a race where you switch
> from 'J @func' relative addressing to 'NOP; do-literal-thing' and cross
> CPU execution gets the ordering inverted.
>
Indeed.
> Ooohh, but what if you go from !func to NOP.
>
> assuming:
>
> .literal = 0
> BTI C
> RET
>
> Then
>
> CPU0                    CPU1
>
> [S] literal = func      [I] NOP
> [S] insn[1] = NOP       [L] x16 = literal (NULL)
>                         b x16
>                         *BANG*
>
> Is that possible? (total lack of memory ordering etc..)
>
The CBZ will branch to the RET instruction if x16 == 0x0, so this
should not happen.
> On IRC you just alluded to the fact that this relies on it all being in
> a single cacheline (i-fetch windows don't need to be cacheline sized,
> but provided they're at least 16 bytes, this should still work given the
> alignment).
>
> But is I$ and D$ coherent? One load is through I-fetch, the other is a
> regular D-fetch.
>
> However, Will has previously expressed reluctance to rely on such
> things.
>
No they are not. That is why the CBZ is there. So the only issue we
might see is where the branch instruction is out of sync with the
literal, and so we may call the old function while switching to the
new one and the I-cache maintenance hasn't completed yet.
> > + if (!WARN_ON(ret))
> > + caches_clean_inval_pou((u64)tramp - 8, sizeof(insns));
> > }
On Mon, Oct 25, 2021 at 04:08:37PM +0200, Ard Biesheuvel wrote:
> On Mon, 25 Oct 2021 at 15:57, Peter Zijlstra <[email protected]> wrote:
> >
> > On Mon, Oct 25, 2021 at 02:21:00PM +0200, Frederic Weisbecker wrote:
> >
> > > +#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insn) \
> > > + asm(" .pushsection .static_call.text, \"ax\" \n" \
> > > + " .align 4 \n" \
> > > + " .globl " STATIC_CALL_TRAMP_STR(name) " \n" \
> > > + "0: .quad 0x0 \n" \
> > > + STATIC_CALL_TRAMP_STR(name) ": \n" \
> > > + " hint 34 /* BTI C */ \n" \
> > > + insn " \n" \
> > > + " ldr x16, 0b \n" \
> > > + " cbz x16, 1f \n" \
> > > + " br x16 \n" \
> > > + "1: ret \n" \
> > > + " .popsection \n")
> >
> > OK, that's pretty magical...
> >
> > So you're writing the literal and the two instructions with 2 u64
> > stores. Relying on alignment to guarantee both are in a single page and
> > that copy_to_kernel_nofault() selects u64 writes.
> >
>
> To be honest, it just seemed tidier and less likely to produce weird
> corner cases to put the literal and the patched insn in the smallest
> possible power-of-2 aligned window, as it ensures that the D-side view
> is always consistent.
>
> However, the actual fetch of the instruction could still produce a
> stale value before the cache maintenance completes.
>
> > By unconditionally writing the literal, you avoid there ever being a
> > stale value, which in turn avoids there being a race where you switch
> > from 'J @func' relative addressing to 'NOP; do-literal-thing' and cross
> > CPU execution gets the ordering inverted.
> >
>
> Indeed.
>
> > Ooohh, but what if you go from !func to NOP.
> >
> > assuming:
> >
> > .literal = 0
> > BTI C
> > RET
> >
> > Then
> >
> > CPU0                    CPU1
> >
> > [S] literal = func      [I] NOP
> > [S] insn[1] = NOP       [L] x16 = literal (NULL)
> >                         b x16
> >                         *BANG*
> >
> > Is that possible? (total lack of memory ordering etc..)
> >
>
> The CBZ will branch to the RET instruction if x16 == 0x0, so this
> should not happen.
Oooh, I missed that :/ I was about to suggest writing the address of a
bare 'ret' trampoline instead of NULL into the literal.
> > On IRC you just alluded to the fact that this relies on it all being in
> > a single cacheline (i-fetch windows don't need to be cacheline sized,
> > but provided they're at least 16 bytes, this should still work given the
> > alignment).
> >
> > But is I$ and D$ coherent? One load is through I-fetch, the other is a
> > regular D-fetch.
> >
> > However, Will has previously expressed reluctance to rely on such
> > things.
> >
>
> No they are not. That is why the CBZ is there. So the only issue we
> might see is where the branch instruction is out of sync with the
> literal, and so we may call the old function while switching to the
> new one and the I-cache maintenance hasn't completed yet.
OK, agreed. Perhaps put in a comment to explain some of this though. The
next poor sod trying to untangle this code is sure to have a question or
two :-)
From: Ard Biesheuvel
> Sent: 25 October 2021 15:32
...
> On arm64, we can only patch NOPs into branch instructions or vice
> versa, or we'd have to run the whole thing under stop_machine() to
> ensure that other cores don't fetch garbage.
Ok, I was thinking it would be safe to patch a single instruction.
Clearly you can't patch more than one without danger of 'garbage'.
David
On Mon, Oct 25, 2021 at 04:19:16PM +0200, Peter Zijlstra wrote:
> On Mon, Oct 25, 2021 at 04:08:37PM +0200, Ard Biesheuvel wrote:
> > > Ooohh, but what if you go from !func to NOP.
> > >
> > > assuming:
> > >
> > > .literal = 0
> > > BTI C
> > > RET
> > >
> > > Then
> > >
> > > CPU0                    CPU1
> > >
> > > [S] literal = func      [I] NOP
> > > [S] insn[1] = NOP       [L] x16 = literal (NULL)
> > >                         b x16
> > >                         *BANG*
> > >
> > > Is that possible? (total lack of memory ordering etc..)
> > >
> >
> > The CBZ will branch to the RET instruction if x16 == 0x0, so this
> > should not happen.
>
> Oooh, I missed that :/ I was about to suggest writing the address of a
> bare 'ret' trampoline instead of NULL into the literal.
Perhaps a little something like so.. Shaves 2 instructions off each
trampoline.
--- a/arch/arm64/include/asm/static_call.h
+++ b/arch/arm64/include/asm/static_call.h
@@ -11,9 +11,7 @@
" hint 34 /* BTI C */ \n" \
insn " \n" \
" ldr x16, 0b \n" \
- " cbz x16, 1f \n" \
" br x16 \n" \
- "1: ret \n" \
" .popsection \n")
#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
--- a/arch/arm64/kernel/patching.c
+++ b/arch/arm64/kernel/patching.c
@@ -90,6 +90,11 @@ int __kprobes aarch64_insn_write(void *a
return __aarch64_insn_write(addr, &i, AARCH64_INSN_SIZE);
}
+asm("__static_call_ret: \n"
+ " ret \n")
+
+extern void __static_call_ret(void);
+
void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
{
/*
@@ -97,9 +102,7 @@ void arch_static_call_transform(void *si
* 0x0 bti c <--- trampoline entry point
* 0x4 <branch or nop>
* 0x8 ldr x16, <literal>
- * 0xc cbz x16, 20
- * 0x10 br x16
- * 0x14 ret
+ * 0xc br x16
*/
struct {
u64 literal;
@@ -113,6 +116,7 @@ void arch_static_call_transform(void *si
insns.insn[0] = cpu_to_le32(insn);
if (!func) {
+ insns.literal = (unsigned long)&__static_call_ret;
insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_LR,
AARCH64_INSN_BRANCH_RETURN);
} else {
On Mon, 25 Oct 2021 at 16:47, Peter Zijlstra <[email protected]> wrote:
>
> On Mon, Oct 25, 2021 at 04:19:16PM +0200, Peter Zijlstra wrote:
> > On Mon, Oct 25, 2021 at 04:08:37PM +0200, Ard Biesheuvel wrote:
>
> > > > Ooohh, but what if you go from !func to NOP.
> > > >
> > > > assuming:
> > > >
> > > > .literal = 0
> > > > BTI C
> > > > RET
> > > >
> > > > Then
> > > >
> > > > CPU0                    CPU1
> > > >
> > > > [S] literal = func      [I] NOP
> > > > [S] insn[1] = NOP       [L] x16 = literal (NULL)
> > > >                         b x16
> > > >                         *BANG*
> > > >
> > > > Is that possible? (total lack of memory ordering etc..)
> > > >
> > >
> > > The CBZ will branch to the RET instruction if x16 == 0x0, so this
> > > should not happen.
> >
> > Oooh, I missed that :/ I was about to suggest writing the address of a
> > bare 'ret' trampoline instead of NULL into the literal.
>
> Perhaps a little something like so.. Shaves 2 instructions off each
> trampoline.
>
> --- a/arch/arm64/include/asm/static_call.h
> +++ b/arch/arm64/include/asm/static_call.h
> @@ -11,9 +11,7 @@
> " hint 34 /* BTI C */ \n" \
> insn " \n" \
> " ldr x16, 0b \n" \
> - " cbz x16, 1f \n" \
> " br x16 \n" \
> - "1: ret \n" \
> " .popsection \n")
>
> #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
> --- a/arch/arm64/kernel/patching.c
> +++ b/arch/arm64/kernel/patching.c
> @@ -90,6 +90,11 @@ int __kprobes aarch64_insn_write(void *a
> return __aarch64_insn_write(addr, &i, AARCH64_INSN_SIZE);
> }
>
> +asm("__static_call_ret: \n"
> + " ret \n")
> +
This breaks BTI as it lacks the landing pad, and it will be called indirectly.
> +extern void __static_call_ret(void);
> +
Better to have an ordinary C function here (with consistent linkage),
but we need to take the address in a way that works with Clang CFI.
As the two additional instructions are on an ice cold path anyway, I'm
not sure this is an obvious improvement tbh.
> void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
> {
> /*
> @@ -97,9 +102,7 @@ void arch_static_call_transform(void *si
> * 0x0 bti c <--- trampoline entry point
> * 0x4 <branch or nop>
> * 0x8 ldr x16, <literal>
> - * 0xc cbz x16, 20
> - * 0x10 br x16
> - * 0x14 ret
> + * 0xc br x16
> */
> struct {
> u64 literal;
> @@ -113,6 +116,7 @@ void arch_static_call_transform(void *si
> insns.insn[0] = cpu_to_le32(insn);
>
> if (!func) {
> + insns.literal = (unsigned long)&__static_call_ret;
> insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_LR,
> AARCH64_INSN_BRANCH_RETURN);
> } else {
On Mon, Oct 25, 2021 at 02:21:00PM +0200, Frederic Weisbecker wrote:
> +#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insn) \
> + asm(" .pushsection .static_call.text, \"ax\" \n" \
> + " .align 4 \n" \
> + " .globl " STATIC_CALL_TRAMP_STR(name) " \n" \
> + "0: .quad 0x0 \n" \
> + STATIC_CALL_TRAMP_STR(name) ": \n" \
> + " hint 34 /* BTI C */ \n" \
> + insn " \n" \
> + " ldr x16, 0b \n" \
> + " cbz x16, 1f \n" \
> + " br x16 \n" \
> + "1: ret \n" \
> + " .popsection \n")
> +void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
> +{
> + /*
> + * -0x8 <literal>
> + * 0x0 bti c <--- trampoline entry point
> + * 0x4 <branch or nop>
> + * 0x8 ldr x16, <literal>
> + * 0xc cbz x16, 20
> + * 0x10 br x16
> + * 0x14 ret
> + */
> + struct {
> + u64 literal;
> + __le32 insn[2];
> + } insns;
> + u32 insn;
> + int ret;
> +
> + insn = aarch64_insn_gen_hint(AARCH64_INSN_HINT_BTIC);
> + insns.literal = (u64)func;
> + insns.insn[0] = cpu_to_le32(insn);
> +
> + if (!func) {
> + insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_LR,
> + AARCH64_INSN_BRANCH_RETURN);
> + } else {
> + insn = aarch64_insn_gen_branch_imm((u64)tramp + 4, (u64)func,
> + AARCH64_INSN_BRANCH_NOLINK);
> +
> + /*
> + * Use a NOP if the branch target is out of range, and rely on
> + * the indirect call instead.
> + */
> + if (insn == AARCH64_BREAK_FAULT)
> + insn = aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
> + }
> + insns.insn[1] = cpu_to_le32(insn);
> +
> + ret = __aarch64_insn_write(tramp - 8, &insns, sizeof(insns));
OK, that's pretty magical...
So you're writing the literal and the two instructions with 2 u64
stores. Relying on alignment to guarantee both are in a single page and
that copy_to_kernel_nofault() selects u64 writes.
By unconditionally writing the literal, you avoid there ever being a
stale value, which in turn avoids there being a race where you switch
from 'J @func' relative addressing to 'NOP; do-literal-thing' and cross
CPU execution gets the ordering inverted.
Ooohh, but what if you go from !func to NOP.
assuming:
.literal = 0
BTI C
RET
Then
CPU0                    CPU1

[S] literal = func      [I] NOP
[S] insn[1] = NOP       [L] x16 = literal (NULL)
                        b x16
                        *BANG*
Is that possible? (total lack of memory ordering etc..)
On IRC you just alluded to the fact that this relies on it all being in
a single cacheline (i-fetch windows don't need to be cacheline sized,
but provided they're at least 16 bytes, this should still work given the
alignment).
But is I$ and D$ coherent? One load is through I-fetch, the other is a
regular D-fetch.
However, Will has previously expressed reluctance to rely on such
things.
> + if (!WARN_ON(ret))
> + caches_clean_inval_pou((u64)tramp - 8, sizeof(insns));
> }
On Mon, 25 Oct 2021 at 16:25, David Laight <[email protected]> wrote:
>
> From: Frederic Weisbecker
> > Sent: 25 October 2021 13:21
> >
> > Implement arm64 support for the 'unoptimized' static call variety, which
> > routes all calls through a single trampoline that is patched to perform a
> > tail call to the selected function.
> >
> > It is expected that the direct branch instruction will be able to cover
> > the common case. However, given that static call targets may be located
> > in modules loaded out of direct branching range, we need a fallback path
> > that loads the address into R16 and uses a branch-to-register (BR)
> > instruction to perform an indirect call.
> >
> ...
> > +void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
> > +{
> > + /*
> > + * -0x8 <literal>
> > + * 0x0 bti c <--- trampoline entry point
> > + * 0x4 <branch or nop>
> > + * 0x8 ldr x16, <literal>
> > + * 0xc cbz x16, 20
> > + * 0x10 br x16
> > + * 0x14 ret
> > + */
>
> Since the 'ldr x16, <literal>' is just a 32bit constant
> (for a pc-relative load).
>
I don't follow. Are you saying it is a 32-bit opcode? This applies to
all AArch64 opcodes.
> Can't you save a word by making offset 0x4 <branch or ldr x16, <literal>> ?
>
> Or am I missing something?
>
On arm64, we can only patch NOPs into branch instructions or vice
versa, or we'd have to run the whole thing under stop_machine() to
ensure that other cores don't fetch garbage.
On Mon, Oct 25, 2021 at 04:55:17PM +0200, Ard Biesheuvel wrote:
> On Mon, 25 Oct 2021 at 16:47, Peter Zijlstra <[email protected]> wrote:
> > Perhaps a little something like so.. Shaves 2 instructions off each
> > trampoline.
> >
> > --- a/arch/arm64/include/asm/static_call.h
> > +++ b/arch/arm64/include/asm/static_call.h
> > @@ -11,9 +11,7 @@
> > " hint 34 /* BTI C */ \n" \
> > insn " \n" \
> > " ldr x16, 0b \n" \
> > - " cbz x16, 1f \n" \
> > " br x16 \n" \
> > - "1: ret \n" \
> > " .popsection \n")
> >
> > #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
> > --- a/arch/arm64/kernel/patching.c
> > +++ b/arch/arm64/kernel/patching.c
> > @@ -90,6 +90,11 @@ int __kprobes aarch64_insn_write(void *a
> > return __aarch64_insn_write(addr, &i, AARCH64_INSN_SIZE);
> > }
> >
> > +asm("__static_call_ret: \n"
> > + " ret \n")
> > +
>
> This breaks BTI as it lacks the landing pad, and it will be called indirectly.
Argh!
> > +extern void __static_call_ret(void);
> > +
>
> Better to have an ordinary C function here (with consistent linkage),
> but we need to take the address in a way that works with Clang CFI.
There is that.
> As the two additional instructions are on an ice cold path anyway, I'm
> not sure this is an obvious improvement tbh.
For me it's both simpler -- by virtue of being more consistent, and
smaller. So double win :-)
That is, you're already relying on the literal being unconditionally
updated for the normal B foo -> NOP path, and having the RET -> NOP path
be handled differently is just confusing.
At least, that's how I'm seeing it today...
On Mon, 25 Oct 2021 at 17:05, Peter Zijlstra <[email protected]> wrote:
>
> On Mon, Oct 25, 2021 at 04:55:17PM +0200, Ard Biesheuvel wrote:
> > On Mon, 25 Oct 2021 at 16:47, Peter Zijlstra <[email protected]> wrote:
>
> > > Perhaps a little something like so.. Shaves 2 instructions off each
> > > trampoline.
> > >
> > > --- a/arch/arm64/include/asm/static_call.h
> > > +++ b/arch/arm64/include/asm/static_call.h
> > > @@ -11,9 +11,7 @@
> > > " hint 34 /* BTI C */ \n" \
> > > insn " \n" \
> > > " ldr x16, 0b \n" \
> > > - " cbz x16, 1f \n" \
> > > " br x16 \n" \
> > > - "1: ret \n" \
> > > " .popsection \n")
> > >
> > > #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
> > > --- a/arch/arm64/kernel/patching.c
> > > +++ b/arch/arm64/kernel/patching.c
> > > @@ -90,6 +90,11 @@ int __kprobes aarch64_insn_write(void *a
> > > return __aarch64_insn_write(addr, &i, AARCH64_INSN_SIZE);
> > > }
> > >
> > > +asm("__static_call_ret: \n"
> > > + " ret \n")
> > > +
> >
> > This breaks BTI as it lacks the landing pad, and it will be called indirectly.
>
> Argh!
>
> > > +extern void __static_call_ret(void);
> > > +
> >
> > Better to have an ordinary C function here (with consistent linkage),
> > but we need to take the address in a way that works with Clang CFI.
>
> There is that.
>
> > As the two additional instructions are on an ice cold path anyway, I'm
> > not sure this is an obvious improvement tbh.
>
> For me it's both simpler -- by virtue of being more consistent, and
> smaller. So double win :-)
>
> That is, you're already relying on the literal being unconditionally
> updated for the normal B foo -> NOP path, and having the RET -> NOP path
> be handled differently is just confusing.
>
> At least, that's how I'm seeing it today...
Fair enough. I don't have a strong opinion either way, so I'll let
some other arm64 folks chime in as well.
From: Ard Biesheuvel
> Sent: 25 October 2021 15:55
>
> On Mon, 25 Oct 2021 at 16:47, Peter Zijlstra <[email protected]> wrote:
> >
> > On Mon, Oct 25, 2021 at 04:19:16PM +0200, Peter Zijlstra wrote:
> > > On Mon, Oct 25, 2021 at 04:08:37PM +0200, Ard Biesheuvel wrote:
> >
> > > > > Ooohh, but what if you go from !func to NOP.
> > > > >
> > > > > assuming:
> > > > >
> > > > > .literal = 0
> > > > > BTI C
> > > > > RET
> > > > >
> > > > > Then
> > > > >
> > > > > CPU0                    CPU1
> > > > >
> > > > > [S] literal = func      [I] NOP
> > > > > [S] insn[1] = NOP       [L] x16 = literal (NULL)
> > > > >                         b x16
> > > > >                         *BANG*
> > > > >
> > > > > Is that possible? (total lack of memory ordering etc..)
> > > > >
> > > >
> > > > The CBZ will branch to the RET instruction if x16 == 0x0, so this
> > > > should not happen.
> > >
> > > Oooh, I missed that :/ I was about to suggest writing the address of a
> > > bare 'ret' trampoline instead of NULL into the literal.
> >
> > Perhaps a little something like so.. Shaves 2 instructions off each
> > trampoline.
> >
> > --- a/arch/arm64/include/asm/static_call.h
> > +++ b/arch/arm64/include/asm/static_call.h
> > @@ -11,9 +11,7 @@
> > " hint 34 /* BTI C */ \n" \
> > insn " \n" \
> > " ldr x16, 0b \n" \
> > - " cbz x16, 1f \n" \
> > " br x16 \n" \
> > - "1: ret \n" \
> > " .popsection \n")
> >
> > #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
> > --- a/arch/arm64/kernel/patching.c
> > +++ b/arch/arm64/kernel/patching.c
> > @@ -90,6 +90,11 @@ int __kprobes aarch64_insn_write(void *a
> > return __aarch64_insn_write(addr, &i, AARCH64_INSN_SIZE);
> > }
> >
> > +asm("__static_call_ret: \n"
> > + " ret \n")
> > +
>
> This breaks BTI as it lacks the landing pad, and it will be called indirectly.
>
> > +extern void __static_call_ret(void);
> > +
>
> Better to have an ordinary C function here (with consistent linkage),
> but we need to take the address in a way that works with Clang CFI.
>
> As the two additional instructions are on an ice cold path anyway, I'm
> not sure this is an obvious improvement tbh.
If my sums are correct, the code block is exactly 32 bytes, so there is
no point in saving an instruction.
But you could have:

0:	.quad 1f
label:
	bti c
	nop/branch
	ldr x16, 0b
	br x16
1:	bti c
	ret
That is all self-contained.
David
From: Frederic Weisbecker
> Sent: 25 October 2021 13:21
>
> Implement arm64 support for the 'unoptimized' static call variety, which
> routes all calls through a single trampoline that is patched to perform a
> tail call to the selected function.
>
> It is expected that the direct branch instruction will be able to cover
> the common case. However, given that static call targets may be located
> in modules loaded out of direct branching range, we need a fallback path
> that loads the address into R16 and uses a branch-to-register (BR)
> instruction to perform an indirect call.
>
...
> +void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
> +{
> + /*
> + * -0x8 <literal>
> + * 0x0 bti c <--- trampoline entry point
> + * 0x4 <branch or nop>
> + * 0x8 ldr x16, <literal>
> + * 0xc cbz x16, 20
> + * 0x10 br x16
> + * 0x14 ret
> + */
Since the 'ldr x16, <literal>' is just a 32-bit constant (a pc-relative
load), can't you save a word by making offset 0x4 a
<branch or ldr x16, <literal>>?
Or am I missing something?
David
On Tue, Oct 26, 2021 at 11:36:55AM +0100, Mark Rutland wrote:
> My preference overall is to keep the trampoline self-contained, and I'd
> prefer to keep the RET inline in the trampoline rather than trying to
> factor it out so that all the control-flow is clearly in one place.
>
> So I'd prefer that we have the sequence as-is:
>
> | 0: .quad 0x0
> | bti c
> | < insn >
> | ldr x16, 0b
> | cbz x16, 1f
> | br x16
> | 1: ret
OK, fair enough. In that case:
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Although I do think that function can use a comment to explain the magic
involved.
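Something along these lines, distilled from the thread, might do (a sketch,
not anyone's actual wording):

	/*
	 * The 8-byte literal and the two patched instructions sit in one
	 * 16-byte naturally aligned block, so a single
	 * copy_to_kernel_nofault() updates them consistently from the
	 * D-side.  The I-side may still fetch a stale branch/NOP until the
	 * cache maintenance completes, but the literal it then loads is
	 * always current, so the LDR/CBZ/BR slow path reaches either the
	 * old or the new target, never garbage.
	 */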
> If we knew these were only called with IRQs enabled (and so we can take
> an IPI to generate a context synchronization event), we could patch
> <insn> to a RET and point the literal back at the BTI, e.g.
Given the static_call() usage on x86 I'm pretty sure you'll want them
with IRQs disabled.
On Mon, Oct 25, 2021 at 05:10:24PM +0200, Ard Biesheuvel wrote:
> On Mon, 25 Oct 2021 at 17:05, Peter Zijlstra <[email protected]> wrote:
> >
> > On Mon, Oct 25, 2021 at 04:55:17PM +0200, Ard Biesheuvel wrote:
> > > On Mon, 25 Oct 2021 at 16:47, Peter Zijlstra <[email protected]> wrote:
> >
> > > > Perhaps a little something like so.. Shaves 2 instructions off each
> > > > trampoline.
> > > >
> > > > --- a/arch/arm64/include/asm/static_call.h
> > > > +++ b/arch/arm64/include/asm/static_call.h
> > > > @@ -11,9 +11,7 @@
> > > > " hint 34 /* BTI C */ \n" \
> > > > insn " \n" \
> > > > " ldr x16, 0b \n" \
> > > > - " cbz x16, 1f \n" \
> > > > " br x16 \n" \
> > > > - "1: ret \n" \
> > > > " .popsection \n")
> > > >
> > > > #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
> > > > --- a/arch/arm64/kernel/patching.c
> > > > +++ b/arch/arm64/kernel/patching.c
> > > > @@ -90,6 +90,11 @@ int __kprobes aarch64_insn_write(void *a
> > > > return __aarch64_insn_write(addr, &i, AARCH64_INSN_SIZE);
> > > > }
> > > >
> > > > +asm("__static_call_ret: \n"
> > > > + " ret \n")
> > > > +
> > >
> > > This breaks BTI as it lacks the landing pad, and it will be called indirectly.
> >
> > Argh!
> >
> > > > +extern void __static_call_ret(void);
> > > > +
> > >
> > > Better to have an ordinary C function here (with consistent linkage),
> > > but we need to take the address in a way that works with Clang CFI.
> >
> > There is that.
> >
> > > As the two additional instructions are on an ice cold path anyway, I'm
> > > not sure this is an obvious improvement tbh.
> >
> > For me it's both simpler -- by virtue of being more consistent, and
> > smaller. So double win :-)
> >
> > That is; you're already relying on the literal being unconditionally
> > updated for the normal B foo -> NOP path, and having the RET -> NOP path
> > be handled differently is just confusing.
> >
> > At least, that's how I'm seeing it today...
>
> Fair enough. I don't have a strong opinion either way, so I'll let
> some other arm64 folks chime in as well.
My preference overall is to keep the trampoline self-contained, and I'd
prefer to keep the RET inline in the trampoline rather than trying to
factor it out so that all the control-flow is clearly in one place.
So I'd prefer that we have the sequence as-is:
| 0: .quad 0x0
| bti c
| < insn >
| ldr x16, 0b
| cbz x16, 1f
| br x16
| 1: ret
If we knew these were only called with IRQs enabled (and so we can take
an IPI to generate a context synchronization event), we could patch
<insn> to a RET and point the literal back at the BTI, e.g.
| 0: .quad 0x0
| bti c
| < insn >
| ldr x16, 0b
| br x16
... but I'm pretty sure there are CPUs that will never re-fetch <insn>
in that case, and will get stuck in an infinite loop.
Thanks,
Mark.
From: Mark Rutland
> Sent: 26 October 2021 11:37
...
> My preference overall is to keep the trampoline self-contained, and I'd
> prefer to keep the RET inline in the trampoline rather than trying to
> factor it out so that all the control-flow is clearly in one place.
>
> So I'd prefer that we have the sequence as-is:
>
> | 0: .quad 0x0
> | bti c
> | < insn >
> | ldr x16, 0b
> | cbz x16, 1f
> | br x16
> | 1: ret
What is wrong with:
0:	.quad 1f
	bti c
	< insn >
	ldr x16, 0b
	br x16
1:	bti c
	ret
Self-contained and reasonably easy to read.
David
On Tue, Oct 26, 2021 at 11:06:11AM +0000, David Laight wrote:
> From: Mark Rutland
> > Sent: 26 October 2021 11:37
> ...
> > My preference overall is to keep the trampoline self-contained, and I'd
> > prefer to keep the RET inline in the trampoline rather than trying to
> > factor it out so that all the control-flow is clearly in one place.
> >
> > So I'd prefer that we have the sequence as-is:
> >
> > | 0: .quad 0x0
> > | bti c
> > | < insn >
> > | ldr x16, 0b
> > | cbz x16, 1f
> > | br x16
> > | 1: ret
>
> What is wrong with:
> 0: .quad 1f
> bti c
> < insn >
> ldr x16, 0b
> br x16
> 1: bti c
> ret
>
> Self-contained and reasonably easy to read.
FWIW, that would work for me too.
Thanks,
Mark.