2021-08-31 08:11:31

by Christophe Leroy

Subject: [PATCH] powerpc/32: Add support for out-of-line static calls

Add support for out-of-line static calls on PPC32. This change
improves the performance of calls to global function pointers by
using direct calls instead of indirect calls.

The trampoline is initially populated with a 'blr' and three 'nop'
instructions. Then, depending on the target distance,
arch_static_call_transform() will replace the 'blr' either with a
direct 'bl <target>' or with an indirect jump through the CTR
register via a lis/addi/mtctr/bctr sequence.

The static_call selftest runs successfully with this change.
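
For context, here is how such a static call is declared and used from C
via the generic static_call API (a minimal sketch; the names my_hook,
my_func and my_other_func are made up for illustration):

#include <linux/static_call.h>

static int my_func(int x) { return x; }
static int my_other_func(int x) { return x + 1; }

DEFINE_STATIC_CALL(my_hook, my_func);

int example(int x)
{
	/* With this patch, this compiles to a direct 'bl __SCT__my_hook'. */
	int ret = static_call(my_hook)(x);

	/* Retargeting patches the trampoline via arch_static_call_transform(). */
	static_call_update(my_hook, my_other_func);

	return ret + static_call(my_hook)(x);
}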

With this patch, __do_irq() has the following sequence to trace
irq entries:

c00049c0 <__SCT__tp_func_irq_entry>:
c00049c0: 48 00 00 70 b c0004a30 <__traceiter_irq_entry>
c00049c4: 60 00 00 00 nop
c00049c8: 60 00 00 00 nop
c00049cc: 60 00 00 00 nop
...
c00055a4 <__do_irq>:
...
c00055b4: 7c 7f 1b 78 mr r31,r3
...
c00055f0: 81 22 00 00 lwz r9,0(r2)
c00055f4: 39 29 00 01 addi r9,r9,1
c00055f8: 91 22 00 00 stw r9,0(r2)
c00055fc: 3d 20 c0 af lis r9,-16209
c0005600: 81 29 74 cc lwz r9,29900(r9)
c0005604: 2c 09 00 00 cmpwi r9,0
c0005608: 41 82 00 10 beq c0005618 <__do_irq+0x74>
c000560c: 80 69 00 04 lwz r3,4(r9)
c0005610: 7f e4 fb 78 mr r4,r31
c0005614: 4b ff f3 ad bl c00049c0 <__SCT__tp_func_irq_entry>

Before this patch, __do_irq() was doing the following to trace irq
entries:

c0005700 <__do_irq>:
...
c0005710: 7c 7e 1b 78 mr r30,r3
...
c000574c: 93 e1 00 0c stw r31,12(r1)
c0005750: 81 22 00 00 lwz r9,0(r2)
c0005754: 39 29 00 01 addi r9,r9,1
c0005758: 91 22 00 00 stw r9,0(r2)
c000575c: 3d 20 c0 af lis r9,-16209
c0005760: 83 e9 f4 cc lwz r31,-2868(r9)
c0005764: 2c 1f 00 00 cmpwi r31,0
c0005768: 41 82 00 24 beq c000578c <__do_irq+0x8c>
c000576c: 81 3f 00 00 lwz r9,0(r31)
c0005770: 80 7f 00 04 lwz r3,4(r31)
c0005774: 7d 29 03 a6 mtctr r9
c0005778: 7f c4 f3 78 mr r4,r30
c000577c: 4e 80 04 21 bctrl
c0005780: 85 3f 00 0c lwzu r9,12(r31)
c0005784: 2c 09 00 00 cmpwi r9,0
c0005788: 40 82 ff e4 bne c000576c <__do_irq+0x6c>

Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/Kconfig | 1 +
arch/powerpc/include/asm/static_call.h | 31 +++++++++++++++++++
arch/powerpc/kernel/Makefile | 2 +-
arch/powerpc/kernel/static_call.c | 43 ++++++++++++++++++++++++++
4 files changed, 76 insertions(+), 1 deletion(-)
create mode 100644 arch/powerpc/include/asm/static_call.h
create mode 100644 arch/powerpc/kernel/static_call.c

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 36b72d972568..a0fe69d8ec83 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -247,6 +247,7 @@ config PPC
select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
+ select HAVE_STATIC_CALL if PPC32
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select HUGETLB_PAGE_SIZE_VARIABLE if PPC_BOOK3S_64 && HUGETLB_PAGE
diff --git a/arch/powerpc/include/asm/static_call.h b/arch/powerpc/include/asm/static_call.h
new file mode 100644
index 000000000000..7cbefd845afc
--- /dev/null
+++ b/arch/powerpc/include/asm/static_call.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_STATIC_CALL_H
+#define _ASM_POWERPC_STATIC_CALL_H
+
+#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
+ asm(".pushsection .text, \"ax\" \n" \
+ ".align 4 \n" \
+ ".globl " STATIC_CALL_TRAMP_STR(name) " \n" \
+ STATIC_CALL_TRAMP_STR(name) ": \n" \
+ " b " #func " \n" \
+ " nop \n" \
+ " nop \n" \
+ " nop \n" \
+ ".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
+ ".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \
+ ".popsection \n")
+
+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
+ asm(".pushsection .text, \"ax\" \n" \
+ ".align 4 \n" \
+ ".globl " STATIC_CALL_TRAMP_STR(name) " \n" \
+ STATIC_CALL_TRAMP_STR(name) ": \n" \
+ " blr \n" \
+ " nop \n" \
+ " nop \n" \
+ " nop \n" \
+ ".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
+ ".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \
+ ".popsection \n")
+
+#endif /* _ASM_POWERPC_STATIC_CALL_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 7be36c1e1db6..0e3640e14eb1 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -106,7 +106,7 @@ extra-y += vmlinux.lds

obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o

-obj-$(CONFIG_PPC32) += entry_32.o setup_32.o early_32.o
+obj-$(CONFIG_PPC32) += entry_32.o setup_32.o early_32.o static_call.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
diff --git a/arch/powerpc/kernel/static_call.c b/arch/powerpc/kernel/static_call.c
new file mode 100644
index 000000000000..72754bcaf758
--- /dev/null
+++ b/arch/powerpc/kernel/static_call.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/memory.h>
+#include <linux/static_call.h>
+
+#include <asm/code-patching.h>
+
+static int patch_trampoline_32(u32 *addr, unsigned long target)
+{
+ int err;
+
+ err = patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(_R12, PPC_HA(target))));
+ err |= patch_instruction(addr++, ppc_inst(PPC_RAW_ADDI(_R12, _R12, PPC_LO(target))));
+ err |= patch_instruction(addr++, ppc_inst(PPC_RAW_MTCTR(_R12)));
+ err |= patch_instruction(addr, ppc_inst(PPC_RAW_BCTR()));
+
+ return err;
+}
+
+void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
+{
+ int err;
+ unsigned long target = (long)func;
+
+ if (!tramp)
+ return;
+
+ mutex_lock(&text_mutex);
+
+ if (!func)
+ err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR()));
+ else if (is_offset_in_branch_range((long)target - (long)tramp))
+ err = patch_branch(tramp, target, 0);
+ else if (IS_ENABLED(CONFIG_PPC32))
+ err = patch_trampoline_32(tramp, target);
+ else
+ BUILD_BUG();
+
+ mutex_unlock(&text_mutex);
+
+ if (err)
+ panic("%s: patching failed %pS at %pS\n", __func__, func, tramp);
+}
+EXPORT_SYMBOL_GPL(arch_static_call_transform);
--
2.25.0


2021-08-31 10:24:40

by Ard Biesheuvel

Subject: Re: [PATCH] powerpc/32: Add support for out-of-line static calls

On Tue, 31 Aug 2021 at 10:53, Peter Zijlstra <[email protected]> wrote:
>
> On Tue, Aug 31, 2021 at 08:05:21AM +0000, Christophe Leroy wrote:
>
> > +#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
> > + asm(".pushsection .text, \"ax\" \n" \
> > + ".align 4 \n" \
> > + ".globl " STATIC_CALL_TRAMP_STR(name) " \n" \
> > + STATIC_CALL_TRAMP_STR(name) ": \n" \
> > + " blr \n" \
> > + " nop \n" \
> > + " nop \n" \
> > + " nop \n" \
> > + ".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
> > + ".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \
> > + ".popsection \n")
>
> > +static int patch_trampoline_32(u32 *addr, unsigned long target)
> > +{
> > + int err;
> > +
> > + err = patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(_R12, PPC_HA(target))));
> > + err |= patch_instruction(addr++, ppc_inst(PPC_RAW_ADDI(_R12, _R12, PPC_LO(target))));
> > + err |= patch_instruction(addr++, ppc_inst(PPC_RAW_MTCTR(_R12)));
> > + err |= patch_instruction(addr, ppc_inst(PPC_RAW_BCTR()));
> > +
> > + return err;
> > +}
>
> There can be concurrent execution and modification; the above doesn't
> look safe in that regard. What happens if you've, say, done the first
> two but not the latter two, and execution happens (on a different
> CPU or in IRQ context, etc.)?
>
> > +void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
> > +{
> > + int err;
> > + unsigned long target = (long)func;
> > +
> > + if (!tramp)
> > + return;
> > +
> > + mutex_lock(&text_mutex);
> > +
> > + if (!func)
> > + err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR()));
> > + else if (is_offset_in_branch_range((long)target - (long)tramp))
> > + err = patch_branch(tramp, target, 0);
>
> These two are single-instruction modifications, and I'm assuming the
> hardware is sane enough that execution sees either the old or the new
> instruction. So this should work.
>
> > + else if (IS_ENABLED(CONFIG_PPC32))
> > + err = patch_trampoline_32(tramp, target);
> > + else
> > + BUILD_BUG();
> > +
> > + mutex_unlock(&text_mutex);
> > +
> > + if (err)
> > + panic("%s: patching failed %pS at %pS\n", __func__, func, tramp);
> > +}
> > +EXPORT_SYMBOL_GPL(arch_static_call_transform);
>
> One possible solution that we explored on ARM64 was having the
> trampoline be in 2 slots:
>
>
> b 1f
>
> 1: blr
> nop
> nop
> nop
>
> 2: blr
> nop
> nop
> nop
>
> Where initially the first slot is active (per "b 1f"), then you write
> the second slot, and as a final act, re-write the initial branch to
> point to slot 2.
>
> Then you execute synchronize_rcu_tasks() under your text mutex
> (careful!) to ensure all users of slot 1 are gone, and the next
> modification repeats the whole thing, except using slot 1, etc.
>
> Eventually I think Ard came up with the latest ARM64 proposal, which
> puts a literal in an RO section (could be the text section, I suppose)
> and loads and branches to that.
>

Yes. The main reason is simply that anything else is premature
optimization: we have a clear use case (CFI) where out-of-line static
calls are faster than compiler-generated indirect calls, even if the
static call sequence is based on a literal load and an indirect
branch, but CFI is not upstream [yet].

Once other use cases emerge, we will revisit this.
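
For reference, that literal-based trampoline is conceptually something
like this on arm64 (a rough sketch of the idea only, not the actual
proposed implementation):

<tramp>:
	ldr	x16, 0f		// load the target address from the literal
	br	x16		// and branch to it
0:	.quad	target		// the only word ever patched

Since retargeting then only rewrites the naturally aligned literal and
never an instruction, concurrent execution always observes either the
old or the new target.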



> Anyway, the thing is, you can really only modify a single instruction at
> a time and need to ensure concurrent execution is correct.