From: Brian Gerst <brgerst@gmail.com>
To: Andy Lutomirski
Cc: Andy Lutomirski, the arch/x86 maintainers,
 Linux Kernel Mailing List, Borislav Petkov,
 Frédéric Weisbecker, Denys Vlasenko, Linus Torvalds
Subject: [PATCH] x86/entry/64: Remove duplicate syscall table for fast path
Date: Wed, 9 Dec 2015 08:02:53 -0500
Message-Id: <1449666173-15366-1-git-send-email-brgerst@gmail.com>

Instead of using a duplicate syscall table for the fast path, create
stubs for the syscalls that need pt_regs.  Each stub saves the extra
registers itself when the slow-path flag is not set; when the flag is
set, the extra registers were already saved on entry to the slow path,
so the stub can jump straight to the real handler.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
---
Applies on top of Andy's syscall cleanup series.

 arch/x86/entry/calling.h           | 32 ++++++++++++++++----------------
 arch/x86/entry/common.c            |  4 ++++
 arch/x86/entry/entry_64.S          | 36 +++++++++++++++++++++++++++++-------
 arch/x86/entry/syscall_64.c        | 25 +++++--------------------
 arch/x86/include/asm/thread_info.h |  1 +
 5 files changed, 55 insertions(+), 43 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index e32206e..7c58bd2 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -129,22 +129,22 @@ For 32-bit we have the following conventions - kernel is built with
 	SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
 	.endm
 
-	.macro SAVE_EXTRA_REGS offset=0
-	movq %r15, 0*8+\offset(%rsp)
-	movq %r14, 1*8+\offset(%rsp)
-	movq %r13, 2*8+\offset(%rsp)
-	movq %r12, 3*8+\offset(%rsp)
-	movq %rbp, 4*8+\offset(%rsp)
-	movq %rbx, 5*8+\offset(%rsp)
-	.endm
-
-	.macro RESTORE_EXTRA_REGS offset=0
-	movq 0*8+\offset(%rsp), %r15
-	movq 1*8+\offset(%rsp), %r14
-	movq 2*8+\offset(%rsp), %r13
-	movq 3*8+\offset(%rsp), %r12
-	movq 4*8+\offset(%rsp), %rbp
-	movq 5*8+\offset(%rsp), %rbx
+	.macro SAVE_EXTRA_REGS offset=0 base=rsp
+	movq %r15, 0*8+\offset(%\base)
+	movq %r14, 1*8+\offset(%\base)
+	movq %r13, 2*8+\offset(%\base)
+	movq %r12, 3*8+\offset(%\base)
+	movq %rbp, 4*8+\offset(%\base)
+	movq %rbx, 5*8+\offset(%\base)
+	.endm
+
+	.macro RESTORE_EXTRA_REGS offset=0 base=rsp
+	movq 0*8+\offset(%\base), %r15
+	movq 1*8+\offset(%\base), %r14
+	movq 2*8+\offset(%\base), %r13
+	movq 3*8+\offset(%\base), %r12
+	movq 4*8+\offset(%\base), %rbp
+	movq 5*8+\offset(%\base), %rbx
 	.endm
 
 	.macro ZERO_EXTRA_REGS
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 016ac47..4381aca 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -342,6 +342,8 @@ __visible void do_syscall_64(struct pt_regs *regs)
 	struct thread_info *ti = pt_regs_to_thread_info(regs);
 	unsigned long nr = regs->orig_ax;
 
+	ti->status |= TS_SLOWPATH;
+
 	enter_from_user_mode();
 	local_irq_enable();
 
@@ -360,6 +362,8 @@ __visible void do_syscall_64(struct pt_regs *regs)
 	}
 
 	syscall_return_slowpath(regs);
+
+	ti->status &= ~TS_SLOWPATH;
 }
 #endif
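(For clarity: the two writes above bracket the entire slow path, so a
ptregs stub that runs while TS_SLOWPATH is set knows the full register
frame was already saved.  The user-space sketch below models just that
branch; ti_status, sys_example and ptregs_stub are illustrative
stand-ins, not kernel code.)

#include <stdio.h>

#define TS_SLOWPATH	0x0001		/* mirrors the new thread_info.h flag */

static unsigned int ti_status;		/* stand-in for ti->status */

static long sys_example(void)		/* stand-in for a ptregs-marked syscall */
{
	return 0;
}

/* Models stub_ptregs_64: only a fast-path caller still owes the extra regs. */
static long ptregs_stub(long (*fn)(void))
{
	if (!(ti_status & TS_SLOWPATH)) {
		/* fast path: SAVE_EXTRA_REGS, call handler, RESTORE_EXTRA_REGS */
		return fn();
	}
	return fn();			/* slow path: frame complete ("jmp *%rax") */
}

int main(void)
{
	long ret = ptregs_stub(sys_example);	/* reached from the fast path */

	ti_status |= TS_SLOWPATH;		/* do_syscall_64() entry */
	ret = ptregs_stub(sys_example);		/* reached from the slow path */
	ti_status &= ~TS_SLOWPATH;		/* do_syscall_64() exit */

	printf("ret = %ld\n", ret);
	return 0;
}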
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1ab5362..5852ec6 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -188,7 +188,7 @@ entry_SYSCALL_64_fastpath:
 #endif
 	ja	1f			/* return -ENOSYS (already in pt_regs->ax) */
 	movq	%r10, %rcx
-	call	*sys_call_table_fastpath_64(, %rax, 8)
+	call	*sys_call_table(, %rax, 8)
 	movq	%rax, RAX(%rsp)
 1:
@@ -306,15 +306,37 @@ END(entry_SYSCALL_64)
 
 ENTRY(stub_ptregs_64)
 	/*
-	 * Syscalls marked as needing ptregs that go through the fast path
-	 * land here.  We transfer to the slow path.
+	 * Syscalls marked as needing ptregs land here.
+	 * If we are on the fast path, we need to save the extra regs.
+	 * If we are on the slow path, the extra regs are already saved.
 	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	TRACE_IRQS_OFF
-	addq	$8, %rsp
-	jmp	entry_SYSCALL64_slow_path
+	movq	PER_CPU_VAR(cpu_current_top_of_stack), %r10
+	testl	$TS_SLOWPATH, ASM_THREAD_INFO(TI_status, %r10, 0)
+	jnz	1f
+	subq	$SIZEOF_PTREGS, %r10
+	SAVE_EXTRA_REGS base=r10
+	movq	%r10, %rbx
+	call	*%rax
+	movq	%rbx, %r10
+	RESTORE_EXTRA_REGS base=r10
+	ret
+1:
+	jmp	*%rax
 END(stub_ptregs_64)
 
+.macro ptregs_stub func
+ENTRY(ptregs_\func)
+	leaq	\func(%rip), %rax
+	jmp	stub_ptregs_64
+END(ptregs_\func)
+.endm
+
+#define __SYSCALL_64_QUAL_(sym)
+#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
+
+#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
+#include <asm/syscalls_64.h>
+
 /*
  * A newly forked process directly context switches into this address.
  *
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index 601745c..9dbc5ab 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -6,11 +6,14 @@
 #include <asm/asm-offsets.h>
 #include <asm/syscall.h>
 
-#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_64_QUAL_(sym) sym
+#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym
+
+#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 #include <asm/syscalls_64.h>
 #undef __SYSCALL_64
 
-#define __SYSCALL_64(nr, sym, qual) [nr] = sym,
+#define __SYSCALL_64(nr, sym, qual) [nr] = __SYSCALL_64_QUAL_##qual(sym),
 
 extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
@@ -22,21 +25,3 @@ asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
 	[0 ... __NR_syscall_max] = &sys_ni_syscall,
 #include <asm/syscalls_64.h>
 };
-
-#undef __SYSCALL_64
-
-extern long stub_ptregs_64(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
-
-#define __SYSCALL_64_QUAL_(nr, sym) [nr] = sym,
-#define __SYSCALL_64_QUAL_ptregs(nr, sym) [nr] = stub_ptregs_64,
-
-#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(nr, sym)
-
-asmlinkage const sys_call_ptr_t sys_call_table_fastpath_64[__NR_syscall_max+1] = {
-	/*
-	 * Smells like a compiler bug -- it doesn't work
-	 * when the & below is removed.
-	 */
-	[0 ... __NR_syscall_max] = &sys_ni_syscall,
-#include <asm/syscalls_64.h>
-};
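(For readers unfamiliar with the qual machinery, here is how one entry
expands after this patch.  The (56, sys_clone, ptregs) entry is only an
illustration -- the authoritative numbers and qualifiers live in the
generated syscall table, not in this note.)

/* A ptregs-qualified table entry such as: */
__SYSCALL_64(56, sys_clone, ptregs)

/* expands in the declaration pass to: */
extern asmlinkage long ptregs_sys_clone(unsigned long, unsigned long,
	unsigned long, unsigned long, unsigned long, unsigned long);

/* and in the table pass to an entry that points at the stub: */
[56] = ptregs_sys_clone,

/* while in entry_64.S, "ptregs_stub sys_clone" emits the matching stub: */
ENTRY(ptregs_sys_clone)
	leaq	sys_clone(%rip), %rax	/* real handler into %rax */
	jmp	stub_ptregs_64		/* saves extra regs if needed */
END(ptregs_sys_clone)

/* An unqualified entry stays direct: __SYSCALL_64(0, sys_read, ) -> [0] = sys_read, */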
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index ae210d6..358e3a9 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -229,6 +229,7 @@ static inline unsigned long current_stack_pointer(void)
  * ever touches our thread-synchronous status, so we don't
  * have to worry about atomic accesses.
  */
+#define TS_SLOWPATH		0x0001	/* syscall slowpath (64BIT) */
 #define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
 #define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
-- 
2.5.0
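(One last note on the flag definition: the "thread-synchronous" comment
quoted above is what makes the plain |=/&= updates of ti->status in
do_syscall_64() safe without atomics.  A minimal user-space analogue,
illustrative only; thread_info_model and the two helpers are made up
for this sketch:)

#include <assert.h>

/* Only the owning task ever touches its own ->status, so each update
 * may compile to a plain load/modify/store: nothing else can
 * interleave, and no atomics or locking are needed. */
struct thread_info_model {
	unsigned int status;
};

static void enter_slow_path(struct thread_info_model *ti)
{
	ti->status |= 0x0001;		/* TS_SLOWPATH: non-atomic RMW is safe */
}

static void leave_slow_path(struct thread_info_model *ti)
{
	ti->status &= ~0x0001;		/* same plain RMW on the way out */
}

int main(void)
{
	struct thread_info_model ti = { 0 };

	enter_slow_path(&ti);
	assert(ti.status & 0x0001);
	leave_slow_path(&ti);
	assert(!(ti.status & 0x0001));
	return 0;
}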