From: Andi Kleen
To: tglx@linutronix.de
Cc: torvalds@linux-foundation.org, gregkh@linux-foundation.org, dwmw@amazon.co.uk, tim.c.chen@linux.intel.com, linux-kernel@vger.kernel.org, dave.hansen@intel.com, Andi Kleen
Subject: [PATCH 03/11] x86/retpoline/entry: Convert entry assembler indirect jumps
Date: Wed, 3 Jan 2018 15:09:26 -0800
Message-Id: <20180103230934.15788-4-andi@firstfloor.org>
X-Mailer: git-send-email 2.14.3
In-Reply-To: <20180103230934.15788-1-andi@firstfloor.org>
References: <20180103230934.15788-1-andi@firstfloor.org>

From: Andi Kleen

Convert all indirect jumps in the core 32/64-bit entry assembler code to
use non-speculative sequences.

Based on code from David Woodhouse and Tim Chen.

Signed-off-by: Andi Kleen
---
 arch/x86/entry/entry_32.S |  5 +++--
 arch/x86/entry/entry_64.S | 12 +++++++-----
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index ace8f321a5a1..a4b88260d6f7 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
 #include
 #include
 #include
+#include
 
 .section .entry.text, "ax"
 
@@ -290,7 +291,7 @@ ENTRY(ret_from_fork)
 
 	/* kernel thread */
 1:	movl	%edi, %eax
-	call	*%ebx
+	NOSPEC_CALL %ebx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve().  Exit to userspace to complete the execve()
@@ -919,7 +920,7 @@ common_exception:
 	movl	%ecx, %es
 	TRACE_IRQS_OFF
 	movl	%esp, %eax			# pt_regs pointer
-	call	*%edi
+	NOSPEC_CALL %edi
 	jmp	ret_from_exception
 END(common_exception)
 
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f048e384ff54..486990fb3e4d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "calling.h"
@@ -191,7 +192,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
 	 */
 	pushq	%rdi
 	movq	$entry_SYSCALL_64_stage2, %rdi
-	jmp	*%rdi
+	NOSPEC_JMP %rdi
 END(entry_SYSCALL_64_trampoline)
 
 .popsection
@@ -269,8 +270,9 @@ entry_SYSCALL_64_fastpath:
 	 * This call instruction is handled specially in stub_ptregs_64.
	 * It might end up jumping to the slow path.  If it jumps, RAX
	 * and all argument registers are clobbered.
-	 */
-	call	*sys_call_table(, %rax, 8)
+	 */
+	movq	sys_call_table(, %rax, 8), %rax
+	NOSPEC_CALL %rax
 .Lentry_SYSCALL_64_after_fastpath_call:
 
 	movq	%rax, RAX(%rsp)
@@ -442,7 +444,7 @@ ENTRY(stub_ptregs_64)
 	jmp	entry_SYSCALL64_slow_path
 
 1:
-	jmp	*%rax			/* Called from C */
+	NOSPEC_JMP %rax			/* Called from C */
 END(stub_ptregs_64)
 
 .macro ptregs_stub func
@@ -521,7 +523,7 @@ ENTRY(ret_from_fork)
 1:
 	/* kernel thread */
 	movq	%r12, %rdi
-	call	*%rbx
+	NOSPEC_CALL %rbx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve().  Exit to userspace to complete the execve()
-- 
2.14.3
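
For reference, the NOSPEC_CALL and NOSPEC_JMP macros used above stand in for a
retpoline-style sequence: the indirect branch is routed through a RET so that
speculation driven by the indirect-branch predictor is captured in a harmless
loop instead of reaching an attacker-chosen target. The snippet below is only a
sketch of that technique for the 64-bit jump case; the macro name
NOSPEC_JMP_SKETCH, the labels and the demo code are illustrative and are not
taken from this series.

/* retpoline_sketch.S -- illustrative only, not the macro definition from this patch set */

.macro NOSPEC_JMP_SKETCH reg:req
	call	2f		/* push label 1 as the (predicted) return address */
1:
	pause			/* the return predictor steers speculation here ... */
	lfence
	jmp	1b		/* ... where it spins harmlessly until the ret retires */
2:
	movq	\reg, (%rsp)	/* replace the return address with the real target */
	ret			/* architecturally this jumps to \reg */
.endm

	.text
	.globl	demo
demo:
	movq	$demo_target, %rax
	NOSPEC_JMP_SKETCH %rax	/* behaves like "jmp *%rax", minus the indirect-branch speculation */
demo_target:
	ret

Real implementations typically wrap such a sequence in an ALTERNATIVE so that
CPUs which do not need the mitigation keep the plain indirect branch, and the
call variant additionally jumps over the thunk body and then calls back into it.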