From: Tim Chen
To: Thomas Gleixner, Andy Lutomirski, Linus Torvalds, Greg KH
Cc: Tim Chen, Dave Hansen, Andrea Arcangeli, Andi Kleen, Arjan Van De Ven, David Woodhouse, linux-kernel@vger.kernel.org
Subject: [PATCH v2 8/8] x86: Use IBRS for firmware update path
Date: Fri, 5 Jan 2018 18:12:23 -0800
Message-Id:
X-Mailer: git-send-email 2.9.4
In-Reply-To:
References:

From: David Woodhouse

We are impervious to the indirect branch prediction attack with retpoline, but firmware is not, so we still need to set IBRS to protect firmware code execution when calling into firmware at runtime.

Signed-off-by: David Woodhouse
Signed-off-by: Tim Chen
---
 arch/x86/include/asm/apm.h       |  6 ++++++
 arch/x86/include/asm/efi.h       | 17 +++++++++++++--
 arch/x86/include/asm/spec_ctrl.h |  3 +++
 arch/x86/kernel/cpu/spec_ctrl.c  | 45 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 69 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
index 4d4015d..fbf4212 100644
--- a/arch/x86/include/asm/apm.h
+++ b/arch/x86/include/asm/apm.h
@@ -7,6 +7,8 @@
 #ifndef _ASM_X86_MACH_DEFAULT_APM_H
 #define _ASM_X86_MACH_DEFAULT_APM_H
 
+#include <asm/spec_ctrl.h>
+
 #ifdef APM_ZERO_SEGS
 #	define APM_DO_ZERO_SEGS \
 		"pushl %%ds\n\t" \
@@ -28,6 +30,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
 					u32 *eax, u32 *ebx, u32 *ecx,
 					u32 *edx, u32 *esi)
 {
+	unprotected_firmware_begin();
 	/*
 	 * N.B. We do NOT need a cld after the BIOS call
 	 * because we always save and restore the flags.
@@ -44,6 +47,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
 		  "=S" (*esi)
 		: "a" (func), "b" (ebx_in), "c" (ecx_in)
 		: "memory", "cc");
+	unprotected_firmware_end();
 }
 
 static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -52,6 +56,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
 	int	cx, dx, si;
 	bool	error;
 
+	unprotected_firmware_begin();
 	/*
 	 * N.B. We do NOT need a cld after the BIOS call
 	 * because we always save and restore the flags.
@@ -68,6 +73,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
 		  "=S" (si)
 		: "a" (func), "b" (ebx_in), "c" (ecx_in)
 		: "memory", "cc");
+	unprotected_firmware_end();
 	return error;
 }
 
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 85f6ccb..439bd55 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include <asm/spec_ctrl.h>
 
 /*
  * We map the EFI regions needed for runtime services non-contiguously,
@@ -36,8 +37,18 @@ extern asmlinkage unsigned long efi_call_phys(void *, ...);
 
-#define arch_efi_call_virt_setup()	kernel_fpu_begin()
-#define arch_efi_call_virt_teardown()	kernel_fpu_end()
+#define arch_efi_call_virt_setup()		\
+({						\
+	kernel_fpu_begin();			\
+	unprotected_firmware_begin();		\
+})
+
+#define arch_efi_call_virt_teardown()		\
+({						\
+	unprotected_firmware_end();		\
+	kernel_fpu_end();			\
+})
+
 /*
  * Wrap all the virtual calls in a way that forces the parameters on the stack.
@@ -73,6 +84,7 @@ struct efi_scratch {
 	efi_sync_low_kernel_mappings();				\
 	preempt_disable();					\
 	__kernel_fpu_begin();					\
+	unprotected_firmware_begin();				\
 								\
 	if (efi_scratch.use_pgd) {				\
 		efi_scratch.prev_cr3 = __read_cr3();		\
@@ -91,6 +103,7 @@ struct efi_scratch {
 		__flush_tlb_all();				\
 	}							\
 								\
+	unprotected_firmware_end();				\
 	__kernel_fpu_end();					\
 	preempt_enable();					\
 })
diff --git a/arch/x86/include/asm/spec_ctrl.h b/arch/x86/include/asm/spec_ctrl.h
index be08ae7..ce21d24 100644
--- a/arch/x86/include/asm/spec_ctrl.h
+++ b/arch/x86/include/asm/spec_ctrl.h
@@ -11,6 +11,9 @@ void scan_spec_ctrl_feature(struct cpuinfo_x86 *c);
 void rescan_spec_ctrl_feature(struct cpuinfo_x86 *c);
 bool ibrs_inuse(void);
 
+void unprotected_firmware_begin(void);
+void unprotected_firmware_end(void);
+
 extern unsigned int dynamic_ibrs;
 
 static inline void __disable_indirect_speculation(void)
diff --git a/arch/x86/kernel/cpu/spec_ctrl.c b/arch/x86/kernel/cpu/spec_ctrl.c
index 076c470..59bf561 100644
--- a/arch/x86/kernel/cpu/spec_ctrl.c
+++ b/arch/x86/kernel/cpu/spec_ctrl.c
@@ -10,6 +10,10 @@
 unsigned int dynamic_ibrs __read_mostly;
 EXPORT_SYMBOL_GPL(dynamic_ibrs);
 
+#if defined(RETPOLINE)
+static unsigned int firmware_ibrs __read_mostly;
+#endif
+
 enum {
 	IBRS_DISABLED,
 	/* in host kernel, disabled in guest and userland */
@@ -31,6 +35,8 @@ static inline void set_ibrs_feature(void)
 		dynamic_ibrs = 1;
 		ibrs_enabled = IBRS_ENABLED;
 	}
+#else
+	firmware_ibrs = 1;
 #endif
 }
 
@@ -162,3 +168,42 @@ static int __init debugfs_spec_ctrl(void)
 	return 0;
 }
 late_initcall(debugfs_spec_ctrl);
+
+#if defined(RETPOLINE)
+/*
+ * RETPOLINE does not protect against indirect speculation
+ * in firmware code.  Enable IBRS to protect firmware execution.
+ */
+void unprotected_firmware_begin(void)
+{
+	if (firmware_ibrs) {
+		__disable_indirect_speculation();
+	} else {
+		/*
+		 * rmb prevents unwanted speculation when we
+		 * are setting IBRS
+		 */
+		rmb();
+	}
+}
+EXPORT_SYMBOL_GPL(unprotected_firmware_begin);
+
+void unprotected_firmware_end(void)
+{
+	if (firmware_ibrs) {
+		__enable_indirect_speculation();
+	}
+}
+EXPORT_SYMBOL_GPL(unprotected_firmware_end);
+
+#else
+void unprotected_firmware_begin(void)
+{
+}
+EXPORT_SYMBOL_GPL(unprotected_firmware_begin);
+
+void unprotected_firmware_end(void)
+{
+}
+EXPORT_SYMBOL_GPL(unprotected_firmware_end);
+#endif
-- 
2.9.4
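
Note: the __disable_indirect_speculation()/__enable_indirect_speculation() helpers used above are declared in asm/spec_ctrl.h by an earlier patch in this series and are not part of this diff. As a rough sketch of what they are assumed to do (the MSR and bit constant names below are illustrative, not taken from this series), they simply toggle the IBRS bit in the IA32_SPEC_CTRL MSR:

/*
 * Illustrative sketch only -- not part of this patch.  Assumes
 * IA32_SPEC_CTRL is MSR 0x48 with IBRS in bit 0, as documented
 * for the SPEC_CTRL CPU feature.
 */
#include <asm/msr.h>

#define MSR_IA32_SPEC_CTRL	0x00000048
#define SPEC_CTRL_IBRS		(1 << 0)

static inline void __disable_indirect_speculation(void)
{
	/* Set IBRS: restrict indirect branch prediction from here on */
	native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
}

static inline void __enable_indirect_speculation(void)
{
	/* Clear IBRS: allow indirect branch prediction again */
	native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
}

With that in mind, the patch only touches the MSR around firmware calls on retpoline builds (firmware_ibrs is set only when RETPOLINE is defined); on non-retpoline builds the helpers are empty stubs, presumably because dynamic IBRS already covers kernel execution there.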