From: David Woodhouse <dwmw@amazon.co.uk>
To: Andi Kleen
Cc: Paul Turner, LKML, Linus Torvalds, Greg Kroah-Hartman, Tim Chen,
    Dave Hansen, tglx@linutronix.de, Kees Cook, Rik van Riel,
    Peter Zijlstra, Andy Lutomirski, Jiri Kosina,
    gnomes@lxorguk.ukuu.org.uk, x86@kernel.org, bp@alien8.de,
    rga@amazon.de, thomas.lendacky@amd.com, Josh Poimboeuf
Subject: [PATCH v3] x86/retpoline: Fill return stack buffer on vmexit
Date: Thu, 11 Jan 2018 16:07:47 +0000
Message-Id: <1515686867-10319-1-git-send-email-dwmw@amazon.co.uk>

In accordance with the Intel and AMD documentation, we need to overwrite
all entries in the RSB on exiting a guest, to prevent malicious branch
target predictions from affecting the host kernel. This is needed both
for retpoline and for IBRS.

Signed-off-by: David Woodhouse
Tested-by: Peter Zijlstra (Intel)
---
v2: Reduce the size of the ALTERNATIVE insns, fix .align (again)!
    Sent in private email for testing, hence this second public post is
v2.1: Add CONFIG_RETPOLINE around RSB stuffing
v3: Back to putting the whole RSB stuffing in alternatives, so that we
    can use ANNOTATE_NOSPEC_ALTERNATIVE to make objtool cope. But in
    oldinstr, jump over it.

 arch/x86/include/asm/nospec-branch.h | 69 ++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm.c                   |  4 ++++
 arch/x86/kvm/vmx.c                   |  4 ++++
 3 files changed, 77 insertions(+)

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 7d70ea9..1bb3cc8 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -7,6 +7,43 @@
 #include <asm/alternative-asm.h>
 #include <asm/cpufeatures.h>
 
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * We define a CPP macro such that it can be used from both .S files and
+ * inline assembly. It's possible to do a .macro and then include that
+ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ */
+
+#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
+#define RSB_FILL_LOOPS		16	/* To avoid underflow */
+
+#define __FILL_RETURN_BUFFER(reg, nr, sp, uniq)	\
+	mov	$(nr/2), reg;			\
+.Ldo_call1_ ## uniq:				\
+	call	.Ldo_call2_ ## uniq;		\
+.Ltrap1_ ## uniq:				\
+	pause;					\
+	jmp	.Ltrap1_ ## uniq;		\
+.Ldo_call2_ ## uniq:				\
+	call	.Ldo_loop_ ## uniq;		\
+.Ltrap2_ ## uniq:				\
+	pause;					\
+	jmp	.Ltrap2_ ## uniq;		\
+.Ldo_loop_ ## uniq:				\
+	dec	reg;				\
+	jnz	.Ldo_call1_ ## uniq;		\
+	add	$(BITS_PER_LONG/8) * nr, sp;
+
 #ifdef __ASSEMBLY__
 
 /*
@@ -61,6 +98,19 @@
 #endif
 .endm
 
+/*
+ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+ * monstrosity above, manually.
+ */
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
+#ifdef CONFIG_RETPOLINE
+	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
+		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP,\@))	\
+		\ftr
+.Lskip_rsb_\@:
+#endif
+.endm
+
 #else /* __ASSEMBLY__ */
 
 #if defined(CONFIG_X86_64) && defined(RETPOLINE)
@@ -115,5 +165,24 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };
 
+/*
+ * On VMEXIT we must ensure that no RSB predictions learned in the guest
+ * can be followed in the host, by overwriting the RSB completely. Both
+ * retpoline and IBRS mitigations for Spectre v2 need this; only on future
+ * CPUs with IBRS_ATT *might* it be avoided.
+ */
+static inline void vmexit_fill_RSB(void)
+{
+#ifdef CONFIG_RETPOLINE
+	unsigned long loops = RSB_CLEAR_LOOPS / 2;
+
+	asm volatile (ALTERNATIVE("jmp .Lskip_rsb_%=",
+				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1, _%=)),
+				  X86_FEATURE_RETPOLINE)
+		      ".Lskip_rsb_%=:"
+		      : "=&r" (loops), ASM_CALL_CONSTRAINT
+		      : "r" (loops) : "memory" );
+#endif
+}
 #endif /* __ASSEMBLY__ */
 #endif /* __NOSPEC_BRANCH_H__ */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0e68f0b..2744b973 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -4985,6 +4986,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 		);
 
+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 62ee436..d1e25db 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -50,6 +50,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/nospec-branch.h>
 
 #include "trace.h"
 #include "pmu.h"
@@ -9403,6 +9404,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 	      );
 
+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
 	if (debugctlmsr)
 		update_debugctlmsr(debugctlmsr);
-- 
2.7.4
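The stuffing sequence generated by __FILL_RETURN_BUFFER is easier to study
outside the kernel. What follows is a minimal userspace sketch of the same
pattern, not part of the patch: the function name fill_return_buffer, the
numeric labels, and the fixed count of 32 entries are invented here for
illustration. It assumes an x86-64 GCC/Clang toolchain and should be built
with -mno-red-zone, since the CALLs push return addresses below the
function's stack pointer.

/*
 * Userspace sketch of the RSB-stuffing pattern from __FILL_RETURN_BUFFER.
 * Illustration only; names and labels are hypothetical. Build with:
 *
 *   gcc -O2 -mno-red-zone -o rsb-sketch rsb-sketch.c
 *
 * -mno-red-zone matters because the CALLs below temporarily push return
 * addresses into the 128-byte red zone beneath the stack pointer.
 */
#include <stdio.h>

static void fill_return_buffer(void)
{
	unsigned long loops;

	asm volatile(
		/* 32 RSB entries, two CALLs per loop iteration */
		"	mov	$16, %0\n"
		"1:	call	2f\n"		/* push return address #1 */
		"11:	pause\n"		/* speculation trap: reached only */
		"	jmp	11b\n"		/* by a mispredicted 'ret' */
		"2:	call	3f\n"		/* push return address #2 */
		"22:	pause\n"
		"	jmp	22b\n"
		"3:	dec	%0\n"
		"	jnz	1b\n"
		/* drop the 32 return addresses pushed above */
		"	add	$(8 * 32), %%rsp\n"
		: "=&r" (loops)
		: : "memory");
}

int main(void)
{
	fill_return_buffer();
	printf("stuffed 32 RSB entries with speculation traps\n");
	return 0;
}

Each CALL pushes a return address and so occupies one RSB entry; the
'pause; jmp' pair after it is reached only if the CPU later predicts a
'ret' through that entry, so any such speculation spins in a harmless
loop. Because no matching 'ret' is ever architecturally executed, the
final ADD discards all 32 return addresses from the real stack in one
step, exactly as the add $(BITS_PER_LONG/8) * nr in the macro does.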