2018-11-25 19:00:16

by Thomas Gleixner

Subject: [patch V2 18/28] x86/speculation: Prepare for per task indirect branch speculation control

To avoid the overhead of STIBP always on, it's necessary to allow per task
control of STIBP.

Add a new task flag TIF_SPEC_IB and evaluate it during context switch if
SMT is active and flag evaluation is enabled by the speculation control
code. Add the conditional evaluation to x86_virt_spec_ctrl() as well so the
guest/host switch works properly.

This has no effect because TIF_SPEC_IB cannot be set yet and the static key
which controls evaluation is off. Preparatory patch for adding the control
code.

[ tglx: Simplify the context switch logic and make the TIF evaluation
depend on SMP=y and on the static key controlling the conditional
update. Rename it to TIF_SPEC_IB because it controls both STIBP and
IBPB ]

Signed-off-by: Tim Chen <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>

---

v1 -> v2: Remove pointless include. Use consistent comments.

---
arch/x86/include/asm/msr-index.h | 5 +++--
arch/x86/include/asm/spec-ctrl.h | 12 ++++++++++++
arch/x86/include/asm/thread_info.h | 5 ++++-
arch/x86/kernel/cpu/bugs.c | 4 ++++
arch/x86/kernel/process.c | 23 +++++++++++++++++++++--
5 files changed, 44 insertions(+), 5 deletions(-)

--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -41,9 +41,10 @@

#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
-#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
+#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
-#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */

#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
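
For orientation, the resulting MSR_IA32_SPEC_CTRL bit layout (bits as defined above; the reserved-bit note summarizes the follow-up discussion in this thread, it is not part of the patch):

  bit 0  SPEC_CTRL_IBRS   - Indirect Branch Restricted Speculation
  bit 1  SPEC_CTRL_STIBP  - Single Thread Indirect Branch Predictors
  bit 2  SPEC_CTRL_SSBD   - Speculative Store Bypass Disable

Writing any bit the CPU does not implement raises #GP, which is exactly what the replies further down end up debugging.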
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(
return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

+static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
+{
+ BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+ return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
{
BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

+static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+ BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+ return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
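
As an aside for readers following the bit arithmetic: the two new helpers just relocate a single bit between the TIF word and the SPEC_CTRL value. A minimal userspace sketch (constants copied from this patch; the test program itself is purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* Values as defined in this patch */
#define SPEC_CTRL_STIBP_SHIFT   1
#define SPEC_CTRL_STIBP         (1UL << SPEC_CTRL_STIBP_SHIFT)
#define TIF_SPEC_IB             9
#define _TIF_SPEC_IB            (1UL << TIF_SPEC_IB)

/* TIF bit 9 -> SPEC_CTRL bit 1: shift right by the distance between them */
static uint64_t stibp_tif_to_spec_ctrl(uint64_t tifn)
{
        return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
}

int main(void)
{
        /* Prints 0x2, i.e. SPEC_CTRL_STIBP, for a task with TIF_SPEC_IB set */
        printf("0x%llx\n",
               (unsigned long long)stibp_tif_to_spec_ctrl(_TIF_SPEC_IB));
        return 0;
}

The BUILD_BUG_ON() in the kernel version only guards against the TIF bit ever being renumbered below the SPEC_CTRL bit, which would turn the shift count negative.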
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -83,6 +83,7 @@ struct thread_info {
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
+#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_PATCH_PENDING 13 /* pending live patching update */
@@ -110,6 +111,7 @@ struct thread_info {
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
@@ -146,7 +148,8 @@ struct thread_info {

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \
+ _TIF_SSBD|_TIF_SPEC_IB)

#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
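
Adding _TIF_SPEC_IB to _TIF_WORK_CTXSW is what makes the new flag visible to the context switch at all: __switch_to() takes the __switch_to_xtra() slow path only when the previous or next task carries one of the masked bits. A self-contained sketch of that gating (the mask is abbreviated to the one new bit; the helpers only model their kernel counterparts):

#include <stdint.h>
#include <stdio.h>

#define _TIF_SPEC_IB            (1UL << 9)
/* Abbreviated: the real mask also ORs in the I/O bitmap, NOCPUID, NOTSC,
 * BLOCKSTEP and SSBD bits, and the PREV side adds _TIF_USER_RETURN_NOTIFY. */
#define _TIF_WORK_CTXSW         _TIF_SPEC_IB
#define _TIF_WORK_CTXSW_PREV    _TIF_WORK_CTXSW
#define _TIF_WORK_CTXSW_NEXT    _TIF_WORK_CTXSW

static void switch_to_xtra(uint64_t prev_tif, uint64_t next_tif)
{
        /* In the kernel this eventually calls __speculation_ctrl_update() */
        printf("slow path: prev=0x%llx next=0x%llx\n",
               (unsigned long long)prev_tif, (unsigned long long)next_tif);
}

/* Models the check in __switch_to(): the slow path runs only when
 * either side has work bits set. */
static void context_switch(uint64_t prev_tif, uint64_t next_tif)
{
        if ((prev_tif & _TIF_WORK_CTXSW_PREV) ||
            (next_tif & _TIF_WORK_CTXSW_NEXT))
                switch_to_xtra(prev_tif, next_tif);
}

int main(void)
{
        context_switch(0, _TIF_SPEC_IB);        /* incoming task wants STIBP: slow path */
        context_switch(0, 0);                   /* no work bits: fast path */
        return 0;
}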
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -148,6 +148,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl,
static_cpu_has(X86_FEATURE_AMD_SSBD))
hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

+ /* Conditional STIBP enabled? */
+ if (static_branch_unlikely(&switch_to_cond_stibp))
+ hostval |= stibp_tif_to_spec_ctrl(ti->flags);
+
if (hostval != guestval) {
msrval = setguest ? guestval : hostval;
wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -406,6 +406,11 @@ static __always_inline void spec_ctrl_up
if (static_cpu_has(X86_FEATURE_SSBD))
msr |= ssbd_tif_to_spec_ctrl(tifn);

+ /* Only evaluate if conditional STIBP is enabled */
+ if (IS_ENABLED(CONFIG_SMP) &&
+ static_branch_unlikely(&switch_to_cond_stibp))
+ msr |= stibp_tif_to_spec_ctrl(tifn);
+
wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}

@@ -418,10 +423,16 @@ static __always_inline void spec_ctrl_up
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
unsigned long tifn)
{
+ unsigned long tif_diff = tifp ^ tifn;
bool updmsr = false;

- /* If TIF_SSBD is different, select the proper mitigation method */
- if ((tifp ^ tifn) & _TIF_SSBD) {
+ /*
+ * If TIF_SSBD is different, select the proper mitigation
+ * method. Note that if SSBD mitigation is disabled or permanently
+ * enabled this branch can't be taken because nothing can set
+ * TIF_SSBD.
+ */
+ if (tif_diff & _TIF_SSBD) {
if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
amd_set_ssb_virt_state(tifn);
else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
@@ -430,6 +441,14 @@ static __always_inline void __speculatio
updmsr = true;
}

+ /*
+ * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
+ * otherwise avoid the MSR write.
+ */
+ if (IS_ENABLED(CONFIG_SMP) &&
+ static_branch_unlikely(&switch_to_cond_stibp))
+ updmsr |= !!(tif_diff & _TIF_SPEC_IB);
+
if (updmsr)
spec_ctrl_update_msr(tifn);
}
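
To make the update policy concrete: the MSR write is skipped unless a relevant bit actually changed between the outgoing and incoming task, but whenever it does happen the value is rebuilt from the incoming task's flags, so a previously set STIBP bit is also cleared correctly. A worked userspace sketch of just the STIBP leg, condensed from spec_ctrl_update_msr() and __speculation_ctrl_update() above (cond_stibp stands in for the switch_to_cond_stibp static key):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_STIBP_SHIFT   1
#define TIF_SPEC_IB             9
#define _TIF_SPEC_IB            (1UL << TIF_SPEC_IB)

static bool cond_stibp = true;          /* models the static key */

static uint64_t stibp_tif_to_spec_ctrl(uint64_t tifn)
{
        return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
}

static void speculation_ctrl_update(uint64_t tifp, uint64_t tifn)
{
        uint64_t tif_diff = tifp ^ tifn;
        uint64_t msr = 0;               /* stands in for x86_spec_ctrl_base */
        bool updmsr = false;

        if (cond_stibp) {
                /* Write only if the IB bit changed ... */
                updmsr |= !!(tif_diff & _TIF_SPEC_IB);
                /* ... but compose the value from the incoming task */
                msr |= stibp_tif_to_spec_ctrl(tifn);
        }

        if (updmsr)
                printf("wrmsrl(SPEC_CTRL, 0x%llx)\n", (unsigned long long)msr);
        else
                printf("no MSR write\n");
}

int main(void)
{
        speculation_ctrl_update(0, _TIF_SPEC_IB);       /* off -> on: writes 0x2 */
        speculation_ctrl_update(_TIF_SPEC_IB, 0);       /* on -> off: writes 0x0 */
        speculation_ctrl_update(0, 0);                  /* unchanged: no write */
        return 0;
}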




2018-11-27 19:28:05

by Tom Lendacky

Subject: Re: [patch V2 18/28] x86/speculation: Prepare for per task indirect branch speculation control

On 11/25/2018 12:33 PM, Thomas Gleixner wrote:
> --- a/arch/x86/kernel/process.c
> +++ b/arch/x86/kernel/process.c
> @@ -406,6 +406,11 @@ static __always_inline void spec_ctrl_up
> if (static_cpu_has(X86_FEATURE_SSBD))
> msr |= ssbd_tif_to_spec_ctrl(tifn);

I did some quick testing and found my original logic was flawed. Since
spec_ctrl_update_msr() can now be called for STIBP, an additional check
is needed to set the SSBD MSR bit.

Both X86_FEATURE_VIRT_SSBD and X86_FEATURE_LS_CFG_SSBD cause
X86_FEATURE_SSBD to be set. Before this patch, spec_ctrl_update_msr() was
only called if X86_FEATURE_SSBD was set and one of the other SSBD features
wasn't set. But now, STIBP can cause spec_ctrl_update_msr() to get called
and cause the SSBD MSR bit to be set when it shouldn't (could result in
a GP fault).

Thanks,
Tom
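
For reference, the SSBD feature-bit zoo this exchange revolves around, summarized from arch/x86/include/asm/cpufeatures.h of that era (a paraphrase, not quoted from the mails):

  X86_FEATURE_SPEC_CTRL_SSBD  - SSBD controlled via MSR_IA32_SPEC_CTRL (Intel)
  X86_FEATURE_AMD_SSBD        - SSBD controlled via MSR_IA32_SPEC_CTRL (AMD)
  X86_FEATURE_VIRT_SSBD       - SSBD controlled via MSR_AMD64_VIRT_SPEC_CTRL
  X86_FEATURE_LS_CFG_SSBD     - SSBD controlled via MSR_AMD64_LS_CFG
  X86_FEATURE_SSBD            - generic flag, set when any mechanism is available

Only the first two guarantee that the SSBD bit exists in MSR_IA32_SPEC_CTRL; that is precisely the distinction the fixes below reinstate.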


2018-11-27 19:54:04

by Tim Chen

Subject: Re: [patch V2 18/28] x86/speculation: Prepare for per task indirect branch speculation control

On 11/27/2018 09:25 AM, Lendacky, Thomas wrote:
>> --- a/arch/x86/kernel/process.c
>> +++ b/arch/x86/kernel/process.c
>> @@ -406,6 +406,11 @@ static __always_inline void spec_ctrl_up
>> if (static_cpu_has(X86_FEATURE_SSBD))
>> msr |= ssbd_tif_to_spec_ctrl(tifn);
>
> I did some quick testing and found my original logic was flawed. Since
> spec_ctrl_update_msr() can now be called for STIBP, an additional check
> is needed to set the SSBD MSR bit.
>
> Both X86_FEATURE_VIRT_SSBD and X86_FEATURE_LS_CFG_SSBD cause
> X86_FEATURE_SSBD to be set. Before this patch, spec_ctrl_update_msr() was
> only called if X86_FEATURE_SSBD was set and one of the other SSBD features
> wasn't set. But now, STIBP can cause spec_ctrl_update_msr() to get called
> and cause the SSBD MSR bit to be set when it shouldn't (could result in
> a GP fault).
>

I think it will be cleaner just to fold the msr update into
__speculation_ctrl_update to fix this issue.

Something like this perhaps.

Thanks.

Tim

---

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3f5e351..614ec51 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -398,25 +398,6 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

-static __always_inline void spec_ctrl_update_msr(unsigned long tifn)
-{
- u64 msr = x86_spec_ctrl_base;
-
- /*
- * If X86_FEATURE_SSBD is not set, the SSBD bit is not to be
- * touched.
- */
- if (static_cpu_has(X86_FEATURE_SSBD))
- msr |= ssbd_tif_to_spec_ctrl(tifn);
-
- /* Only evaluate if conditional STIBP is enabled */
- if (IS_ENABLED(CONFIG_SMP) &&
- static_branch_unlikely(&switch_to_cond_stibp))
- msr |= stibp_tif_to_spec_ctrl(tifn);
-
- wrmsrl(MSR_IA32_SPEC_CTRL, msr);
-}
-
/*
* Update the MSRs managing speculation control, during context switch.
*
@@ -428,6 +409,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
{
unsigned long tif_diff = tifp ^ tifn;
bool updmsr = false;
+ u64 msr = x86_spec_ctrl_base;

/*
* If TIF_SSBD is different, select the proper mitigation
@@ -440,8 +422,10 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
amd_set_ssb_virt_state(tifn);
else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
amd_set_core_ssb_state(tifn);
- else if (static_cpu_has(X86_FEATURE_SSBD))
+ else if (static_cpu_has(X86_FEATURE_SSBD)) {
updmsr = true;
+ msr |= ssbd_tif_to_spec_ctrl(tifn);
+ }
}

/*
@@ -449,11 +433,13 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
* otherwise avoid the MSR write.
*/
if (IS_ENABLED(CONFIG_SMP) &&
- static_branch_unlikely(&switch_to_cond_stibp))
+ static_branch_unlikely(&switch_to_cond_stibp)) {
updmsr |= !!(tif_diff & _TIF_SPEC_IB);
+ msr |= stibp_tif_to_spec_ctrl(tifn);
+ }

if (updmsr)
- spec_ctrl_update_msr(tifn);
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}

void speculation_ctrl_update(unsigned long tif)

2018-11-27 20:40:54

by Thomas Gleixner

Subject: Re: [patch V2 18/28] x86/speculation: Prepare for per task indirect branch speculation control

On Tue, 27 Nov 2018, Lendacky, Thomas wrote:
> On 11/25/2018 12:33 PM, Thomas Gleixner wrote:
> > --- a/arch/x86/kernel/process.c
> > +++ b/arch/x86/kernel/process.c
> > @@ -406,6 +406,11 @@ static __always_inline void spec_ctrl_up
> > if (static_cpu_has(X86_FEATURE_SSBD))
> > msr |= ssbd_tif_to_spec_ctrl(tifn);
>
> I did some quick testing and found my original logic was flawed. Since
> spec_ctrl_update_msr() can now be called for STIBP, an additional check
> is needed to set the SSBD MSR bit.
>
> Both X86_FEATURE_VIRT_SSBD and X86_FEATURE_LS_CFG_SSBD cause
> X86_FEATURE_SSBD to be set. Before this patch, spec_ctrl_update_msr() was
> only called if X86_FEATURE_SSBD was set and one of the other SSBD features
> wasn't set. But now, STIBP can cause spec_ctrl_update_msr() to get called
> and cause the SSBD MSR bit to be set when it shouldn't (could result in
> a GP fault).

The below should fix that. We have the same logic in x86_virt_spec_ctrl()

Thanks,

tglx

8<---------------
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -403,10 +403,11 @@ static __always_inline void spec_ctrl_up
u64 msr = x86_spec_ctrl_base;

/*
- * If X86_FEATURE_SSBD is not set, the SSBD bit is not to be
- * touched.
+ * If SSBD is not controlled in MSR_SPEC_CTRL, the SSBD bit has not
+ * to be touched.
*/
- if (static_cpu_has(X86_FEATURE_SSBD))
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD))
msr |= ssbd_tif_to_spec_ctrl(tifn);

/* Only evaluate if conditional STIBP is enabled */

2018-11-27 20:43:57

by Thomas Gleixner

Subject: Re: [patch V2 18/28] x86/speculation: Prepare for per task indirect branch speculation control

On Tue, 27 Nov 2018, Thomas Gleixner wrote:
> The below should fix that. We have the same logic in x86_virt_spec_ctrl()

Actually it's incomplete. Full version below.

Thanks,

tglx

8<-----------------
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -403,10 +403,11 @@ static __always_inline void spec_ctrl_up
u64 msr = x86_spec_ctrl_base;

/*
- * If X86_FEATURE_SSBD is not set, the SSBD bit is not to be
- * touched.
+ * If SSBD is not controlled in MSR_SPEC_CTRL, the SSBD bit has not
+ * to be touched.
*/
- if (static_cpu_has(X86_FEATURE_SSBD))
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD))
msr |= ssbd_tif_to_spec_ctrl(tifn);

/* Only evaluate if conditional STIBP is enabled */
@@ -440,7 +441,8 @@ static __always_inline void __speculatio
amd_set_ssb_virt_state(tifn);
else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
amd_set_core_ssb_state(tifn);
- else if (static_cpu_has(X86_FEATURE_SSBD))
+ else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD))
updmsr = true;
}


2018-11-27 21:54:13

by Tom Lendacky

Subject: Re: [patch V2 18/28] x86/speculation: Prepare for per task indirect branch speculation control

On 11/27/2018 02:42 PM, Thomas Gleixner wrote:
>> The below should fix that. We have the same logic in x86_virt_spec_ctrl()
>
> Actually it's incomplete. Full version below.

Just one little nit on the comment below, otherwise works nicely.

Thanks,
Tom

> 8<-----------------
> --- a/arch/x86/kernel/process.c
> +++ b/arch/x86/kernel/process.c
> @@ -403,10 +403,11 @@ static __always_inline void spec_ctrl_up
> u64 msr = x86_spec_ctrl_base;
>
> /*
> - * If X86_FEATURE_SSBD is not set, the SSBD bit is not to be
> - * touched.
> + * If SSBD is not controlled in MSR_SPEC_CTRL, the SSBD bit has not

s/has not/is not/

> + * to be touched.
> */
> - if (static_cpu_has(X86_FEATURE_SSBD))
> + if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
> + static_cpu_has(X86_FEATURE_AMD_SSBD))
> msr |= ssbd_tif_to_spec_ctrl(tifn);

2018-11-28 09:40:07

by Thomas Gleixner

Subject: Re: [patch V2 18/28] x86/speculation: Prepare for per task indirect branch speculation control

On Tue, 27 Nov 2018, Tim Chen wrote:
> I think it will be cleaner just to fold the msr update into
> __speculation_ctrl_update to fix this issue.

Yes, that looks nicer and avoids a couple of extra static_cpu_has()
evaluations. I'll fold it into the proper places.

Thanks,

tglx

Subject: [tip:x86/pti] x86/speculation: Prepare for per task indirect branch speculation control

Commit-ID: 5bfbe3ad5840d941b89bcac54b821ba14f50a0ba
Gitweb: https://git.kernel.org/tip/5bfbe3ad5840d941b89bcac54b821ba14f50a0ba
Author: Tim Chen <[email protected]>
AuthorDate: Sun, 25 Nov 2018 19:33:46 +0100
Committer: Thomas Gleixner <[email protected]>
CommitDate: Wed, 28 Nov 2018 11:57:10 +0100

x86/speculation: Prepare for per task indirect branch speculation control

To avoid the overhead of STIBP always on, it's necessary to allow per task
control of STIBP.

Add a new task flag TIF_SPEC_IB and evaluate it during context switch if
SMT is active and flag evaluation is enabled by the speculation control
code. Add the conditional evaluation to x86_virt_spec_ctrl() as well so the
guest/host switch works properly.

This has no effect because TIF_SPEC_IB cannot be set yet and the static key
which controls evaluation is off. Preparatory patch for adding the control
code.

[ tglx: Simplify the context switch logic and make the TIF evaluation
depend on SMP=y and on the static key controlling the conditional
update. Rename it to TIF_SPEC_IB because it controls both STIBP and
IBPB ]

Signed-off-by: Tim Chen <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Jiri Kosina <[email protected]>
Cc: Tom Lendacky <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: David Woodhouse <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Casey Schaufler <[email protected]>
Cc: Asit Mallick <[email protected]>
Cc: Arjan van de Ven <[email protected]>
Cc: Jon Masters <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Greg KH <[email protected]>
Cc: Dave Stewart <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]


---
arch/x86/include/asm/msr-index.h | 5 +++--
arch/x86/include/asm/spec-ctrl.h | 12 ++++++++++++
arch/x86/include/asm/thread_info.h | 5 ++++-
arch/x86/kernel/cpu/bugs.c | 4 ++++
arch/x86/kernel/process.c | 20 ++++++++++++++++++--
5 files changed, 41 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 80f4a4f38c79..c8f73efb4ece 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -41,9 +41,10 @@

#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
-#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
+#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
-#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */

#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index 8e2f8411c7a7..27b0bce3933b 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

+static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
+{
+ BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+ return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
{
BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

+static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+ BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+ return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 523c69efc38a..fa583ec99e3e 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -83,6 +83,7 @@ struct thread_info {
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
+#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_PATCH_PENDING 13 /* pending live patching update */
@@ -110,6 +111,7 @@ struct thread_info {
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
@@ -146,7 +148,8 @@ struct thread_info {

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \
+ _TIF_SSBD|_TIF_SPEC_IB)

#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 3a223cce1fac..1e13dbfc0919 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -148,6 +148,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
static_cpu_has(X86_FEATURE_AMD_SSBD))
hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

+ /* Conditional STIBP enabled? */
+ if (static_branch_unlikely(&switch_to_cond_stibp))
+ hostval |= stibp_tif_to_spec_ctrl(ti->flags);
+
if (hostval != guestval) {
msrval = setguest ? guestval : hostval;
wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 70e9832379e1..574b144d2b53 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -404,11 +404,17 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
unsigned long tifn)
{
+ unsigned long tif_diff = tifp ^ tifn;
u64 msr = x86_spec_ctrl_base;
bool updmsr = false;

- /* If TIF_SSBD is different, select the proper mitigation method */
- if ((tifp ^ tifn) & _TIF_SSBD) {
+ /*
+ * If TIF_SSBD is different, select the proper mitigation
+ * method. Note that if SSBD mitigation is disabled or permanently
+ * enabled this branch can't be taken because nothing can set
+ * TIF_SSBD.
+ */
+ if (tif_diff & _TIF_SSBD) {
if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
amd_set_ssb_virt_state(tifn);
} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
@@ -420,6 +426,16 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
}
}

+ /*
+ * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
+ * otherwise avoid the MSR write.
+ */
+ if (IS_ENABLED(CONFIG_SMP) &&
+ static_branch_unlikely(&switch_to_cond_stibp)) {
+ updmsr |= !!(tif_diff & _TIF_SPEC_IB);
+ msr |= stibp_tif_to_spec_ctrl(tifn);
+ }
+
if (updmsr)
wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}