2018-11-25 18:58:37

by Thomas Gleixner

[permalink] [raw]
Subject: [patch V2 24/28] x86/speculation: Prepare arch_smt_update() for PRCTL mode

The upcoming fine grained per task STIBP control needs to be updated on CPU
hotplug as well.

Split out the code which controls the strict mode so the prctl control code
can be added later. Mark the SMP function call argument __unused while at it.

Signed-off-by: Thomas Gleixner <[email protected]>

---

v1 -> v2: s/app2app/user/. Mark smp function argument __unused

---
arch/x86/kernel/cpu/bugs.c | 46 ++++++++++++++++++++++++---------------------
1 file changed, 25 insertions(+), 21 deletions(-)

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -530,40 +530,44 @@ static void __init spectre_v2_select_mit
arch_smt_update();
}

-static bool stibp_needed(void)
+static void update_stibp_msr(void * __unused)
{
- /* Enhanced IBRS makes using STIBP unnecessary. */
- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
- return false;
-
- /* Check for strict user mitigation mode */
- return spectre_v2_user == SPECTRE_V2_USER_STRICT;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

-static void update_stibp_msr(void *info)
+/* Update x86_spec_ctrl_base in case SMT state changed. */
+static void update_stibp_strict(void)
{
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+
+ if (sched_smt_active())
+ mask |= SPEC_CTRL_STIBP;
+
+ if (mask == x86_spec_ctrl_base)
+ return;
+
+ pr_info("Spectre v2 user space SMT mitigation: STIBP %s\n",
+ mask & SPEC_CTRL_STIBP ? "always-on" : "off");
+ x86_spec_ctrl_base = mask;
+ on_each_cpu(update_stibp_msr, NULL, 1);
}

void arch_smt_update(void)
{
- u64 mask;
-
- if (!stibp_needed())
+ /* Enhanced IBRS implies STIBP. No update required. */
+ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return;

mutex_lock(&spec_ctrl_mutex);

- mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
- if (sched_smt_active())
- mask |= SPEC_CTRL_STIBP;
-
- if (mask != x86_spec_ctrl_base) {
- pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
- mask & SPEC_CTRL_STIBP ? "Enabling" : "Disabling");
- x86_spec_ctrl_base = mask;
- on_each_cpu(update_stibp_msr, NULL, 1);
+ switch (spectre_v2_user) {
+ case SPECTRE_V2_USER_NONE:
+ break;
+ case SPECTRE_V2_USER_STRICT:
+ update_stibp_strict();
+ break;
}
+
mutex_unlock(&spec_ctrl_mutex);
}





2018-11-27 20:19:49

by Tom Lendacky

[permalink] [raw]
Subject: Re: [patch V2 24/28] x86/speculation: Prepare arch_smt_update() for PRCTL mode

On 11/25/2018 12:33 PM, Thomas Gleixner wrote:
> The upcoming fine grained per task STIBP control needs to be updated on CPU
> hotplug as well.
>
> Split out the code which controls the strict mode so the prctl control code
> can be added later. Mark the SMP function call argument __unused while at it.
>
> Signed-off-by: Thomas Gleixner <[email protected]>
>
> ---
>
> v1 -> v2: s/app2app/user/. Mark smp function argument __unused
>
> ---
> arch/x86/kernel/cpu/bugs.c | 46 ++++++++++++++++++++++++---------------------
> 1 file changed, 25 insertions(+), 21 deletions(-)
>
> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -530,40 +530,44 @@ static void __init spectre_v2_select_mit
> arch_smt_update();
> }
>
> -static bool stibp_needed(void)
> +static void update_stibp_msr(void * __unused)
> {
> - /* Enhanced IBRS makes using STIBP unnecessary. */
> - if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
> - return false;
> -
> - /* Check for strict user mitigation mode */
> - return spectre_v2_user == SPECTRE_V2_USER_STRICT;
> + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
> }
>
> -static void update_stibp_msr(void *info)
> +/* Update x86_spec_ctrl_base in case SMT state changed. */
> +static void update_stibp_strict(void)
> {
> - wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
> + u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
> +
> + if (sched_smt_active())
> + mask |= SPEC_CTRL_STIBP;
> +
> + if (mask == x86_spec_ctrl_base)
> + return;
> +
> + pr_info("Spectre v2 user space SMT mitigation: STIBP %s\n",
> + mask & SPEC_CTRL_STIBP ? "always-on" : "off");
> + x86_spec_ctrl_base = mask;
> + on_each_cpu(update_stibp_msr, NULL, 1);

Some more testing using spectre_v2_user=on and I've found that during boot
up, once the first SMT thread is encountered no more updates to MSRs for
STIBP are done for any CPUs brought up after that. The first SMT thread
will cause mask != x86_spec_ctrl_base, but then x86_spec_ctrl_base is set
to mask and the check always causes a return for subsequent CPUs that are
brought up.

Talking to our HW folks, they recommend that it be set on all threads, so
I'm not sure what the right approach would be for this.

Also, I've seen some BIOSes set up the cores/threads where the core and
its thread are enumerated before the next core and its thread, etc. If
that were the case, I think this would result in only the first core
and its thread having STIBP set, right?

Thanks,
Tom

> }
>
> void arch_smt_update(void)
> {
> - u64 mask;
> -
> - if (!stibp_needed())
> + /* Enhanced IBRS implies STIBP. No update required. */
> + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
> return;
>
> mutex_lock(&spec_ctrl_mutex);
>
> - mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
> - if (sched_smt_active())
> - mask |= SPEC_CTRL_STIBP;
> -
> - if (mask != x86_spec_ctrl_base) {
> - pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
> - mask & SPEC_CTRL_STIBP ? "Enabling" : "Disabling");
> - x86_spec_ctrl_base = mask;
> - on_each_cpu(update_stibp_msr, NULL, 1);
> + switch (spectre_v2_user) {
> + case SPECTRE_V2_USER_NONE:
> + break;
> + case SPECTRE_V2_USER_STRICT:
> + update_stibp_strict();
> + break;
> }
> +
> mutex_unlock(&spec_ctrl_mutex);
> }
>
>
>

2018-11-27 20:31:44

by Thomas Gleixner

[permalink] [raw]
Subject: Re: [patch V2 24/28] x86/speculation: Prepare arch_smt_update() for PRCTL mode

On Tue, 27 Nov 2018, Lendacky, Thomas wrote:
> On 11/25/2018 12:33 PM, Thomas Gleixner wrote:
> > +/* Update x86_spec_ctrl_base in case SMT state changed. */
> > +static void update_stibp_strict(void)
> > {
> > - wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
> > + u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
> > +
> > + if (sched_smt_active())
> > + mask |= SPEC_CTRL_STIBP;
> > +
> > + if (mask == x86_spec_ctrl_base)
> > + return;
> > +
> > + pr_info("Spectre v2 user space SMT mitigation: STIBP %s\n",
> > + mask & SPEC_CTRL_STIBP ? "always-on" : "off");
> > + x86_spec_ctrl_base = mask;
> > + on_each_cpu(update_stibp_msr, NULL, 1);
>
> Some more testing using spectre_v2_user=on and I've found that during boot
> up, once the first SMT thread is encountered no more updates to MSRs for
> STIBP are done for any CPUs brought up after that. The first SMT thread
> will cause mask != x86_spec_ctrl_base, but then x86_spec_ctrl_base is set
> to mask and the check always causes a return for subsequent CPUs that are
> brought up.

The above code merely handles the switch between SMT and non-SMT mode,
because at that point all other online CPUs need to be updated; after that,
each upcoming CPU calls x86_spec_ctrl_setup_ap() which will write the MSR. So
it's all good.

Thanks,

tglx

2018-11-27 21:23:14

by Tom Lendacky

[permalink] [raw]
Subject: Re: [patch V2 24/28] x86/speculation: Prepare arch_smt_update() for PRCTL mode

On 11/27/2018 02:30 PM, Thomas Gleixner wrote:
> On Tue, 27 Nov 2018, Lendacky, Thomas wrote:
>> On 11/25/2018 12:33 PM, Thomas Gleixner wrote:
>>> +/* Update x86_spec_ctrl_base in case SMT state changed. */
>>> +static void update_stibp_strict(void)
>>> {
>>> - wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
>>> + u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
>>> +
>>> + if (sched_smt_active())
>>> + mask |= SPEC_CTRL_STIBP;
>>> +
>>> + if (mask == x86_spec_ctrl_base)
>>> + return;
>>> +
>>> + pr_info("Spectre v2 user space SMT mitigation: STIBP %s\n",
>>> + mask & SPEC_CTRL_STIBP ? "always-on" : "off");
>>> + x86_spec_ctrl_base = mask;
>>> + on_each_cpu(update_stibp_msr, NULL, 1);
>>
>> Some more testing using spectre_v2_user=on and I've found that during boot
>> up, once the first SMT thread is encountered no more updates to MSRs for
>> STIBP are done for any CPUs brought up after that. The first SMT thread
>> will cause mask != x86_spec_ctrl_base, but then x86_spec_ctrl_base is set
>> to mask and the check always causes a return for subsequent CPUs that are
>> brought up.
>
> The above code merily handles the switch between SMT and non-SMT mode,
> because there all other online CPUs need to be updated, but after that each
> upcoming CPU calls x86_spec_ctrl_setup_ap() which will write the MSR. So
> it's all good.

Yup, sorry for the noise. Trying to test different scenarios using some
code hacks and put them in the wrong place, so wasn't triggering the
WRMSR in x86_spec_ctrl_setup_ap().

Thanks,
Tom

>
> Thanks,
>
> tglx
>

Subject: [tip:x86/pti] x86/speculation: Prepare arch_smt_update() for PRCTL mode

Commit-ID: 6893a959d7fdebbab5f5aa112c277d5a44435ba1
Gitweb: https://git.kernel.org/tip/6893a959d7fdebbab5f5aa112c277d5a44435ba1
Author: Thomas Gleixner <[email protected]>
AuthorDate: Sun, 25 Nov 2018 19:33:52 +0100
Committer: Thomas Gleixner <[email protected]>
CommitDate: Wed, 28 Nov 2018 11:57:13 +0100

x86/speculation: Prepare arch_smt_update() for PRCTL mode

The upcoming fine grained per task STIBP control needs to be updated on CPU
hotplug as well.

Split out the code which controls the strict mode so the prctl control code
can be added later. Mark the SMP function call argument __unused while at it.

Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Jiri Kosina <[email protected]>
Cc: Tom Lendacky <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: David Woodhouse <[email protected]>
Cc: Tim Chen <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Casey Schaufler <[email protected]>
Cc: Asit Mallick <[email protected]>
Cc: Arjan van de Ven <[email protected]>
Cc: Jon Masters <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Greg KH <[email protected]>
Cc: Dave Stewart <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]


---
arch/x86/kernel/cpu/bugs.c | 46 +++++++++++++++++++++++++---------------------
1 file changed, 25 insertions(+), 21 deletions(-)

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 29f40a92f5a8..9cab538e10f1 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -530,40 +530,44 @@ specv2_set_mode:
arch_smt_update();
}

-static bool stibp_needed(void)
+static void update_stibp_msr(void * __unused)
{
- /* Enhanced IBRS makes using STIBP unnecessary. */
- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
- return false;
-
- /* Check for strict user mitigation mode */
- return spectre_v2_user == SPECTRE_V2_USER_STRICT;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

-static void update_stibp_msr(void *info)
+/* Update x86_spec_ctrl_base in case SMT state changed. */
+static void update_stibp_strict(void)
{
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+
+ if (sched_smt_active())
+ mask |= SPEC_CTRL_STIBP;
+
+ if (mask == x86_spec_ctrl_base)
+ return;
+
+ pr_info("Update user space SMT mitigation: STIBP %s\n",
+ mask & SPEC_CTRL_STIBP ? "always-on" : "off");
+ x86_spec_ctrl_base = mask;
+ on_each_cpu(update_stibp_msr, NULL, 1);
}

void arch_smt_update(void)
{
- u64 mask;
-
- if (!stibp_needed())
+ /* Enhanced IBRS implies STIBP. No update required. */
+ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return;

mutex_lock(&spec_ctrl_mutex);

- mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
- if (sched_smt_active())
- mask |= SPEC_CTRL_STIBP;
-
- if (mask != x86_spec_ctrl_base) {
- pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
- mask & SPEC_CTRL_STIBP ? "Enabling" : "Disabling");
- x86_spec_ctrl_base = mask;
- on_each_cpu(update_stibp_msr, NULL, 1);
+ switch (spectre_v2_user) {
+ case SPECTRE_V2_USER_NONE:
+ break;
+ case SPECTRE_V2_USER_STRICT:
+ update_stibp_strict();
+ break;
}
+
mutex_unlock(&spec_ctrl_mutex);
}