2023-08-09 08:32:06

by Peter Zijlstra

[permalink] [raw]
Subject: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

Since it is now readily apparent that the two SRSO
untrain_ret+return_thunk variants are exactly the same mechanism as
the existing (retbleed) zen untrain_ret+return_thunk, add them to the
existing retbleed options.

This avoids all confusion as to which of the three -- if any -- ought
to be active, there's a single point of control and no funny
interactions.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
arch/x86/kernel/cpu/bugs.c | 87 +++++++++++++++++++++++++++++++++++++++------
1 file changed, 76 insertions(+), 11 deletions(-)

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -748,6 +748,8 @@ enum spectre_v2_mitigation spectre_v2_en
enum retbleed_mitigation {
RETBLEED_MITIGATION_NONE,
RETBLEED_MITIGATION_UNRET,
+ RETBLEED_MITIGATION_UNRET_SRSO,
+ RETBLEED_MITIGATION_UNRET_SRSO_ALIAS,
RETBLEED_MITIGATION_IBPB,
RETBLEED_MITIGATION_IBRS,
RETBLEED_MITIGATION_EIBRS,
@@ -758,17 +760,21 @@ enum retbleed_mitigation_cmd {
RETBLEED_CMD_OFF,
RETBLEED_CMD_AUTO,
RETBLEED_CMD_UNRET,
+ RETBLEED_CMD_UNRET_SRSO,
+ RETBLEED_CMD_UNRET_SRSO_ALIAS,
RETBLEED_CMD_IBPB,
RETBLEED_CMD_STUFF,
};

static const char * const retbleed_strings[] = {
- [RETBLEED_MITIGATION_NONE] = "Vulnerable",
- [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
- [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
- [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
- [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
- [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
+ [RETBLEED_MITIGATION_NONE] = "Vulnerable",
+ [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
+ [RETBLEED_MITIGATION_UNRET_SRSO] = "Mitigation: srso untrained return thunk",
+ [RETBLEED_MITIGATION_UNRET_SRSO_ALIAS] = "Mitigation: srso alias untrained return thunk",
+ [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
+ [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
+ [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
+ [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
@@ -796,6 +802,10 @@ static int __init retbleed_parse_cmdline
retbleed_cmd = RETBLEED_CMD_AUTO;
} else if (!strcmp(str, "unret")) {
retbleed_cmd = RETBLEED_CMD_UNRET;
+ } else if (!strcmp(str, "srso")) {
+ retbleed_cmd = RETBLEED_CMD_UNRET_SRSO;
+ } else if (!strcmp(str, "srso_alias")) {
+ retbleed_cmd = RETBLEED_CMD_UNRET_SRSO_ALIAS;
} else if (!strcmp(str, "ibpb")) {
retbleed_cmd = RETBLEED_CMD_IBPB;
} else if (!strcmp(str, "stuff")) {
@@ -817,21 +827,54 @@ early_param("retbleed", retbleed_parse_c

#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
+#define RETBLEED_SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."

static void __init retbleed_select_mitigation(void)
{
bool mitigate_smt = false;
+ bool has_microcode = false;

- if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
+ if ((!boot_cpu_has_bug(X86_BUG_RETBLEED) && !boot_cpu_has_bug(X86_BUG_SRSO)) ||
+ cpu_mitigations_off())
return;

+ if (boot_cpu_has_bug(X86_BUG_SRSO)) {
+ has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
+ if (!has_microcode) {
+ pr_warn("IBPB-extending microcode not applied!\n");
+ pr_warn(RETBLEED_SRSO_NOTICE);
+ } else {
+ /*
+ * Enable the synthetic (even if in a real CPUID leaf)
+ * flags for guests.
+ */
+ setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+ setup_force_cpu_cap(X86_FEATURE_SBPB);
+
+ /*
+ * Zen1/2 with SMT off aren't vulnerable after the right
+ * IBPB microcode has been applied.
+ */
+ if ((boot_cpu_data.x86 < 0x19) &&
+ (cpu_smt_control == CPU_SMT_DISABLED))
+ setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+ }
+ }
+
switch (retbleed_cmd) {
case RETBLEED_CMD_OFF:
return;

case RETBLEED_CMD_UNRET:
+ case RETBLEED_CMD_UNRET_SRSO:
+ case RETBLEED_CMD_UNRET_SRSO_ALIAS:
if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
- retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ if (retbleed_cmd == RETBLEED_CMD_UNRET)
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ if (retbleed_cmd == RETBLEED_CMD_UNRET_SRSO)
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO;
+ if (retbleed_cmd == RETBLEED_CMD_UNRET_SRSO_ALIAS)
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO_ALIAS;
} else {
pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
goto do_cmd_auto;
@@ -843,6 +886,8 @@ static void __init retbleed_select_mitig
pr_err("WARNING: CPU does not support IBPB.\n");
goto do_cmd_auto;
} else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
+ if (boot_cpu_has_bug(X86_BUG_SRSO) && !has_microcode)
+ pr_err("IBPB-extending microcode not applied; SRSO NOT mitigated\n");
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
} else {
pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
@@ -870,8 +915,17 @@ static void __init retbleed_select_mitig
default:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
- if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
- retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
+ if (boot_cpu_has_bug(X86_BUG_RETBLEED))
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+
+ if (boot_cpu_has_bug(X86_BUG_SRSO) && !boot_cpu_has(X86_FEATURE_SRSO_NO)) {
+ if (boot_cpu_data.x86 == 0x19)
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO_ALIAS;
+ else
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO;
+ }
+ }
else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
}
@@ -886,9 +940,20 @@ static void __init retbleed_select_mitig
}

switch (retbleed_mitigation) {
+ case RETBLEED_MITIGATION_UNRET_SRSO_ALIAS:
+ setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+ x86_return_thunk = srso_alias_return_thunk;
+ goto do_rethunk;
+
+ case RETBLEED_MITIGATION_UNRET_SRSO:
+ setup_force_cpu_cap(X86_FEATURE_SRSO);
+ x86_return_thunk = srso_return_thunk;
+ goto do_rethunk;
+
case RETBLEED_MITIGATION_UNRET:
- setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);
+do_rethunk:
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);

if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)




2023-08-09 14:10:20

by Josh Poimboeuf

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Wed, Aug 09, 2023 at 09:12:24AM +0200, Peter Zijlstra wrote:
> static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
> @@ -796,6 +802,10 @@ static int __init retbleed_parse_cmdline
> retbleed_cmd = RETBLEED_CMD_AUTO;
> } else if (!strcmp(str, "unret")) {
> retbleed_cmd = RETBLEED_CMD_UNRET;
> + } else if (!strcmp(str, "srso")) {
> + retbleed_cmd = RETBLEED_CMD_UNRET_SRSO;
> + } else if (!strcmp(str, "srso_alias")) {
> + retbleed_cmd = RETBLEED_CMD_UNRET_SRSO_ALIAS;

It doesn't make sense for "srso_alias" to be a separate cmdline option,
as that option is a model-dependent variant of the SRSO mitigation.

> + if (boot_cpu_has_bug(X86_BUG_SRSO)) {
> + has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
> + if (!has_microcode) {
> + pr_warn("IBPB-extending microcode not applied!\n");
> + pr_warn(RETBLEED_SRSO_NOTICE);
> + } else {
> + /*
> + * Enable the synthetic (even if in a real CPUID leaf)
> + * flags for guests.
> + */
> + setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
> + setup_force_cpu_cap(X86_FEATURE_SBPB);
> +
> + /*
> + * Zen1/2 with SMT off aren't vulnerable after the right
> + * IBPB microcode has been applied.
> + */
> + if ((boot_cpu_data.x86 < 0x19) &&
> + (cpu_smt_control == CPU_SMT_DISABLED))
> + setup_force_cpu_cap(X86_FEATURE_SRSO_NO);

The rumor I heard was that SMT had to be disabled specifically by BIOS
for this condition to be true. Can somebody from AMD confirm?

The above check doesn't even remotely do that, in fact it does the
opposite. Regardless, at the very least it should be checking
cpu_smt_possible(). I guess that would be a separate patch as it's a
bug fix.

> @@ -870,8 +915,17 @@ static void __init retbleed_select_mitig
> default:
> if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
> boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
> - if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
> - retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
> + if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
> + if (boot_cpu_has_bug(X86_BUG_RETBLEED))
> + retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
> +
> + if (boot_cpu_has_bug(X86_BUG_SRSO) && !boot_cpu_has(X86_FEATURE_SRSO_NO)) {
> + if (boot_cpu_data.x86 == 0x19)
> + retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO_ALIAS;
> + else
> + retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO;

It would be great to get confirmation from somebody at AMD that the SRSO
mitigations supersede the Retbleed one, i.e., that the SRSO mitigations
also fix Retbleed.

Yes, the original patches might imply that, but they also seem confused
since they do the double untraining for both Retbleed and SRSO. And I
was given conflicting information.

Even better, we could really use some official AMD documentation for
this mitigation, since right now it all feels very unofficial and
unsubstantiated.

> + }
> + }
> else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
> retbleed_mitigation = RETBLEED_MITIGATION_IBPB;

Here should have the microcode check too:

if (boot_cpu_has_bug(X86_BUG_SRSO) && !has_microcode)
pr_err("IBPB-extending microcode not applied; SRSO NOT mitigated\n");


--
Josh

2023-08-09 14:25:42

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Wed, Aug 09, 2023 at 09:42:33AM -0400, Josh Poimboeuf wrote:
> On Wed, Aug 09, 2023 at 09:12:24AM +0200, Peter Zijlstra wrote:
> > static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
> > @@ -796,6 +802,10 @@ static int __init retbleed_parse_cmdline
> > retbleed_cmd = RETBLEED_CMD_AUTO;
> > } else if (!strcmp(str, "unret")) {
> > retbleed_cmd = RETBLEED_CMD_UNRET;
> > + } else if (!strcmp(str, "srso")) {
> > + retbleed_cmd = RETBLEED_CMD_UNRET_SRSO;
> > + } else if (!strcmp(str, "srso_alias")) {
> > + retbleed_cmd = RETBLEED_CMD_UNRET_SRSO_ALIAS;
>
> It doesn't make sense for "srso_alias" to be a separate cmdline option,
> as that option is a model-dependent variant of the SRSO mitigation.

so what I did with retbleed, and what should be fixed here too (I
forgot) is run with:

retbleed=force,unret

on any random machine (typically Intel, because I have a distinct lack
of AMD machines :-() and look at the live kernel image to see if all the
patching worked.

I suppose I should add:

setup_force_cpu_bug(X86_BUG_SRSO);

to the 'force' option, then:

retbleed=force,srso_alias

should function the same, irrespective of the hardware.

I'm also of the opinion that the kernel should do as told, even if it
doesn't make sense. If you tell it nonsense, you get to keep the pieces.

So in that light, yes I think we should have separate options.

> > + if (boot_cpu_has_bug(X86_BUG_SRSO)) {
> > + has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
> > + if (!has_microcode) {
> > + pr_warn("IBPB-extending microcode not applied!\n");
> > + pr_warn(RETBLEED_SRSO_NOTICE);
> > + } else {
> > + /*
> > + * Enable the synthetic (even if in a real CPUID leaf)
> > + * flags for guests.
> > + */
> > + setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
> > + setup_force_cpu_cap(X86_FEATURE_SBPB);
> > +
> > + /*
> > + * Zen1/2 with SMT off aren't vulnerable after the right
> > + * IBPB microcode has been applied.
> > + */
> > + if ((boot_cpu_data.x86 < 0x19) &&
> > + (cpu_smt_control == CPU_SMT_DISABLED))
> > + setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
>
> The rumor I heard was that SMT had to be disabled specifically by BIOS
> for this condition to be true. Can somebody from AMD confirm?

David Kaplan is on Cc, I was hoping he would enlighten us -- once he's
back from PTO.

> The above check doesn't even remotely do that, in fact it does the
> opposite. Regardless, at the very least it should be checking
> cpu_smt_possible(). I guess that would be a separate patch as it's a
> bug fix.
>
> > @@ -870,8 +915,17 @@ static void __init retbleed_select_mitig
> > default:
> > if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
> > boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
> > - if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
> > - retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
> > + if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
> > + if (boot_cpu_has_bug(X86_BUG_RETBLEED))
> > + retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
> > +
> > + if (boot_cpu_has_bug(X86_BUG_SRSO) && !boot_cpu_has(X86_FEATURE_SRSO_NO)) {
> > + if (boot_cpu_data.x86 == 0x19)
> > + retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO_ALIAS;
> > + else
> > + retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO;
>
> It would be great to get confirmation from somebody at AMD that the SRSO
> mitigations supersede the Retbleed one, i.e., that the SRSO mitigations
> also fix Retbleed.

They should, the discussions we had back then explained the Zen1/2
retbleed case in quite some detail and the srso case matches that
exactly with the movabs. A larger instruction is used because we need a
larger embedded sequence of instructions, but otherwise it is identical.

The comments provided for srso_alias state the BTB is untrained using
the explicit aliasing.

That is to say, AFAIU any of this, yes both srso options untrain the BTB
and mitigate the earlier retbleed thing.

SRSO then goes one step further with the RAP/RSB clobber.

> Yes, the original patches might imply that, but they also seem confused
> since they do the double untraining for both Retbleed and SRSO. And I
> was given conflicting information.

Which makes no bloody sense, since the double untrain is for another
address, which is not at all used.

> Even better, we could really use some official AMD documentation for
> this mitigation, since right now it all feels very unofficial and
> unsubstantiated.

Seconded. David is there anything you can do to clarify this stuff?

>
> > + }
> > + }
> > else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
> > retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
>
> Here should have the microcode check too:
>
> if (boot_cpu_has_bug(X86_BUG_SRSO) && !has_microcode)
> pr_err("IBPB-extending microcode not applied; SRSO NOT mitigated\n");

That earlier printk is unconditional of the selected mitigation, you
really want it printed again?


2023-08-09 14:46:00

by Josh Poimboeuf

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Wed, Aug 09, 2023 at 03:31:20PM +0100, [email protected] wrote:
> On 09/08/2023 2:42 pm, Josh Poimboeuf wrote:
> > On Wed, Aug 09, 2023 at 09:12:24AM +0200, Peter Zijlstra wrote:
> >> + if (boot_cpu_has_bug(X86_BUG_SRSO)) {
> >> + has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
> >> + if (!has_microcode) {
> >> + pr_warn("IBPB-extending microcode not applied!\n");
> >> + pr_warn(RETBLEED_SRSO_NOTICE);
> >> + } else {
> >> + /*
> >> + * Enable the synthetic (even if in a real CPUID leaf)
> >> + * flags for guests.
> >> + */
> >> + setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
> >> + setup_force_cpu_cap(X86_FEATURE_SBPB);
> >> +
> >> + /*
> >> + * Zen1/2 with SMT off aren't vulnerable after the right
> >> + * IBPB microcode has been applied.
> >> + */
> >> + if ((boot_cpu_data.x86 < 0x19) &&
> >> + (cpu_smt_control == CPU_SMT_DISABLED))
> >> + setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
> > The rumor I heard was that SMT had to be disabled specifically by BIOS
> > for this condition to be true. Can somebody from AMD confirm?
>
> It's Complicated.
>
> On Zen1/2, uarch constraints mitigate SRSO when the core is in 1T mode,
> where such an attack would succeed in 2T mode.  Specifically, it is
> believed that the SRSO infinite-call-loop can poison more than 16
> RSB/RAS/RAP entries, but can't poison 32 entries.
>
> The RSB dynamically repartitions depending on the idleness of the
> sibling.  Therefore, offlining/parking the siblings should make you
> safe.  (Assuming you can handwave away the NMI hitting the parked thread
> case as outside of an attackers control.)
>
>
> In Xen, I decided that synthesizing SRSO_NO was only safe when SMT was
> disabled by firmware, because that's the only case where it can't cease
> being true later by admin action.
>
> If it were just Xen's safety that mattered here it might be ok to allow
> the OS SMT=0 cases, but this bit needs to get into guests, you can't
> credibly tell the guest SRSO_NO and then make it unsafe at a later point.

Thanks for that explanation. It sounds like we can use
!cpu_smt_possible() here.

--
Josh

2023-08-09 15:19:25

by Josh Poimboeuf

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Wed, Aug 09, 2023 at 04:06:03PM +0200, Peter Zijlstra wrote:
> On Wed, Aug 09, 2023 at 09:42:33AM -0400, Josh Poimboeuf wrote:
> > On Wed, Aug 09, 2023 at 09:12:24AM +0200, Peter Zijlstra wrote:
> > > static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
> > > @@ -796,6 +802,10 @@ static int __init retbleed_parse_cmdline
> > > retbleed_cmd = RETBLEED_CMD_AUTO;
> > > } else if (!strcmp(str, "unret")) {
> > > retbleed_cmd = RETBLEED_CMD_UNRET;
> > > + } else if (!strcmp(str, "srso")) {
> > > + retbleed_cmd = RETBLEED_CMD_UNRET_SRSO;
> > > + } else if (!strcmp(str, "srso_alias")) {
> > > + retbleed_cmd = RETBLEED_CMD_UNRET_SRSO_ALIAS;
> >
> > It doesn't make sense for "srso_alias" to be a separate cmdline option,
> > as that option is a model-dependent variant of the SRSO mitigation.
>
> so what I did with retbleed, and what should be fixed here too (I
> forgot) is run with:
>
> retbleed=force,unret
>
> on any random machine (typically Intel, because I have a distinct lack
> of AMD machines :-() and look at the life kernel image to see if all the
> patching worked.
>
> I suppose I should add:
>
> setup_force_cpu_bug(X86_BUG_SRSO);
>
> to the 'force' option, then:
>
> retbleed=force,srso_alias
>
> should function the same, irrespective of the hardware.
>
> I'm also of the opinion that the kernel should do as told, even if it
> doesn't make sense. If you tell it nonsense, you get to keep the pieces.
>
> So in that light, yes I think we should have separate options.

What if I want the SRSO mitigation regardless of CPU model?

> > > @@ -870,8 +915,17 @@ static void __init retbleed_select_mitig
> > > default:
> > > if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
> > > boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
> > > - if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
> > > - retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
> > > + if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
> > > + if (boot_cpu_has_bug(X86_BUG_RETBLEED))
> > > + retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
> > > +
> > > + if (boot_cpu_has_bug(X86_BUG_SRSO) && !boot_cpu_has(X86_FEATURE_SRSO_NO)) {
> > > + if (boot_cpu_data.x86 == 0x19)
> > > + retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO_ALIAS;
> > > + else
> > > + retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO;
> >
> > It would be great to get confirmation from somebody at AMD that the SRSO
> > mitigations supersede the Retbleed one, i.e., that the SRSO mitigations
> > also fix Retbleed.
>
> They should, the discussions we had back then explained the Zen1/2
> retbleed case in quite some detail and the srso case matches that
> exactly with the movabs. A larger instruction is used because we need a
> larger embedded sequence of instructions, but otherwise it is identical.
>
> The comments provided for srso_alias state the BTB is untrained using
> the explicit aliasing.
>
> That is to say, AFAIU any of this, yes both srso options untrain the BTB
> and mitigate the earlier retbleed thing.
>
> SRSO then goes one step further with the RAP/RSB clobber.

Ah, nice. Please add that information somewhere (e.g., one of the
commit logs).

> > > + }
> > > + }
> > > else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
> > > retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
> >
> > Here should have the microcode check too:
> >
> > if (boot_cpu_has_bug(X86_BUG_SRSO) && !has_microcode)
> > pr_err("IBPB-extending microcode not applied; SRSO NOT mitigated\n");
>
> That earlier printk is unconditional of the selected mitigation, you
> really want it printed again?

Hm, if you don't want it printed twice then remove it from the
RETBLEED_CMD_IBPB case too.

--
Josh

2023-08-09 16:02:33

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Wed, Aug 09, 2023 at 10:28:47AM -0400, Josh Poimboeuf wrote:
> On Wed, Aug 09, 2023 at 04:06:03PM +0200, Peter Zijlstra wrote:
> > On Wed, Aug 09, 2023 at 09:42:33AM -0400, Josh Poimboeuf wrote:
> > > On Wed, Aug 09, 2023 at 09:12:24AM +0200, Peter Zijlstra wrote:
> > > > static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
> > > > @@ -796,6 +802,10 @@ static int __init retbleed_parse_cmdline
> > > > retbleed_cmd = RETBLEED_CMD_AUTO;
> > > > } else if (!strcmp(str, "unret")) {
> > > > retbleed_cmd = RETBLEED_CMD_UNRET;
> > > > + } else if (!strcmp(str, "srso")) {
> > > > + retbleed_cmd = RETBLEED_CMD_UNRET_SRSO;
> > > > + } else if (!strcmp(str, "srso_alias")) {
> > > > + retbleed_cmd = RETBLEED_CMD_UNRET_SRSO_ALIAS;
> > >
> > > It doesn't make sense for "srso_alias" to be a separate cmdline option,
> > > as that option is a model-dependent variant of the SRSO mitigation.
> >
> > so what I did with retbleed, and what should be fixed here too (I
> > forgot) is run with:
> >
> > retbleed=force,unret
> >
> > on any random machine (typically Intel, because I have a distinct lack
> > of AMD machines :-() and look at the life kernel image to see if all the
> > patching worked.
> >
> > I suppose I should add:
> >
> > setup_force_cpu_bug(X86_BUG_SRSO);
> >
> > to the 'force' option, then:
> >
> > retbleed=force,srso_alias
> >
> > should function the same, irrespective of the hardware.
> >
> > I'm also of the opinion that the kernel should do as told, even if it
> > doesn't make sense. If you tell it nonsense, you get to keep the pieces.
> >
> > So in that light, yes I think we should have separate options.
>
> What if I want the SRSO mitigation regardless of CPU model?

You mean, you want to say 'any of the SRSO things, you pick the right
version?'

Which means the user has an AMD machine, but can't be arsed to figure
out which and somehow doesn't want to use AUTO?




> > They should, the discussions we had back then explained the Zen1/2
> > retbleed case in quite some detail and the srso case matches that
> > exactly with the movabs. A larger instruction is used because we need a
> > larger embedded sequence of instructions, but otherwise it is identical.
> >
> > The comments provided for srso_alias state the BTB is untrained using
> > the explicit aliasing.
> >
> > That is to say, AFAIU any of this, yes both srso options untrain the BTB
> > and mitigate the earlier retbleed thing.
> >
> > SRSO then goes one step further with the RAP/RSB clobber.
>
> Ah, nice. Please add that information somewhere (e.g., one of the
> commit logs).

The comment above zen_untrain_ret (or retbleed_untrain_ret as you've
christened it) not clear enough?

/*
* Safety details here pertain to the AMD Zen{1,2} microarchitecture:
* 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
* alignment within the BTB.
* 2) The instruction at retbleed_untrain_ret must contain, and not
* end with, the 0xc3 byte of the RET.
* 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
* from re-poisoning the BTB prediction.
*/


Hmm, when compared to:

.align 64
.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
ANNOTATE_NOENDBR
.byte 0x48, 0xb8

SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
add $8, %_ASM_SP
ret
int3
int3
int3
/* end of movabs */
lfence
jmp srso_return_thunk
int3
SYM_CODE_END(srso_safe_ret)
SYM_FUNC_END(srso_untrain_ret)

Then we match 2, srso_safe_ret is strictly *inside* the movabs, that is,
it is not the first nor the last byte of the outer instruction.

However, we fail at 1, 'add $8, %rsp' sits at the boundary, not ret.

Anybody, help ?

2023-08-09 17:23:56

by Josh Poimboeuf

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Wed, Aug 09, 2023 at 05:08:52PM +0200, Peter Zijlstra wrote:
> On Wed, Aug 09, 2023 at 10:28:47AM -0400, Josh Poimboeuf wrote:
> > On Wed, Aug 09, 2023 at 04:06:03PM +0200, Peter Zijlstra wrote:
> > > On Wed, Aug 09, 2023 at 09:42:33AM -0400, Josh Poimboeuf wrote:
> > > > On Wed, Aug 09, 2023 at 09:12:24AM +0200, Peter Zijlstra wrote:
> > > > > static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
> > > > > @@ -796,6 +802,10 @@ static int __init retbleed_parse_cmdline
> > > > > retbleed_cmd = RETBLEED_CMD_AUTO;
> > > > > } else if (!strcmp(str, "unret")) {
> > > > > retbleed_cmd = RETBLEED_CMD_UNRET;
> > > > > + } else if (!strcmp(str, "srso")) {
> > > > > + retbleed_cmd = RETBLEED_CMD_UNRET_SRSO;
> > > > > + } else if (!strcmp(str, "srso_alias")) {
> > > > > + retbleed_cmd = RETBLEED_CMD_UNRET_SRSO_ALIAS;
> > > >
> > > > It doesn't make sense for "srso_alias" to be a separate cmdline option,
> > > > as that option is a model-dependent variant of the SRSO mitigation.
> > >
> > > so what I did with retbleed, and what should be fixed here too (I
> > > forgot) is run with:
> > >
> > > retbleed=force,unret
> > >
> > > on any random machine (typically Intel, because I have a distinct lack
> > > of AMD machines :-() and look at the life kernel image to see if all the
> > > patching worked.
> > >
> > > I suppose I should add:
> > >
> > > setup_force_cpu_bug(X86_BUG_SRSO);
> > >
> > > to the 'force' option, then:
> > >
> > > retbleed=force,srso_alias
> > >
> > > should function the same, irrespective of the hardware.
> > >
> > > I'm also of the opinion that the kernel should do as told, even if it
> > > doesn't make sense. If you tell it nonsense, you get to keep the pieces.
> > >
> > > So in that light, yes I think we should have separate options.
> >
> > What if I want the SRSO mitigation regardless of CPU model?
>
> You mean, you want to say 'any of the SRSO things, you pick the right
> version?'
>
> Which means the user has an AMD machine, but can't be arsed to figure
> out which and somehow doesn't want to use AUTO?

Well, nobody's going to use any of these options anyway so it doesn't
matter regardless.

> > > They should, the discussions we had back then explained the Zen1/2
> > > retbleed case in quite some detail and the srso case matches that
> > > exactly with the movabs. A larger instruction is used because we need a
> > > larger embedded sequence of instructions, but otherwise it is identical.
> > >
> > > The comments provided for srso_alias state the BTB is untrained using
> > > the explicit aliasing.
> > >
> > > That is to say, AFAIU any of this, yes both srso options untrain the BTB
> > > and mitigate the earlier retbleed thing.
> > >
> > > SRSO then goes one step further with the RAP/RSB clobber.
> >
> > Ah, nice. Please add that information somewhere (e.g., one of the
> > commit logs).
>
> The comment above zen_untrain_ret (or retbleed_untrain_ret as you've
> christened it) not clear enough?
>
> /*
> * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
> * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
> * alignment within the BTB.
> * 2) The instruction at retbleed_untrain_ret must contain, and not
> * end with, the 0xc3 byte of the RET.
> * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
> * from re-poisoning the BTB prediction.
> */

To me, it's only clear now that you connected the dots.

> Hmm, when compared to:
>
> .align 64
> .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
> SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
> ANNOTATE_NOENDBR
> .byte 0x48, 0xb8
>
> SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
> add $8, %_ASM_SP
> ret
> int3
> int3
> int3
> /* end of movabs */
> lfence
> jmp srso_return_thunk
> int3
> SYM_CODE_END(srso_safe_ret)
> SYM_FUNC_END(srso_untrain_ret)
>
> Then we match 2, srso_safe_ret is strictly *inside* the movabs, that is,
> it is not the first nor the last byte of the outer instruction.
>
> However, we fail at 1, 'add $8, %rsp' sits at the boundary, not ret.
>
> Anybody, help ?

Um... yeah, doesn't look right.

--
Josh

2023-08-10 16:21:55

by Borislav Petkov

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Wed, Aug 09, 2023 at 09:12:24AM +0200, Peter Zijlstra wrote:
> Since it is now readily apparent that the two SRSO
> untrain_ret+return_thunk variants are exactly the same mechanism as
> the existing (retbleed) zen untrain_ret+return_thunk, add them to the
> existing retbleed options.

Except that Zen3/4 are not affected by retbleed, according to my current
state of information.

I don't like this lumping together of the issues even if their
mitigations are more or less similar.

--
Regards/Gruss,
Boris.

https://people.kernel.org/tglx/notes-about-netiquette

2023-08-10 16:34:20

by Josh Poimboeuf

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Thu, Aug 10, 2023 at 05:44:04PM +0200, Borislav Petkov wrote:
> On Wed, Aug 09, 2023 at 09:12:24AM +0200, Peter Zijlstra wrote:
> > Since it is now readily apparent that the two SRSO
> > untrain_ret+return_thunk variants are exactly the same mechanism as
> > the existing (retbleed) zen untrain_ret+return_thunk, add them to the
> > existing retbleed options.
>
> Except that Zen3/4 are not affected by retbleed, according to my current
> state of information.
>
> I don't like this lumping together of the issues even if their
> mitigations are more or less similar.

I tend to agree that SRSO is a new issue and should have its own sysfs
and cmdline options (though a separate CONFIG option is overkill IMO).

The mitigations are unfortunately intertwined, but we've been in that
situation several times before (e.g., spectre_v2 + intel retbleed).

--
Josh

2023-08-11 11:45:44

by Borislav Petkov

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Thu, Aug 10, 2023 at 12:10:03PM -0400, Josh Poimboeuf wrote:
> I tend to agree that SRSO is a new issue and should have its own sysfs
> and cmdline options (though a separate CONFIG option is overkill IMO).

Yeah, there's a patch floating around adding a config option for every
mitigation. Apparently people want to build-time disable them all.

> The mitigations are unfortunately intertwined, but we've been in that
> situation several times before (e.g., spectre_v2 + intel retbleed).

Yap.

And if you recall, keeping Intel Retbleed from AMD Retbleed apart was
already a PITA at the time so adding yet another one behind that flag
would be madness.

--
Regards/Gruss,
Boris.

https://people.kernel.org/tglx/notes-about-netiquette

2023-08-12 11:51:39

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Fri, Aug 11, 2023 at 12:27:48PM +0200, Borislav Petkov wrote:
> On Thu, Aug 10, 2023 at 12:10:03PM -0400, Josh Poimboeuf wrote:
> > I tend to agree that SRSO is a new issue and should have its own sysfs
> > and cmdline options (though a separate CONFIG option is overkill IMO).
>
> Yeah, there's a patch floating around adding a config option for every
> mitigation. Apparently people want to build-time disable them all.

So I really hate that .Kconfig explosion, that's the very last thing we
need :-( More random options that can cause build time trouble.

I might see value in one knob that kills all speculation nonsense at
build time, but not one per mitigation, that's madness.

2023-08-12 12:23:22

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Thu, Aug 10, 2023 at 12:10:03PM -0400, Josh Poimboeuf wrote:
> On Thu, Aug 10, 2023 at 05:44:04PM +0200, Borislav Petkov wrote:
> > On Wed, Aug 09, 2023 at 09:12:24AM +0200, Peter Zijlstra wrote:
> > > Since it is now readily apparent that the two SRSO
> > > untrain_ret+return_thunk variants are exactly the same mechanism as
> > > the existing (retbleed) zen untrain_ret+return_thunk, add them to the
> > > existing retbleed options.
> >
> > Except that Zen3/4 are not affected by retbleed, according to my current
> > state of information.
> >
> > I don't like this lumping together of the issues even if their
> > mitigations are more or less similar.
>
> I tend to agree that SRSO is a new issue and should have its own sysfs
> and cmdline options (though a separate CONFIG option is overkill IMO).
>
> The mitigations are unfortunately intertwined, but we've been in that
> situation several times before (e.g., spectre_v2 + intel retbleed).

That very experience wants me to avoid doing it again :-/ But you all
really want to keep the parameter, can we at least rename it something
you can remember how to type, like 'srso=' instead of this horrific
'spec_rstack_overflow=' thing?

2023-08-12 12:42:01

by Borislav Petkov

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Sat, Aug 12, 2023 at 01:24:04PM +0200, Peter Zijlstra wrote:
> That very experience wants me to avoid doing it again :-/ But you all
> really want to keep the parameter, can we at least rename it something
> you can remember how to type, like 'srso=' instead of this horrific
> 'spec_rstack_overflow=' thing?

I'm all for short'n'sweet but last time I did that, Linus said we should
have option names which aren't abbreviations that don't mean anything.

--
Regards/Gruss,
Boris.

https://people.kernel.org/tglx/notes-about-netiquette

2023-08-12 13:36:32

by Borislav Petkov

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Sat, Aug 12, 2023 at 01:32:56PM +0200, Peter Zijlstra wrote:
> So I really hate that .Kconfig explosion, that's the very last thing we
> need :-( More random options that can cause build time trouble.
>
> I might see value in one knob that kills all speculation nonsense at
> build time, but not one per mitigation, that's madness.

Yap, I hate them too. Linus wanted them last time and you added them.

And now we have a subset of mitigations with Kconfig options and
a subset without.

;-\

--
Regards/Gruss,
Boris.

https://people.kernel.org/tglx/notes-about-netiquette

2023-08-14 13:20:51

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

On Sat, Aug 12, 2023 at 02:10:34PM +0200, Borislav Petkov wrote:
> On Sat, Aug 12, 2023 at 01:24:04PM +0200, Peter Zijlstra wrote:
> > That very experience wants me to avoid doing it again :-/ But you all
> > really want to keep the parameter, can we at least rename it something
> > you can remember how to type, like 'srso=' instead of this horrific
> > 'spec_rstack_overflow=' thing?
>
> I'm all for short'n'sweet but last time I did that, Linus said we should
> have option names which aren't abbreviations which don't mean anything.

So:

1) do you guys really want to keep this extra argument?

2) if so, can we *PLEASE* rename it, because the current naming *SUCKS*.


I really don't see the need for an extra feature, we can trivially fold
the whole thing into retbleed, that's already 2 issues, might as well
make it 3 :-)


If we're going to rename, how about we simply call it 'inception' then
we haz both 'retbleed=' and 'inception=' and we're consistent here. Then
I'll make a compromise and do:

's/zen_\(untrain_ret\|return_thunk\)/btc_\1/g'

so that the actual mitigations have the official amd name on them --
however much I disagree with calling this branch-type-confusion.

2023-08-14 16:19:22

by David Laight

[permalink] [raw]
Subject: RE: [RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=

From: Peter Zijlstra
> Sent: 12 August 2023 12:33
>
> On Fri, Aug 11, 2023 at 12:27:48PM +0200, Borislav Petkov wrote:
> > On Thu, Aug 10, 2023 at 12:10:03PM -0400, Josh Poimboeuf wrote:
> > > I tend to agree that SRSO is a new issue and should have its own sysfs
> > > and cmdline options (though a separate CONFIG option is overkill IMO).
> >
> > Yeah, there's a patch floating around adding a config option for every
> > mitigation. Apparently people want to build-time disable them all.
>
> So I really hate that .Kconfig explosion, that's the very last thing we
> need :-( More random options that can cause build time trouble.
>
> I might see value in one knob that kills all speculation nonsense at
> build time, but not one per mitigation, that's maddness.

Or a very limited number of options for things that are
pretty separate.
Maybe the call/indirect jump are separate enough from the
return ones?

And one big KNOB to disable them all (DEPEND_ON !NO_MITIGATIONS ?).

David

-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)