Baby steps... this is just the basic CPUID and MSR definitions for AMD
and Intel, followed by the complete no-brainer: Disable KPTI on Intel
CPUs which set the RDCL_NO bit to say that they don't need it.
Roll in the microcode blacklist patch, as I'm about to send that out
again anyway, so it might as well be part of this. Treat it as an RFC,
perhaps.
v2: Cleanups, add AMD bits for STIBP/SPEC_CTRL.
v3: Add more CPUs to the exemption for KPTI and clean that up.
Add microcode blacklist (RFC)
David Woodhouse (6):
x86/cpufeatures: Add CPUID_7_EDX CPUID leaf
x86/cpufeatures: Add Intel feature bits for Speculation Control
x86/cpufeatures: Add AMD feature bits for Speculation Control
x86/msr: Add definitions for new speculation control MSRs
x86/pti: Do not enable PTI on processors which are not vulnerable to
Meltdown
x86/cpufeature: Blacklist SPEC_CTRL on early Spectre v2 microcodes
arch/x86/include/asm/cpufeature.h | 7 ++-
arch/x86/include/asm/cpufeatures.h | 14 ++++--
arch/x86/include/asm/disabled-features.h | 3 +-
arch/x86/include/asm/msr-index.h | 12 +++++
arch/x86/include/asm/required-features.h | 3 +-
arch/x86/kernel/cpu/common.c | 35 ++++++++++++++-
arch/x86/kernel/cpu/intel.c | 76 ++++++++++++++++++++++++++++++++
arch/x86/kernel/cpu/scattered.c | 2 -
8 files changed, 141 insertions(+), 11 deletions(-)
--
2.7.4
Add MSR and bit definitions for SPEC_CTRL, PRED_CMD and ARCH_CAPABILITIES.
See Intel's 336996-Speculative-Execution-Side-Channel-Mitigations.pdf
Signed-off-by: David Woodhouse <[email protected]>
---
arch/x86/include/asm/msr-index.h | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index fa11fb1..eb83ff1 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -39,6 +39,13 @@
/* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+
+#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+
#define MSR_PPIN_CTL 0x0000004e
#define MSR_PPIN 0x0000004f
@@ -57,6 +64,11 @@
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
#define MSR_MTRRcap 0x000000fe
+
+#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
--
2.7.4
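For illustration, a consumer of these definitions might look roughly like
this (a minimal sketch; the helper names here are made up and not part of
this series):

	#include <asm/msr.h>
	#include <asm/msr-index.h>

	/* Issue an indirect branch prediction barrier (IBPB).
	 * IA32_PRED_CMD is write-only; there is no state to read back. */
	static inline void ibpb_sketch(void)
	{
		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
	}

	/* Turn on restricted indirect branch speculation (IBRS). */
	static inline void ibrs_on_sketch(void)
	{
		wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
	}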
We don't refuse to load the affected microcodes; just refuse to use SPEC_CTRL
if they're detected.
AMD has a feature bit for "PRED_CMD only", which Intel didn't do. When disabling
SPEC_CTRL we can actually turn on that AMD bit to allow IBPB to still be used.
We handle the other AMD bits here too, because hypervisors *may* have been
exposing those bits even on Intel chips, for fine-grained control of what's
available.
Signed-off-by: David Woodhouse <[email protected]>
---
arch/x86/kernel/cpu/intel.c | 76 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 76 insertions(+)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b720dac..f5c7f61 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -102,6 +102,64 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}
+/*
+ * Early microcode releases for the Spectre v2 mitigation were broken.
+ * Information taken from:
+ * • https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+ * • https://kb.vmware.com/s/article/52345
+ * • Microcode revisions observed in the wild
+ * • releasenote from 20180108 microcode release
+ */
+struct sku_microcode {
+ u8 model;
+ u8 stepping;
+ u32 microcode;
+};
+static const struct sku_microcode spectre_bad_microcodes[] = {
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
+ /* Corrected typo in Intel doc */
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
+ { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003C },
+ { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0x000000C2 },
+ { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0x000000C2 },
+ { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
+ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x0000001B },
+ { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 },
+ { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 },
+ { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
+ { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
+ { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
+ { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
+ { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x7000011 },
+ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x0000001B },
+ { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
+ /* Dropped repeat of KBL Desktop 906E9, 0x80 */
+ { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
+ /* Dropped repeat of SKX 50654, 0x200003c */
+ /* Updated in the 20180108 release; blacklist until we know otherwise */
+ { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 },
+ /* Observed in the wild */
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
+};
+
+static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+ if (c->x86_model == spectre_bad_microcodes[i].model &&
+ c->x86_mask == spectre_bad_microcodes[i].stepping)
+ return (c->microcode <= spectre_bad_microcodes[i].microcode);
+ }
+ return false;
+}
+
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
@@ -122,6 +180,24 @@ static void early_init_intel(struct cpuinfo_x86 *c)
if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
c->microcode = intel_get_microcode_revision();
+ if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
+ cpu_has(c, X86_FEATURE_STIBP) ||
+ cpu_has(c, X86_FEATURE_AMD_SPEC_CTRL) ||
+ cpu_has(c, X86_FEATURE_AMD_STIBP)) && bad_spectre_microcode(c)) {
+ pr_warn("Intel Spectre v2 broken microcode detected; disabling SPEC_CTRL\n");
+ /*
+ * Intel's X86_FEATURE_SPEC_CTRL says both MSRs are available.
+ * We can't leave that set, but we can turn on the AMD bit
+ * which advertises PRED_CMD alone. IBPB is believed to be OK.
+ */
+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL))
+ set_cpu_cap(c, X86_FEATURE_AMD_PRED_CMD);
+ clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL);
+ clear_cpu_cap(c, X86_FEATURE_STIBP);
+ clear_cpu_cap(c, X86_FEATURE_AMD_SPEC_CTRL);
+ clear_cpu_cap(c, X86_FEATURE_AMD_STIBP);
+ }
+
/*
* Atom erratum AAE44/AAF40/AAG38/AAH41:
*
--
2.7.4
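To see the matching rule in isolation: model and stepping must match
exactly, and any revision up to and including the listed one counts as
bad. A standalone userspace sketch (one entry lifted from the table
above; 0xC6 stands in for a hypothetical later, fixed revision):

	#include <stdbool.h>
	#include <stdio.h>

	struct sku { unsigned char model, stepping; unsigned int microcode; };

	/* INTEL_FAM6_SKYLAKE_DESKTOP is model 0x5E. */
	static const struct sku bad[] = {
		{ 0x5E, 0x03, 0x000000C2 },
	};

	static bool bad_spectre_microcode(unsigned char model,
					  unsigned char stepping,
					  unsigned int rev)
	{
		for (unsigned int i = 0; i < sizeof(bad) / sizeof(bad[0]); i++)
			if (model == bad[i].model && stepping == bad[i].stepping)
				return rev <= bad[i].microcode;
		return false;
	}

	int main(void)
	{
		printf("0xC2: %d\n", bad_spectre_microcode(0x5E, 3, 0xC2)); /* 1 */
		printf("0xC6: %d\n", bad_spectre_microcode(0x5E, 3, 0xC6)); /* 0 */
		return 0;
	}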
This is a pure feature bits leaf. We have two AVX512 feature bits in it
already which were handled as scattered bits, and I'm about to add three
more from this leaf for speculation control features.
Signed-off-by: David Woodhouse <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
---
arch/x86/include/asm/cpufeature.h | 7 +++++--
arch/x86/include/asm/cpufeatures.h | 8 +++++---
arch/x86/include/asm/disabled-features.h | 3 ++-
arch/x86/include/asm/required-features.h | 3 ++-
arch/x86/kernel/cpu/common.c | 1 +
arch/x86/kernel/cpu/scattered.c | 2 --
6 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index ea9a7dd..70eddb3 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -29,6 +29,7 @@ enum cpuid_leafs
CPUID_8000_000A_EDX,
CPUID_7_ECX,
CPUID_8000_0007_EBX,
+ CPUID_7_EDX,
};
#ifdef CONFIG_X86_FEATURE_NAMES
@@ -79,8 +80,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
REQUIRED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 19))
#define DISABLED_MASK_BIT_SET(feature_bit) \
( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
@@ -101,8 +103,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
DISABLED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 19))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 25b9375..7b25cf3 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -13,7 +13,7 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 18 /* N 32-bit words worth of info */
+#define NCAPINTS 19 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
/*
@@ -206,8 +206,6 @@
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
-#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
@@ -319,6 +317,10 @@
#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+
/*
* BUG word(s)
*/
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index e428e16..c6a3af1 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -71,6 +71,7 @@
#define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
#define DISABLED_MASK17 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define DISABLED_MASK18 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index d91ba04..fb3a6de 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -106,6 +106,7 @@
#define REQUIRED_MASK15 0
#define REQUIRED_MASK16 (NEED_LA57)
#define REQUIRED_MASK17 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define REQUIRED_MASK18 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 372ba3f..e5d66e9 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -745,6 +745,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
c->x86_capability[CPUID_7_0_EBX] = ebx;
c->x86_capability[CPUID_7_ECX] = ecx;
+ c->x86_capability[CPUID_7_EDX] = edx;
}
/* Extended state features: level 0x0000000d */
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index d0e6976..df11f5d 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -21,8 +21,6 @@ struct cpuid_bit {
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
- { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 },
- { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 },
{ X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
{ X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 },
{ X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
--
2.7.4
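With word 18 wired into get_cpu_cap(), a leaf 7 EDX bit is tested like
any other feature bit; e.g. (sketch) the formerly scattered bit now
resolves to a plain test of bit 2 in x86_capability[CPUID_7_EDX]:

	if (boot_cpu_has(X86_FEATURE_AVX512_4VNNIW))
		pr_info("AVX512_4VNNIW present\n");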
Some old Atoms, anything in family 5 or 4, and newer CPUs when they advertise
the IA32_ARCH_CAPABILITIES MSR and it has the RDCL_NO bit set, are not vulnerable.
Roll the AMD exemption into the x86_match_cpu() table too.
Based on suggestions from Dave Hansen and Alan Cox.
Signed-off-by: David Woodhouse <[email protected]>
---
arch/x86/kernel/cpu/common.c | 34 ++++++++++++++++++++++++++++++++--
1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e5d66e9..3bc8a1f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -47,6 +47,8 @@
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
@@ -853,6 +855,35 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#endif
}
+static const __initdata struct x86_cpu_id cpu_no_meltdown[] = {
+ { X86_VENDOR_AMD },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_ANY, 5 },
+ { X86_VENDOR_ANY, 4 },
+ {}
+};
+
+static bool __init early_cpu_vulnerable_meltdown(struct cpuinfo_x86 *c)
+{
+ u64 ia32_cap = 0;
+
+ if (x86_match_cpu(cpu_no_meltdown))
+ return false;
+
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ /* Rogue Data Cache Load? No! */
+ if (ia32_cap & ARCH_CAP_RDCL_NO)
+ return false;
+
+ return true;
+}
+
/*
* Do minimum CPU detection early.
* Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -900,9 +931,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
setup_force_cpu_cap(X86_FEATURE_ALWAYS);
- if (c->x86_vendor != X86_VENDOR_AMD)
+ if (early_cpu_vulnerable_meltdown(c))
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-
setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
--
2.7.4
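A note on the table semantics (illustrative sketch, not from the patch):
in struct x86_cpu_id, zero-initialized vendor/family/model fields are the
ANY wildcards, which is how the bare { X86_VENDOR_AMD } entry folds the
old c->x86_vendor != X86_VENDOR_AMD check into the match:

	/* X86_FAMILY_ANY, X86_MODEL_ANY and X86_FEATURE_ANY are all zero,
	 * so unspecified fields match anything: */
	static const struct x86_cpu_id example[] = {
		{ X86_VENDOR_AMD },	/* any AMD CPU */
		{ X86_VENDOR_ANY, 5 },	/* any vendor, family 5 */
		{}			/* terminator */
	};

	if (x86_match_cpu(example))
		pr_info("exempt CPU\n");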
AMD exposes the PRED_CMD/SPEC_CTRL MSRs slightly differently to Intel.
See http://lkml.kernel.org/r/[email protected]
Signed-off-by: David Woodhouse <[email protected]>
---
arch/x86/include/asm/cpufeatures.h | 3 +++
1 file changed, 3 insertions(+)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 0a51070..ae3212f 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -269,6 +269,9 @@
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_AMD_PRED_CMD (13*32+12) /* Prediction Command MSR (AMD) */
+#define X86_FEATURE_AMD_SPEC_CTRL (13*32+14) /* Speculation Control MSR only (AMD) */
+#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors (AMD) */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
--
2.7.4
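These bits land in word 13 (CPUID 0x80000008 EBX), which get_cpu_cap()
already reads, so no new enumeration code is needed. A sketch of how a
consumer might treat the two vendors uniformly (the helper name is made
up):

	static inline bool ibpb_available(struct cpuinfo_x86 *c)
	{
		/* Intel's SPEC_CTRL bit advertises both MSRs; AMD can
		 * advertise PRED_CMD on its own. */
		return cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
		       cpu_has(c, X86_FEATURE_AMD_PRED_CMD);
	}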
Add three feature bits exposed by new microcode on Intel CPUs for
speculation control.
Signed-off-by: David Woodhouse <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
---
arch/x86/include/asm/cpufeatures.h | 3 +++
1 file changed, 3 insertions(+)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 7b25cf3..0a51070 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -320,6 +320,9 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_SPEC_CTRL (18*32+26) /* Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_STIBP (18*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
/*
* BUG word(s)
--
2.7.4
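Any use of the new MSRs has to be guarded on these bits: on CPUs whose
microcode doesn't advertise them, the accesses #GP. A minimal sketch:

	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);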
On Wed, Jan 24, 2018 at 04:57:03PM +0000, David Woodhouse wrote:
> Add MSR and bit definitions for SPEC_CTRL, PRED_CMD and ARCH_CAPABILITIES.
>
> See Intel's 336996-Speculative-Execution-Side-Channel-Mitigations.pdf
>
> Signed-off-by: David Woodhouse <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
On Wed, Jan 24, 2018 at 04:57:00PM +0000, David Woodhouse wrote:
> This is a pure feature bits leaf. We have two AVX512 feature bits in it
> already which were handled as scattered bits, and I'm about to add three
> more from this leaf for speculation control features.
>
> Signed-off-by: David Woodhouse <[email protected]>
> Reviewed-by: Borislav Petkov <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
On Wed, Jan 24, 2018 at 04:57:01PM +0000, David Woodhouse wrote:
> Add three feature bits exposed by new microcode on Intel CPUs for
> speculation control.
>
> Signed-off-by: David Woodhouse <[email protected]>
> Reviewed-by: Borislav Petkov <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
On Wed, Jan 24, 2018 at 04:57:02PM +0000, David Woodhouse wrote:
> AMD exposes the PRED_CMD/SPEC_CTRL MSRs slightly differently to Intel.
> See http://lkml.kernel.org/r/[email protected]
Oh how fun :(
> Signed-off-by: David Woodhouse <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
On Wed, Jan 24, 2018 at 04:57:04PM +0000, David Woodhouse wrote:
> Some old Atoms, anything in family 5 or 4, and newer CPUs when they advertise
> the IA32_ARCH_CAPABILITIES MSR and it has the RDCL_NO bit set, are not vulnerable.
>
> Roll the AMD exemption into the x86_match_cpu() table too.
>
> Based on suggestions from Dave Hansen and Alan Cox.
>
> Signed-off-by: David Woodhouse <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
On Wed, Jan 24, 2018 at 04:57:05PM +0000, David Woodhouse wrote:
> We don't refuse to load the affected microcodes; just refuse to use SPEC_CTRL
> if they're detected.
>
> AMD has a feature bit for "PRED_CMD only", which Intel didn't do. When disabling
> SPEC_CTRL we can actually turn on that AMD bit to allow IBPB to still be used.
>
> We handle the other AMD bits here too, because hypervisors *may* have been
> exposing those bits even on Intel chips, for fine-grained control of what's
> available.
>
> Signed-off-by: David Woodhouse <[email protected]>
> ---
> arch/x86/kernel/cpu/intel.c | 76 +++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 76 insertions(+)
>
> diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
> index b720dac..f5c7f61 100644
> --- a/arch/x86/kernel/cpu/intel.c
> +++ b/arch/x86/kernel/cpu/intel.c
> @@ -102,6 +102,64 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
> ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
> }
>
> +/*
> + * Early microcode releases for the Spectre v2 mitigation were broken.
> + * Information taken from:
> + * • https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
> + * • https://kb.vmware.com/s/article/52345
> + * • Microcode revisions observed in the wild
> + * • releasenote from 20180108 microcode release
> + */
> +struct sku_microcode {
> + u8 model;
> + u8 stepping;
> + u32 microcode;
> +};
> +static const struct sku_microcode spectre_bad_microcodes[] = {
> + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
> + /* Corrected typo in Intel doc */
> + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
> + { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
> + { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
> + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
> + { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003C },
> + { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0x000000C2 },
> + { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0x000000C2 },
> + { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
> + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x0000001B },
> + { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 },
> + { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 },
> + { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
> + { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
> + { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
> + { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
> + { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
> + { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
> + { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x7000011 },
> + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x0000001B },
> + { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
> + /* Dropped repeat of KBL Desktop 906E9, 0x80 */
> + { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
> + /* Dropped repeat of SKX 50654, 0x200003c */
Nit, but why comment that you dropped a repeat? No one cares, do they?
You already said above where this info came from.
Anyway, not a big deal at all:
Reviewed-by: Greg Kroah-Hartman <[email protected]>
On Wed, 2018-01-24 at 18:20 +0100, Greg KH wrote:
> On Wed, Jan 24, 2018 at 04:57:02PM +0000, David Woodhouse wrote:
> >
> > AMD exposes the PRED_CMD/SPEC_CTRL MSRs slightly differently to Intel.
> > See http://lkml.kernel.org/r/[email protected]
> Oh how fun :(
At least they *work* the same :)
Although I still haven't seen that doc, Tom...
On Wed, 2018-01-24 at 18:29 +0100, Greg KH wrote:
>
> > + /* Dropped repeat of SKX 50654, 0x200003c */
>
> Nit, but why comment that you dropped a repeat? No one cares, do they?
> You already said above where this info came from.
I started off by pedantically noting everywhere I'd had to proof-read
Intel's doc for them, so that people could map my table back to the
original and check my working. Eventually I got bored of that, but
these ones are left over. I'll kill them.
> On Wed, Jan 24, 2018 at 04:57:05PM +0000, David Woodhouse wrote:
> > +static const struct sku_microcode spectre_bad_microcodes[] = {
> > + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
> > + /* Corrected typo in Intel doc */
> > + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
> > + { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
> > + { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
> > + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
> > + { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003C },
> > + { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0x000000C2 },
> > + { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0x000000C2 },
> > + { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
> > + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x0000001B },
> > + { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 },
> > + { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 },
> > + { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
> > + { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
> > + { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
> > + { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
> > + { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
> > + { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
> > + { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x7000011 },
> > + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x0000001B },
> > + { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
> > + /* Dropped repeat of KBL Desktop 906E9, 0x80 */
> > + { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
> > + /* Dropped repeat of SKX 50654, 0x200003c */
Could we please also get better tabulation of this array, so that the general
layout of the values is easier to see? It's also prettier:
{ INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x00000080 },
{ INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003C },
{ INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0x000000C2 },
{ INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0x000000C2 },
Note the vertical spacing and the fixed length of each column.
(We can ignore the col80 for things like this.)
Thanks,
Ingo
On 1/24/2018 11:57 AM, David Woodhouse wrote:
> On Wed, 2018-01-24 at 18:20 +0100, Greg KH wrote:
>> On Wed, Jan 24, 2018 at 04:57:02PM +0000, David Woodhouse wrote:
>>>
>>> AMD exposes the PRED_CMD/SPEC_CTRL MSRs slightly differently to Intel.
>>> See http://lkml.kernel.org/r/[email protected]
>> Oh how fun :(
>
> At least they *work* the same :)
>
> Although I still haven't seen that doc, Tom...
Working on it...
Thanks,
Tom
>
On Wed, 2018-01-24 at 16:57 +0000, David Woodhouse wrote:
> Some old Atoms, anything in family 5 or 4, and newer CPUs when they advertise
> the IA32_ARCH_CAPABILITIES MSR and it has the RDCL_NO bit set, are not vulnerable.
>
> Roll the AMD exemption into the x86_match_cpu() table too.
>
> Based on suggestions from Dave Hansen and Alan Cox.
Hi David,
I know we'll still be able to manually enable PTI with a command line option,
but it's also a hardening feature which has the nice side effect of emulating
SMEP on CPUs which don't support it (e.g. the Atom boxes above).
Couldn't we keep the “default on”? Or maybe on boxes which also have PCID (in
order to limit the performance cost)?
Regards,
--
Yves-Alexis
On Wed, 2018-01-24 at 16:57 +0000, David Woodhouse wrote:
> We don't refuse to load the affected microcodes; just refuse to use SPEC_CTRL
> if they're detected.
Hi David,
Are we sure the instability in those microcodes is only related to SPEC_CTRL?
Regards,
--
Yves-Alexis
On Fri, 2018-01-26 at 13:14 +0100, Yves-Alexis Perez wrote:
> On Wed, 2018-01-24 at 16:57 +0000, David Woodhouse wrote:
> > Some old Atoms, anything in family 5 or 4, and newer CPUs when they advertise
> > the IA32_ARCH_CAPABILITIES MSR and it has the RDCL_NO bit set, are not vulnerable.
> >
> > Roll the AMD exemption into the x86_match_cpu() table too.
> >
> > Based on suggestions from Dave Hansen and Alan Cox.
>
> Hi David,
>
> I know we'll still be able to manually enable PTI with a command line option,
> but it's also a hardening feature which has the nice side effect of emulating
> SMEP on CPUs which don't support it (e.g. the Atom boxes above).
>
> Couldn't we keep the “default on”? Or maybe on boxes which also have PCID (in
> order to limit the performance cost)?
Strictly speaking, "don't enable PTI" is a side-effect of my patch, not
directly what it does.
All this patch does is *correctly* refrain from setting
X86_BUG_CPU_MELTDOWN on CPUs which don't suffer that bug.
It's the logic in arch/x86/mm/pti.c which enables PTI by default only
for CPUs with the bug.
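For reference, the gating there is roughly this (simplified sketch from
memory, not verbatim):

	/* arch/x86/mm/pti.c: pti=on/off on the command line overrides
	 * this; otherwise PTI is enabled iff the bug bit is set. */
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
	setup_force_cpu_cap(X86_FEATURE_PTI);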
As for whether PCID reduces the performance hit sufficiently to make it
worthwhile "just" to emulate SMEP, I'm not sure. But I am sure it's
someone else's problem for today, except as a cosmetic comment on the
headline of my patch :)
On Fri, 2018-01-26 at 13:16 +0100, Yves-Alexis Perez wrote:
> On Wed, 2018-01-24 at 16:57 +0000, David Woodhouse wrote:
> > We don't refuse to load the affected microcodes; just refuse to use SPEC_CTRL
> > if they're detected.
>
> Hi David,
>
> Are we sure the instability in those microcodes is only related to SPEC_CTRL?
See v4 :)
On 01/26/2018 04:14 AM, Yves-Alexis Perez wrote:
> I know we'll still be able to manually enable PTI with a command line option,
> but it's also a hardening feature which has the nice side effect of emulating
> SMEP on CPUs which don't support it (e.g. the Atom boxes above).
For Meltdown-vulnerable systems, it's a no-brainer: pti=on. The
vulnerability there is just too much.
But, if we are going to change the default, IMNHO, we need a clear list
of what SMEP emulation mitigates and where. RSB-related Variant 2 stuff
on Atom where the kernel speculatively 'ret's back to userspace is
certainly a concern. But, there's a lot of other RSB stuffing that's
going on that will mitigate that too.
Were you thinking of anything concrete?
I haven't found anything compelling enough to justify the downsides,
especially since things without SMEP tend not to have PCIDs as well.
On 1/26/2018 7:27 AM, Dave Hansen wrote:
> On 01/26/2018 04:14 AM, Yves-Alexis Perez wrote:
>> I know we'll still be able to manually enable PTI with a command line option,
>> but it's also a hardening feature which has the nice side effect of emulating
>> SMEP on CPUs which don't support it (e.g. the Atom boxes above).
>
> For Meltdown-vulnerable systems, it's a no brainer: pti=on. The
> vulnerability there is just too much.
>
> But, if we are going to change the default, IMNHO, we need a clear list
> of what SMEP emulation mitigates and where. RSB-related Variant 2 stuff
> on Atom where the kernel speculatively 'ret's back to userspace is
> certainly a concern. But, there's a lot of other RSB stuffing that's
> going on that will mitigate that too.
>
> Were you thinking of anything concrete?
Not Atom though. Atom has had SMEP for a very long time; at least the ones
that do speculation do, afaict.
SMEP is for other bugs (a dud kernel function pointer) and for that,
emulating SMEP is an interesting opt-in for sure.
On Fri, 26 Jan 2018 13:14:46 +0100
Yves-Alexis Perez <[email protected]> wrote:
> On Wed, 2018-01-24 at 16:57 +0000, David Woodhouse wrote:
> > Some old Atoms, anything in family 5 or 4, and newer CPUs when they advertise
> > the IA32_ARCH_CAPABILITIES MSR and it has the RDCL_NO bit set, are not vulnerable.
> >
> > Roll the AMD exemption into the x86_match_cpu() table too.
> >
> > Based on suggestions from Dave Hansen and Alan Cox.
>
> Hi David,
>
> I know we'll still be able to manually enable PTI with a command line option,
> but it's also a hardening feature which has the nice side effect of emulating
> SMEP on CPUs which don't support it (e.g. the Atom boxes above).
>
> Couldn't we keep the “default on”? Or maybe on boxes which also have PCID (in
> order to limit the performance cost)?
For the old Atom processors you really don't want the extra cost as a
default. These are older, much slower devices and don't have PCID.
Alan
On 1/24/2018 4:52 PM, Tom Lendacky wrote:
> On 1/24/2018 11:57 AM, David Woodhouse wrote:
>> On Wed, 2018-01-24 at 18:20 +0100, Greg KH wrote:
>>> On Wed, Jan 24, 2018 at 04:57:02PM +0000, David Woodhouse wrote:
>>>>
>>>> AMD exposes the PRED_CMD/SPEC_CTRL MSRs slightly differently to Intel.
>>>> See http://lkml.kernel.org/r/[email protected]
>>> Oh how fun :(
>>
>> At least they *work* the same :)
>>
>> Although I still haven't seen that doc, Tom...
>
> Working on it...
The CPUID/MSR information is published:
https://developer.amd.com/wp-content/resources/Architecture_Guidelines_Update_Indirect_Branch_Control.pdf
Thanks,
Tom
>
> Thanks,
> Tom
>
>>