GENERIC_CPU_VULNERABILITIES provides a common way to report whether a
system is affected by vulnerabilities such as Meltdown and the Spectre
variants. This small series adds support for it on arm64.
Thank you,
Best regards,
Yousaf
Mian Yousaf Kaukab (6):
arm64: kpti: move check for non-vulnerable CPUs to a function
arm64: add sysfs vulnerability show for meltdown
arm64: add sysfs vulnerability show for spectre v1
arm64: add sysfs vulnerability show for spectre v2
arm64: add sysfs vulnerability show for speculative store bypass
arm64: enable generic CPU vulnerabilities support
arch/arm64/Kconfig | 1 +
arch/arm64/include/asm/cpufeature.h | 16 +++++++
arch/arm64/kernel/cpu_errata.c | 84 ++++++++++++++++++++++++++++++++++++-
arch/arm64/kernel/cpufeature.c | 9 +---
4 files changed, 101 insertions(+), 9 deletions(-)
--
2.11.0
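For context: GENERIC_CPU_VULNERABILITIES works by having the generic
code in drivers/base/cpu.c expose sysfs attributes backed by weak
cpu_show_*() functions, which an architecture overrides to report its
actual status. The generic side looks roughly like this (a sketch, not
part of this series):

ssize_t __weak cpu_show_meltdown(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	/* Default when the arch does not override: claim not affected */
	return sprintf(buf, "Not affected\n");
}

The patches below override these weak symbols for arm64.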
Return the status based on ssbd_state. Return the string "Unknown" in
case CONFIG_ARM64_SSBD is disabled or ARCH_WORKAROUND_2 is not
available in the firmware.
Signed-off-by: Mian Yousaf Kaukab <[email protected]>
---
arch/arm64/kernel/cpu_errata.c | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 8469d3be7b15..8b60aa30a3fa 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -744,4 +744,24 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "Not affected\n");
}
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ switch (arm64_get_ssbd_state()) {
+ case ARM64_SSBD_MITIGATED:
+ return sprintf(buf, "Not affected\n");
+
+ case ARM64_SSBD_KERNEL:
+ case ARM64_SSBD_FORCE_ENABLE:
+ return sprintf(buf,
+ "Mitigation: Speculative Store Bypass disabled");
+
+ case ARM64_SSBD_FORCE_DISABLE:
+ return sprintf(buf, "Vulnerable\n");
+
+ default: /* ARM64_SSBD_UNKNOWN */
+ return sprintf(buf, "Unknown\n");
+ }
+}
+
#endif
--
2.11.0
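For reference, arm64_get_ssbd_state() is a small helper in
asm/cpufeature.h; it looks roughly like this (sketch, details may
differ from the tree this applies to):

#ifdef CONFIG_ARM64_SSBD
static inline int arm64_get_ssbd_state(void)
{
	extern int ssbd_state;
	return ssbd_state;
}
#else
static inline int arm64_get_ssbd_state(void)
{
	/* Without CONFIG_ARM64_SSBD the firmware state is never probed */
	return ARM64_SSBD_UNKNOWN;
}
#endif

This is why the default case above maps to "Unknown": the state really
is unknown when the SSBD code is compiled out.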
Hard-code the mitigation status since the __user pointer sanitization
patches are already merged and there are no configuration options to
disable them.
Signed-off-by: Mian Yousaf Kaukab <[email protected]>
---
arch/arm64/kernel/cpu_errata.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 996edb4e18ad..92616431ae4e 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -706,4 +706,10 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "Vulnerable\n");
}
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
#endif
--
2.11.0
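The __user pointer sanitization referred to above is the
array_index_nospec() pattern from linux/nospec.h, applied throughout
the tree wherever a user-controlled index is used after a bounds
check. A typical use looks like this (hypothetical example; get_item()
and struct item are made up for illustration):

#include <linux/nospec.h>

struct item {
	int value;
};

static int get_item(const struct item *items, int nr_items, int idx)
{
	if (idx >= nr_items)
		return -EINVAL;
	/* Clamp idx so the CPU cannot speculate past the bounds check */
	idx = array_index_nospec(idx, nr_items);
	return items[idx].value;
}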
Check CSV3 support directly in case CONFIG_UNMAP_KERNEL_AT_EL0 is not
enabled.
Signed-off-by: Mian Yousaf Kaukab <[email protected]>
---
arch/arm64/kernel/cpu_errata.c | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index dec10898d688..996edb4e18ad 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -22,6 +22,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/mmu.h>
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -683,3 +684,26 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
}
};
+
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u64 pfr0;
+ u32 csv3;
+
+ if (arm64_kernel_unmapped_at_el0())
+ return sprintf(buf, "Mitigation: KPTI\n");
+
+ pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+ csv3 = cpuid_feature_extract_unsigned_field(pfr0,
+ ID_AA64PFR0_CSV3_SHIFT);
+
+ if (csv3 || is_cpu_meltdown_safe())
+ return sprintf(buf, "Not affected\n");
+
+ return sprintf(buf, "Vulnerable\n");
+}
+
+#endif
--
2.11.0
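cpuid_feature_extract_unsigned_field() used above isolates one 4-bit
field of an ID register; the asm/cpufeature.h helpers look roughly
like this (sketch):

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	/* Shift the field to the top of the word, then back down */
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	/* ID register fields are 4 bits wide */
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}

So a non-zero CSV3 field in ID_AA64PFR0_EL1 means the hardware itself
claims to be immune to Meltdown.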
Only report the mitigation as present if the branch-predictor
hardening callback has been successfully installed.
Signed-off-by: Mian Yousaf Kaukab <[email protected]>
---
arch/arm64/kernel/cpu_errata.c | 34 +++++++++++++++++++++++++++++++++-
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 92616431ae4e..8469d3be7b15 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -481,7 +481,8 @@ multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
caps->cpu_enable(caps);
}
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#if defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \
+ defined(CONFIG_GENERIC_CPU_VULNERABILITIES)
/*
* List of CPUs where we need to issue a psci call to
@@ -712,4 +713,35 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u64 pfr0;
+ struct bp_hardening_data *data;
+
+ pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+ if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+ return sprintf(buf, "Not affected\n");
+
+ if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
+ /*
+ * Hardware is vulnerable. Let's check if the bp hardening callback
+ * has been successfully installed.
+ */
+ data = arm64_get_bp_hardening_data();
+ if (data && data->fn)
+ return sprintf(buf,
+ "Mitigation: Branch predictor hardening");
+ else
+ /* For example SMCCC_VERSION_1_0 */
+ return sprintf(buf, "Vulnerable\n");
+ }
+
+ /* In case CONFIG_HARDEN_BRANCH_PREDICTOR is not enabled */
+ if (is_midr_in_range_list(read_cpuid_id(), arm64_bp_harden_smccc_cpus))
+ return sprintf(buf, "Vulnerable\n");
+
+ return sprintf(buf, "Not affected\n");
+}
+
#endif
--
2.11.0
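For reference, the per-CPU data the patch inspects comes from
asm/mmu.h; roughly (sketch):

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	int			hyp_vectors_slot;
	bp_hardening_cb_t	fn;	/* NULL until a workaround is installed */
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return this_cpu_ptr(&bp_hardening_data);
}

A NULL fn means the capability was detected but no firmware workaround
could be installed (for example with SMCCC 1.0), hence the
"Vulnerable" result above.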
Enable the CPU vulnerability show functions for spectre_v1,
spectre_v2, meltdown and spec_store_bypass.
Signed-off-by: Mian Yousaf Kaukab <[email protected]>
---
arch/arm64/Kconfig | 1 +
1 file changed, 1 insertion(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0dec01a0c81c..ffd97bc0f5d5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -84,6 +84,7 @@ config ARM64
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_CPU_AUTOPROBE
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_MULTI_HANDLER
--
2.11.0
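Once GENERIC_CPU_VULNERABILITIES is selected, drivers/base/cpu.c wires
the cpu_show_*() functions into sysfs; roughly (sketch of the generic
code):

static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);

static struct attribute *cpu_root_vulnerabilities_attrs[] = {
	&dev_attr_meltdown.attr,
	&dev_attr_spectre_v1.attr,
	&dev_attr_spectre_v2.attr,
	&dev_attr_spec_store_bypass.attr,
	NULL
};

static const struct attribute_group cpu_root_vulnerabilities_group = {
	.name  = "vulnerabilities",
	.attrs = cpu_root_vulnerabilities_attrs,
};

The status strings then become readable under
/sys/devices/system/cpu/vulnerabilities/.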
Prepare to call it from the generic CPU vulnerabilities support code.
Signed-off-by: Mian Yousaf Kaukab <[email protected]>
---
arch/arm64/include/asm/cpufeature.h | 16 ++++++++++++++++
arch/arm64/kernel/cpufeature.c | 9 +--------
2 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 1717ba1db35d..0b0b5b3e36ba 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -530,6 +530,22 @@ void arm64_set_ssbd_mitigation(bool state);
static inline void arm64_set_ssbd_mitigation(bool state) {}
#endif
+static inline bool is_cpu_meltdown_safe(void)
+{
+ /* List of CPUs that are not vulnerable and don't need KPTI */
+ static const struct midr_range kpti_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ { /* sentinel */ }
+ };
+
+ /* Don't force KPTI for CPUs that are not vulnerable */
+ if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+ return true;
+
+ return false;
+}
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e238b7932096..6a94f8bce35a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -865,12 +865,6 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
int scope)
{
- /* List of CPUs that are not vulnerable and don't need KPTI */
- static const struct midr_range kpti_safe_list[] = {
- MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
- MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
- { /* sentinel */ }
- };
char const *str = "command line option";
/*
@@ -894,8 +888,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return true;
- /* Don't force KPTI for CPUs that are not vulnerable */
- if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+ if (is_cpu_meltdown_safe())
return false;
/* Defer to CPU feature registers */
--
2.11.0
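is_midr_in_range_list() used by the new helper walks a
sentinel-terminated array of midr_range entries; the asm/cputype.h
helper looks roughly like this (sketch):

static inline bool is_midr_in_range_list(u32 midr,
					 struct midr_range const *ranges)
{
	/* The list ends with an all-zero sentinel entry */
	while (ranges->model)
		if (is_midr_in_range(midr, ranges++))
			return true;
	return false;
}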