2019-01-09 23:57:24

by Jeremy Linton

Subject: [PATCH v3 0/7] arm64: add system vulnerability sysfs entries

Arm64 machines should display a human-readable
vulnerability status for speculative execution attacks in
/sys/devices/system/cpu/vulnerabilities

This series enables that behavior by providing the expected
functions. Those functions expose the CPU errata and feature
states, as well as whether the firmware is responding
appropriately, in order to display the overall machine status.
This means that in a heterogeneous machine we will only claim
the machine is mitigated or safe if we are confident all booted
cores are safe or mitigated.
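
For example, on a fully mitigated machine the entries would read
something like this (illustrative output; the strings come from the
show functions in this series and from reports later in the thread):

meltdown:Mitigation: KPTI
spectre_v1:Mitigation: __user pointer sanitization
spectre_v2:Mitigation: Branch predictor hardening
spec_store_bypass:Not affected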

v2->v3:
Remove "Unknown" states, replace with further blacklists
and default vulnerable/no affected states.
Add the ability for an arch port to selectively export
sysfs vulnerabilities.

v1->v2:
Add "Unknown" state to ABI/testing docs.
Minor tweaks.

Jeremy Linton (4):
sysfs/cpu: Allow individual architectures to select vulnerabilities
arm64: add sysfs vulnerability show for meltdown
arm64: add sysfs vulnerability show for spectre v2
arm64: add sysfs vulnerability show for speculative store bypass

Mian Yousaf Kaukab (3):
arm64: add sysfs vulnerability show for spectre v1
arm64: kpti: move check for non-vulnerable CPUs to a function
arm64: enable generic CPU vulnerabilities support

arch/arm64/Kconfig | 1 +
arch/arm64/kernel/cpu_errata.c | 126 +++++++++++++++++++++++++++++++--
arch/arm64/kernel/cpufeature.c | 45 +++++++++---
drivers/base/cpu.c | 19 +++++
include/linux/cpu.h | 7 ++
5 files changed, 185 insertions(+), 13 deletions(-)

--
2.17.2



2019-01-09 23:57:24

by Jeremy Linton

Subject: [PATCH v3 7/7] arm64: enable generic CPU vulnerabilities support

From: Mian Yousaf Kaukab <[email protected]>

Enable the CPU vulnerability show functions for spectre_v1, spectre_v2,
meltdown and store-bypass.

Signed-off-by: Mian Yousaf Kaukab <[email protected]>
Signed-off-by: Jeremy Linton <[email protected]>
---
arch/arm64/Kconfig | 1 +
1 file changed, 1 insertion(+)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a4168d366127..be9872ee1d61 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -88,6 +88,7 @@ config ARM64
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_CPU_AUTOPROBE
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_MULTI_HANDLER
--
2.17.2


2019-01-09 23:57:34

by Jeremy Linton

Subject: [PATCH v3 6/7] arm64: add sysfs vulnerability show for speculative store bypass

Return status based on ssbd_state and the arm64 SSBS feature. If
the mitigation is disabled or the firmware isn't responding, then
return the expected machine state based on a new blacklist of known
vulnerable cores.

Signed-off-by: Jeremy Linton <[email protected]>
---
arch/arm64/kernel/cpu_errata.c | 48 ++++++++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index ee286d606d9b..c8ff96158b94 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -288,6 +288,7 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;

static const struct ssbd_options {
const char *str;
@@ -385,10 +386,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
{
struct arm_smccc_res res;
bool required = true;
+ bool is_vul;
s32 val;

WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

+ is_vul = is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+
+ if (is_vul)
+ __ssb_safe = false;
+
+ arm64_requested_vuln_attrs |= VULN_SSB;
+
if (this_cpu_has_cap(ARM64_SSBS)) {
required = false;
goto out_printmsg;
@@ -422,6 +431,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
ssbd_state = ARM64_SSBD_UNKNOWN;
return false;

+ /* machines with mixed mitigation requirements must not return this */
case SMCCC_RET_NOT_REQUIRED:
pr_info_once("%s mitigation not required\n", entry->desc);
ssbd_state = ARM64_SSBD_MITIGATED;
@@ -476,6 +486,17 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,

return required;
}
+
+/* known vulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+ {},
+};
+
#endif /* CONFIG_ARM64_SSBD */

static void __maybe_unused
@@ -762,6 +783,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.capability = ARM64_SSBD,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_ssbd_mitigation,
+ .midr_range_list = arm64_ssb_cpus,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
@@ -809,4 +831,30 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "Vulnerable\n");
}

+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ /*
+ * Two assumptions: first, that arm64_get_ssbd_state() reflects the
+ * worst case for heterogeneous machines; second, that if SSBS is
+ * supported, it is supported by all cores.
+ */
+ switch (arm64_get_ssbd_state()) {
+ case ARM64_SSBD_MITIGATED:
+ return sprintf(buf, "Not affected\n");
+
+ case ARM64_SSBD_KERNEL:
+ case ARM64_SSBD_FORCE_ENABLE:
+ if (cpus_have_cap(ARM64_SSBS))
+ return sprintf(buf, "Not affected\n");
+ return sprintf(buf,
+ "Mitigation: Speculative Store Bypass disabled\n");
+ }
+
+ if (__ssb_safe)
+ return sprintf(buf, "Not affected\n");
+
+ return sprintf(buf, "Vulnerable\n");
+}
+
#endif
--
2.17.2


2019-01-09 23:57:49

by Jeremy Linton

Subject: [PATCH v3 5/7] arm64: add sysfs vulnerability show for spectre v2

Add code to track whether all the cores in the machine are
vulnerable, and whether all the vulnerable cores have been
mitigated.

Once we have that information we can add the sysfs stub and
provide an accurate view of what is known about the machine.

Signed-off-by: Jeremy Linton <[email protected]>
---
arch/arm64/kernel/cpu_errata.c | 61 +++++++++++++++++++++++++++++++---
1 file changed, 56 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 8dde8c616b7e..ee286d606d9b 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -111,6 +111,11 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

uint arm64_requested_vuln_attrs = VULN_SPECTREV1;

+#if defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || defined(CONFIG_GENERIC_CPU_VULNERABILITIES)
+/* Track overall mitigation state. We are only mitigated if all cores are ok */
+static bool __hardenbp_enab = true;
+#endif
+
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
@@ -233,15 +238,19 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
if (!entry->matches(entry, SCOPE_LOCAL_CPU))
return;

- if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
+ __hardenbp_enab = false;
return;
+ }

switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 < 0)
+ if ((int)res.a0 < 0) {
+ __hardenbp_enab = false;
return;
+ }
cb = call_hvc_arch_workaround_1;
/* This is a guest, no need to patch KVM vectors */
smccc_start = NULL;
@@ -251,14 +260,17 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
case PSCI_CONDUIT_SMC:
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 < 0)
+ if ((int)res.a0 < 0) {
+ __hardenbp_enab = false;
return;
+ }
cb = call_smc_arch_workaround_1;
smccc_start = __smccc_workaround_1_smc_start;
smccc_end = __smccc_workaround_1_smc_end;
break;

default:
+ __hardenbp_enab = false;
return;
}

@@ -509,7 +521,32 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
CAP_MIDR_RANGE_LIST(midr_list)

-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#if defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \
+ defined(CONFIG_GENERIC_CPU_VULNERABILITIES)
+
+
+static bool __spectrev2_safe = true;
+
+/*
+ * Track overall bp hardening for all heterogeneous cores in the machine.
+ * We are only considered "safe" if all booted cores are known safe.
+ */
+static bool __maybe_unused
+check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ bool is_vul;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ is_vul = is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+
+ if (is_vul)
+ __spectrev2_safe = false;
+
+ arm64_requested_vuln_attrs |= VULN_SPECTREV2;
+
+ return is_vul;
+}

/*
* List of CPUs where we need to issue a psci call to
@@ -707,7 +744,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
.cpu_enable = enable_smccc_arch_workaround_1,
- ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = check_branch_predictor,
+ .midr_range_list = arm64_bp_harden_smccc_cpus,
},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
@@ -758,4 +797,16 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ if (__spectrev2_safe)
+ return sprintf(buf, "Not affected\n");
+
+ if (__hardenbp_enab)
+ return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+
+ return sprintf(buf, "Vulnerable\n");
+}
+
#endif
--
2.17.2


2019-01-09 23:58:32

by Jeremy Linton

Subject: [PATCH v3 2/7] arm64: add sysfs vulnerability show for spectre v1

From: Mian Yousaf Kaukab <[email protected]>

Spectre v1 has been mitigated, and the mitigation is
always active.

Signed-off-by: Mian Yousaf Kaukab <[email protected]>
Signed-off-by: Jeremy Linton <[email protected]>
---
arch/arm64/kernel/cpu_errata.c | 17 +++++++++++++++++
1 file changed, 17 insertions(+)

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 09ac548c9d44..8dde8c616b7e 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -109,6 +109,8 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

+uint arm64_requested_vuln_attrs = VULN_SPECTREV1;
+
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
@@ -742,3 +744,18 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
}
};
+
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+
+uint arch_supported_vuln_attr_fields(void)
+{
+ return arm64_requested_vuln_attrs;
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+#endif
--
2.17.2


2019-01-09 23:58:47

by Jeremy Linton

Subject: [PATCH v3 3/7] arm64: kpti: move check for non-vulnerable CPUs to a function

From: Mian Yousaf Kaukab <[email protected]>

Add is_cpu_meltdown_safe(), which checks the CPU against a whitelist
of known safe cores.

Signed-off-by: Mian Yousaf Kaukab <[email protected]>
[Moved location of function]
Signed-off-by: Jeremy Linton <[email protected]>
---
arch/arm64/kernel/cpufeature.c | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4f272399de89..ab784d7a0083 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -947,8 +947,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

-static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
- int scope)
+static bool is_cpu_meltdown_safe(void)
{
/* List of CPUs that are not vulnerable and don't need KPTI */
static const struct midr_range kpti_safe_list[] = {
@@ -962,6 +961,15 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
{ /* sentinel */ }
};
+ if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+ return true;
+
+ return false;
+}
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
char const *str = "command line option";

/*
@@ -985,8 +993,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return true;

- /* Don't force KPTI for CPUs that are not vulnerable */
- if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+ if (is_cpu_meltdown_safe())
return false;

/* Defer to CPU feature registers */
--
2.17.2


2019-01-09 23:59:31

by Jeremy Linton

Subject: [PATCH v3 1/7] sysfs/cpu: Allow individual architectures to select vulnerabilities

As suggested on the list, https://lkml.org/lkml/2019/1/4/282, there are
a number of cases where it's useful for a system to avoid exporting a
sysfs entry for a given vulnerability. This set adds an
architecture-specific callback which returns the bitmap of
vulnerabilities the architecture would like to advertise.

Signed-off-by: Jeremy Linton <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Rafael J. Wysocki <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Konrad Rzeszutek Wilk <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Jiri Kosina <[email protected]>
---
drivers/base/cpu.c | 19 +++++++++++++++++++
include/linux/cpu.h | 7 +++++++
2 files changed, 26 insertions(+)

diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index eb9443d5bae1..35f6dfb24cd6 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -561,6 +561,11 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
NULL
};

+uint __weak arch_supported_vuln_attr_fields(void)
+{
+ return VULN_MELTDOWN|VULN_SPECTREV1|VULN_SPECTREV2|VULN_SSB|VULN_L1TF;
+}
+
static const struct attribute_group cpu_root_vulnerabilities_group = {
.name = "vulnerabilities",
.attrs = cpu_root_vulnerabilities_attrs,
@@ -568,6 +573,20 @@ static const struct attribute_group cpu_root_vulnerabilities_group = {

static void __init cpu_register_vulnerabilities(void)
{
+ int fld;
+ int max_fields = ARRAY_SIZE(cpu_root_vulnerabilities_attrs) - 1;
+ struct attribute **hd = cpu_root_vulnerabilities_attrs;
+ uint enabled_fields = arch_supported_vuln_attr_fields();
+
+ /* only enable entries requested by the arch code */
+ for (fld = 0; fld < max_fields; fld++) {
+ if (enabled_fields & 1 << fld) {
+ *hd = cpu_root_vulnerabilities_attrs[fld];
+ hd++;
+ }
+ }
+ *hd = NULL;
+
if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
&cpu_root_vulnerabilities_group))
pr_err("Unable to register CPU vulnerabilities\n");
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 218df7f4d3e1..5e45814bcc24 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -189,4 +189,11 @@ static inline void cpu_smt_check_topology_early(void) { }
static inline void cpu_smt_check_topology(void) { }
#endif

+/* generic cpu vulnerability attributes */
+#define VULN_MELTDOWN 0x01
+#define VULN_SPECTREV1 0x02
+#define VULN_SPECTREV2 0x04
+#define VULN_SSB 0x08
+#define VULN_L1TF 0x10
+
#endif /* _LINUX_CPU_H_ */
--
2.17.2


2019-01-10 00:13:45

by Jeremy Linton

Subject: [PATCH v3 4/7] arm64: add sysfs vulnerability show for meltdown

Display the mitigation status if active; otherwise
assume the CPU is safe unless it doesn't have CSV3
and isn't in our whitelist.

Signed-off-by: Jeremy Linton <[email protected]>
---
arch/arm64/kernel/cpufeature.c | 32 +++++++++++++++++++++++++++-----
1 file changed, 27 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ab784d7a0083..ef7bbc49ef78 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -944,8 +944,12 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
return has_cpuid_feature(entry, scope);
}

+/* default value is invalid until unmap_kernel_at_el0() runs */
+static bool __meltdown_safe = true;
+
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+extern uint arm64_requested_vuln_attrs;

static bool is_cpu_meltdown_safe(void)
{
@@ -972,6 +976,14 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
{
char const *str = "command line option";

+ bool meltdown_safe = is_cpu_meltdown_safe() ||
+ has_cpuid_feature(entry, scope);
+
+ if (!meltdown_safe)
+ __meltdown_safe = false;
+
+ arm64_requested_vuln_attrs |= VULN_MELTDOWN;
+
/*
* For reasons that aren't entirely clear, enabling KPTI on Cavium
* ThunderX leads to apparent I-cache corruption of kernel text, which
@@ -993,11 +1005,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return true;

- if (is_cpu_meltdown_safe())
- return false;
-
- /* Defer to CPU feature registers */
- return !has_cpuid_feature(entry, scope);
+ return !meltdown_safe;
}

static void
@@ -2065,3 +2073,17 @@ static int __init enable_mrs_emulation(void)
}

core_initcall(enable_mrs_emulation);
+
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ if (arm64_kernel_unmapped_at_el0())
+ return sprintf(buf, "Mitigation: KPTI\n");
+
+ if (__meltdown_safe)
+ return sprintf(buf, "Not affected\n");
+
+ return sprintf(buf, "Vulnerable\n");
+}
+#endif
--
2.17.2


2019-01-10 09:27:00

by Julien Thierry

Subject: Re: [PATCH v3 4/7] arm64: add sysfs vulnerability show for meltdown

Hi Jeremy,

On 09/01/2019 23:55, Jeremy Linton wrote:
> Display the mitigation status if active; otherwise
> assume the CPU is safe unless it doesn't have CSV3
> and isn't in our whitelist.
>
> Signed-off-by: Jeremy Linton <[email protected]>
> ---
> arch/arm64/kernel/cpufeature.c | 32 +++++++++++++++++++++++++++-----
> 1 file changed, 27 insertions(+), 5 deletions(-)
>
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index ab784d7a0083..ef7bbc49ef78 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -944,8 +944,12 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
> return has_cpuid_feature(entry, scope);
> }
>
> +/* default value is invalid until unmap_kernel_at_el0() runs */
> +static bool __meltdown_safe = true;
> +
> #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
> static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
> +extern uint arm64_requested_vuln_attrs;
>
> static bool is_cpu_meltdown_safe(void)
> {
> @@ -972,6 +976,14 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
> {
> char const *str = "command line option";
>
> + bool meltdown_safe = is_cpu_meltdown_safe() ||
> + has_cpuid_feature(entry, scope);
> +
> + if (!meltdown_safe)
> + __meltdown_safe = false;
> +
> + arm64_requested_vuln_attrs |= VULN_MELTDOWN;
> +
> /*
> * For reasons that aren't entirely clear, enabling KPTI on Cavium
> * ThunderX leads to apparent I-cache corruption of kernel text, which
> @@ -993,11 +1005,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
> if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
> return true;
>
> - if (is_cpu_meltdown_safe())
> - return false;
> -
> - /* Defer to CPU feature registers */
> - return !has_cpuid_feature(entry, scope);
> + return !meltdown_safe;
> }
>
> static void
> @@ -2065,3 +2073,17 @@ static int __init enable_mrs_emulation(void)
> }
>
> core_initcall(enable_mrs_emulation);
> +
> +#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
> +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
> + char *buf)
> +{
> + if (arm64_kernel_unmapped_at_el0())
> + return sprintf(buf, "Mitigation: KPTI\n");
> +
> + if (__meltdown_safe)
> + return sprintf(buf, "Not affected\n");

An issue I see is that we don't even bother to check whether the CPUs
are meltdown safe if CONFIG_UNMAP_KERNEL_AT_EL0 is not defined, but here
we'll advertise that the system is meltdown safe.

I think that checking whether we know the CPUs are meltdown safe
should be separated from whether the mitigation is applied.

Someone who thinks their CPUs are in the whitelist might want to
compile out the code that does KPTI, but it would be good to give them
a proper diagnostic of whether they were wrong or not.

Cheers,

--
Julien Thierry

2019-01-10 14:13:13

by Jeremy Linton

Subject: Re: [PATCH v3 4/7] arm64: add sysfs vulnerability show for meltdown

Hi Julien,

On 01/10/2019 03:23 AM, Julien Thierry wrote:
> Hi Jeremy,
>
> On 09/01/2019 23:55, Jeremy Linton wrote:
>> Display the mitigation status if active; otherwise
>> assume the CPU is safe unless it doesn't have CSV3
>> and isn't in our whitelist.
>>
>> Signed-off-by: Jeremy Linton <[email protected]>
>> ---
>> arch/arm64/kernel/cpufeature.c | 32 +++++++++++++++++++++++++++-----
>> 1 file changed, 27 insertions(+), 5 deletions(-)
>>
>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>> index ab784d7a0083..ef7bbc49ef78 100644
>> --- a/arch/arm64/kernel/cpufeature.c
>> +++ b/arch/arm64/kernel/cpufeature.c
>> @@ -944,8 +944,12 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
>> return has_cpuid_feature(entry, scope);
>> }
>>
>> +/* default value is invalid until unmap_kernel_at_el0() runs */
>> +static bool __meltdown_safe = true;
>> +
>> #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>> static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
>> +extern uint arm64_requested_vuln_attrs;
>>
>> static bool is_cpu_meltdown_safe(void)
>> {
>> @@ -972,6 +976,14 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
>> {
>> char const *str = "command line option";
>>
>> + bool meltdown_safe = is_cpu_meltdown_safe() ||
>> + has_cpuid_feature(entry, scope);
>> +
>> + if (!meltdown_safe)
>> + __meltdown_safe = false;
>> +
>> + arm64_requested_vuln_attrs |= VULN_MELTDOWN;
>> +
>> /*
>> * For reasons that aren't entirely clear, enabling KPTI on Cavium
>> * ThunderX leads to apparent I-cache corruption of kernel text, which
>> @@ -993,11 +1005,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
>> if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
>> return true;
>>
>> - if (is_cpu_meltdown_safe())
>> - return false;
>> -
>> - /* Defer to CPU feature registers */
>> - return !has_cpuid_feature(entry, scope);
>> + return !meltdown_safe;
>> }
>>
>> static void
>> @@ -2065,3 +2073,17 @@ static int __init enable_mrs_emulation(void)
>> }
>>
>> core_initcall(enable_mrs_emulation);
>> +
>> +#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
>> +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
>> + char *buf)
>> +{
>> + if (arm64_kernel_unmapped_at_el0())
>> + return sprintf(buf, "Mitigation: KPTI\n");
>> +
>> + if (__meltdown_safe)
>> + return sprintf(buf, "Not affected\n");
>
> An issue I see is that we don't even bother to check whether the CPUs
> are meltdown safe if CONFIG_UNMAP_KERNEL_AT_EL0 is not defined, but
> here we'll advertise that the system is meltdown safe.

That check isn't necessary anymore because the sysfs attribute is only
populated if unmap_kernel_at_el0() runs (assuming I haven't messed
something up). That was Dave/Will's suggestion in the last thread about
how to handle this case.



>
> I think that checking whether we know the CPUs are meltdown safe
> should be separated from whether the mitigation is applied.
>
> Someone who thinks their CPUs are in the whitelist might want to
> compile out the code that does KPTI, but it would be good to give them
> a proper diagnostic of whether they were wrong or not.
>
> Cheers,
>


2019-01-10 19:36:02

by Julien Thierry

Subject: Re: [PATCH v3 4/7] arm64: add sysfs vulnerability show for meltdown



On 10/01/2019 14:10, Jeremy Linton wrote:
> Hi Julien,
>
> On 01/10/2019 03:23 AM, Julien Thierry wrote:
>> Hi Jeremy,
>>
>> On 09/01/2019 23:55, Jeremy Linton wrote:
>>> Display the mitigation status if active; otherwise
>>> assume the CPU is safe unless it doesn't have CSV3
>>> and isn't in our whitelist.
>>>
>>> Signed-off-by: Jeremy Linton <[email protected]>
>>> ---
>>>   arch/arm64/kernel/cpufeature.c | 32 +++++++++++++++++++++++++++-----
>>>   1 file changed, 27 insertions(+), 5 deletions(-)
>>>
>>> diff --git a/arch/arm64/kernel/cpufeature.c
>>> b/arch/arm64/kernel/cpufeature.c
>>> index ab784d7a0083..ef7bbc49ef78 100644
>>> --- a/arch/arm64/kernel/cpufeature.c
>>> +++ b/arch/arm64/kernel/cpufeature.c
>>> @@ -944,8 +944,12 @@ has_useable_cnp(const struct
>>> arm64_cpu_capabilities *entry, int scope)
>>>       return has_cpuid_feature(entry, scope);
>>>   }
>>>   +/* default value is invalid until unmap_kernel_at_el0() runs */
>>> +static bool __meltdown_safe = true;
>>> +
>>>   #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>>>   static int __kpti_forced; /* 0: not forced, >0: forced on, <0:
>>> forced off */
>>> +extern uint arm64_requested_vuln_attrs;
>>>     static bool is_cpu_meltdown_safe(void)
>>>   {
>>> @@ -972,6 +976,14 @@ static bool unmap_kernel_at_el0(const struct
>>> arm64_cpu_capabilities *entry,
>>>   {
>>>       char const *str = "command line option";
>>>   +    bool meltdown_safe = is_cpu_meltdown_safe() ||
>>> +        has_cpuid_feature(entry, scope);
>>> +
>>> +    if (!meltdown_safe)
>>> +        __meltdown_safe = false;
>>> +
>>> +    arm64_requested_vuln_attrs |= VULN_MELTDOWN;
>>> +
>>>       /*
>>>        * For reasons that aren't entirely clear, enabling KPTI on Cavium
>>>        * ThunderX leads to apparent I-cache corruption of kernel
>>> text, which
>>> @@ -993,11 +1005,7 @@ static bool unmap_kernel_at_el0(const struct
>>> arm64_cpu_capabilities *entry,
>>>       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
>>>           return true;
>>>   -    if (is_cpu_meltdown_safe())
>>> -        return false;
>>> -
>>> -    /* Defer to CPU feature registers */
>>> -    return !has_cpuid_feature(entry, scope);
>>> +    return !meltdown_safe;
>>>   }
>>>     static void
>>> @@ -2065,3 +2073,17 @@ static int __init enable_mrs_emulation(void)
>>>   }
>>>     core_initcall(enable_mrs_emulation);
>>> +
>>> +#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
>>> +ssize_t cpu_show_meltdown(struct device *dev, struct
>>> device_attribute *attr,
>>> +        char *buf)
>>> +{
>>> +    if (arm64_kernel_unmapped_at_el0())
>>> +        return sprintf(buf, "Mitigation: KPTI\n");
>>> +
>>> +    if (__meltdown_safe)
>>> +        return sprintf(buf, "Not affected\n");
>>
>> An issue I see is that we don't even bother to check whether the CPUs
>> are meltdown safe if CONFIG_UNMAP_KERNEL_AT_EL0 is not defined, but
>> here we'll advertise that the system is meltdown safe.
>
> That check isn't necessary anymore because the sysfs attribute is only
> populated if unmap_kernel_at_el0() runs (assuming I haven't messed
> something up). That was Dave/Will's suggestion in the last thread about
> how to handle this case.
>

Oh right, I missed that bit. Sorry for the noise.

>>
>> I think that checking whether we know the CPUs are meltdown safe
>> should be separated from whether the mitigation is applied.
>>
>> Someone who thinks their CPUs are in the whitelist might want to
>> compile out the code that does KPTI, but it would be good to give them
>> a proper diagnostic of whether they were wrong or not.
>>
>> Cheers,
>>
>

--
Julien Thierry

2019-01-12 10:43:46

by Stefan Wahren

Subject: Re: [PATCH v3 3/7] arm64: kpti: move check for non-vulnerable CPUs to a function

Hi Jeremy,

> Jeremy Linton <[email protected]> hat am 10. Januar 2019 um 00:55 geschrieben:
>
>
> From: Mian Yousaf Kaukab <[email protected]>
>
> Add is_cpu_meltdown_safe(), which checks the CPU against a whitelist
> of known safe cores.
>
> Signed-off-by: Mian Yousaf Kaukab <[email protected]>
> [Moved location of function]
> Signed-off-by: Jeremy Linton <[email protected]>
> ---
> arch/arm64/kernel/cpufeature.c | 15 +++++++++++----
> 1 file changed, 11 insertions(+), 4 deletions(-)

I only want to inform you that this patch doesn't apply cleanly against linux-next as of Friday.

Best regards
Stefan

2019-01-14 10:03:50

by Suzuki K Poulose

Subject: Re: [PATCH v3 1/7] sysfs/cpu: Allow individual architectures to select vulnerabilities



On 09/01/2019 23:55, Jeremy Linton wrote:
> As suggested on the list, https://lkml.org/lkml/2019/1/4/282, there are
> a number of cases where it's useful for a system to avoid exporting a
> sysfs entry for a given vulnerability. This set adds an architecture
> specific callback which returns the bitmap of vulnerabilities the
> architecture would like to advertise.
>
> Signed-off-by: Jeremy Linton <[email protected]>
> Cc: Greg Kroah-Hartman <[email protected]>
> Cc: Rafael J. Wysocki <[email protected]>
> Cc: Thomas Gleixner <[email protected]>
> Cc: Josh Poimboeuf <[email protected]>
> Cc: Konrad Rzeszutek Wilk <[email protected]>
> Cc: Ingo Molnar <[email protected]>
> Cc: Waiman Long <[email protected]>
> Cc: Andi Kleen <[email protected]>
> Cc: Jiri Kosina <[email protected]>
> ---
> drivers/base/cpu.c | 19 +++++++++++++++++++
> include/linux/cpu.h | 7 +++++++
> 2 files changed, 26 insertions(+)
>
> diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
> index eb9443d5bae1..35f6dfb24cd6 100644
> --- a/drivers/base/cpu.c
> +++ b/drivers/base/cpu.c
> @@ -561,6 +561,11 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
> NULL
> };
>
> +uint __weak arch_supported_vuln_attr_fields(void)
> +{
> + return VULN_MELTDOWN|VULN_SPECTREV1|VULN_SPECTREV2|VULN_SSB|VULN_L1TF;
> +}
> +
> static const struct attribute_group cpu_root_vulnerabilities_group = {
> .name = "vulnerabilities",
> .attrs = cpu_root_vulnerabilities_attrs,
> @@ -568,6 +573,20 @@ static const struct attribute_group cpu_root_vulnerabilities_group = {
>
> static void __init cpu_register_vulnerabilities(void)
> {
> + int fld;
> + int max_fields = ARRAY_SIZE(cpu_root_vulnerabilities_attrs) - 1;
> + struct attribute **hd = cpu_root_vulnerabilities_attrs;
> + uint enabled_fields = arch_supported_vuln_attr_fields();
> +
> + /* only enable entries requested by the arch code */
> + for (fld = 0; fld < max_fields; fld++) {
> + if (enabled_fields & 1 << fld) {
> + *hd = cpu_root_vulnerabilities_attrs[fld];
> + hd++;
> + }
> + }
> + *hd = NULL;
> +

nit: Could we use the "is_visible" callback in the attribute group to check
this dynamically?
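
e.g. a rough, untested sketch of the idea (the index argument is the
attribute's position in the attrs array, which matches the VULN_* bit
order in this series):

static umode_t vuln_attr_is_visible(struct kobject *kobj,
				    struct attribute *attr, int index)
{
	/* hide the entries the arch code did not request */
	if (arch_supported_vuln_attr_fields() & (1 << index))
		return attr->mode;
	return 0;
}

static const struct attribute_group cpu_root_vulnerabilities_group = {
	.name	    = "vulnerabilities",
	.attrs	    = cpu_root_vulnerabilities_attrs,
	.is_visible = vuln_attr_is_visible,
};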

> if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
> &cpu_root_vulnerabilities_group))
> pr_err("Unable to register CPU vulnerabilities\n");
> diff --git a/include/linux/cpu.h b/include/linux/cpu.h
> index 218df7f4d3e1..5e45814bcc24 100644
> --- a/include/linux/cpu.h
> +++ b/include/linux/cpu.h
> @@ -189,4 +189,11 @@ static inline void cpu_smt_check_topology_early(void) { }
> static inline void cpu_smt_check_topology(void) { }
> #endif
>
> +/* generic cpu vulnerability attributes */
> +#define VULN_MELTDOWN 0x01
> +#define VULN_SPECTREV1 0x02
> +#define VULN_SPECTREV2 0x04
> +#define VULN_SSB 0x08
> +#define VULN_L1TF 0x10

nit: May use BIT()?
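
e.g. (equivalent values, untested):

#define VULN_MELTDOWN	BIT(0)
#define VULN_SPECTREV1	BIT(1)
#define VULN_SPECTREV2	BIT(2)
#define VULN_SSB	BIT(3)
#define VULN_L1TF	BIT(4)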

Cheers
Suzuki

2019-01-14 10:17:56

by Marc Zyngier

Subject: Re: [PATCH v3 6/7] arm64: add sysfs vulnerability show for speculative store bypass

On 09/01/2019 23:55, Jeremy Linton wrote:
> Return status based on ssbd_state and the arm64 SSBS feature. If
> the mitigation is disabled, or the firmware isn't responding then
> return the expected machine state based on a new blacklist of known
> vulnerable cores.
>
> Signed-off-by: Jeremy Linton <[email protected]>
> ---
> arch/arm64/kernel/cpu_errata.c | 48 ++++++++++++++++++++++++++++++++++
> 1 file changed, 48 insertions(+)
>
> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
> index ee286d606d9b..c8ff96158b94 100644
> --- a/arch/arm64/kernel/cpu_errata.c
> +++ b/arch/arm64/kernel/cpu_errata.c
> @@ -288,6 +288,7 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
> DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
>
> int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
> +static bool __ssb_safe = true;
>
> static const struct ssbd_options {
> const char *str;
> @@ -385,10 +386,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
> {
> struct arm_smccc_res res;
> bool required = true;
> + bool is_vul;
> s32 val;
>
> WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
>
> + is_vul = is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
> +
> + if (is_vul)
> + __ssb_safe = false;
> +
> + arm64_requested_vuln_attrs |= VULN_SSB;
> +
> if (this_cpu_has_cap(ARM64_SSBS)) {
> required = false;
> goto out_printmsg;
> @@ -422,6 +431,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
> ssbd_state = ARM64_SSBD_UNKNOWN;
> return false;
>
> + /* machines with mixed mitigation requirements must not return this */
> case SMCCC_RET_NOT_REQUIRED:
> pr_info_once("%s mitigation not required\n", entry->desc);
> ssbd_state = ARM64_SSBD_MITIGATED;
> @@ -476,6 +486,17 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
>
> return required;
> }
> +
> +/* known vulnerable cores */
> +static const struct midr_range arm64_ssb_cpus[] = {
> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
> + {},
> +};
> +
> #endif /* CONFIG_ARM64_SSBD */
>
> static void __maybe_unused
> @@ -762,6 +783,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
> .capability = ARM64_SSBD,
> .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
> .matches = has_ssbd_mitigation,
> + .midr_range_list = arm64_ssb_cpus,
> },
> #endif
> #ifdef CONFIG_ARM64_ERRATUM_1188873
> @@ -809,4 +831,30 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
> return sprintf(buf, "Vulnerable\n");
> }
>
> +ssize_t cpu_show_spec_store_bypass(struct device *dev,
> + struct device_attribute *attr, char *buf)
> +{
> + /*
> + * Two assumptions: first, that arm64_get_ssbd_state() reflects the
> + * worst case for heterogeneous machines; second, that if SSBS is
> + * supported, it is supported by all cores.
> + */
> + switch (arm64_get_ssbd_state()) {
> + case ARM64_SSBD_MITIGATED:
> + return sprintf(buf, "Not affected\n");
> +
> + case ARM64_SSBD_KERNEL:
> + case ARM64_SSBD_FORCE_ENABLE:
> + if (cpus_have_cap(ARM64_SSBS))
> + return sprintf(buf, "Not affected\n");
> + return sprintf(buf,
> + "Mitigation: Speculative Store Bypass disabled\n");
> + }
> +
> + if (__ssb_safe)
> + return sprintf(buf, "Not affected\n");

The kbuild robot reports that this fails if CONFIG_ARM64_SSBD is not
selected. What should we print in this case? "Vulnerable"? Or "Unknown"?

> +
> + return sprintf(buf, "Vulnerable\n");
> +}
> +
> #endif
>

Thanks,

M.
--
Jazz is not dead. It just smells funny...

2019-01-14 11:33:29

by Suzuki K Poulose

Subject: Re: [PATCH v3 3/7] arm64: kpti: move check for non-vulnerable CPUs to a function

Hi Jeremy,

On 09/01/2019 23:55, Jeremy Linton wrote:
> From: Mian Yousaf Kaukab <[email protected]>
>
> Add is_cpu_meltdown_safe(), which checks the CPU against a whitelist
> of known safe cores.
>
> Signed-off-by: Mian Yousaf Kaukab <[email protected]>
> [Moved location of function]
> Signed-off-by: Jeremy Linton <[email protected]>
> ---
> arch/arm64/kernel/cpufeature.c | 15 +++++++++++----
> 1 file changed, 11 insertions(+), 4 deletions(-)
>
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index 4f272399de89..ab784d7a0083 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -947,8 +947,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
> #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
> static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
>
> -static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
> - int scope)
> +static bool is_cpu_meltdown_safe(void)
> {
> /* List of CPUs that are not vulnerable and don't need KPTI */
> static const struct midr_range kpti_safe_list[] = {
> @@ -962,6 +961,15 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
> MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
> { /* sentinel */ }
> };
> + if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))

nit: Does it make sense to rename the list to "meltdown_safe_list", to match
the function name?

Also, you may do:

return is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);

Either way

Reviewed-by: Suzuki K Poulose <[email protected]>

2019-01-14 16:39:15

by Jeremy Linton

Subject: Re: [PATCH v3 6/7] arm64: add sysfs vulnerability show for speculative store bypass

Hi,

On 01/14/2019 04:15 AM, Marc Zyngier wrote:
> On 09/01/2019 23:55, Jeremy Linton wrote:
>> Return status based on ssbd_state and the arm64 SSBS feature. If
>> the mitigation is disabled, or the firmware isn't responding then
>> return the expected machine state based on a new blacklist of known
>> vulnerable cores.
>>
>> Signed-off-by: Jeremy Linton <[email protected]>
>> ---
>> arch/arm64/kernel/cpu_errata.c | 48 ++++++++++++++++++++++++++++++++++
>> 1 file changed, 48 insertions(+)
>>
>> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
>> index ee286d606d9b..c8ff96158b94 100644
>> --- a/arch/arm64/kernel/cpu_errata.c
>> +++ b/arch/arm64/kernel/cpu_errata.c
>> @@ -288,6 +288,7 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
>> DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
>>
>> int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
>> +static bool __ssb_safe = true;
>>
>> static const struct ssbd_options {
>> const char *str;
>> @@ -385,10 +386,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
>> {
>> struct arm_smccc_res res;
>> bool required = true;
>> + bool is_vul;
>> s32 val;
>>
>> WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
>>
>> + is_vul = is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
>> +
>> + if (is_vul)
>> + __ssb_safe = false;
>> +
>> + arm64_requested_vuln_attrs |= VULN_SSB;
>> +
>> if (this_cpu_has_cap(ARM64_SSBS)) {
>> required = false;
>> goto out_printmsg;
>> @@ -422,6 +431,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
>> ssbd_state = ARM64_SSBD_UNKNOWN;
>> return false;
>>
>> + /* machines with mixed mitigation requirements must not return this */
>> case SMCCC_RET_NOT_REQUIRED:
>> pr_info_once("%s mitigation not required\n", entry->desc);
>> ssbd_state = ARM64_SSBD_MITIGATED;
>> @@ -476,6 +486,17 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
>>
>> return required;
>> }
>> +
>> +/* known vulnerable cores */
>> +static const struct midr_range arm64_ssb_cpus[] = {
>> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
>> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
>> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
>> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
>> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
>> + {},
>> +};
>> +
>> #endif /* CONFIG_ARM64_SSBD */
>>
>> static void __maybe_unused
>> @@ -762,6 +783,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
>> .capability = ARM64_SSBD,
>> .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
>> .matches = has_ssbd_mitigation,
>> + .midr_range_list = arm64_ssb_cpus,
>> },
>> #endif
>> #ifdef CONFIG_ARM64_ERRATUM_1188873
>> @@ -809,4 +831,30 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
>> return sprintf(buf, "Vulnerable\n");
>> }
>>
>> +ssize_t cpu_show_spec_store_bypass(struct device *dev,
>> + struct device_attribute *attr, char *buf)
>> +{
>> + /*
>> + * Two assumptions: first, that arm64_get_ssbd_state() reflects the
>> + * worst case for heterogeneous machines; second, that if SSBS is
>> + * supported, it is supported by all cores.
>> + */
>> + switch (arm64_get_ssbd_state()) {
>> + case ARM64_SSBD_MITIGATED:
>> + return sprintf(buf, "Not affected\n");
>> +
>> + case ARM64_SSBD_KERNEL:
>> + case ARM64_SSBD_FORCE_ENABLE:
>> + if (cpus_have_cap(ARM64_SSBS))
>> + return sprintf(buf, "Not affected\n");
>> + return sprintf(buf,
>> + "Mitigation: Speculative Store Bypass disabled\n");
>> + }
>> +
>> + if (__ssb_safe)
>> + return sprintf(buf, "Not affected\n");
>
> The kbuild robot reports that this fails if CONFIG_ARM64_SSBD is not
> selected. What should we print in this case? "Vulnerable"? Or "Unknown"?

The immediate fix is that the __ssb_safe variable should be in its own
conditional block, guarded by CONFIG_GENERIC_CPU_VULNERABILITIES ||
CONFIG_ARM64_SSBD. If the mitigation isn't built in, then this code
won't be run anyway because the sysfs entry won't be populated.
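
i.e. something like (sketch):

#if defined(CONFIG_GENERIC_CPU_VULNERABILITIES) || defined(CONFIG_ARM64_SSBD)
static bool __ssb_safe = true;
#endif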


But these CONFIG_ conditionals are less than ideal (and would be even
uglier if they were made more efficient). My own opinion at this point
is that we should really remove the compile-time configs and leave the
mitigation built in all the time. The raw code is fairly small, and we
could add the nospectre_v2 command line option so that users can
choose to disable the mitigation at runtime. That would also remove the
need to modify the core cpu vulnerabilities sysfs code.
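
For example, the runtime disable could be wired up roughly like this
(hypothetical sketch; the series as posted doesn't parse nospectre_v2
on arm64):

static bool nospectre_v2;

static int __init parse_nospectre_v2(char *str)
{
	nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);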


2019-01-14 17:08:49

by Marc Zyngier

Subject: Re: [PATCH v3 6/7] arm64: add sysfs vulnerability show for speculative store bypass

On 14/01/2019 16:37, Jeremy Linton wrote:
> Hi,
>
> On 01/14/2019 04:15 AM, Marc Zyngier wrote:
>> On 09/01/2019 23:55, Jeremy Linton wrote:
>>> Return status based on ssbd_state and the arm64 SSBS feature. If
>>> the mitigation is disabled, or the firmware isn't responding then
>>> return the expected machine state based on a new blacklist of known
>>> vulnerable cores.
>>>
>>> Signed-off-by: Jeremy Linton <[email protected]>
>>> ---
>>> arch/arm64/kernel/cpu_errata.c | 48 ++++++++++++++++++++++++++++++++++
>>> 1 file changed, 48 insertions(+)
>>>
>>> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
>>> index ee286d606d9b..c8ff96158b94 100644
>>> --- a/arch/arm64/kernel/cpu_errata.c
>>> +++ b/arch/arm64/kernel/cpu_errata.c
>>> @@ -288,6 +288,7 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
>>> DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
>>>
>>> int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
>>> +static bool __ssb_safe = true;
>>>
>>> static const struct ssbd_options {
>>> const char *str;
>>> @@ -385,10 +386,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
>>> {
>>> struct arm_smccc_res res;
>>> bool required = true;
>>> + bool is_vul;
>>> s32 val;
>>>
>>> WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
>>>
>>> + is_vul = is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
>>> +
>>> + if (is_vul)
>>> + __ssb_safe = false;
>>> +
>>> + arm64_requested_vuln_attrs |= VULN_SSB;
>>> +
>>> if (this_cpu_has_cap(ARM64_SSBS)) {
>>> required = false;
>>> goto out_printmsg;
>>> @@ -422,6 +431,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
>>> ssbd_state = ARM64_SSBD_UNKNOWN;
>>> return false;
>>>
>>> + /* machines with mixed mitigation requirements must not return this */
>>> case SMCCC_RET_NOT_REQUIRED:
>>> pr_info_once("%s mitigation not required\n", entry->desc);
>>> ssbd_state = ARM64_SSBD_MITIGATED;
>>> @@ -476,6 +486,17 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
>>>
>>> return required;
>>> }
>>> +
>>> +/* known vulnerable cores */
>>> +static const struct midr_range arm64_ssb_cpus[] = {
>>> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
>>> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
>>> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
>>> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
>>> + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
>>> + {},
>>> +};
>>> +
>>> #endif /* CONFIG_ARM64_SSBD */
>>>
>>> static void __maybe_unused
>>> @@ -762,6 +783,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
>>> .capability = ARM64_SSBD,
>>> .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
>>> .matches = has_ssbd_mitigation,
>>> + .midr_range_list = arm64_ssb_cpus,
>>> },
>>> #endif
>>> #ifdef CONFIG_ARM64_ERRATUM_1188873
>>> @@ -809,4 +831,30 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
>>> return sprintf(buf, "Vulnerable\n");
>>> }
>>>
>>> +ssize_t cpu_show_spec_store_bypass(struct device *dev,
>>> + struct device_attribute *attr, char *buf)
>>> +{
>>> + /*
>>> + * Two assumptions: first, that arm64_get_ssbd_state() reflects the
>>> + * worst case for heterogeneous machines; second, that if SSBS is
>>> + * supported, it is supported by all cores.
>>> + */
>>> + switch (arm64_get_ssbd_state()) {
>>> + case ARM64_SSBD_MITIGATED:
>>> + return sprintf(buf, "Not affected\n");
>>> +
>>> + case ARM64_SSBD_KERNEL:
>>> + case ARM64_SSBD_FORCE_ENABLE:
>>> + if (cpus_have_cap(ARM64_SSBS))
>>> + return sprintf(buf, "Not affected\n");
>>> + return sprintf(buf,
>>> + "Mitigation: Speculative Store Bypass disabled\n");
>>> + }
>>> +
>>> + if (__ssb_safe)
>>> + return sprintf(buf, "Not affected\n");
>>
>> The kbuild robot reports that this fails if CONFIG_ARM64_SSBD is not
>> selected. What should we print in this case? "Vulnerable"? Or "Unknown"?
>
> The immediate fix is that the __ssb_safe variable should be in its own
> conditional block which is CONFIG_GENERIC_CPU_VULNERABILITIES ||
> CONFIG_ARM64_SSBD. If the mitigation isn't built in then this code won't
> be run anyway because the sysfs entry won't be populated.

But in that case, we should probably assume that the system is
vulnerable, and use a different default value for __ssb_safe.
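
i.e. roughly (sketch):

#ifdef CONFIG_ARM64_SSBD
static bool __ssb_safe = true;
#else
/* no detection code built in, so assume the worst */
static bool __ssb_safe = false;
#endif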

> But, these CONFIG_ conditionals are less than ideal (and would be even
> uglier if they were made more efficient). My own opinion at this point
> is that we should really remove the compile time configs and leave the
> mitigation built all the time. The raw code is fairly small, and we
> could add in the nospectre_v2 command line options so that users can
> choose to runtime disable them. That would also remove the need to
> modify the core cpu vulnerabilities sysfs code.

That'd work for me. The whole thing is now an intractable mess, and I'd
welcome some level of simplification.

Thanks,

M.
--
Jazz is not dead. It just smells funny...

2019-01-16 11:37:33

by Stefan Wahren

Subject: Re: [PATCH v3 0/7] arm64: add system vulnerability sysfs entries

Hi Jeremy,

> Jeremy Linton <[email protected]> hat am 10. Januar 2019 um 00:55 geschrieben:
>
>
> Arm64 machines should be displaying a human readable
> vulnerability status to speculative execution attacks in
> /sys/devices/system/cpu/vulnerabilities
>
> This series enables that behavior by providing the expected
> functions. Those functions expose the cpu errata and feature
> states, as well as whether firmware is responding appropriately
> to display the overall machine status. This means that in a
> heterogeneous machine we will only claim the machine is mitigated
> or safe if we are confident all booted cores are safe or
> mitigated.
>

I applied this v3 series and Marc's v2 series.

Now I'm getting the following on a Raspberry Pi 3 B+:

meltdown:Not affected
spec_store_bypass:Not affected
spectre_v1:Mitigation: __user pointer sanitization

So the entries l1tf and spectre_v2 disappeared.

Stefan

2019-01-16 13:27:35

by Jeremy Linton

Subject: Re: [PATCH v3 0/7] arm64: add system vulnerability sysfs entries

Hi,

On 01/15/2019 01:50 PM, Stefan Wahren wrote:
> Hi Jeremy,
>
>> Jeremy Linton <[email protected]> hat am 10. Januar 2019 um 00:55 geschrieben:
>>
>>
>> Arm64 machines should be displaying a human readable
>> vulnerability status to speculative execution attacks in
>> /sys/devices/system/cpu/vulnerabilities
>>
>> This series enables that behavior by providing the expected
>> functions. Those functions expose the cpu errata and feature
>> states, as well as whether firmware is responding appropriately
>> to display the overall machine status. This means that in a
>> heterogeneous machine we will only claim the machine is mitigated
>> or safe if we are confident all booted cores are safe or
>> mitigated.
>>
>
> I applied this v3 series and Marc's v2 series.
>
> Now I'm getting the following on a Raspberry Pi 3 B+:
>
> meltdown:Not affected
> spec_store_bypass:Not affected
> spectre_v1:Mitigation: __user pointer sanitization
>
> So the entries l1tf and spectre_v2 disappeared.

Yes, the l1tf entry should be gone.

I believe there is a problem with the "1/2 advertise.." patch: the
'arm64_requested_vuln_attrs |=' line needs to be hoisted to the top of
check_branch_predictor(), and the '__spectrev2_safe = false' line needs
to be hoisted 6 lines, to immediately above "/* Fallback to firmware
detection */".

That should re-enable the spectre_v2 entry.
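
Roughly, against the v3 version of the function (untested sketch; the
"1/2 advertise.." patch under discussion is not shown in this thread):

static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	bool is_vul;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* request the sysfs attribute before anything else */
	arm64_requested_vuln_attrs |= VULN_SPECTREV2;

	is_vul = is_midr_in_range_list(read_cpuid_id(),
				       entry->midr_range_list);
	if (is_vul)
		__spectrev2_safe = false;

	return is_vul;
}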




2019-01-18 15:49:12

by Greg Kroah-Hartman

Subject: Re: [PATCH v3 1/7] sysfs/cpu: Allow individual architectures to select vulnerabilities

On Mon, Jan 14, 2019 at 10:02:21AM +0000, Suzuki K Poulose wrote:
>
>
> On 09/01/2019 23:55, Jeremy Linton wrote:
> > As suggested on the list, https://lkml.org/lkml/2019/1/4/282, there are
> > a number of cases where it's useful for a system to avoid exporting a
> > sysfs entry for a given vulnerability. This set adds an architecture
> > specific callback which returns the bitmap of vulnerabilities the
> > architecture would like to advertise.
> >
> > Signed-off-by: Jeremy Linton <[email protected]>
> > Cc: Greg Kroah-Hartman <[email protected]>
> > Cc: Rafael J. Wysocki <[email protected]>
> > Cc: Thomas Gleixner <[email protected]>
> > Cc: Josh Poimboeuf <[email protected]>
> > Cc: Konrad Rzeszutek Wilk <[email protected]>
> > Cc: Ingo Molnar <[email protected]>
> > Cc: Waiman Long <[email protected]>
> > Cc: Andi Kleen <[email protected]>
> > Cc: Jiri Kosina <[email protected]>
> > ---
> > drivers/base/cpu.c | 19 +++++++++++++++++++
> > include/linux/cpu.h | 7 +++++++
> > 2 files changed, 26 insertions(+)
> >
> > diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
> > index eb9443d5bae1..35f6dfb24cd6 100644
> > --- a/drivers/base/cpu.c
> > +++ b/drivers/base/cpu.c
> > @@ -561,6 +561,11 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
> > NULL
> > };
> > +uint __weak arch_supported_vuln_attr_fields(void)
> > +{
> > + return VULN_MELTDOWN|VULN_SPECTREV1|VULN_SPECTREV2|VULN_SSB|VULN_L1TF;
> > +}
> > +
> > static const struct attribute_group cpu_root_vulnerabilities_group = {
> > .name = "vulnerabilities",
> > .attrs = cpu_root_vulnerabilities_attrs,
> > @@ -568,6 +573,20 @@ static const struct attribute_group cpu_root_vulnerabilities_group = {
> > static void __init cpu_register_vulnerabilities(void)
> > {
> > + int fld;
> > + int max_fields = ARRAY_SIZE(cpu_root_vulnerabilities_attrs) - 1;
> > + struct attribute **hd = cpu_root_vulnerabilities_attrs;
> > + uint enabled_fields = arch_supported_vuln_attr_fields();
> > +
> > + /* only enable entries requested by the arch code */
> > + for (fld = 0; fld < max_fields; fld++) {
> > + if (enabled_fields & 1 << fld) {
> > + *hd = cpu_root_vulnerabilities_attrs[fld];
> > + hd++;
> > + }
> > + }
> > + *hd = NULL;
> > +
>
> nit: Could we use "is_visible" callback in the attribute group to check this
> dynamically ?

You should, that is what it is there for.

thanks,

greg k-h

2019-01-18 16:33:04

by Jeremy Linton

Subject: Re: [PATCH v3 1/7] sysfs/cpu: Allow individual architectures to select vulnerabilities

On 01/18/2019 09:46 AM, Greg KH wrote:
> On Mon, Jan 14, 2019 at 10:02:21AM +0000, Suzuki K Poulose wrote:
>>
>>
>> On 09/01/2019 23:55, Jeremy Linton wrote:
>>> As suggested on the list, https://lkml.org/lkml/2019/1/4/282, there are
>>> a number of cases where it's useful for a system to avoid exporting a
>>> sysfs entry for a given vulnerability. This set adds an architecture
>>> specific callback which returns the bitmap of vulnerabilities the
>>> architecture would like to advertise.
>>>
>>> Signed-off-by: Jeremy Linton <[email protected]>
>>> Cc: Greg Kroah-Hartman <[email protected]>
>>> Cc: Rafael J. Wysocki <[email protected]>
>>> Cc: Thomas Gleixner <[email protected]>
>>> Cc: Josh Poimboeuf <[email protected]>
>>> Cc: Konrad Rzeszutek Wilk <[email protected]>
>>> Cc: Ingo Molnar <[email protected]>
>>> Cc: Waiman Long <[email protected]>
>>> Cc: Andi Kleen <[email protected]>
>>> Cc: Jiri Kosina <[email protected]>
>>> ---
>>> drivers/base/cpu.c | 19 +++++++++++++++++++
>>> include/linux/cpu.h | 7 +++++++
>>> 2 files changed, 26 insertions(+)
>>>
>>> diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
>>> index eb9443d5bae1..35f6dfb24cd6 100644
>>> --- a/drivers/base/cpu.c
>>> +++ b/drivers/base/cpu.c
>>> @@ -561,6 +561,11 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
>>> NULL
>>> };
>>> +uint __weak arch_supported_vuln_attr_fields(void)
>>> +{
>>> + return VULN_MELTDOWN|VULN_SPECTREV1|VULN_SPECTREV2|VULN_SSB|VULN_L1TF;
>>> +}
>>> +
>>> static const struct attribute_group cpu_root_vulnerabilities_group = {
>>> .name = "vulnerabilities",
>>> .attrs = cpu_root_vulnerabilities_attrs,
>>> @@ -568,6 +573,20 @@ static const struct attribute_group cpu_root_vulnerabilities_group = {
>>> static void __init cpu_register_vulnerabilities(void)
>>> {
>>> + int fld;
>>> + int max_fields = ARRAY_SIZE(cpu_root_vulnerabilities_attrs) - 1;
>>> + struct attribute **hd = cpu_root_vulnerabilities_attrs;
>>> + uint enabled_fields = arch_supported_vuln_attr_fields();
>>> +
>>> + /* only enable entries requested by the arch code */
>>> + for (fld = 0; fld < max_fields; fld++) {
>>> + if (enabled_fields & 1 << fld) {
>>> + *hd = cpu_root_vulnerabilities_attrs[fld];
>>> + hd++;
>>> + }
>>> + }
>>> + *hd = NULL;
>>> +
>>
>> nit: Could we use "is_visible" callback in the attribute group to check this
>> dynamically ?
>
> You should, that is what it is there for.


Yes, it's a good suggestion. OTOH, I think the plan is to drop this
functionality altogether by removing the ability to build kernels
without the vulnerability checking/processor whitelists. That will
simplify some of the #ifdef'ing going on as well.





2019-01-18 16:38:06

by Jeremy Linton

Subject: Re: [PATCH v3 3/7] arm64: kpti: move check for non-vulnerable CPUs to a function

Hi,

On 01/14/2019 05:32 AM, Suzuki K Poulose wrote:
> Hi Jeremy,
>
> On 09/01/2019 23:55, Jeremy Linton wrote:
>> From: Mian Yousaf Kaukab <[email protected]>
>>
>> Add is_cpu_meltdown_safe(), which checks the CPU against a whitelist
>> of known safe cores.
>>
>> Signed-off-by: Mian Yousaf Kaukab <[email protected]>
>> [Moved location of function]
>> Signed-off-by: Jeremy Linton <[email protected]>
>> ---
>>   arch/arm64/kernel/cpufeature.c | 15 +++++++++++----
>>   1 file changed, 11 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/arm64/kernel/cpufeature.c
>> b/arch/arm64/kernel/cpufeature.c
>> index 4f272399de89..ab784d7a0083 100644
>> --- a/arch/arm64/kernel/cpufeature.c
>> +++ b/arch/arm64/kernel/cpufeature.c
>> @@ -947,8 +947,7 @@ has_useable_cnp(const struct
>> arm64_cpu_capabilities *entry, int scope)
>>   #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>>   static int __kpti_forced; /* 0: not forced, >0: forced on, <0:
>> forced off */
>> -static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities
>> *entry,
>> -                int scope)
>> +static bool is_cpu_meltdown_safe(void)
>>   {
>>       /* List of CPUs that are not vulnerable and don't need KPTI */
>>       static const struct midr_range kpti_safe_list[] = {
>> @@ -962,6 +961,15 @@ static bool unmap_kernel_at_el0(const struct
>> arm64_cpu_capabilities *entry,
>>           MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
>>           { /* sentinel */ }
>>       };
>> +    if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
>
> nit: Does it make sense to rename the list to "meltdown_safe_list", to
> match the
> function name ?
>
> Also, you may do:
>
>     return is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
>
> Either way
>
> Reviewed-by: Suzuki K Poulose <[email protected]>

Hi again,

Part of the delay in responding to this one has been that
is_meltdown_safe() was originally used in two places (which is why it
was broken out). But that isn't true anymore, and this patch is
effectively just fluff, so it seemed appropriate for the chopping block
too, which is what I'm planning.
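
For completeness, the simplification Suzuki suggests would collapse the
helper to a single return statement. A sketch against the quoted patch,
with the rename applied and the whitelist abbreviated to the one entry
visible above:

static bool is_cpu_meltdown_safe(void)
{
	/* List of CPUs that are not vulnerable and don't need KPTI */
	static const struct midr_range meltdown_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		{ /* sentinel */ }
	};

	return is_midr_in_range_list(read_cpuid_id(), meltdown_safe_list);
}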

2019-01-18 18:08:41

by Stefan Wahren

[permalink] [raw]
Subject: Re: [PATCH v3 0/7] arm64: add system vulnerability sysfs entries

Hi,

> Jeremy Linton <[email protected]> wrote on 15 January 2019 at 22:21:
>
>
> Hi,
>
> On 01/15/2019 01:50 PM, Stefan Wahren wrote:
> > Hi Jeremy,
> >
> >> Jeremy Linton <[email protected]> wrote on 10 January 2019 at 00:55:
> >>
> >>
> >> Arm64 machines should be displaying a human readable
> >> vulnerability status to speculative execution attacks in
> >> /sys/devices/system/cpu/vulnerabilities
> >>
> >> This series enables that behavior by providing the expected
> >> functions. Those functions expose the cpu errata and feature
> >> states, as well as whether firmware is responding appropriately
> >> to display the overall machine status. This means that in a
> >> heterogeneous machine we will only claim the machine is mitigated
> >> or safe if we are confident all booted cores are safe or
> >> mitigated.
> >>
> >
> > I applied this v3 series and Marc's v2 series.
> >
> > Now I'm getting the following on a Raspberry Pi 3 B+:
> >
> > meltdown:Not affected
> > spec_store_bypass:Not affected
> > spectre_v1:Mitigation: __user pointer sanitization
> >
> > So the entries l1tf and spectre_v2 disappeared.
>
> Yes, the l1tf entry should be gone.
>
> I believe there is a problem with the "1/2 advertise.." patch in that
> the 'arm64_requested_vuln_attrs |=' line needs to be hoisted to the top
> of check_branch_predictor(), and the '__spectrev2_safe = false' line
> needs to be hoisted six lines up, to immediately above "/* Fallback to
> firmware detection */".

A snippet or a new version would be nice.

>
> That should re-enable the spectre_v2 entry.

2019-01-18 22:24:45

by Jeremy Linton

[permalink] [raw]
Subject: Re: [PATCH v3 0/7] arm64: add system vulnerability sysfs entries

On 01/18/2019 12:05 PM, Stefan Wahren wrote:
> Hi,
>
>> Jeremy Linton <[email protected]> wrote on 15 January 2019 at 22:21:
>>
>>
>> Hi,
>>
>> On 01/15/2019 01:50 PM, Stefan Wahren wrote:
>>> Hi Jeremy,
>>>
>>>> Jeremy Linton <[email protected]> wrote on 10 January 2019 at 00:55:
>>>>
>>>>
>>>> Arm64 machines should be displaying a human readable
>>>> vulnerability status to speculative execution attacks in
>>>> /sys/devices/system/cpu/vulnerabilities
>>>>
>>>> This series enables that behavior by providing the expected
>>>> functions. Those functions expose the cpu errata and feature
>>>> states, as well as whether firmware is responding appropriately
>>>> to display the overall machine status. This means that in a
>>>> heterogeneous machine we will only claim the machine is mitigated
>>>> or safe if we are confident all booted cores are safe or
>>>> mitigated.
>>>>
>>>
> >>> I applied this v3 series and Marc's v2 series.
>>>
> >>> Now I'm getting the following on a Raspberry Pi 3 B+:
>>>
>>> meltdown:Not affected
>>> spec_store_bypass:Not affected
>>> spectre_v1:Mitigation: __user pointer sanitization
>>>
>>> So the entries l1tf and spectre_v2 disappeared.
>>
>> Yes, the l1tf entry should be gone.
>>
>> I believe there is a problem with the "1/2 advertise.." patch in that
>> the 'arm64_requested_vuln_attrs |=' line needs to be hoisted to the top
>> of check_branch_predictor(), and the '__spectrev2_safe = false' line
>> needs to be hoisted six lines up, to immediately above "/* Fallback to
>> firmware detection */".
>
> A snippet or a new version would be nice.

Sure, I've got another version, to be posted soon (probably Tue of next
week).

In the meantime, Marc's tree should work with the following fix:

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index b44f87e7360d..7cfd34b2c0e5 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -286,11 +286,15 @@ static int detect_harden_bp_fw(void)
 }
 #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
+#if defined(CONFIG_ARM64_SSBD) || \
+    defined(CONFIG_GENERIC_CPU_VULNERABILITIES)
+static bool __ssb_safe = true;
+#endif
+
 #ifdef CONFIG_ARM64_SSBD
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
-static bool __ssb_safe = true;
 
 static const struct ssbd_options {
 	const char *str;
@@ -569,6 +573,8 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 
+	arm64_requested_vuln_attrs |= VULN_SPECTREV2;
+
 	/* If the CPU has CSV2 set, we're safe */
 	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
 						 ID_AA64PFR0_CSV2_SHIFT))
@@ -578,17 +584,17 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
 	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
 		return false;
 
+	__spectrev2_safe = false;
+
 	/* Fallback to firmware detection */
 	need_wa = detect_harden_bp_fw();
 	if (!need_wa)
 		return false;
 
-	__spectrev2_safe = false;
-
 	if (need_wa < 0)
 		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
 
-	arm64_requested_vuln_attrs |= VULN_SPECTREV2;
+
 
 	return (need_wa > 0);
 }
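
For readers following the reordering, the resulting function body after
the hunks above works out to roughly the following. This is a sketch
reconstructed from the diff; the need_wa declaration and the early CSV2
return are assumed from the surrounding context rather than shown in
the hunks:

static bool
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* Request the spectre_v2 sysfs entry even when the CPU is safe */
	arm64_requested_vuln_attrs |= VULN_SPECTREV2;

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Mark unsafe before the firmware fallback can return early */
	__spectrev2_safe = false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	if (need_wa < 0)
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");

	return (need_wa > 0);
}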



2019-01-19 11:54:24

by Stefan Wahren

[permalink] [raw]
Subject: Re: [PATCH v3 0/7] arm64: add system vulnerability sysfs entries


> Jeremy Linton <[email protected]> wrote on 18 January 2019 at 23:22:
>
>
> On 01/18/2019 12:05 PM, Stefan Wahren wrote:
> > Hi,
> >
> > ...
> >
> > a snippet or a new version would be nice
>
> Sure, I've got another version, to be posted soon (probably Tue of next
> week).
>
> In the meantime, Marc's tree should work with the following fix:
>
> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
> index b44f87e7360d..7cfd34b2c0e5 100644
> --- a/arch/arm64/kernel/cpu_errata.c
> +++ b/arch/arm64/kernel/cpu_errata.c
> @@ -286,11 +286,15 @@ static int detect_harden_bp_fw(void)
>  }
>  #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
> 
> +#if defined(CONFIG_ARM64_SSBD) || \
> +    defined(CONFIG_GENERIC_CPU_VULNERABILITIES)
> +static bool __ssb_safe = true;
> +#endif
> +
>  #ifdef CONFIG_ARM64_SSBD
>  DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
> 
>  int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
> -static bool __ssb_safe = true;
> 
>  static const struct ssbd_options {
>  	const char *str;
> @@ -569,6 +573,8 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
> 
>  	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
> 
> +	arm64_requested_vuln_attrs |= VULN_SPECTREV2;
> +
>  	/* If the CPU has CSV2 set, we're safe */
>  	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
>  						 ID_AA64PFR0_CSV2_SHIFT))
> @@ -578,17 +584,17 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
>  	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
>  		return false;
> 
> +	__spectrev2_safe = false;
> +
>  	/* Fallback to firmware detection */
>  	need_wa = detect_harden_bp_fw();
>  	if (!need_wa)
>  		return false;
> 
> -	__spectrev2_safe = false;
> -
>  	if (need_wa < 0)
>  		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
> 
> -	arm64_requested_vuln_attrs |= VULN_SPECTREV2;
> +
> 
>  	return (need_wa > 0);
>  }
>
>

Fine, with these changes I'm getting the following:

meltdown:Not affected
spec_store_bypass:Not affected
spectre_v1:Mitigation: __user pointer sanitization
spectre_v2:Not affected

Thanks
Stefan