This is the start of the stable review cycle for the 4.14.275 release.
There are 27 patches in this series, all will be posted as a response
to this one. If anyone has any issues with these being applied, please
let me know.
Responses should be made by Sun, 03 Apr 2022 06:36:16 +0000.
Anything received after that time might be too late.
The whole patch series can be found in one patch at:
https://www.kernel.org/pub/linux/kernel/v4.x/stable-review/patch-4.14.275-rc1.gz
or in the git tree and branch at:
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git linux-4.14.y
and the diffstat can be found below.
thanks,
greg k-h
-------------
Pseudo-Shortlog of commits:
Greg Kroah-Hartman <[email protected]>
Linux 4.14.275-rc1
James Morse <[email protected]>
arm64: Use the clearbhb instruction in mitigations
James Morse <[email protected]>
arm64: add ID_AA64ISAR2_EL1 sys register
James Morse <[email protected]>
KVM: arm64: Allow SMCCC_ARCH_WORKAROUND_3 to be discovered and migrated
James Morse <[email protected]>
arm64: Mitigate spectre style branch history side channels
James Morse <[email protected]>
KVM: arm64: Add templates for BHB mitigation sequences
James Morse <[email protected]>
arm64: proton-pack: Report Spectre-BHB vulnerabilities as part of Spectre-v2
James Morse <[email protected]>
arm64: Add percpu vectors for EL1
James Morse <[email protected]>
arm64: entry: Add macro for reading symbol addresses from the trampoline
James Morse <[email protected]>
arm64: entry: Add vectors that have the bhb mitigation sequences
James Morse <[email protected]>
arm64: entry: Add non-kpti __bp_harden_el1_vectors for mitigations
James Morse <[email protected]>
arm64: entry: Allow the trampoline text to occupy multiple pages
James Morse <[email protected]>
arm64: entry: Make the kpti trampoline's kpti sequence optional
James Morse <[email protected]>
arm64: entry: Move trampoline macros out of ifdef'd section
James Morse <[email protected]>
arm64: entry: Don't assume tramp_vectors is the start of the vectors
James Morse <[email protected]>
arm64: entry: Allow tramp_alias to access symbols after the 4K boundary
James Morse <[email protected]>
arm64: entry: Move the trampoline data page before the text page
James Morse <[email protected]>
arm64: entry: Free up another register on kpti's tramp_exit path
James Morse <[email protected]>
arm64: entry: Make the trampoline cleanup optional
James Morse <[email protected]>
arm64: entry.S: Add ventry overflow sanity checks
Anshuman Khandual <[email protected]>
arm64: Add Cortex-X2 CPU part definition
Suzuki K Poulose <[email protected]>
arm64: Add Neoverse-N2, Cortex-A710 CPU part definition
Rob Herring <[email protected]>
arm64: Add part number for Arm Cortex-A77
Marc Zyngier <[email protected]>
arm64: Add part number for Neoverse N1
Marc Zyngier <[email protected]>
arm64: Make ARM64_ERRATUM_1188873 depend on COMPAT
Marc Zyngier <[email protected]>
arm64: Add silicon-errata.txt entry for ARM erratum 1188873
Arnd Bergmann <[email protected]>
arm64: arch_timer: avoid unused function warning
Marc Zyngier <[email protected]>
arm64: arch_timer: Add workaround for ARM erratum 1188873
-------------
Diffstat:
Documentation/arm64/silicon-errata.txt | 1 +
Makefile | 4 +-
arch/arm/include/asm/kvm_host.h | 6 +
arch/arm64/Kconfig | 24 ++
arch/arm64/include/asm/assembler.h | 34 +++
arch/arm64/include/asm/cpu.h | 1 +
arch/arm64/include/asm/cpucaps.h | 4 +-
arch/arm64/include/asm/cpufeature.h | 39 ++++
arch/arm64/include/asm/cputype.h | 20 ++
arch/arm64/include/asm/fixmap.h | 6 +-
arch/arm64/include/asm/kvm_host.h | 5 +
arch/arm64/include/asm/kvm_mmu.h | 2 +-
arch/arm64/include/asm/mmu.h | 8 +-
arch/arm64/include/asm/sections.h | 6 +
arch/arm64/include/asm/sysreg.h | 5 +
arch/arm64/include/asm/vectors.h | 74 ++++++
arch/arm64/kernel/bpi.S | 55 +++++
arch/arm64/kernel/cpu_errata.c | 395 ++++++++++++++++++++++++++++++++-
arch/arm64/kernel/cpufeature.c | 21 ++
arch/arm64/kernel/cpuinfo.c | 1 +
arch/arm64/kernel/entry.S | 196 ++++++++++++----
arch/arm64/kernel/vmlinux.lds.S | 2 +-
arch/arm64/kvm/hyp/hyp-entry.S | 4 +
arch/arm64/kvm/hyp/switch.c | 9 +-
arch/arm64/mm/mmu.c | 11 +-
drivers/clocksource/arm_arch_timer.c | 15 ++
include/linux/arm-smccc.h | 7 +
virt/kvm/arm/psci.c | 12 +
28 files changed, 909 insertions(+), 58 deletions(-)
From: James Morse <[email protected]>
commit 558c303c9734af5a813739cd284879227f7297d2 upstream.
Speculation attacks against some high-performance processors can
make use of branch history to influence future speculation.
When taking an exception from user-space, a sequence of branches
or a firmware call overwrites or invalidates the branch history.
The sequence of branches is added to the vectors, and should appear
before the first indirect branch. For systems using KPTI the sequence
is added to the kpti trampoline where it has a free register as the exit
from the trampoline is via a 'ret'. For systems not using KPTI, the same
register tricks are used to free up a register in the vectors.
For the firmware call, arch-workaround-3 clobbers 4 registers, so
there is no choice but to save them to the EL1 stack. This only happens
for entry from EL0, so if we take an exception due to the stack access,
it will not become re-entrant.
For KVM, the existing branch-predictor-hardening vectors are used.
When a spectre version of these vectors is in use, the firmware call
is sufficient to mitigate against Spectre-BHB. For the non-spectre
versions, the sequence of branches is added to the indirect vector.
Reviewed-by: Catalin Marinas <[email protected]>
Cc: <[email protected]> # <v5.17.x 72bb9dcb6c33c arm64: Add Cortex-X2 CPU part definition
Cc: <[email protected]> # <v5.16.x 2d0d656700d67 arm64: Add Neoverse-N2, Cortex-A710 CPU part definition
Cc: <[email protected]> # <v5.10.x 8a6b88e66233f arm64: Add part number for Arm Cortex-A77
[ modified for stable: moved code to cpu_errata.c, removed bitmap of
mitigations, use kvm template infrastructure ]
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/Kconfig | 10 +
arch/arm64/include/asm/assembler.h | 4
arch/arm64/include/asm/cpufeature.h | 18 ++
arch/arm64/include/asm/cputype.h | 8 +
arch/arm64/include/asm/sysreg.h | 1
arch/arm64/include/asm/vectors.h | 6
arch/arm64/kernel/cpu_errata.c | 268 +++++++++++++++++++++++++++++++++++-
arch/arm64/kvm/hyp/hyp-entry.S | 4
8 files changed, 316 insertions(+), 3 deletions(-)
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -872,6 +872,16 @@ config ARM64_SSBD
If unsure, say Y.
+config MITIGATE_SPECTRE_BRANCH_HISTORY
+ bool "Mitigate Spectre style attacks against branch history" if EXPERT
+ default y
+ depends on HARDEN_BRANCH_PREDICTOR || !KVM
+ help
+ Speculation attacks against some high-performance processors can
+ make use of branch history to influence future speculation.
+ When taking an exception from user-space, a sequence of branches
+ or a firmware call overwrites the branch history.
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -551,7 +551,9 @@ alternative_endif
.macro __mitigate_spectre_bhb_loop tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
- mov \tmp, #32
+alternative_cb spectre_bhb_patch_loop_iter
+ mov \tmp, #32 // Patched to correct the immediate
+alternative_cb_end
.Lspectre_bhb_loop\@:
b . + 4
subs \tmp, \tmp, #1
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -456,6 +456,21 @@ static inline bool cpu_supports_mixed_en
return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
+static inline bool supports_csv2p3(int scope)
+{
+ u64 pfr0;
+ u8 csv2_val;
+
+ if (scope == SCOPE_LOCAL_CPU)
+ pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
+ else
+ pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
+ ID_AA64PFR0_CSV2_SHIFT);
+ return csv2_val == 3;
+}
+
static inline bool system_supports_32bit_el0(void)
{
return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
@@ -503,6 +518,9 @@ enum mitigation_state {
};
enum mitigation_state arm64_get_spectre_bhb_state(void);
+bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
+u8 spectre_bhb_loop_affected(int scope);
+void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
#endif /* __ASSEMBLY__ */
#endif
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -90,9 +90,13 @@
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define ARM_CPU_PART_CORTEX_A77 0xD0D
+#define ARM_CPU_PART_NEOVERSE_V1 0xD40
+#define ARM_CPU_PART_CORTEX_A78 0xD41
+#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
+#define ARM_CPU_PART_CORTEX_A78C 0xD4B
#define APM_CPU_PART_POTENZA 0x000
@@ -121,9 +125,13 @@
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
+#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -448,6 +448,7 @@
#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
/* id_aa64mmfr1 */
+#define ID_AA64MMFR1_ECBHB_SHIFT 60
#define ID_AA64MMFR1_PAN_SHIFT 20
#define ID_AA64MMFR1_LOR_SHIFT 16
#define ID_AA64MMFR1_HPD_SHIFT 12
--- a/arch/arm64/include/asm/vectors.h
+++ b/arch/arm64/include/asm/vectors.h
@@ -9,6 +9,7 @@
#include <linux/percpu.h>
#include <asm/fixmap.h>
+#include <asm/mmu.h>
extern char vectors[];
extern char tramp_vectors[];
@@ -40,6 +41,11 @@ enum arm64_bp_harden_el1_vectors {
EL1_VECTOR_KPTI,
};
+#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+#define EL1_VECTOR_BHB_LOOP -1
+#define EL1_VECTOR_BHB_FW -1
+#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+
/* The vectors to use on return from EL0. e.g. to remap the kernel */
DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -23,6 +23,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/vectors.h>
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -732,6 +733,13 @@ const struct arm64_cpu_capabilities arm6
},
#endif
{
+ .desc = "Spectre-BHB",
+ .capability = ARM64_SPECTRE_BHB,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = is_spectre_bhb_affected,
+ .cpu_enable = spectre_bhb_enable_mitigation,
+ },
+ {
}
};
@@ -795,6 +803,33 @@ ssize_t cpu_show_spec_store_bypass(struc
return sprintf(buf, "Vulnerable\n");
}
+/*
+ * We try to ensure that the mitigation state can never change as the result of
+ * onlining a late CPU.
+ */
+static void update_mitigation_state(enum mitigation_state *oldp,
+ enum mitigation_state new)
+{
+ enum mitigation_state state;
+
+ do {
+ state = READ_ONCE(*oldp);
+ if (new <= state)
+ break;
+ } while (cmpxchg_relaxed(oldp, state, new) != state);
+}
+
+/*
+ * Spectre BHB.
+ *
+ * A CPU is either:
+ * - Mitigated by a branchy loop a CPU specific number of times, and listed
+ * in our "loop mitigated list".
+ * - Mitigated in software by the firmware Spectre v2 call.
+ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
+ * software mitigation in the vectors is needed.
+ * - Has CSV2.3, so is unaffected.
+ */
static enum mitigation_state spectre_bhb_state;
enum mitigation_state arm64_get_spectre_bhb_state(void)
@@ -802,6 +837,163 @@ enum mitigation_state arm64_get_spectre_
return spectre_bhb_state;
}
+/*
+ * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
+ * SCOPE_SYSTEM call will give the right answer.
+ */
+u8 spectre_bhb_loop_affected(int scope)
+{
+ u8 k = 0;
+ static u8 max_bhb_k;
+
+ if (scope == SCOPE_LOCAL_CPU) {
+ static const struct midr_range spectre_bhb_k32_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ {},
+ };
+ static const struct midr_range spectre_bhb_k24_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+ {},
+ };
+ static const struct midr_range spectre_bhb_k8_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ {},
+ };
+
+ if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+ k = 32;
+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+ k = 24;
+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+ k = 8;
+
+ max_bhb_k = max(max_bhb_k, k);
+ } else {
+ k = max_bhb_k;
+ }
+
+ return k;
+}
+
+static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
+{
+ int ret;
+ struct arm_smccc_res res;
+
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+ return SPECTRE_VULNERABLE;
+
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_3, &res);
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_3, &res);
+ break;
+
+ default:
+ return SPECTRE_VULNERABLE;
+ }
+
+ ret = res.a0;
+ switch (ret) {
+ case SMCCC_RET_SUCCESS:
+ return SPECTRE_MITIGATED;
+ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+ return SPECTRE_UNAFFECTED;
+ default:
+ case SMCCC_RET_NOT_SUPPORTED:
+ return SPECTRE_VULNERABLE;
+ }
+}
+
+static bool is_spectre_bhb_fw_affected(int scope)
+{
+ static bool system_affected;
+ enum mitigation_state fw_state;
+ bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
+ static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+ {},
+ };
+ bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
+ spectre_bhb_firmware_mitigated_list);
+
+ if (scope != SCOPE_LOCAL_CPU)
+ return system_affected;
+
+ fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+ if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
+ system_affected = true;
+ return true;
+ }
+
+ return false;
+}
+
+static bool supports_ecbhb(int scope)
+{
+ u64 mmfr1;
+
+ if (scope == SCOPE_LOCAL_CPU)
+ mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
+ else
+ mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+
+ return cpuid_feature_extract_unsigned_field(mmfr1,
+ ID_AA64MMFR1_ECBHB_SHIFT);
+}
+
+bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ if (supports_csv2p3(scope))
+ return false;
+
+ if (spectre_bhb_loop_affected(scope))
+ return true;
+
+ if (is_spectre_bhb_fw_affected(scope))
+ return true;
+
+ return false;
+}
+
+static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
+{
+ const char *v = arm64_get_bp_hardening_vector(slot);
+
+ if (slot < 0)
+ return;
+
+ __this_cpu_write(this_cpu_vector, v);
+
+ /*
+ * When KPTI is in use, the vectors are switched when exiting to
+ * user-space.
+ */
+ if (arm64_kernel_unmapped_at_el0())
+ return;
+
+ write_sysreg(v, vbar_el1);
+ isb();
+}
+
#ifdef CONFIG_KVM
static const char *kvm_bhb_get_vecs_end(const char *start)
{
@@ -817,7 +1009,7 @@ static const char *kvm_bhb_get_vecs_end(
return NULL;
}
-void kvm_setup_bhb_slot(const char *hyp_vecs_start)
+static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
{
int cpu, slot = -1;
const char *hyp_vecs_end;
@@ -855,5 +1047,77 @@ void kvm_setup_bhb_slot(const char *hyp_
#define __spectre_bhb_loop_k24_start NULL
#define __spectre_bhb_loop_k32_start NULL
-void kvm_setup_bhb_slot(const char *hyp_vecs_start) { };
+static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { };
#endif
+
+void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
+{
+ enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
+
+ if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
+ return;
+
+ if (!__spectrev2_safe && !__hardenbp_enab) {
+ /* No point mitigating Spectre-BHB alone. */
+ } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
+ pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
+ } else if (cpu_mitigations_off()) {
+ pr_info_once("spectre-bhb mitigation disabled by command line option\n");
+ } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
+ state = SPECTRE_MITIGATED;
+ } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
+ switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
+ case 8:
+ kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
+ break;
+ case 24:
+ kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
+ break;
+ case 32:
+ kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
+
+ state = SPECTRE_MITIGATED;
+ } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
+ fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+ if (fw_state == SPECTRE_MITIGATED) {
+ kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
+ this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
+
+ /*
+ * With WA3 in the vectors, the WA1 calls can be
+ * removed.
+ */
+ __this_cpu_write(bp_hardening_data.fn, NULL);
+
+ state = SPECTRE_MITIGATED;
+ }
+ }
+
+ update_mitigation_state(&spectre_bhb_state, state);
+}
+
+/* Patched to correct the immediate */
+void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+ u8 rd;
+ u32 insn;
+ u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
+
+ BUG_ON(nr_inst != 1); /* MOV -> MOV */
+
+ if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
+ return;
+
+ insn = le32_to_cpu(*origptr);
+ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
+ insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_MOVEWIDE_ZERO);
+ *updptr++ = cpu_to_le32(insn);
+}
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -135,6 +135,10 @@ el1_hvc_guest:
/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
ARM_SMCCC_ARCH_WORKAROUND_2)
+ cbz w1, wa_epilogue
+
+ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
+ ARM_SMCCC_ARCH_WORKAROUND_3)
cbnz w1, el1_trap
#ifdef CONFIG_ARM64_SSBD
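
A note on the state handling above: update_mitigation_state() only ever moves
spectre_bhb_state towards a worse value, so a late-onlining CPU can downgrade
the reported mitigation but never improve it. Below is a minimal,
single-threaded user-space sketch of that ratchet (the kernel uses
cmpxchg_relaxed() to make the same update safe against concurrent callers);
the enum order is copied from the series, everything else is illustrative:

#include <stdio.h>

/* Order matters, as in the series: larger values are "worse". */
enum mitigation_state {
        SPECTRE_UNAFFECTED,
        SPECTRE_MITIGATED,
        SPECTRE_VULNERABLE,
};

/* Single-threaded stand-in for the kernel's cmpxchg loop. */
static void update_state(enum mitigation_state *oldp, enum mitigation_state new)
{
        if (new > *oldp)        /* only ever ratchet towards "worse" */
                *oldp = new;
}

int main(void)
{
        enum mitigation_state state = SPECTRE_UNAFFECTED;

        update_state(&state, SPECTRE_MITIGATED);        /* boot CPUs mitigated     */
        update_state(&state, SPECTRE_VULNERABLE);       /* late CPU, no mitigation */
        update_state(&state, SPECTRE_MITIGATED);        /* cannot improve again    */

        printf("final state: %d (2 == SPECTRE_VULNERABLE)\n", state);
        return 0;
}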
From: James Morse <[email protected]>
commit c091fb6ae059cda563b2a4d93fdbc548ef34e1d6 upstream.
The trampoline code has a data page that holds the address of the vectors,
which is unmapped when running in user-space. This ensures that with
CONFIG_RANDOMIZE_BASE, the randomised address of the kernel can't be
discovered until after the kernel has been mapped.
If the trampoline text page is extended to include multiple sets of
vectors, it will be larger than a single page, making it tricky to
find the data page without knowing the size of the trampoline text
pages, which will vary with PAGE_SIZE.
Move the data page to appear before the text page. This allows the
data page to be found without knowing the size of the trampoline text
pages. 'tramp_vectors' is used to refer to the beginning of the
.entry.tramp.text section; make that reference explicit.
Reviewed-by: Russell King (Oracle) <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
[ removed SDEI for backport ]
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/include/asm/fixmap.h | 2 +-
arch/arm64/kernel/entry.S | 7 ++++++-
2 files changed, 7 insertions(+), 2 deletions(-)
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -59,8 +59,8 @@ enum fixed_addresses {
#endif /* CONFIG_ACPI_APEI_GHES */
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- FIX_ENTRY_TRAMP_DATA,
FIX_ENTRY_TRAMP_TEXT,
+ FIX_ENTRY_TRAMP_DATA,
#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
__end_of_permanent_fixed_addresses,
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -1019,6 +1019,11 @@ alternative_else_nop_endif
*/
.endm
+ .macro tramp_data_page dst
+ adr \dst, .entry.tramp.text
+ sub \dst, \dst, PAGE_SIZE
+ .endm
+
.macro tramp_ventry, regsize = 64
.align 7
1:
@@ -1035,7 +1040,7 @@ alternative_else_nop_endif
2:
tramp_map_kernel x30
#ifdef CONFIG_RANDOMIZE_BASE
- adr x30, tramp_vectors + PAGE_SIZE
+ tramp_data_page x30
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
ldr x30, [x30]
#else
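
For reference, why the swap works, assuming the usual arm64 fixmap convention
that a higher fixmap index maps to a lower virtual address: with
FIX_ENTRY_TRAMP_DATA declared after FIX_ENTRY_TRAMP_TEXT, the data page lands
one page below the start of the trampoline text, which is exactly the address
the new tramp_data_page macro computes. A small sketch of that arithmetic
(the FIXADDR_TOP value is made up for the example):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1ULL << PAGE_SHIFT)
#define FIXADDR_TOP     0xfffffffffffe0000ULL   /* illustrative value only */

/* Mirrors the new enum order: TEXT first, DATA second. */
enum { FIX_ENTRY_TRAMP_TEXT, FIX_ENTRY_TRAMP_DATA };

/* Higher fixmap index => lower virtual address. */
static uint64_t fix_to_virt(unsigned int idx)
{
        return FIXADDR_TOP - ((uint64_t)idx << PAGE_SHIFT);
}

int main(void)
{
        uint64_t text = fix_to_virt(FIX_ENTRY_TRAMP_TEXT);
        uint64_t data = fix_to_virt(FIX_ENTRY_TRAMP_DATA);

        printf("trampoline text page: 0x%llx\n", (unsigned long long)text);
        printf("trampoline data page: 0x%llx (text - PAGE_SIZE = 0x%llx)\n",
               (unsigned long long)data,
               (unsigned long long)(text - PAGE_SIZE));
        return 0;
}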
From: James Morse <[email protected]>
commit d739da1694a0eaef0358a42b76904b611539b77b upstream.
Subsequent patches will add additional sets of vectors that use
the same tricks as the kpti vectors to reach the full-fat vectors.
The full-fat vectors contain some cleanup for kpti that is patched
in by alternatives when kpti is in use. Once there are additional
vectors, the cleanup will be needed in more cases.
But on big/little systems, the cleanup would be harmful if no
trampoline vector were in use. Instead of forcing CPUs that don't
need a trampoline vector to use one, make the trampoline cleanup
optional.
Entry at the top of the vectors will skip the cleanup. The trampoline
vectors can then skip the first instruction, triggering the cleanup
to run.
Reviewed-by: Russell King (Oracle) <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/kernel/entry.S | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -76,16 +76,20 @@
.align 7
.Lventry_start\@:
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-alternative_if ARM64_UNMAP_KERNEL_AT_EL0
.if \el == 0
+ /*
+ * This must be the first instruction of the EL0 vector entries. It is
+ * skipped by the trampoline vectors, to trigger the cleanup.
+ */
+ b .Lskip_tramp_vectors_cleanup\@
.if \regsize == 64
mrs x30, tpidrro_el0
msr tpidrro_el0, xzr
.else
mov x30, xzr
.endif
+.Lskip_tramp_vectors_cleanup\@:
.endif
-alternative_else_nop_endif
#endif
sub sp, sp, #S_FRAME_SIZE
@@ -1035,7 +1039,7 @@ alternative_insn isb, nop, ARM64_WORKARO
#endif
prfm plil1strm, [x30, #(1b - tramp_vectors)]
msr vbar_el1, x30
- add x30, x30, #(1b - tramp_vectors)
+ add x30, x30, #(1b - tramp_vectors + 4)
isb
ret
.org 1b + 128 // Did we overflow the ventry slot?
From: James Morse <[email protected]>
commit 9e45365f1469ef2b934f9d035975dbc9ad352116 upstream.
This is a new ID register, introduced in Armv8.7.
Signed-off-by: Joey Gouly <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Marc Zyngier <[email protected]>
Cc: James Morse <[email protected]>
Cc: Alexandru Elisei <[email protected]>
Cc: Suzuki K Poulose <[email protected]>
Cc: Reiji Watanabe <[email protected]>
Acked-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/include/asm/cpu.h | 1 +
arch/arm64/include/asm/sysreg.h | 1 +
arch/arm64/kernel/cpufeature.c | 9 +++++++++
arch/arm64/kernel/cpuinfo.c | 1 +
4 files changed, 12 insertions(+)
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -36,6 +36,7 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64dfr1;
u64 reg_id_aa64isar0;
u64 reg_id_aa64isar1;
+ u64 reg_id_aa64isar2;
u64 reg_id_aa64mmfr0;
u64 reg_id_aa64mmfr1;
u64 reg_id_aa64mmfr2;
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -157,6 +157,7 @@
#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
+#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2)
#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -134,6 +134,10 @@ static const struct arm64_ftr_bits ftr_i
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
@@ -361,6 +365,7 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 6 */
ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
+ ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
/* Op1 = 0, CRn = 0, CRm = 7 */
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
@@ -506,6 +511,7 @@ void __init init_cpu_features(struct cpu
init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
+ init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
@@ -617,6 +623,8 @@ void update_cpu_features(int cpu,
info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
+ taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
+ info->reg_id_aa64isar2, boot->reg_id_aa64isar2);
/*
* Differing PARange support is fine as long as all peripherals and
@@ -737,6 +745,7 @@ static u64 __read_sysreg_by_encoding(u32
read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
+ read_sysreg_case(SYS_ID_AA64ISAR2_EL1);
read_sysreg_case(SYS_CNTFRQ_EL0);
read_sysreg_case(SYS_CTR_EL0);
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -333,6 +333,7 @@ static void __cpuinfo_store_cpu(struct c
info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
+ info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1);
info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
From: James Morse <[email protected]>
commit ba2689234be92024e5635d30fe744f4853ad97db upstream.
Some CPUs affected by Spectre-BHB need a sequence of branches, or a
firmware call to be run before any indirect branch. This needs to go
in the vectors. No CPU needs both.
While this can be patched in, it would run on all CPUs as there is a
single set of vectors. If only one part of a big/little combination is
affected, the unaffected CPUs have to run the mitigation too.
Create extra vectors that include the sequence. Subsequent patches will
allow affected CPUs to select this set of vectors. Later patches will
modify the loop count to match what the CPU requires.
Reviewed-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/include/asm/assembler.h | 25 +++++++++++++++++
arch/arm64/include/asm/vectors.h | 34 +++++++++++++++++++++++
arch/arm64/kernel/entry.S | 53 ++++++++++++++++++++++++++++++-------
include/linux/arm-smccc.h | 7 ++++
4 files changed, 110 insertions(+), 9 deletions(-)
create mode 100644 arch/arm64/include/asm/vectors.h
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -549,4 +549,29 @@ alternative_endif
.Ldone\@:
.endm
+ .macro __mitigate_spectre_bhb_loop tmp
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ mov \tmp, #32
+.Lspectre_bhb_loop\@:
+ b . + 4
+ subs \tmp, \tmp, #1
+ b.ne .Lspectre_bhb_loop\@
+ dsb nsh
+ isb
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+ .endm
+
+ /* Save/restores x0-x3 to the stack */
+ .macro __mitigate_spectre_bhb_fw
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ stp x0, x1, [sp, #-16]!
+ stp x2, x3, [sp, #-16]!
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
+alternative_cb arm64_update_smccc_conduit
+ nop // Patched to SMC/HVC #0
+alternative_cb_end
+ ldp x2, x3, [sp], #16
+ ldp x0, x1, [sp], #16
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+ .endm
#endif /* __ASM_ASSEMBLER_H */
--- /dev/null
+++ b/arch/arm64/include/asm/vectors.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 ARM Ltd.
+ */
+#ifndef __ASM_VECTORS_H
+#define __ASM_VECTORS_H
+
+/*
+ * Note: the order of this enum corresponds to two arrays in entry.S:
+ * tramp_vecs and __bp_harden_el1_vectors. By default the canonical
+ * 'full fat' vectors are used directly.
+ */
+enum arm64_bp_harden_el1_vectors {
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ /*
+ * Perform the BHB loop mitigation, before branching to the canonical
+ * vectors.
+ */
+ EL1_VECTOR_BHB_LOOP,
+
+ /*
+ * Make the SMC call for firmware mitigation, before branching to the
+ * canonical vectors.
+ */
+ EL1_VECTOR_BHB_FW,
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+
+ /*
+ * Remap the kernel before branching to the canonical vectors.
+ */
+ EL1_VECTOR_KPTI,
+};
+
+#endif /* __ASM_VECTORS_H */
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -1022,13 +1022,26 @@ alternative_else_nop_endif
sub \dst, \dst, PAGE_SIZE
.endm
- .macro tramp_ventry, vector_start, regsize, kpti
+
+#define BHB_MITIGATION_NONE 0
+#define BHB_MITIGATION_LOOP 1
+#define BHB_MITIGATION_FW 2
+
+ .macro tramp_ventry, vector_start, regsize, kpti, bhb
.align 7
1:
.if \regsize == 64
msr tpidrro_el0, x30 // Restored in kernel_ventry
.endif
+ .if \bhb == BHB_MITIGATION_LOOP
+ /*
+ * This sequence must appear before the first indirect branch. i.e. the
+ * ret out of tramp_ventry. It appears here because x30 is free.
+ */
+ __mitigate_spectre_bhb_loop x30
+ .endif // \bhb == BHB_MITIGATION_LOOP
+
.if \kpti == 1
/*
* Defend against branch aliasing attacks by pushing a dummy
@@ -1053,6 +1066,15 @@ alternative_insn isb, nop, ARM64_WORKARO
ldr x30, =vectors
.endif // \kpti == 1
+ .if \bhb == BHB_MITIGATION_FW
+ /*
+ * The firmware sequence must appear before the first indirect branch.
+ * i.e. the ret out of tramp_ventry. But it also needs the stack to be
+ * mapped to save/restore the registers the SMC clobbers.
+ */
+ __mitigate_spectre_bhb_fw
+ .endif // \bhb == BHB_MITIGATION_FW
+
add x30, x30, #(1b - \vector_start + 4)
ret
.org 1b + 128 // Did we overflow the ventry slot?
@@ -1060,6 +1082,9 @@ alternative_insn isb, nop, ARM64_WORKARO
.macro tramp_exit, regsize = 64
adr x30, tramp_vectors
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ add x30, x30, SZ_4K
+#endif
msr vbar_el1, x30
ldr lr, [sp, #S_LR]
tramp_unmap_kernel x29
@@ -1070,26 +1095,32 @@ alternative_insn isb, nop, ARM64_WORKARO
eret
.endm
- .macro generate_tramp_vector, kpti
+ .macro generate_tramp_vector, kpti, bhb
.Lvector_start\@:
.space 0x400
.rept 4
- tramp_ventry .Lvector_start\@, 64, \kpti
+ tramp_ventry .Lvector_start\@, 64, \kpti, \bhb
.endr
.rept 4
- tramp_ventry .Lvector_start\@, 32, \kpti
+ tramp_ventry .Lvector_start\@, 32, \kpti, \bhb
.endr
.endm
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
* Exception vectors trampoline.
+ * The order must match __bp_harden_el1_vectors and the
+ * arm64_bp_harden_el1_vectors enum.
*/
.pushsection ".entry.tramp.text", "ax"
.align 11
ENTRY(tramp_vectors)
- generate_tramp_vector kpti=1
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
END(tramp_vectors)
ENTRY(tramp_exit_native)
@@ -1116,7 +1147,7 @@ __entry_tramp_data_start:
* Exception vectors for spectre mitigations on entry from EL1 when
* kpti is not in use.
*/
- .macro generate_el1_vector
+ .macro generate_el1_vector, bhb
.Lvector_start\@:
kernel_ventry 1, sync_invalid // Synchronous EL1t
kernel_ventry 1, irq_invalid // IRQ EL1t
@@ -1129,17 +1160,21 @@ __entry_tramp_data_start:
kernel_ventry 1, error_invalid // Error EL1h
.rept 4
- tramp_ventry .Lvector_start\@, 64, kpti=0
+ tramp_ventry .Lvector_start\@, 64, 0, \bhb
.endr
.rept 4
- tramp_ventry .Lvector_start\@, 32, kpti=0
+ tramp_ventry .Lvector_start\@, 32, 0, \bhb
.endr
.endm
+/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
.pushsection ".entry.text", "ax"
.align 11
ENTRY(__bp_harden_el1_vectors)
- generate_el1_vector
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ generate_el1_vector bhb=BHB_MITIGATION_LOOP
+ generate_el1_vector bhb=BHB_MITIGATION_FW
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
END(__bp_harden_el1_vectors)
.popsection
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -85,6 +85,13 @@
ARM_SMCCC_SMC_32, \
0, 0x7fff)
+#define ARM_SMCCC_ARCH_WORKAROUND_3 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x3fff)
+
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1
+
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
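
A rough model of the resulting trampoline layout, derived from the .space and
.align directives above: each generated set of vectors is 0x400 bytes of
padding plus eight 128-byte ventries, i.e. 2KiB, so with the two BHB sets
built in the plain kpti set starts 4KiB into tramp_vectors, which is why
tramp_exit adds SZ_4K when CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY is enabled:

#include <stdio.h>

#define VENTRY_SIZE     128     /* one ventry slot, enforced by the .org checks */
#define SET_PAD         0x400   /* the ".space 0x400" at the top of each set    */
#define NR_VENTRIES     8       /* 4 x 64-bit plus 4 x 32-bit entries per set   */
#define SET_SIZE        (SET_PAD + NR_VENTRIES * VENTRY_SIZE)  /* 0x800 = 2KiB  */

/* Order matches tramp_vectors when the BHB mitigations are built in. */
enum { SET_BHB_LOOP, SET_BHB_FW, SET_NONE };

int main(void)
{
        printf("per-set size          : 0x%x\n", SET_SIZE);
        printf("loop-mitigated set    : 0x%x\n", SET_BHB_LOOP * SET_SIZE);
        printf("firmware-mitigated set: 0x%x\n", SET_BHB_FW * SET_SIZE);
        printf("plain kpti set        : 0x%x  (== SZ_4K, hence tramp_exit's add)\n",
               SET_NONE * SET_SIZE);
        return 0;
}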
From: James Morse <[email protected]>
commit aff65393fa1401e034656e349abd655cfe272de0 upstream.
kpti is an optional feature; systems not using kpti still need a set of
vectors for the spectre-bhb mitigations.
Add another set of vectors, __bp_harden_el1_vectors, that will be
used if a mitigation is needed and kpti is not in use.
The EL1 ventries are repeated verbatim as there is no additional
work needed for entry from EL1.
Reviewed-by: Russell King (Oracle) <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/kernel/entry.S | 34 +++++++++++++++++++++++++++++++++-
1 file changed, 33 insertions(+), 1 deletion(-)
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -1025,10 +1025,11 @@ alternative_else_nop_endif
.macro tramp_ventry, vector_start, regsize, kpti
.align 7
1:
- .if \kpti == 1
.if \regsize == 64
msr tpidrro_el0, x30 // Restored in kernel_ventry
.endif
+
+ .if \kpti == 1
/*
* Defend against branch aliasing attacks by pushing a dummy
* entry onto the return stack and using a RET instruction to
@@ -1112,6 +1113,37 @@ __entry_tramp_data_start:
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
+ * Exception vectors for spectre mitigations on entry from EL1 when
+ * kpti is not in use.
+ */
+ .macro generate_el1_vector
+.Lvector_start\@:
+ kernel_ventry 1, sync_invalid // Synchronous EL1t
+ kernel_ventry 1, irq_invalid // IRQ EL1t
+ kernel_ventry 1, fiq_invalid // FIQ EL1t
+ kernel_ventry 1, error_invalid // Error EL1t
+
+ kernel_ventry 1, sync // Synchronous EL1h
+ kernel_ventry 1, irq // IRQ EL1h
+ kernel_ventry 1, fiq_invalid // FIQ EL1h
+ kernel_ventry 1, error_invalid // Error EL1h
+
+ .rept 4
+ tramp_ventry .Lvector_start\@, 64, kpti=0
+ .endr
+ .rept 4
+ tramp_ventry .Lvector_start\@, 32, kpti=0
+ .endr
+ .endm
+
+ .pushsection ".entry.text", "ax"
+ .align 11
+ENTRY(__bp_harden_el1_vectors)
+ generate_el1_vector
+END(__bp_harden_el1_vectors)
+ .popsection
+
+/*
* Special system call wrappers.
*/
ENTRY(sys_rt_sigreturn_wrapper)
From: James Morse <[email protected]>
commit 6c5bf79b69f911560fbf82214c0971af6e58e682 upstream.
Systems using kpti enter and exit the kernel through a trampoline mapping
that is always mapped, even when the kernel is not. tramp_alias is a macro
to find the address of a symbol in the trampoline mapping.
Adding extra sets of vectors will expand the size of the entry.tramp.text
section to beyond 4K. tramp_alias will be unable to generate addresses
for symbols beyond 4K as it uses the 12-bit immediate of the add
instruction.
As there are now two registers available when tramp_alias is called,
use the extra register to avoid the 4K limit of the 12-bit immediate.
Reviewed-by: Russell King (Oracle) <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
[ Removed SDEI for backport ]
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/kernel/entry.S | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -139,9 +139,12 @@
.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
.endm
- .macro tramp_alias, dst, sym
+ .macro tramp_alias, dst, sym, tmp
mov_q \dst, TRAMP_VALIAS
- add \dst, \dst, #(\sym - .entry.tramp.text)
+ adr_l \tmp, \sym
+ add \dst, \dst, \tmp
+ adr_l \tmp, .entry.tramp.text
+ sub \dst, \dst, \tmp
.endm
// This macro corrupts x0-x3. It is the caller's duty
@@ -366,10 +369,10 @@ alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
bne 4f
msr far_el1, x29
- tramp_alias x30, tramp_exit_native
+ tramp_alias x30, tramp_exit_native, x29
br x30
4:
- tramp_alias x30, tramp_exit_compat
+ tramp_alias x30, tramp_exit_compat, x29
br x30
#endif
.else
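
To make the 4K limit concrete: the plain 'add Xd, Xn, #imm' encoding only
takes a 12-bit unsigned immediate, so the old '#(\sym - .entry.tramp.text)'
form can only reach symbols within 4095 bytes of the section start. The
reworked macro computes the offset in the spare register instead. A sketch of
the address arithmetic, with made-up addresses:

#include <stdio.h>
#include <stdint.h>

/* Made-up addresses, for illustration only. */
#define TRAMP_VALIAS            0xfffffffffffd0000ULL   /* trampoline alias mapping */
#define ENTRY_TRAMP_TEXT        0xffffff8008e81000ULL   /* linked .entry.tramp.text */

/* Old form: add \dst, \dst, #(\sym - .entry.tramp.text).  Only valid while the
 * offset fits the plain 12-bit unsigned immediate of 'add' (0..4095). */
static int offset_fits_add_imm(uint64_t sym)
{
        return (sym - ENTRY_TRAMP_TEXT) <= 0xfff;
}

/* New form: compute the offset in a scratch register, no 4K limit. */
static uint64_t tramp_alias(uint64_t sym)
{
        uint64_t off = sym - ENTRY_TRAMP_TEXT;  /* adr_l tmp,sym / sub dst,dst,tmp */
        return TRAMP_VALIAS + off;
}

int main(void)
{
        uint64_t sym = ENTRY_TRAMP_TEXT + 0x1040;       /* symbol on the second page */

        printf("old encoding possible: %s\n", offset_fits_add_imm(sym) ? "yes" : "no");
        printf("aliased address      : 0x%llx\n", (unsigned long long)tramp_alias(sym));
        return 0;
}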
From: James Morse <[email protected]>
commit a9c406e6462ff14956d690de7bbe5131a5677dc9 upstream.
Adding a second set of vectors to .entry.tramp.text will make it
larger than a single 4K page.
Allow the trampoline text to occupy up to three pages by adding two
more fixmap slots. Previous changes to tramp_valias allowed it to reach
beyond a single page.
Reviewed-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/include/asm/fixmap.h | 6 ++++--
arch/arm64/include/asm/sections.h | 6 ++++++
arch/arm64/kernel/entry.S | 2 +-
arch/arm64/kernel/vmlinux.lds.S | 2 +-
arch/arm64/mm/mmu.c | 11 ++++++++---
5 files changed, 20 insertions(+), 7 deletions(-)
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -59,9 +59,11 @@ enum fixed_addresses {
#endif /* CONFIG_ACPI_APEI_GHES */
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- FIX_ENTRY_TRAMP_TEXT,
+ FIX_ENTRY_TRAMP_TEXT3,
+ FIX_ENTRY_TRAMP_TEXT2,
+ FIX_ENTRY_TRAMP_TEXT1,
FIX_ENTRY_TRAMP_DATA,
-#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
__end_of_permanent_fixed_addresses,
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -28,5 +28,11 @@ extern char __initdata_begin[], __initda
extern char __inittext_begin[], __inittext_end[];
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __mmuoff_data_start[], __mmuoff_data_end[];
+extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+
+static inline size_t entry_tramp_text_size(void)
+{
+ return __entry_tramp_text_end - __entry_tramp_text_start;
+}
#endif /* __ASM_SECTIONS_H */
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -1018,7 +1018,7 @@ alternative_else_nop_endif
.endm
.macro tramp_data_page dst
- adr \dst, .entry.tramp.text
+ adr_l \dst, .entry.tramp.text
sub \dst, \dst, PAGE_SIZE
.endm
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -258,7 +258,7 @@ ASSERT(__hibernate_exit_text_end - (__hi
<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
"Entry trampoline text too big")
#endif
/*
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -532,6 +532,7 @@ early_param("rodata", parse_rodata);
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
+ int i;
extern char __entry_tramp_text_start[];
pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
@@ -542,11 +543,15 @@ static int __init map_entry_trampoline(v
/* Map only the text into the trampoline page table */
memset(tramp_pg_dir, 0, PGD_SIZE);
- __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
- prot, pgd_pgtable_alloc, 0);
+ __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
+ entry_tramp_text_size(), prot, pgd_pgtable_alloc,
+ 0);
/* Map both the text and data into the kernel page table */
- __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
+ for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
+ __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
+ pa_start + i * PAGE_SIZE, prot);
+
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
extern char __entry_tramp_data_start[];
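
The mapping loop above installs one fixmap slot per page of trampoline text;
because each lower fixmap index sits one page higher in virtual address,
FIX_ENTRY_TRAMP_TEXT1 - i keeps the alias contiguous from TRAMP_VALIAS
upwards. A sketch of the page-count calculation (the text size here is a
made-up example):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        /* Stand-in for entry_tramp_text_size(); the value is made up. */
        unsigned long tramp_text_size = 0x1900;
        unsigned long pages = DIV_ROUND_UP(tramp_text_size, PAGE_SIZE);
        unsigned long i;

        /* FIX_ENTRY_TRAMP_TEXT1 - i: each lower fixmap index is one page higher
         * in virtual address, so the alias stays contiguous. */
        for (i = 0; i < pages; i++)
                printf("FIX_ENTRY_TRAMP_TEXT%lu -> pa_start + 0x%lx\n",
                       i + 1, i * PAGE_SIZE);

        printf("pages mapped: %lu (the vmlinux.lds.S ASSERT allows up to 3)\n", pages);
        return 0;
}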
On Fri, Apr 01, 2022 at 08:36:10AM +0200, Greg Kroah-Hartman wrote:
> This is the start of the stable review cycle for the 4.14.275 release.
> There are 27 patches in this series, all will be posted as a response
> to this one. If anyone has any issues with these being applied, please
> let me know.
>
> Responses should be made by Sun, 03 Apr 2022 06:36:16 +0000.
> Anything received after that time might be too late.
>
Build results:
total: 168 pass: 168 fail: 0
Qemu test results:
total: 424 pass: 424 fail: 0
Tested-by: Guenter Roeck <[email protected]>
Guenter
From: James Morse <[email protected]>
commit 4330e2c5c04c27bebf89d34e0bc14e6943413067 upstream.
Subsequent patches add even more code to the ventry slots.
Ensure kernels that overflow a ventry slot don't get built.
Reviewed-by: Russell King (Oracle) <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/kernel/entry.S | 3 +++
1 file changed, 3 insertions(+)
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -74,6 +74,7 @@
.macro kernel_ventry, el, label, regsize = 64
.align 7
+.Lventry_start\@:
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
.if \el == 0
@@ -131,6 +132,7 @@ alternative_else_nop_endif
mrs x0, tpidrro_el0
#endif
b el\()\el\()_\label
+.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
.endm
.macro tramp_alias, dst, sym
@@ -1036,6 +1038,7 @@ alternative_insn isb, nop, ARM64_WORKARO
add x30, x30, #(1b - tramp_vectors)
isb
ret
+.org 1b + 128 // Did we overflow the ventry slot?
.endm
.macro tramp_exit, regsize = 64
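
The .org directives are build-time checks: the assembler refuses to move the
location counter backwards, so a vector entry that grows past 128 bytes
(32 A64 instructions) fails the build. A loose C analogue of the same idea,
for illustration only:

/* Loose analogue only: the real check is the assembler refusing to move the
 * location counter backwards past .Lventry_start + 128. */
#include <assert.h>

struct ventry_slot {
        unsigned char insns[128];       /* one vector entry: 32 A64 instructions */
};

/* Build fails if a "slot" ever grows past 128 bytes. */
static_assert(sizeof(struct ventry_slot) <= 128, "ventry slot overflowed");

int main(void)
{
        return 0;
}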
From: Marc Zyngier <[email protected]>
commit e03a4e5bb7430f9294c12f02c69eb045d010e942 upstream.
Document that we actually work around ARM erratum 1188873.
Fixes: 95b861a4a6d9 ("arm64: arch_timer: Add workaround for ARM erratum 1188873")
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
Documentation/arm64/silicon-errata.txt | 1 +
1 file changed, 1 insertion(+)
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -56,6 +56,7 @@ stable kernels.
| ARM | Cortex-A72 | #853709 | N/A |
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
+| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
From: Rob Herring <[email protected]>
commit 8a6b88e66233f5f1779b0a1342aa9dc030dddcd5 upstream.
Add the MIDR part number info for the Arm Cortex-A77.
Signed-off-by: Rob Herring <[email protected]>
Acked-by: Catalin Marinas <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/include/asm/cputype.h | 2 ++
1 file changed, 2 insertions(+)
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -89,6 +89,7 @@
#define ARM_CPU_PART_CORTEX_A55 0xD05
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
+#define ARM_CPU_PART_CORTEX_A77 0xD0D
#define APM_CPU_PART_POTENZA 0x000
@@ -116,6 +117,7 @@
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
+#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
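
MIDR_CPU_MODEL() packs the implementer and part number into the fields of
MIDR_EL1 so the errata framework can match CPUs by model. A sketch of that
packing, assuming the architected MIDR_EL1 layout (implementer in bits
[31:24], part number in bits [15:4]); the macro body below is a stand-in, not
the kernel's definition:

#include <stdio.h>
#include <stdint.h>

#define ARM_CPU_IMP_ARM                 0x41
#define ARM_CPU_PART_CORTEX_A77         0xD0D

/* Assumed MIDR_EL1 layout: Implementer [31:24], Variant [23:20],
 * Architecture [19:16], PartNum [15:4], Revision [3:0]. */
#define MIDR_CPU_MODEL(imp, part)       \
        (((uint32_t)(imp) << 24) | (0xfU << 16) | ((uint32_t)(part) << 4))

int main(void)
{
        uint32_t midr = MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77);

        printf("MIDR model value for Cortex-A77: 0x%x\n", midr);
        printf("implementer 0x%x, part number 0x%x\n",
               (midr >> 24) & 0xff, (midr >> 4) & 0xfff);
        return 0;
}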
From: James Morse <[email protected]>
commit dee435be76f4117410bbd90573a881fd33488f37 upstream.
Speculation attacks against some high-performance processors can
make use of branch history to influence future speculation as part of
a spectre-v2 attack. This is not mitigated by CSV2, meaning CPUs that
previously reported 'Not affected' are now moderately mitigated by CSV2.
Update the value in /sys/devices/system/cpu/vulnerabilities/spectre_v2
to also show the state of the BHB mitigation.
Reviewed-by: Catalin Marinas <[email protected]>
[ code move to cpu_errata.c for backport ]
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/include/asm/cpufeature.h | 8 +++++++
arch/arm64/kernel/cpu_errata.c | 38 +++++++++++++++++++++++++++++++++---
2 files changed, 43 insertions(+), 3 deletions(-)
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -495,6 +495,14 @@ static inline int arm64_get_ssbd_state(v
void arm64_set_ssbd_mitigation(bool state);
+/* Watch out, ordering is important here. */
+enum mitigation_state {
+ SPECTRE_UNAFFECTED,
+ SPECTRE_MITIGATED,
+ SPECTRE_VULNERABLE,
+};
+
+enum mitigation_state arm64_get_spectre_bhb_state(void);
#endif /* __ASSEMBLY__ */
#endif
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -730,14 +730,39 @@ ssize_t cpu_show_spectre_v1(struct devic
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
+static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
+{
+ switch (bhb_state) {
+ case SPECTRE_UNAFFECTED:
+ return "";
+ default:
+ case SPECTRE_VULNERABLE:
+ return ", but not BHB";
+ case SPECTRE_MITIGATED:
+ return ", BHB";
+ }
+}
+
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
char *buf)
{
- if (__spectrev2_safe)
- return sprintf(buf, "Not affected\n");
+ enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
+ const char *bhb_str = get_bhb_affected_string(bhb_state);
+ const char *v2_str = "Branch predictor hardening";
+
+ if (__spectrev2_safe) {
+ if (bhb_state == SPECTRE_UNAFFECTED)
+ return sprintf(buf, "Not affected\n");
+
+ /*
+ * Platforms affected by Spectre-BHB can't report
+ * "Not affected" for Spectre-v2.
+ */
+ v2_str = "CSV2";
+ }
if (__hardenbp_enab)
- return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+ return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
return sprintf(buf, "Vulnerable\n");
}
@@ -758,3 +783,10 @@ ssize_t cpu_show_spec_store_bypass(struc
return sprintf(buf, "Vulnerable\n");
}
+
+static enum mitigation_state spectre_bhb_state;
+
+enum mitigation_state arm64_get_spectre_bhb_state(void)
+{
+ return spectre_bhb_state;
+}
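
For reference, the strings the patched cpu_show_spectre_v2() can now emit,
re-implemented as a stand-alone sketch that follows the hunk above
(spectrev2_safe/hardenbp stand in for the kernel's __spectrev2_safe and
__hardenbp_enab flags):

#include <stdio.h>

enum mitigation_state { SPECTRE_UNAFFECTED, SPECTRE_MITIGATED, SPECTRE_VULNERABLE };

static const char *bhb_suffix(enum mitigation_state bhb)
{
        switch (bhb) {
        case SPECTRE_UNAFFECTED:        return "";
        case SPECTRE_MITIGATED:         return ", BHB";
        default:                        return ", but not BHB";
        }
}

static void show(int spectrev2_safe, int hardenbp, enum mitigation_state bhb)
{
        const char *v2 = "Branch predictor hardening";

        if (spectrev2_safe) {
                if (bhb == SPECTRE_UNAFFECTED) {
                        printf("Not affected\n");
                        return;
                }
                /* BHB-affected platforms can't claim "Not affected". */
                v2 = "CSV2";
        }
        if (hardenbp) {
                printf("Mitigation: %s%s\n", v2, bhb_suffix(bhb));
                return;
        }
        printf("Vulnerable\n");
}

int main(void)
{
        show(1, 0, SPECTRE_UNAFFECTED); /* Not affected                                        */
        show(0, 1, SPECTRE_MITIGATED);  /* Mitigation: Branch predictor hardening, BHB         */
        show(0, 1, SPECTRE_VULNERABLE); /* Mitigation: Branch predictor hardening, but not BHB */
        show(0, 0, SPECTRE_MITIGATED);  /* Vulnerable                                          */
        return 0;
}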
From: James Morse <[email protected]>
commit 03aff3a77a58b5b52a77e00537a42090ad57b80b upstream.
Kpti stashes x30 in far_el1 while it uses x30 for all its work.
Making the vectors a per-cpu data structure will require a second
register.
Allow tramp_exit two registers before it unmaps the kernel, by
leaving x30 on the stack, and stashing x29 in far_el1.
Reviewed-by: Russell King (Oracle) <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/kernel/entry.S | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -356,14 +356,16 @@ alternative_else_nop_endif
ldp x24, x25, [sp, #16 * 12]
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
- ldr lr, [sp, #S_LR]
- add sp, sp, #S_FRAME_SIZE // restore sp
.if \el == 0
-alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+ ldr lr, [sp, #S_LR]
+ add sp, sp, #S_FRAME_SIZE // restore sp
+ eret
+alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
bne 4f
- msr far_el1, x30
+ msr far_el1, x29
tramp_alias x30, tramp_exit_native
br x30
4:
@@ -371,6 +373,8 @@ alternative_insn eret, nop, ARM64_UNMAP_
br x30
#endif
.else
+ ldr lr, [sp, #S_LR]
+ add sp, sp, #S_FRAME_SIZE // restore sp
eret
.endif
.endm
@@ -1048,10 +1052,12 @@ alternative_insn isb, nop, ARM64_WORKARO
.macro tramp_exit, regsize = 64
adr x30, tramp_vectors
msr vbar_el1, x30
- tramp_unmap_kernel x30
+ ldr lr, [sp, #S_LR]
+ tramp_unmap_kernel x29
.if \regsize == 64
- mrs x30, far_el1
+ mrs x29, far_el1
.endif
+ add sp, sp, #S_FRAME_SIZE // restore sp
eret
.endm
From: James Morse <[email protected]>
commit ed50da7764535f1e24432ded289974f2bf2b0c5a upstream.
The tramp_ventry macro uses tramp_vectors as the address of the vectors
when calculating which ventry in the 'full fat' vectors to branch to.
While there is one set of tramp_vectors, this will be true.
Adding multiple sets of vectors will break this assumption.
Move the generation of the vectors to a macro, and pass the start
of the vectors as an argument to tramp_ventry.
Reviewed-by: Russell King (Oracle) <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/kernel/entry.S | 30 ++++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -1027,7 +1027,7 @@ alternative_else_nop_endif
sub \dst, \dst, PAGE_SIZE
.endm
- .macro tramp_ventry, regsize = 64
+ .macro tramp_ventry, vector_start, regsize
.align 7
1:
.if \regsize == 64
@@ -1049,9 +1049,9 @@ alternative_insn isb, nop, ARM64_WORKARO
#else
ldr x30, =vectors
#endif
- prfm plil1strm, [x30, #(1b - tramp_vectors)]
+ prfm plil1strm, [x30, #(1b - \vector_start)]
msr vbar_el1, x30
- add x30, x30, #(1b - tramp_vectors + 4)
+ add x30, x30, #(1b - \vector_start + 4)
isb
ret
.org 1b + 128 // Did we overflow the ventry slot?
@@ -1069,19 +1069,21 @@ alternative_insn isb, nop, ARM64_WORKARO
eret
.endm
- .align 11
-ENTRY(tramp_vectors)
+ .macro generate_tramp_vector
+.Lvector_start\@:
.space 0x400
- tramp_ventry
- tramp_ventry
- tramp_ventry
- tramp_ventry
-
- tramp_ventry 32
- tramp_ventry 32
- tramp_ventry 32
- tramp_ventry 32
+ .rept 4
+ tramp_ventry .Lvector_start\@, 64
+ .endr
+ .rept 4
+ tramp_ventry .Lvector_start\@, 32
+ .endr
+ .endm
+
+ .align 11
+ENTRY(tramp_vectors)
+ generate_tramp_vector
END(tramp_vectors)
ENTRY(tramp_exit_native)
From: James Morse <[email protected]>
commit 13d7a08352a83ef2252aeb464a5e08dfc06b5dfd upstream.
The macros for building the kpti trampoline are all behind
CONFIG_UNMAP_KERNEL_AT_EL0, and in a region that outputs to the
.entry.tramp.text section.
Move the macros out so they can be used to generate other kinds of
trampoline. Only the symbols need to be guarded by
CONFIG_UNMAP_KERNEL_AT_EL0 and appear in the .entry.tramp.text section.
Reviewed-by: Russell King (Oracle) <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/kernel/entry.S | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -985,12 +985,7 @@ __ni_sys_trace:
.popsection // .entry.text
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-/*
- * Exception vectors trampoline.
- */
- .pushsection ".entry.tramp.text", "ax"
-
+ // Move from tramp_pg_dir to swapper_pg_dir
.macro tramp_map_kernel, tmp
mrs \tmp, ttbr1_el1
sub \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
@@ -1081,6 +1076,11 @@ alternative_insn isb, nop, ARM64_WORKARO
.endr
.endm
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ */
+ .pushsection ".entry.tramp.text", "ax"
.align 11
ENTRY(tramp_vectors)
generate_tramp_vector
From: James Morse <[email protected]>
commit a5905d6af492ee6a4a2205f0d550b3f931b03d03 upstream.
KVM allows the guest to discover whether the ARCH_WORKAROUND SMCCC calls are
implemented, and to preserve that state during migration through its
firmware register interface.
Add the necessary boilerplate for SMCCC_ARCH_WORKAROUND_3.
Reviewed-by: Russell King (Oracle) <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
[ kvm code moved to virt/kvm/arm, removed fw regs ABI. Added 32bit stub ]
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm/include/asm/kvm_host.h | 6 ++++++
arch/arm64/include/asm/kvm_host.h | 5 +++++
virt/kvm/arm/psci.c | 12 ++++++++++++
3 files changed, 23 insertions(+)
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -26,6 +26,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
+#include <asm/spectre.h>
#include <kvm/arm_arch_timer.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -324,4 +325,9 @@ static inline int kvm_arm_have_ssbd(void
return KVM_SSBD_UNKNOWN;
}
+static inline int kvm_arm_get_spectre_bhb_state(void)
+{
+ /* 32bit guests don't need firmware for this */
+ return SPECTRE_VULNERABLE; /* aka SMCCC_RET_NOT_SUPPORTED */
+}
#endif /* __ARM_KVM_HOST_H__ */
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -450,4 +450,9 @@ static inline int kvm_arm_have_ssbd(void
}
}
+static inline enum mitigation_state kvm_arm_get_spectre_bhb_state(void)
+{
+ return arm64_get_spectre_bhb_state();
+}
+
#endif /* __ARM64_KVM_HOST_H__ */
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -433,6 +433,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu
break;
}
break;
+ case ARM_SMCCC_ARCH_WORKAROUND_3:
+ switch (kvm_arm_get_spectre_bhb_state()) {
+ case SPECTRE_VULNERABLE:
+ break;
+ case SPECTRE_MITIGATED:
+ val = SMCCC_RET_SUCCESS;
+ break;
+ case SPECTRE_UNAFFECTED:
+ val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
+ break;
+ }
+ break;
}
break;
default:
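
Seen from the guest, the handler above means SMCCC_ARCH_FEATURES queried with
ARM_SMCCC_ARCH_WORKAROUND_3 now returns a meaningful answer. A sketch of how a
caller would interpret the result, using the return values from this series
(the SMC/HVC itself is replaced by canned inputs here):

#include <stdio.h>

/* Values as used in this series / include/linux/arm-smccc.h. */
#define SMCCC_RET_SUCCESS                       0
#define SMCCC_RET_NOT_SUPPORTED                 (-1)
#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED    1

/* Decision a caller makes on the result of
 * SMCCC_ARCH_FEATURES(ARM_SMCCC_ARCH_WORKAROUND_3). */
static const char *wa3_policy(long res)
{
        switch (res) {
        case SMCCC_RET_SUCCESS:
                return "invoke ARM_SMCCC_ARCH_WORKAROUND_3 on exception entry";
        case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
                return "host reports unaffected, no call needed";
        default:        /* SMCCC_RET_NOT_SUPPORTED or anything else */
                return "treat as vulnerable, fall back to other mitigations";
        }
}

int main(void)
{
        long samples[] = { SMCCC_RET_SUCCESS,
                           SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED,
                           SMCCC_RET_NOT_SUPPORTED };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("res = %ld -> %s\n", samples[i], wa3_policy(samples[i]));
        return 0;
}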
On Fri, 1 Apr 2022 at 12:06, Greg Kroah-Hartman
<[email protected]> wrote:
>
> This is the start of the stable review cycle for the 4.14.275 release.
> There are 27 patches in this series, all will be posted as a response
> to this one. If anyone has any issues with these being applied, please
> let me know.
>
> Responses should be made by Sun, 03 Apr 2022 06:36:16 +0000.
> Anything received after that time might be too late.
>
> The whole patch series can be found in one patch at:
> https://www.kernel.org/pub/linux/kernel/v4.x/stable-review/patch-4.14.275-rc1.gz
> or in the git tree and branch at:
> git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git linux-4.14.y
> and the diffstat can be found below.
>
> thanks,
>
> greg k-h
Results from Linaro’s test farm.
No regressions on arm64, arm, x86_64, and i386.
Tested-by: Linux Kernel Functional Testing <[email protected]>
## Build
* kernel: 4.14.275-rc1
* git: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git
* git branch: linux-4.14.y
* git commit: c9d20a4cf85d73eed2f609ed877cd8b2a249aaa6
* git describe: v4.14.274-28-gc9d20a4cf85d
* test details:
https://qa-reports.linaro.org/lkft/linux-stable-rc-linux-4.14.y/build/v4.14.274-28-gc9d20a4cf85d
## Test Regressions (compared to v4.14.274)
No test regressions found.
## Metric Regressions (compared to v4.14.274)
No metric regressions found.
## Test Fixes (compared to v4.14.274)
No test fixes found.
## Metric Fixes (compared to v4.14.274)
No metric fixes found.
## Test result summary
total: 84274, pass: 66776, fail: 1073, skip: 13891, xfail: 2534
## Build Summary
* arm: 280 total, 270 passed, 10 failed
* arm64: 35 total, 35 passed, 0 failed
* dragonboard-410c: 1 total, 1 passed, 0 failed
* hi6220-hikey: 1 total, 1 passed, 0 failed
* i386: 19 total, 19 passed, 0 failed
* juno-r2: 1 total, 1 passed, 0 failed
* mips: 22 total, 22 passed, 0 failed
* powerpc: 60 total, 16 passed, 44 failed
* sparc: 12 total, 12 passed, 0 failed
* x15: 1 total, 1 passed, 0 failed
* x86: 1 total, 1 passed, 0 failed
* x86_64: 34 total, 34 passed, 0 failed
## Test suites summary
* fwts
* igt-gpu-tools
* kselftest-android
* kselftest-arm64
* kselftest-bpf
* kselftest-breakpoints
* kselftest-capabilities
* kselftest-cgroup
* kselftest-clone3
* kselftest-core
* kselftest-cpu-hotplug
* kselftest-cpufreq
* kselftest-drivers
* kselftest-efivarfs
* kselftest-filesystems
* kselftest-firmware
* kselftest-fpu
* kselftest-futex
* kselftest-gpio
* kselftest-intel_pstate
* kselftest-ipc
* kselftest-ir
* kselftest-kcmp
* kselftest-kexec
* kselftest-kvm
* kselftest-lib
* kselftest-livepatch
* kselftest-membarrier
* kselftest-net
* kselftest-netfilter
* kselftest-nsfs
* kselftest-openat2
* kselftest-pid_namespace
* kselftest-pidfd
* kselftest-proc
* kselftest-pstore
* kselftest-ptrace
* kselftest-rseq
* kselftest-rtc
* kselftest-seccomp
* kselftest-sigaltstack
* kselftest-size
* kselftest-splice
* kselftest-static_keys
* kselftest-sync
* kselftest-sysctl
* kselftest-tc-testing
* kselftest-timens
* kselftest-timers
* kselftest-tmpfs
* kselftest-tpm2
* kselftest-user
* kselftest-vm
* kselftest-x86
* kselftest-zram
* kvm-unit-tests
* libhugetlbfs
* linux-log-parser
* ltp-cap_bounds-tests
* ltp-commands-tests
* ltp-containers-tests
* ltp-controllers-tests
* ltp-cpuhotplug-tests
* ltp-crypto-tests
* ltp-cve-tests
* ltp-dio-tests
* ltp-fcntl-locktests-tests
* ltp-filecaps-tests
* ltp-fs-tests
* ltp-fs_bind-tests
* ltp-fs_perms_simple-tests
* ltp-fsx-tests
* ltp-hugetlb-tests
* ltp-io-tests
* ltp-ipc-tests
* ltp-math-tests
* ltp-mm-tests
* ltp-nptl-tests
* ltp-open-posix-tests
* ltp-pty-tests
* ltp-sched-tests
* ltp-securebits-tests
* ltp-syscalls-tests
* ltp-tracing-tests
* network-basic-tests
* packetdrill
* perf
* rcutorture
* ssuite
* v4l2-compliance
* vdso
--
Linaro LKFT
https://lkft.linaro.org
From: James Morse <[email protected]>
commit c47e4d04ba0f1ea17353d85d45f611277507e07a upstream.
Spectre-BHB needs to add sequences to the vectors. Having one global
set of vectors is a problem for big/little systems where the sequence
is costly on cpus that are not vulnerable.
Making the vectors per-cpu in the style of KVM's bh_harden_hyp_vecs
requires the vectors to be generated by macros.
Make the kpti re-mapping of the kernel optional, so the macros can be
used without kpti.
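[ Not part of the patch: the reason the vectors must come from macros is that
later patches in this series generate several vector variants and let each
CPU select one at runtime, so unaffected cores in a big/little system skip
the costly sequence. A minimal sketch of that per-cpu selection, using
illustrative names rather than the series' exact helpers: ]

#include <asm/sysreg.h>
#include <asm/barrier.h>

/* Illustrative only: point this CPU's VBAR_EL1 at the chosen variant. */
static void sketch_set_this_cpu_vectors(const char *vector_base)
{
	write_sysreg(vector_base, vbar_el1);
	isb();
}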
Reviewed-by: Russell King (Oracle) <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/kernel/entry.S | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -1022,9 +1022,10 @@ alternative_else_nop_endif
sub \dst, \dst, PAGE_SIZE
.endm
- .macro tramp_ventry, vector_start, regsize
+ .macro tramp_ventry, vector_start, regsize, kpti
.align 7
1:
+ .if \kpti == 1
.if \regsize == 64
msr tpidrro_el0, x30 // Restored in kernel_ventry
.endif
@@ -1046,8 +1047,12 @@ alternative_insn isb, nop, ARM64_WORKARO
#endif
prfm plil1strm, [x30, #(1b - \vector_start)]
msr vbar_el1, x30
- add x30, x30, #(1b - \vector_start + 4)
isb
+ .else
+ ldr x30, =vectors
+ .endif // \kpti == 1
+
+ add x30, x30, #(1b - \vector_start + 4)
ret
.org 1b + 128 // Did we overflow the ventry slot?
.endm
@@ -1064,15 +1069,15 @@ alternative_insn isb, nop, ARM64_WORKARO
eret
.endm
- .macro generate_tramp_vector
+ .macro generate_tramp_vector, kpti
.Lvector_start\@:
.space 0x400
.rept 4
- tramp_ventry .Lvector_start\@, 64
+ tramp_ventry .Lvector_start\@, 64, \kpti
.endr
.rept 4
- tramp_ventry .Lvector_start\@, 32
+ tramp_ventry .Lvector_start\@, 32, \kpti
.endr
.endm
@@ -1083,7 +1088,7 @@ alternative_insn isb, nop, ARM64_WORKARO
.pushsection ".entry.tramp.text", "ax"
.align 11
ENTRY(tramp_vectors)
- generate_tramp_vector
+ generate_tramp_vector kpti=1
END(tramp_vectors)
ENTRY(tramp_exit_native)
From: Marc Zyngier <[email protected]>
commit 95b861a4a6d94f64d5242605569218160ebacdbe upstream.
When running on Cortex-A76, a Generic Timer access from an AArch32 EL0
task may end up with a corrupted value in the destination register. The
workaround for this is to trap these accesses at EL1/EL2 and execute them
there.
This only affects versions r0p0, r1p0 and r2p0 of the CPU.
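[ Not part of the patch: the read_cntvct_el0 hook registered below simply
re-reads the counter with read_sysreg(), which is safe because the kernel
performs that read at EL1, where the erratum does not apply. Kernel-side
counter reads then dispatch through the per-cpu out-of-line workaround
roughly as sketched here (simplified; not the exact 4.14 code, which also
guards this path with a static key): ]

/* Illustrative sketch of the OOL-workaround dispatch for cntvct reads.
 * Needs <clocksource/arm_arch_timer.h> and <asm/arch_timer.h>. */
static inline u64 sketch_read_cntvct(void)
{
	const struct arch_timer_erratum_workaround *wa;

	wa = __this_cpu_read(timer_unstable_counter_workaround);
	if (wa && wa->read_cntvct_el0)
		return wa->read_cntvct_el0();	/* erratum path */

	return read_sysreg(cntvct_el0);		/* normal path */
}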
Acked-by: Mark Rutland <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/Kconfig | 12 ++++++++++++
arch/arm64/include/asm/cpucaps.h | 3 ++-
arch/arm64/include/asm/cputype.h | 2 ++
arch/arm64/kernel/cpu_errata.c | 8 ++++++++
drivers/clocksource/arm_arch_timer.c | 15 +++++++++++++++
5 files changed, 39 insertions(+), 1 deletion(-)
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -458,6 +458,18 @@ config ARM64_ERRATUM_1024718
If unsure, say Y.
+config ARM64_ERRATUM_1188873
+ bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
+ default y
+ help
+ This option adds work arounds for ARM Cortex-A76 erratum 1188873
+
+ Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could cause
+ register corruption when accessing the timer registers from
+ AArch32 userspace.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -45,7 +45,8 @@
#define ARM64_SSBD 25
#define ARM64_MISMATCHED_CACHE_TYPE 26
#define ARM64_SSBS 27
+#define ARM64_WORKAROUND_1188873 28
-#define ARM64_NCAPS 28
+#define ARM64_NCAPS 29
#endif /* __ASM_CPUCAPS_H */
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -87,6 +87,7 @@
#define ARM_CPU_PART_CORTEX_A75 0xD0A
#define ARM_CPU_PART_CORTEX_A35 0xD04
#define ARM_CPU_PART_CORTEX_A55 0xD05
+#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define APM_CPU_PART_POTENZA 0x000
@@ -112,6 +113,7 @@
#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
+#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -712,6 +712,14 @@ const struct arm64_cpu_capabilities arm6
.matches = has_ssbd_mitigation,
.midr_range_list = arm64_ssb_cpus,
},
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+ {
+ /* Cortex-A76 r0p0 to r2p0 */
+ .desc = "ARM erratum 1188873",
+ .capability = ARM64_WORKAROUND_1188873,
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ },
+#endif
{
}
};
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -298,6 +298,13 @@ static u64 notrace arm64_858921_read_cnt
}
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+static u64 notrace arm64_1188873_read_cntvct_el0(void)
+{
+ return read_sysreg(cntvct_el0);
+}
+#endif
+
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
timer_unstable_counter_workaround);
@@ -381,6 +388,14 @@ static const struct arch_timer_erratum_w
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+ {
+ .match_type = ate_match_local_cap_id,
+ .id = (void *)ARM64_WORKAROUND_1188873,
+ .desc = "ARM erratum 1188873",
+ .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
+ },
+#endif
};
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
From: Marc Zyngier <[email protected]>
commit c2b5bba3967a000764e9148e6f020d776b7ecd82 upstream.
Since ARM64_ERRATUM_1188873 only affects AArch32 EL0, it makes some
sense that it should depend on COMPAT.
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: James Morse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm64/Kconfig | 1 +
1 file changed, 1 insertion(+)
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -461,6 +461,7 @@ config ARM64_ERRATUM_1024718
config ARM64_ERRATUM_1188873
bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
default y
+ depends on COMPAT
select ARM_ARCH_TIMER_OOL_WORKAROUND
help
This option adds work arounds for ARM Cortex-A76 erratum 1188873