From: Borislav Petkov <[email protected]>
Hi folks,
this is the first part of me trying to noinstr-ify the mce mess
properly. That one deals with making all indirect calls on the #MC
exception path direct, to keep the compiler from adding retpoline
thunks which objtool doesn't like. And when you look at the changes, you
probably would go, gee, why did we ever do indirect calls - it is even
more readable with direct calls and there are practically no downsides.
So let's remove the indirect calls.
There's another patch on top which does the actual noinstr annotation
but that currently takes longer due to objtool changes in tip.
So for now, the first part.
As always, constructive review is welcome.
Thx.
Borislav Petkov (4):
x86/mce: Get rid of the mce_severity function pointer
x86/mce: Get rid of machine_check_vector
x86/mce: Get rid of msr_ops
x86/mce: Get rid of the ->quirk_no_way_out() indirect call
arch/x86/include/asm/mce.h | 12 --
arch/x86/kernel/cpu/mce/amd.c | 10 +-
arch/x86/kernel/cpu/mce/core.c | 217 ++++++++++++-----------------
arch/x86/kernel/cpu/mce/internal.h | 47 +++++--
arch/x86/kernel/cpu/mce/p5.c | 6 +-
arch/x86/kernel/cpu/mce/severity.c | 11 +-
arch/x86/kernel/cpu/mce/winchip.c | 6 +-
7 files changed, 137 insertions(+), 172 deletions(-)
--
2.29.2
From: Borislav Petkov <[email protected]>
Get rid of the indirect function pointer and use flag settings instead
to steer execution.
Now that it is not an indirect call any longer, drop the instrumentation
annotation for objtool too.
No functional changes.
Signed-off-by: Borislav Petkov <[email protected]>
---
arch/x86/include/asm/mce.h | 10 ------
arch/x86/kernel/cpu/mce/core.c | 57 ++++++++++++++----------------
arch/x86/kernel/cpu/mce/internal.h | 29 ++++++++++++---
arch/x86/kernel/cpu/mce/p5.c | 6 +---
arch/x86/kernel/cpu/mce/winchip.c | 6 +---
5 files changed, 53 insertions(+), 55 deletions(-)
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 258ef6d9955c..813b4f5b0dd6 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -215,16 +215,6 @@ static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
u64 lapic_id) { return -EINVAL; }
#endif
-#ifdef CONFIG_X86_ANCIENT_MCE
-void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
-void winchip_mcheck_init(struct cpuinfo_x86 *c);
-static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
-#else
-static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
-static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
-static inline void enable_p5_mce(void) {}
-#endif
-
void mce_setup(struct mce *m);
void mce_log(struct mce *m);
DECLARE_PER_CPU(struct device *, mce_device);
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index fa38d51d9a23..59f327cae84f 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1305,6 +1305,15 @@ static void queue_task_work(struct mce *m, int kill_current_task)
task_work_add(current, &current->mce_kill_me, TWA_RESUME);
}
+/* Handle unconfigured int18 (should never happen) */
+static noinstr void unexpected_machine_check(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
+ smp_processor_id());
+ instrumentation_end();
+}
+
/*
* The actual machine check handler. This only handles real
* exceptions when something got corrupted coming in through int 18.
@@ -1325,36 +1334,43 @@ static void queue_task_work(struct mce *m, int kill_current_task)
*/
noinstr void do_machine_check(struct pt_regs *regs)
{
+ int worst = 0, order, no_way_out, kill_current_task, lmce;
DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
DECLARE_BITMAP(toclear, MAX_NR_BANKS);
struct mca_config *cfg = &mca_cfg;
struct mce m, *final;
char *msg = NULL;
- int worst = 0;
+
+ if (unlikely(mce_flags.p5))
+ return pentium_machine_check(regs);
+ else if (unlikely(mce_flags.winchip))
+ return winchip_machine_check(regs);
+ else if (unlikely(!mca_cfg.initialized))
+ return unexpected_machine_check(regs);
/*
* Establish sequential order between the CPUs entering the machine
* check handler.
*/
- int order = -1;
+ order = -1;
/*
* If no_way_out gets set, there is no safe way to recover from this
* MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
*/
- int no_way_out = 0;
+ no_way_out = 0;
/*
* If kill_current_task is not set, there might be a way to recover from this
* error.
*/
- int kill_current_task = 0;
+ kill_current_task = 0;
/*
* MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
* on Intel.
*/
- int lmce = 1;
+ lmce = 1;
this_cpu_inc(mce_exception_count);
@@ -1829,9 +1845,11 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
intel_p5_mcheck_init(c);
+ mce_flags.p5 = 1;
return 1;
case X86_VENDOR_CENTAUR:
winchip_mcheck_init(c);
+ mce_flags.winchip = 1;
return 1;
default:
return 0;
@@ -1986,18 +2004,6 @@ bool filter_mce(struct mce *m)
return false;
}
-/* Handle unconfigured int18 (should never happen) */
-static noinstr void unexpected_machine_check(struct pt_regs *regs)
-{
- instrumentation_begin();
- pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
- smp_processor_id());
- instrumentation_end();
-}
-
-/* Call the installed machine check handler for this CPU setup. */
-void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
-
static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
{
irqentry_state_t irq_state;
@@ -2008,31 +2014,22 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
* Only required when from kernel mode. See
* mce_check_crashing_cpu() for details.
*/
- if (machine_check_vector == do_machine_check &&
- mce_check_crashing_cpu())
+ if (mca_cfg.initialized && mce_check_crashing_cpu())
return;
irq_state = irqentry_nmi_enter(regs);
- /*
- * The call targets are marked noinstr, but objtool can't figure
- * that out because it's an indirect call. Annotate it.
- */
- instrumentation_begin();
- machine_check_vector(regs);
+ do_machine_check(regs);
- instrumentation_end();
irqentry_nmi_exit(regs, irq_state);
}
static __always_inline void exc_machine_check_user(struct pt_regs *regs)
{
irqentry_enter_from_user_mode(regs);
- instrumentation_begin();
- machine_check_vector(regs);
+ do_machine_check(regs);
- instrumentation_end();
irqentry_exit_to_user_mode(regs);
}
@@ -2099,7 +2096,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
return;
}
- machine_check_vector = do_machine_check;
+ mca_cfg.initialized = 1;
__mcheck_cpu_init_early(c);
__mcheck_cpu_init_generic();
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 09cb5ab9a81d..d71d6c5c3ef0 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -8,9 +8,6 @@
#include <linux/device.h>
#include <asm/mce.h>
-/* Pointer to the installed machine check handler for this CPU setup. */
-extern void (*machine_check_vector)(struct pt_regs *);
-
enum severity_level {
MCE_NO_SEVERITY,
MCE_DEFERRED_SEVERITY,
@@ -126,7 +123,9 @@ struct mca_config {
ser : 1,
recovery : 1,
bios_cmci_threshold : 1,
- __reserved : 59;
+ /* Proper #MC exception handler is set */
+ initialized : 1,
+ __reserved : 58;
s8 bootlog;
int tolerant;
@@ -162,7 +161,13 @@ struct mce_vendor_flags {
/* AMD-style error thresholding banks present. */
amd_threshold : 1,
- __reserved_0 : 60;
+ /* Pentium, family 5-style MCA */
+ p5 : 1,
+
+ /* Centaur Winchip C6-style MCA */
+ winchip : 1,
+
+ __reserved_0 : 58;
};
extern struct mce_vendor_flags mce_flags;
@@ -195,4 +200,18 @@ __visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
unsigned long error_code,
unsigned long fault_addr);
+#ifdef CONFIG_X86_ANCIENT_MCE
+void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
+void winchip_mcheck_init(struct cpuinfo_x86 *c);
+noinstr void pentium_machine_check(struct pt_regs *regs);
+noinstr void winchip_machine_check(struct pt_regs *regs);
+static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
+#else
+static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
+static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
+static inline void enable_p5_mce(void) {}
+static inline void pentium_machine_check(struct pt_regs *regs) {}
+static inline void winchip_machine_check(struct pt_regs *regs) {}
+#endif
+
#endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/arch/x86/kernel/cpu/mce/p5.c b/arch/x86/kernel/cpu/mce/p5.c
index 19e90cae8e97..2272ad53fc33 100644
--- a/arch/x86/kernel/cpu/mce/p5.c
+++ b/arch/x86/kernel/cpu/mce/p5.c
@@ -21,7 +21,7 @@
int mce_p5_enabled __read_mostly;
/* Machine check handler for Pentium class Intel CPUs: */
-static noinstr void pentium_machine_check(struct pt_regs *regs)
+noinstr void pentium_machine_check(struct pt_regs *regs)
{
u32 loaddr, hi, lotype;
@@ -54,10 +54,6 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
if (!cpu_has(c, X86_FEATURE_MCE))
return;
- machine_check_vector = pentium_machine_check;
- /* Make sure the vector pointer is visible before we enable MCEs: */
- wmb();
-
/* Read registers before enabling: */
rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
diff --git a/arch/x86/kernel/cpu/mce/winchip.c b/arch/x86/kernel/cpu/mce/winchip.c
index 9c9f0abd2d7f..6c99f2941909 100644
--- a/arch/x86/kernel/cpu/mce/winchip.c
+++ b/arch/x86/kernel/cpu/mce/winchip.c
@@ -17,7 +17,7 @@
#include "internal.h"
/* Machine check handler for WinChip C6: */
-static noinstr void winchip_machine_check(struct pt_regs *regs)
+noinstr void winchip_machine_check(struct pt_regs *regs)
{
instrumentation_begin();
pr_emerg("CPU0: Machine Check Exception.\n");
@@ -30,10 +30,6 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
{
u32 lo, hi;
- machine_check_vector = winchip_machine_check;
- /* Make sure the vector pointer is visible before we enable MCEs: */
- wmb();
-
rdmsr(MSR_IDT_FCR1, lo, hi);
lo |= (1<<2); /* Enable EIERRINT (int 18 MCE) */
lo &= ~(1<<4); /* Enable MCE */
--
2.29.2
From: Borislav Petkov <[email protected]>
Turn it into a normal function which calls an AMD- or Intel-specific
variant depending on the CPU it runs on.
No functional changes.
Signed-off-by: Borislav Petkov <[email protected]>
---
arch/x86/include/asm/mce.h | 2 --
arch/x86/kernel/cpu/mce/core.c | 1 -
arch/x86/kernel/cpu/mce/internal.h | 3 +--
arch/x86/kernel/cpu/mce/severity.c | 11 +++++------
4 files changed, 6 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index da9321548f6f..258ef6d9955c 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -205,14 +205,12 @@ struct cper_ia_proc_ctx;
int mcheck_init(void);
void mcheck_cpu_init(struct cpuinfo_x86 *c);
void mcheck_cpu_clear(struct cpuinfo_x86 *c);
-void mcheck_vendor_init_severity(void);
int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
u64 lapic_id);
#else
static inline int mcheck_init(void) { return 0; }
static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
-static inline void mcheck_vendor_init_severity(void) {}
static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
u64 lapic_id) { return -EINVAL; }
#endif
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 8cb7816d03b4..fa38d51d9a23 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -2207,7 +2207,6 @@ int __init mcheck_init(void)
mce_register_decode_chain(&early_nb);
mce_register_decode_chain(&mce_uc_nb);
mce_register_decode_chain(&mce_default_nb);
- mcheck_vendor_init_severity();
INIT_WORK(&mce_work, mce_gen_pool_process);
init_irq_work(&mce_irq_work, mce_irq_work_cb);
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 88dcc79cfb07..09cb5ab9a81d 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -38,8 +38,7 @@ int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
struct llist_node *mce_gen_pool_prepare_records(void);
-extern int (*mce_severity)(struct mce *a, struct pt_regs *regs,
- int tolerant, char **msg, bool is_excp);
+int mce_severity(struct mce *a, struct pt_regs *regs, int tolerant, char **msg, bool is_excp);
struct dentry *mce_get_debugfs_dir(void);
extern mce_banks_t mce_banks_ce_disabled;
diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
index 17e631443116..695570fadb5e 100644
--- a/arch/x86/kernel/cpu/mce/severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -407,15 +407,14 @@ static int mce_severity_intel(struct mce *m, struct pt_regs *regs,
}
}
-/* Default to mce_severity_intel */
-int (*mce_severity)(struct mce *m, struct pt_regs *regs, int tolerant, char **msg, bool is_excp) =
- mce_severity_intel;
-
-void __init mcheck_vendor_init_severity(void)
+int mce_severity(struct mce *m, struct pt_regs *regs, int tolerant, char **msg,
+ bool is_excp)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
- mce_severity = mce_severity_amd;
+ return mce_severity_amd(m, regs, tolerant, msg, is_excp);
+ else
+ return mce_severity_intel(m, regs, tolerant, msg, is_excp);
}
#ifdef CONFIG_DEBUG_FS
--
2.29.2
From: Borislav Petkov <[email protected]>
Avoid having indirect calls and use a normal function which returns the
proper MSR address based on the ->smca setting.
No functional changes.
Signed-off-by: Borislav Petkov <[email protected]>
---
arch/x86/kernel/cpu/mce/amd.c | 10 ++--
arch/x86/kernel/cpu/mce/core.c | 95 +++++++++++-------------------
arch/x86/kernel/cpu/mce/internal.h | 12 ++--
3 files changed, 44 insertions(+), 73 deletions(-)
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 08831acc1d03..27cacf504663 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -526,7 +526,7 @@ static u32 get_block_address(u32 current_addr, u32 low, u32 high,
/* Fall back to method we used for older processors: */
switch (block) {
case 0:
- addr = msr_ops.misc(bank);
+ addr = mca_msr_reg(bank, MCA_MISC);
break;
case 1:
offset = ((low & MASK_BLKPTR_LO) >> 21);
@@ -978,8 +978,8 @@ static void log_error_deferred(unsigned int bank)
{
bool defrd;
- defrd = _log_error_bank(bank, msr_ops.status(bank),
- msr_ops.addr(bank), 0);
+ defrd = _log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS),
+ mca_msr_reg(bank, MCA_ADDR), 0);
if (!mce_flags.smca)
return;
@@ -1009,7 +1009,7 @@ static void amd_deferred_error_interrupt(void)
static void log_error_thresholding(unsigned int bank, u64 misc)
{
- _log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc);
+ _log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), mca_msr_reg(bank, MCA_ADDR), misc);
}
static void log_and_reset_block(struct threshold_block *block)
@@ -1397,7 +1397,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
}
}
- err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
+ err = allocate_threshold_blocks(cpu, b, bank, 0, mca_msr_reg(bank, MCA_MISC));
if (err)
goto out_kobj;
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 59f327cae84f..ee4f534424b8 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -176,53 +176,31 @@ void mce_unregister_decode_chain(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
-static inline u32 ctl_reg(int bank)
+u32 mca_msr_reg(int bank, enum mca_msr reg)
{
- return MSR_IA32_MCx_CTL(bank);
-}
-
-static inline u32 status_reg(int bank)
-{
- return MSR_IA32_MCx_STATUS(bank);
-}
-
-static inline u32 addr_reg(int bank)
-{
- return MSR_IA32_MCx_ADDR(bank);
-}
-
-static inline u32 misc_reg(int bank)
-{
- return MSR_IA32_MCx_MISC(bank);
-}
+ switch (reg) {
+ case MCA_CTL:
+ return mce_flags.smca ? MSR_AMD64_SMCA_MCx_CTL(bank)
+ : MSR_IA32_MCx_CTL(bank);
-static inline u32 smca_ctl_reg(int bank)
-{
- return MSR_AMD64_SMCA_MCx_CTL(bank);
-}
+ case MCA_STATUS:
+ return mce_flags.smca ? MSR_AMD64_SMCA_MCx_STATUS(bank)
+ : MSR_IA32_MCx_STATUS(bank);
-static inline u32 smca_status_reg(int bank)
-{
- return MSR_AMD64_SMCA_MCx_STATUS(bank);
-}
+ case MCA_ADDR:
+ return mce_flags.smca ? MSR_AMD64_SMCA_MCx_ADDR(bank)
+ : MSR_IA32_MCx_ADDR(bank);
-static inline u32 smca_addr_reg(int bank)
-{
- return MSR_AMD64_SMCA_MCx_ADDR(bank);
-}
+ case MCA_MISC:
+ return mce_flags.smca ? MSR_AMD64_SMCA_MCx_MISC(bank)
+ : MSR_IA32_MCx_MISC(bank);
+ default: break;
+ }
-static inline u32 smca_misc_reg(int bank)
-{
- return MSR_AMD64_SMCA_MCx_MISC(bank);
+ WARN_ON_ONCE(1);
+ return 0;
}
-struct mca_msr_regs msr_ops = {
- .ctl = ctl_reg,
- .status = status_reg,
- .addr = addr_reg,
- .misc = misc_reg
-};
-
static void __print_mce(struct mce *m)
{
pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
@@ -362,11 +340,11 @@ static int msr_to_offset(u32 msr)
if (msr == mca_cfg.rip_msr)
return offsetof(struct mce, ip);
- if (msr == msr_ops.status(bank))
+ if (msr == mca_msr_reg(bank, MCA_STATUS))
return offsetof(struct mce, status);
- if (msr == msr_ops.addr(bank))
+ if (msr == mca_msr_reg(bank, MCA_ADDR))
return offsetof(struct mce, addr);
- if (msr == msr_ops.misc(bank))
+ if (msr == mca_msr_reg(bank, MCA_MISC))
return offsetof(struct mce, misc);
if (msr == MSR_IA32_MCG_STATUS)
return offsetof(struct mce, mcgstatus);
@@ -685,10 +663,10 @@ static struct notifier_block mce_default_nb = {
static void mce_read_aux(struct mce *m, int i)
{
if (m->status & MCI_STATUS_MISCV)
- m->misc = mce_rdmsrl(msr_ops.misc(i));
+ m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC));
if (m->status & MCI_STATUS_ADDRV) {
- m->addr = mce_rdmsrl(msr_ops.addr(i));
+ m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR));
/*
* Mask the reported address by the reported granularity.
@@ -758,7 +736,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
m.bank = i;
barrier();
- m.status = mce_rdmsrl(msr_ops.status(i));
+ m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
/* If this entry is not valid, ignore it */
if (!(m.status & MCI_STATUS_VAL))
@@ -826,7 +804,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
/*
* Clear state for this bank.
*/
- mce_wrmsrl(msr_ops.status(i), 0);
+ mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
}
/*
@@ -851,7 +829,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
int i;
for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
- m->status = mce_rdmsrl(msr_ops.status(i));
+ m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
if (!(m->status & MCI_STATUS_VAL))
continue;
@@ -1144,7 +1122,7 @@ static void mce_clear_state(unsigned long *toclear)
for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
if (test_bit(i, toclear))
- mce_wrmsrl(msr_ops.status(i), 0);
+ mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
}
}
@@ -1203,7 +1181,7 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
m->addr = 0;
m->bank = i;
- m->status = mce_rdmsrl(msr_ops.status(i));
+ m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
if (!(m->status & MCI_STATUS_VAL))
continue;
@@ -1682,8 +1660,8 @@ static void __mcheck_cpu_init_clear_banks(void)
if (!b->init)
continue;
- wrmsrl(msr_ops.ctl(i), b->ctl);
- wrmsrl(msr_ops.status(i), 0);
+ wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
+ wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
}
}
@@ -1709,7 +1687,7 @@ static void __mcheck_cpu_check_banks(void)
if (!b->init)
continue;
- rdmsrl(msr_ops.ctl(i), msrval);
+ rdmsrl(mca_msr_reg(i, MCA_CTL), msrval);
b->init = !!msrval;
}
}
@@ -1868,13 +1846,6 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR);
mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA);
mce_flags.amd_threshold = 1;
-
- if (mce_flags.smca) {
- msr_ops.ctl = smca_ctl_reg;
- msr_ops.status = smca_status_reg;
- msr_ops.addr = smca_addr_reg;
- msr_ops.misc = smca_misc_reg;
- }
}
}
@@ -2228,7 +2199,7 @@ static void mce_disable_error_reporting(void)
struct mce_bank *b = &mce_banks[i];
if (b->init)
- wrmsrl(msr_ops.ctl(i), 0);
+ wrmsrl(mca_msr_reg(i, MCA_CTL), 0);
}
return;
}
@@ -2580,7 +2551,7 @@ static void mce_reenable_cpu(void)
struct mce_bank *b = &mce_banks[i];
if (b->init)
- wrmsrl(msr_ops.ctl(i), b->ctl);
+ wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
}
}
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index d71d6c5c3ef0..1ad7b4bf5423 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -172,14 +172,14 @@ struct mce_vendor_flags {
extern struct mce_vendor_flags mce_flags;
-struct mca_msr_regs {
- u32 (*ctl) (int bank);
- u32 (*status) (int bank);
- u32 (*addr) (int bank);
- u32 (*misc) (int bank);
+enum mca_msr {
+ MCA_CTL,
+ MCA_STATUS,
+ MCA_ADDR,
+ MCA_MISC,
};
-extern struct mca_msr_regs msr_ops;
+u32 mca_msr_reg(int bank, enum mca_msr reg);
/* Decide whether to add MCE record to MCE event pool or filter it out. */
extern bool filter_mce(struct mce *m);
--
2.29.2
From: Borislav Petkov <[email protected]>
Use a flag setting instead to call the only quirk function there is.
No functional changes.
Signed-off-by: Borislav Petkov <[email protected]>
---
arch/x86/kernel/cpu/mce/core.c | 64 +++++++++++++++---------------
arch/x86/kernel/cpu/mce/internal.h | 5 ++-
2 files changed, 35 insertions(+), 34 deletions(-)
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index ee4f534424b8..e0cef8781c6f 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -121,8 +121,6 @@ mce_banks_t mce_banks_ce_disabled;
static struct work_struct mce_work;
static struct irq_work mce_irq_work;
-static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
-
/*
* CPU/chipset specific EDAC code can register a notifier call here to print
* MCE errors in a human-readable form.
@@ -818,6 +816,34 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
}
EXPORT_SYMBOL_GPL(machine_check_poll);
+/*
+ * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
+ * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
+ * Vol 3B Table 15-20). But this confuses both the code that determines
+ * whether the machine check occurred in kernel or user mode, and also
+ * the severity assessment code. Pretend that EIPV was set, and take the
+ * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
+ */
+static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
+{
+ if (bank != 0)
+ return;
+ if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
+ return;
+ if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
+ MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
+ MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
+ MCACOD)) !=
+ (MCI_STATUS_UC|MCI_STATUS_EN|
+ MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
+ MCI_STATUS_AR|MCACOD_INSTR))
+ return;
+
+ m->mcgstatus |= MCG_STATUS_EIPV;
+ m->ip = regs->ip;
+ m->cs = regs->cs;
+}
+
/*
* Do a quick check if any of the events requires a panic.
* This decides if we keep the events around or clear them.
@@ -834,8 +860,8 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
continue;
__set_bit(i, validp);
- if (quirk_no_way_out)
- quirk_no_way_out(i, m, regs);
+ if (mce_flags.snb_ifu_quirk)
+ quirk_sandybridge_ifu(i, m, regs);
m->bank = i;
if (mce_severity(m, regs, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
@@ -1692,34 +1718,6 @@ static void __mcheck_cpu_check_banks(void)
}
}
-/*
- * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
- * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
- * Vol 3B Table 15-20). But this confuses both the code that determines
- * whether the machine check occurred in kernel or user mode, and also
- * the severity assessment code. Pretend that EIPV was set, and take the
- * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
- */
-static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
-{
- if (bank != 0)
- return;
- if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
- return;
- if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
- MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
- MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
- MCACOD)) !=
- (MCI_STATUS_UC|MCI_STATUS_EN|
- MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
- MCI_STATUS_AR|MCACOD_INSTR))
- return;
-
- m->mcgstatus |= MCG_STATUS_EIPV;
- m->ip = regs->ip;
- m->cs = regs->cs;
-}
-
/* Add per CPU specific workarounds here */
static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
@@ -1793,7 +1791,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
cfg->bootlog = 0;
if (c->x86 == 6 && c->x86_model == 45)
- quirk_no_way_out = quirk_sandybridge_ifu;
+ mce_flags.snb_ifu_quirk = 1;
}
if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 1ad7b4bf5423..21865545cd3b 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -167,7 +167,10 @@ struct mce_vendor_flags {
/* Centaur Winchip C6-style MCA */
winchip : 1,
- __reserved_0 : 58;
+ /* SandyBridge IFU quirk */
+ snb_ifu_quirk : 1,
+
+ __reserved_0 : 57;
};
extern struct mce_vendor_flags mce_flags;
--
2.29.2
On Fri, Sep 17, 2021 at 12:53:53PM +0200, Borislav Petkov wrote:
> @@ -126,7 +123,9 @@ struct mca_config {
> ser : 1,
> recovery : 1,
> bios_cmci_threshold : 1,
> - __reserved : 59;
> + /* Proper #MC exception handler is set */
> + initialized : 1,
> + __reserved : 58;
Does this __reserved field do anything useful? It seems to
just be an annoyance that must be updated each time a new
bit is added. Surely the compiler will see that these bitfields
are in a "u64" and do the math and skip to the right boundary
without this.
-Tony
On Fri, Sep 17, 2021 at 12:53:55PM +0200, Borislav Petkov wrote:
> @@ -1793,7 +1791,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
> cfg->bootlog = 0;
>
> if (c->x86 == 6 && c->x86_model == 45)
> - quirk_no_way_out = quirk_sandybridge_ifu;
> + mce_flags.snb_ifu_quirk = 1;
> }
Someday all these tests for random vendor/family/model/stepping could be
refactored into a x86_match_cpu() lookup table (now that x86_match_cpu
can handle steppings). But that's a topic for a different patch series.
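E.g., roughly (untested sketch, the table name is made up):

	static const struct x86_cpu_id snb_ifu_quirk_ids[] __initconst = {
		/* family 6, model 45, i.e. the c->x86_model == 45 check */
		X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, NULL),
		{}
	};

	...

	if (x86_match_cpu(snb_ifu_quirk_ids))
		mce_flags.snb_ifu_quirk = 1;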
-Tony
On 20/09/2021 06.57, Luck, Tony wrote:
> On Fri, Sep 17, 2021 at 12:53:53PM +0200, Borislav Petkov wrote:
>> @@ -126,7 +123,9 @@ struct mca_config {
>> ser : 1,
>> recovery : 1,
>> bios_cmci_threshold : 1,
>> - __reserved : 59;
>> + /* Proper #MC exception handler is set */
>> + initialized : 1,
>> + __reserved : 58;
>
> Does this __reserved field do anything useful? It seems to
> just be an annoyance that must be updated each time a new
> bit is added. Surely the compiler will see that these bitfields
> are in a "u64" and do the math and skip to the right boundary
> without this.
Not at all. And it also seems that the current layout is not at all what
may have been intended (the bit fields do not start at an 8-byte boundary).
$ cat a.c
#include <string.h>
#include <stdint.h>
struct s1 {
char x;
uint64_t a:1,
b:1,
c:1,
d:61;
char y;
};
struct s2 {
char x;
uint64_t a:1,
b:1,
c:1;
char y;
};
struct s3 {
uint64_t x;
uint64_t a:1,
b:1,
c:1;
char y;
};
// some dummy functions to make the structs appear used and make gcc
// actually emit debug info
void f1(struct s1 *s) { memset(s, 0xff, sizeof(*s)); }
void f2(struct s2 *s) { memset(s, 0xff, sizeof(*s)); }
void f3(struct s3 *s) { memset(s, 0xff, sizeof(*s)); }
$ gcc -o a.o -c a.c -O2 -g
$ pahole a.o
struct s1 {
char x; /* 0 1 */
/* Bitfield combined with previous fields */
uint64_t a:1; /* 0: 8 8 */
uint64_t b:1; /* 0: 9 8 */
uint64_t c:1; /* 0:10 8 */
/* XXX 53 bits hole, try to pack */
/* Force alignment to the next boundary: */
uint64_t :0;
uint64_t d:61; /* 8: 0 8 */
/* XXX 3 bits hole, try to pack */
char y; /* 16 1 */
/* size: 24, cachelines: 1, members: 6 */
/* sum members: 2 */
/* sum bitfield members: 64 bits, bit holes: 2, sum bit holes: 56 bits */
/* padding: 7 */
/* last cacheline: 24 bytes */
};
struct s2 {
char x; /* 0 1 */
/* Bitfield combined with previous fields */
uint64_t a:1; /* 0: 8 8 */
uint64_t b:1; /* 0: 9 8 */
uint64_t c:1; /* 0:10 8 */
/* XXX 5 bits hole, try to pack */
/* Bitfield combined with next fields */
char y; /* 2 1 */
/* size: 8, cachelines: 1, members: 5 */
/* sum members: 2 */
/* sum bitfield members: 3 bits, bit holes: 1, sum bit holes: 5 bits */
/* padding: 5 */
/* last cacheline: 8 bytes */
};
struct s3 {
uint64_t x; /* 0 8 */
uint64_t a:1; /* 8: 0 8 */
uint64_t b:1; /* 8: 1 8 */
uint64_t c:1; /* 8: 2 8 */
/* XXX 5 bits hole, try to pack */
/* Bitfield combined with next fields */
char y; /* 9 1 */
/* size: 16, cachelines: 1, members: 5 */
/* sum members: 9 */
/* sum bitfield members: 3 bits, bit holes: 1, sum bit holes: 5 bits */
/* padding: 6 */
/* last cacheline: 16 bytes */
};
And, since in the concrete case mca_config just has four bool members
before the bitfields, we see that the 1-bit bitfields are put within the
first 8 bytes of the struct, while the __reserved field gets an entire
u64 all to itself:
struct mca_config {
_Bool dont_log_ce; /* 0 1 */
_Bool cmci_disabled; /* 1 1 */
_Bool ignore_ce; /* 2 1 */
_Bool print_all; /* 3 1 */
/* Bitfield combined with previous fields */
long long unsigned int lmce_disabled:1; /* 0:32 8 */
long long unsigned int disabled:1; /* 0:33 8 */
long long unsigned int ser:1; /* 0:34 8 */
long long unsigned int recovery:1; /* 0:35 8 */
long long unsigned int bios_cmci_threshold:1; /* 0:36 8 */
/* XXX 27 bits hole, try to pack */
/* Force alignment to the next boundary: */
long long unsigned int :0;
long long unsigned int __reserved:59; /* 8: 0 8 */
/* XXX 5 bits hole, try to pack */
signed char bootlog; /* 16 1 */
/* XXX 3 bytes hole, try to pack */
int tolerant; /* 20 4 */
int monarch_timeout; /* 24 4 */
int panic_timeout; /* 28 4 */
unsigned int rip_msr; /* 32 4 */
/* size: 40, cachelines: 1, members: 15 */
/* sum members: 21, holes: 1, sum holes: 3 */
/* sum bitfield members: 64 bits, bit holes: 2, sum bit holes: 32 bits */
/* padding: 4 */
/* last cacheline: 40 bytes */
};
But why the messy mix between 1-bit bitfields and _Bools in the first place?
Rasmus
On Sun, Sep 19, 2021 at 09:47:51PM -0700, Luck, Tony wrote:
> I think this would be easier on the eyeballs with
> a couple of helper functions:
>
> if (mce_flags.smca)
> return smca_msr_number(bank, reg);
> else
> return msr_number(bank, reg);
>
> with the switch (reg) in each of those helper functions.
I'll switch it to
if ()
else
but please don't make me add more helper functions. Those MSR defines
already have "SMCA" and "IA32" in their names so that should be a good
enough differentiation, I'd say.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On Fri, Sep 17, 2021 at 12:53:54PM +0200, Borislav Petkov wrote:
> + switch (reg) {
> + case MCA_CTL:
> + return mce_flags.smca ? MSR_AMD64_SMCA_MCx_CTL(bank)
> + : MSR_IA32_MCx_CTL(bank);
>
> -static inline u32 smca_ctl_reg(int bank)
> -{
> - return MSR_AMD64_SMCA_MCx_CTL(bank);
> -}
> + case MCA_STATUS:
> + return mce_flags.smca ? MSR_AMD64_SMCA_MCx_STATUS(bank)
> + : MSR_IA32_MCx_STATUS(bank);
>
> -static inline u32 smca_status_reg(int bank)
> -{
> - return MSR_AMD64_SMCA_MCx_STATUS(bank);
> -}
> + case MCA_ADDR:
> + return mce_flags.smca ? MSR_AMD64_SMCA_MCx_ADDR(bank)
> + : MSR_IA32_MCx_ADDR(bank);
>
> -static inline u32 smca_addr_reg(int bank)
> -{
> - return MSR_AMD64_SMCA_MCx_ADDR(bank);
> -}
> + case MCA_MISC:
> + return mce_flags.smca ? MSR_AMD64_SMCA_MCx_MISC(bank)
> + : MSR_IA32_MCx_MISC(bank);
> + default: break;
> + }
I think this would be easier on the eyeballs with
a couple of helper functions:
if (mce_flags.smca)
return smca_msr_number(bank, reg);
else
return msr_number(bank, reg);
with the switch (reg) in each of those helper functions.
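Something like this, roughly (untested, just to show the shape):

	static u32 smca_msr_number(int bank, enum mca_msr reg)
	{
		switch (reg) {
		case MCA_CTL:	 return MSR_AMD64_SMCA_MCx_CTL(bank);
		case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank);
		case MCA_ADDR:	 return MSR_AMD64_SMCA_MCx_ADDR(bank);
		case MCA_MISC:	 return MSR_AMD64_SMCA_MCx_MISC(bank);
		}
		return 0;
	}

	static u32 msr_number(int bank, enum mca_msr reg)
	{
		switch (reg) {
		case MCA_CTL:	 return MSR_IA32_MCx_CTL(bank);
		case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank);
		case MCA_ADDR:	 return MSR_IA32_MCx_ADDR(bank);
		case MCA_MISC:	 return MSR_IA32_MCx_MISC(bank);
		}
		return 0;
	}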
-Tony
On Sun, Sep 19, 2021 at 09:57:09PM -0700, Luck, Tony wrote:
> Does this __reserved field do anything useful? It seems to
> just be an annoyance that must be updated each time a new
> bit is added. Surely the compiler will see that these bitfields
> are in a "u64" and do the math and skip to the right boundary
> without this.
It is there to tell you how many bits you have left so that you don't
have to count each time. And updating it each time is simply Ctrl-x in
vim.
:-)
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On Mon, Sep 20, 2021 at 09:42:22AM +0200, Rasmus Villemoes wrote:
> And, since in the concrete case mca_config just has four bool members
> before the bitfields, we see that the 1-bit bitfields are put within the
> first 8 bytes of the struct, while the __reserved field gets an entire
> u64 all to itself:
I probably should move that into alignment.
> But why the messy mix between 1-bit bitfields and _Bools in the first place?
My intention was to convert those boolean flags into a bitfield.
But you can't convert them all, unfortunately:
In file included from ./include/linux/miscdevice.h:7,
from arch/x86/kernel/cpu/mce/core.c:14:
./include/linux/device.h:148:64: error: cannot take address of bit-field ‘dont_log_ce’
148 | { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
| ^
arch/x86/kernel/cpu/mce/core.c:2415:8: note: in expansion of macro ‘DEVICE_BOOL_ATTR’
2415 | static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
| ^~~~~~~~~~~~~~~~
make[4]: *** [scripts/Makefile.build:277: arch/x86/kernel/cpu/mce/core.o] Error 1
make[3]: *** [scripts/Makefile.build:540: arch/x86/kernel/cpu/mce] Error 2
make[3]: *** Waiting for unfinished jobs....
make[2]: *** [scripts/Makefile.build:540: arch/x86/kernel/cpu] Error 2
make[1]: *** [scripts/Makefile.build:540: arch/x86/kernel] Error 2
make[1]: *** Waiting for unfinished jobs....
make: *** [Makefile:1874: arch/x86] Error 2
make: *** Waiting for unfinished jobs....
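The underlying C rule, boiled down to a standalone two-liner:

	struct s { unsigned long dont_log_ce : 1; } cfg;
	unsigned long *p = &cfg.dont_log_ce;	/* error: cannot take address of bit-field */

DEVICE_BOOL_ATTR stores a pointer to the variable, and a bit-field
simply has no address to take.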
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On Sun, Sep 19, 2021 at 10:06:03PM -0700, Luck, Tony wrote:
> Someday all these tests for random vendor/family/model/stepping could be
> refactored into a x86_match_cpu() lookup table (now that x86_match_cpu
> can handle steppings). But that's a topic for a different patch series.
If it remains somewhat readable, sure. If it turns into my favourite
code in severity.c then probably not so much.
:-)
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
> It is there to tell you how many bits you have left so that you don't
> have to count each time. And updating it each time is simply Ctrl-x in
> vim.
You want to know how many bits are left for some reason? Is there some
user API that will break if we overflow and start allocating bits from the next
64-bit word? Or is this just the countdown for when you and I both have a nervous
breakdown trying to keep track of that many different option paths through the
machine check code :-)
Thanks for the tip about Ctrl-x to decrement a number in vim (Google
tells me Ctrl-a increments). Learned something new today :-)
-Tony
On Mon, Sep 20, 2021 at 04:04:35PM +0000, Luck, Tony wrote:
> You want to know how many bits are left for some reason? Is there some
> user API that will break if we overflow and start allocating bits from
> the next 64-bit word?
Well, I'd like for all of the bits to fit in a single u64, naturally.
"Spilling" into the next word is unnecessary, even though it doesn't
matter for solely kernel internal usage - just common sense.
> Or is this just the countdown for when you and I both have a nervous
> breakdown trying to keep track of that many different option paths
> through the machine check code :-)
Nah, this is the moment where I close the MCE tree and no more features
are allowed!
:-P
> Thanks for the tip about Ctrl-x to decrement a number in vim (Google
> tells me Ctrl-a increments). Learned something new today :-)
Yeah, vim is simply great. :-)
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On Mon, Sep 20, 2021 at 10:32:11AM +0200, Borislav Petkov wrote:
> but please don't make me add more helper functions. Those MSR defines
> already have "SMCA" and "IA32" in their names so that should be a good
> enough differentiation, I'd say.
I just had a better idea - it is compact but regular and one can see at
a quick glance which block is for which set of MSRs:
u32 mca_msr_reg(int bank, enum mca_msr reg)
{
if (mce_flags.smca) {
switch (reg) {
case MCA_CTL: return MSR_AMD64_SMCA_MCx_CTL(bank); break;
case MCA_ADDR: return MSR_AMD64_SMCA_MCx_ADDR(bank); break;
case MCA_MISC: return MSR_AMD64_SMCA_MCx_MISC(bank); break;
case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank); break;
default: goto out; break;
}
}
switch (reg) {
case MCA_CTL: return MSR_IA32_MCx_CTL(bank); break;
case MCA_ADDR: return MSR_IA32_MCx_ADDR(bank); break;
case MCA_MISC: return MSR_IA32_MCx_MISC(bank); break;
case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank); break;
default: goto out; break;
}
out:
WARN_ON_ONCE(1);
return 0;
}
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
Looks nice. I don’t think you need those “break;” after each “return …;”
Sent from my iPhone
> On Sep 22, 2021, at 05:18, Borislav Petkov <[email protected]> wrote:
>
> On Mon, Sep 20, 2021 at 10:32:11AM +0200, Borislav Petkov wrote:
>> but please don't make me add more helper functions. Those MSR defines
>> already have "SMCA" and "IA32" in their names so that should be a good
>> enough differentiation, I'd say.
>
> I just had a better idea - it is compact but regular and one can see at
> a quick glance which block is for which set of MSRs:
>
> u32 mca_msr_reg(int bank, enum mca_msr reg)
> {
> if (mce_flags.smca) {
> switch (reg) {
> case MCA_CTL: return MSR_AMD64_SMCA_MCx_CTL(bank); break;
> case MCA_ADDR: return MSR_AMD64_SMCA_MCx_ADDR(bank); break;
> case MCA_MISC: return MSR_AMD64_SMCA_MCx_MISC(bank); break;
> case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank); break;
> default: goto out; break;
> }
> }
>
> switch (reg) {
> case MCA_CTL: return MSR_IA32_MCx_CTL(bank); break;
> case MCA_ADDR: return MSR_IA32_MCx_ADDR(bank); break;
> case MCA_MISC: return MSR_IA32_MCx_MISC(bank); break;
> case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank); break;
> default: goto out; break;
> }
>
> out:
> WARN_ON_ONCE(1);
> return 0;
> }
>
> --
> Regards/Gruss,
> Boris.
>
> https://people.kernel.org/tglx/notes-about-netiquette
On Wed, Sep 22, 2021 at 01:23:28PM +0000, Luck, Tony wrote:
> Looks nice. I don’t think you need those “break;” after each “return …;”
Yah, all gone.
Thx.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
>> Looks nice. I don’t think you need those “break;” after each “return …;”
>
> Yah, all gone.
Also:
switch (enum) {
actions for every value of that enum
}
doesn't need a default: option
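E.g. (standalone illustration, not the kernel code): with -Wall, gcc's
-Wswitch warns about any enumerator missing from the switch, so full
coverage is already checked at build time:

	enum mca_msr { MCA_CTL, MCA_STATUS, MCA_ADDR, MCA_MISC };

	u32 msr_for(enum mca_msr reg)
	{
		switch (reg) {	/* -Wswitch warns if a case goes missing */
		case MCA_CTL:	 return 1;
		case MCA_STATUS: return 2;
		case MCA_ADDR:	 return 3;
		case MCA_MISC:	 return 4;
		}
		return 0;	/* not reached for valid enum values */
	}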
-Tony
On Wed, Sep 22, 2021 at 03:22:01PM +0000, Luck, Tony wrote:
> Also:
>
> switch (enum) {
> actions for every value of that enum
> }
>
> doesn't need a default: option
Right, my thought was to catch silly stuff like this here:
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index b355ce737543..dfecf65e5407 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1714,6 +1714,7 @@ static void __mcheck_cpu_init_clear_banks(void)
continue;
wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
+ wrmsrl(mca_msr_reg(i, 12), 0);
}
}
but we already warn nicely there:
[ 0.146457] unchecked MSR access error: WRMSR to 0x0 (tried to write 0x0000000000000000) at rIP: 0xffffffff8103234b (__mcheck_cpu_init_clear_banks+0x6b/0xe0)
[ 0.147550] Call Trace:
[ 0.147836] mcheck_cpu_init+0x16c/0x4a0
[ 0.148234] identify_cpu+0x3fe/0x670
[ 0.148607] identify_boot_cpu+0xc/0x94
[ 0.148995] check_bugs+0x26/0xaef
[ 0.149353] ? __get_locked_pte+0x14a/0x200
[ 0.149777] start_kernel+0x602/0x621
[ 0.150153] secondary_startup_64_no_verify+0xb0/0xbb
and the mce_rd/wrmsrl() wrappers even panic.
So default case gone.
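So the function ends up looking something like this (modulo final
polish):

	u32 mca_msr_reg(int bank, enum mca_msr reg)
	{
		if (mce_flags.smca) {
			switch (reg) {
			case MCA_CTL:	 return MSR_AMD64_SMCA_MCx_CTL(bank);
			case MCA_ADDR:	 return MSR_AMD64_SMCA_MCx_ADDR(bank);
			case MCA_MISC:	 return MSR_AMD64_SMCA_MCx_MISC(bank);
			case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank);
			}
		}

		switch (reg) {
		case MCA_CTL:	 return MSR_IA32_MCx_CTL(bank);
		case MCA_ADDR:	 return MSR_IA32_MCx_ADDR(bank);
		case MCA_MISC:	 return MSR_IA32_MCx_MISC(bank);
		case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank);
		}

		return 0;
	}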
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On Fri, Sep 17, 2021 at 12:53:55PM +0200, Borislav Petkov wrote:
> From: Borislav Petkov <[email protected]>
>
> Use a flag setting to call the only quirk function for that.
>
I'd like to add another quirk function. First revision here:
https://lkml.kernel.org/r/[email protected]
Do you recommend I create another quirk flag and follow this patch? Or
should the quirks be grouped together somehow?
Thanks,
Yazen
On Thu, Sep 23, 2021 at 02:51:49PM +0000, Yazen Ghannam wrote:
> On Fri, Sep 17, 2021 at 12:53:55PM +0200, Borislav Petkov wrote:
> > From: Borislav Petkov <[email protected]>
> >
> > Use a flag setting to call the only quirk function for that.
> >
>
> I'd like to add another quirk function. First revision here:
> https://lkml.kernel.org/r/[email protected]
>
> Do you recommend I create another quirk flag and follow this patch? Or
> should the quirks be grouped together somehow?
Does that quirk match machines with mce_flags.smca=1 by chance?
:-)
Also, your test:
+ if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
+ return;
should be
+ if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) ==
(MCG_STATUS_EIPV|MCG_STATUS_RIPV))
+ return;
methinks.
Unless I'm misunderstanding the erratum ofc...
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On Thu, Sep 23, 2021 at 05:11:08PM +0200, Borislav Petkov wrote:
> On Thu, Sep 23, 2021 at 02:51:49PM +0000, Yazen Ghannam wrote:
...
> > Do you recommend this create another quirk flag and follow this patch? Or
> > should the quirks be grouped together somehow?
>
> Does that quirk match machines with mce_flags.smca=1 per chance?
>
> :-)
>
Yes, at the moment it does. So I'll use that. Thanks!
> Also, your test:
>
> + if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
> + return;
>
> should be
>
> + if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) ==
> (MCG_STATUS_EIPV|MCG_STATUS_RIPV))
> + return;
>
> methinks.
>
> Unless I'm misunderstanding the erratum ofc...
>
> --
The check is intended to catch the case where both bits are 0. But I'll
double-check with the hardware folks.
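I.e., the two variants behave like this (sketch):

	/* current check: run the quirk only when both bits are clear */
	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
		return;

	/* suggested check: run the quirk unless both bits are set */
	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) ==
	    (MCG_STATUS_EIPV|MCG_STATUS_RIPV))
		return;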
Thanks,
Yazen