This is the core functionality to track CMCI storms at the
machine check bank granularity. Subsequent patches will add
the vendor specific hooks to supply input to the storm
detection and take actions on the start/end of a storm.
Maintain a bitmap history for each bank showing whether the bank
logged a corrected error or not each time it is polled.
In normal operation the interval between polls of these banks
determines how far to shift the history: one bit for each elapsed
second, so the 64-bit width covers about a minute.
When a storm is observed a CPU vendor specific action is taken to reduce
or stop CMCI from the bank that is the source of the storm. The bank
is added to the bitmap of banks for this CPU to poll. The polling rate
is increased to once per second. During a storm each bit in the history
indicates the status of the bank each time it is polled. Thus the history
covers just over a minute.
Declare a storm for that bank if the number of corrected errors
seen in that history reaches a threshold (defined as 5 in this
series; it could be tuned later if there is data to suggest a
better value).
A storm on a bank ends when enough consecutive polls of the bank show
no corrected errors (defined as 30; this may also change). At that
point the CPU vendor specific function is called to revert to normal
operational mode, and the polling rate is changed back to the default.
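As a rough standalone sketch of the decision logic described above
(one bank only, illustrative names, kernel plumbing omitted; the real
per-CPU, per-bank implementation is in the diff below):

	#include <stdbool.h>
	#include <stdint.h>

	#define STORM_BEGIN_THRESHOLD		5	/* errors in window to start a storm */
	#define STORM_END_POLL_THRESHOLD	30	/* clean polls to end a storm */

	static uint64_t history;	/* bit 0 = most recent poll, set if a CE was logged */
	static bool in_storm;

	/*
	 * logged_ce: did this poll find a corrected error in the bank?
	 * shift: number of ~1 second slots since the bank was last polled
	 */
	static void storm_decide(bool logged_ce, unsigned int shift)
	{
		uint64_t end_mask = ((uint64_t)1 << STORM_END_POLL_THRESHOLD) - 1;

		/* Slide the window; a gap of 64 or more slots wipes the history */
		history = (shift >= 64) ? 0 : history << shift;
		if (logged_ce)
			history |= 1;

		if (in_storm) {
			/* Storm ends after STORM_END_POLL_THRESHOLD consecutive clean polls */
			if (!(history & end_mask))
				in_storm = false;
		} else {
			/* Storm begins once the window holds STORM_BEGIN_THRESHOLD errors */
			if (__builtin_popcountll(history) >= STORM_BEGIN_THRESHOLD)
				in_storm = true;
		}
	}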
Signed-off-by: Tony Luck <[email protected]>
---
arch/x86/kernel/cpu/mce/internal.h | 33 ++++++++-
arch/x86/kernel/cpu/mce/core.c | 109 ++++++++++++++++++++++++++---
2 files changed, 133 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 9dcad55835fa..eae88a824d97 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -40,6 +40,8 @@ struct dentry *mce_get_debugfs_dir(void);
extern mce_banks_t mce_banks_ce_disabled;
+void track_cmci_storm(int bank, u64 status);
+
#ifdef CONFIG_X86_MCE_INTEL
void cmci_disable_bank(int bank);
void intel_init_cmci(void);
@@ -54,7 +56,36 @@ static inline void intel_clear_lmce(void) { }
static inline bool intel_filter_mce(struct mce *m) { return false; }
#endif
-void mce_timer_kick(unsigned long interval);
+void mce_timer_kick(bool storm);
+void mce_handle_storm(int bank, bool on);
+void cmci_storm_begin(int bank);
+void cmci_storm_end(int bank);
+
+/**
+ * struct mca_storm_desc - CMCI storm tracking data
+ * @stormy_bank_count: count of MC banks in storm state
+ * @bank_history: bitmask tracking of corrected errors seen in each bank
+ * @bank_storm: determines whether the bank is in storm mode
+ * @bank_time_stamp: last time (in jiffies) that each bank was polled
+ */
+struct mca_storm_desc {
+ int stormy_bank_count;
+ u64 bank_history[MAX_NR_BANKS];
+ bool bank_storm[MAX_NR_BANKS];
+ unsigned long bank_time_stamp[MAX_NR_BANKS];
+};
+DECLARE_PER_CPU(struct mca_storm_desc, storm_desc);
+
+/*
+ * How many errors within the history buffer mark the start of a storm
+ */
+#define STORM_BEGIN_THRESHOLD 5
+
+/*
+ * How many polls of machine check bank without an error before declaring
+ * the storm is over
+ */
+#define STORM_END_POLL_THRESHOLD 30
#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index e7936be84204..cd9d9ea5bb0a 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -607,6 +607,83 @@ static struct notifier_block mce_default_nb = {
.priority = MCE_PRIO_LOWEST,
};
+DEFINE_PER_CPU(struct mca_storm_desc, storm_desc);
+
+void cmci_storm_begin(int bank)
+{
+ struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
+
+ __set_bit(bank, this_cpu_ptr(mce_poll_banks));
+ storm->bank_storm[bank] = true;
+
+ /*
+ * If this is the first bank on this CPU to enter storm mode
+ * start polling
+ */
+ if (++storm->stormy_bank_count == 1)
+ mce_timer_kick(true);
+}
+
+void cmci_storm_end(int bank)
+{
+ struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
+
+ __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
+ storm->bank_history[bank] = 0ull;
+ storm->bank_storm[bank] = false;
+
+ /* If no banks left in storm mode, stop polling */
+ if (!this_cpu_dec_return(storm_desc.stormy_bank_count))
+ mce_timer_kick(false);
+}
+
+void track_cmci_storm(int bank, u64 status)
+{
+ struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
+ unsigned long now = jiffies, delta;
+ unsigned int shift = 1;
+ u64 history;
+
+ /*
+ * When a bank is in storm mode it is polled once per second and
+ * the history mask will record about the last minute of poll results.
+ * If it is not in storm mode, then the bank is only checked when
+ * there is a CMCI interrupt. Check how long it has been since
+ * this bank was last checked, and adjust the amount of "shift"
+ * to apply to history.
+ */
+ if (!storm->bank_storm[bank]) {
+ delta = now - storm->bank_time_stamp[bank];
+ shift = (delta + HZ) / HZ;
+ }
+
+ /* If it has been a long time since the last poll, clear history */
+ if (shift >= 64)
+ history = 0;
+ else
+ history = storm->bank_history[bank] << shift;
+ storm->bank_time_stamp[bank] = now;
+
+ /* History keeps track of corrected errors. VAL=1 && UC=0 */
+ if ((status & (MCI_STATUS_VAL | MCI_STATUS_UC)) == MCI_STATUS_VAL)
+ history |= 1;
+ storm->bank_history[bank] = history;
+
+ if (storm->bank_storm[bank]) {
+ if (history & GENMASK_ULL(STORM_END_POLL_THRESHOLD - 1, 0))
+ return;
+ pr_notice("CPU%d BANK%d CMCI storm subsided\n", smp_processor_id(), bank);
+ mce_handle_storm(bank, false);
+ cmci_storm_end(bank);
+ } else {
+ if (hweight64(history) < STORM_BEGIN_THRESHOLD)
+ return;
+ pr_notice("CPU%d BANK%d CMCI storm detected\n", smp_processor_id(), bank);
+ mce_handle_storm(bank, true);
+ cmci_storm_begin(bank);
+ }
+}
+
/*
* Read ADDR and MISC registers.
*/
@@ -680,6 +757,8 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
barrier();
m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
+ track_cmci_storm(i, m.status);
+
/* If this entry is not valid, ignore it */
if (!(m.status & MCI_STATUS_VAL))
continue;
@@ -1587,6 +1666,7 @@ static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);
+static DEFINE_PER_CPU(bool, storm_poll_mode);
static void __start_timer(struct timer_list *t, unsigned long interval)
{
@@ -1622,22 +1702,29 @@ static void mce_timer_fn(struct timer_list *t)
else
iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
- __this_cpu_write(mce_next_interval, iv);
- __start_timer(t, iv);
+ if (__this_cpu_read(storm_poll_mode)) {
+ __start_timer(t, HZ);
+ } else {
+ __this_cpu_write(mce_next_interval, iv);
+ __start_timer(t, iv);
+ }
}
/*
- * Ensure that the timer is firing in @interval from now.
+ * When a storm starts on any bank on this CPU, switch to polling
+ * once per second. When the storm ends, revert to the default
+ * polling interval.
*/
-void mce_timer_kick(unsigned long interval)
+void mce_timer_kick(bool storm)
{
struct timer_list *t = this_cpu_ptr(&mce_timer);
- unsigned long iv = __this_cpu_read(mce_next_interval);
- __start_timer(t, interval);
+ __this_cpu_write(storm_poll_mode, storm);
- if (interval < iv)
- __this_cpu_write(mce_next_interval, interval);
+ if (storm)
+ __start_timer(t, HZ);
+ else
+ __this_cpu_write(mce_next_interval, check_interval * HZ);
}
/* Must not be called in IRQ context where del_timer_sync() can deadlock */
@@ -1965,6 +2052,12 @@ static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c)
intel_clear_lmce();
}
+void mce_handle_storm(int bank, bool on)
+{
+ switch (boot_cpu_data.x86_vendor) {
+ }
+}
+
static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
switch (c->x86_vendor) {
--
2.40.1
On Fri, Jun 16, 2023 at 11:27:42AM -0700, Tony Luck wrote:
> -void mce_timer_kick(unsigned long interval);
> +void mce_timer_kick(bool storm);
> +void mce_handle_storm(int bank, bool on);
> +void cmci_storm_begin(int bank);
> +void cmci_storm_end(int bank);
> +
> +/**
Yeah, let's not add kernel-doc comments about structs in internal.h
which are no one's business outside of MCA.
> + * struct mca_storm_desc - CMCI storm tracking data
> + * @stormy_bank_count: count of MC banks in storm state
> + * @bank_history: bitmask tracking of corrected errors seen in each bank
> + * @bank_storm: determines whether the bank is in storm mode
> + * @bank_time_stamp: last time (in jiffies) that each bank was polled
> + */
> +struct mca_storm_desc {
> + int stormy_bank_count;
> + u64 bank_history[MAX_NR_BANKS];
> + bool bank_storm[MAX_NR_BANKS];
> + unsigned long bank_time_stamp[MAX_NR_BANKS];
> +};
> +DECLARE_PER_CPU(struct mca_storm_desc, storm_desc);
Would that make the members organization even better:
struct storm_bank {
u64 history;
u64 timestamp;
bool storm;
};
struct mca_storm_desc {
struct storm_bank banks[MAX_NR_BANKS];
unsigned int bank_count;
};
?
From the previous mail:
> storm_poll_mode is a regular per-cpu variable that indicates a CPU is in
> poll mode because one or more of the banks it owns has gone over the
> storm threshold.
It is still a per-CPU var which can be part of the storm descriptor, no?
> bank_storm - is a per-cpu per-bank indicator that a particular bank
> on a particular CPU is in storm mode.
Ok, so the above can be extended to:
struct mca_storm_desc {
struct storm_bank banks[MAX_NR_BANKS];
unsigned int bank_count;
bool poll_mode;
};
?
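Purely as an illustration of that layout (not a concrete patch), the
per-bank accesses in track_cmci_storm() would presumably read along
the lines of:

	struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);

	/* was: storm->bank_history[bank], bank_time_stamp[bank], bank_storm[bank] */
	storm->banks[bank].history = history;
	storm->banks[bank].timestamp = now;

	if (storm->banks[bank].storm) {
		...
	}

i.e. one struct per bank instead of three parallel arrays, which also
keeps all the state for a bank together.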
> +/*
> + * How many polls of machine check bank without an error before declaring
> + * the storm is over
> + */
> +#define STORM_END_POLL_THRESHOLD 30
So what's stopping you from doing
/*
* How many polls of machine check bank without an error before declaring
* the storm is over. Since it is tracked in the struct
* storm_bank.history member as a bitmask, the mask is 30 bits [0 ... 29]
*/
#define STORM_END_POLL_THRESHOLD 29
?
And you've also explained it in text too so that it is perfectly clear
what the intent is.
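For illustration, the end-of-storm check in track_cmci_storm() would
then presumably become (a sketch with 29 as an inclusive bit index,
not a tested change):

	/* bits 0..29, i.e. the 30 most recent polls, must all be clean */
	if (history & GENMASK_ULL(STORM_END_POLL_THRESHOLD, 0))
		return;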
> #ifdef CONFIG_ACPI_APEI
> int apei_write_mce(struct mce *m);
> diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
> index e7936be84204..cd9d9ea5bb0a 100644
> --- a/arch/x86/kernel/cpu/mce/core.c
> +++ b/arch/x86/kernel/cpu/mce/core.c
> @@ -607,6 +607,83 @@ static struct notifier_block mce_default_nb = {
> .priority = MCE_PRIO_LOWEST,
> };
>
> +DEFINE_PER_CPU(struct mca_storm_desc, storm_desc);
> +
> +void cmci_storm_begin(int bank)
> +{
> + struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
> +
> + __set_bit(bank, this_cpu_ptr(mce_poll_banks));
> + storm->bank_storm[bank] = true;
> +
> + /*
> + * If this is the first bank on this CPU to enter storm mode
> + * start polling
> + */
> + if (++storm->stormy_bank_count == 1)
if (++storm->stormy_bank_count)
> + mce_timer_kick(true);
> +}
> +
> +void cmci_storm_end(int bank)
> +{
> + struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
> +
> + __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
> + storm->bank_history[bank] = 0ull;
> + storm->bank_storm[bank] = false;
> +
> + /* If no banks left in storm mode, stop polling */
> + if (!this_cpu_dec_return(storm_desc.stormy_bank_count))
> + mce_timer_kick(false);
> +}
> +
> +void track_cmci_storm(int bank, u64 status)
This is still not called cmci_track_storm() ;-\
And looking at the AMD side of things, there's a track_cmci_storm() in
amd_threshold_interrupt() which doesn't make any sense whatsoever.
Or at least this was my initial reaction because why would the AMD side
call a "CMCI" specific function. So they're prefixed with "cmci_" but
they don't really have anything to do with the Intel CMCI feature - it
is a storm handling code.
Which means, since those are used by both, the confusing "cmci" should
not be in the names.
And which also means, those should be static and private to mce/core.c.
But I'll see what functionality the rest of the patches need and how it
all should be split/exported properly.
Thx.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
Thanks for the review.
> Which means, since those are used by both, the confusing "cmci" should
> not be in the names.
>
> And which also means, those should be static and private to mce/core.c.
> But I'll see what functionality the rest of the patches need and how it
> all should be split/exported properly.
I'll wait for you to finish this analysis before I spin up v7 of the series.
-Tony
On Fri, Jun 23, 2023 at 03:40:25PM +0000, Luck, Tony wrote:
> I'll wait for you to finish this analysis before I spin up v7 of the series.
You can go ahead and do the Intel side only:
https://lore.kernel.org/r/[email protected], at the end.
Thx.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette