Today PAT can't be used without MTRR being available, unless MTRR is at
least configured via CONFIG_MTRR and the system is running as a Xen PV
guest. In this case PAT is automatically available via the hypervisor,
but the PAT MSR can't be modified by the kernel and MTRR is disabled.

The same applies to a kernel built with no MTRR support: it won't allow
the PAT MSR to be used, even though there is no technical reason for
that other than the fact that setting up PAT the same way on all CPUs
(a requirement of the processor's cache management) relies on some
MTRR-specific code.
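The coupling is visible in setup_arch() (current code, before this
series; see also the last patch below):

	/* update e820 for memory not covered by WB MTRRs */
	if (IS_ENABLED(CONFIG_MTRR))
		mtrr_bp_init();
	else
		pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel.");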
Fix all of that by:

- moving the functions needed by PAT out of the MTRR-specific code one
  level up
- reworking the init sequences of MTRR and PAT to be more similar to
  each other, without calling PAT from MTRR code
- removing the dependency of PAT on MTRR

While working on that I discovered two minor bugs in the MTRR code,
which are fixed, too.
Changes in V3:
- replace patch 1 by just adding a comment
Changes in V2:
- complete rework of the patches based on comments by Boris Petkov
- added several patches to the series
Juergen Gross (10):
x86/mtrr: add comment for set_mtrr_state() serialization
x86/mtrr: remove unused cyrix_set_all() function
x86/mtrr: replace use_intel() with a local flag
x86: move some code out of arch/x86/kernel/cpu/mtrr
x86/mtrr: split generic_set_all()
x86/mtrr: remove set_all callback from struct mtrr_ops
x86/mtrr: simplify mtrr_bp_init()
x86/mtrr: let cache_aps_delayed_init replace mtrr_aps_delayed_init
x86/mtrr: add a stop_machine() handler calling only cache_cpu_init()
x86: decouple pat and mtrr handling
arch/x86/include/asm/cacheinfo.h | 14 +++
arch/x86/include/asm/memtype.h | 5 +-
arch/x86/include/asm/mtrr.h | 12 +--
arch/x86/kernel/cpu/cacheinfo.c | 159 +++++++++++++++++++++++++++++
arch/x86/kernel/cpu/common.c | 3 +-
arch/x86/kernel/cpu/mtrr/cyrix.c | 34 ------
arch/x86/kernel/cpu/mtrr/generic.c | 107 ++-----------------
arch/x86/kernel/cpu/mtrr/mtrr.c | 158 +++++-----------------------
arch/x86/kernel/cpu/mtrr/mtrr.h | 5 -
arch/x86/kernel/setup.c | 14 +--
arch/x86/kernel/smpboot.c | 9 +-
arch/x86/mm/pat/memtype.c | 127 +++++++----------------
arch/x86/power/cpu.c | 3 +-
13 files changed, 265 insertions(+), 385 deletions(-)
--
2.35.3
Prepare making PAT and MTRR support independent from each other by
moving some code needed by both out of the MTRR specific sources.
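The users of the moved helpers all follow the same pattern (see e.g.
mtrr_bp_pat_init() in the diff below), sketched here for clarity:

	local_irq_save(flags);
	cache_disable();
	/* modify MTRRs and/or the PAT MSR */
	cache_enable();
	local_irq_restore(flags);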
Signed-off-by: Juergen Gross <[email protected]>
---
V2:
- move code from cpu/common.c to cpu/cacheinfo.c (Boris Petkov)
---
arch/x86/include/asm/cacheinfo.h | 3 ++
arch/x86/include/asm/mtrr.h | 4 ++
arch/x86/kernel/cpu/cacheinfo.c | 77 ++++++++++++++++++++++++++++
arch/x86/kernel/cpu/mtrr/generic.c | 81 ++++--------------------------
4 files changed, 93 insertions(+), 72 deletions(-)
diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
index 1aeafa9888f7..313a6920d0f9 100644
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -10,4 +10,7 @@ extern unsigned int cache_generic;
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
+void cache_disable(void);
+void cache_enable(void);
+
#endif /* _ASM_X86_CACHEINFO_H */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 76d726074c16..12a16caed395 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -48,6 +48,8 @@ extern void mtrr_aps_init(void);
extern void mtrr_bp_restore(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
extern int amd_special_default_mtrr(void);
+void mtrr_disable(void);
+void mtrr_enable(void);
# else
static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
{
@@ -87,6 +89,8 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
#define set_mtrr_aps_delayed_init() do {} while (0)
#define mtrr_aps_init() do {} while (0)
#define mtrr_bp_restore() do {} while (0)
+#define mtrr_disable() do {} while (0)
+#define mtrr_enable() do {} while (0)
# endif
#ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 3b05d3ade7a6..47e2c72fa8a4 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -20,6 +20,8 @@
#include <asm/cacheinfo.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>
+#include <asm/mtrr.h>
+#include <asm/tlbflush.h>
#include "cpu.h"
@@ -1043,3 +1045,78 @@ int populate_cache_leaves(unsigned int cpu)
return 0;
}
+
+/*
+ * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
+ *
+ * Since we are disabling the cache don't allow any interrupts,
+ * they would run extremely slow and would only increase the pain.
+ *
+ * The caller must ensure that local interrupts are disabled and
+ * are reenabled after cache_enable() has been called.
+ */
+static unsigned long saved_cr4;
+static DEFINE_RAW_SPINLOCK(cache_disable_lock);
+
+void cache_disable(void) __acquires(cache_disable_lock)
+{
+ unsigned long cr0;
+
+ /*
+ * Note that this is not ideal
+ * since the cache is only flushed/disabled for this CPU while the
+ * MTRRs are changed, but changing this requires more invasive
+ * changes to the way the kernel boots
+ */
+
+ raw_spin_lock(&cache_disable_lock);
+
+ /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
+ cr0 = read_cr0() | X86_CR0_CD;
+ write_cr0(cr0);
+
+ /*
+ * Cache flushing is the most time-consuming step when programming
+ * the MTRRs. Fortunately, as per the Intel Software Development
+ * Manual, we can skip it if the processor supports cache self-
+ * snooping.
+ */
+ if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+ wbinvd();
+
+ /* Save value of CR4 and clear Page Global Enable (bit 7) */
+ if (boot_cpu_has(X86_FEATURE_PGE)) {
+ saved_cr4 = __read_cr4();
+ __write_cr4(saved_cr4 & ~X86_CR4_PGE);
+ }
+
+ /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ flush_tlb_local();
+
+ if (boot_cpu_has(X86_FEATURE_MTRR))
+ mtrr_disable();
+
+ /* Again, only flush caches if we have to. */
+ if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+ wbinvd();
+}
+
+void cache_enable(void) __releases(cache_disable_lock)
+{
+ /* Flush TLBs (no need to flush caches - they are disabled) */
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ flush_tlb_local();
+
+ if (boot_cpu_has(X86_FEATURE_MTRR))
+ mtrr_enable();
+
+ /* Enable caches */
+ write_cr0(read_cr0() & ~X86_CR0_CD);
+
+ /* Restore value of CR4 */
+ if (boot_cpu_has(X86_FEATURE_PGE))
+ __write_cr4(saved_cr4);
+
+ raw_spin_unlock(&cache_disable_lock);
+}
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 81742870ecc5..5ed397f03a87 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <asm/processor-flags.h>
+#include <asm/cacheinfo.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
@@ -396,9 +397,6 @@ print_fixed(unsigned base, unsigned step, const mtrr_type *types)
}
}
-static void prepare_set(void);
-static void post_set(void);
-
static void __init print_mtrr_state(void)
{
unsigned int i;
@@ -450,11 +448,11 @@ void __init mtrr_bp_pat_init(void)
unsigned long flags;
local_irq_save(flags);
- prepare_set();
+ cache_disable();
pat_init();
- post_set();
+ cache_enable();
local_irq_restore(flags);
}
@@ -718,80 +716,19 @@ static unsigned long set_mtrr_state(void)
return change_mask;
}
-
-static unsigned long cr4;
-static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
-
-/*
- * Since we are disabling the cache don't allow any interrupts,
- * they would run extremely slow and would only increase the pain.
- *
- * The caller must ensure that local interrupts are disabled and
- * are reenabled after post_set() has been called.
- */
-static void prepare_set(void) __acquires(set_atomicity_lock)
+void mtrr_disable(void)
{
- unsigned long cr0;
-
- /*
- * Note that this is not ideal
- * since the cache is only flushed/disabled for this CPU while the
- * MTRRs are changed, but changing this requires more invasive
- * changes to the way the kernel boots
- */
-
- raw_spin_lock(&set_atomicity_lock);
-
- /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
- cr0 = read_cr0() | X86_CR0_CD;
- write_cr0(cr0);
-
- /*
- * Cache flushing is the most time-consuming step when programming
- * the MTRRs. Fortunately, as per the Intel Software Development
- * Manual, we can skip it if the processor supports cache self-
- * snooping.
- */
- if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
- wbinvd();
-
- /* Save value of CR4 and clear Page Global Enable (bit 7) */
- if (boot_cpu_has(X86_FEATURE_PGE)) {
- cr4 = __read_cr4();
- __write_cr4(cr4 & ~X86_CR4_PGE);
- }
-
- /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- flush_tlb_local();
-
/* Save MTRR state */
rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
/* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
-
- /* Again, only flush caches if we have to. */
- if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
- wbinvd();
}
-static void post_set(void) __releases(set_atomicity_lock)
+void mtrr_enable(void)
{
- /* Flush TLBs (no need to flush caches - they are disabled) */
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- flush_tlb_local();
-
/* Intel (P6) standard MTRRs */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
-
- /* Enable caches */
- write_cr0(read_cr0() & ~X86_CR0_CD);
-
- /* Restore value of CR4 */
- if (boot_cpu_has(X86_FEATURE_PGE))
- __write_cr4(cr4);
- raw_spin_unlock(&set_atomicity_lock);
}
static void generic_set_all(void)
@@ -800,7 +737,7 @@ static void generic_set_all(void)
unsigned long flags;
local_irq_save(flags);
- prepare_set();
+ cache_disable();
/* Actually set the state */
mask = set_mtrr_state();
@@ -808,7 +745,7 @@ static void generic_set_all(void)
/* also set PAT */
pat_init();
- post_set();
+ cache_enable();
local_irq_restore(flags);
/* Use the atomic bitops to update the global mask */
@@ -839,7 +776,7 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
vr = &mtrr_state.var_ranges[reg];
local_irq_save(flags);
- prepare_set();
+ cache_disable();
if (size == 0) {
/*
@@ -858,7 +795,7 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
}
- post_set();
+ cache_enable();
local_irq_restore(flags);
}
--
2.35.3
Instead of having a stop_machine() handler for either a specific MTRR
register or all state at once, add a handler just for calling
cache_cpu_init() if appropriate.

Add functions for calling stop_machine() with this handler as well.

Add a generic replacement for mtrr_bp_restore() and a wrapper for
mtrr_bp_init().
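The single rendezvous handler and its two stop_machine() users,
summarized from the diff below:

	static int cache_rendezvous_handler(void *unused)
	{
		if (cache_aps_delayed_init || !cpu_online(smp_processor_id()))
			cache_cpu_init();
		return 0;
	}

	/* CPU hot-add / early resume: */
	stop_machine_from_inactive_cpu(cache_rendezvous_handler, NULL, cpu_callout_mask);

	/* delayed AP init at end of boot / thaw: */
	stop_machine(cache_rendezvous_handler, NULL, cpu_online_mask);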
Signed-off-by: Juergen Gross <[email protected]>
---
V2:
- completely new replacement of former patch 2
---
arch/x86/include/asm/cacheinfo.h | 5 +-
arch/x86/include/asm/mtrr.h | 4 --
arch/x86/kernel/cpu/cacheinfo.c | 59 +++++++++++++++++++++-
arch/x86/kernel/cpu/common.c | 3 +-
arch/x86/kernel/cpu/mtrr/mtrr.c | 87 +-------------------------------
arch/x86/kernel/setup.c | 3 +-
arch/x86/kernel/smpboot.c | 4 +-
arch/x86/power/cpu.c | 3 +-
8 files changed, 72 insertions(+), 96 deletions(-)
diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
index e80ed3c523c8..a122a1aad936 100644
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -14,6 +14,9 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
void cache_disable(void);
void cache_enable(void);
-void cache_cpu_init(void);
+void cache_bp_init(void);
+void cache_bp_restore(void);
+void cache_ap_init(void);
+void cache_aps_init(void);
#endif /* _ASM_X86_CACHEINFO_H */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 5d31219c8529..ec73d1e5bafb 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -42,8 +42,6 @@ extern int mtrr_add_page(unsigned long base, unsigned long size,
extern int mtrr_del(int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
-extern void mtrr_ap_init(void);
-extern void mtrr_aps_init(void);
extern void mtrr_bp_restore(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
extern int amd_special_default_mtrr(void);
@@ -85,8 +83,6 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
}
-#define mtrr_ap_init() do {} while (0)
-#define mtrr_aps_init() do {} while (0)
#define mtrr_bp_restore() do {} while (0)
#define mtrr_disable() do {} while (0)
#define mtrr_enable() do {} while (0)
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index c6e7c93e45e8..4946f93eb16f 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -15,6 +15,7 @@
#include <linux/capability.h>
#include <linux/sysfs.h>
#include <linux/pci.h>
+#include <linux/stop_machine.h>
#include <asm/cpufeature.h>
#include <asm/cacheinfo.h>
@@ -1121,7 +1122,7 @@ void cache_enable(void) __releases(cache_disable_lock)
raw_spin_unlock(&cache_disable_lock);
}
-void cache_cpu_init(void)
+static void cache_cpu_init(void)
{
unsigned long flags;
@@ -1141,3 +1142,59 @@ void cache_cpu_init(void)
}
bool cache_aps_delayed_init;
+
+static int cache_rendezvous_handler(void *unused)
+{
+ if (cache_aps_delayed_init || !cpu_online(smp_processor_id()))
+ cache_cpu_init();
+
+ return 0;
+}
+
+void __init cache_bp_init(void)
+{
+ mtrr_bp_init();
+
+ if (cache_generic)
+ cache_cpu_init();
+}
+
+void cache_bp_restore(void)
+{
+ if (cache_generic)
+ cache_cpu_init();
+}
+
+void cache_ap_init(void)
+{
+ if (!cache_generic || cache_aps_delayed_init)
+ return;
+
+ /*
+ * Ideally we should hold mtrr_mutex here to avoid mtrr entries
+ * changed, but this routine will be called in cpu boot time,
+ * holding the lock breaks it.
+ *
+ * This routine is called in two cases:
+ *
+ * 1. very early time of software resume, when there absolutely
+ * isn't mtrr entry changes;
+ *
+ * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
+ * lock to prevent mtrr entry changes
+ */
+ stop_machine_from_inactive_cpu(cache_rendezvous_handler, NULL,
+ cpu_callout_mask);
+}
+
+/*
+ * Delayed cache initialization for all AP's
+ */
+void cache_aps_init(void)
+{
+ if (!cache_generic || !cache_aps_delayed_init)
+ return;
+
+ stop_machine(cache_rendezvous_handler, NULL, cpu_online_mask);
+ cache_aps_delayed_init = false;
+}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3e508f239098..fd058b547f8d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -52,6 +52,7 @@
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
+#include <asm/cacheinfo.h>
#include <asm/memtype.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
@@ -1948,7 +1949,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_32
enable_sep_cpu();
#endif
- mtrr_ap_init();
+ cache_ap_init();
validate_apic_and_package_id(c);
x86_spec_ctrl_setup_ap();
update_srbds_msr();
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index a47d46035240..5e8be11d1873 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -70,9 +70,6 @@ static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
const struct mtrr_ops *mtrr_if;
-static void set_mtrr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type);
-
void __init set_mtrr_ops(const struct mtrr_ops *ops)
{
if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
@@ -155,25 +152,8 @@ static int mtrr_rendezvous_handler(void *info)
{
struct set_mtrr_data *data = info;
- /*
- * We use this same function to initialize the mtrrs during boot,
- * resume, runtime cpu online and on an explicit request to set a
- * specific MTRR.
- *
- * During boot or suspend, the state of the boot cpu's mtrrs has been
- * saved, and we want to replicate that across all the cpus that come
- * online (either at the end of boot or resume or during a runtime cpu
- * online). If we're doing that, @reg is set to something special and on
- * all the cpu's we do cache_cpu_init() (On the logical cpu that
- * started the boot/resume sequence, this might be a duplicate
- * cache_cpu_init()).
- */
- if (data->smp_reg != ~0U) {
- mtrr_if->set(data->smp_reg, data->smp_base,
- data->smp_size, data->smp_type);
- } else if (cache_aps_delayed_init || !cpu_online(smp_processor_id())) {
- cache_cpu_init();
- }
+ mtrr_if->set(data->smp_reg, data->smp_base,
+ data->smp_size, data->smp_type);
return 0;
}
@@ -243,19 +223,6 @@ static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
}
-static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
-{
- struct set_mtrr_data data = { .smp_reg = reg,
- .smp_base = base,
- .smp_size = size,
- .smp_type = type
- };
-
- stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
- cpu_callout_mask);
-}
-
/**
* mtrr_add_page - Add a memory type region
* @base: Physical base address of region in pages (in units of 4 kB!)
@@ -764,7 +731,6 @@ void __init mtrr_bp_init(void)
CACHE_GENERIC_PAT;
changed_by_mtrr_cleanup =
mtrr_cleanup(phys_addr);
- cache_cpu_init();
}
}
}
@@ -781,27 +747,6 @@ void __init mtrr_bp_init(void)
}
}
-void mtrr_ap_init(void)
-{
- if (!cache_generic || cache_aps_delayed_init)
- return;
-
- /*
- * Ideally we should hold mtrr_mutex here to avoid mtrr entries
- * changed, but this routine will be called in cpu boot time,
- * holding the lock breaks it.
- *
- * This routine is called in two cases:
- *
- * 1. very early time of software resume, when there absolutely
- * isn't mtrr entry changes;
- *
- * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
- * lock to prevent mtrr entry changes
- */
- set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
-}
-
/**
* mtrr_save_state - Save current fixed-range MTRR state of the first
* cpu in cpu_online_mask.
@@ -817,34 +762,6 @@ void mtrr_save_state(void)
smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
}
-/*
- * Delayed MTRR initialization for all AP's
- */
-void mtrr_aps_init(void)
-{
- if (!cache_generic)
- return;
-
- /*
- * Check if someone has requested the delay of AP MTRR initialization,
- * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
- * then we are done.
- */
- if (!cache_aps_delayed_init)
- return;
-
- set_mtrr(~0U, 0, 0, 0);
- cache_aps_delayed_init = false;
-}
-
-void mtrr_bp_restore(void)
-{
- if (!cache_generic)
- return;
-
- cache_cpu_init();
-}
-
static int __init mtrr_init_finialize(void)
{
if (!mtrr_enabled)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 216fee7144ee..e0e185ee0229 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -34,6 +34,7 @@
#include <asm/numa.h>
#include <asm/bios_ebda.h>
#include <asm/bugs.h>
+#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/efi.h>
#include <asm/gart.h>
@@ -1075,7 +1076,7 @@ void __init setup_arch(char **cmdline_p)
/* update e820 for memory not covered by WB MTRRs */
if (IS_ENABLED(CONFIG_MTRR))
- mtrr_bp_init();
+ cache_bp_init();
else
pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel.");
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ef7bce21cbe8..ff793f436904 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1445,7 +1445,7 @@ void arch_thaw_secondary_cpus_begin(void)
void arch_thaw_secondary_cpus_end(void)
{
- mtrr_aps_init();
+ cache_aps_init();
}
/*
@@ -1488,7 +1488,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
nmi_selftest();
impress_friends();
- mtrr_aps_init();
+ cache_aps_init();
}
static int __initdata setup_possible_cpus = -1;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index bb176c72891c..754221c9a1c3 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -23,6 +23,7 @@
#include <asm/fpu/api.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
+#include <asm/cacheinfo.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode.h>
@@ -261,7 +262,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
do_fpu_end();
tsc_verify_tsc_adjust(true);
x86_platform.restore_sched_clock_state();
- mtrr_bp_restore();
+ cache_bp_restore();
perf_restore_debug_store();
c = &cpu_data(smp_processor_id());
--
2.35.3
In the MTRR code use_intel() is only used in one source file, and the
relevant use_intel_if member of struct mtrr_ops is set only in
generic_mtrr_ops.

Replace use_intel() with a single flag in cacheinfo.c, which can be set
when assigning generic_mtrr_ops to mtrr_if. This allows dropping
use_intel_if from struct mtrr_ops, while preparing to support PAT
without MTRR. As another preparation for the PAT/MTRR decoupling use
one bit for MTRR control and one for PAT control. For now both bits are
set together; this can be changed later.

As the new flag will be set only if mtrr_enabled is set, the test for
mtrr_enabled can be dropped in some places.

At the same time drop the local mtrr_enabled() function and rename the
__mtrr_enabled flag to mtrr_enabled.
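For illustration, a former use_intel() check turns into a test of the
new bits (names as introduced by this patch):

	/* arch/x86/include/asm/cacheinfo.h */
	#define CACHE_GENERIC_MTRR 0x01
	#define CACHE_GENERIC_PAT  0x02
	extern unsigned int cache_generic;

	if (cache_generic & CACHE_GENERIC_MTRR)
		...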
Signed-off-by: Juergen Gross <[email protected]>
---
V2:
- new patch
---
arch/x86/include/asm/cacheinfo.h | 5 +++
arch/x86/kernel/cpu/cacheinfo.c | 3 ++
arch/x86/kernel/cpu/mtrr/generic.c | 1 -
arch/x86/kernel/cpu/mtrr/mtrr.c | 58 ++++++++++++++----------------
arch/x86/kernel/cpu/mtrr/mtrr.h | 2 --
5 files changed, 35 insertions(+), 34 deletions(-)
diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
index 86b2e0dcc4bf..1aeafa9888f7 100644
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -2,6 +2,11 @@
#ifndef _ASM_X86_CACHEINFO_H
#define _ASM_X86_CACHEINFO_H
+/* Kernel controls MTRR and/or PAT MSRs. */
+extern unsigned int cache_generic;
+#define CACHE_GENERIC_MTRR 0x01
+#define CACHE_GENERIC_PAT 0x02
+
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 66556833d7af..3b05d3ade7a6 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -35,6 +35,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
/* Shared L2 cache maps */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
+/* Kernel controls MTRR and/or PAT MSRs. */
+unsigned int cache_generic;
+
struct _cache_table {
unsigned char descriptor;
char cache_type;
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index cd64eab02393..81742870ecc5 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -917,7 +917,6 @@ int positive_have_wrcomb(void)
* Generic structure...
*/
const struct mtrr_ops generic_mtrr_ops = {
- .use_intel_if = 1,
.set_all = generic_set_all,
.get = generic_get_mtrr,
.get_free_region = generic_get_free_region,
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 2746cac9d8a9..7d7d5bd30219 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -46,6 +46,7 @@
#include <linux/syscore_ops.h>
#include <linux/rcupdate.h>
+#include <asm/cacheinfo.h>
#include <asm/cpufeature.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
@@ -58,12 +59,7 @@
#define MTRR_TO_PHYS_WC_OFFSET 1000
u32 num_var_ranges;
-static bool __mtrr_enabled;
-
-static bool mtrr_enabled(void)
-{
- return __mtrr_enabled;
-}
+static bool mtrr_enabled;
unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);
@@ -119,11 +115,11 @@ static int have_wrcomb(void)
}
/* This function returns the number of variable MTRRs */
-static void __init set_num_var_ranges(void)
+static void __init set_num_var_ranges(bool use_generic)
{
unsigned long config = 0, dummy;
- if (use_intel())
+ if (use_generic)
rdmsr(MSR_MTRRcap, config, dummy);
else if (is_cpu(AMD) || is_cpu(HYGON))
config = 2;
@@ -303,7 +299,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
int i, replace, error;
mtrr_type ltype;
- if (!mtrr_enabled())
+ if (!mtrr_enabled)
return -ENXIO;
error = mtrr_if->validate_add_page(base, size, type);
@@ -451,7 +447,7 @@ static int mtrr_check(unsigned long base, unsigned long size)
int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
bool increment)
{
- if (!mtrr_enabled())
+ if (!mtrr_enabled)
return -ENODEV;
if (mtrr_check(base, size))
return -EINVAL;
@@ -480,7 +476,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
unsigned long lbase, lsize;
int error = -EINVAL;
- if (!mtrr_enabled())
+ if (!mtrr_enabled)
return -ENODEV;
max = num_var_ranges;
@@ -540,7 +536,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
*/
int mtrr_del(int reg, unsigned long base, unsigned long size)
{
- if (!mtrr_enabled())
+ if (!mtrr_enabled)
return -ENODEV;
if (mtrr_check(base, size))
return -EINVAL;
@@ -566,7 +562,7 @@ int arch_phys_wc_add(unsigned long base, unsigned long size)
{
int ret;
- if (pat_enabled() || !mtrr_enabled())
+ if (pat_enabled() || !mtrr_enabled)
return 0; /* Success! (We don't need to do anything.) */
ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
@@ -686,6 +682,7 @@ int __initdata changed_by_mtrr_cleanup;
*/
void __init mtrr_bp_init(void)
{
+ bool use_generic = false;
u32 phys_addr;
init_ifs();
@@ -694,6 +691,7 @@ void __init mtrr_bp_init(void)
if (boot_cpu_has(X86_FEATURE_MTRR)) {
mtrr_if = &generic_mtrr_ops;
+ use_generic = true;
size_or_mask = SIZE_OR_MASK_BITS(36);
size_and_mask = 0x00f00000;
phys_addr = 36;
@@ -755,15 +753,18 @@ void __init mtrr_bp_init(void)
}
if (mtrr_if) {
- __mtrr_enabled = true;
- set_num_var_ranges();
+ mtrr_enabled = true;
+ set_num_var_ranges(use_generic);
init_table();
- if (use_intel()) {
+ if (use_generic) {
/* BIOS may override */
- __mtrr_enabled = get_mtrr_state();
+ mtrr_enabled = get_mtrr_state();
- if (mtrr_enabled())
+ if (mtrr_enabled) {
mtrr_bp_pat_init();
+ cache_generic |= CACHE_GENERIC_MTRR |
+ CACHE_GENERIC_PAT;
+ }
if (mtrr_cleanup(phys_addr)) {
changed_by_mtrr_cleanup = 1;
@@ -772,7 +773,7 @@ void __init mtrr_bp_init(void)
}
}
- if (!mtrr_enabled()) {
+ if (!mtrr_enabled) {
pr_info("Disabled\n");
/*
@@ -786,10 +787,7 @@ void __init mtrr_bp_init(void)
void mtrr_ap_init(void)
{
- if (!mtrr_enabled())
- return;
-
- if (!use_intel() || mtrr_aps_delayed_init)
+ if (!cache_generic || mtrr_aps_delayed_init)
return;
/*
@@ -816,7 +814,7 @@ void mtrr_save_state(void)
{
int first_cpu;
- if (!mtrr_enabled())
+ if (!mtrr_enabled)
return;
first_cpu = cpumask_first(cpu_online_mask);
@@ -825,9 +823,7 @@ void mtrr_save_state(void)
void set_mtrr_aps_delayed_init(void)
{
- if (!mtrr_enabled())
- return;
- if (!use_intel())
+ if (!cache_generic)
return;
mtrr_aps_delayed_init = true;
@@ -838,7 +834,7 @@ void set_mtrr_aps_delayed_init(void)
*/
void mtrr_aps_init(void)
{
- if (!use_intel() || !mtrr_enabled())
+ if (!cache_generic)
return;
/*
@@ -855,7 +851,7 @@ void mtrr_aps_init(void)
void mtrr_bp_restore(void)
{
- if (!use_intel() || !mtrr_enabled())
+ if (!cache_generic)
return;
mtrr_if->set_all();
@@ -863,10 +859,10 @@ void mtrr_bp_restore(void)
static int __init mtrr_init_finialize(void)
{
- if (!mtrr_enabled())
+ if (!mtrr_enabled)
return 0;
- if (use_intel()) {
+ if (cache_generic & CACHE_GENERIC_MTRR) {
if (!changed_by_mtrr_cleanup)
mtrr_state_warn();
return 0;
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 2ac99e561181..88b1c4b6174a 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -14,7 +14,6 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
struct mtrr_ops {
u32 vendor;
- u32 use_intel_if;
void (*set)(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
void (*set_all)(void);
@@ -61,7 +60,6 @@ extern u64 size_or_mask, size_and_mask;
extern const struct mtrr_ops *mtrr_if;
#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
-#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
extern unsigned int num_var_ranges;
extern u64 mtrr_tom2;
--
2.35.3
The Cyrix CPU-specific MTRR function cyrix_set_all() will never be
called: the struct mtrr_ops set_all() callback is only invoked in the
use_intel() case, which requires the use_intel_if member of struct
mtrr_ops to be set, and that is not the case for Cyrix.
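The gating can be sketched as follows (not literal tree code, but the
effective condition guarding all set_all() users today):

	#define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)

	if (use_intel())
		mtrr_if->set_all();	/* never reached with cyrix_mtrr_ops */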
Signed-off-by: Juergen Gross <[email protected]>
---
V2:
- new patch
---
arch/x86/kernel/cpu/mtrr/cyrix.c | 34 --------------------------------
1 file changed, 34 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index ca670919b561..c77d3b0a5bf2 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -234,42 +234,8 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
post_set();
}
-typedef struct {
- unsigned long base;
- unsigned long size;
- mtrr_type type;
-} arr_state_t;
-
-static arr_state_t arr_state[8] = {
- {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
- {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
-};
-
-static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };
-
-static void cyrix_set_all(void)
-{
- int i;
-
- prepare_set();
-
- /* the CCRs are not contiguous */
- for (i = 0; i < 4; i++)
- setCx86(CX86_CCR0 + i, ccr_state[i]);
- for (; i < 7; i++)
- setCx86(CX86_CCR4 + i, ccr_state[i]);
-
- for (i = 0; i < 8; i++) {
- cyrix_set_arr(i, arr_state[i].base,
- arr_state[i].size, arr_state[i].type);
- }
-
- post_set();
-}
-
static const struct mtrr_ops cyrix_mtrr_ops = {
.vendor = X86_VENDOR_CYRIX,
- .set_all = cyrix_set_all,
.set = cyrix_set_arr,
.get = cyrix_get_arr,
.get_free_region = cyrix_get_free_region,
--
2.35.3
Instead of using an indirect call to mtrr_if->set_all(), just call the
only possible target, cache_cpu_init(), directly. This enables removing
the set_all callback from struct mtrr_ops.
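In short:

	/* before: indirect call through the ops struct */
	mtrr_if->set_all();

	/* after: the only possible target, called directly */
	cache_cpu_init();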
Signed-off-by: Juergen Gross <[email protected]>
---
arch/x86/kernel/cpu/mtrr/generic.c | 1 -
arch/x86/kernel/cpu/mtrr/mtrr.c | 10 +++++-----
arch/x86/kernel/cpu/mtrr/mtrr.h | 2 --
3 files changed, 5 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index fc7b2d952737..5f83ee865def 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -843,7 +843,6 @@ int positive_have_wrcomb(void)
* Generic structure...
*/
const struct mtrr_ops generic_mtrr_ops = {
- .set_all = cache_cpu_init,
.get = generic_get_mtrr,
.get_free_region = generic_get_free_region,
.set = generic_set_mtrr,
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 7d7d5bd30219..9609a0d235f8 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -165,15 +165,15 @@ static int mtrr_rendezvous_handler(void *info)
* saved, and we want to replicate that across all the cpus that come
* online (either at the end of boot or resume or during a runtime cpu
* online). If we're doing that, @reg is set to something special and on
- * all the cpu's we do mtrr_if->set_all() (On the logical cpu that
+ * all the cpu's we do cache_cpu_init() (On the logical cpu that
* started the boot/resume sequence, this might be a duplicate
- * set_all()).
+ * cache_cpu_init()).
*/
if (data->smp_reg != ~0U) {
mtrr_if->set(data->smp_reg, data->smp_base,
data->smp_size, data->smp_type);
} else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
- mtrr_if->set_all();
+ cache_cpu_init();
}
return 0;
}
@@ -768,7 +768,7 @@ void __init mtrr_bp_init(void)
if (mtrr_cleanup(phys_addr)) {
changed_by_mtrr_cleanup = 1;
- mtrr_if->set_all();
+ cache_cpu_init();
}
}
}
@@ -854,7 +854,7 @@ void mtrr_bp_restore(void)
if (!cache_generic)
return;
- mtrr_if->set_all();
+ cache_cpu_init();
}
static int __init mtrr_init_finialize(void)
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 88b1c4b6174a..3b1883185185 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -16,8 +16,6 @@ struct mtrr_ops {
u32 vendor;
void (*set)(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
- void (*set_all)(void);
-
void (*get)(unsigned int reg, unsigned long *base,
unsigned long *size, mtrr_type *type);
int (*get_free_region)(unsigned long base, unsigned long size,
--
2.35.3
On Thu, Sep 08, 2022 at 10:49:07AM +0200, Juergen Gross wrote:
> diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
> index 86b2e0dcc4bf..1aeafa9888f7 100644
> --- a/arch/x86/include/asm/cacheinfo.h
> +++ b/arch/x86/include/asm/cacheinfo.h
> @@ -2,6 +2,11 @@
> #ifndef _ASM_X86_CACHEINFO_H
> #define _ASM_X86_CACHEINFO_H
>
> +/* Kernel controls MTRR and/or PAT MSRs. */
> +extern unsigned int cache_generic;
So this should be called something more descriptive like
memory_caching_types
or so to denote that this is a bitfield of supported memory caching
technologies. The code then would read as
if (memory_caching_types & CACHE_MTRR)
The name's still not optimal tho - needs more brooding over.
> +#define CACHE_GENERIC_MTRR 0x01
> +#define CACHE_GENERIC_PAT 0x02
And those should be CACHE_{MTRR,PAT}.
> void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
> void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
>
> diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
> index 66556833d7af..3b05d3ade7a6 100644
> --- a/arch/x86/kernel/cpu/cacheinfo.c
> +++ b/arch/x86/kernel/cpu/cacheinfo.c
> @@ -35,6 +35,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
> /* Shared L2 cache maps */
> DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
>
> +/* Kernel controls MTRR and/or PAT MSRs. */
> +unsigned int cache_generic;
This should either be __ro_after_init and initialized to 0 or you need
accessors...
> u32 num_var_ranges;
> -static bool __mtrr_enabled;
> -
> -static bool mtrr_enabled(void)
> -{
> - return __mtrr_enabled;
> -}
> +static bool mtrr_enabled;
Hmm, I don't like this. There's way too many boolean flags in the mtrr
code. There's mtrr_state.enabled too. ;-\
Can we set (or clear) X86_FEATURE_MTRR to denote MTRR enablement status
and get rid of one more boolean flag?
...
> void __init mtrr_bp_init(void)
> {
> + bool use_generic = false;
> u32 phys_addr;
>
> init_ifs();
> @@ -694,6 +691,7 @@ void __init mtrr_bp_init(void)
>
> if (boot_cpu_has(X86_FEATURE_MTRR)) {
> mtrr_if = &generic_mtrr_ops;
> + use_generic = true;
> size_or_mask = SIZE_OR_MASK_BITS(36);
> size_and_mask = 0x00f00000;
> phys_addr = 36;
> @@ -755,15 +753,18 @@ void __init mtrr_bp_init(void)
> }
>
> if (mtrr_if) {
> - __mtrr_enabled = true;
> - set_num_var_ranges();
> + mtrr_enabled = true;
> + set_num_var_ranges(use_generic);
You don't need use_generic either:
set_num_var_ranges(mtrr_if == &generic_mtrr_ops);
(The reason being I wanna get rid of that nasty minefield of boolean
vars all round that code).
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On Thu, Sep 08, 2022 at 10:49:08AM +0200, Juergen Gross wrote:
> Prepare making PAT and MTRR support independent from each other by
> moving some code needed by both out of the MTRR specific sources.
This needs to be two patches at least: first one is only *mechanical*
move without any changes. The next one(s) do the renaming and other
changes etc. Otherwise reviewing it is unnecessarily complicated.
Thx.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On 11.09.22 13:02, Borislav Petkov wrote:
> On Thu, Sep 08, 2022 at 10:49:08AM +0200, Juergen Gross wrote:
>> Prepare making PAT and MTRR support independent from each other by
>> moving some code needed by both out of the MTRR specific sources.
>
> This needs to be two patches at least: first one is only *mechanical*
> move without any changes. The next one(s) do the renaming and other
> changes etc. Otherwise reviewing it is unnecessarily complicated.
Okay.
Juergen
On 11.09.22 12:16, Borislav Petkov wrote:
> On Thu, Sep 08, 2022 at 10:49:07AM +0200, Juergen Gross wrote:
>> diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
>> index 86b2e0dcc4bf..1aeafa9888f7 100644
>> --- a/arch/x86/include/asm/cacheinfo.h
>> +++ b/arch/x86/include/asm/cacheinfo.h
>> @@ -2,6 +2,11 @@
>> #ifndef _ASM_X86_CACHEINFO_H
>> #define _ASM_X86_CACHEINFO_H
>>
>> +/* Kernel controls MTRR and/or PAT MSRs. */
>> +extern unsigned int cache_generic;
>
> So this should be called something more descriptive like
>
> memory_caching_types
In the end this variable doesn't specify which caching types are available,
but the ways to select/control the caching types.
So what about "memory_caching_select" or "memory_caching_control" instead?
> or so to denote that this is a bitfield of supported memory caching
> technologies. The code then would read as
>
> if (memory_caching_types & CACHE_MTRR)
>
> The name's still not optimal tho - needs more brooding over.
>
>> +#define CACHE_GENERIC_MTRR 0x01
>> +#define CACHE_GENERIC_PAT 0x02
>
> And those should be CACHE_{MTRR,PAT}.
Fine with me.
>> void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
>> void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
>>
>> diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
>> index 66556833d7af..3b05d3ade7a6 100644
>> --- a/arch/x86/kernel/cpu/cacheinfo.c
>> +++ b/arch/x86/kernel/cpu/cacheinfo.c
>> @@ -35,6 +35,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
>> /* Shared L2 cache maps */
>> DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
>>
>> +/* Kernel controls MTRR and/or PAT MSRs. */
>> +unsigned int cache_generic;
>
> This should either be __ro_after_init and initialized to 0 or you need
> accessors...
Okay.
>
>> u32 num_var_ranges;
>> -static bool __mtrr_enabled;
>> -
>> -static bool mtrr_enabled(void)
>> -{
>> - return __mtrr_enabled;
>> -}
>> +static bool mtrr_enabled;
>
> Hmm, I don't like this. There's way too many boolean flags in the mtrr
> code. There's mtrr_state.enabled too. ;-\
>
> Can we set (or clear) X86_FEATURE_MTRR to denote MTRR enablement status
> and get rid of one more boolean flag?
I'll have a look.
>
> ...
>
>> void __init mtrr_bp_init(void)
>> {
>> + bool use_generic = false;
>> u32 phys_addr;
>>
>> init_ifs();
>> @@ -694,6 +691,7 @@ void __init mtrr_bp_init(void)
>>
>> if (boot_cpu_has(X86_FEATURE_MTRR)) {
>> mtrr_if = &generic_mtrr_ops;
>> + use_generic = true;
>> size_or_mask = SIZE_OR_MASK_BITS(36);
>> size_and_mask = 0x00f00000;
>> phys_addr = 36;
>> @@ -755,15 +753,18 @@ void __init mtrr_bp_init(void)
>> }
>>
>> if (mtrr_if) {
>> - __mtrr_enabled = true;
>> - set_num_var_ranges();
>> + mtrr_enabled = true;
>> + set_num_var_ranges(use_generic);
>
> You don't need use_generic either:
>
> set_num_var_ranges(mtrr_if == &generic_mtrr_ops);
>
> (The reason being I wanna get rid of that nasty minefield of boolean
> vars all round that code).
Fine with me.
Juergen
On Mon, Sep 12, 2022 at 11:10:29AM +0200, Juergen Gross wrote:
> In the end this variable doesn't specify which caching types are available,
> but the ways to select/control the caching types.
>
> So what about "memory_caching_select" or "memory_caching_control" instead?
_control sounds like the right thing. As in, which of the memory caching
things the kernel controls. Along with a comment above it what this
exactly is. Yap.
Thx.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On 12.09.22 11:10, Juergen Gross wrote:
> On 11.09.22 12:16, Borislav Petkov wrote:
>> On Thu, Sep 08, 2022 at 10:49:07AM +0200, Juergen Gross wrote:
>>> diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
>>> index 86b2e0dcc4bf..1aeafa9888f7 100644
>>> --- a/arch/x86/include/asm/cacheinfo.h
>>> +++ b/arch/x86/include/asm/cacheinfo.h
>>> @@ -2,6 +2,11 @@
>>> #ifndef _ASM_X86_CACHEINFO_H
>>> #define _ASM_X86_CACHEINFO_H
>>> +/* Kernel controls MTRR and/or PAT MSRs. */
>>> +extern unsigned int cache_generic;
>>
>> So this should be called something more descriptive like
>>
>> memory_caching_types
>
> In the end this variable doesn't specify which caching types are available,
> but the ways to select/control the caching types.
>
> So what about "memory_caching_select" or "memory_caching_control" instead?
>
>> or so to denote that this is a bitfield of supported memory caching
>> technologies. The code then would read as
>>
>> if (memory_caching_types & CACHE_MTRR)
>>
>> The name's still not optimal tho - needs more brooding over.
>>
>>> +#define CACHE_GENERIC_MTRR 0x01
>>> +#define CACHE_GENERIC_PAT 0x02
>>
>> And those should be CACHE_{MTRR,PAT}.
>
> Fine with me.
>
>>> void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
>>> void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
>>> diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
>>> index 66556833d7af..3b05d3ade7a6 100644
>>> --- a/arch/x86/kernel/cpu/cacheinfo.c
>>> +++ b/arch/x86/kernel/cpu/cacheinfo.c
>>> @@ -35,6 +35,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
>>> /* Shared L2 cache maps */
>>> DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
>>> +/* Kernel controls MTRR and/or PAT MSRs. */
>>> +unsigned int cache_generic;
>>
>> This should either be __ro_after_init and initialized to 0 or you need
>> accessors...
>
> Okay.
>
>>
>>> u32 num_var_ranges;
>>> -static bool __mtrr_enabled;
>>> -
>>> -static bool mtrr_enabled(void)
>>> -{
>>> - return __mtrr_enabled;
>>> -}
>>> +static bool mtrr_enabled;
>>
>> Hmm, I don't like this. There's way too many boolean flags in the mtrr
>> code. There's mtrr_state.enabled too. ;-\
>>
>> Can we set (or clear) X86_FEATURE_MTRR to denote MTRR enablement status
>> and get rid of one more boolean flag?
>
> I'll have a look.
Hmm, this might be a little bit risky.
It can be done, but then X86_FEATURE_MTRR could be set even for cpus
NOT supporting it (the 32-bit special cases AMD, CENTAUR, CYRIX).
So we have the following alternatives:
- do the switch to X86_FEATURE_MTRR, risking code breakage if later
  code changes query X86_FEATURE_MTRR and assume the MTRR MSRs are
  available
- keep the current bool
- replace the bool with mtrr_if != NULL
- add a new synthetic feature, e.g. X86_FEATURE_MTRR_ENABLED (which in
fact would be just a replacement of the current bool)
My preference would be the replacement with mtrr_if != NULL.
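As a sketch of that preferred alternative (hypothetical, not part of
the series yet):

	/* e.g. in mtrr_add_page(), instead of testing the mtrr_enabled bool: */
	if (!mtrr_if)
		return -ENXIO;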
Juergen