A bit of code-cutting in MTRR handling for users of CONFIG_EMBEDDED.
Patches generated on top of current linux-2.6.git (commit 2b876f9...),
tested on 2.6.32 (Pentium4 machine only, all patches at once).
size arch/x86/kernel/cpu/mtrr/built-in.o (stats based on 2.6.32)
text data bss dec hex
13934 5340 8476 27750 6c66 before
13931 5216 8436 27583 6bbf after (PROCESSOR_SELECT=n)
11788 5188 8356 25332 62f4 after (CPU_SUP_INTEL=y only)
Best Regards,
Michał Mirosław
Michał Mirosław (7):
x86/mtrr: Remove mtrr_ops[]
mtrr: constify struct mtrr_ops
mtrr: Remove use_intel()
x86/Kconfig.cpu: add CPU_SUP_AMD_32 and CPU_SUP_CENTAUR_32
mtrr: use CONFIG_CPU_SUP_* to select MTRR implementations
mtrr: introduce HAVE_MTRR_VENDOR_SPECIFIC
mtrr: mark mtrr_if as __read_mostly
arch/x86/Kconfig | 3 ++
arch/x86/Kconfig.cpu | 9 ++++++
arch/x86/kernel/cpu/mtrr/Makefile | 4 ++-
arch/x86/kernel/cpu/mtrr/amd.c | 7 +----
arch/x86/kernel/cpu/mtrr/centaur.c | 7 +----
arch/x86/kernel/cpu/mtrr/cyrix.c | 7 +----
arch/x86/kernel/cpu/mtrr/generic.c | 3 +-
arch/x86/kernel/cpu/mtrr/main.c | 54 ++++++++++++-----------------------
arch/x86/kernel/cpu/mtrr/mtrr.h | 23 +++++++++++----
arch/x86/kernel/cpu/mtrr/state.c | 10 +++---
10 files changed, 60 insertions(+), 67 deletions(-)
Remove mtrr_ops[] - its contents are always the same and all this
registration thing only adds useless code.
Signed-off-by: Michał Mirosław <[email protected]>
---
arch/x86/kernel/cpu/mtrr/amd.c | 7 +------
arch/x86/kernel/cpu/mtrr/centaur.c | 7 +------
arch/x86/kernel/cpu/mtrr/cyrix.c | 7 +------
arch/x86/kernel/cpu/mtrr/main.c | 32 +++++---------------------------
arch/x86/kernel/cpu/mtrr/mtrr.h | 7 +++++--
5 files changed, 13 insertions(+), 47 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
index 33af141..2655064 100644
--- a/arch/x86/kernel/cpu/mtrr/amd.c
+++ b/arch/x86/kernel/cpu/mtrr/amd.c
@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
return 0;
}
-static struct mtrr_ops amd_mtrr_ops = {
+struct mtrr_ops amd_mtrr_ops = {
.vendor = X86_VENDOR_AMD,
.set = amd_set_mtrr,
.get = amd_get_mtrr,
@@ -117,8 +117,3 @@ static struct mtrr_ops amd_mtrr_ops = {
.have_wrcomb = positive_have_wrcomb,
};
-int __init amd_init_mtrr(void)
-{
- set_mtrr_ops(&amd_mtrr_ops);
- return 0;
-}
diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
index de89f14..02b5429 100644
--- a/arch/x86/kernel/cpu/mtrr/centaur.c
+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
return 0;
}
-static struct mtrr_ops centaur_mtrr_ops = {
+struct mtrr_ops centaur_mtrr_ops = {
.vendor = X86_VENDOR_CENTAUR,
.set = centaur_set_mcr,
.get = centaur_get_mcr,
@@ -119,8 +119,3 @@ static struct mtrr_ops centaur_mtrr_ops = {
.have_wrcomb = positive_have_wrcomb,
};
-int __init centaur_init_mtrr(void)
-{
- set_mtrr_ops(&centaur_mtrr_ops);
- return 0;
-}
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 228d982..c964690 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
post_set();
}
-static struct mtrr_ops cyrix_mtrr_ops = {
+struct mtrr_ops cyrix_mtrr_ops = {
.vendor = X86_VENDOR_CYRIX,
.set_all = cyrix_set_all,
.set = cyrix_set_arr,
@@ -275,8 +275,3 @@ static struct mtrr_ops cyrix_mtrr_ops = {
.have_wrcomb = positive_have_wrcomb,
};
-int __init cyrix_init_mtrr(void)
-{
- set_mtrr_ops(&cyrix_mtrr_ops);
- return 0;
-}
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 84e83de..a35e698 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -60,19 +60,11 @@ static DEFINE_MUTEX(mtrr_mutex);
u64 size_or_mask, size_and_mask;
static bool mtrr_aps_delayed_init;
-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
-
struct mtrr_ops *mtrr_if;
static void set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
-void set_mtrr_ops(struct mtrr_ops *ops)
-{
- if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
- mtrr_ops[ops->vendor] = ops;
-}
-
/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void)
{
@@ -574,20 +566,6 @@ int mtrr_del(int reg, unsigned long base, unsigned long size)
}
EXPORT_SYMBOL(mtrr_del);
-/*
- * HACK ALERT!
- * These should be called implicitly, but we can't yet until all the initcall
- * stuff is done...
- */
-static void __init init_ifs(void)
-{
-#ifndef CONFIG_X86_64
- amd_init_mtrr();
- cyrix_init_mtrr();
- centaur_init_mtrr();
-#endif
-}
-
/* The suspend/resume methods are only for CPU without MTRR. CPU using generic
* MTRR driver doesn't require this
*/
@@ -645,8 +623,6 @@ void __init mtrr_bp_init(void)
{
u32 phys_addr;
- init_ifs();
-
phys_addr = 32;
if (cpu_has_mtrr) {
@@ -683,25 +659,26 @@ void __init mtrr_bp_init(void)
phys_addr = 32;
}
} else {
+#ifdef CONFIG_X86_32
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
if (cpu_has_k6_mtrr) {
/* Pre-Athlon (K6) AMD CPU MTRRs */
- mtrr_if = mtrr_ops[X86_VENDOR_AMD];
+ mtrr_if = &amd_mtrr_ops;
size_or_mask = 0xfff00000; /* 32 bits */
size_and_mask = 0;
}
break;
case X86_VENDOR_CENTAUR:
if (cpu_has_centaur_mcr) {
- mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
+ mtrr_if = &centaur_mtrr_ops;
size_or_mask = 0xfff00000; /* 32 bits */
size_and_mask = 0;
}
break;
case X86_VENDOR_CYRIX:
if (cpu_has_cyrix_arr) {
- mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
+ mtrr_if = &cyrix_mtrr_ops;
size_or_mask = 0xfff00000; /* 32 bits */
size_and_mask = 0;
}
@@ -709,6 +686,7 @@ void __init mtrr_bp_init(void)
default:
break;
}
+#endif
}
if (mtrr_if) {
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index a501dee..ff7af34 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -33,6 +33,11 @@ extern int generic_validate_add_page(unsigned long base, unsigned long size,
unsigned int type);
extern struct mtrr_ops generic_mtrr_ops;
+#ifdef CONFIG_X86_32
+extern struct mtrr_ops amd_mtrr_ops;
+extern struct mtrr_ops centaur_mtrr_ops;
+extern struct mtrr_ops cyrix_mtrr_ops;
+#endif
extern int positive_have_wrcomb(void);
@@ -53,8 +58,6 @@ void fill_mtrr_var_range(unsigned int index,
u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
void get_mtrr_state(void);
-extern void set_mtrr_ops(struct mtrr_ops *ops);
-
extern u64 size_or_mask, size_and_mask;
extern struct mtrr_ops *mtrr_if;
--
1.6.4.4
Signed-off-by: Michał Mirosław <[email protected]>
---
arch/x86/kernel/cpu/mtrr/amd.c | 2 +-
arch/x86/kernel/cpu/mtrr/centaur.c | 2 +-
arch/x86/kernel/cpu/mtrr/cyrix.c | 2 +-
arch/x86/kernel/cpu/mtrr/generic.c | 2 +-
arch/x86/kernel/cpu/mtrr/main.c | 2 +-
arch/x86/kernel/cpu/mtrr/mtrr.h | 10 +++++-----
6 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
index 2655064..a265df6 100644
--- a/arch/x86/kernel/cpu/mtrr/amd.c
+++ b/arch/x86/kernel/cpu/mtrr/amd.c
@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
return 0;
}
-struct mtrr_ops amd_mtrr_ops = {
+const struct mtrr_ops amd_mtrr_ops = {
.vendor = X86_VENDOR_AMD,
.set = amd_set_mtrr,
.get = amd_get_mtrr,
diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
index 02b5429..6cb1a83 100644
--- a/arch/x86/kernel/cpu/mtrr/centaur.c
+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
return 0;
}
-struct mtrr_ops centaur_mtrr_ops = {
+const struct mtrr_ops centaur_mtrr_ops = {
.vendor = X86_VENDOR_CENTAUR,
.set = centaur_set_mcr,
.get = centaur_get_mcr,
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index c964690..f00dcdc 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
post_set();
}
-struct mtrr_ops cyrix_mtrr_ops = {
+const struct mtrr_ops cyrix_mtrr_ops = {
.vendor = X86_VENDOR_CYRIX,
.set_all = cyrix_set_all,
.set = cyrix_set_arr,
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 55da0c5..4d75584 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
/*
* Generic structure...
*/
-struct mtrr_ops generic_mtrr_ops = {
+const struct mtrr_ops generic_mtrr_ops = {
.use_intel_if = 1,
.set_all = generic_set_all,
.get = generic_get_mtrr,
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index a35e698..628f3dd 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -60,7 +60,7 @@ static DEFINE_MUTEX(mtrr_mutex);
u64 size_or_mask, size_and_mask;
static bool mtrr_aps_delayed_init;
-struct mtrr_ops *mtrr_if;
+const struct mtrr_ops *mtrr_if;
static void set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index ff7af34..be2867c 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -32,11 +32,11 @@ extern int generic_get_free_region(unsigned long base, unsigned long size,
extern int generic_validate_add_page(unsigned long base, unsigned long size,
unsigned int type);
-extern struct mtrr_ops generic_mtrr_ops;
+extern const struct mtrr_ops generic_mtrr_ops;
#ifdef CONFIG_X86_32
-extern struct mtrr_ops amd_mtrr_ops;
-extern struct mtrr_ops centaur_mtrr_ops;
-extern struct mtrr_ops cyrix_mtrr_ops;
+extern const struct mtrr_ops amd_mtrr_ops;
+extern const struct mtrr_ops centaur_mtrr_ops;
+extern const struct mtrr_ops cyrix_mtrr_ops;
#endif
extern int positive_have_wrcomb(void);
@@ -59,7 +59,7 @@ void fill_mtrr_var_range(unsigned int index,
void get_mtrr_state(void);
extern u64 size_or_mask, size_and_mask;
-extern struct mtrr_ops *mtrr_if;
+extern const struct mtrr_ops *mtrr_if;
#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
--
1.6.4.4
Remove use_intel() and use is_cpu(INTEL) instead.
.use_intel_if is duplicating information already found in .vendor, as
the only combinations used are:
generic: use_intel_if == 1, vendor == INTEL
amd/cyrix/centaur: use_intel_if == 0, vendor != INTEL
Signed-off-by: Michał Mirosław <[email protected]>
---
arch/x86/kernel/cpu/mtrr/generic.c | 1 -
arch/x86/kernel/cpu/mtrr/main.c | 14 +++++++-------
arch/x86/kernel/cpu/mtrr/mtrr.h | 2 --
arch/x86/kernel/cpu/mtrr/state.c | 10 +++++-----
4 files changed, 12 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 4d75584..1651877 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -753,7 +753,6 @@ int positive_have_wrcomb(void)
* Generic structure...
*/
const struct mtrr_ops generic_mtrr_ops = {
- .use_intel_if = 1,
.set_all = generic_set_all,
.get = generic_get_mtrr,
.get_free_region = generic_get_free_region,
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 628f3dd..bc3436e 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -107,7 +107,7 @@ static void __init set_num_var_ranges(void)
{
unsigned long config = 0, dummy;
- if (use_intel())
+ if (is_cpu(INTEL))
rdmsr(MSR_MTRRcap, config, dummy);
else if (is_cpu(AMD))
config = 2;
@@ -692,7 +692,7 @@ void __init mtrr_bp_init(void)
if (mtrr_if) {
set_num_var_ranges();
init_table();
- if (use_intel()) {
+ if (is_cpu(INTEL)) {
get_mtrr_state();
if (mtrr_cleanup(phys_addr)) {
@@ -705,7 +705,7 @@ void __init mtrr_bp_init(void)
void mtrr_ap_init(void)
{
- if (!use_intel() || mtrr_aps_delayed_init)
+ if (!is_cpu(INTEL) || mtrr_aps_delayed_init)
return;
/*
* Ideally we should hold mtrr_mutex here to avoid mtrr entries
@@ -733,7 +733,7 @@ void mtrr_save_state(void)
void set_mtrr_aps_delayed_init(void)
{
- if (!use_intel())
+ if (!is_cpu(INTEL))
return;
mtrr_aps_delayed_init = true;
@@ -744,7 +744,7 @@ void set_mtrr_aps_delayed_init(void)
*/
void mtrr_aps_init(void)
{
- if (!use_intel())
+ if (!is_cpu(INTEL))
return;
set_mtrr(~0U, 0, 0, 0);
@@ -753,7 +753,7 @@ void mtrr_aps_init(void)
void mtrr_bp_restore(void)
{
- if (!use_intel())
+ if (!is_cpu(INTEL))
return;
mtrr_if->set_all();
@@ -764,7 +764,7 @@ static int __init mtrr_init_finialize(void)
if (!mtrr_if)
return 0;
- if (use_intel()) {
+ if (is_cpu(INTEL)) {
if (!changed_by_mtrr_cleanup)
mtrr_state_warn();
return 0;
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index be2867c..562e2e3 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -13,7 +13,6 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
struct mtrr_ops {
u32 vendor;
- u32 use_intel_if;
void (*set)(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
void (*set_all)(void);
@@ -62,7 +61,6 @@ extern u64 size_or_mask, size_and_mask;
extern const struct mtrr_ops *mtrr_if;
#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
-#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
extern unsigned int num_var_ranges;
extern u64 mtrr_tom2;
diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c
index dfc80b4..3bdbf50 100644
--- a/arch/x86/kernel/cpu/mtrr/state.c
+++ b/arch/x86/kernel/cpu/mtrr/state.c
@@ -17,7 +17,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
/* Disable interrupts locally */
local_irq_save(ctxt->flags);
- if (use_intel() || is_cpu(CYRIX)) {
+ if (is_cpu(INTEL) || is_cpu(CYRIX)) {
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if (cpu_has_pge) {
@@ -34,7 +34,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
write_cr0(cr0);
wbinvd();
- if (use_intel()) {
+ if (is_cpu(INTEL)) {
/* Save MTRR state */
rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi);
} else {
@@ -49,7 +49,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
{
- if (use_intel()) {
+ if (is_cpu(INTEL)) {
/* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL,
ctxt->deftype_hi);
@@ -64,13 +64,13 @@ void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
/* Restore the processor after a set_mtrr_prepare */
void set_mtrr_done(struct set_mtrr_context *ctxt)
{
- if (use_intel() || is_cpu(CYRIX)) {
+ if (is_cpu(INTEL) || is_cpu(CYRIX)) {
/* Flush caches and TLBs */
wbinvd();
/* Restore MTRRdefType */
- if (use_intel()) {
+ if (is_cpu(INTEL)) {
/* Intel (P6) standard MTRRs */
mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo,
ctxt->deftype_hi);
--
1.6.4.4
Add CPU_SUP_AMD_32 and CPU_SUP_CENTAUR_32. Used later in selecting
possible MTRR implementations.
Signed-off-by: Michał Mirosław <[email protected]>
---
arch/x86/Kconfig.cpu | 6 ++++++
1 files changed, 6 insertions(+), 0 deletions(-)
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 08e442b..bae1193 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -462,6 +462,9 @@ config CPU_SUP_AMD
If unsure, say N.
+config CPU_SUP_AMD_32
+ def_bool y if CPU_SUP_AMD && !64BIT
+
config CPU_SUP_CENTAUR
default y
bool "Support Centaur processors" if PROCESSOR_SELECT
@@ -475,6 +478,9 @@ config CPU_SUP_CENTAUR
If unsure, say N.
+config CPU_SUP_CENTAUR_32
+ def_bool y if CPU_SUP_CENTAUR && !64BIT
+
config CPU_SUP_TRANSMETA_32
default y
bool "Support Transmeta processors" if PROCESSOR_SELECT
--
1.6.4.4
Use CONFIG_CPU_SUP_* to select MTRR implementations. We let gcc
optimize out the empty switch statement if only generic implementation
is wanted.
Signed-off-by: Michał Mirosław <[email protected]>
---
arch/x86/kernel/cpu/mtrr/Makefile | 4 +++-
arch/x86/kernel/cpu/mtrr/main.c | 10 ++++++++--
2 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/Makefile b/arch/x86/kernel/cpu/mtrr/Makefile
index f4361b5..d6e8a92 100644
--- a/arch/x86/kernel/cpu/mtrr/Makefile
+++ b/arch/x86/kernel/cpu/mtrr/Makefile
@@ -1,3 +1,5 @@
obj-y := main.o if.o generic.o state.o cleanup.o
-obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
+obj-$(CONFIG_CPU_SUP_AMD_32) += amd.o
+obj-$(CONFIG_CPU_SUP_CENTAUR_32) += centaur.o
+obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index bc3436e..737780a 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -648,6 +648,7 @@ void __init mtrr_bp_init(void)
size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
size_and_mask = ~size_or_mask & 0xfffff00000ULL;
+#ifdef CONFIG_CPU_SUP_CENTAUR
} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
boot_cpu_data.x86 == 6) {
/*
@@ -657,10 +658,11 @@ void __init mtrr_bp_init(void)
size_or_mask = 0xfff00000; /* 32 bits */
size_and_mask = 0;
phys_addr = 32;
+#endif
}
} else {
-#ifdef CONFIG_X86_32
switch (boot_cpu_data.x86_vendor) {
+#ifdef CONFIG_CPU_SUP_AMD_32
case X86_VENDOR_AMD:
if (cpu_has_k6_mtrr) {
/* Pre-Athlon (K6) AMD CPU MTRRs */
@@ -669,6 +671,8 @@ void __init mtrr_bp_init(void)
size_and_mask = 0;
}
break;
+#endif
+#ifdef CONFIG_CPU_SUP_CENTAUR_32
case X86_VENDOR_CENTAUR:
if (cpu_has_centaur_mcr) {
mtrr_if = &centaur_mtrr_ops;
@@ -676,6 +680,8 @@ void __init mtrr_bp_init(void)
size_and_mask = 0;
}
break;
+#endif
+#ifdef CONFIG_CPU_SUP_CYRIX_32
case X86_VENDOR_CYRIX:
if (cpu_has_cyrix_arr) {
mtrr_if = &cyrix_mtrr_ops;
@@ -683,10 +689,10 @@ void __init mtrr_bp_init(void)
size_and_mask = 0;
}
break;
+#endif
default:
break;
}
-#endif
}
if (mtrr_if) {
--
1.6.4.4
In case that only generic MTRR implementation is needed, tell gcc
to optimize out tests for other implementations. Also remove .vendor
from mtrr_ops in this configuration.
Signed-off-by: Michał Mirosław <[email protected]>
---
arch/x86/Kconfig | 3 +++
arch/x86/Kconfig.cpu | 3 +++
arch/x86/kernel/cpu/mtrr/mtrr.h | 12 +++++++++++-
3 files changed, 17 insertions(+), 1 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 32a1918..91422d0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1367,6 +1367,9 @@ config MTRR
See <file:Documentation/x86/mtrr.txt> for more information.
+config HAVE_MTRR_VENDOR_SPECIFIC
+ def_bool n
+
config MTRR_SANITIZER
def_bool y
prompt "MTRR cleanup support"
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index bae1193..d7a6838 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -439,6 +439,7 @@ config CPU_SUP_CYRIX_32
default y
bool "Support Cyrix processors" if PROCESSOR_SELECT
depends on !64BIT
+ select HAVE_MTRR_VENDOR_SPECIFIC
---help---
This enables detection, tunings and quirks for Cyrix processors
@@ -464,6 +465,7 @@ config CPU_SUP_AMD
config CPU_SUP_AMD_32
def_bool y if CPU_SUP_AMD && !64BIT
+ select HAVE_MTRR_VENDOR_SPECIFIC
config CPU_SUP_CENTAUR
default y
@@ -480,6 +482,7 @@ config CPU_SUP_CENTAUR
config CPU_SUP_CENTAUR_32
def_bool y if CPU_SUP_CENTAUR && !64BIT
+ select HAVE_MTRR_VENDOR_SPECIFIC
config CPU_SUP_TRANSMETA_32
default y
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 562e2e3..7a87e65 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -12,7 +12,9 @@
extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
struct mtrr_ops {
+#ifdef CONFIG_HAVE_MTRR_VENDOR_SPECIFIC
u32 vendor;
+#endif
void (*set)(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
void (*set_all)(void);
@@ -32,7 +34,7 @@ extern int generic_validate_add_page(unsigned long base, unsigned long size,
unsigned int type);
extern const struct mtrr_ops generic_mtrr_ops;
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_HAVE_MTRR_VENDOR_SPECIFIC
extern const struct mtrr_ops amd_mtrr_ops;
extern const struct mtrr_ops centaur_mtrr_ops;
extern const struct mtrr_ops cyrix_mtrr_ops;
@@ -60,8 +62,16 @@ void get_mtrr_state(void);
extern u64 size_or_mask, size_and_mask;
extern const struct mtrr_ops *mtrr_if;
+#ifdef CONFIG_HAVE_MTRR_VENDOR_SPECIFIC
+
#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
+#else
+
+#define is_cpu(vnd) (mtrr_if && 0 == X86_VENDOR_##vnd)
+
+#endif
+
extern unsigned int num_var_ranges;
extern u64 mtrr_tom2;
extern struct mtrr_state_type mtrr_state;
--
1.6.4.4
Signed-off-by: Michał Mirosław <[email protected]>
---
arch/x86/kernel/cpu/mtrr/main.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 737780a..7310682 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -60,7 +60,7 @@ static DEFINE_MUTEX(mtrr_mutex);
u64 size_or_mask, size_and_mask;
static bool mtrr_aps_delayed_init;
-const struct mtrr_ops *mtrr_if;
+const struct mtrr_ops *mtrr_if __read_mostly;
static void set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
--
1.6.4.4
2009/12/8 Michał Mirosław <[email protected]>:
> Remove use_intel() and use is_cpu(INTEL) instead.
>
> .use_intel_if is duplicating information already found in .vendor, as
> the only combinations used are:
> generic: use_intel_if == 1, vendor == INTEL
> amd/cyrix/centaur: use_intel_if == 0, vendor != INTEL
>
> Signed-off-by: Michał Mirosław <[email protected]>
> ---
> arch/x86/kernel/cpu/mtrr/generic.c | 1 -
> arch/x86/kernel/cpu/mtrr/main.c | 14 +++++++-------
> arch/x86/kernel/cpu/mtrr/mtrr.h | 2 --
> arch/x86/kernel/cpu/mtrr/state.c | 10 +++++-----
> 4 files changed, 12 insertions(+), 15 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
> index 4d75584..1651877 100644
> --- a/arch/x86/kernel/cpu/mtrr/generic.c
> +++ b/arch/x86/kernel/cpu/mtrr/generic.c
> @@ -753,7 +753,6 @@ int positive_have_wrcomb(void)
> * Generic structure...
> */
> const struct mtrr_ops generic_mtrr_ops = {
> - .use_intel_if = 1,
> .set_all = generic_set_all,
> .get = generic_get_mtrr,
> .get_free_region = generic_get_free_region,
> diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
> index 628f3dd..bc3436e 100644
> --- a/arch/x86/kernel/cpu/mtrr/main.c
> +++ b/arch/x86/kernel/cpu/mtrr/main.c
> @@ -107,7 +107,7 @@ static void __init set_num_var_ranges(void)
> {
> unsigned long config = 0, dummy;
>
> - if (use_intel())
> + if (is_cpu(INTEL))
> rdmsr(MSR_MTRRcap, config, dummy);
> else if (is_cpu(AMD))
> config = 2;
> @@ -692,7 +692,7 @@ void __init mtrr_bp_init(void)
> if (mtrr_if) {
> set_num_var_ranges();
> init_table();
> - if (use_intel()) {
> + if (is_cpu(INTEL)) {
> get_mtrr_state();
>
> if (mtrr_cleanup(phys_addr)) {
> @@ -705,7 +705,7 @@ void __init mtrr_bp_init(void)
>
> void mtrr_ap_init(void)
> {
> - if (!use_intel() || mtrr_aps_delayed_init)
> + if (!is_cpu(INTEL) || mtrr_aps_delayed_init)
> return;
> /*
> * Ideally we should hold mtrr_mutex here to avoid mtrr entries
> @@ -733,7 +733,7 @@ void mtrr_save_state(void)
>
> void set_mtrr_aps_delayed_init(void)
> {
> - if (!use_intel())
> + if (!is_cpu(INTEL))
> return;
>
> mtrr_aps_delayed_init = true;
> @@ -744,7 +744,7 @@ void set_mtrr_aps_delayed_init(void)
> */
> void mtrr_aps_init(void)
> {
> - if (!use_intel())
> + if (!is_cpu(INTEL))
> return;
>
> set_mtrr(~0U, 0, 0, 0);
> @@ -753,7 +753,7 @@ void mtrr_aps_init(void)
>
> void mtrr_bp_restore(void)
> {
> - if (!use_intel())
> + if (!is_cpu(INTEL))
> return;
>
> mtrr_if->set_all();
> @@ -764,7 +764,7 @@ static int __init mtrr_init_finialize(void)
> if (!mtrr_if)
> return 0;
>
> - if (use_intel()) {
> + if (is_cpu(INTEL)) {
> if (!changed_by_mtrr_cleanup)
> mtrr_state_warn();
> return 0;
> diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
> index be2867c..562e2e3 100644
> --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
> +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
> @@ -13,7 +13,6 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
>
> struct mtrr_ops {
> u32 vendor;
> - u32 use_intel_if;
> void (*set)(unsigned int reg, unsigned long base,
> unsigned long size, mtrr_type type);
> void (*set_all)(void);
> @@ -62,7 +61,6 @@ extern u64 size_or_mask, size_and_mask;
> extern const struct mtrr_ops *mtrr_if;
>
> #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
> -#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
>
> extern unsigned int num_var_ranges;
> extern u64 mtrr_tom2;
> diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c
> index dfc80b4..3bdbf50 100644
> --- a/arch/x86/kernel/cpu/mtrr/state.c
> +++ b/arch/x86/kernel/cpu/mtrr/state.c
> @@ -17,7 +17,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
> /* Disable interrupts locally */
> local_irq_save(ctxt->flags);
>
> - if (use_intel() || is_cpu(CYRIX)) {
> + if (is_cpu(INTEL) || is_cpu(CYRIX)) {
>
> /* Save value of CR4 and clear Page Global Enable (bit 7) */
> if (cpu_has_pge) {
> @@ -34,7 +34,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
> write_cr0(cr0);
> wbinvd();
>
> - if (use_intel()) {
> + if (is_cpu(INTEL)) {
> /* Save MTRR state */
> rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi);
> } else {
> @@ -49,7 +49,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
>
> void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
> {
> - if (use_intel()) {
> + if (is_cpu(INTEL)) {
> /* Disable MTRRs, and set the default type to uncached */
> mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL,
> ctxt->deftype_hi);
> @@ -64,13 +64,13 @@ void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
> /* Restore the processor after a set_mtrr_prepare */
> void set_mtrr_done(struct set_mtrr_context *ctxt)
> {
> - if (use_intel() || is_cpu(CYRIX)) {
> + if (is_cpu(INTEL) || is_cpu(CYRIX)) {
>
> /* Flush caches and TLBs */
> wbinvd();
>
> /* Restore MTRRdefType */
> - if (use_intel()) {
> + if (is_cpu(INTEL)) {
> /* Intel (P6) standard MTRRs */
> mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo,
> ctxt->deftype_hi);
> --
can you check it with amd64 cpu?
YH
On Wed, Dec 09, 2009 at 12:10:50AM -0800, Yinghai Lu wrote:
> 2009/12/8 Michał Mirosław <[email protected]>:
> > Remove use_intel() and use is_cpu(INTEL) instead.
> >
> > .use_intel_if is duplicating information already found in .vendor, as
> > the only combinations used are:
> >    generic: use_intel_if == 1, vendor == INTEL
> >    amd/cyrix/centaur: use_intel_if == 0, vendor != INTEL
> >
[...]
> can you check it with amd64 cpu?
Unfortunately I don't have any AMD machines right now.
Anyway, just to clarify why the change is a functional no-op: use_intel()
and is_cpu(X) don't check the detected CPU vendor ID but look into mtrr_ops
struct. For 64BIT build this means that use_intel() / is_cpu(INTEL) == true
when mtrr_if != NULL, and all other is_cpu() == false because no other
implementation could be choosen (amd/cyrix/centaur special cpu features are
possible only in 32BIT).
Best Regards,
Michał Mirosław
* Yinghai Lu <[email protected]> wrote:
> can you check it with amd64 cpu?
do you have a suspicion that it might be broken?
Ingo
On Wed, Dec 9, 2009 at 12:52 AM, Ingo Molnar <[email protected]> wrote:
>
> * Yinghai Lu <[email protected]> wrote:
>
>> can you check it with amd64 cpu?
>
> do you have a suspicion that it might be broken?
void __init mtrr_bp_init(void)
{
u32 phys_addr;
init_ifs();
phys_addr = 32;
if (cpu_has_mtrr) {
mtrr_if = &generic_mtrr_ops;
size_or_mask = 0xff000000; /* 36 bits */
size_and_mask = 0x00f00000;
phys_addr = 36;
and in generic_mtrr_ops
/*
* Generic structure...
*/
struct mtrr_ops generic_mtrr_ops = {
.use_intel_if = 1,
.set_all = generic_set_all,
so all cpus that cpuid tell us that there is mtrr support, will all
have use_intel() == 1
YH
On 12/09/2009 10:32 AM, Yinghai Lu wrote:
> /*
> * Generic structure...
> */
> struct mtrr_ops generic_mtrr_ops = {
> .use_intel_if = 1,
> .set_all = generic_set_all,
>
> so all cpus that cpuid tell us that there is mtrr support, will all
> have use_intel() == 1
Yes, because the right thing to check isn't the CPU vendor bit but the
CPU feature bits -- all CPUs that have the MTRR feature bit are using
the "Intel interface" -- regardless of if they are made by Intel, AMD,
VIA, or Transmeta. The older AMD, Cyrix etc. interfaces use different bits.
-hpa