The way mtrr_if is initialized with the correct mtrr_ops structure is
quite convoluted: each vendor registers its ops in a lookup array via a
dedicated init function, only for mtrr_bp_init() to fetch them from that
array again.
Simplify that by dropping the vendor specific init functions and the
mtrr_ops[] array. Replace those with direct assignments of the related
vendor specific ops array to mtrr_if.
Signed-off-by: Juergen Gross <[email protected]>
---
V4:
- new patch
---
arch/x86/kernel/cpu/mtrr/amd.c | 8 +-------
arch/x86/kernel/cpu/mtrr/centaur.c | 8 +-------
arch/x86/kernel/cpu/mtrr/cyrix.c | 8 +-------
arch/x86/kernel/cpu/mtrr/mtrr.c | 30 +++---------------------------
arch/x86/kernel/cpu/mtrr/mtrr.h | 15 +++++++++------
5 files changed, 15 insertions(+), 54 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
index a65a0272096d..eff6ac62c0ff 100644
--- a/arch/x86/kernel/cpu/mtrr/amd.c
+++ b/arch/x86/kernel/cpu/mtrr/amd.c
@@ -109,7 +109,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
return 0;
}
-static const struct mtrr_ops amd_mtrr_ops = {
+const struct mtrr_ops amd_mtrr_ops = {
.vendor = X86_VENDOR_AMD,
.set = amd_set_mtrr,
.get = amd_get_mtrr,
@@ -117,9 +117,3 @@ static const struct mtrr_ops amd_mtrr_ops = {
.validate_add_page = amd_validate_add_page,
.have_wrcomb = positive_have_wrcomb,
};
-
-int __init amd_init_mtrr(void)
-{
- set_mtrr_ops(&amd_mtrr_ops);
- return 0;
-}
diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
index f27177816569..b8a74eddde83 100644
--- a/arch/x86/kernel/cpu/mtrr/centaur.c
+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
@@ -111,7 +111,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
return 0;
}
-static const struct mtrr_ops centaur_mtrr_ops = {
+const struct mtrr_ops centaur_mtrr_ops = {
.vendor = X86_VENDOR_CENTAUR,
.set = centaur_set_mcr,
.get = centaur_get_mcr,
@@ -119,9 +119,3 @@ static const struct mtrr_ops centaur_mtrr_ops = {
.validate_add_page = centaur_validate_add_page,
.have_wrcomb = positive_have_wrcomb,
};
-
-int __init centaur_init_mtrr(void)
-{
- set_mtrr_ops(&centaur_mtrr_ops);
- return 0;
-}
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index c77d3b0a5bf2..173b9e01e623 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -234,7 +234,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
post_set();
}
-static const struct mtrr_ops cyrix_mtrr_ops = {
+const struct mtrr_ops cyrix_mtrr_ops = {
.vendor = X86_VENDOR_CYRIX,
.set = cyrix_set_arr,
.get = cyrix_get_arr,
@@ -242,9 +242,3 @@ static const struct mtrr_ops cyrix_mtrr_ops = {
.validate_add_page = generic_validate_add_page,
.have_wrcomb = positive_have_wrcomb,
};
-
-int __init cyrix_init_mtrr(void)
-{
- set_mtrr_ops(&cyrix_mtrr_ops);
- return 0;
-}
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 1b652fa768a6..7ba68356c0ff 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -65,16 +65,8 @@ static DEFINE_MUTEX(mtrr_mutex);
u64 size_or_mask, size_and_mask;
-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
-
const struct mtrr_ops *mtrr_if;
-void __init set_mtrr_ops(const struct mtrr_ops *ops)
-{
- if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
- mtrr_ops[ops->vendor] = ops;
-}
-
/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void)
{
@@ -578,20 +570,6 @@ int arch_phys_wc_index(int handle)
}
EXPORT_SYMBOL_GPL(arch_phys_wc_index);
-/*
- * HACK ALERT!
- * These should be called implicitly, but we can't yet until all the initcall
- * stuff is done...
- */
-static void __init init_ifs(void)
-{
-#ifndef CONFIG_X86_64
- amd_init_mtrr();
- cyrix_init_mtrr();
- centaur_init_mtrr();
-#endif
-}
-
/* The suspend/resume methods are only for CPU without MTRR. CPU using generic
* MTRR driver doesn't require this
*/
@@ -649,8 +627,6 @@ void __init mtrr_bp_init(void)
{
u32 phys_addr;
- init_ifs();
-
phys_addr = 32;
if (boot_cpu_has(X86_FEATURE_MTRR)) {
@@ -691,21 +667,21 @@ void __init mtrr_bp_init(void)
case X86_VENDOR_AMD:
if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
/* Pre-Athlon (K6) AMD CPU MTRRs */
- mtrr_if = mtrr_ops[X86_VENDOR_AMD];
+ mtrr_if = vendor_mtrr_ops(amd_mtrr_ops);
size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
case X86_VENDOR_CENTAUR:
if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
- mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
+ mtrr_if = vendor_mtrr_ops(centaur_mtrr_ops);
size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
case X86_VENDOR_CYRIX:
if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
- mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
+ mtrr_if = vendor_mtrr_ops(cyrix_mtrr_ops);
size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index c98928ceee6a..7a7387356192 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -51,8 +51,6 @@ void fill_mtrr_var_range(unsigned int index,
u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
bool get_mtrr_state(void);
-extern void __init set_mtrr_ops(const struct mtrr_ops *ops);
-
extern u64 size_or_mask, size_and_mask;
extern const struct mtrr_ops *mtrr_if;
@@ -66,10 +64,15 @@ void mtrr_state_warn(void);
const char *mtrr_attrib_to_str(int x);
void mtrr_wrmsr(unsigned, unsigned, unsigned);
-/* CPU specific mtrr init functions */
-int amd_init_mtrr(void);
-int cyrix_init_mtrr(void);
-int centaur_init_mtrr(void);
+/* CPU specific mtrr_ops vectors. */
+extern const struct mtrr_ops amd_mtrr_ops;
+extern const struct mtrr_ops cyrix_mtrr_ops;
+extern const struct mtrr_ops centaur_mtrr_ops;
+#ifdef CONFIG_X86_64
+#define vendor_mtrr_ops(x) NULL
+#else
+#define vendor_mtrr_ops(x) &(x)
+#endif
extern int changed_by_mtrr_cleanup;
extern int mtrr_cleanup(unsigned address_bits);
--
2.35.3
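Condensed from the hunks above, the shape of the change (identifiers as
in the patch; a sketch, not a compilable unit):

/* Old flow (removed): each vendor file registered its ops in a lookup
 * array via an init function called from init_ifs() at boot: */
static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;

void __init set_mtrr_ops(const struct mtrr_ops *ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}

	/* ... later, in mtrr_bp_init(): */
	mtrr_if = mtrr_ops[X86_VENDOR_AMD];

/* New flow: the vendor ops structures are plain externs and are picked
 * directly; vendor_mtrr_ops() expands to &(x) on 32-bit and to NULL on
 * 64-bit, where the vendor files are not built: */
extern const struct mtrr_ops amd_mtrr_ops;

	/* ... in mtrr_bp_init(): */
	mtrr_if = vendor_mtrr_ops(amd_mtrr_ops);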
On Tue, Oct 04, 2022 at 10:10:23AM +0200, Juergen Gross wrote:
> +#ifdef CONFIG_X86_64
> +#define vendor_mtrr_ops(x) NULL
> +#else
> +#define vendor_mtrr_ops(x) &(x)
> +#endif
The idea is good but this is just as hacky.
Just assign the correct one in each branch without this funky ifdeffery.
Thx.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On 30.10.22 13:06, Borislav Petkov wrote:
> On Tue, Oct 04, 2022 at 10:10:23AM +0200, Juergen Gross wrote:
>> +#ifdef CONFIG_X86_64
>> +#define vendor_mtrr_ops(x) NULL
>> +#else
>> +#define vendor_mtrr_ops(x) &(x)
>> +#endif
>
> The idea is good but this is just as hacky.
>
> Just assign the correct one in each branch without this funky ifdeffery.
As the specific ops variables are available for X86_32 only, this
would require adding an "#ifdef CONFIG_X86_32" around the code block
doing the assignments; otherwise the build would fail.
Juergen
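To make the failure mode concrete, here is a standalone sketch (not
kernel code; all names are made up for illustration). An extern that is
declared but never defined, like the vendor ops structures on a 64-bit
build, may be referenced from a branch the compiler can prove dead.
With optimization enabled, the branch and the relocation are dropped
and the link succeeds; with a condition only known at run time, the
reference stays and the link fails with an undefined reference:

/* dead_ref.c - build with: cc -O2 dead_ref.c */
#include <stdio.h>

struct ops { const char *name; };

extern const struct ops ops_32;	/* declared, intentionally never defined */

/* Stand-in for a feature check that folds to a build-time constant,
 * the way cpu_feature_enabled() does for disabled features. */
#define FEATURE_32BIT_MTRR 0

int main(void)
{
	const struct ops *iface = NULL;

	/* Provably dead branch: both it and the reference to ops_32
	 * are eliminated, so no undefined symbol reaches the linker. */
	if (FEATURE_32BIT_MTRR)
		iface = &ops_32;

	printf("iface = %p\n", (void *)iface);
	return 0;
}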
On Sun, Oct 30, 2022 at 04:05:29PM +0100, Juergen Gross wrote:
> As the specific ops variables are available for X86_32 only, this
> would require adding an "#ifdef CONFIG_X86_32" around the code block
> doing the assignments; otherwise the build would fail.
Well, it looks like my compiler is smart enough to eliminate all that
dead code, see diff below.
I've added the asm markers "#begin" and "#end" and the resulting asm
looks like this:
# arch/x86/kernel/cpu/mtrr/mtrr.c:666: asm volatile("#begin");
call __sanitizer_cov_trace_pc #
#APP
# 666 "arch/x86/kernel/cpu/mtrr/mtrr.c" 1
#begin
# 0 "" 2
# arch/x86/kernel/cpu/mtrr/mtrr.c:693: asm volatile("#end");
# 693 "arch/x86/kernel/cpu/mtrr/mtrr.c" 1
#end
# 0 "" 2
# arch/x86/kernel/cpu/mtrr/mtrr.c:630: phys_addr = 32;
#NO_APP
which basically says that everything between lines 666 and 693 has been
eliminated.
I have the suspicion, though, that clang might not be that smart.
Lemme test it a bit.
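(Aside: kbuild can produce the assembly for a single translation unit
with "make arch/x86/kernel/cpu/mtrr/mtrr.s", which makes this kind of
elimination check easy to reproduce.)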
---
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 7ba68356c0ff..d499c83b2ad7 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -663,25 +663,26 @@ void __init mtrr_bp_init(void)
phys_addr = 32;
}
} else {
+ asm volatile("#begin");
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
/* Pre-Athlon (K6) AMD CPU MTRRs */
- mtrr_if = vendor_mtrr_ops(amd_mtrr_ops);
+ mtrr_if = &amd_mtrr_ops;
size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
case X86_VENDOR_CENTAUR:
if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
- mtrr_if = vendor_mtrr_ops(centaur_mtrr_ops);
+ mtrr_if = &centaur_mtrr_ops;
size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
case X86_VENDOR_CYRIX:
if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
- mtrr_if = vendor_mtrr_ops(cyrix_mtrr_ops);
+ mtrr_if = &cyrix_mtrr_ops;
size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
@@ -689,6 +690,7 @@ void __init mtrr_bp_init(void)
default:
break;
}
+ asm volatile("#end");
}
if (mtrr_if) {
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 7a7387356192..02eb5871492d 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -68,11 +68,6 @@ void mtrr_wrmsr(unsigned, unsigned, unsigned);
extern const struct mtrr_ops amd_mtrr_ops;
extern const struct mtrr_ops cyrix_mtrr_ops;
extern const struct mtrr_ops centaur_mtrr_ops;
-#ifdef CONFIG_X86_64
-#define vendor_mtrr_ops(x) NULL
-#else
-#define vendor_mtrr_ops(x) &(x)
-#endif
extern int changed_by_mtrr_cleanup;
extern int mtrr_cleanup(unsigned address_bits);
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
Another randconfig build failure in the meantime. Pretty easy to
trigger:
ld: arch/x86/kernel/cpu/cacheinfo.o: in function `cache_bp_init':
cacheinfo.c:(.init.text+0x1): undefined reference to `mtrr_bp_init'
make[1]: *** [scripts/Makefile.vmlinux:34: vmlinux] Error 1
make: *** [Makefile:1236: vmlinux] Error 2
the reason being:
# CONFIG_MTRR is not set
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
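The usual guard against this class of failure is the standard header
stub idiom; a sketch of the pattern (whether the series ended up fixing
it exactly this way is not visible in this thread):

/* In the header declaring the interface (cf. arch/x86/include/asm/mtrr.h):
 * callers such as cache_bp_init() can then call mtrr_bp_init()
 * unconditionally, with no #ifdef at the call site. */
#ifdef CONFIG_MTRR
void mtrr_bp_init(void);
#else
static inline void mtrr_bp_init(void) { }
#endif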
On Tue, Nov 01, 2022 at 10:48:10AM +0100, Juergen Gross wrote:
> Hmm, with the cpu_feature_enabled() implementation it should be quite
> obvious that this is all dead code.
Yes it is. I ran ~100 randconfigs with both gcc and clang and not a
single one failed the build, so I guess you can say in the commit message
that it is OK for this symbol not to be present on 64-bit, as it will be
eliminated anyway.
Thx.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On 30.10.22 17:39, Borislav Petkov wrote:
> On Sun, Oct 30, 2022 at 04:05:29PM +0100, Juergen Gross wrote:
>> As the specific ops variables are available for X86_32 only, this
>> would require adding an "#ifdef CONFIG_X86_32" around the code block
>> doing the assignments; otherwise the build would fail.
>
> Well, it looks like my compiler is smart enough to eliminate all that
> dead code, see diff below.
>
> I have the suspicion, though, that clang might not be that smart.
Hmm, with the cpu_feature_enabled() implementation it should be quite
obvious that this is all dead code.
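For reference, the machinery that makes this obvious (paraphrased from
arch/x86/include/asm/cpufeature.h and disabled-features.h of that era;
consult the tree for the exact definitions):

#define cpu_feature_enabled(bit)					\
	(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ?	\
	 0 : static_cpu_has(bit))

/* On CONFIG_X86_64 the three vendor feature bits are in the disabled
 * mask, so each cpu_feature_enabled() check in mtrr_bp_init() folds to
 * a constant 0 and the assignments become unreachable: */
# define DISABLE_K6_MTRR	(1<<(X86_FEATURE_K6_MTRR & 31))
# define DISABLE_CYRIX_ARR	(1<<(X86_FEATURE_CYRIX_ARR & 31))
# define DISABLE_CENTAUR_MCR	(1<<(X86_FEATURE_CENTAUR_MCR & 31))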
I'm going with dropping the vendor_mtrr_ops() macro for now.
Juergen