2022-09-08 09:21:02

by Jürgen Groß

Subject: [PATCH v3 05/10] x86/mtrr: split generic_set_all()

Split generic_set_all() into multiple parts, while moving the main
function body into cacheinfo.c.

This prepares the support of PAT without needing MTRR support by
moving the main function body of generic_set_all() into cacheinfo.c
while renaming it to cache_cpu_init(). The MTRR specific parts are
moved into a dedicated small function called by cache_cpu_init().
The PAT and MTRR specific functions are called conditionally based
on the cache_generic bit settings.
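
(For context: cache_generic and the CACHE_GENERIC_* flags are
introduced earlier in this series; a minimal sketch of the assumed
definitions, with illustrative values only:)

  /* Assumed shape of the flags tested above (values illustrative). */
  #define CACHE_GENERIC_MTRR 0x01
  #define CACHE_GENERIC_PAT  0x02

  extern unsigned int cache_generic;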

The setting of smp_changes_mask is merged into the (new) function
mtrr_generic_set_state(), which is used to call set_mtrr_state(). The
two steps were probably split in ancient times, as atomic operations
while running uncached might be quite expensive, but OTOH only systems
with a broken BIOS should ever need to set any bit in smp_changes_mask,
so penalizing those systems by a few microseconds during boot shouldn't
be a real issue.
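
(Side note, not part of the patch: the bit-propagation loop kept in
mtrr_generic_set_state() is equivalent to this sketch using the
kernel's for_each_set_bit() helper, shown only to clarify what the
loop does:)

  unsigned long bit;

  /* Copy every bit set in mask into the global smp_changes_mask. */
  for_each_set_bit(bit, &mask, BITS_PER_LONG)
          set_bit(bit, &smp_changes_mask);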

Signed-off-by: Juergen Gross <[email protected]>
---
V2:
- new patch
---
 arch/x86/include/asm/cacheinfo.h   |  1 +
 arch/x86/include/asm/mtrr.h        |  2 ++
 arch/x86/kernel/cpu/cacheinfo.c    | 19 +++++++++++++++++++
 arch/x86/kernel/cpu/mtrr/generic.c | 15 ++-------------
 4 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
index 313a6920d0f9..563d9cb5fcf5 100644
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -12,5 +12,6 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);

void cache_disable(void);
void cache_enable(void);
+void cache_cpu_init(void);

#endif /* _ASM_X86_CACHEINFO_H */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 12a16caed395..986249a2b9b6 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -50,6 +50,7 @@ extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
extern int amd_special_default_mtrr(void);
void mtrr_disable(void);
void mtrr_enable(void);
+void mtrr_generic_set_state(void);
# else
static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
{
@@ -91,6 +92,7 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
#define mtrr_bp_restore() do {} while (0)
#define mtrr_disable() do {} while (0)
#define mtrr_enable() do {} while (0)
+#define mtrr_generic_set_state() do {} while (0)
# endif

#ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 47e2c72fa8a4..36378604ec61 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -1120,3 +1120,22 @@ void cache_enable(void) __releases(cache_disable_lock)
 
         raw_spin_unlock(&cache_disable_lock);
 }
+
+void cache_cpu_init(void)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        cache_disable();
+
+        /* Set MTRR state. */
+        if (cache_generic & CACHE_GENERIC_MTRR)
+                mtrr_generic_set_state();
+
+        /* Set PAT. */
+        if (cache_generic & CACHE_GENERIC_PAT)
+                pat_init();
+
+        cache_enable();
+        local_irq_restore(flags);
+}
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 5ed397f03a87..fc7b2d952737 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -731,30 +731,19 @@ void mtrr_enable(void)
         mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 }
 
-static void generic_set_all(void)
+void mtrr_generic_set_state(void)
 {
         unsigned long mask, count;
-        unsigned long flags;
-
-        local_irq_save(flags);
-        cache_disable();
 
         /* Actually set the state */
         mask = set_mtrr_state();
 
-        /* also set PAT */
-        pat_init();
-
-        cache_enable();
-        local_irq_restore(flags);
-
         /* Use the atomic bitops to update the global mask */
         for (count = 0; count < sizeof(mask) * 8; ++count) {
                 if (mask & 0x01)
                         set_bit(count, &smp_changes_mask);
                 mask >>= 1;
         }
-
 }

/**
@@ -854,7 +843,7 @@ int positive_have_wrcomb(void)
* Generic structure...
*/
 const struct mtrr_ops generic_mtrr_ops = {
-        .set_all                = generic_set_all,
+        .set_all                = cache_cpu_init,
         .get                    = generic_get_mtrr,
         .get_free_region        = generic_get_free_region,
         .set                    = generic_set_mtrr,
--
2.35.3


2022-09-19 19:43:02

by Borislav Petkov

Subject: Re: [PATCH v3 05/10] x86/mtrr: split generic_set_all()

On Thu, Sep 08, 2022 at 10:49:09AM +0200, Juergen Gross wrote:
> Split generic_set_all() into multiple parts, while moving the main
> function body into cacheinfo.c.
>
> This prepares the support of PAT without needing MTRR support by

"Prepare PAT support without ... "

> moving the main function body of generic_set_all() into cacheinfo.c
> while renaming it to cache_cpu_init(). The MTRR specific parts are
> moved into a dedicated small function called by cache_cpu_init().
> The PAT and MTRR specific functions are called conditionally based
> on the cache_generic bit settings.
>
> The setting of smp_changes_mask is merged into the (new) function

... and so on. So the commit message should not say what you're doing -
that should be visible from the diff itself. It should talk more about
the *why* you're doing it.

...

> diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
> index 47e2c72fa8a4..36378604ec61 100644
> --- a/arch/x86/kernel/cpu/cacheinfo.c
> +++ b/arch/x86/kernel/cpu/cacheinfo.c
> @@ -1120,3 +1120,22 @@ void cache_enable(void) __releases(cache_disable_lock)
>
>          raw_spin_unlock(&cache_disable_lock);
>  }
> +
> +void cache_cpu_init(void)
> +{
> +        unsigned long flags;
> +
> +        local_irq_save(flags);
> +        cache_disable();
> +
> + /* Set MTRR state. */

Yeah, and when you name the functions properly as you've done, you don't
really need those comments anymore either.

> +        if (cache_generic & CACHE_GENERIC_MTRR)
> +                mtrr_generic_set_state();
> +
> + /* Set PAT. */

And this one.

> +        if (cache_generic & CACHE_GENERIC_PAT)
> +                pat_init();
> +
> +        cache_enable();
> +        local_irq_restore(flags);
> +}
> diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
> index 5ed397f03a87..fc7b2d952737 100644
> --- a/arch/x86/kernel/cpu/mtrr/generic.c
> +++ b/arch/x86/kernel/cpu/mtrr/generic.c
> @@ -731,30 +731,19 @@ void mtrr_enable(void)
>          mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
>  }
>
> -static void generic_set_all(void)
> +void mtrr_generic_set_state(void)
>  {
>          unsigned long mask, count;
> -        unsigned long flags;
> -
> -        local_irq_save(flags);
> -        cache_disable();
>
>          /* Actually set the state */
>          mask = set_mtrr_state();
>
> -        /* also set PAT */
> -        pat_init();
> -
> -        cache_enable();
> -        local_irq_restore(flags);
> -
>          /* Use the atomic bitops to update the global mask */
>          for (count = 0; count < sizeof(mask) * 8; ++count) {
>                  if (mask & 0x01)
>                          set_bit(count, &smp_changes_mask);
>                  mask >>= 1;
>          }
> -
>  }
>
> /**
> @@ -854,7 +843,7 @@ int positive_have_wrcomb(void)
> * Generic structure...
> */
>  const struct mtrr_ops generic_mtrr_ops = {
> -        .set_all                = generic_set_all,
> +        .set_all                = cache_cpu_init,

I was gonna say that this looks weird - a set_all function pointer
assigned to an init function - but that changes in the next patch.
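
(For context, a simplified sketch of how .set_all is reached - the
call site below assumes the MTRR rendezvous handler in
arch/x86/kernel/cpu/mtrr/mtrr.c and may be inexact:)

  if (data->smp_reg != ~0U)
          mtrr_if->set(data->smp_reg, data->smp_base,
                       data->smp_size, data->smp_type);
  else
          mtrr_if->set_all();     /* with this patch: cache_cpu_init() */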

But yeah, I like where this is going.

Thx.

--
Regards/Gruss,
Boris.

https://people.kernel.org/tglx/notes-about-netiquette