[PATCH] x86: enable PAT for amd k8 and fam10h
Make known_pat_cpu() treat AMD K8 and Fam10h CPUs as PAT-capable too.
Also make the memory range below TOM2 use the WRBACK type.
Signed-off-by: Yinghai Lu <[email protected]>
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 011e07e..74ec2ea 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -37,6 +37,7 @@ static struct fixed_range_block fixed_range_blocks[] = {
static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};
static int mtrr_state_set;
+static u64 tom2;
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."
@@ -138,6 +139,11 @@ u8 mtrr_type_lookup(u64 start, u64 end)
}
}
+ if (tom2) {
+ if (start >= (1ULL<<32) && (end < tom2))
+ return MTRR_TYPE_WRBACK;
+ }
+
if (prev_match != 0xFF)
return prev_match;
@@ -206,6 +212,15 @@ void __init get_mtrr_state(void)
mtrr_state.def_type = (lo & 0xff);
mtrr_state.enabled = (lo & 0xc00) >> 10;
+ if (amd_special_default_mtrr()) {
+ unsigned lo, hi;
+ /* TOP_MEM2 */
+ rdmsr(MSR_K8_TOP_MEM2, lo, hi);
+ tom2 = hi;
+ tom2 <<= 32;
+ tom2 |= lo;
+ tom2 &= 0xffffff8000000ULL;
+ }
if (mtrr_show) {
int high_width;
@@ -236,6 +251,8 @@ void __init get_mtrr_state(void)
else
printk(KERN_INFO "MTRR %u disabled\n", i);
}
+ if (tom2)
+ printk(KERN_INFO "TOM2: %016lx aka %ldM\n", tom2, tom2>>20);
}
mtrr_state_set = 1;
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index a6450b3..6a1e278 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -627,7 +627,7 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
#define Tom2Enabled (1U << 21)
#define Tom2ForceMemTypeWB (1U << 22)
-static __init int amd_special_default_mtrr(void)
+int __init amd_special_default_mtrr(void)
{
u32 l, h;
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index f74505f..a9e1a27 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -49,6 +49,12 @@ static int pat_known_cpu(void)
return 1;
}
}
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x11) {
+ if (cpu_has_pat) {
+ return 1;
+ }
+ }
pat_wc_enabled = 0;
printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h
index ee17229..d3d2662 100644
--- a/include/asm-x86/mtrr.h
+++ b/include/asm-x86/mtrr.h
@@ -99,6 +99,7 @@ extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
extern void mtrr_ap_init(void);
extern void mtrr_bp_init(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
+extern int amd_special_default_mtrr(void);
# else
static inline u8 mtrr_type_lookup(u64 addr, u64 end)
{
Yinghai Lu wrote:
> [PATCH] x86: enable PAT for amd k8 and fam10h
>
> make known_pat_cpu to think amd k8 and fam10h is ok too.
>
> also make tom2 below to be WRBACK
>
> diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
> index f74505f..a9e1a27 100644
> --- a/arch/x86/mm/pat.c
> +++ b/arch/x86/mm/pat.c
> @@ -49,6 +49,12 @@ static int pat_known_cpu(void)
> return 1;
> }
> }
> + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
> + boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x11) {
> + if (cpu_has_pat) {
> + return 1;
> + }
> + }
>
> pat_wc_enabled = 0;
> printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
This really should be handled through a CPU flag. Specifically, it
should be handled by disabling the PAT flag if PAT is unusable or
suspect of being unusable; it should *NOT* be stashed away in a
completely separate piece of code.
-hpa
On Mon, Mar 24, 2008 at 6:29 PM, H. Peter Anvin <[email protected]> wrote:
> Yinghai Lu wrote:
> > [PATCH] x86: enable PAT for amd k8 and fam10h
> >
> > make known_pat_cpu to think amd k8 and fam10h is ok too.
> >
> > also make tom2 below to be WRBACK
> >
>
> > diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
> > index f74505f..a9e1a27 100644
> > --- a/arch/x86/mm/pat.c
> > +++ b/arch/x86/mm/pat.c
> > @@ -49,6 +49,12 @@ static int pat_known_cpu(void)
> > return 1;
> > }
> > }
> > + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
> > + boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x11) {
> > + if (cpu_has_pat) {
> > + return 1;
> > + }
> > + }
> >
> > pat_wc_enabled = 0;
> > printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
>
> This really should be handled through a CPU flag. Specifically, it
> should be handled by disabling the PAT flag if PAT is unusable or
> suspect of being unusable; it should *NOT* be stashed away in a
> completely separate piece of code.
The latest PAT patches in x86.git only support some Intel CPUs:
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
(boot_cpu_data.x86 == 0xF ||
(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model >= 15))) {
if (cpu_has_pat) {
return 1;
}
}
Should this CPU check be moved to setup_64.c?
YH
Yinghai Lu wrote:
>>
>> This really should be handled through a CPU flag. Specifically, it
>> should be handled by disabling the PAT flag if PAT is unusable or
>> suspect of being unusable; it should *NOT* be stashed away in a
>> completely separate piece of code.
>
> PAT patches in x86.git latest, only support some intel CPUs.
>
> if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
> (boot_cpu_data.x86 == 0xF ||
> (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model >= 15))) {
> if (cpu_has_pat) {
> return 1;
> }
> }
>
> should be moved to setup_64.c?
>
Yes, which in turn should be merged with the 32-bit code in cpu/*.c.
Personally I would prefer a blacklist rather than a whitelist, but
that's nitpicking.
-=hpa