Commit 8d4a4300 authored by Thomas Gleixner's avatar Thomas Gleixner Committed by Ingo Molnar

x86: cleanup PAT cpu validation

Move the scattered checks for PAT support to a single function. Its
moved to addon_cpuid_features.c as this file is shared between 32 and
64 bit.

Remove the manipulation of the PAT feature bit and just disable PAT in
the PAT layer, based on the PAT bit provided by the CPU and the
current CPU version/model white list.

Change the boot CPU check so it works on Voyager somewhere in the
future as well :) Also panic, when a secondary has PAT disabled but
the primary one has already switched to PAT. We have no way to undo
that.

The white list is kept for now to ensure that we can rely on known to
work CPU types and concentrate on the software induced problems
instead of fighting CPU errata and subtle wreckage caused by not yet
verified CPUs. Once the PAT code has stabilized enough, we can remove
the white list and open the can of worms.
Signed-off-by: default avatarThomas Gleixner <tglx@linutronix.de>
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parent cb3f43b2
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/cpu.h> #include <linux/cpu.h>
#include <asm/pat.h>
#include <asm/processor.h> #include <asm/processor.h>
struct cpuid_bit { struct cpuid_bit {
...@@ -48,3 +49,23 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) ...@@ -48,3 +49,23 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
set_cpu_cap(c, cb->feature); set_cpu_cap(c, cb->feature);
} }
} }
#ifdef CONFIG_X86_PAT
/*
 * Decide whether PAT may be used on this CPU.
 *
 * Returns early (leaving PAT enabled) only for CPUs on the known-good
 * white list; otherwise calls pat_disable() with a message that
 * distinguishes "CPU has PAT but is not yet verified" from "CPU has no
 * PAT support at all" (via cpu_has_pat).
 *
 * @c: cpuinfo of the CPU being validated (boot CPU or secondary).
 */
void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		/* AMD families 0xf (K8) through 0x11 are verified. */
		if (c->x86 >= 0xf && c->x86 <= 0x11)
			return;
		break;
	case X86_VENDOR_INTEL:
		/* Netburst (family 0xF) or Core (family 6, model >= 15). */
		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
			return;
		break;
	}
	/* Not white-listed: disable PAT with the appropriate reason. */
	pat_disable(cpu_has_pat ?
		 "PAT disabled. Not yet verified on this CPU type." :
		 "PAT not supported by CPU.");
}
#endif
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/mtrr.h> #include <asm/mtrr.h>
#include <asm/mce.h> #include <asm/mce.h>
#include <asm/pat.h>
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h> #include <asm/mpspec.h>
#include <asm/apic.h> #include <asm/apic.h>
...@@ -308,19 +309,6 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c) ...@@ -308,19 +309,6 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
} }
clear_cpu_cap(c, X86_FEATURE_PAT);
switch (c->x86_vendor) {
case X86_VENDOR_AMD:
if (c->x86 >= 0xf && c->x86 <= 0x11)
set_cpu_cap(c, X86_FEATURE_PAT);
break;
case X86_VENDOR_INTEL:
if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
set_cpu_cap(c, X86_FEATURE_PAT);
break;
}
} }
/* /*
...@@ -409,18 +397,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) ...@@ -409,18 +397,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
init_scattered_cpuid_features(c); init_scattered_cpuid_features(c);
} }
clear_cpu_cap(c, X86_FEATURE_PAT);
switch (c->x86_vendor) {
case X86_VENDOR_AMD:
if (c->x86 >= 0xf && c->x86 <= 0x11)
set_cpu_cap(c, X86_FEATURE_PAT);
break;
case X86_VENDOR_INTEL:
if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
set_cpu_cap(c, X86_FEATURE_PAT);
break;
}
} }
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
...@@ -651,6 +627,7 @@ void __init early_cpu_init(void) ...@@ -651,6 +627,7 @@ void __init early_cpu_init(void)
cpu_devs[cvdev->vendor] = cvdev->cpu_dev; cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
early_cpu_detect(); early_cpu_detect();
validate_pat_support(&boot_cpu_data);
} }
/* Make sure %fs is initialized properly in idle threads */ /* Make sure %fs is initialized properly in idle threads */
......
...@@ -70,6 +70,7 @@ ...@@ -70,6 +70,7 @@
#include <asm/ds.h> #include <asm/ds.h>
#include <asm/topology.h> #include <asm/topology.h>
#include <asm/trampoline.h> #include <asm/trampoline.h>
#include <asm/pat.h>
#include <mach_apic.h> #include <mach_apic.h>
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
...@@ -1063,25 +1064,19 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) ...@@ -1063,25 +1064,19 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
if (c->extended_cpuid_level >= 0x80000007) if (c->extended_cpuid_level >= 0x80000007)
c->x86_power = cpuid_edx(0x80000007); c->x86_power = cpuid_edx(0x80000007);
clear_cpu_cap(c, X86_FEATURE_PAT);
switch (c->x86_vendor) { switch (c->x86_vendor) {
case X86_VENDOR_AMD: case X86_VENDOR_AMD:
early_init_amd(c); early_init_amd(c);
if (c->x86 >= 0xf && c->x86 <= 0x11)
set_cpu_cap(c, X86_FEATURE_PAT);
break; break;
case X86_VENDOR_INTEL: case X86_VENDOR_INTEL:
early_init_intel(c); early_init_intel(c);
if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
set_cpu_cap(c, X86_FEATURE_PAT);
break; break;
case X86_VENDOR_CENTAUR: case X86_VENDOR_CENTAUR:
early_init_centaur(c); early_init_centaur(c);
break; break;
} }
validate_pat_support(c);
} }
/* /*
......
...@@ -25,31 +25,24 @@ ...@@ -25,31 +25,24 @@
#include <asm/mtrr.h> #include <asm/mtrr.h>
#include <asm/io.h> #include <asm/io.h>
int pat_wc_enabled = 1; #ifdef CONFIG_X86_PAT
int __read_mostly pat_wc_enabled = 1;
static u64 __read_mostly boot_pat_state; void __init pat_disable(char *reason)
static int nopat(char *str)
{ {
pat_wc_enabled = 0; pat_wc_enabled = 0;
printk(KERN_INFO "x86: PAT support disabled.\n"); printk(KERN_INFO "%s\n", reason);
return 0;
} }
early_param("nopat", nopat);
static int pat_known_cpu(void) static int nopat(char *str)
{ {
if (!pat_wc_enabled) pat_disable("PAT support disabled.");
return 0;
if (cpu_has_pat)
return 1;
pat_wc_enabled = 0;
printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
return 0; return 0;
} }
early_param("nopat", nopat);
#endif
static u64 __read_mostly boot_pat_state;
enum { enum {
PAT_UC = 0, /* uncached */ PAT_UC = 0, /* uncached */
...@@ -66,17 +59,19 @@ void pat_init(void) ...@@ -66,17 +59,19 @@ void pat_init(void)
{ {
u64 pat; u64 pat;
#ifndef CONFIG_X86_PAT if (!pat_wc_enabled)
nopat(NULL);
#endif
/* Boot CPU enables PAT based on CPU feature */
if (!smp_processor_id() && !pat_known_cpu())
return; return;
/* APs enable PAT iff boot CPU has enabled it before */ /* Paranoia check. */
if (smp_processor_id() && !pat_wc_enabled) if (!cpu_has_pat) {
return; printk(KERN_ERR "PAT enabled, but CPU feature cleared\n");
/*
* Panic if this happens on the secondary CPU, and we
* switched to PAT on the boot CPU. We have no way to
* undo PAT.
*/
BUG_ON(boot_pat_state);
}
/* Set PWT to Write-Combining. All other bits stay the same */ /* Set PWT to Write-Combining. All other bits stay the same */
/* /*
...@@ -95,9 +90,8 @@ void pat_init(void) ...@@ -95,9 +90,8 @@ void pat_init(void)
PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC); PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);
/* Boot CPU check */ /* Boot CPU check */
if (!smp_processor_id()) { if (!boot_pat_state)
rdmsrl(MSR_IA32_CR_PAT, boot_pat_state); rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
}
wrmsrl(MSR_IA32_CR_PAT, pat); wrmsrl(MSR_IA32_CR_PAT, pat);
printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n", printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
......
...@@ -4,7 +4,13 @@ ...@@ -4,7 +4,13 @@
#include <linux/types.h> #include <linux/types.h>
#ifdef CONFIG_X86_PAT
extern int pat_wc_enabled; extern int pat_wc_enabled;
extern void validate_pat_support(struct cpuinfo_x86 *c);
#else
static const int pat_wc_enabled = 0;
static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
#endif
extern void pat_init(void); extern void pat_init(void);
...@@ -12,5 +18,7 @@ extern int reserve_memtype(u64 start, u64 end, ...@@ -12,5 +18,7 @@ extern int reserve_memtype(u64 start, u64 end,
unsigned long req_type, unsigned long *ret_type); unsigned long req_type, unsigned long *ret_type);
extern int free_memtype(u64 start, u64 end); extern int free_memtype(u64 start, u64 end);
extern void pat_disable(char *reason);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment