Commit 446d2733 authored by Ingo Molnar

Merge branch 'x86/cpu' into x86/core

parents accf0fa6 0a488a53
@@ -1888,6 +1888,12 @@ and is between 256 and 4096 characters. It is defined in the file
 	shapers=	[NET]
 			Maximal number of shapers.
 
+	show_msr=	[x86] show boot-time MSR settings
+			Format: { <integer> }
+			Show boot-time (BIOS-initialized) MSR settings.
+			The parameter means the number of CPUs to show,
+			for example 1 means boot CPU only.
+
 	sim710=		[SCSI,HW]
 			See header of drivers/scsi/sim710.c.
......
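Editor's aside (not part of the patch): the consumer added to print_cpu_info() later in this commit compares show_msr against each CPU's cpu_index, so on an SMP kernel a command line of, say,

	show_msr=2

dumps the boot-time MSR ranges for CPUs 0 and 1, while on a UP kernel any non-zero value dumps them for the boot CPU.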
@@ -8,14 +8,14 @@ obj-y += proc.o capflags.o powerflags.o
 obj-$(CONFIG_X86_32)	+= common.o bugs.o cmpxchg.o
 obj-$(CONFIG_X86_64)	+= common_64.o bugs_64.o
-obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
-obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
 obj-$(CONFIG_CPU_SUP_AMD_32)		+= amd.o
 obj-$(CONFIG_CPU_SUP_AMD_64)		+= amd_64.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
 obj-$(CONFIG_CPU_SUP_CENTAUR_32)	+= centaur.o
 obj-$(CONFIG_CPU_SUP_CENTAUR_64)	+= centaur_64.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
+obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
+obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
 obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
 obj-$(CONFIG_X86_MCE)	+= mcheck/
......
@@ -31,6 +31,11 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 		if (c->x86_power & (1<<8))
 			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
+
+	/*  Set MTRR capability flag if appropriate */
+	if (c->x86_model == 13 || c->x86_model == 9 ||
+	    (c->x86_model == 8 && c->x86_mask >= 8))
+		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 				mbytes);
 		}
 
-		/*  Set MTRR capability flag if appropriate */
-		if (c->x86_model == 13 || c->x86_model == 9 ||
-		    (c->x86_model == 8 && c->x86_mask >= 8))
-			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 		break;
 	}
 
@@ -297,6 +298,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
 	.c_size_cache	= amd_size_cache,
+	.c_x86_vendor	= X86_VENDOR_AMD,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
+cpu_dev_register(amd_cpu_dev);
@@ -218,7 +218,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_ident	= { "AuthenticAMD" },
 	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
+	.c_x86_vendor	= X86_VENDOR_AMD,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
+cpu_dev_register(amd_cpu_dev);
@@ -314,6 +314,16 @@ enum {
 	EAMD3D		= 1<<20,
 };
 
+static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+{
+	switch (c->x86) {
+	case 5:
+		/* Emulate MTRRs using Centaur's MCR. */
+		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
+		break;
+	}
+}
+
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
@@ -462,8 +472,10 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
+	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
 	.c_size_cache	= centaur_size_cache,
+	.c_x86_vendor	= X86_VENDOR_CENTAUR,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
+cpu_dev_register(centaur_cpu_dev);

@@ -29,7 +29,8 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
+	.c_x86_vendor	= X86_VENDOR_CENTAUR,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
+cpu_dev_register(centaur_cpu_dev);
@@ -22,6 +22,8 @@
 #include "cpu.h"
 
+static struct cpu_dev *this_cpu __cpuinitdata;
+
 DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
@@ -58,12 +60,124 @@ DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
-__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
-
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
 
-struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static int __init cachesize_setup(char *str)
+{
+	get_option(&str, &cachesize_override);
+	return 1;
+}
+__setup("cachesize=", cachesize_setup);
+
+/*
+ * Naming convention should be: <Name> [(<Codename>)]
+ * This table only is used unless init_<vendor>() below doesn't set it;
+ * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
+ *
+ */
+
+/* Look up CPU names by table lookup. */
+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+{
+	struct cpu_model_info *info;
+
+	if (c->x86_model >= 16)
+		return NULL;	/* Range check */
+
+	if (!this_cpu)
+		return NULL;
+
+	info = this_cpu->c_models;
+
+	while (info && info->family) {
+		if (info->family == c->x86)
+			return info->model_names[c->x86_model];
+		info++;
+	}
+	return NULL;		/* Not found */
+}
+
+static int __init x86_fxsr_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_FXSR);
+	setup_clear_cpu_cap(X86_FEATURE_XMM);
+	return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
+static int __init x86_sep_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_SEP);
+	return 1;
+}
+__setup("nosep", x86_sep_setup);
+
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(u32 flag)
+{
+	u32 f1, f2;
+
+	asm("pushfl\n\t"
+	    "pushfl\n\t"
+	    "popl %0\n\t"
+	    "movl %0,%1\n\t"
+	    "xorl %2,%0\n\t"
+	    "pushl %0\n\t"
+	    "popfl\n\t"
+	    "pushfl\n\t"
+	    "popl %0\n\t"
+	    "popfl\n\t"
+	    : "=&r" (f1), "=&r" (f2)
+	    : "ir" (flag));
+
+	return ((f1^f2) & flag) != 0;
+}
+
+/* Probe for the CPUID instruction */
+static int __cpuinit have_cpuid_p(void)
+{
+	return flag_is_changeable_p(X86_EFLAGS_ID);
+}
+
+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
+		/* Disable processor serial number */
+		unsigned long lo, hi;
+		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+		lo |= 0x200000;
+		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+		printk(KERN_NOTICE "CPU serial number disabled.\n");
+		clear_cpu_cap(c, X86_FEATURE_PN);
+
+		/* Disabling the serial number may affect the cpuid level */
+		c->cpuid_level = cpuid_eax(0);
+	}
+}
+
+static int __init x86_serial_nr_setup(char *s)
+{
+	disable_x86_serial_nr = 0;
+	return 1;
+}
+__setup("serialnumber", x86_serial_nr_setup);
+
+__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
+
+/* Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one. */
+void switch_to_new_gdt(void)
+{
+	struct desc_ptr gdt_descr;
+
+	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.size = GDT_SIZE - 1;
+	load_gdt(&gdt_descr);
+	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+}
+
+static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -81,22 +195,15 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_init	= default_init,
 	.c_vendor = "Unknown",
+	.c_x86_vendor = X86_VENDOR_UNKNOWN,
 };
-static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
-
-static int __init cachesize_setup(char *str)
-{
-	get_option(&str, &cachesize_override);
-	return 1;
-}
-__setup("cachesize=", cachesize_setup);
 
 int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
 
-	if (cpuid_eax(0x80000000) < 0x80000004)
+	if (c->extended_cpuid_level < 0x80000004)
 		return 0;
 
 	v = (unsigned int *) c->x86_model_id;
@@ -120,24 +227,23 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	return 1;
 }
 
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, ecx, edx, l2size;
+	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
-	n = cpuid_eax(0x80000000);
+	n = c->extended_cpuid_level;
 
 	if (n >= 0x80000005) {
-		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
 		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
 			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-		c->x86_cache_size = (ecx>>24)+(edx>>24);
+		c->x86_cache_size = (ecx>>24) + (edx>>24);
 	}
 
 	if (n < 0x80000006)	/* Some chips just has a large L1. */
 		return;
 
-	ecx = cpuid_ecx(0x80000006);
+	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
 	l2size = ecx >> 16;
 
 	/* do processor-specific cache resizing */
@@ -157,109 +263,87 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 		l2size, ecx & 0xFF);
 }
 
-/*
- * Naming convention should be: <Name> [(<Codename>)]
- * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
- */
-
-/* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+#ifdef CONFIG_X86_HT
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
-	struct cpu_model_info *info;
+	u32 eax, ebx, ecx, edx;
+	int index_msb, core_bits;
 
-	if (c->x86_model >= 16)
-		return NULL;	/* Range check */
+	if (!cpu_has(c, X86_FEATURE_HT))
+		return;
 
-	if (!this_cpu)
-		return NULL;
+	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+		goto out;
 
-	info = this_cpu->c_models;
+	cpuid(1, &eax, &ebx, &ecx, &edx);
 
-	while (info && info->family) {
-		if (info->family == c->x86)
-			return info->model_names[c->x86_model];
-		info++;
+	smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+	if (smp_num_siblings == 1) {
+		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+	} else if (smp_num_siblings > 1) {
+
+		if (smp_num_siblings > NR_CPUS) {
+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
+					smp_num_siblings);
			smp_num_siblings = 1;
+			return;
+		}
+
+		index_msb = get_count_order(smp_num_siblings);
+		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
+
+		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+
+		index_msb = get_count_order(smp_num_siblings);
+
+		core_bits = get_count_order(c->x86_max_cores);
+
+		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+					       ((1 << core_bits) - 1);
 	}
-	return NULL;		/* Not found */
-}
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+out:
+	if ((c->x86_max_cores * smp_num_siblings) > 1) {
+		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+		       c->cpu_core_id);
+	}
+}
+#endif
+
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
 	int i;
 	static int printed;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
-		if (cpu_devs[i]) {
-			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
-			    (cpu_devs[i]->c_ident[1] &&
-			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
-				c->x86_vendor = i;
-				if (!early)
-					this_cpu = cpu_devs[i];
-				return;
-			}
+		if (!cpu_devs[i])
+			break;
+
+		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+		    (cpu_devs[i]->c_ident[1] &&
+		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+			this_cpu = cpu_devs[i];
+			c->x86_vendor = this_cpu->c_x86_vendor;
+			return;
 		}
 	}
+
 	if (!printed) {
 		printed++;
 		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
+
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
 	this_cpu = &default_cpu;
 }
 
-static int __init x86_fxsr_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_FXSR);
-	setup_clear_cpu_cap(X86_FEATURE_XMM);
-	return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
-static int __init x86_sep_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_SEP);
-	return 1;
-}
-__setup("nosep", x86_sep_setup);
-
-/* Standard macro to see if a specific flag is changeable */
-static inline int flag_is_changeable_p(u32 flag)
-{
-	u32 f1, f2;
-
-	asm("pushfl\n\t"
-	    "pushfl\n\t"
-	    "popl %0\n\t"
-	    "movl %0,%1\n\t"
-	    "xorl %2,%0\n\t"
-	    "pushl %0\n\t"
-	    "popfl\n\t"
-	    "pushfl\n\t"
-	    "popl %0\n\t"
-	    "popfl\n\t"
-	    : "=&r" (f1), "=&r" (f2)
-	    : "ir" (flag));
-
-	return ((f1^f2) & flag) != 0;
-}
-
-/* Probe for the CPUID instruction */
-static int __cpuinit have_cpuid_p(void)
-{
-	return flag_is_changeable_p(X86_EFLAGS_ID);
-}
-
-void __init cpu_detect(struct cpuinfo_x86 *c)
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -268,29 +352,29 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
 	      (unsigned int *)&c->x86_vendor_id[4]);
 
 	c->x86 = 4;
+	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
+
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-		c->x86 = (tfms >> 8) & 15;
-		c->x86_model = (tfms >> 4) & 15;
+		c->x86 = (tfms >> 8) & 0xf;
+		c->x86_model = (tfms >> 4) & 0xf;
+		c->x86_mask = tfms & 0xf;
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
-			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		c->x86_mask = tfms & 15;
+			c->x86_model += ((tfms >> 16) & 0xf) << 4;
 		if (cap0 & (1<<19)) {
-			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+			c->x86_cache_alignment = c->x86_clflush_size;
 		}
 	}
 }
-static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
+
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
-	unsigned int ebx;
+	u32 ebx;
 
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-	if (have_cpuid_p()) {
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 capability, excap;
@@ -301,17 +385,14 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
+	c->extended_cpuid_level = xlvl;
 	if ((xlvl & 0xffff0000) == 0x80000000) {
 		if (xlvl >= 0x80000001) {
 			c->x86_capability[1] = cpuid_edx(0x80000001);
 			c->x86_capability[6] = cpuid_ecx(0x80000001);
 		}
 	}
-	}
 }
 
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -321,25 +402,54 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
  * WARNING: this function is only called on the BP.  Don't add code here
  * that is supposed to run on all CPUs.
  */
-static void __init early_cpu_detect(void)
+static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-
-	c->x86_cache_alignment = 32;
 	c->x86_clflush_size = 32;
+	c->x86_cache_alignment = c->x86_clflush_size;
 
 	if (!have_cpuid_p())
 		return;
 
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+	c->extended_cpuid_level = 0;
+
 	cpu_detect(c);
 
-	get_cpu_vendor(c, 1);
+	get_cpu_vendor(c);
 
-	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
-	    cpu_devs[c->x86_vendor]->c_early_init)
-		cpu_devs[c->x86_vendor]->c_early_init(c);
+	get_cpu_cap(c);
 
-	early_get_cap(c);
+	if (this_cpu->c_early_init)
+		this_cpu->c_early_init(c);
+
+	validate_pat_support(c);
+}
+
+void __init early_cpu_init(void)
+{
+	struct cpu_dev **cdev;
+	int count = 0;
+
+	printk("KERNEL supported cpus:\n");
+	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
+		struct cpu_dev *cpudev = *cdev;
+		unsigned int j;
+
+		if (count >= X86_VENDOR_NUM)
+			break;
+		cpu_devs[count] = cpudev;
+		count++;
+
+		for (j = 0; j < 2; j++) {
+			if (!cpudev->c_ident[j])
+				continue;
+			printk("  %s %s\n", cpudev->c_vendor,
+				cpudev->c_ident[j]);
+		}
+	}
+
+	early_identify_cpu(&boot_cpu_data);
 }
 
 /*
@@ -373,86 +483,33 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
-	u32 tfms, xlvl;
-	unsigned int ebx;
-
-	if (have_cpuid_p()) {
-		/* Get vendor name */
-		cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-		      (unsigned int *)&c->x86_vendor_id[0],
-		      (unsigned int *)&c->x86_vendor_id[8],
-		      (unsigned int *)&c->x86_vendor_id[4]);
-
-		get_cpu_vendor(c, 0);
-		/* Initialize the standard set of capabilities */
-		/* Note that the vendor-specific code below might override */
-		/* Intel-defined flags: level 0x00000001 */
-		if (c->cpuid_level >= 0x00000001) {
-			u32 capability, excap;
-			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-			c->x86_capability[0] = capability;
-			c->x86_capability[4] = excap;
-			c->x86 = (tfms >> 8) & 15;
-			c->x86_model = (tfms >> 4) & 15;
-			if (c->x86 == 0xf)
-				c->x86 += (tfms >> 20) & 0xff;
-			if (c->x86 >= 0x6)
-				c->x86_model += ((tfms >> 16) & 0xF) << 4;
-			c->x86_mask = tfms & 15;
-			c->initial_apicid = (ebx >> 24) & 0xFF;
+	if (!have_cpuid_p())
+		return;
+
+	c->extended_cpuid_level = 0;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
+
+	if (c->cpuid_level >= 0x00000001) {
+		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_HT
-			c->apicid = phys_pkg_id(c->initial_apicid, 0);
-			c->phys_proc_id = c->initial_apicid;
+		c->apicid = phys_pkg_id(c->initial_apicid, 0);
+		c->phys_proc_id = c->initial_apicid;
 #else
-			c->apicid = c->initial_apicid;
+		c->apicid = c->initial_apicid;
 #endif
-			if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
-				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
-		} else {
-			/* Have CPUID level 0 only - unheard of */
-			c->x86 = 4;
-		}
+	}
 
-		/* AMD-defined flags: level 0x80000001 */
-		xlvl = cpuid_eax(0x80000000);
-		if ((xlvl & 0xffff0000) == 0x80000000) {
-			if (xlvl >= 0x80000001) {
-				c->x86_capability[1] = cpuid_edx(0x80000001);
-				c->x86_capability[6] = cpuid_ecx(0x80000001);
-			}
-			if (xlvl >= 0x80000004)
-				get_model_name(c); /* Default name */
-		}
+	if (c->extended_cpuid_level >= 0x80000004)
+		get_model_name(c); /* Default name */
 
 	init_scattered_cpuid_features(c);
 	detect_nopl(c);
-	}
 }
 
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-{
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
-
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
-	}
-}
-
-static int __init x86_serial_nr_setup(char *s)
-{
-	disable_x86_serial_nr = 0;
-	return 1;
-}
-__setup("serialnumber", x86_serial_nr_setup);
-
 /*
  * This does the hard work of actually picking apart the CPU stuff...
@@ -529,7 +586,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (c != &boot_cpu_data) {
 		/* AND the already accumulated flags with these */
-		for (i = 0 ; i < NCAPINTS ; i++)
+		for (i = 0; i < NCAPINTS; i++)
 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}
 
@@ -558,51 +615,48 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 	mtrr_ap_init();
 }
 
-#ifdef CONFIG_X86_HT
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-{
-	u32 eax, ebx, ecx, edx;
-	int index_msb, core_bits;
-
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
-	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-		return;
-
-	smp_num_siblings = (ebx & 0xff0000) >> 16;
+struct msr_range {
+	unsigned	min;
+	unsigned	max;
+};
 
-	if (smp_num_siblings == 1) {
-		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
 
-		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of the "
-					"siblings %d", smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
 		}
+	}
+}
 
-		index_msb = get_count_order(smp_num_siblings);
-		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-
-		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-		       c->phys_proc_id);
-
-		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-
-		index_msb = get_count_order(smp_num_siblings) ;
-
-		core_bits = get_count_order(c->x86_max_cores);
+static int show_msr __cpuinitdata;
 
-		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-					       ((1 << core_bits) - 1);
+static __init int setup_show_msr(char *arg)
+{
+	int num;
 
-		if (c->x86_max_cores > 1)
-			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-			       c->cpu_core_id);
-	}
+	get_option(&arg, &num);
+	if (num > 0)
+		show_msr = num;
+	return 1;
 }
-#endif
+__setup("show_msr=", setup_show_msr);
 
 static __init int setup_noclflush(char *arg)
 {
@@ -621,17 +675,25 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 	vendor = c->x86_vendor_id;
 
 	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-		printk("%s ", vendor);
+		printk(KERN_CONT "%s ", vendor);
 
-	if (!c->x86_model_id[0])
-		printk("%d86", c->x86);
+	if (c->x86_model_id[0])
+		printk(KERN_CONT "%s", c->x86_model_id);
 	else
-		printk("%s", c->x86_model_id);
+		printk(KERN_CONT "%d86", c->x86);
 
 	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(" stepping %02x\n", c->x86_mask);
+		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
-		printk("\n");
+		printk(KERN_CONT "\n");
+
+#ifdef CONFIG_SMP
+	if (c->cpu_index < show_msr)
+		print_cpu_msr();
+#else
+	if (show_msr)
+		print_cpu_msr();
+#endif
 }
 
 static __init int setup_disablecpuid(char *arg)
@@ -647,19 +709,6 @@ __setup("clearcpuid=", setup_disablecpuid);
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
-void __init early_cpu_init(void)
-{
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start ;
-	     cvdev < __x86cpuvendor_end   ;
-	     cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-
-	early_cpu_detect();
-	validate_pat_support(&boot_cpu_data);
-}
-
 /* Make sure %fs is initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
@@ -668,18 +717,6 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	return regs;
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
-{
-	struct desc_ptr gdt_descr;
-
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
-	gdt_descr.size = GDT_SIZE - 1;
-	load_gdt(&gdt_descr);
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-}
-
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
......
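Editor's aside, a call-graph sketch of the reworked boot-CPU path as reconstructed from the hunks above (all names come from the patch itself; this is orientation, not authoritative documentation):

	early_cpu_init()
		/* collect vendor drivers from the .x86_cpu_dev.init section, in link order */
		for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++)
			cpu_devs[count++] = *cdev;
		early_identify_cpu(&boot_cpu_data);
			cpu_detect(c);             /* vendor string, family/model/stepping, clflush size */
			get_cpu_vendor(c);         /* sets this_cpu and c->x86_vendor via c_x86_vendor */
			get_cpu_cap(c);            /* capability words plus extended_cpuid_level */
			this_cpu->c_early_init(c); /* optional vendor hook, e.g. early_init_amd() */
			validate_pat_support(c);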
@@ -37,6 +37,8 @@
 
 #include "cpu.h"
 
+static struct cpu_dev *this_cpu __cpuinitdata;
+
 /* We need valid kernel segments for data and code in long mode too
  * IRET will check the segment types  kkeil 2000/10/28
  * Also sysret mandates a special GDT layout
@@ -66,7 +68,7 @@ void switch_to_new_gdt(void)
 	load_gdt(&gdt_descr);
 }
 
-struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -76,12 +78,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_init	= default_init,
 	.c_vendor = "Unknown",
+	.c_x86_vendor = X86_VENDOR_UNKNOWN,
 };
-static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
+	char *p, *q;
 
 	if (c->extended_cpuid_level < 0x80000004)
 		return 0;
@@ -91,35 +94,49 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 	c->x86_model_id[48] = 0;
+
+	/* Intel chips right-justify this string for some dumb reason;
+	   undo that brain damage */
+	p = q = &c->x86_model_id[0];
+	while (*p == ' ')
+		p++;
+	if (p != q) {
+		while (*p)
+			*q++ = *p++;
+		while (q <= &c->x86_model_id[48])
+			*q++ = '\0';	/* Zero-pad the rest */
+	}
+
 	return 1;
 }
 
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, ebx, ecx, edx;
+	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
 	n = c->extended_cpuid_level;
 
 	if (n >= 0x80000005) {
 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
-		       "D cache %dK (%d bytes/line)\n",
+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
 		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
 		c->x86_cache_size = (ecx>>24) + (edx>>24);
 		/* On K8 L1 TLB is inclusive, so don't count it */
 		c->x86_tlbsize = 0;
 	}
 
-	if (n >= 0x80000006) {
-		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
-		ecx = cpuid_ecx(0x80000006);
-		c->x86_cache_size = ecx >> 16;
-		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
+	if (n < 0x80000006)	/* Some chips just has a large L1. */
+		return;
 
-		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-		       c->x86_cache_size, ecx & 0xFF);
-	}
+	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
+	l2size = ecx >> 16;
+
+	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
+	c->x86_cache_size = l2size;
+
+	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+			l2size, ecx & 0xFF);
 }
 
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -128,14 +145,13 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
 
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
-
 	if (!cpu_has(c, X86_FEATURE_HT))
 		return;
+
 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 		goto out;
 
+	cpuid(1, &eax, &ebx, &ecx, &edx);
+
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
 
 	if (smp_num_siblings == 1) {
@@ -143,8 +159,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	} else if (smp_num_siblings > 1) {
 
 		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of "
-			       "siblings %d", smp_num_siblings);
+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
+					smp_num_siblings);
 			smp_num_siblings = 1;
 			return;
 		}
@@ -161,6 +177,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		c->cpu_core_id = phys_pkg_id(index_msb) &
 					       ((1 << core_bits) - 1);
 	}
+
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
 		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
@@ -168,7 +185,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
 		       c->cpu_core_id);
 	}
-
 #endif
 }
 
@@ -179,41 +195,148 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 	static int printed;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
-		if (cpu_devs[i]) {
-			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
-			    (cpu_devs[i]->c_ident[1] &&
-			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
-				c->x86_vendor = i;
-				this_cpu = cpu_devs[i];
-				return;
-			}
+		if (!cpu_devs[i])
+			break;
+
+		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+		    (cpu_devs[i]->c_ident[1] &&
+		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+			this_cpu = cpu_devs[i];
+			c->x86_vendor = this_cpu->c_x86_vendor;
+			return;
 		}
 	}
+
 	if (!printed) {
 		printed++;
 		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
+
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	this_cpu = &default_cpu;
+}
+
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
+{
+	/* Get vendor name */
+	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
+	      (unsigned int *)&c->x86_vendor_id[0],
+	      (unsigned int *)&c->x86_vendor_id[8],
+	      (unsigned int *)&c->x86_vendor_id[4]);
+
+	c->x86 = 4;
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 junk, tfms, cap0, misc;
+
+		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+		c->x86 = (tfms >> 8) & 0xf;
+		c->x86_model = (tfms >> 4) & 0xf;
+		c->x86_mask = tfms & 0xf;
+		if (c->x86 == 0xf)
+			c->x86 += (tfms >> 20) & 0xff;
+		if (c->x86 >= 0x6)
+			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+		if (cap0 & (1<<19)) {
+			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+			c->x86_cache_alignment = c->x86_clflush_size;
+		}
+	}
+}
+
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+{
+	u32 tfms, xlvl;
+	u32 ebx;
+
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 capability, excap;
+
+		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+		c->x86_capability[0] = capability;
+		c->x86_capability[4] = excap;
+	}
+
+	/* AMD-defined flags: level 0x80000001 */
+	xlvl = cpuid_eax(0x80000000);
+	c->extended_cpuid_level = xlvl;
+	if ((xlvl & 0xffff0000) == 0x80000000) {
+		if (xlvl >= 0x80000001) {
+			c->x86_capability[1] = cpuid_edx(0x80000001);
+			c->x86_capability[6] = cpuid_ecx(0x80000001);
+		}
+	}
+
+	/* Transmeta-defined flags: level 0x80860001 */
+	xlvl = cpuid_eax(0x80860000);
+	if ((xlvl & 0xffff0000) == 0x80860000) {
+		/* Don't set x86_cpuid_level here for now to not confuse. */
+		if (xlvl >= 0x80860001)
+			c->x86_capability[2] = cpuid_edx(0x80860001);
+	}
+
+	if (c->extended_cpuid_level >= 0x80000007)
+		c->x86_power = cpuid_edx(0x80000007);
+
+	if (c->extended_cpuid_level >= 0x80000008) {
+		u32 eax = cpuid_eax(0x80000008);
+
+		c->x86_virt_bits = (eax >> 8) & 0xff;
+		c->x86_phys_bits = eax & 0xff;
+	}
 }
 
-static void __init early_cpu_support_print(void)
+/* Do some early cpuid on the boot CPU to get some parameter that are
+   needed before check_bugs. Everything advanced is in identify_cpu
+   below. */
+static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+{
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
+
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+	c->extended_cpuid_level = 0;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
+
+	if (this_cpu->c_early_init)
+		this_cpu->c_early_init(c);
+
+	validate_pat_support(c);
+}
+
+void __init early_cpu_init(void)
 {
-	int i,j;
-	struct cpu_dev *cpu_devx;
+	struct cpu_dev **cdev;
+	int count = 0;
 
 	printk("KERNEL supported cpus:\n");
-	for (i = 0; i < X86_VENDOR_NUM; i++) {
-		cpu_devx = cpu_devs[i];
-		if (!cpu_devx)
-			continue;
+	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
+		struct cpu_dev *cpudev = *cdev;
+		unsigned int j;
+
+		if (count >= X86_VENDOR_NUM)
+			break;
+		cpu_devs[count] = cpudev;
+		count++;
+
 		for (j = 0; j < 2; j++) {
-			if (!cpu_devx->c_ident[j])
+			if (!cpudev->c_ident[j])
 				continue;
-			printk("  %s %s\n", cpu_devx->c_vendor,
-				cpu_devx->c_ident[j]);
+			printk("  %s %s\n", cpudev->c_vendor,
+				cpudev->c_ident[j]);
 		}
 	}
+
+	early_identify_cpu(&boot_cpu_data);
 }
 
 /*
@@ -249,111 +372,26 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
-
-void __init early_cpu_init(void)
-{
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start ;
-	     cvdev < __x86cpuvendor_end   ;
-	     cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-
-	early_cpu_support_print();
-	early_identify_cpu(&boot_cpu_data);
-}
-
-/* Do some early cpuid on the boot CPU to get some parameter that are
-   needed before check_bugs. Everything advanced is in identify_cpu
-   below. */
-static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
+static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
-	u32 tfms, xlvl;
-
-	c->loops_per_jiffy = loops_per_jiffy;
-	c->x86_cache_size = -1;
-	c->x86_vendor = X86_VENDOR_UNKNOWN;
-	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
-	c->x86_vendor_id[0] = '\0'; /* Unset */
-	c->x86_model_id[0] = '\0';  /* Unset */
-	c->x86_clflush_size = 64;
-	c->x86_cache_alignment = c->x86_clflush_size;
-	c->x86_max_cores = 1;
-	c->x86_coreid_bits = 0;
 	c->extended_cpuid_level = 0;
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
-	/* Get vendor name */
-	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-	      (unsigned int *)&c->x86_vendor_id[0],
-	      (unsigned int *)&c->x86_vendor_id[8],
-	      (unsigned int *)&c->x86_vendor_id[4]);
+	cpu_detect(c);
 
 	get_cpu_vendor(c);
 
-	/* Initialize the standard set of capabilities */
-	/* Note that the vendor-specific code below might override */
-
-	/* Intel-defined flags: level 0x00000001 */
-	if (c->cpuid_level >= 0x00000001) {
-		__u32 misc;
-		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
-		      &c->x86_capability[0]);
-		c->x86 = (tfms >> 8) & 0xf;
-		c->x86_model = (tfms >> 4) & 0xf;
-		c->x86_mask = tfms & 0xf;
-		if (c->x86 == 0xf)
-			c->x86 += (tfms >> 20) & 0xff;
-		if (c->x86 >= 0x6)
-			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
-			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
-	} else {
-		/* Have CPUID level 0 only - unheard of */
-		c->x86 = 4;
-	}
+	get_cpu_cap(c);
 
 	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
 #ifdef CONFIG_SMP
 	c->phys_proc_id = c->initial_apicid;
 #endif
 
-	/* AMD-defined flags: level 0x80000001 */
-	xlvl = cpuid_eax(0x80000000);
-	c->extended_cpuid_level = xlvl;
-	if ((xlvl & 0xffff0000) == 0x80000000) {
-		if (xlvl >= 0x80000001) {
-			c->x86_capability[1] = cpuid_edx(0x80000001);
-			c->x86_capability[6] = cpuid_ecx(0x80000001);
-		}
-		if (xlvl >= 0x80000004)
-			get_model_name(c); /* Default name */
-	}
-
-	/* Transmeta-defined flags: level 0x80860001 */
-	xlvl = cpuid_eax(0x80860000);
-	if ((xlvl & 0xffff0000) == 0x80860000) {
-		/* Don't set x86_cpuid_level here for now to not confuse. */
-		if (xlvl >= 0x80860001)
-			c->x86_capability[2] = cpuid_edx(0x80860001);
-	}
-
-	if (c->extended_cpuid_level >= 0x80000007)
-		c->x86_power = cpuid_edx(0x80000007);
-
-	if (c->extended_cpuid_level >= 0x80000008) {
-		u32 eax = cpuid_eax(0x80000008);
-
-		c->x86_virt_bits = (eax >> 8) & 0xff;
-		c->x86_phys_bits = eax & 0xff;
-	}
+	if (c->extended_cpuid_level >= 0x80000004)
+		get_model_name(c); /* Default name */
 
 	init_scattered_cpuid_features(c);
-	detect_nopl(c);
 
-	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
-	    cpu_devs[c->x86_vendor]->c_early_init)
-		cpu_devs[c->x86_vendor]->c_early_init(c);
-
-	validate_pat_support(c);
+	detect_nopl(c);
 }
 
 /*
@@ -363,9 +401,19 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
-	early_identify_cpu(c);
+	c->loops_per_jiffy = loops_per_jiffy;
+	c->x86_cache_size = -1;
+	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
+	c->x86_vendor_id[0] = '\0'; /* Unset */
+	c->x86_model_id[0] = '\0';  /* Unset */
+	c->x86_max_cores = 1;
+	c->x86_coreid_bits = 0;
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
-	init_scattered_cpuid_features(c);
+	generic_identify(c);
 
 	c->apicid = phys_pkg_id(0);
 
@@ -411,7 +459,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
 }
 
-void __cpuinit identify_boot_cpu(void)
+void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
 }
@@ -423,6 +471,49 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 	mtrr_ap_init();
 }
 
+struct msr_range {
+	unsigned	min;
+	unsigned	max;
+};
+
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
+
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+		}
+	}
+}
+
+static int show_msr __cpuinitdata;
+
+static __init int setup_show_msr(char *arg)
+{
+	int num;
+
+	get_option(&arg, &num);
+	if (num > 0)
+		show_msr = num;
+	return 1;
+}
+__setup("show_msr=", setup_show_msr);
+
 static __init int setup_noclflush(char *arg)
 {
 	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
@@ -439,6 +530,14 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
 		printk(KERN_CONT "\n");
+
+#ifdef CONFIG_SMP
+	if (c->cpu_index < show_msr)
+		print_cpu_msr();
+#else
+	if (show_msr)
+		print_cpu_msr();
+#endif
 }
 
 static __init int setup_disablecpuid(char *arg)
......
@@ -21,21 +21,15 @@ struct cpu_dev {
 	void		(*c_init)(struct cpuinfo_x86 * c);
 	void		(*c_identify)(struct cpuinfo_x86 * c);
 	unsigned int	(*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
+	int	c_x86_vendor;
 };
 
-extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
+#define cpu_dev_register(cpu_devX) \
+	static struct cpu_dev *__cpu_dev_##cpu_devX __used \
+	__attribute__((__section__(".x86_cpu_dev.init"))) = \
+	&cpu_devX;
 
-struct cpu_vendor_dev {
-	int vendor;
-	struct cpu_dev *cpu_dev;
-};
-
-#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
-	static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
-	__attribute__((__section__(".x86cpuvendor.init"))) = \
-	{ cpu_vendor_id, cpu_dev }
-
-extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];
+extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];
 
 extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
......
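Editor's aside (a sketch, not part of the patch): with the vendor id carried inside struct cpu_dev as c_x86_vendor, registration no longer needs the cpu_vendor_dev wrapper keyed by vendor index. A line such as cpu_dev_register(intel_cpu_dev) expands, per the macro above, to roughly:

	static struct cpu_dev *__cpu_dev_intel_cpu_dev __used
		__attribute__((__section__(".x86_cpu_dev.init"))) =
		&intel_cpu_dev;   /* pointer placed in the .x86_cpu_dev.init section */

early_cpu_init() then walks the pointers between __x86_cpu_dev_start[] and __x86_cpu_dev_end[] (the bounds supplied by the linker-script hunks later in this commit) and fills cpu_devs[] in link order.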
@@ -15,13 +15,11 @@
 /*
  * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
  */
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned char ccr2, ccr3;
-	unsigned long flags;
 
 	/* we test for DEVID by checking whether CCR3 is writable */
-	local_irq_save(flags);
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, ccr3 ^ 0x80);
 	getCx86(0xc0);   /* dummy to change bus */
@@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 		*dir0 = getCx86(CX86_DIR0);
 		*dir1 = getCx86(CX86_DIR1);
 	}
-	local_irq_restore(flags);
 }
 
+static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__do_cyrix_devid(dir0, dir1);
+	local_irq_restore(flags);
+}
+
 /*
  * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
  * order to identify the Cyrix CPU model after we're out of setup.c
@@ -161,6 +166,24 @@ static void __cpuinit geode_configure(void)
 	local_irq_restore(flags);
 }
 
+static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
+{
+	unsigned char dir0, dir0_msn, dir1 = 0;
+
+	__do_cyrix_devid(&dir0, &dir1);
+
+	dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
+
+	switch (dir0_msn) {
+	case 3: /* 6x86/6x86L */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	case 5: /* 6x86MX/M II */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	}
+}
+
 static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 {
@@ -416,16 +439,19 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Cyrix",
 	.c_ident	= { "CyrixInstead" },
+	.c_early_init	= early_init_cyrix,
 	.c_init		= init_cyrix,
 	.c_identify	= cyrix_identify,
+	.c_x86_vendor	= X86_VENDOR_CYRIX,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev);
+cpu_dev_register(cyrix_cpu_dev);
 
 static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
 	.c_vendor	= "NSC",
 	.c_ident	= { "Geode by NSC" },
 	.c_init		= init_nsc,
+	.c_x86_vendor	= X86_VENDOR_NSC,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev);
+cpu_dev_register(nsc_cpu_dev);
@@ -303,9 +303,10 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
 	.c_size_cache	= intel_size_cache,
+	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
+cpu_dev_register(intel_cpu_dev);
 
 /* arch_initcall(intel_cpu_init); */
@@ -90,6 +90,7 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_ident	= { "GenuineIntel" },
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
+	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
+cpu_dev_register(intel_cpu_dev);
@@ -102,6 +102,7 @@ static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
 	.c_ident	= { "GenuineTMx86", "TransmetaCPU" },
 	.c_init		= init_transmeta,
 	.c_identify	= transmeta_identify,
+	.c_x86_vendor	= X86_VENDOR_TRANSMETA,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev);
+cpu_dev_register(transmeta_cpu_dev);
@@ -19,7 +19,8 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = {
 		}
 	},
 	},
+	.c_x86_vendor	= X86_VENDOR_UMC,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev);
+cpu_dev_register(umc_cpu_dev);
@@ -330,6 +330,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 #endif
 	.wbinvd = native_wbinvd,
 	.read_msr = native_read_msr_safe,
+	.read_msr_amd = native_read_msr_amd_safe,
 	.write_msr = native_write_msr_safe,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
......
@@ -339,9 +339,8 @@ static void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		unsigned long *stack, unsigned long bp, char *log_lvl)
 {
-	printk("\nCall Trace:\n");
+	printk("Call Trace:\n");
 	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
-	printk("\n");
 }
 
 void show_trace(struct task_struct *task, struct pt_regs *regs,
@@ -386,6 +385,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		printk(" %016lx", *stack++);
 		touch_nmi_watchdog();
 	}
+	printk("\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
@@ -443,7 +443,6 @@ void show_registers(struct pt_regs *regs)
 		printk("Stack: ");
 		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
 				regs->bp, "");
-		printk("\n");
 
 		printk(KERN_EMERG "Code: ");
......
@@ -140,10 +140,10 @@ SECTIONS
 	*(.con_initcall.init)
 	__con_initcall_end = .;
   }
-  .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
-	__x86cpuvendor_start = .;
-	*(.x86cpuvendor.init)
-	__x86cpuvendor_end = .;
+  .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
+	__x86_cpu_dev_start = .;
+	*(.x86_cpu_dev.init)
+	__x86_cpu_dev_end = .;
   }
   SECURITY_INIT
   . = ALIGN(4);
......
@@ -168,13 +168,12 @@ SECTIONS
 	*(.con_initcall.init)
   }
   __con_initcall_end = .;
-  . = ALIGN(16);
-  __x86cpuvendor_start = .;
-  .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
-	*(.x86cpuvendor.init)
+  __x86_cpu_dev_start = .;
+  .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
+	*(.x86_cpu_dev.init)
   }
-  __x86cpuvendor_end = .;
   SECURITY_INIT
+  __x86_cpu_dev_end = .;
 
   . = ALIGN(8);
   .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
......
@@ -63,6 +63,22 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 	return EAX_EDX_VAL(val, low, high);
 }
 
+static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
+						      int *err)
+{
+	DECLARE_ARGS(val, low, high);
+
+	asm volatile("2: rdmsr ; xor %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3:  mov %3,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     _ASM_EXTABLE(2b, 3b)
+		     : "=r" (*err), EAX_EDX_RET(val, low, high)
+		     : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
+	return EAX_EDX_VAL(val, low, high);
+}
+
 static inline void native_write_msr(unsigned int msr,
 				    unsigned low, unsigned high)
 {
@@ -158,6 +174,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = native_read_msr_safe(msr, &err);
 	return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = native_read_msr_amd_safe(msr, &err);
+	return err;
+}
 
 #define rdtscl(low)						\
 	((low) = (u32)native_read_tsc())
......
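Editor's aside (usage sketch, not part of the patch): rdmsrl_amd_safe() keeps the rdmsrl_safe() convention of returning 0 on success; the fixup/exception-table asm above converts a faulting rdmsr into -EFAULT instead of an oops, and the 0x9c5a203a constant bound to %edi (the "D" constraint) is the passcode AMD CPUs expect for reads of certain vendor-specific MSR ranges. print_cpu_msr() in common_64.c uses it in exactly this pattern:

	u64 val;

	/* 0xc0010015 is a hypothetical index inside the 0xc0010000 range dumped above */
	if (!rdmsrl_amd_safe(0xc0010015, &val))   /* zero return means the read succeeded */
		printk(KERN_INFO " MSR%08x: %016llx\n", 0xc0010015, val);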
@@ -137,6 +137,7 @@ struct pv_cpu_ops {
 
 	/* MSR, PMC and TSR operations.
 	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
+	u64 (*read_msr_amd)(unsigned int msr, int *err);
 	u64 (*read_msr)(unsigned int msr, int *err);
 	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
 
@@ -720,6 +721,10 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 {
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
+static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
+{
+	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
+}
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
 	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
@@ -765,6 +770,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = paravirt_read_msr(msr, &err);
 	return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = paravirt_read_msr_amd(msr, &err);
+	return err;
+}
 
 static inline u64 paravirt_read_tsc(void)
 {
......
@@ -77,9 +77,9 @@ struct cpuinfo_x86 {
 	__u8			x86_phys_bits;
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
+#endif
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
-#endif
 	/* Maximum supported CPUID level, -1=no CPUID: */
 	int			cpuid_level;
 	__u32			x86_capability[NCAPINTS];
......