nexedi / linux · Commits · 446d2733
Commit 446d2733 authored Sep 05, 2008 by Ingo Molnar

    Merge branch 'x86/cpu' into x86/core

parents accf0fa6 0a488a53

Showing 21 changed files with 669 additions and 454 deletions (+669 -454)
Documentation/kernel-parameters.txt     +6    -0
arch/x86/kernel/cpu/Makefile            +2    -2
arch/x86/kernel/cpu/amd.c               +7    -5
arch/x86/kernel/cpu/amd_64.c            +2    -2
arch/x86/kernel/cpu/centaur.c           +13   -1
arch/x86/kernel/cpu/centaur_64.c        +2    -1
arch/x86/kernel/cpu/common.c            +313  -276
arch/x86/kernel/cpu/common_64.c         +231  -132
arch/x86/kernel/cpu/cpu.h               +6    -12
arch/x86/kernel/cpu/cyrix.c             +32   -6
arch/x86/kernel/cpu/intel.c             +2    -1
arch/x86/kernel/cpu/intel_64.c          +2    -1
arch/x86/kernel/cpu/transmeta.c         +2    -1
arch/x86/kernel/cpu/umc.c               +2    -1
arch/x86/kernel/paravirt.c              +1    -0
arch/x86/kernel/traps_64.c              +2    -3
arch/x86/kernel/vmlinux_32.lds.S        +4    -4
arch/x86/kernel/vmlinux_64.lds.S        +4    -5
include/asm-x86/msr.h                   +23   -0
include/asm-x86/paravirt.h              +12   -0
include/asm-x86/processor.h             +1    -1
Documentation/kernel-parameters.txt

@@ -1888,6 +1888,12 @@ and is between 256 and 4096 characters. It is defined in the file
 	shapers=	[NET]
 			Maximal number of shapers.
 
+	show_msr=	[x86] show boot-time MSR settings
+			Format: { <integer> }
+			Show boot-time (BIOS-initialized) MSR settings.
+			The parameter means the number of CPUs to show,
+			for example 1 means boot CPU only.
+
 	sim710=		[SCSI,HW]
 			See header of drivers/scsi/sim710.c.
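For orientation: like every entry in this file, show_msr= is given on the kernel command line, so appending show_msr=1 to the boot line dumps the boot CPU's MSRs during startup. The handler, setup_show_msr(), and the printing routine, print_cpu_msr(), are added to common.c and common_64.c later in this diff.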
arch/x86/kernel/cpu/Makefile

@@ -8,14 +8,14 @@ obj-y			+= proc.o capflags.o powerflags.o
 obj-$(CONFIG_X86_32)		+= common.o bugs.o cmpxchg.o
 obj-$(CONFIG_X86_64)		+= common_64.o bugs_64.o
-obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
-obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
 obj-$(CONFIG_CPU_SUP_AMD_32)		+= amd.o
 obj-$(CONFIG_CPU_SUP_AMD_64)		+= amd_64.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
 obj-$(CONFIG_CPU_SUP_CENTAUR_32)	+= centaur.o
 obj-$(CONFIG_CPU_SUP_CENTAUR_64)	+= centaur_64.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
+obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
+obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
 obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
 obj-$(CONFIG_X86_MCE)	+= mcheck/
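In case the repeated intel.o lines look like a duplication: obj-$(CONFIG_FOO) += foo.o is the standard kbuild idiom that expands to obj-y += foo.o when CONFIG_FOO=y (the object is linked into the kernel) and to the ignored obj- list when the option is unset. The Intel entries are simply being moved below the other vendors, not added twice; since registration order now determines the scan order of the new .x86_cpu_dev.init section, the link order matters.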
arch/x86/kernel/cpu/amd.c

@@ -31,6 +31,11 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 		if (c->x86_power & (1 << 8))
 			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
+
+	/* Set MTRR capability flag if appropriate */
+	if (c->x86_model == 13 || c->x86_model == 9 ||
+	    (c->x86_model == 8 && c->x86_mask >= 8))
+		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 				mbytes);
 		}
 
-		/* Set MTRR capability flag if appropriate */
-		if (c->x86_model == 13 || c->x86_model == 9 ||
-		    (c->x86_model == 8 && c->x86_mask >= 8))
-			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 		break;
 	}
@@ -297,6 +298,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
 	.c_size_cache	= amd_size_cache,
+	.c_x86_vendor	= X86_VENDOR_AMD,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
+cpu_dev_register(amd_cpu_dev);
arch/x86/kernel/cpu/amd_64.c

@@ -218,7 +218,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_ident	= { "AuthenticAMD" },
 	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
+	.c_x86_vendor	= X86_VENDOR_AMD,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
+cpu_dev_register(amd_cpu_dev);
arch/x86/kernel/cpu/centaur.c

@@ -314,6 +314,16 @@ enum {
 	EAMD3D		= 1 << 20,
 };
 
+static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+{
+	switch (c->x86) {
+	case 5:
+		/* Emulate MTRRs using Centaur's MCR. */
+		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
+		break;
+	}
+}
+
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
@@ -462,8 +472,10 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
+	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
 	.c_size_cache	= centaur_size_cache,
+	.c_x86_vendor	= X86_VENDOR_CENTAUR,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
+cpu_dev_register(centaur_cpu_dev);
arch/x86/kernel/cpu/centaur_64.c

@@ -29,7 +29,8 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
+	.c_x86_vendor	= X86_VENDOR_CENTAUR,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
+cpu_dev_register(centaur_cpu_dev);
arch/x86/kernel/cpu/common.c

@@ -22,6 +22,8 @@
 #include "cpu.h"
 
+static struct cpu_dev *this_cpu __cpuinitdata;
+
 DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
@@ -58,12 +60,124 @@ DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
-__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
-
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
 
-struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static int __init cachesize_setup(char *str)
+{
+	get_option(&str, &cachesize_override);
+	return 1;
+}
+__setup("cachesize=", cachesize_setup);
+
+/*
+ * Naming convention should be: <Name> [(<Codename>)]
+ * This table only is used unless init_<vendor>() below doesn't set it;
+ * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
+ *
+ */
+
+/* Look up CPU names by table lookup. */
+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+{
+	struct cpu_model_info *info;
+
+	if (c->x86_model >= 16)
+		return NULL;	/* Range check */
+
+	if (!this_cpu)
+		return NULL;
+
+	info = this_cpu->c_models;
+
+	while (info && info->family) {
+		if (info->family == c->x86)
+			return info->model_names[c->x86_model];
+		info++;
+	}
+	return NULL;		/* Not found */
+}
+
+static int __init x86_fxsr_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_FXSR);
+	setup_clear_cpu_cap(X86_FEATURE_XMM);
+	return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
+static int __init x86_sep_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_SEP);
+	return 1;
+}
+__setup("nosep", x86_sep_setup);
+
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(u32 flag)
+{
+	u32 f1, f2;
+
+	asm("pushfl\n\t"
+	    "pushfl\n\t"
+	    "popl %0\n\t"
+	    "movl %0,%1\n\t"
+	    "xorl %2,%0\n\t"
+	    "pushl %0\n\t"
+	    "popfl\n\t"
+	    "pushfl\n\t"
+	    "popl %0\n\t"
+	    "popfl\n\t"
+	    : "=&r" (f1), "=&r" (f2)
+	    : "ir" (flag));
+
+	return ((f1^f2) & flag) != 0;
+}
+
+/* Probe for the CPUID instruction */
+static int __cpuinit have_cpuid_p(void)
+{
+	return flag_is_changeable_p(X86_EFLAGS_ID);
+}
+
+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
+		/* Disable processor serial number */
+		unsigned long lo, hi;
+		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+		lo |= 0x200000;
+		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+		printk(KERN_NOTICE "CPU serial number disabled.\n");
+		clear_cpu_cap(c, X86_FEATURE_PN);
+
+		/* Disabling the serial number may affect the cpuid level */
+		c->cpuid_level = cpuid_eax(0);
+	}
+}
+
+static int __init x86_serial_nr_setup(char *s)
+{
+	disable_x86_serial_nr = 0;
+	return 1;
+}
+__setup("serialnumber", x86_serial_nr_setup);
+
+__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
+
+/* Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one. */
+void switch_to_new_gdt(void)
+{
+	struct desc_ptr gdt_descr;
+
+	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.size = GDT_SIZE - 1;
+	load_gdt(&gdt_descr);
+	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+}
+
+static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -81,22 +195,15 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_init	= default_init,
 	.c_vendor = "Unknown",
+	.c_x86_vendor = X86_VENDOR_UNKNOWN,
 };
-static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
-
-static int __init cachesize_setup(char *str)
-{
-	get_option(&str, &cachesize_override);
-	return 1;
-}
-__setup("cachesize=", cachesize_setup);
 
 int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
 
-	if (cpuid_eax(0x80000000) < 0x80000004)
+	if (c->extended_cpuid_level < 0x80000004)
 		return 0;
 
 	v = (unsigned int *) c->x86_model_id;
@@ -120,24 +227,23 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	return 1;
 }
 
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, ecx, edx, l2size;
+	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
-	n = cpuid_eax(0x80000000);
+	n = c->extended_cpuid_level;
 
 	if (n >= 0x80000005) {
-		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
 		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-		c->x86_cache_size = (ecx>>24)+(edx>>24);
+				edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+		c->x86_cache_size = (ecx>>24) + (edx>>24);
 	}
 
 	if (n < 0x80000006)	/* Some chips just has a large L1. */
 		return;
 
-	ecx = cpuid_ecx(0x80000006);
+	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
 	l2size = ecx >> 16;
 
 	/* do processor-specific cache resizing */
@@ -154,112 +260,90 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 	c->x86_cache_size = l2size;
 
 	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-	       l2size, ecx & 0xFF);
+			l2size, ecx & 0xFF);
 }
 
-/*
- * Naming convention should be: <Name> [(<Codename>)]
- * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
- */
-
-/* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+#ifdef CONFIG_X86_HT
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
-	struct cpu_model_info *info;
+	u32 eax, ebx, ecx, edx;
+	int index_msb, core_bits;
 
-	if (c->x86_model >= 16)
-		return NULL;	/* Range check */
+	if (!cpu_has(c, X86_FEATURE_HT))
+		return;
 
-	if (!this_cpu)
-		return NULL;
+	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+		goto out;
 
-	info = this_cpu->c_models;
+	cpuid(1, &eax, &ebx, &ecx, &edx);
 
-	while (info && info->family) {
-		if (info->family == c->x86)
-			return info->model_names[c->x86_model];
-		info++;
+	smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+	if (smp_num_siblings == 1) {
+		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+	} else if (smp_num_siblings > 1) {
+		if (smp_num_siblings > NR_CPUS) {
+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
+					smp_num_siblings);
+			smp_num_siblings = 1;
+			return;
+		}
+
+		index_msb = get_count_order(smp_num_siblings);
+		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
+
+		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+
+		index_msb = get_count_order(smp_num_siblings);
+
+		core_bits = get_count_order(c->x86_max_cores);
+
+		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+					       ((1 << core_bits) - 1);
 	}
-	return NULL;		/* Not found */
-}
 
+out:
+	if ((c->x86_max_cores * smp_num_siblings) > 1) {
+		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+		       c->cpu_core_id);
+	}
+}
+#endif
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
 	int i;
 	static int printed;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
-		if (cpu_devs[i]) {
-			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
-			    (cpu_devs[i]->c_ident[1] &&
-			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
-				c->x86_vendor = i;
-				if (!early)
-					this_cpu = cpu_devs[i];
-				return;
-			}
+		if (!cpu_devs[i])
+			break;
+
+		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+		    (cpu_devs[i]->c_ident[1] &&
+		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+			this_cpu = cpu_devs[i];
+			c->x86_vendor = this_cpu->c_x86_vendor;
+			return;
 		}
 	}
+
 	if (!printed) {
 		printed++;
 		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
+
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
 	this_cpu = &default_cpu;
 }
 
-static int __init x86_fxsr_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_FXSR);
-	setup_clear_cpu_cap(X86_FEATURE_XMM);
-	return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
-static int __init x86_sep_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_SEP);
-	return 1;
-}
-__setup("nosep", x86_sep_setup);
-
-/* Standard macro to see if a specific flag is changeable */
-static inline int flag_is_changeable_p(u32 flag)
-{
-	u32 f1, f2;
-
-	asm("pushfl\n\t"
-	    "pushfl\n\t"
-	    "popl %0\n\t"
-	    "movl %0,%1\n\t"
-	    "xorl %2,%0\n\t"
-	    "pushl %0\n\t"
-	    "popfl\n\t"
-	    "pushfl\n\t"
-	    "popl %0\n\t"
-	    "popfl\n\t"
-	    : "=&r" (f1), "=&r" (f2)
-	    : "ir" (flag));
-
-	return ((f1^f2) & flag) != 0;
-}
-
-/* Probe for the CPUID instruction */
-static int __cpuinit have_cpuid_p(void)
-{
-	return flag_is_changeable_p(X86_EFLAGS_ID);
-}
-
-void __init cpu_detect(struct cpuinfo_x86 *c)
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -268,50 +352,47 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
 	      (unsigned int *)&c->x86_vendor_id[4]);
 
 	c->x86 = 4;
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-		c->x86 = (tfms >> 8) & 15;
-		c->x86_model = (tfms >> 4) & 15;
+		c->x86 = (tfms >> 8) & 0xf;
+		c->x86_model = (tfms >> 4) & 0xf;
+		c->x86_mask = tfms & 0xf;
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
-			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		c->x86_mask = tfms & 15;
+			c->x86_model += ((tfms >> 16) & 0xf) << 4;
 		if (cap0 & (1<<19)) {
-			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+			c->x86_cache_alignment = c->x86_clflush_size;
 		}
 	}
 }
-static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
+
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
-	unsigned int ebx;
+	u32 ebx;
 
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-	if (have_cpuid_p()) {
-		/* Intel-defined flags: level 0x00000001 */
-		if (c->cpuid_level >= 0x00000001) {
-			u32 capability, excap;
-			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-			c->x86_capability[0] = capability;
-			c->x86_capability[4] = excap;
-		}
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 capability, excap;
+		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+		c->x86_capability[0] = capability;
+		c->x86_capability[4] = excap;
+	}
 
-		/* AMD-defined flags: level 0x80000001 */
-		xlvl = cpuid_eax(0x80000000);
-		if ((xlvl & 0xffff0000) == 0x80000000) {
-			if (xlvl >= 0x80000001) {
-				c->x86_capability[1] = cpuid_edx(0x80000001);
-				c->x86_capability[6] = cpuid_ecx(0x80000001);
-			}
+	/* AMD-defined flags: level 0x80000001 */
+	xlvl = cpuid_eax(0x80000000);
+	c->extended_cpuid_level = xlvl;
+	if ((xlvl & 0xffff0000) == 0x80000000) {
+		if (xlvl >= 0x80000001) {
+			c->x86_capability[1] = cpuid_edx(0x80000001);
+			c->x86_capability[6] = cpuid_ecx(0x80000001);
 		}
 	}
 }
+
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -321,25 +402,54 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
  * WARNING: this function is only called on the BP.  Don't add code here
  * that is supposed to run on all CPUs.
  */
-static void __init early_cpu_detect(void)
+static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-
-	c->x86_cache_alignment = 32;
 	c->x86_clflush_size = 32;
+	c->x86_cache_alignment = c->x86_clflush_size;
 
 	if (!have_cpuid_p())
 		return;
 
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+	c->extended_cpuid_level = 0;
+
 	cpu_detect(c);
 
-	get_cpu_vendor(c, 1);
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
 
-	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
-	    cpu_devs[c->x86_vendor]->c_early_init)
-		cpu_devs[c->x86_vendor]->c_early_init(c);
+	if (this_cpu->c_early_init)
+		this_cpu->c_early_init(c);
 
-	early_get_cap(c);
 	validate_pat_support(c);
 }
 
+void __init early_cpu_init(void)
+{
+	struct cpu_dev **cdev;
+	int count = 0;
+
+	printk("KERNEL supported cpus:\n");
+	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
+		struct cpu_dev *cpudev = *cdev;
+		unsigned int j;
+
+		if (count >= X86_VENDOR_NUM)
+			break;
+		cpu_devs[count] = cpudev;
+		count++;
+
+		for (j = 0; j < 2; j++) {
+			if (!cpudev->c_ident[j])
+				continue;
+			printk("  %s %s\n", cpudev->c_vendor,
+				cpudev->c_ident[j]);
+		}
+	}
+
+	early_identify_cpu(&boot_cpu_data);
+}
+
 /*
@@ -373,86 +483,33 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
-	u32 tfms, xlvl;
-	unsigned int ebx;
-
-	if (have_cpuid_p()) {
-		/* Get vendor name */
-		cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-		      (unsigned int *)&c->x86_vendor_id[0],
-		      (unsigned int *)&c->x86_vendor_id[8],
-		      (unsigned int *)&c->x86_vendor_id[4]);
-
-		get_cpu_vendor(c, 0);
-		/* Initialize the standard set of capabilities */
-		/* Note that the vendor-specific code below might override */
-		/* Intel-defined flags: level 0x00000001 */
-		if (c->cpuid_level >= 0x00000001) {
-			u32 capability, excap;
-			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-			c->x86_capability[0] = capability;
-			c->x86_capability[4] = excap;
-			c->x86 = (tfms >> 8) & 15;
-			c->x86_model = (tfms >> 4) & 15;
-			if (c->x86 == 0xf)
-				c->x86 += (tfms >> 20) & 0xff;
-			if (c->x86 >= 0x6)
-				c->x86_model += ((tfms >> 16) & 0xF) << 4;
-			c->x86_mask = tfms & 15;
-			c->initial_apicid = (ebx >> 24) & 0xFF;
-#ifdef CONFIG_X86_HT
-			c->apicid = phys_pkg_id(c->initial_apicid, 0);
-			c->phys_proc_id = c->initial_apicid;
-#else
-			c->apicid = c->initial_apicid;
-#endif
-			if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
-				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
-		} else {
-			/* Have CPUID level 0 only - unheard of */
-			c->x86 = 4;
-		}
+	if (!have_cpuid_p())
+		return;
 
-		/* AMD-defined flags: level 0x80000001 */
-		xlvl = cpuid_eax(0x80000000);
-		if ((xlvl & 0xffff0000) == 0x80000000) {
-			if (xlvl >= 0x80000001) {
-				c->x86_capability[1] = cpuid_edx(0x80000001);
-				c->x86_capability[6] = cpuid_ecx(0x80000001);
-			}
-			if (xlvl >= 0x80000004)
-				get_model_name(c); /* Default name */
-		}
+	c->extended_cpuid_level = 0;
 
-		init_scattered_cpuid_features(c);
-		detect_nopl(c);
-	}
-}
+	cpu_detect(c);
 
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-{
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
+	get_cpu_vendor(c);
 
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
-	}
-}
+	get_cpu_cap(c);
 
-static int __init x86_serial_nr_setup(char *s)
-{
-	disable_x86_serial_nr = 0;
-	return 1;
-}
-__setup("serialnumber", x86_serial_nr_setup);
+	if (c->cpuid_level >= 0x00000001) {
+		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
+#ifdef CONFIG_X86_HT
+		c->apicid = phys_pkg_id(c->initial_apicid, 0);
+		c->phys_proc_id = c->initial_apicid;
+#else
+		c->apicid = c->initial_apicid;
+#endif
+	}
 
+	if (c->extended_cpuid_level >= 0x80000004)
+		get_model_name(c); /* Default name */
+
+	init_scattered_cpuid_features(c);
+	detect_nopl(c);
+}
 
 /*
  * This does the hard work of actually picking apart the CPU stuff...
@@ -529,7 +586,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (c != &boot_cpu_data) {
 		/* AND the already accumulated flags with these */
-		for (i = 0 ; i < NCAPINTS ; i++)
+		for (i = 0; i < NCAPINTS; i++)
 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}
@@ -558,51 +615,48 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 	mtrr_ap_init();
 }
 
-#ifdef CONFIG_X86_HT
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-{
-	u32 eax, ebx, ecx, edx;
-	int index_msb, core_bits;
-
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
-	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-		return;
-
-	smp_num_siblings = (ebx & 0xff0000) >> 16;
+struct msr_range {
+	unsigned min;
+	unsigned max;
+};
 
-	if (smp_num_siblings == 1) {
-		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
 
-		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of the "
-					"siblings %d", smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
 		}
-
-		index_msb = get_count_order(smp_num_siblings);
-		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-
-		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
-		       c->phys_proc_id);
-
-		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-
-		index_msb = get_count_order(smp_num_siblings) ;
-
-		core_bits = get_count_order(c->x86_max_cores);
+	}
+}
 
-		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-					       ((1 << core_bits) - 1);
+static int show_msr __cpuinitdata;
+static __init int setup_show_msr(char *arg)
+{
+	int num;
 
-		if (c->x86_max_cores > 1)
-			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
-			       c->cpu_core_id);
-	}
+	get_option(&arg, &num);
 
+	if (num > 0)
+		show_msr = num;
+	return 1;
 }
-#endif
+__setup("show_msr=", setup_show_msr);
 
 static __init int setup_noclflush(char *arg)
 {
@@ -621,17 +675,25 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 	vendor = c->x86_vendor_id;
 
 	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-		printk("%s ", vendor);
+		printk(KERN_CONT "%s ", vendor);
 
-	if (!c->x86_model_id[0])
-		printk("%d86", c->x86);
+	if (c->x86_model_id[0])
+		printk(KERN_CONT "%s", c->x86_model_id);
 	else
-		printk("%s", c->x86_model_id);
+		printk(KERN_CONT "%d86", c->x86);
 
 	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(" stepping %02x\n", c->x86_mask);
+		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
-		printk("\n");
+		printk(KERN_CONT "\n");
+
+#ifdef CONFIG_SMP
+	if (c->cpu_index < show_msr)
+		print_cpu_msr();
+#else
+	if (show_msr)
+		print_cpu_msr();
+#endif
 }
 
 static __init int setup_disablecpuid(char *arg)
@@ -647,19 +709,6 @@ __setup("clearcpuid=", setup_disablecpuid);
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
-void __init early_cpu_init(void)
-{
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-
-	early_cpu_detect();
-	validate_pat_support(&boot_cpu_data);
-}
-
 /* Make sure %fs is initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
@@ -668,18 +717,6 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	return regs;
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
-{
-	struct desc_ptr gdt_descr;
-
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
-	gdt_descr.size = GDT_SIZE - 1;
-	load_gdt(&gdt_descr);
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-}
-
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
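The sibling/core arithmetic in detect_ht() above is easy to misread, so here is a standalone C sketch of the same decode. get_count_order() is re-implemented to mirror the kernel helper (smallest n with 2^n >= count), and the APIC ID and counts are made-up example values rather than kernel data:

#include <stdio.h>

/* Smallest n such that 2^n >= count (mirrors the kernel's get_count_order). */
static int get_count_order(unsigned int count)
{
	int order = 0;

	while ((1u << order) < count)
		order++;
	return order;
}

int main(void)
{
	unsigned int initial_apicid = 0x5;	/* hypothetical CPUID leaf 1, EBX[31:24] */
	unsigned int siblings = 4;		/* logical CPUs per package */
	unsigned int max_cores = 2;		/* cores per package */

	/* Package ID: drop the bits that number logical CPUs inside it. */
	int index_msb = get_count_order(siblings);
	unsigned int phys_proc_id = initial_apicid >> index_msb;

	/* Core ID: drop the thread bits, then mask to the core-number width. */
	int threads_per_core = siblings / max_cores;
	int core_bits = get_count_order(max_cores);
	unsigned int cpu_core_id =
		(initial_apicid >> get_count_order(threads_per_core)) &
		((1 << core_bits) - 1);

	printf("CPU: Physical Processor ID: %u\n", phys_proc_id);	/* 1 */
	printf("CPU: Processor Core ID: %u\n", cpu_core_id);		/* 0 */
	return 0;
}

With APIC ID 0b101, four siblings and two cores, the decode yields package 1, core 0, matching what detect_ht() would compute for that topology.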
arch/x86/kernel/cpu/common_64.c

@@ -37,6 +37,8 @@
 #include "cpu.h"
 
+static struct cpu_dev *this_cpu __cpuinitdata;
+
 /* We need valid kernel segments for data and code in long mode too
  * IRET will check the segment types  kkeil 2000/10/28
  * Also sysret mandates a special GDT layout
@@ -66,7 +68,7 @@ void switch_to_new_gdt(void)
 	load_gdt(&gdt_descr);
 }
 
-struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -76,12 +78,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_init	= default_init,
 	.c_vendor = "Unknown",
+	.c_x86_vendor = X86_VENDOR_UNKNOWN,
 };
-static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
 
 	if (c->extended_cpuid_level < 0x80000004)
 		return 0;
@@ -91,35 +94,49 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 	c->x86_model_id[48] = 0;
+
 	/* Intel chips right-justify this string for some dumb reason;
 	   undo that brain damage */
 	p = q = &c->x86_model_id[0];
 	while (*p == ' ')
 		p++;
 	if (p != q) {
 		while (*p)
 			*q++ = *p++;
 		while (q <= &c->x86_model_id[48])
 			*q++ = '\0';	/* Zero-pad the rest */
 	}
+
 	return 1;
 }
 
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, ebx, ecx, edx;
+	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
 	n = c->extended_cpuid_level;
 
 	if (n >= 0x80000005) {
 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
-		       "D cache %dK (%d bytes/line)\n",
-		       edx >> 24, edx & 0xFF, ecx >> 24, ecx & 0xFF);
+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+				edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
 		c->x86_cache_size = (ecx >> 24) + (edx >> 24);
 		/* On K8 L1 TLB is inclusive, so don't count it */
 		c->x86_tlbsize = 0;
 	}
 
-	if (n >= 0x80000006) {
-		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
-		ecx = cpuid_ecx(0x80000006);
-		c->x86_cache_size = ecx >> 16;
-		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
+	if (n < 0x80000006)	/* Some chips just has a large L1. */
+		return;
 
-		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-		       c->x86_cache_size, ecx & 0xFF);
-	}
+	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
+	l2size = ecx >> 16;
+
+	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
+
+	c->x86_cache_size = l2size;
+
+	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+			l2size, ecx & 0xFF);
 }
 
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -128,14 +145,13 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
 
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
-
 	if (!cpu_has(c, X86_FEATURE_HT))
 		return;
 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 		goto out;
 
+	cpuid(1, &eax, &ebx, &ecx, &edx);
+
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
 
 	if (smp_num_siblings == 1) {
@@ -143,8 +159,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	} else if (smp_num_siblings > 1) {
 
 		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of "
-			       "siblings %d", smp_num_siblings);
+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
+					smp_num_siblings);
 			smp_num_siblings = 1;
 			return;
 		}
@@ -161,6 +177,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		c->cpu_core_id = phys_pkg_id(index_msb) &
 					       ((1 << core_bits) - 1);
 	}
+
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
 		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
@@ -168,7 +185,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
 		       c->cpu_core_id);
 	}
-
 #endif
 }
 
@@ -179,41 +195,148 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 	static int printed;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
-		if (cpu_devs[i]) {
-			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
-			    (cpu_devs[i]->c_ident[1] &&
-			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
-				c->x86_vendor = i;
-				this_cpu = cpu_devs[i];
-				return;
-			}
+		if (!cpu_devs[i])
+			break;
+
+		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+		    (cpu_devs[i]->c_ident[1] &&
+		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+			this_cpu = cpu_devs[i];
+			c->x86_vendor = this_cpu->c_x86_vendor;
+			return;
 		}
 	}
+
 	if (!printed) {
 		printed++;
 		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
+
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	this_cpu = &default_cpu;
+}
+
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
+{
+	/* Get vendor name */
+	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
+	      (unsigned int *)&c->x86_vendor_id[0],
+	      (unsigned int *)&c->x86_vendor_id[8],
+	      (unsigned int *)&c->x86_vendor_id[4]);
+
+	c->x86 = 4;
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 junk, tfms, cap0, misc;
+		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+		c->x86 = (tfms >> 8) & 0xf;
+		c->x86_model = (tfms >> 4) & 0xf;
+		c->x86_mask = tfms & 0xf;
+		if (c->x86 == 0xf)
+			c->x86 += (tfms >> 20) & 0xff;
+		if (c->x86 >= 0x6)
+			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+		if (cap0 & (1<<19)) {
+			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+			c->x86_cache_alignment = c->x86_clflush_size;
+		}
+	}
 }
 
-static void __init early_cpu_support_print(void)
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
-	int i,j;
-	struct cpu_dev *cpu_devx;
+	u32 tfms, xlvl;
+	u32 ebx;
+
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 capability, excap;
+		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+		c->x86_capability[0] = capability;
+		c->x86_capability[4] = excap;
+	}
+
+	/* AMD-defined flags: level 0x80000001 */
+	xlvl = cpuid_eax(0x80000000);
+	c->extended_cpuid_level = xlvl;
+	if ((xlvl & 0xffff0000) == 0x80000000) {
+		if (xlvl >= 0x80000001) {
+			c->x86_capability[1] = cpuid_edx(0x80000001);
+			c->x86_capability[6] = cpuid_ecx(0x80000001);
+		}
+	}
+
+	/* Transmeta-defined flags: level 0x80860001 */
+	xlvl = cpuid_eax(0x80860000);
+	if ((xlvl & 0xffff0000) == 0x80860000) {
+		/* Don't set x86_cpuid_level here for now to not confuse. */
+		if (xlvl >= 0x80860001)
+			c->x86_capability[2] = cpuid_edx(0x80860001);
+	}
+
+	if (c->extended_cpuid_level >= 0x80000007)
+		c->x86_power = cpuid_edx(0x80000007);
+
+	if (c->extended_cpuid_level >= 0x80000008) {
+		u32 eax = cpuid_eax(0x80000008);
+
+		c->x86_virt_bits = (eax >> 8) & 0xff;
+		c->x86_phys_bits = eax & 0xff;
+	}
+}
+
+/* Do some early cpuid on the boot CPU to get some parameter that are
+   needed before check_bugs. Everything advanced is in identify_cpu
+   below. */
+static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+{
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
+
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+	c->extended_cpuid_level = 0;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
 
+	if (this_cpu->c_early_init)
+		this_cpu->c_early_init(c);
+
+	validate_pat_support(c);
+}
+
+void __init early_cpu_init(void)
+{
+	struct cpu_dev **cdev;
+	int count = 0;
+
 	printk("KERNEL supported cpus:\n");
-	for (i = 0; i < X86_VENDOR_NUM; i++) {
-		cpu_devx = cpu_devs[i];
-		if (!cpu_devx)
-			continue;
+	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
+		struct cpu_dev *cpudev = *cdev;
+		unsigned int j;
+
+		if (count >= X86_VENDOR_NUM)
+			break;
+		cpu_devs[count] = cpudev;
+		count++;
+
 		for (j = 0; j < 2; j++) {
-			if (!cpu_devx->c_ident[j])
+			if (!cpudev->c_ident[j])
 				continue;
-			printk("  %s %s\n", cpu_devx->c_vendor,
-				cpu_devx->c_ident[j]);
+			printk("  %s %s\n", cpudev->c_vendor,
+				cpudev->c_ident[j]);
 		}
 	}
+
+	early_identify_cpu(&boot_cpu_data);
 }
 
 /*
@@ -249,111 +372,26 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
-
-void __init early_cpu_init(void)
+static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-	early_cpu_support_print();
-	early_identify_cpu(&boot_cpu_data);
-}
-
-/* Do some early cpuid on the boot CPU to get some parameter that are
-   needed before check_bugs. Everything advanced is in identify_cpu
-   below. */
-static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
-{
-	u32 tfms, xlvl;
-
-	c->loops_per_jiffy = loops_per_jiffy;
-	c->x86_cache_size = -1;
-	c->x86_vendor = X86_VENDOR_UNKNOWN;
-	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
-	c->x86_vendor_id[0] = '\0'; /* Unset */
-	c->x86_model_id[0] = '\0';  /* Unset */
-	c->x86_clflush_size = 64;
-	c->x86_cache_alignment = c->x86_clflush_size;
-	c->x86_max_cores = 1;
-	c->x86_coreid_bits = 0;
-	c->extended_cpuid_level = 0;
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-
-	/* Get vendor name */
-	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-	      (unsigned int *)&c->x86_vendor_id[0],
-	      (unsigned int *)&c->x86_vendor_id[8],
-	      (unsigned int *)&c->x86_vendor_id[4]);
+	cpu_detect(c);
 
 	get_cpu_vendor(c);
 
-	/* Initialize the standard set of capabilities */
-	/* Note that the vendor-specific code below might override */
-
-	/* Intel-defined flags: level 0x00000001 */
-	if (c->cpuid_level >= 0x00000001) {
-		__u32 misc;
-		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
-		      &c->x86_capability[0]);
-		c->x86 = (tfms >> 8) & 0xf;
-		c->x86_model = (tfms >> 4) & 0xf;
-		c->x86_mask = tfms & 0xf;
-		if (c->x86 == 0xf)
-			c->x86 += (tfms >> 20) & 0xff;
-		if (c->x86 >= 0x6)
-			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
-			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
-	} else {
-		/* Have CPUID level 0 only - unheard of */
-		c->x86 = 4;
-	}
+	get_cpu_cap(c);
 
 	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
 #ifdef CONFIG_SMP
 	c->phys_proc_id = c->initial_apicid;
 #endif
-	/* AMD-defined flags: level 0x80000001 */
-	xlvl = cpuid_eax(0x80000000);
-	c->extended_cpuid_level = xlvl;
-	if ((xlvl & 0xffff0000) == 0x80000000) {
-		if (xlvl >= 0x80000001) {
-			c->x86_capability[1] = cpuid_edx(0x80000001);
-			c->x86_capability[6] = cpuid_ecx(0x80000001);
-		}
-		if (xlvl >= 0x80000004)
-			get_model_name(c); /* Default name */
-	}
 
-	/* Transmeta-defined flags: level 0x80860001 */
-	xlvl = cpuid_eax(0x80860000);
-	if ((xlvl & 0xffff0000) == 0x80860000) {
-		/* Don't set x86_cpuid_level here for now to not confuse. */
-		if (xlvl >= 0x80860001)
-			c->x86_capability[2] = cpuid_edx(0x80860001);
-	}
-
-	if (c->extended_cpuid_level >= 0x80000007)
-		c->x86_power = cpuid_edx(0x80000007);
-
-	if (c->extended_cpuid_level >= 0x80000008) {
-		u32 eax = cpuid_eax(0x80000008);
-
-		c->x86_virt_bits = (eax >> 8) & 0xff;
-		c->x86_phys_bits = eax & 0xff;
-	}
+	if (c->extended_cpuid_level >= 0x80000004)
+		get_model_name(c); /* Default name */
 
 	init_scattered_cpuid_features(c);
 	detect_nopl(c);
 }
 
 /*
@@ -363,9 +401,19 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
-	early_identify_cpu(c);
+	c->loops_per_jiffy = loops_per_jiffy;
+	c->x86_cache_size = -1;
+	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
+	c->x86_vendor_id[0] = '\0'; /* Unset */
+	c->x86_model_id[0] = '\0';  /* Unset */
+	c->x86_max_cores = 1;
+	c->x86_coreid_bits = 0;
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
-	init_scattered_cpuid_features(c);
+	generic_identify(c);
 
 	c->apicid = phys_pkg_id(0);
@@ -411,7 +459,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 }
 
-void __cpuinit identify_boot_cpu(void)
+void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
 }
@@ -423,6 +471,49 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 	mtrr_ap_init();
 }
 
+struct msr_range {
+	unsigned min;
+	unsigned max;
+};
+
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
+
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+		}
+	}
+}
+
+static int show_msr __cpuinitdata;
+static __init int setup_show_msr(char *arg)
+{
+	int num;
+
+	get_option(&arg, &num);
+
+	if (num > 0)
+		show_msr = num;
+	return 1;
+}
+__setup("show_msr=", setup_show_msr);
+
 static __init int setup_noclflush(char *arg)
 {
 	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
@@ -439,6 +530,14 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
 		printk(KERN_CONT "\n");
+
+#ifdef CONFIG_SMP
+	if (c->cpu_index < show_msr)
+		print_cpu_msr();
+#else
+	if (show_msr)
+		print_cpu_msr();
+#endif
 }
 
 static __init int setup_disablecpuid(char *arg)
arch/x86/kernel/cpu/cpu.h

@@ -21,21 +21,15 @@ struct cpu_dev {
 	void		(*c_init)(struct cpuinfo_x86 *c);
 	void		(*c_identify)(struct cpuinfo_x86 *c);
 	unsigned int	(*c_size_cache)(struct cpuinfo_x86 *c, unsigned int size);
+	int		c_x86_vendor;
 };
 
-extern struct cpu_dev *cpu_devs[X86_VENDOR_NUM];
-
-struct cpu_vendor_dev {
-	int vendor;
-	struct cpu_dev *cpu_dev;
-};
-
-#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
-	static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
-	__attribute__((__section__(".x86cpuvendor.init"))) = \
-	{ cpu_vendor_id, cpu_dev }
+#define cpu_dev_register(cpu_devX) \
+	static struct cpu_dev *__cpu_dev_##cpu_devX __used \
+	__attribute__((__section__(".x86_cpu_dev.init"))) = \
+	&cpu_devX;
 
-extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];
+extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];
 
 extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
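The new cpu_dev_register() macro works purely through the linker: each vendor file drops a pointer into the .x86_cpu_dev.init section, and early_cpu_init() (in common.c/common_64.c above) walks the pointers between __x86_cpu_dev_start and __x86_cpu_dev_end. Here is a userspace C sketch of the same idiom. The section is renamed to a valid C identifier so that GNU ld auto-generates the __start_/__stop_ bracketing symbols (the kernel instead defines its symbols explicitly in vmlinux.lds, see the linker-script hunks below); the structs and vendor numbers are trimmed-down stand-ins, and the macro drops the kernel version's trailing semicolon so call sites can supply their own:

/* Sketch of linker-section-based registration; builds with gcc on Linux/ELF. */
#include <stdio.h>

struct cpu_dev {
	const char *c_vendor;
	const char *c_ident[2];
	int c_x86_vendor;
};

#define cpu_dev_register(cpu_devX)				\
	static struct cpu_dev *__cpu_dev_##cpu_devX		\
	__attribute__((used, section("cpu_dev_init"))) = &cpu_devX

static struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
	.c_x86_vendor	= 0,	/* stand-in for X86_VENDOR_INTEL */
};
cpu_dev_register(intel_cpu_dev);

static struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
	.c_x86_vendor	= 2,	/* stand-in for X86_VENDOR_AMD */
};
cpu_dev_register(amd_cpu_dev);

/* GNU ld provides these automatically for the "cpu_dev_init" section. */
extern struct cpu_dev *__start_cpu_dev_init[];
extern struct cpu_dev *__stop_cpu_dev_init[];

int main(void)
{
	struct cpu_dev **cdev;

	/* Mirrors the loop in the new early_cpu_init(). */
	printf("KERNEL supported cpus:\n");
	for (cdev = __start_cpu_dev_init; cdev < __stop_cpu_dev_init; cdev++)
		printf("  %s %s\n", (*cdev)->c_vendor, (*cdev)->c_ident[0]);
	return 0;
}

The design point of the commit is visible here: registration no longer needs a vendor-indexed table filled by hand (the old cpu_vendor_dev_register), because the linker collects all registered drivers and the vendor number now travels inside struct cpu_dev as c_x86_vendor.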
arch/x86/kernel/cpu/cyrix.c

@@ -15,13 +15,11 @@
 /*
  * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
  */
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned char ccr2, ccr3;
-	unsigned long flags;
 
 	/* we test for DEVID by checking whether CCR3 is writable */
-	local_irq_save(flags);
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, ccr3 ^ 0x80);
 	getCx86(0xc0);   /* dummy to change bus */
@@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 		*dir0 = getCx86(CX86_DIR0);
 		*dir1 = getCx86(CX86_DIR1);
 	}
-	local_irq_restore(flags);
+}
+
+static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__do_cyrix_devid(dir0, dir1);
+	local_irq_restore(flags);
 }
 
 /*
  * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
  * order to identify the Cyrix CPU model after we're out of setup.c
@@ -161,6 +166,24 @@ static void __cpuinit geode_configure(void)
 	local_irq_restore(flags);
 }
 
+static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
+{
+	unsigned char dir0, dir0_msn, dir1 = 0;
+
+	__do_cyrix_devid(&dir0, &dir1);
+	dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
+
+	switch (dir0_msn) {
+	case 3: /* 6x86/6x86L */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	case 5: /* 6x86MX/M II */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	}
+}
+
 static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 {
@@ -416,16 +439,19 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Cyrix",
 	.c_ident	= { "CyrixInstead" },
+	.c_early_init	= early_init_cyrix,
 	.c_init		= init_cyrix,
 	.c_identify	= cyrix_identify,
+	.c_x86_vendor	= X86_VENDOR_CYRIX,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev);
+cpu_dev_register(cyrix_cpu_dev);
 
 static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
 	.c_vendor	= "NSC",
 	.c_ident	= { "Geode by NSC" },
 	.c_init		= init_nsc,
+	.c_x86_vendor	= X86_VENDOR_NSC,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev);
+cpu_dev_register(nsc_cpu_dev);
arch/x86/kernel/cpu/intel.c

@@ -303,9 +303,10 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
 	.c_size_cache	= intel_size_cache,
+	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
+cpu_dev_register(intel_cpu_dev);
 
 /* arch_initcall(intel_cpu_init); */
arch/x86/kernel/cpu/intel_64.c

@@ -90,6 +90,7 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_ident	= { "GenuineIntel" },
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
+	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
+cpu_dev_register(intel_cpu_dev);
arch/x86/kernel/cpu/transmeta.c

@@ -102,6 +102,7 @@ static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
 	.c_ident	= { "GenuineTMx86", "TransmetaCPU" },
 	.c_init		= init_transmeta,
 	.c_identify	= transmeta_identify,
+	.c_x86_vendor	= X86_VENDOR_TRANSMETA,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev);
+cpu_dev_register(transmeta_cpu_dev);
arch/x86/kernel/cpu/umc.c

@@ -19,7 +19,8 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = {
 		}
 	  },
 	},
+	.c_x86_vendor	= X86_VENDOR_UMC,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev);
+cpu_dev_register(umc_cpu_dev);
arch/x86/kernel/paravirt.c

@@ -330,6 +330,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 #endif
 	.wbinvd = native_wbinvd,
 	.read_msr = native_read_msr_safe,
+	.read_msr_amd = native_read_msr_amd_safe,
 	.write_msr = native_write_msr_safe,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
arch/x86/kernel/traps_64.c

@@ -339,9 +339,8 @@ static void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		unsigned long *stack, unsigned long bp, char *log_lvl)
 {
-	printk("\nCall Trace:\n");
+	printk("Call Trace:\n");
 	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
-	printk("\n");
 }
 
 void show_trace(struct task_struct *task, struct pt_regs *regs,
@@ -386,6 +385,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 			printk(" %016lx", *stack++);
 		touch_nmi_watchdog();
 	}
+	printk("\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
@@ -443,7 +443,6 @@ void show_registers(struct pt_regs *regs)
 	printk("Stack: ");
 	show_stack_log_lvl(NULL, regs, (unsigned long *)sp, regs->bp, "");
-	printk("\n");
 
 	printk(KERN_EMERG "Code: ");
arch/x86/kernel/vmlinux_32.lds.S

@@ -140,10 +140,10 @@ SECTIONS
 	*(.con_initcall.init)
 	__con_initcall_end = .;
   }
-  .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
-	__x86cpuvendor_start = .;
-	*(.x86cpuvendor.init)
-	__x86cpuvendor_end = .;
+  .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
+	__x86_cpu_dev_start = .;
+	*(.x86_cpu_dev.init)
+	__x86_cpu_dev_end = .;
   }
   SECURITY_INIT
   . = ALIGN(4);
arch/x86/kernel/vmlinux_64.lds.S

@@ -168,13 +168,12 @@ SECTIONS
 	*(.con_initcall.init)
   }
   __con_initcall_end = .;
-  . = ALIGN(16);
-  __x86cpuvendor_start = .;
-  .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
-	*(.x86cpuvendor.init)
+  __x86_cpu_dev_start = .;
+  .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
+	*(.x86_cpu_dev.init)
   }
-  __x86cpuvendor_end = .;
+  __x86_cpu_dev_end = .;
   SECURITY_INIT
 
   . = ALIGN(8);
   .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
include/asm-x86/msr.h

@@ -63,6 +63,22 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 	return EAX_EDX_VAL(val, low, high);
 }
 
+static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
+						      int *err)
+{
+	DECLARE_ARGS(val, low, high);
+
+	asm volatile("2: rdmsr ; xor %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3:  mov %3,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     _ASM_EXTABLE(2b, 3b)
+		     : "=r" (*err), EAX_EDX_RET(val, low, high)
+		     : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
+	return EAX_EDX_VAL(val, low, high);
+}
+
 static inline void native_write_msr(unsigned int msr,
 				    unsigned low, unsigned high)
 {
@@ -158,6 +174,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = native_read_msr_safe(msr, &err);
 	return err;
 }
+
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = native_read_msr_amd_safe(msr, &err);
+	return err;
+}
 
 #define rdtscl(low)						\
 	((low) = (u32)native_read_tsc())
include/asm-x86/paravirt.h

@@ -137,6 +137,7 @@ struct pv_cpu_ops {
 	/* MSR, PMC and TSR operations.
 	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
+	u64 (*read_msr_amd)(unsigned int msr, int *err);
 	u64 (*read_msr)(unsigned int msr, int *err);
 	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
@@ -720,6 +721,10 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 {
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
+static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
+{
+	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
+}
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
 	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
@@ -765,6 +770,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = paravirt_read_msr(msr, &err);
 	return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = paravirt_read_msr_amd(msr, &err);
+	return err;
+}
 
 static inline u64 paravirt_read_tsc(void)
 {
include/asm-x86/processor.h

@@ -77,9 +77,9 @@ struct cpuinfo_x86 {
 	__u8			x86_phys_bits;
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
+#endif
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
-#endif
 	/* Maximum supported CPUID level, -1=no CPUID: */
 	int			cpuid_level;
 	__u32			x86_capability[NCAPINTS];