Commit b6a3d01f authored by Patrick Mochel, committed by Linus Torvalds

Reorganize the mtrr init sequence a bit. All mtrr init now happens during
the initcall sequence, after all CPUs have been brought up. mtrr_init()
calls a static init_other_cpus(), which fires off a function on all other
CPUs to replicate the state across all of them.
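
Condensed from the main.c hunks below, the new flow is roughly this (a
sketch, not a complete listing):

static void init_other_cpus(void)
{
	if (use_intel())
		get_mtrr_state();	/* snapshot the boot CPU's MTRR state */

	/* rendezvous all CPUs; reg == ~0UL means "replay all saved state" */
	set_mtrr(~0UL, 0, 0, 0);

	if (use_intel()) {
		finalize_mtrr_state();
		mtrr_state_warn();	/* report BIOS inconsistencies, if any */
	}
}

On each AP, ipi_handler() checks for that sentinel and calls
mtrr_if->set_all() rather than a single-register set().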

arch/i386/kernel/smpboot.c::smp_callin() had the following: 

#ifdef CONFIG_MTRR
       /*
        * Must be done before calibration delay is computed
        */
       mtrr_init_secondary_cpu ();
#endif


I couldn't figure this one out. The P4 manual says nothing about it, nor
could I find any other documentation; it says only that MTRR state must be
synchronized across all CPUs, which it now is. The synchronization happens
before anything else executes on the other CPUs, and before any devices or
drivers have been brought up.
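
For reference, both vendor paths bracket their MSR/ARR writes with the same
safe-update sequence, factored into prepare_set()/post_set() in the hunks
below (condensed sketch):

/* prepare_set(), condensed */
if (cpu_has_pge) {			/* save CR4, clear Page Global Enable (bit 7) */
	cr4 = read_cr4();
	write_cr4(cr4 & ~(1 << 7));
}
cr0 = read_cr0() | 0x40000000;		/* set CD to disable caching */
wbinvd();				/* wbinvd also flushes the TLBs */
write_cr0(cr0);
wbinvd();

/* ... write the MTRR MSRs or Cyrix ARRs here ... */

/* post_set(), condensed */
wbinvd();				/* flush caches and TLBs again */
write_cr0(read_cr0() & 0xbfffffff);	/* clear CD to re-enable caching */
if (cpu_has_pge)
	write_cr4(cr4);			/* restore CR4 */

The generic version additionally disables the MTRRs via MTRRdefType_MSR
while caching is off, and serializes callers on a spinlock.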

The Cyrix MTRR code was also updated to handle this style of SMP initialization.
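
Condensed, the Cyrix version of set_all() replays the saved CCR and ARR
state inside that same bracket (a sketch assembled from the cyrix.c hunks;
the CCR-replay loop body is elided in the hunk below and stays elided here):

static void cyrix_set_all(void)
{
	int i;

	prepare_set();

	/* the CCRs are not contiguous */
	for (i = 0; i < 4; i++)
		/* ... CCR replay, elided ... */;

	for (i = 0; i < 8; i++)
		cyrix_set_arr(i, arr_state[i].base,
			      arr_state[i].size, arr_state[i].type);

	post_set();
}
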
parent 2b5d7502
@@ -110,12 +110,54 @@ cyrix_get_free_region(unsigned long base, unsigned long size)
return -ENOSPC;
}
static u32 cr4 = 0;
static u32 ccr3;
static void prepare_set(void)
{
u32 cr0;
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if ( cpu_has_pge ) {
cr4 = read_cr4();
write_cr4(cr4 & (unsigned char) ~(1 << 7));
}
/* Disable and flush caches. Note that wbinvd flushes the TLBs as
a side-effect */
cr0 = read_cr0() | 0x40000000;
wbinvd();
write_cr0(cr0);
wbinvd();
/* Cyrix ARRs - everything else were excluded at the top */
ccr3 = getCx86(CX86_CCR3);
/* Cyrix ARRs - everything else were excluded at the top */
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
}
static void post_set(void)
{
/* Flush caches and TLBs */
wbinvd();
/* Cyrix ARRs - everything else was excluded at the top */
setCx86(CX86_CCR3, ccr3);
/* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
/* Restore value of CR4 */
if ( cpu_has_pge )
write_cr4(cr4);
}
static void cyrix_set_arr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type)
{
unsigned char arr, arr_type, arr_size;
u32 cr0, ccr3;
u32 cr4 = 0;
arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
@@ -158,24 +200,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
}
}
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if ( cpu_has_pge ) {
cr4 = read_cr4();
write_cr4(cr4 & (unsigned char) ~(1 << 7));
}
/* Disable and flush caches. Note that wbinvd flushes the TLBs as
a side-effect */
cr0 = read_cr0() | 0x40000000;
wbinvd();
write_cr0(cr0);
wbinvd();
/* Cyrix ARRs - everything else were excluded at the top */
ccr3 = getCx86(CX86_CCR3);
/* Cyrix ARRs - everything else were excluded at the top */
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
prepare_set();
base <<= PAGE_SHIFT;
setCx86(arr, ((unsigned char *) &base)[3]);
@@ -183,18 +208,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
setCx86(CX86_RCR_BASE + reg, arr_type);
/* Flush caches and TLBs */
wbinvd();
/* Cyrix ARRs - everything else was excluded at the top */
setCx86(CX86_CCR3, ccr3);
/* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
/* Restore value of CR4 */
if ( cpu_has_pge )
write_cr4(cr4);
post_set();
}
typedef struct {
@@ -210,31 +224,11 @@ arr_state_t arr_state[8] __initdata = {
unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };
static void __init
cyrix_arr_init_secondary(void)
static void cyrix_set_all(void)
{
int i;
u32 cr0, ccr3, cr4 = 0;
/* flush cache and enable MAPEN */
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if ( cpu_has_pge ) {
cr4 = read_cr4();
write_cr4(cr4 & (unsigned char) ~(1 << 7));
}
/* Disable and flush caches. Note that wbinvd flushes the TLBs as
a side-effect */
cr0 = read_cr0() | 0x40000000;
wbinvd();
write_cr0(cr0);
wbinvd();
/* Cyrix ARRs - everything else were excluded at the top */
ccr3 = getCx86(CX86_CCR3);
/* Cyrix ARRs - everything else were excluded at the top */
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
prepare_set();
/* the CCRs are not contiguous */
for (i = 0; i < 4; i++)
@@ -245,18 +239,7 @@ cyrix_arr_init_secondary(void)
cyrix_set_arr(i, arr_state[i].base,
arr_state[i].size, arr_state[i].type);
/* Flush caches and TLBs */
wbinvd();
/* Cyrix ARRs - everything else was excluded at the top */
setCx86(CX86_CCR3, ccr3);
/* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
/* Restore value of CR4 */
if ( cpu_has_pge )
write_cr4(cr4);
post_set();
}
/*
@@ -361,7 +344,7 @@ cyrix_arr_init(void)
static struct mtrr_ops cyrix_mtrr_ops = {
.vendor = X86_VENDOR_CYRIX,
.init = cyrix_arr_init,
.init_secondary = cyrix_arr_init_secondary,
.set_all = cyrix_set_all,
.set = cyrix_set_arr,
.get = cyrix_get_arr,
.get_free_region = cyrix_get_free_region,
......
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/mtrr.h>
@@ -6,6 +8,90 @@
#include <asm/cpufeature.h>
#include "mtrr.h"
struct mtrr_state {
struct mtrr_var_range *var_ranges;
mtrr_type fixed_ranges[NUM_FIXED_RANGES];
unsigned char enabled;
mtrr_type def_type;
};
static unsigned long smp_changes_mask __initdata = 0;
struct mtrr_state mtrr_state = {};
/* Get the MSR pair relating to a var range */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
static void __init
get_fixed_ranges(mtrr_type * frs)
{
unsigned long *p = (unsigned long *) frs;
int i;
rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
for (i = 0; i < 2; i++)
rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
for (i = 0; i < 8; i++)
rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
/* Grab all of the MTRR state for this CPU into *state */
void get_mtrr_state(void)
{
unsigned int i;
struct mtrr_var_range *vrs;
unsigned long lo, dummy;
if (!mtrr_state.var_ranges) {
mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
GFP_KERNEL);
if (!mtrr_state.var_ranges)
return;
}
vrs = mtrr_state.var_ranges;
for (i = 0; i < num_var_ranges; i++)
get_mtrr_var_range(i, &vrs[i]);
get_fixed_ranges(mtrr_state.fixed_ranges);
rdmsr(MTRRdefType_MSR, lo, dummy);
mtrr_state.def_type = (lo & 0xff);
mtrr_state.enabled = (lo & 0xc00) >> 10;
}
/* Free resources associated with a struct mtrr_state */
void __init finalize_mtrr_state(void)
{
if (mtrr_state.var_ranges)
kfree(mtrr_state.var_ranges);
mtrr_state.var_ranges = NULL;
}
/* Some BIOS's are fucked and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
unsigned long mask = smp_changes_mask;
if (!mask)
return;
if (mask & MTRR_CHANGE_MASK_FIXED)
printk
("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_VARIABLE)
printk
("mtrr: your CPUs had inconsistent variable MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_DEFTYPE)
printk
("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
printk("mtrr: probably your BIOS does not setup all CPUs\n");
}
int generic_get_free_region(unsigned long base, unsigned long size)
/* [SUMMARY] Get a free MTRR.
@@ -55,23 +141,104 @@ void generic_get_mtrr(unsigned int reg, unsigned long *base,
*type = base_lo & 0xff;
}
void generic_set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
<reg> The register to set.
<base> The base address of the region.
<size> The size of the region. If this is 0 the region is disabled.
<type> The type of the region.
<do_safe> If TRUE, do the change safely. If FALSE, safety measures should
be done externally.
[RETURNS] Nothing.
static int __init set_fixed_ranges(mtrr_type * frs)
{
unsigned long *p = (unsigned long *) frs;
int changed = FALSE;
int i;
unsigned long lo, hi;
rdmsr(MTRRfix64K_00000_MSR, lo, hi);
if (p[0] != lo || p[1] != hi) {
wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
changed = TRUE;
}
for (i = 0; i < 2; i++) {
rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
p[3 + i * 2]);
changed = TRUE;
}
}
for (i = 0; i < 8; i++) {
rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
p[7 + i * 2]);
changed = TRUE;
}
}
return changed;
}
/* Set the MSR pair relating to a var range. Returns TRUE if
changes are made */
static int __init set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
unsigned int lo, hi;
int changed = FALSE;
rdmsr(MTRRphysBase_MSR(index), lo, hi);
if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
|| (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
changed = TRUE;
}
rdmsr(MTRRphysMask_MSR(index), lo, hi);
if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
|| (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
changed = TRUE;
}
return changed;
}
static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
/* [SUMMARY] Set the MTRR state for this CPU.
<state> The MTRR state information to read.
<ctxt> Some relevant CPU context.
[NOTE] The CPU must already be in a safe state for MTRR changes.
[RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
u32 cr0, cr4 = 0;
u32 deftype_lo, deftype_hi;
static spinlock_t set_atomicity_lock = SPIN_LOCK_UNLOCKED;
unsigned int i;
unsigned long change_mask = 0;
for (i = 0; i < num_var_ranges; i++)
if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
change_mask |= MTRR_CHANGE_MASK_VARIABLE;
if (set_fixed_ranges(mtrr_state.fixed_ranges))
change_mask |= MTRR_CHANGE_MASK_FIXED;
/* Set_mtrr_restore restores the old value of MTRRdefType,
so to set it we fiddle with the saved value */
if ((deftype_lo & 0xff) != mtrr_state.def_type
|| ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
}
return change_mask;
}
static u32 cr4 = 0;
static u32 deftype_lo, deftype_hi;
static void prepare_set(void)
{
u32 cr0;
/* Note that this is not ideal, since the cache is only flushed/disabled
for this CPU while the MTRRs are changed, but changing this requires
more invasive changes to the way the kernel boots */
spin_lock(&set_atomicity_lock);
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if ( cpu_has_pge ) {
cr4 = read_cr4();
@@ -90,18 +257,10 @@ void generic_set_mtrr(unsigned int reg, unsigned long base,
/* Disable MTRRs, and set the default type to uncached */
wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
}
if (size == 0) {
/* The invalid bit is kept in the mask, so we simply clear the
relevant mask register to disable a range. */
wrmsr(MTRRphysMask_MSR(reg), 0, 0);
} else {
wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
(base & size_and_mask) >> (32 - PAGE_SHIFT));
wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
(-size & size_and_mask) >> (32 - PAGE_SHIFT));
}
static void post_set(void)
{
/* Flush caches and TLBs */
wbinvd();
@@ -114,7 +273,57 @@ void generic_set_mtrr(unsigned int reg, unsigned long base,
/* Restore value of CR4 */
if ( cpu_has_pge )
write_cr4(cr4);
spin_unlock(&set_atomicity_lock);
}
static void generic_set_all(void)
{
unsigned long mask, count;
prepare_set();
/* Actually set the state */
mask = set_mtrr_state(deftype_lo,deftype_hi);
post_set();
/* Use the atomic bitops to update the global mask */
for (count = 0; count < sizeof mask * 8; ++count) {
if (mask & 0x01)
set_bit(count, &smp_changes_mask);
mask >>= 1;
}
}
static void generic_set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
<reg> The register to set.
<base> The base address of the region.
<size> The size of the region. If this is 0 the region is disabled.
<type> The type of the region.
<do_safe> If TRUE, do the change safely. If FALSE, safety measures should
be done externally.
[RETURNS] Nothing.
*/
{
prepare_set();
printk("MTRR: setting reg %x\n",reg);
if (size == 0) {
/* The invalid bit is kept in the mask, so we simply clear the
relevant mask register to disable a range. */
wrmsr(MTRRphysMask_MSR(reg), 0, 0);
} else {
wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
(base & size_and_mask) >> (32 - PAGE_SHIFT));
wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
(-size & size_and_mask) >> (32 - PAGE_SHIFT));
}
post_set();
}
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
@@ -178,7 +387,7 @@ int positive_have_wrcomb(void)
*/
struct mtrr_ops generic_mtrr_ops = {
.use_intel_if = 1,
.init_secondary = generic_init_secondary,
.set_all = generic_set_all,
.get = generic_get_mtrr,
.get_free_region = generic_get_free_region,
.set = generic_set_mtrr,
......
@@ -163,8 +163,11 @@ static void ipi_handler(void *info)
}
/* The master has cleared me to execute */
mtrr_if->set(data->smp_reg, data->smp_base,
data->smp_size, data->smp_type);
if (data->smp_reg != ~0UL)
mtrr_if->set(data->smp_reg, data->smp_base,
data->smp_size, data->smp_type);
else
mtrr_if->set_all();
atomic_dec(&data->count);
while(atomic_read(&data->gate)) {
@@ -243,7 +246,15 @@ static void set_mtrr(unsigned int reg, unsigned long base,
atomic_set(&data.gate,1);
/* do our MTRR business */
mtrr_if->set(reg,base,size,type);
/* HACK!
* We use this same function to initialize the mtrrs on boot.
* The state of the boot cpu's mtrrs has been saved, and we want
* to replicate across all the APs.
* If we're doing that @reg is set to something special...
*/
if (reg != ~0UL)
mtrr_if->set(reg,base,size,type);
/* wait for the others */
while(atomic_read(&data.count)) {
@@ -530,6 +541,20 @@ static void __init init_ifs(void)
centaur_init_mtrr();
}
static void init_other_cpus(void)
{
if (use_intel())
get_mtrr_state();
/* bring up the other processors */
set_mtrr(~0UL,0,0,0);
if (use_intel()) {
finalize_mtrr_state();
mtrr_state_warn();
}
}
/**
* mtrr_init - initialize mtrrs on the boot CPU
*
@@ -537,7 +562,7 @@ static void __init init_ifs(void)
* initialized (i.e. before smp_init()).
*
*/
int __init mtrr_init(void)
static int __init mtrr_init(void)
{
init_ifs();
@@ -608,21 +633,15 @@ int __init mtrr_init(void)
break;
}
}
printk("mtrr: v%s\n",MTRR_VERSION);
if (mtrr_if) {
set_num_var_ranges();
if (use_intel()) {
/* Only for Intel MTRRs */
get_mtrr_state();
}
init_table();
init_other_cpus();
}
#if 0
printk("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n"
"mtrr: detected mtrr type: %s\n",
MTRR_VERSION, mtrr_if_name[mtrr_if]);
#endif
return mtrr_if ? -ENXIO : 0;
}
//subsys_initcall(mtrr_init);
core_initcall(mtrr_init);
@@ -38,9 +38,10 @@ struct mtrr_ops {
u32 vendor;
u32 use_intel_if;
void (*init)(void);
void (*init_secondary)(void);
void (*set)(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
void (*set_all)(void);
void (*get)(unsigned int reg, unsigned long *base,
unsigned long *size, mtrr_type * type);
int (*get_free_region) (unsigned long base, unsigned long size);
......
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include "mtrr.h"
struct mtrr_state {
struct mtrr_var_range *var_ranges;
mtrr_type fixed_ranges[NUM_FIXED_RANGES];
unsigned char enabled;
mtrr_type def_type;
};
static unsigned long smp_changes_mask __initdata = 0;
struct mtrr_state mtrr_state = {};
static int __init set_fixed_ranges(mtrr_type * frs)
{
unsigned long *p = (unsigned long *) frs;
int changed = FALSE;
int i;
unsigned long lo, hi;
rdmsr(MTRRfix64K_00000_MSR, lo, hi);
if (p[0] != lo || p[1] != hi) {
wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
changed = TRUE;
}
for (i = 0; i < 2; i++) {
rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
p[3 + i * 2]);
changed = TRUE;
}
}
for (i = 0; i < 8; i++) {
rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
p[7 + i * 2]);
changed = TRUE;
}
}
return changed;
}
/* Set the MSR pair relating to a var range. Returns TRUE if
changes are made */
static int __init set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
unsigned int lo, hi;
int changed = FALSE;
rdmsr(MTRRphysBase_MSR(index), lo, hi);
if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
|| (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
changed = TRUE;
}
rdmsr(MTRRphysMask_MSR(index), lo, hi);
if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
|| (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
changed = TRUE;
}
return changed;
}
static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
/* [SUMMARY] Set the MTRR state for this CPU.
<state> The MTRR state information to read.
<ctxt> Some relevant CPU context.
[NOTE] The CPU must already be in a safe state for MTRR changes.
[RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
unsigned int i;
unsigned long change_mask = 0;
for (i = 0; i < num_var_ranges; i++)
if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
change_mask |= MTRR_CHANGE_MASK_VARIABLE;
if (set_fixed_ranges(mtrr_state.fixed_ranges))
change_mask |= MTRR_CHANGE_MASK_FIXED;
/* Set_mtrr_restore restores the old value of MTRRdefType,
so to set it we fiddle with the saved value */
if ((deftype_lo & 0xff) != mtrr_state.def_type
|| ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
}
return change_mask;
}
/* Some BIOS's are fucked and don't set all MTRRs the same! */
static void __init mtrr_state_warn(void)
{
unsigned long mask = smp_changes_mask;
if (!mask)
return;
if (mask & MTRR_CHANGE_MASK_FIXED)
printk
("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_VARIABLE)
printk
("mtrr: your CPUs had inconsistent variable MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_DEFTYPE)
printk
("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
printk("mtrr: probably your BIOS does not setup all CPUs\n");
}
/* Free resources associated with a struct mtrr_state */
static void __init finalize_mtrr_state(void)
{
if (mtrr_state.var_ranges)
kfree(mtrr_state.var_ranges);
mtrr_state.var_ranges = NULL;
}
/* Get the MSR pair relating to a var range */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
static void __init
get_fixed_ranges(mtrr_type * frs)
{
unsigned long *p = (unsigned long *) frs;
int i;
rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
for (i = 0; i < 2; i++)
rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
for (i = 0; i < 8; i++)
rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
/* Grab all of the MTRR state for this CPU into *state */
void get_mtrr_state(void)
{
unsigned int i;
struct mtrr_var_range *vrs;
unsigned long lo, dummy;
if (!mtrr_state.var_ranges) {
mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
GFP_KERNEL);
if (!mtrr_state.var_ranges)
return;
}
vrs = mtrr_state.var_ranges;
for (i = 0; i < num_var_ranges; i++)
get_mtrr_var_range(i, &vrs[i]);
get_fixed_ranges(mtrr_state.fixed_ranges);
rdmsr(MTRRdefType_MSR, lo, dummy);
mtrr_state.def_type = (lo & 0xff);
mtrr_state.enabled = (lo & 0xc00) >> 10;
}
/* Put the processor into a state where MTRRs can be safely set */
void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
@@ -246,93 +76,3 @@ void set_mtrr_done(struct set_mtrr_context *ctxt)
local_irq_restore(ctxt->flags);
}
void __init generic_init_secondary(void)
{
u32 cr0, cr4 = 0;
u32 deftype_lo, deftype_hi;
unsigned long mask, count;
/* Note that this is not ideal, since the cache is only flushed/disabled
for this CPU while the MTRRs are changed, but changing this requires
more invasive changes to the way the kernel boots */
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if ( cpu_has_pge ) {
cr4 = read_cr4();
write_cr4(cr4 & (unsigned char) ~(1 << 7));
}
/* Disable and flush caches. Note that wbinvd flushes the TLBs as
a side-effect */
cr0 = read_cr0() | 0x40000000;
wbinvd();
write_cr0(cr0);
wbinvd();
/* Save MTRR state */
rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
/* Disable MTRRs, and set the default type to uncached */
wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
/* Actually set the state */
mask = set_mtrr_state(deftype_lo,deftype_hi);
/* Flush caches and TLBs */
wbinvd();
/* Intel (P6) standard MTRRs */
wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
/* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
/* Restore value of CR4 */
if ( cpu_has_pge )
write_cr4(cr4);
/* Use the atomic bitops to update the global mask */
for (count = 0; count < sizeof mask * 8; ++count) {
if (mask & 0x01)
set_bit(count, &smp_changes_mask);
mask >>= 1;
}
}
/**
* mtrr_init_secondary - setup AP MTRR state
*
* Yes, this code is exactly the same as the set_mtrr code, except for the
* piece in the middle - you set all the ranges at once, instead of one
* register at a time.
* Shoot me.
*/
void __init mtrr_init_secondary_cpu(void)
{
unsigned long flags;
if (!mtrr_if || !mtrr_if->init_secondary) {
/* I see no MTRRs I can support in SMP mode... */
printk("mtrr: SMP support incomplete for this vendor\n");
return;
}
local_irq_save(flags);
mtrr_if->init_secondary();
local_irq_restore(flags);
}
/**
* mtrr_final_init - finalize initialization sequence.
*/
static int __init mtrr_finalize_state(void)
{
if (use_intel()) {
finalize_mtrr_state();
mtrr_state_warn();
}
return 0;
}
arch_initcall(mtrr_finalize_state);
@@ -45,7 +45,6 @@
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/smpboot.h>
@@ -403,12 +402,6 @@ void __init smp_callin(void)
local_irq_enable();
#ifdef CONFIG_MTRR
/*
* Must be done before calibration delay is computed
*/
mtrr_init_secondary_cpu ();
#endif
/*
* Get our bogomips.
*/
......
@@ -115,13 +115,6 @@ static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
# endif
/* The following functions are for initialisation: don't use them! */
extern int mtrr_init (void);
# if defined(CONFIG_SMP) && defined(CONFIG_MTRR)
extern void mtrr_init_boot_cpu (void);
extern void mtrr_init_secondary_cpu (void);
# endif
#endif
#endif /* _LINUX_MTRR_H */
@@ -38,10 +38,6 @@
#include <asm/s390mach.h>
#endif
#ifdef CONFIG_MTRR
# include <asm/mtrr.h>
#endif
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/smp.h>
#endif
@@ -536,15 +532,6 @@ static int init(void * unused)
*/
child_reaper = current;
#if defined(CONFIG_MTRR) /* Do this after SMP initialization */
/*
* We should probably create some architecture-dependent "fixup after
* everything is up" style function where this would belong better
* than in init/main.c..
*/
mtrr_init();
#endif
/* Sets up cpus_possible() */
smp_prepare_cpus(max_cpus);
......