Commit 98398eba authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5

into samba.org:/scratch/anton/linux-2.5_bar
parents 09b1ed32 ed245b59
@@ -166,6 +166,7 @@ acpi_parse_lapic_addr_ovr (
	return 0;
}
#ifndef CONFIG_ACPI_HT_ONLY
static int __init
acpi_parse_lapic_nmi (
@@ -185,12 +186,16 @@ acpi_parse_lapic_nmi (
	return 0;
}
#endif /*CONFIG_ACPI_HT_ONLY*/
#endif /*CONFIG_X86_LOCAL_APIC*/
#ifdef CONFIG_X86_IO_APIC
int acpi_ioapic;
#ifndef CONFIG_ACPI_HT_ONLY
static int __init
acpi_parse_ioapic (
	acpi_table_entry_header *header)
@@ -251,6 +256,7 @@ acpi_parse_nmi_src (
	return 0;
}
#endif /*!CONFIG_ACPI_HT_ONLY*/
#endif /*CONFIG_X86_IO_APIC*/
@@ -361,18 +367,21 @@ acpi_boot_init (
		return result;
	}
#ifndef CONFIG_ACPI_HT_ONLY
	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
	if (result < 0) {
		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return result;
	}
#endif /*!CONFIG_ACPI_HT_ONLY*/
	acpi_lapic = 1;
#endif /*CONFIG_X86_LOCAL_APIC*/
#ifdef CONFIG_X86_IO_APIC
#ifndef CONFIG_ACPI_HT_ONLY
	/*
	 * I/O APIC
@@ -410,6 +419,7 @@ acpi_boot_init (
	acpi_ioapic = 1;
#endif /*!CONFIG_ACPI_HT_ONLY*/
#endif /*CONFIG_X86_IO_APIC*/
#ifdef CONFIG_X86_LOCAL_APIC
...
@@ -110,12 +110,54 @@ cyrix_get_free_region(unsigned long base, unsigned long size)
	return -ENOSPC;
}
static u32 cr4 = 0;
static u32 ccr3;
static void prepare_set(void)
{
u32 cr0;
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if ( cpu_has_pge ) {
cr4 = read_cr4();
write_cr4(cr4 & (unsigned char) ~(1 << 7));
}
/* Disable and flush caches. Note that wbinvd flushes the TLBs as
a side-effect */
cr0 = read_cr0() | 0x40000000;
wbinvd();
write_cr0(cr0);
wbinvd();
/* Cyrix ARRs - everything else were excluded at the top */
ccr3 = getCx86(CX86_CCR3);
/* Cyrix ARRs - everything else were excluded at the top */
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
}
static void post_set(void)
{
/* Flush caches and TLBs */
wbinvd();
/* Cyrix ARRs - everything else was excluded at the top */
setCx86(CX86_CCR3, ccr3);
/* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
/* Restore value of CR4 */
if ( cpu_has_pge )
write_cr4(cr4);
}
static void cyrix_set_arr(unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
	unsigned char arr, arr_type, arr_size;
-	u32 cr0, ccr3;
-	u32 cr4 = 0;
	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
@@ -158,24 +200,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
		}
	}
-	/* Save value of CR4 and clear Page Global Enable (bit 7) */
-	if ( cpu_has_pge ) {
-		cr4 = read_cr4();
-		write_cr4(cr4 & (unsigned char) ~(1 << 7));
-	}
-	/* Disable and flush caches. Note that wbinvd flushes the TLBs as
-	   a side-effect */
-	cr0 = read_cr0() | 0x40000000;
-	wbinvd();
-	write_cr0(cr0);
-	wbinvd();
-	/* Cyrix ARRs - everything else were excluded at the top */
-	ccr3 = getCx86(CX86_CCR3);
-	/* Cyrix ARRs - everything else were excluded at the top */
-	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
	prepare_set();
	base <<= PAGE_SHIFT;
	setCx86(arr, ((unsigned char *) &base)[3]);
@@ -183,18 +208,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
	setCx86(CX86_RCR_BASE + reg, arr_type);
-	/* Flush caches and TLBs */
-	wbinvd();
-	/* Cyrix ARRs - everything else was excluded at the top */
-	setCx86(CX86_CCR3, ccr3);
-	/* Enable caches */
-	write_cr0(read_cr0() & 0xbfffffff);
-	/* Restore value of CR4 */
-	if ( cpu_has_pge )
-		write_cr4(cr4);
	post_set();
}
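The two setCx86() writes visible in the hunk above take the top byte of the page-shifted base and its low byte merged with the size code. A stand-alone little-endian model of that byte aliasing, with an invented base and a placeholder size code rather than anything read from hardware:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t base = 0x000c8u << 12;	/* invented page number, then base <<= PAGE_SHIFT */
	unsigned char arr_size = 0x9;	/* placeholder size code, not a real lookup */

	/* On little-endian x86, byte [3] is the most significant byte of base. */
	printf("setCx86(arr,     %#04x)\n", (unsigned) ((unsigned char *) &base)[3]);
	printf("setCx86(arr + 2, %#04x)	/* low base byte | size code */\n",
	       (unsigned) (((unsigned char *) &base)[1] | arr_size));
	return 0;
}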
typedef struct {
@@ -210,31 +224,11 @@ arr_state_t arr_state[8] __initdata = {
unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };
-static void __init
-cyrix_arr_init_secondary(void)
static void cyrix_set_all(void)
{
	int i;
-	u32 cr0, ccr3, cr4 = 0;
-	/* flush cache and enable MAPEN */
-	/* Save value of CR4 and clear Page Global Enable (bit 7) */
-	if ( cpu_has_pge ) {
-		cr4 = read_cr4();
-		write_cr4(cr4 & (unsigned char) ~(1 << 7));
-	}
-	/* Disable and flush caches. Note that wbinvd flushes the TLBs as
-	   a side-effect */
-	cr0 = read_cr0() | 0x40000000;
-	wbinvd();
-	write_cr0(cr0);
-	wbinvd();
-	/* Cyrix ARRs - everything else were excluded at the top */
-	ccr3 = getCx86(CX86_CCR3);
-	/* Cyrix ARRs - everything else were excluded at the top */
-	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
	prepare_set();
	/* the CCRs are not contiguous */
	for (i = 0; i < 4; i++)
@@ -245,18 +239,7 @@ cyrix_arr_init_secondary(void)
		cyrix_set_arr(i, arr_state[i].base,
			      arr_state[i].size, arr_state[i].type);
-	/* Flush caches and TLBs */
-	wbinvd();
-	/* Cyrix ARRs - everything else was excluded at the top */
-	setCx86(CX86_CCR3, ccr3);
-	/* Enable caches */
-	write_cr0(read_cr0() & 0xbfffffff);
-	/* Restore value of CR4 */
-	if ( cpu_has_pge )
-		write_cr4(cr4);
	post_set();
}
/*
@@ -361,7 +344,7 @@ cyrix_arr_init(void)
static struct mtrr_ops cyrix_mtrr_ops = {
	.vendor            = X86_VENDOR_CYRIX,
	.init              = cyrix_arr_init,
-	.init_secondary    = cyrix_arr_init_secondary,
	.set_all           = cyrix_set_all,
	.set               = cyrix_set_arr,
	.get               = cyrix_get_arr,
	.get_free_region   = cyrix_get_free_region,
...
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/mtrr.h>
@@ -6,6 +8,90 @@
#include <asm/cpufeature.h>
#include "mtrr.h"
struct mtrr_state {
struct mtrr_var_range *var_ranges;
mtrr_type fixed_ranges[NUM_FIXED_RANGES];
unsigned char enabled;
mtrr_type def_type;
};
static unsigned long smp_changes_mask __initdata = 0;
struct mtrr_state mtrr_state = {};
/* Get the MSR pair relating to a var range */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
static void __init
get_fixed_ranges(mtrr_type * frs)
{
unsigned long *p = (unsigned long *) frs;
int i;
rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
for (i = 0; i < 2; i++)
rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
for (i = 0; i < 8; i++)
rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
/* Grab all of the MTRR state for this CPU into *state */
void get_mtrr_state(void)
{
unsigned int i;
struct mtrr_var_range *vrs;
unsigned long lo, dummy;
if (!mtrr_state.var_ranges) {
mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
GFP_KERNEL);
if (!mtrr_state.var_ranges)
return;
}
vrs = mtrr_state.var_ranges;
for (i = 0; i < num_var_ranges; i++)
get_mtrr_var_range(i, &vrs[i]);
get_fixed_ranges(mtrr_state.fixed_ranges);
rdmsr(MTRRdefType_MSR, lo, dummy);
mtrr_state.def_type = (lo & 0xff);
mtrr_state.enabled = (lo & 0xc00) >> 10;
}
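get_mtrr_state() above derives def_type and the two enable bits directly from the low word of MTRRdefType_MSR. A minimal user-space sketch of just that decoding; the MSR value is an invented constant, since reading the real register needs ring 0:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Pretend this came from rdmsr(MTRRdefType_MSR, lo, dummy). */
	uint32_t lo = 0xc06;	/* example value only */
	unsigned char def_type = lo & 0xff;		/* default memory type */
	unsigned char enabled  = (lo & 0xc00) >> 10;	/* enable / fixed-enable bits */

	printf("def_type = %u, enabled bits = %u\n", def_type, enabled);
	return 0;
}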
/* Free resources associated with a struct mtrr_state */
void __init finalize_mtrr_state(void)
{
if (mtrr_state.var_ranges)
kfree(mtrr_state.var_ranges);
mtrr_state.var_ranges = NULL;
}
/* Some BIOS's are fucked and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
unsigned long mask = smp_changes_mask;
if (!mask)
return;
if (mask & MTRR_CHANGE_MASK_FIXED)
printk
("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_VARIABLE)
printk
("mtrr: your CPUs had inconsistent variable MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_DEFTYPE)
printk
("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
printk("mtrr: probably your BIOS does not setup all CPUs\n");
}
int generic_get_free_region(unsigned long base, unsigned long size)
/* [SUMMARY] Get a free MTRR.
@@ -55,23 +141,104 @@ void generic_get_mtrr(unsigned int reg, unsigned long *base,
	*type = base_lo & 0xff;
}
-void generic_set_mtrr(unsigned int reg, unsigned long base,
-		      unsigned long size, mtrr_type type)
-/* [SUMMARY] Set variable MTRR register on the local CPU.
-    <reg> The register to set.
-    <base> The base address of the region.
-    <size> The size of the region. If this is 0 the region is disabled.
-    <type> The type of the region.
-    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
-    be done externally.
-    [RETURNS] Nothing.
static int __init set_fixed_ranges(mtrr_type * frs)
{
	unsigned long *p = (unsigned long *) frs;
	int changed = FALSE;
	int i;
	unsigned long lo, hi;
	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
	if (p[0] != lo || p[1] != hi) {
		wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
changed = TRUE;
}
for (i = 0; i < 2; i++) {
rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
p[3 + i * 2]);
changed = TRUE;
}
}
for (i = 0; i < 8; i++) {
rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
p[7 + i * 2]);
changed = TRUE;
}
}
return changed;
}
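set_fixed_ranges() (and get_fixed_ranges() earlier) treat the eleven fixed-range MSRs as one flat array of 32-bit halves: the 64K register maps to p[0..1], the two 16K registers to p[2..5] and the eight 4K registers to p[6..21]. A throwaway loop to check that index arithmetic; the MSR names are only labels here:

#include <stdio.h>

int main(void)
{
	int i;

	printf("MTRRfix64K_00000      -> p[0], p[1]\n");
	for (i = 0; i < 2; i++)
		printf("MTRRfix16K_80000 + %d -> p[%d], p[%d]\n", i, 2 + i * 2, 3 + i * 2);
	for (i = 0; i < 8; i++)
		printf("MTRRfix4K_C0000 + %d  -> p[%d], p[%d]\n", i, 6 + i * 2, 7 + i * 2);
	return 0;
}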
/* Set the MSR pair relating to a var range. Returns TRUE if
changes are made */
static int __init set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
unsigned int lo, hi;
int changed = FALSE;
rdmsr(MTRRphysBase_MSR(index), lo, hi);
if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
|| (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
changed = TRUE;
}
rdmsr(MTRRphysMask_MSR(index), lo, hi);
if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
|| (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
changed = TRUE;
}
return changed;
}
static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
/* [SUMMARY] Set the MTRR state for this CPU.
<state> The MTRR state information to read.
<ctxt> Some relevant CPU context.
[NOTE] The CPU must already be in a safe state for MTRR changes.
[RETURNS] 0 if no changes made, else a mask indication what was changed.
 */
{
-	u32 cr0, cr4 = 0;
-	u32 deftype_lo, deftype_hi;
	unsigned int i;
	unsigned long change_mask = 0;
	static spinlock_t set_atomicity_lock = SPIN_LOCK_UNLOCKED;
for (i = 0; i < num_var_ranges; i++)
if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
change_mask |= MTRR_CHANGE_MASK_VARIABLE;
if (set_fixed_ranges(mtrr_state.fixed_ranges))
change_mask |= MTRR_CHANGE_MASK_FIXED;
/* Set_mtrr_restore restores the old value of MTRRdefType,
so to set it we fiddle with the saved value */
if ((deftype_lo & 0xff) != mtrr_state.def_type
|| ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
}
return change_mask;
}
static u32 cr4 = 0;
static u32 deftype_lo, deftype_hi;
static void prepare_set(void)
{
u32 cr0;
/* Note that this is not ideal, since the cache is only flushed/disabled
for this CPU while the MTRRs are changed, but changing this requires
more invasive changes to the way the kernel boots */
spin_lock(&set_atomicity_lock);
	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if ( cpu_has_pge ) {
		cr4 = read_cr4();
@@ -90,18 +257,10 @@ void generic_set_mtrr(unsigned int reg, unsigned long base,
	/* Disable MTRRs, and set the default type to uncached */
	wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
}
-	if (size == 0) {
-		/* The invalid bit is kept in the mask, so we simply clear the
-		   relevant mask register to disable a range. */
-		wrmsr(MTRRphysMask_MSR(reg), 0, 0);
-	} else {
-		wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
-		      (base & size_and_mask) >> (32 - PAGE_SHIFT));
-		wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
-		      (-size & size_and_mask) >> (32 - PAGE_SHIFT));
-	}
static void post_set(void)
{
	/* Flush caches and TLBs */
	wbinvd();
@@ -114,7 +273,57 @@ void generic_set_mtrr(unsigned int reg, unsigned long base,
	/* Restore value of CR4 */
	if ( cpu_has_pge )
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}
static void generic_set_all(void)
{
unsigned long mask, count;
prepare_set();
/* Actually set the state */
mask = set_mtrr_state(deftype_lo,deftype_hi);
post_set();
/* Use the atomic bitops to update the global mask */
for (count = 0; count < sizeof mask * 8; ++count) {
if (mask & 0x01)
set_bit(count, &smp_changes_mask);
mask >>= 1;
}
}
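generic_set_all() folds whatever set_mtrr_state() reports into the global smp_changes_mask one bit at a time via set_bit(). A stand-alone model of that loop, using a C11 atomic OR as a stand-in for the kernel's set_bit():

#include <stdio.h>
#include <stdatomic.h>

/* User-space stand-in for the kernel's atomic set_bit(). */
static atomic_ulong changes_mask_model;

static void set_bit_model(unsigned long nr, atomic_ulong *addr)
{
	atomic_fetch_or(addr, 1UL << nr);
}

int main(void)
{
	unsigned long mask = 0x5;	/* e.g. variable and deftype state differed */
	unsigned long count;

	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit_model(count, &changes_mask_model);
		mask >>= 1;
	}
	printf("accumulated mask = %#lx\n",
	       (unsigned long) atomic_load(&changes_mask_model));
	return 0;
}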
static void generic_set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
<reg> The register to set.
<base> The base address of the region.
<size> The size of the region. If this is 0 the region is disabled.
<type> The type of the region.
<do_safe> If TRUE, do the change safely. If FALSE, safety measures should
be done externally.
[RETURNS] Nothing.
*/
{
prepare_set();
printk("MTRR: setting reg %x\n",reg);
if (size == 0) {
/* The invalid bit is kept in the mask, so we simply clear the
relevant mask register to disable a range. */
wrmsr(MTRRphysMask_MSR(reg), 0, 0);
} else {
wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
(base & size_and_mask) >> (32 - PAGE_SHIFT));
wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
(-size & size_and_mask) >> (32 - PAGE_SHIFT));
}
post_set();
}
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
@@ -178,7 +387,7 @@ int positive_have_wrcomb(void)
 */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
-	.init_secondary    = generic_init_secondary,
	.set_all           = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
...
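The wrmsr pair in generic_set_mtrr() encodes a region as MTRRphysBase = (base << PAGE_SHIFT) | type and MTRRphysMask = (-size << PAGE_SHIFT) | 0x800, with base and size in pages. The sketch below recomputes those values in user space for one sample region; size_and_mask is assumed to be 0xfffff (a plain 32-bit address width) and the region itself is invented:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Invented region: 64 MB at 0xf0000000, type 1 (write-combining), in pages. */
	uint32_t base = 0xf0000000u >> PAGE_SHIFT;
	uint32_t size = (64u << 20) >> PAGE_SHIFT;
	uint32_t size_and_mask = 0xfffffu;	/* assumption: 32-bit physical address width */
	uint32_t type = 1;

	uint32_t base_lo = (base << PAGE_SHIFT) | type;
	uint32_t base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
	uint32_t mask_lo = ((uint32_t) -size << PAGE_SHIFT) | 0x800;	/* 0x800 = valid bit */
	uint32_t mask_hi = ((uint32_t) -size & size_and_mask) >> (32 - PAGE_SHIFT);

	printf("MTRRphysBase: lo=%#010lx hi=%#010lx\n",
	       (unsigned long) base_lo, (unsigned long) base_hi);
	printf("MTRRphysMask: lo=%#010lx hi=%#010lx\n",
	       (unsigned long) mask_lo, (unsigned long) mask_hi);
	return 0;
}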
@@ -163,8 +163,11 @@ static void ipi_handler(void *info)
	}
	/* The master has cleared me to execute */
	if (data->smp_reg != ~0UL)
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	else
		mtrr_if->set_all();
	atomic_dec(&data->count);
	while(atomic_read(&data->gate)) {
@@ -243,6 +246,14 @@ static void set_mtrr(unsigned int reg, unsigned long base,
	atomic_set(&data.gate,1);
	/* do our MTRR business */
/* HACK!
* We use this same function to initialize the mtrrs on boot.
* The state of the boot cpu's mtrrs has been saved, and we want
* to replicate across all the APs.
* If we're doing that @reg is set to something special...
*/
if (reg != ~0UL)
		mtrr_if->set(reg,base,size,type);
	/* wait for the others */
@@ -530,6 +541,20 @@ static void __init init_ifs(void)
	centaur_init_mtrr();
}
static void init_other_cpus(void)
{
if (use_intel())
get_mtrr_state();
/* bring up the other processors */
set_mtrr(~0UL,0,0,0);
if (use_intel()) {
finalize_mtrr_state();
mtrr_state_warn();
}
}
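init_other_cpus() relies on the HACK noted in set_mtrr(): passing reg == ~0UL through the normal rendezvous makes every CPU call ->set_all() and copy the saved boot-CPU state instead of programming one register. A toy dispatch model of that sentinel test, with stub functions standing in for the real mtrr_if callbacks:

#include <stdio.h>

/* Toy stand-ins for the two mtrr_if callbacks used by ipi_handler(). */
static void model_set_one(unsigned int reg) { printf("set register %u\n", reg); }
static void model_set_all(void)             { printf("replicate saved MTRR state\n"); }

/* Mirrors the reg != ~0UL test added to ipi_handler() and set_mtrr(). */
static void model_dispatch(unsigned long reg)
{
	if (reg != ~0UL)
		model_set_one((unsigned int) reg);
	else
		model_set_all();
}

int main(void)
{
	model_dispatch(3);	/* normal per-register update */
	model_dispatch(~0UL);	/* boot-time "copy everything to this AP" case */
	return 0;
}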
/**
 * mtrr_init - initialize mtrrs on the boot CPU
 *
@@ -537,7 +562,7 @@ static void __init init_ifs(void)
 * initialized (i.e. before smp_init()).
 *
 */
-int __init mtrr_init(void)
static int __init mtrr_init(void)
{
	init_ifs();
@@ -608,21 +633,15 @@ int __init mtrr_init(void)
			break;
		}
	}
	printk("mtrr: v%s\n",MTRR_VERSION);
	if (mtrr_if) {
		set_num_var_ranges();
-		if (use_intel()) {
-			/* Only for Intel MTRRs */
-			get_mtrr_state();
-		}
		init_table();
		init_other_cpus();
	}
-#if 0
-	printk("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n"
-	       "mtrr: detected mtrr type: %s\n",
-	       MTRR_VERSION, mtrr_if_name[mtrr_if]);
-#endif
	return mtrr_if ? -ENXIO : 0;
}
-//subsys_initcall(mtrr_init);
core_initcall(mtrr_init);
@@ -38,9 +38,10 @@ struct mtrr_ops {
	u32 vendor;
	u32 use_intel_if;
	void (*init)(void);
-	void (*init_secondary)(void);
	void (*set)(unsigned int reg, unsigned long base,
		    unsigned long size, mtrr_type type);
	void (*set_all)(void);
	void (*get)(unsigned int reg, unsigned long *base,
		    unsigned long *size, mtrr_type * type);
	int (*get_free_region) (unsigned long base, unsigned long size);
...
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include "mtrr.h"
struct mtrr_state {
struct mtrr_var_range *var_ranges;
mtrr_type fixed_ranges[NUM_FIXED_RANGES];
unsigned char enabled;
mtrr_type def_type;
};
static unsigned long smp_changes_mask __initdata = 0;
struct mtrr_state mtrr_state = {};
static int __init set_fixed_ranges(mtrr_type * frs)
{
unsigned long *p = (unsigned long *) frs;
int changed = FALSE;
int i;
unsigned long lo, hi;
rdmsr(MTRRfix64K_00000_MSR, lo, hi);
if (p[0] != lo || p[1] != hi) {
wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
changed = TRUE;
}
for (i = 0; i < 2; i++) {
rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
p[3 + i * 2]);
changed = TRUE;
}
}
for (i = 0; i < 8; i++) {
rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
p[7 + i * 2]);
changed = TRUE;
}
}
return changed;
}
/* Set the MSR pair relating to a var range. Returns TRUE if
changes are made */
static int __init set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
unsigned int lo, hi;
int changed = FALSE;
rdmsr(MTRRphysBase_MSR(index), lo, hi);
if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
|| (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
changed = TRUE;
}
rdmsr(MTRRphysMask_MSR(index), lo, hi);
if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
|| (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
changed = TRUE;
}
return changed;
}
static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
/* [SUMMARY] Set the MTRR state for this CPU.
<state> The MTRR state information to read.
<ctxt> Some relevant CPU context.
[NOTE] The CPU must already be in a safe state for MTRR changes.
[RETURNS] 0 if no changes made, else a mask indication what was changed.
*/
{
unsigned int i;
unsigned long change_mask = 0;
for (i = 0; i < num_var_ranges; i++)
if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
change_mask |= MTRR_CHANGE_MASK_VARIABLE;
if (set_fixed_ranges(mtrr_state.fixed_ranges))
change_mask |= MTRR_CHANGE_MASK_FIXED;
/* Set_mtrr_restore restores the old value of MTRRdefType,
so to set it we fiddle with the saved value */
if ((deftype_lo & 0xff) != mtrr_state.def_type
|| ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
}
return change_mask;
}
/* Some BIOS's are fucked and don't set all MTRRs the same! */
static void __init mtrr_state_warn(void)
{
unsigned long mask = smp_changes_mask;
if (!mask)
return;
if (mask & MTRR_CHANGE_MASK_FIXED)
printk
("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_VARIABLE)
printk
("mtrr: your CPUs had inconsistent variable MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_DEFTYPE)
printk
("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
printk("mtrr: probably your BIOS does not setup all CPUs\n");
}
/* Free resources associated with a struct mtrr_state */
static void __init finalize_mtrr_state(void)
{
if (mtrr_state.var_ranges)
kfree(mtrr_state.var_ranges);
mtrr_state.var_ranges = NULL;
}
/* Get the MSR pair relating to a var range */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
static void __init
get_fixed_ranges(mtrr_type * frs)
{
unsigned long *p = (unsigned long *) frs;
int i;
rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
for (i = 0; i < 2; i++)
rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
for (i = 0; i < 8; i++)
rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
/* Grab all of the MTRR state for this CPU into *state */
void get_mtrr_state(void)
{
unsigned int i;
struct mtrr_var_range *vrs;
unsigned long lo, dummy;
if (!mtrr_state.var_ranges) {
mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
GFP_KERNEL);
if (!mtrr_state.var_ranges)
return;
}
vrs = mtrr_state.var_ranges;
for (i = 0; i < num_var_ranges; i++)
get_mtrr_var_range(i, &vrs[i]);
get_fixed_ranges(mtrr_state.fixed_ranges);
rdmsr(MTRRdefType_MSR, lo, dummy);
mtrr_state.def_type = (lo & 0xff);
mtrr_state.enabled = (lo & 0xc00) >> 10;
}
/* Put the processor into a state where MTRRs can be safely set */
void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
@@ -246,93 +76,3 @@ void set_mtrr_done(struct set_mtrr_context *ctxt)
	local_irq_restore(ctxt->flags);
}
void __init generic_init_secondary(void)
{
u32 cr0, cr4 = 0;
u32 deftype_lo, deftype_hi;
unsigned long mask, count;
/* Note that this is not ideal, since the cache is only flushed/disabled
for this CPU while the MTRRs are changed, but changing this requires
more invasive changes to the way the kernel boots */
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if ( cpu_has_pge ) {
cr4 = read_cr4();
write_cr4(cr4 & (unsigned char) ~(1 << 7));
}
/* Disable and flush caches. Note that wbinvd flushes the TLBs as
a side-effect */
cr0 = read_cr0() | 0x40000000;
wbinvd();
write_cr0(cr0);
wbinvd();
/* Save MTRR state */
rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
/* Disable MTRRs, and set the default type to uncached */
wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
/* Actually set the state */
mask = set_mtrr_state(deftype_lo,deftype_hi);
/* Flush caches and TLBs */
wbinvd();
/* Intel (P6) standard MTRRs */
wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
/* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
/* Restore value of CR4 */
if ( cpu_has_pge )
write_cr4(cr4);
/* Use the atomic bitops to update the global mask */
for (count = 0; count < sizeof mask * 8; ++count) {
if (mask & 0x01)
set_bit(count, &smp_changes_mask);
mask >>= 1;
}
}
/**
* mtrr_init_secondary - setup AP MTRR state
*
* Yes, this code is exactly the same as the set_mtrr code, except for the
* piece in the middle - you set all the ranges at once, instead of one
* register at a time.
* Shoot me.
*/
void __init mtrr_init_secondary_cpu(void)
{
unsigned long flags;
if (!mtrr_if || !mtrr_if->init_secondary) {
/* I see no MTRRs I can support in SMP mode... */
printk("mtrr: SMP support incomplete for this vendor\n");
return;
}
local_irq_save(flags);
mtrr_if->init_secondary();
local_irq_restore(flags);
}
/**
* mtrr_final_init - finalize initialization sequence.
*/
static int __init mtrr_finalize_state(void)
{
if (use_intel()) {
finalize_mtrr_state();
mtrr_state_warn();
}
return 0;
}
arch_initcall(mtrr_finalize_state);
@@ -45,7 +45,6 @@
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
-#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/smpboot.h>
@@ -403,12 +402,6 @@ void __init smp_callin(void)
	local_irq_enable();
-#ifdef CONFIG_MTRR
-	/*
-	 * Must be done before calibration delay is computed
-	 */
-	mtrr_init_secondary_cpu ();
-#endif
	/*
	 * Get our bogomips.
	 */
...
@@ -115,7 +115,7 @@ static inline unsigned long do_fast_gettimeoffset(void)
	return delay_at_last_interrupt + edx;
}
-#define TICK_SIZE tick
#define TICK_SIZE (tick_nsec / 1000)
spinlock_t i8253_lock = SPIN_LOCK_UNLOCKED;
EXPORT_SYMBOL(i8253_lock);
@@ -280,7 +280,7 @@ void do_gettimeofday(struct timeval *tv)
		usec += lost * (1000000 / HZ);
	}
	sec = xtime.tv_sec;
-	usec += xtime.tv_usec;
	usec += (xtime.tv_nsec / 1000);
	read_unlock_irqrestore(&xtime_lock, flags);
	while (usec >= 1000000) {
@@ -309,7 +309,8 @@ void do_settimeofday(struct timeval *tv)
		tv->tv_sec--;
	}
-	xtime = *tv;
	xtime.tv_sec = tv->tv_sec;
	xtime.tv_nsec = (tv->tv_usec * 1000);
	time_adjust = 0;	/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
@@ -437,8 +438,8 @@ static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *reg
 */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660 &&
-	    xtime.tv_usec >= 500000 - ((unsigned) tick) / 2 &&
-	    xtime.tv_usec <= 500000 + ((unsigned) tick) / 2) {
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
@@ -655,7 +656,7 @@ void __init time_init(void)
	extern int x86_udelay_tsc;
	xtime.tv_sec = get_cmos_time();
-	xtime.tv_usec = 0;
	xtime.tv_nsec = 0;
	/*
	 * If we have APM enabled or the CPU clock speed is variable
...
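These timekeeping hunks switch xtime from microseconds (tv_usec) to nanoseconds (tv_nsec): do_settimeofday() now multiplies the caller's value by 1000 and every reader divides by 1000, including the RTC-update window test that now uses TICK_SIZE. A quick user-space check of both conversions; the tick length is assumed to be 10000 us (HZ=100) purely for illustration:

#include <stdio.h>

int main(void)
{
	long tv_usec = 437512;			/* value a caller would pass in a timeval */
	long tv_nsec = tv_usec * 1000;		/* what do_settimeofday() now stores */
	long back_to_usec = tv_nsec / 1000;	/* what do_gettimeofday() now reads back */

	unsigned int tick_size = 10000;		/* assumed usec per tick (HZ=100) */
	int in_rtc_window = (back_to_usec >= 500000 - tick_size / 2) &&
			    (back_to_usec <= 500000 + tick_size / 2);

	printf("stored %ld ns, read back %ld us, RTC update window: %s\n",
	       tv_nsec, back_to_usec, in_rtc_window ? "yes" : "no");
	return 0;
}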
@@ -2,29 +2,19 @@
# ACPI Configuration
#
-if [ "$CONFIG_X86" = "y" ]; then
mainmenu_option next_comment
comment 'ACPI Support'
-	mainmenu_option next_comment
-	comment 'ACPI Support'
if [ "$CONFIG_X86" = "y" ]; then
	bool 'ACPI Support' CONFIG_ACPI
	if [ "$CONFIG_ACPI" = "y" ]; then
		if [ "$CONFIG_X86_LOCAL_APIC" = "y" ]; then
			bool 'CPU Enumeration Only' CONFIG_ACPI_HT_ONLY
		fi
-		if [ "$CONFIG_ACPI_HT_ONLY" = "y" ]; then
-			define_bool CONFIG_ACPI_BOOT y
-		else
		define_bool CONFIG_ACPI_BOOT y
-			define_bool CONFIG_ACPI_BUS y
-			define_bool CONFIG_ACPI_INTERPRETER y
		if [ "$CONFIG_ACPI_HT_ONLY" != "y" ]; then
-			define_bool CONFIG_ACPI_EC y
-			define_bool CONFIG_ACPI_POWER y
-			if [ "$CONFIG_PCI" = "y" ]; then
-				define_bool CONFIG_ACPI_PCI y
-			fi
-			define_bool CONFIG_ACPI_SLEEP $CONFIG_SOFTWARE_SUSPEND
-			define_bool CONFIG_ACPI_SYSTEM y
			tristate ' AC Adapter' CONFIG_ACPI_AC
			tristate ' Battery' CONFIG_ACPI_BATTERY
			tristate ' Button' CONFIG_ACPI_BUTTON
@@ -36,19 +26,21 @@ if [ "$CONFIG_X86" = "y" ]; then
			fi
			tristate ' Toshiba Laptop Extras' CONFIG_ACPI_TOSHIBA
			bool ' Debug Statements' CONFIG_ACPI_DEBUG
			define_bool CONFIG_ACPI_BOOT y
			define_bool CONFIG_ACPI_BUS y
			define_bool CONFIG_ACPI_INTERPRETER y
			define_bool CONFIG_ACPI_EC y
			define_bool CONFIG_ACPI_POWER y
			define_bool CONFIG_ACPI_PCI $CONFIG_PCI
			define_bool CONFIG_ACPI_SLEEP $CONFIG_SOFTWARE_SUSPEND
			define_bool CONFIG_ACPI_SYSTEM y
		fi
	fi
-	endmenu
fi
if [ "$CONFIG_IA64" = "y" ]; then
	if [ "$CONFIG_IA64_SGI_SN" = "y" ]; then
-		mainmenu_option next_comment
-		comment 'ACPI Support'
		define_bool CONFIG_ACPI y
		define_bool CONFIG_ACPI_EFI y
		define_bool CONFIG_ACPI_BOOT y
@@ -62,22 +54,9 @@ if [ "$CONFIG_IA64" = "y" ]; then
		define_bool CONFIG_ACPI_PROCESSOR n
		define_bool CONFIG_ACPI_THERMAL n
		define_bool CONFIG_ACPI_NUMA y
-		endmenu
	fi
	if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
-		mainmenu_option next_comment
-		comment 'ACPI Support'
-		if [ "$CONFIG_PCI" = "y" ]; then
-			define_bool CONFIG_ACPI_PCI y
-		fi
-		define_bool CONFIG_ACPI y
-		define_bool CONFIG_ACPI_EFI y
-		define_bool CONFIG_ACPI_BOOT y
-		define_bool CONFIG_ACPI_BUS y
-		define_bool CONFIG_ACPI_INTERPRETER y
-		define_bool CONFIG_ACPI_POWER y
-		define_bool CONFIG_ACPI_SYSTEM y
		tristate ' Button' CONFIG_ACPI_BUTTON
		tristate ' Fan' CONFIG_ACPI_FAN
		tristate ' Processor' CONFIG_ACPI_PROCESSOR
@@ -86,7 +65,15 @@ if [ "$CONFIG_IA64" = "y" ]; then
			dep_bool ' NUMA support' CONFIG_ACPI_NUMA $CONFIG_NUMA
		fi
		bool ' Debug Statements' CONFIG_ACPI_DEBUG
-		endmenu
		define_bool CONFIG_ACPI_PCI $CONFIG_PCI
		define_bool CONFIG_ACPI y
		define_bool CONFIG_ACPI_EFI y
		define_bool CONFIG_ACPI_BOOT y
		define_bool CONFIG_ACPI_BUS y
		define_bool CONFIG_ACPI_INTERPRETER y
		define_bool CONFIG_ACPI_POWER y
		define_bool CONFIG_ACPI_SYSTEM y
	fi
fi
endmenu
@@ -114,6 +114,7 @@ EXPORT_SYMBOL(acpi_evaluate_reference);
#ifdef CONFIG_ACPI_BUS
EXPORT_SYMBOL(acpi_fadt);
EXPORT_SYMBOL(acpi_walk_namespace);
EXPORT_SYMBOL(acpi_root_dir);
EXPORT_SYMBOL(acpi_bus_get_device);
EXPORT_SYMBOL(acpi_bus_get_status);
@@ -127,4 +128,3 @@ EXPORT_SYMBOL(acpi_bus_scan);
EXPORT_SYMBOL(acpi_init);
#endif /*CONFIG_ACPI_BUS*/
@@ -162,7 +162,7 @@ acpi_battery_get_info (
	}
end:
-	kfree(buffer.pointer);
	acpi_os_free(buffer.pointer);
	if (!result)
		(*bif) = (struct acpi_battery_info *) data.pointer;
@@ -223,7 +223,7 @@ acpi_battery_get_status (
	}
end:
-	kfree(buffer.pointer);
	acpi_os_free(buffer.pointer);
	if (!result)
		(*bst) = (struct acpi_battery_status *) data.pointer;
...
@@ -32,7 +32,9 @@
#include <linux/sched.h>
#include <linux/pm.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_X86
#include <asm/mpspec.h>
#endif
#include "acpi_bus.h"
#include "acpi_drivers.h"
#include "include/acinterp.h"	/* for acpi_ex_eisa_id_to_string() */
@@ -665,7 +667,7 @@ acpi_bus_generate_event (
	if (!event_is_open)
		return_VALUE(0);
-	event = kmalloc(sizeof(struct acpi_bus_event), GFP_KERNEL);
	event = kmalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC);
	if (!event)
		return_VALUE(-ENOMEM);
@@ -1967,11 +1969,13 @@ acpi_bus_init (void)
		goto error1;
	}
#ifdef CONFIG_X86
	/* Ensure the SCI is set to level-triggered, active-low */
	if (acpi_ioapic)
		mp_override_legacy_irq(acpi_fadt.sci_int, 3, 3, acpi_fadt.sci_int);
	else
		eisa_set_level_irq(acpi_fadt.sci_int);
#endif
	status = acpi_enable_subsystem(ACPI_FULL_INITIALIZATION);
	if (ACPI_FAILURE(status)) {
...
@@ -70,7 +70,6 @@ struct acpi_button {
static struct proc_dir_entry *acpi_button_dir = NULL;
static int
acpi_button_read_info (
	char *page,
...
#
# Makefile for all Linux ACPI interpreter subdirectories
#
obj-$(CONFIG_ACPI_INTERPRETER) := $(patsubst %.c,%.o,$(wildcard *.c))
EXTRA_CFLAGS += $(ACPI_CFLAGS)
include $(TOPDIR)/Rules.make
/******************************************************************************
*
* Module Name: dbhistry - debugger HISTORY command
* $Revision: 25 $
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2002, R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "acpi.h"
#include "acdebug.h"
#ifdef ACPI_DEBUGGER
#define _COMPONENT ACPI_CA_DEBUGGER
ACPI_MODULE_NAME ("dbhistry")
#define HI_NO_HISTORY 0
#define HI_RECORD_HISTORY 1
#define HISTORY_SIZE 20
typedef struct history_info
{
NATIVE_CHAR command[80];
u32 cmd_num;
} HISTORY_INFO;
static HISTORY_INFO acpi_gbl_history_buffer[HISTORY_SIZE];
static u16 acpi_gbl_lo_history = 0;
static u16 acpi_gbl_num_history = 0;
static u16 acpi_gbl_next_history_index = 0;
static u32 acpi_gbl_next_cmd_num = 1;
/*******************************************************************************
*
* FUNCTION: Acpi_db_add_to_history
*
* PARAMETERS: Command_line - Command to add
*
* RETURN: None
*
* DESCRIPTION: Add a command line to the history buffer.
*
******************************************************************************/
void
acpi_db_add_to_history (
NATIVE_CHAR *command_line)
{
/* Put command into the next available slot */
ACPI_STRCPY (acpi_gbl_history_buffer[acpi_gbl_next_history_index].command, command_line);
acpi_gbl_history_buffer[acpi_gbl_next_history_index].cmd_num = acpi_gbl_next_cmd_num;
/* Adjust indexes */
if ((acpi_gbl_num_history == HISTORY_SIZE) &&
(acpi_gbl_next_history_index == acpi_gbl_lo_history)) {
acpi_gbl_lo_history++;
if (acpi_gbl_lo_history >= HISTORY_SIZE) {
acpi_gbl_lo_history = 0;
}
}
acpi_gbl_next_history_index++;
if (acpi_gbl_next_history_index >= HISTORY_SIZE) {
acpi_gbl_next_history_index = 0;
}
acpi_gbl_next_cmd_num++;
if (acpi_gbl_num_history < HISTORY_SIZE) {
acpi_gbl_num_history++;
}
}
/*******************************************************************************
*
* FUNCTION: Acpi_db_display_history
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Display the contents of the history buffer
*
******************************************************************************/
void
acpi_db_display_history (void)
{
NATIVE_UINT i;
u16 history_index;
history_index = acpi_gbl_lo_history;
/* Dump entire history buffer */
for (i = 0; i < acpi_gbl_num_history; i++) {
acpi_os_printf ("%ld %s\n", acpi_gbl_history_buffer[history_index].cmd_num,
acpi_gbl_history_buffer[history_index].command);
history_index++;
if (history_index >= HISTORY_SIZE) {
history_index = 0;
}
}
}
/*******************************************************************************
*
* FUNCTION: Acpi_db_get_from_history
*
* PARAMETERS: Command_num_arg - String containing the number of the
* command to be retrieved
*
* RETURN: None
*
* DESCRIPTION: Get a command from the history buffer
*
******************************************************************************/
NATIVE_CHAR *
acpi_db_get_from_history (
NATIVE_CHAR *command_num_arg)
{
NATIVE_UINT i;
u16 history_index;
u32 cmd_num;
if (command_num_arg == NULL) {
cmd_num = acpi_gbl_next_cmd_num - 1;
}
else {
cmd_num = ACPI_STRTOUL (command_num_arg, NULL, 0);
}
/* Search history buffer */
history_index = acpi_gbl_lo_history;
for (i = 0; i < acpi_gbl_num_history; i++) {
if (acpi_gbl_history_buffer[history_index].cmd_num == cmd_num) {
/* Found the command, return it */
return (acpi_gbl_history_buffer[history_index].command);
}
history_index++;
if (history_index >= HISTORY_SIZE) {
history_index = 0;
}
}
acpi_os_printf ("Invalid history number: %d\n", history_index);
return (NULL);
}
#endif /* ACPI_DEBUGGER */
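The debugger history above is a fixed-size ring: next_history_index walks forward, and once HISTORY_SIZE commands are stored the oldest slot is reclaimed by advancing lo_history. A compact user-space model of just that index bookkeeping, with the command text and ACPI types left out and a deliberately tiny buffer so the wrap-around is visible:

#include <stdio.h>

#define HISTORY_SIZE 5	/* small so the wrap-around shows quickly */

static unsigned lo_history, num_history, next_history_index, next_cmd_num = 1;

static void add_to_history(void)
{
	/* Same index bookkeeping as acpi_db_add_to_history(). */
	if (num_history == HISTORY_SIZE && next_history_index == lo_history) {
		lo_history++;
		if (lo_history >= HISTORY_SIZE)
			lo_history = 0;
	}
	next_history_index++;
	if (next_history_index >= HISTORY_SIZE)
		next_history_index = 0;
	next_cmd_num++;
	if (num_history < HISTORY_SIZE)
		num_history++;
}

int main(void)
{
	for (int i = 0; i < 8; i++) {
		add_to_history();
		printf("cmd %u: lo=%u next=%u count=%u\n",
		       next_cmd_num - 1, lo_history, next_history_index, num_history);
	}
	return 0;
}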
@@ -288,7 +288,7 @@ acpi_pci_irq_derive (
	while (!irq && (bridge = bridge->bus->self)) {
		pin = (pin + PCI_SLOT(bridge->devfn)) % 4;
		irq = acpi_pci_irq_lookup(0, bridge->bus->number, PCI_SLOT(bridge->devfn), pin);
-	};
	}
	if (!irq) {
		ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Unable to derive IRQ for device %s\n", dev->slot_name));
...
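acpi_pci_irq_derive() walks up the bridge chain, rotating the interrupt pin at each hop with pin = (pin + PCI_SLOT(devfn)) % 4 before retrying the lookup on the parent bus. The swizzle itself is easy to model; the devfn values below are invented and pin is assumed to be 0-based (INTA == 0) at this point in the code:

#include <stdio.h>

#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)	/* same macro as the kernel uses */

int main(void)
{
	/* A device behind two bridges: walk toward the root, swizzling the pin. */
	unsigned int bridge_devfn[] = { (4 << 3) | 0, (30 << 3) | 0 };	/* invented */
	unsigned int pin = 1;						/* INTB, 0-based */

	for (int i = 0; i < 2; i++) {
		pin = (pin + PCI_SLOT(bridge_devfn[i])) % 4;
		printf("after bridge %d (slot %u): pin = INT%c\n",
		       i, PCI_SLOT(bridge_devfn[i]), 'A' + pin);
	}
	return 0;
}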
@@ -23,8 +23,6 @@
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
-#define ACPI_C
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
...
@@ -266,8 +266,8 @@
#include <linux/proc_fs.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/fcntl.h>
#include <asm/fcntl.h>
#include <asm/uaccess.h>
/* used to tell the module to turn on full debugging messages */
...