Commit 3b520b23 authored by Shaohua Li, committed by Linus Torvalds

[PATCH] MTRR suspend/resume cleanup

There has been some discussion about solving the SMP MTRR suspend/resume
breakage, but I didn't find a patch for it.  This is an attempt at one.  The
basic idea is to move MTRR initialization into cpu_identify for all APs (so it
also works for CPU hotplug).  For the BP, restore_processor_state is
responsible for restoring the MTRRs.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 01d29936
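A condensed sketch of the flow this patch sets up may help before reading the diff: the boot processor (BP) initializes the MTRR driver from identify_cpu() via mtrr_bp_init(), each application processor (AP) replays the BP's saved state via mtrr_ap_init(), and __restore_processor_state() calls mtrr_ap_init() again on resume. The following is a hypothetical, plain-C illustration of that dispatch, not kernel code; the function bodies are stand-ins.

/*
 * Hypothetical, simplified userspace model of the flow this patch sets up.
 * The names mirror the patch; the bodies are stand-ins, not kernel code.
 */
#include <stdio.h>

struct cpuinfo { int id; };
static struct cpuinfo boot_cpu_data = { 0 };

static void mtrr_bp_init(void) { puts("BP: read MTRR state from firmware, init driver"); }
static void mtrr_ap_init(void) { puts("AP/resume: re-apply the saved MTRR state"); }

/* identify_cpu(): the BP takes the full init path, every AP copies the BP's state */
static void identify_cpu(struct cpuinfo *c)
{
	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
}

/* __restore_processor_state(): after suspend, the woken CPU replays the MTRRs */
static void restore_processor_state(void)
{
	mtrr_ap_init();
}

int main(void)
{
	struct cpuinfo ap = { 1 };

	identify_cpu(&boot_cpu_data);	/* boot processor */
	identify_cpu(&ap);		/* secondary or hot-added CPU */
	restore_processor_state();	/* resume path */
	return 0;
}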
@@ -435,6 +435,11 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
 	if (c == &boot_cpu_data)
 		sysenter_setup();
 	enable_sep_cpu();
+
+	if (c == &boot_cpu_data)
+		mtrr_bp_init();
+	else
+		mtrr_ap_init();
 }
 
 #ifdef CONFIG_X86_HT
@@ -67,13 +67,6 @@ void __init get_mtrr_state(void)
 	mtrr_state.enabled = (lo & 0xc00) >> 10;
 }
 
-/*  Free resources associated with a struct mtrr_state  */
-void __init finalize_mtrr_state(void)
-{
-	kfree(mtrr_state.var_ranges);
-	mtrr_state.var_ranges = NULL;
-}
-
 /*  Some BIOS's are fucked and don't set all MTRRs the same!  */
 void __init mtrr_state_warn(void)
 {
@@ -334,6 +327,9 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
 */
 {
 	unsigned long flags;
+	struct mtrr_var_range *vr;
+
+	vr = &mtrr_state.var_ranges[reg];
 
 	local_irq_save(flags);
 	prepare_set();
@@ -342,11 +338,15 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
 		/* The invalid bit is kept in the mask, so we simply clear the
 		   relevant mask register to disable a range. */
 		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
+		memset(vr, 0, sizeof(struct mtrr_var_range));
 	} else {
-		mtrr_wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
-		    (base & size_and_mask) >> (32 - PAGE_SHIFT));
-		mtrr_wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
-		    (-size & size_and_mask) >> (32 - PAGE_SHIFT));
+		vr->base_lo = base << PAGE_SHIFT | type;
+		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
+		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
+		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
+
+		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
+		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
 	}
 
 	post_set();
@@ -332,6 +332,8 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 		error = -EINVAL;
 
+	/* No CPU hotplug when we change MTRR entries */
+	lock_cpu_hotplug();
 	/*  Search for existing MTRR  */
 	down(&main_lock);
 	for (i = 0; i < num_var_ranges; ++i) {
@@ -372,6 +374,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 	error = i;
  out:
 	up(&main_lock);
+	unlock_cpu_hotplug();
 	return error;
 }
 
@@ -461,6 +464,8 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 		return -ENXIO;
 
 	max = num_var_ranges;
+	/* No CPU hotplug when we change MTRR entries */
+	lock_cpu_hotplug();
 	down(&main_lock);
 	if (reg < 0) {
 		/*  Search for existing MTRR  */
@@ -501,6 +506,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 	error = reg;
  out:
 	up(&main_lock);
+	unlock_cpu_hotplug();
 	return error;
 }
 
 /**
@@ -544,21 +550,9 @@ static void __init init_ifs(void)
 	centaur_init_mtrr();
 }
 
-static void __init init_other_cpus(void)
-{
-	if (use_intel())
-		get_mtrr_state();
-
-	/* bring up the other processors */
-	set_mtrr(~0U,0,0,0);
-
-	if (use_intel()) {
-		finalize_mtrr_state();
-		mtrr_state_warn();
-	}
-}
-
-
+/* The suspend/resume methods are only for CPU without MTRR. CPU using generic
+ * MTRR driver doesn't require this
+ */
 struct mtrr_value {
 	mtrr_type	ltype;
 	unsigned long	lbase;
@@ -611,13 +605,13 @@ static struct sysdev_driver mtrr_sysdev_driver = {
 
 
 /**
- * mtrr_init - initialize mtrrs on the boot CPU
+ * mtrr_bp_init - initialize mtrrs on the boot CPU
  *
  * This needs to be called early; before any of the other CPUs are
  * initialized (i.e. before smp_init()).
  *
  */
-static int __init mtrr_init(void)
+void __init mtrr_bp_init(void)
 {
 	init_ifs();
 
@@ -674,12 +668,48 @@ static int __init mtrr_init(void)
 	if (mtrr_if) {
 		set_num_var_ranges();
 		init_table();
-		init_other_cpus();
-
-		return sysdev_driver_register(&cpu_sysdev_class,
-					      &mtrr_sysdev_driver);
+		if (use_intel())
+			get_mtrr_state();
 	}
-	return -ENXIO;
 }
 
-subsys_initcall(mtrr_init);
+void mtrr_ap_init(void)
+{
+	unsigned long flags;
+
+	if (!mtrr_if || !use_intel())
+		return;
+	/*
+	 * Ideally we should hold main_lock here to avoid mtrr entries changed,
+	 * but this routine will be called in cpu boot time, holding the lock
+	 * breaks it. This routine is called in two cases: 1.very earily time
+	 * of software resume, when there absolutely isn't mtrr entry changes;
+	 * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to
+	 * prevent mtrr entry changes
+	 */
+	local_irq_save(flags);
+	mtrr_if->set_all();
+	local_irq_restore(flags);
+}
+
+static int __init mtrr_init_finialize(void)
+{
+	if (!mtrr_if)
+		return 0;
+	if (use_intel())
+		mtrr_state_warn();
+	else {
+		/* The CPUs haven't MTRR and seemes not support SMP. They have
+		 * specific drivers, we use a tricky method to support
+		 * suspend/resume for them.
+		 * TBD: is there any system with such CPU which supports
+		 * suspend/resume? if no, we should remove the code.
+		 */
+		sysdev_driver_register(&cpu_sysdev_class,
+				       &mtrr_sysdev_driver);
+	}
+	return 0;
+}
+
+subsys_initcall(mtrr_init_finialize);
@@ -91,7 +91,6 @@ extern struct mtrr_ops * mtrr_if;
 
 extern unsigned int num_var_ranges;
 
-void finalize_mtrr_state(void);
 void mtrr_state_warn(void);
 char *mtrr_attrib_to_str(int x);
 void mtrr_wrmsr(unsigned, unsigned, unsigned);
@@ -137,6 +137,7 @@ void __restore_processor_state(struct saved_context *ctxt)
 
 	fix_processor_context();
 	do_fpu_end();
+	mtrr_ap_init();
 }
 
 void restore_processor_state(void)
@@ -1076,6 +1076,10 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_MCE
 	mcheck_init(c);
 #endif
+	if (c == &boot_cpu_data)
+		mtrr_bp_init();
+	else
+		mtrr_ap_init();
 #ifdef CONFIG_NUMA
 	if (c != &boot_cpu_data)
 		numa_add_cpu(c - cpu_data);
@@ -119,6 +119,7 @@ void __restore_processor_state(struct saved_context *ctxt)
 
 	fix_processor_context();
 	do_fpu_end();
+	mtrr_ap_init();
 }
 
 void restore_processor_state(void)
@@ -694,4 +694,12 @@ extern unsigned long boot_option_idle_override;
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
+#ifdef CONFIG_MTRR
+extern void mtrr_ap_init(void);
+extern void mtrr_bp_init(void);
+#else
+#define mtrr_ap_init() do {} while (0)
+#define mtrr_bp_init() do {} while (0)
+#endif
+
 #endif /* __ASM_I386_PROCESSOR_H */
@@ -15,6 +15,13 @@ extern void pda_init(int);
 extern void early_idt_handler(void);
 
 extern void mcheck_init(struct cpuinfo_x86 *c);
+#ifdef CONFIG_MTRR
+extern void mtrr_ap_init(void);
+extern void mtrr_bp_init(void);
+#else
+#define mtrr_ap_init() do {} while (0)
+#define mtrr_bp_init() do {} while (0)
+#endif
 extern void init_memory_mapping(unsigned long start, unsigned long end);
 
 extern void system_call(void);