Commit 19995e25 authored by Pavel Machek, committed by Linus Torvalds

[PATCH] swsusp: preparation for smp support & fix device suspending

It fixes the levels used when calling into the driver model, puts devices
to sleep before powering down (so that emergency parking does not happen),
and actually introduces SMP support, though it is disabled for now.  Also,
nobody should call freeze_processes() where it is not implemented; we now
BUG() in that case -- we do not want Heisenbugs.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 939206e7
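
The x86-64 refactoring below splits save_processor_state()/restore_processor_state() into __save_processor_state(ctxt)/__restore_processor_state(ctxt), which operate on a caller-supplied struct saved_context rather than the single global saved_context. A minimal sketch of why that matters for SMP follows (illustration only, not part of the patch; park_cpu() is a hypothetical name, and the real code is smp_pause() in the new smp.c further down):

#include <linux/suspend.h>      /* __save_processor_state() and friends */
#include <asm/suspend.h>        /* struct saved_context (arch header, as on i386/x86-64) */

/* Hypothetical helper, for illustration only. */
static void park_cpu(void *unused)
{
        /* Each CPU keeps its context on its own stack instead of all
         * CPUs sharing the one global saved_context. */
        struct saved_context ctxt;

        __save_processor_state(&ctxt);
        /* ... spin here until the boot CPU lets this CPU continue ... */
        __restore_processor_state(&ctxt);
}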
@@ -35,40 +35,45 @@ unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_con
 unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
 unsigned long saved_context_eflags;
 
-void save_processor_state (void)
+void __save_processor_state(struct saved_context *ctxt)
 {
         kernel_fpu_begin();
 
         /*
          * descriptor tables
          */
-        asm volatile ("sgdt %0" : "=m" (saved_context.gdt_limit));
-        asm volatile ("sidt %0" : "=m" (saved_context.idt_limit));
-        asm volatile ("sldt %0" : "=m" (saved_context.ldt));
-        asm volatile ("str %0" : "=m" (saved_context.tr));
+        asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
+        asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
+        asm volatile ("sldt %0" : "=m" (ctxt->ldt));
+        asm volatile ("str %0" : "=m" (ctxt->tr));
 
         /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
         /* EFER should be constant for kernel version, no need to handle it. */
 
         /*
          * segment registers
          */
-        asm volatile ("movw %%ds, %0" : "=m" (saved_context.ds));
-        asm volatile ("movw %%es, %0" : "=m" (saved_context.es));
-        asm volatile ("movw %%fs, %0" : "=m" (saved_context.fs));
-        asm volatile ("movw %%gs, %0" : "=m" (saved_context.gs));
-        asm volatile ("movw %%ss, %0" : "=m" (saved_context.ss));
+        asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
+        asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
+        asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
+        asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
+        asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
 
-        rdmsrl(MSR_FS_BASE, saved_context.fs_base);
-        rdmsrl(MSR_GS_BASE, saved_context.gs_base);
-        rdmsrl(MSR_KERNEL_GS_BASE, saved_context.gs_kernel_base);
+        rdmsrl(MSR_FS_BASE, ctxt->fs_base);
+        rdmsrl(MSR_GS_BASE, ctxt->gs_base);
+        rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
 
         /*
          * control registers
          */
-        asm volatile ("movq %%cr0, %0" : "=r" (saved_context.cr0));
-        asm volatile ("movq %%cr2, %0" : "=r" (saved_context.cr2));
-        asm volatile ("movq %%cr3, %0" : "=r" (saved_context.cr3));
-        asm volatile ("movq %%cr4, %0" : "=r" (saved_context.cr4));
+        asm volatile ("movq %%cr0, %0" : "=r" (ctxt->cr0));
+        asm volatile ("movq %%cr2, %0" : "=r" (ctxt->cr2));
+        asm volatile ("movq %%cr3, %0" : "=r" (ctxt->cr3));
+        asm volatile ("movq %%cr4, %0" : "=r" (ctxt->cr4));
+}
+
+void save_processor_state(void)
+{
+        __save_processor_state(&saved_context);
 }
 
 static void
@@ -80,42 +85,47 @@ do_fpu_end(void)
         mxcsr_feature_mask_init();
 }
 
-void restore_processor_state(void)
+void __restore_processor_state(struct saved_context *ctxt)
 {
         /*
          * control registers
          */
-        asm volatile ("movq %0, %%cr4" :: "r" (saved_context.cr4));
-        asm volatile ("movq %0, %%cr3" :: "r" (saved_context.cr3));
-        asm volatile ("movq %0, %%cr2" :: "r" (saved_context.cr2));
-        asm volatile ("movq %0, %%cr0" :: "r" (saved_context.cr0));
+        asm volatile ("movq %0, %%cr4" :: "r" (ctxt->cr4));
+        asm volatile ("movq %0, %%cr3" :: "r" (ctxt->cr3));
+        asm volatile ("movq %0, %%cr2" :: "r" (ctxt->cr2));
+        asm volatile ("movq %0, %%cr0" :: "r" (ctxt->cr0));
 
         /*
          * segment registers
          */
-        asm volatile ("movw %0, %%ds" :: "r" (saved_context.ds));
-        asm volatile ("movw %0, %%es" :: "r" (saved_context.es));
-        asm volatile ("movw %0, %%fs" :: "r" (saved_context.fs));
-        load_gs_index(saved_context.gs);
-        asm volatile ("movw %0, %%ss" :: "r" (saved_context.ss));
+        asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
+        asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
+        asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
+        load_gs_index(ctxt->gs);
+        asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
 
-        wrmsrl(MSR_FS_BASE, saved_context.fs_base);
-        wrmsrl(MSR_GS_BASE, saved_context.gs_base);
-        wrmsrl(MSR_KERNEL_GS_BASE, saved_context.gs_kernel_base);
+        wrmsrl(MSR_FS_BASE, ctxt->fs_base);
+        wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+        wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
 
         /*
          * now restore the descriptor tables to their proper values
          * ltr is done i fix_processor_context().
          */
-        asm volatile ("lgdt %0" :: "m" (saved_context.gdt_limit));
-        asm volatile ("lidt %0" :: "m" (saved_context.idt_limit));
-        asm volatile ("lldt %0" :: "m" (saved_context.ldt));
+        asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
+        asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
+        asm volatile ("lldt %0" :: "m" (ctxt->ldt));
 
         fix_processor_context();
         do_fpu_end();
 }
+
+void restore_processor_state(void)
+{
+        __restore_processor_state(&saved_context);
+}
 
 void fix_processor_context(void)
 {
         int cpu = smp_processor_id();
@@ -36,9 +36,6 @@ struct saved_context {
                 : /* no output */ \
                 :"r" ((thread)->debugreg[register]))
 
-extern void save_processor_state(void);
-extern void restore_processor_state(void);
-
 #ifdef CONFIG_ACPI_SLEEP
 extern unsigned long saved_eip;
 extern unsigned long saved_esp;
@@ -38,7 +38,6 @@ extern unsigned long saved_context_r08, saved_context_r09, saved_context_r10, sa
 extern unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
 extern unsigned long saved_context_eflags;
 
 #define loaddebug(thread,register) \
                 __asm__("movq %0,%%db" #register \
                 : /* no output */ \
@@ -67,24 +67,27 @@ extern int pm_prepare_console(void);
 extern void pm_restore_console(void);
 
 #else
-static inline void refrigerator(unsigned long flag)
-{
-}
-static inline int freeze_processes(void)
-{
-        return 0;
-}
-static inline void thaw_processes(void)
-{
-}
+static inline void refrigerator(unsigned long flag) {}
 #endif  /* CONFIG_PM */
 
+#ifdef CONFIG_SMP
+extern void disable_nonboot_cpus(void);
+extern void enable_nonboot_cpus(void);
+#else
+static inline void disable_nonboot_cpus(void) {}
+static inline void enable_nonboot_cpus(void) {}
+#endif
+
 asmlinkage void do_magic(int is_resume);
 asmlinkage void do_magic_resume_1(void);
 asmlinkage void do_magic_resume_2(void);
 asmlinkage void do_magic_suspend_1(void);
 asmlinkage void do_magic_suspend_2(void);
 
+void save_processor_state(void);
+void restore_processor_state(void);
+struct saved_context;
+void __save_processor_state(struct saved_context *ctxt);
+void __restore_processor_state(struct saved_context *ctxt);
+
 #endif /* _LINUX_SWSUSP_H */
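
With the declarations above in place, the suspend path ends up with the ordering sketched below (illustration only, not part of the patch: error handling and the process freezer are omitted, suspend_sketch() is a made-up name, and device_suspend() comes from the driver core). It matches the software_suspend() hunks further down: non-boot CPUs are parked first, devices are put to sleep at level 3, and everything is undone in reverse order.

#include <linux/kernel.h>       /* printk() */
#include <linux/device.h>       /* device_suspend() */
#include <linux/suspend.h>      /* disable_nonboot_cpus(), do_magic(), ... */

/* Hypothetical sketch of the suspend ordering after this patch. */
static int suspend_sketch(void)
{
        int res;

        disable_nonboot_cpus();                 /* park every CPU but the boot CPU */
        printk("Suspending devices... ");
        if ((res = device_suspend(3)) == 0) {   /* devices go to sleep first */
                do_magic(0);                    /* write the image / power off */
        }
        thaw_processes();
        enable_nonboot_cpus();                  /* restart the parked CPUs */
        return res;
}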
 obj-y                           := main.o process.o console.o pm.o
+obj-$(CONFIG_SMP)               += smp.o
 obj-$(CONFIG_SOFTWARE_SUSPEND)  += swsusp.o
 obj-$(CONFIG_PM_DISK)           += disk.o pmdisk.o
+/*
+ * drivers/power/smp.c - Functions for stopping other CPUs.
+ *
+ * Copyright 2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2002-2003 Nigel Cunningham <ncunningham@clear.net.nz>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#undef DEBUG
+
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/suspend.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+#include <asm/tlbflush.h>
+
+static atomic_t cpu_counter, freeze;
+
+static void smp_pause(void * data)
+{
+        struct saved_context ctxt;
+        __save_processor_state(&ctxt);
+        printk("Sleeping in:\n");
+        dump_stack();
+        atomic_inc(&cpu_counter);
+        while (atomic_read(&freeze)) {
+                /* FIXME: restore takes place at random piece inside this.
+                   This should probably be written in assembly, and
+                   preserve general-purpose registers, too
+                   What about stack? We may need to move to new stack here.
+                   This should better be ran with interrupts disabled.
+                 */
+                cpu_relax();
+                barrier();
+        }
+        atomic_dec(&cpu_counter);
+        __restore_processor_state(&ctxt);
+}
+
+cpumask_t oldmask;
+
+void disable_nonboot_cpus(void)
+{
+        printk("Freezing CPUs (at %d)", smp_processor_id());
+        oldmask = current->cpus_allowed;
+        set_cpus_allowed(current, cpumask_of_cpu(0));
+        current->state = TASK_INTERRUPTIBLE;
+        schedule_timeout(HZ);
+        printk("...");
+        BUG_ON(smp_processor_id() != 0);
+
+        /* FIXME: for this to work, all the CPUs must be running
+         * "idle" thread (or we deadlock). Is that guaranteed? */
+
+        atomic_set(&cpu_counter, 0);
+        atomic_set(&freeze, 1);
+        smp_call_function(smp_pause, NULL, 0, 0);
+        while (atomic_read(&cpu_counter) < (num_online_cpus() - 1)) {
+                cpu_relax();
+                barrier();
+        }
+        printk("ok\n");
+}
+
+void enable_nonboot_cpus(void)
+{
+        printk("Restarting CPUs");
+        atomic_set(&freeze, 0);
+        while (atomic_read(&cpu_counter)) {
+                cpu_relax();
+                barrier();
+        }
+        printk("...");
+        set_cpus_allowed(current, oldmask);
+        schedule();
+        printk("ok\n");
+}
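
The freeze/cpu_counter handshake above is compact but easy to misread, so here is a stand-alone userspace model of just that handshake (illustration only, not part of the patch). C11 atomics and pthreads stand in for the kernel's atomic_t and smp_call_function(), NR_OTHER_CPUS and paused_cpu() are made-up names, and no processor state is saved. The ordering is the same: raise freeze, wait for everyone to check in through cpu_counter, do the work, drop freeze, then wait for the counter to drain.

/* Build with: cc -pthread handshake_model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NR_OTHER_CPUS 3

static atomic_int cpu_counter, freeze;

static void *paused_cpu(void *arg)              /* plays the role of smp_pause() */
{
        long id = (long)arg;

        printf("cpu %ld: sleeping\n", id);
        atomic_fetch_add(&cpu_counter, 1);      /* tell the boot CPU we are parked */
        while (atomic_load(&freeze))            /* spin until it thaws us */
                ;
        atomic_fetch_sub(&cpu_counter, 1);
        printf("cpu %ld: running again\n", id);
        return NULL;
}

int main(void)
{
        pthread_t cpus[NR_OTHER_CPUS];

        /* disable_nonboot_cpus(): park everyone and wait until they check in */
        atomic_store(&freeze, 1);
        for (long i = 0; i < NR_OTHER_CPUS; i++)
                pthread_create(&cpus[i], NULL, paused_cpu, (void *)(i + 1));
        while (atomic_load(&cpu_counter) < NR_OTHER_CPUS)
                ;
        printf("boot cpu: all others parked, suspending...\n");
        sleep(1);                               /* suspend/resume would happen here */

        /* enable_nonboot_cpus(): release them and wait until they have left */
        atomic_store(&freeze, 0);
        while (atomic_load(&cpu_counter) > 0)
                ;
        for (long i = 0; i < NR_OTHER_CPUS; i++)
                pthread_join(cpus[i], NULL);
        printf("boot cpu: done\n");
        return 0;
}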
@@ -696,6 +696,7 @@ static void suspend_power_down(void)
         else
 #endif
         {
+                device_suspend(3);
                 device_shutdown();
                 machine_power_off();
         }
@@ -716,7 +717,7 @@ asmlinkage void do_magic_resume_1(void)
         mb();
         spin_lock_irq(&suspend_pagedir_lock);   /* Done to disable interrupts */
 
-        device_power_down(4);
+        device_power_down(3);
         PRINTK( "Waiting for DMAs to settle down...\n");
         mdelay(1000);   /* We do not want some readahead with DMA to corrupt our memory, right?
                            Do it with disabled interrupts for best effect. That way, if some
@@ -785,7 +786,7 @@ asmlinkage void do_magic_suspend_2(void)
 {
         int is_problem;
         read_swapfiles();
-        device_power_down(4);
+        device_power_down(3);
         is_problem = suspend_prepare_image();
         device_power_up();
         spin_unlock_irq(&suspend_pagedir_lock);
@@ -802,7 +803,6 @@ asmlinkage void do_magic_suspend_2(void)
         barrier();
         mb();
         spin_lock_irq(&suspend_pagedir_lock);   /* Done to disable interrupts */
-        mdelay(1000);
         free_pages((unsigned long) pagedir_nosave, pagedir_order);
         spin_unlock_irq(&suspend_pagedir_lock);
@@ -839,9 +839,10 @@ int software_suspend(void)
                    need half of memory free. */
                 free_some_memory();
+                disable_nonboot_cpus();
 
                 /* Save state of all device drivers, and stop them. */
-                if ((res = device_suspend(4))==0)
+                printk("Suspending devices... ");
+                if ((res = device_suspend(3))==0) {
                         /* If stopping device drivers worked, we proceed basically into
                          * suspend_save_image.
                          *
@@ -852,7 +853,9 @@ int software_suspend(void)
                          * using normal kernel mechanism.
                          */
                         do_magic(0);
+                }
                 thaw_processes();
+                enable_nonboot_cpus();
         } else
                 res = -EBUSY;
         software_suspend_enabled = 1;
@@ -1192,7 +1195,9 @@ static int __init software_resume(void)
         printk( "resuming from %s\n", resume_file);
         if (read_suspend_image(resume_file, 0))
                 goto read_failure;
-        device_suspend(4);
+        /* FIXME: Should we stop processes here, just to be safer? */
+        disable_nonboot_cpus();
+        device_suspend(3);
         do_magic(1);
         panic("This never returns");