Commit 7ee18d67 authored by Andy Lutomirski, committed by Ingo Molnar

x86/power: Make restore_processor_context() sane

My previous attempt to fix a couple of bugs in __restore_processor_context():

  5b06bbcf ("x86/power: Fix some ordering bugs in __restore_processor_context()")

... introduced yet another bug, breaking suspend-resume.

Rather than trying to come up with a minimal fix, let's try to clean it up
for real.  This patch fixes quite a few things:

 - The old code saved a nonsensical subset of segment registers.
   The only registers that need to be saved are those that contain
   userspace state or those that can't be trivially restored without
   percpu access working.  (On x86_32, we can restore percpu access
   by writing __KERNEL_PERCPU to %fs.  On x86_64, it's easier to
   save and restore the kernel's GSBASE.)  With this patch, we
   restore hardcoded values to the kernel state where applicable and
   explicitly restore the user state after fixing all the descriptor
   tables.

 - We used to use an unholy mix of inline asm and C helpers for
   segment register access.  Let's get rid of the inline asm.

This fixes the reported s2ram hangs and makes the code all around
more logical.
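
For readers skimming the diff below, the x86_64 half of the new ordering boils down to roughly the following. This is a simplified editorial sketch of the sequence described above, not the literal patched function; the helpers and field names are the ones used in the patch, and the early SS/DS/ES and GDT/IDT fixups are omitted for brevity:

    /*
     * Simplified sketch of the x86_64 resume ordering introduced by this
     * patch (illustration only -- the real code is in the diff below).
     */
    static void restore_segments_sketch(struct saved_context *ctxt)
    {
            /* Percpu access first: exception handlers may rely on it. */
            wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);

            /* Descriptor tables, TSS, LDT and usermode-relevant MSRs. */
            fix_processor_context();

            /*
             * Only now reload the usermode selectors, using the C helpers
             * instead of open-coded inline asm...
             */
            loadsegment(es, ctxt->es);
            loadsegment(fs, ctxt->fs);
            load_gs_index(ctxt->gs);

            /*
             * ...and write the bases last, because loading a selector
             * clobbers the corresponding hidden base.
             */
            wrmsrl(MSR_FS_BASE, ctxt->fs_base);
            wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
    }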
Analyzed-by: Linus Torvalds <torvalds@linux-foundation.org>
Reported-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
Reported-by: Pavel Machek <pavel@ucw.cz>
Tested-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
Tested-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Zhang Rui <rui.zhang@intel.com>
Fixes: 5b06bbcf ("x86/power: Fix some ordering bugs in __restore_processor_context()")
Link: http://lkml.kernel.org/r/398ee68e5c0f766425a7b746becfc810840770ff.1513286253.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 896c80be
arch/x86/include/asm/suspend_32.h
@@ -12,7 +12,13 @@
 
 /* image of the saved processor state */
 struct saved_context {
-        u16 es, fs, gs, ss;
+        /*
+         * On x86_32, all segment registers, with the possible exception of
+         * gs, are saved at kernel entry in pt_regs.
+         */
+#ifdef CONFIG_X86_32_LAZY_GS
+        u16 gs;
+#endif
         unsigned long cr0, cr2, cr3, cr4;
         u64 misc_enable;
         bool misc_enable_saved;
arch/x86/include/asm/suspend_64.h
@@ -20,8 +20,20 @@
  */
 struct saved_context {
         struct pt_regs regs;
-        u16 ds, es, fs, gs, ss;
-        unsigned long gs_base, gs_kernel_base, fs_base;
+
+        /*
+         * User CS and SS are saved in current_pt_regs().  The rest of the
+         * segment selectors need to be saved and restored here.
+         */
+        u16 ds, es, fs, gs;
+
+        /*
+         * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
+         * so we save them separately.  We save the kernelmode GSBASE to
+         * restore percpu access after resume.
+         */
+        unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
+
         unsigned long cr0, cr2, cr3, cr4, cr8;
         u64 misc_enable;
         bool misc_enable_saved;
arch/x86/power/cpu.c
@@ -99,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt)
         /*
          * segment registers
          */
-#ifdef CONFIG_X86_32
-        savesegment(es, ctxt->es);
-        savesegment(fs, ctxt->fs);
+#ifdef CONFIG_X86_32_LAZY_GS
         savesegment(gs, ctxt->gs);
-        savesegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
-        asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
-        asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
-        asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
-        asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
-        asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+#endif
+#ifdef CONFIG_X86_64
+        savesegment(gs, ctxt->gs);
+        savesegment(fs, ctxt->fs);
+        savesegment(ds, ctxt->ds);
+        savesegment(es, ctxt->es);
 
         rdmsrl(MSR_FS_BASE, ctxt->fs_base);
-        rdmsrl(MSR_GS_BASE, ctxt->gs_base);
-        rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+        rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+        rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
         mtrr_save_fixed_ranges(NULL);
 
         rdmsrl(MSR_EFER, ctxt->efer);
@@ -192,6 +188,9 @@ static void fix_processor_context(void)
  *      __restore_processor_state - restore the contents of CPU registers saved
  *              by __save_processor_state()
  *      @ctxt - structure to load the registers contents from
+ *
+ *      The asm code that gets us here will have restored a usable GDT, although
+ *      it will be pointing to the wrong alias.
  */
 static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
@@ -214,46 +213,50 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
         write_cr2(ctxt->cr2);
         write_cr0(ctxt->cr0);
 
+        /* Restore the IDT. */
+        load_idt(&ctxt->idt);
+
         /*
-         * now restore the descriptor tables to their proper values
-         * ltr is done i fix_processor_context().
+         * Just in case the asm code got us here with the SS, DS, or ES
+         * out of sync with the GDT, update them.
          */
-        load_idt(&ctxt->idt);
+        loadsegment(ss, __KERNEL_DS);
+        loadsegment(ds, __USER_DS);
+        loadsegment(es, __USER_DS);
 
-#ifdef CONFIG_X86_64
         /*
-         * We need GSBASE restored before percpu access can work.
-         * percpu access can happen in exception handlers or in complicated
-         * helpers like load_gs_index().
+         * Restore percpu access.  Percpu access can happen in exception
+         * handlers or in complicated helpers like load_gs_index().
          */
-        wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+#ifdef CONFIG_X86_64
+        wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+#else
+        loadsegment(fs, __KERNEL_PERCPU);
+        loadsegment(gs, __KERNEL_STACK_CANARY);
 #endif
 
+        /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
         fix_processor_context();
 
         /*
-         * Restore segment registers.  This happens after restoring the GDT
-         * and LDT, which happen in fix_processor_context().
+         * Now that we have descriptor tables fully restored and working
+         * exception handling, restore the usermode segments.
          */
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_64
+        loadsegment(ds, ctxt->es);
         loadsegment(es, ctxt->es);
         loadsegment(fs, ctxt->fs);
-        loadsegment(gs, ctxt->gs);
-        loadsegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
-        asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
-        asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
-        asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
         load_gs_index(ctxt->gs);
-        asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
 
         /*
-         * Restore FSBASE and user GSBASE after reloading the respective
-         * segment selectors.
+         * Restore FSBASE and GSBASE after restoring the selectors, since
+         * restoring the selectors clobbers the bases.  Keep in mind
+         * that MSR_KERNEL_GS_BASE is horribly misnamed.
          */
         wrmsrl(MSR_FS_BASE, ctxt->fs_base);
-        wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+        wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+#elif defined(CONFIG_X86_32_LAZY_GS)
+        loadsegment(gs, ctxt->gs);
 #endif
 
         do_fpu_end();
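
A short note, in code-comment form, on the "horribly misnamed" remark in the last hunk. This is an editorial addition rather than part of the patch, based on the standard x86_64 SWAPGS semantics:

    /*
     * While the kernel is executing, MSR_GS_BASE holds the kernel's GS
     * base (what percpu access uses) and MSR_KERNEL_GS_BASE holds the
     * value that the next SWAPGS will swap in -- i.e. the *user* GS base.
     * That is why the patch restores kernelmode_gs_base via MSR_GS_BASE
     * early (so percpu access works) and usermode_gs_base via
     * MSR_KERNEL_GS_BASE at the very end, after the selectors are
     * reloaded.
     */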