Commit 82cb8a0b authored by Andy Lutomirski; committed by Peter Zijlstra

x86/entry/32: Move FIXUP_FRAME after pushing %fs in SAVE_ALL

This will allow us to get percpu access working before FIXUP_FRAME,
which will allow us to unwind ESPFIX earlier.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: stable@kernel.org
parent 4c4fd55d
...@@ -213,54 +213,58 @@ ...@@ -213,54 +213,58 @@
* *
* Be careful: we may have nonzero SS base due to ESPFIX. * Be careful: we may have nonzero SS base due to ESPFIX.
*/ */
andl $0x0000ffff, 3*4(%esp) andl $0x0000ffff, 4*4(%esp)
#ifdef CONFIG_VM86 #ifdef CONFIG_VM86
testl $X86_EFLAGS_VM, 4*4(%esp) testl $X86_EFLAGS_VM, 5*4(%esp)
jnz .Lfrom_usermode_no_fixup_\@ jnz .Lfrom_usermode_no_fixup_\@
#endif #endif
testl $USER_SEGMENT_RPL_MASK, 3*4(%esp) testl $USER_SEGMENT_RPL_MASK, 4*4(%esp)
jnz .Lfrom_usermode_no_fixup_\@ jnz .Lfrom_usermode_no_fixup_\@
orl $CS_FROM_KERNEL, 3*4(%esp) orl $CS_FROM_KERNEL, 4*4(%esp)
/* /*
* When we're here from kernel mode; the (exception) stack looks like: * When we're here from kernel mode; the (exception) stack looks like:
* *
* 5*4(%esp) - <previous context> * 6*4(%esp) - <previous context>
* 4*4(%esp) - flags * 5*4(%esp) - flags
* 3*4(%esp) - cs * 4*4(%esp) - cs
* 2*4(%esp) - ip * 3*4(%esp) - ip
* 1*4(%esp) - orig_eax * 2*4(%esp) - orig_eax
* 0*4(%esp) - gs / function * 1*4(%esp) - gs / function
* 0*4(%esp) - fs
* *
* Lets build a 5 entry IRET frame after that, such that struct pt_regs * Lets build a 5 entry IRET frame after that, such that struct pt_regs
* is complete and in particular regs->sp is correct. This gives us * is complete and in particular regs->sp is correct. This gives us
* the original 5 entries as gap: * the original 6 entries as gap:
* *
* 12*4(%esp) - <previous context> * 14*4(%esp) - <previous context>
* 11*4(%esp) - gap / flags * 13*4(%esp) - gap / flags
* 10*4(%esp) - gap / cs * 12*4(%esp) - gap / cs
* 9*4(%esp) - gap / ip * 11*4(%esp) - gap / ip
* 8*4(%esp) - gap / orig_eax * 10*4(%esp) - gap / orig_eax
* 7*4(%esp) - gap / gs / function * 9*4(%esp) - gap / gs / function
* 6*4(%esp) - ss * 8*4(%esp) - gap / fs
* 5*4(%esp) - sp * 7*4(%esp) - ss
* 4*4(%esp) - flags * 6*4(%esp) - sp
* 3*4(%esp) - cs * 5*4(%esp) - flags
* 2*4(%esp) - ip * 4*4(%esp) - cs
* 1*4(%esp) - orig_eax * 3*4(%esp) - ip
* 0*4(%esp) - gs / function * 2*4(%esp) - orig_eax
* 1*4(%esp) - gs / function
* 0*4(%esp) - fs
*/ */
pushl %ss # ss pushl %ss # ss
pushl %esp # sp (points at ss) pushl %esp # sp (points at ss)
addl $6*4, (%esp) # point sp back at the previous context addl $7*4, (%esp) # point sp back at the previous context
pushl 6*4(%esp) # flags pushl 7*4(%esp) # flags
pushl 6*4(%esp) # cs pushl 7*4(%esp) # cs
pushl 6*4(%esp) # ip pushl 7*4(%esp) # ip
pushl 6*4(%esp) # orig_eax pushl 7*4(%esp) # orig_eax
pushl 6*4(%esp) # gs / function pushl 7*4(%esp) # gs / function
pushl 7*4(%esp) # fs
.Lfrom_usermode_no_fixup_\@: .Lfrom_usermode_no_fixup_\@:
.endm .endm
...@@ -308,8 +312,8 @@ ...@@ -308,8 +312,8 @@
.if \skip_gs == 0 .if \skip_gs == 0
PUSH_GS PUSH_GS
.endif .endif
FIXUP_FRAME
pushl %fs pushl %fs
FIXUP_FRAME
pushl %es pushl %es
pushl %ds pushl %ds
pushl \pt_regs_ax pushl \pt_regs_ax
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment