Commit 2140a994 authored by Jan Beulich, committed by Ingo Molnar

x86/entry/64: Relax pvops stub clobber specifications

Except for the error_exit case, none of the code paths following the
{DIS,EN}ABLE_INTERRUPTS() invocations modified here make any
assumptions about register values, so all registers can be clobbered
there. In the error_exit case, a minor adjustment to register usage
(which also eliminates an instruction) makes this true as well.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/5894556D02000078001366D3@prv-mh.provo.novell.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent fdbd518a
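
For background: with CONFIG_PARAVIRT, DISABLE_INTERRUPTS()/ENABLE_INTERRUPTS() expand to patchable call sites into the paravirt irq ops, and the CLBR_* argument tells the expansion which registers the surrounding code allows that call to clobber; everything outside the declared set has to be saved and restored around it. The sketch below is a rough illustration of that trade-off, not the literal expansion from arch/x86/include/asm/paravirt.h, and pv_irq_ops_irq_disable is a hypothetical stand-in for the real indirect call target.

	/* DISABLE_INTERRUPTS(CLBR_NONE): the call site promises to leave   */
	/* every register intact, so the caller-clobbered set is spilled    */
	/* around the (patchable) indirect call into the pvops stub.        */
	pushq	%rdi;  pushq	%rsi;  pushq	%rdx;  pushq	%rcx;  pushq	%rax
	pushq	%r8;   pushq	%r9;   pushq	%r10;  pushq	%r11
	call	*pv_irq_ops_irq_disable(%rip)	/* hypothetical call target */
	popq	%r11;  popq	%r10;  popq	%r9;   popq	%r8
	popq	%rax;  popq	%rcx;  popq	%rdx;  popq	%rsi;  popq	%rdi

	/* DISABLE_INTERRUPTS(CLBR_ANY): the code that follows reloads      */
	/* whatever it needs (TRACE_IRQS_OFF, movq PER_CPU_VAR(...), ...),  */
	/* so the stub may clobber all caller-saved registers and the       */
	/* spills disappear.  Callee-saved registers such as %rbx still     */
	/* survive, which is why error_exit can test %ebx directly and      */
	/* drop the movl %ebx, %eax.                                        */
	call	*pv_irq_ops_irq_disable(%rip)	/* hypothetical call target */

On a non-paravirt build both forms collapse to plain cli/sti, so the clobber annotation only matters for the paravirt case.
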
arch/x86/entry/entry_64.S
@@ -212,7 +212,7 @@ entry_SYSCALL_64_fastpath:
 	 * If we see that no exit work is required (which we are required
 	 * to check with IRQs off), then we can go straight to SYSRET64.
 	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movq	PER_CPU_VAR(current_task), %r11
 	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
@@ -233,7 +233,7 @@ entry_SYSCALL_64_fastpath:
 	 * raise(3) will trigger this, for example.  IRQs are off.
 	 */
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
+	ENABLE_INTERRUPTS(CLBR_ANY)
 	SAVE_EXTRA_REGS
 	movq	%rsp, %rdi
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
@@ -343,7 +343,7 @@ ENTRY(stub_ptregs_64)
 	 * Called from fast path -- disable IRQs again, pop return address
 	 * and jump to slow path
 	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	popq	%rax
 	jmp	entry_SYSCALL64_slow_path
@@ -518,7 +518,7 @@ common_interrupt:
 	interrupt do_IRQ
 	/* 0(%rsp): old RSP */
 ret_from_intr:
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	decl	PER_CPU_VAR(irq_count)
 
@@ -1051,7 +1051,7 @@ END(paranoid_entry)
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
 ENTRY(paranoid_exit)
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF_DEBUG
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	paranoid_exit_no_swapgs
@@ -1156,10 +1156,9 @@ END(error_entry)
  *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
  */
 ENTRY(error_exit)
-	movl	%ebx, %eax
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
-	testl	%eax, %eax
+	testl	%ebx, %ebx
 	jnz	retint_kernel
 	jmp	retint_user
 END(error_exit)