Commit 3f01daec authored by Dominik Brodowski, committed by Ingo Molnar

x86/entry/64: Introduce the PUSH_AND_CLEAR_REGS macro

Those instances where ALLOC_PT_GPREGS_ON_STACK is called just before
SAVE_AND_CLEAR_REGS can trivially be replaced by PUSH_AND_CLEAR_REGS.
This macro uses PUSH instead of MOV and should therefore be faster, at
least on newer CPUs.
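
For context, the pattern being replaced first reserves the whole pt_regs area
with a single stack adjustment and then stores each register with a MOV to an
rsp-relative slot. A rough sketch of the two approaches (simplified; the real
macros cover all fifteen general-purpose registers and interleave the XOR
clearing):

	/* old: ALLOC_PT_GPREGS_ON_STACK + SAVE_AND_CLEAR_REGS (MOV-based) */
	addq	$-(15*8), %rsp		/* reserve the pt_regs area */
	movq	%rdi, 14*8(%rsp)	/* pt_regs->di */
	movq	%rsi, 13*8(%rsp)	/* pt_regs->si */
	/* ... remaining registers ... */

	/* new: PUSH_AND_CLEAR_REGS (PUSH-based) */
	pushq	%rdi			/* pt_regs->di, adjusts %rsp implicitly */
	pushq	%rsi			/* pt_regs->si */
	/* ... remaining registers, interleaved with XOR clears ... */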
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180211104949.12992-5-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f7bafa2b
@@ -137,6 +137,42 @@ For 32-bit we have the following conventions - kernel is built with
 	UNWIND_HINT_REGS offset=\offset
 .endm
 
+.macro PUSH_AND_CLEAR_REGS
+	/*
+	 * Push registers and sanitize registers of values that a
+	 * speculation attack might otherwise want to exploit. The
+	 * lower registers are likely clobbered well before they
+	 * could be put to use in a speculative execution gadget.
+	 * Interleave XOR with PUSH for better uop scheduling:
+	 */
+	pushq	%rdi		/* pt_regs->di */
+	pushq	%rsi		/* pt_regs->si */
+	pushq	%rdx		/* pt_regs->dx */
+	pushq	%rcx		/* pt_regs->cx */
+	pushq	%rax		/* pt_regs->ax */
+	pushq	%r8		/* pt_regs->r8 */
+	xorq	%r8, %r8	/* nospec   r8 */
+	pushq	%r9		/* pt_regs->r9 */
+	xorq	%r9, %r9	/* nospec   r9 */
+	pushq	%r10		/* pt_regs->r10 */
+	xorq	%r10, %r10	/* nospec   r10 */
+	pushq	%r11		/* pt_regs->r11 */
+	xorq	%r11, %r11	/* nospec   r11 */
+	pushq	%rbx		/* pt_regs->rbx */
+	xorl	%ebx, %ebx	/* nospec   rbx */
+	pushq	%rbp		/* pt_regs->rbp */
+	xorl	%ebp, %ebp	/* nospec   rbp */
+	pushq	%r12		/* pt_regs->r12 */
+	xorq	%r12, %r12	/* nospec   r12 */
+	pushq	%r13		/* pt_regs->r13 */
+	xorq	%r13, %r13	/* nospec   r13 */
+	pushq	%r14		/* pt_regs->r14 */
+	xorq	%r14, %r14	/* nospec   r14 */
+	pushq	%r15		/* pt_regs->r15 */
+	xorq	%r15, %r15	/* nospec   r15 */
+	UNWIND_HINT_REGS
+.endm
+
 .macro POP_REGS pop_rdi=1 skip_r11rcx=0
 	popq %r15
 	popq %r14
...
@@ -564,8 +564,7 @@ END(irq_entries_start)
 	call	switch_to_thread_stack
 1:
 
-	ALLOC_PT_GPREGS_ON_STACK
-	SAVE_AND_CLEAR_REGS
+	PUSH_AND_CLEAR_REGS
 	ENCODE_FRAME_POINTER
 
 	testb	$3, CS(%rsp)
@@ -1112,8 +1111,7 @@ ENTRY(xen_failsafe_callback)
 	addq	$0x30, %rsp
 	UNWIND_HINT_IRET_REGS
 	pushq	$-1 /* orig_ax = -1 => not a system call */
-	ALLOC_PT_GPREGS_ON_STACK
-	SAVE_AND_CLEAR_REGS
+	PUSH_AND_CLEAR_REGS
 	ENCODE_FRAME_POINTER
 	jmp	error_exit
 END(xen_failsafe_callback)
...