Commit 0f76330e authored by Tim Chen, committed by Marcelo Henrique Cerri

x86/syscall: Clear unused extra registers on syscall entrance

CVE-2017-5753
CVE-2017-5715

To prevent the unused registers %r12-%r15, %rbp and %rbx from
being used speculatively, we clear them upon syscall entrance
for code hygiene.
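
For context, the change pairs the new CLEAR_EXTRA_REGS macro with the existing SAVE_EXTRA_REGS/RESTORE_EXTRA_REGS helpers from calling.h. A minimal sketch of the intended flow (abridged pseudo-assembly for illustration, not the literal entry_64.S hunks below):

    /* syscall entry, after the C regs have been stored into pt_regs */
    SAVE_EXTRA_REGS          /* preserve callee-saved %rbx, %rbp, %r12-%r15 in pt_regs */
    CLEAR_EXTRA_REGS         /* xorq each of them so no stale user value is live */
    ...
    /* every return-to-userspace path (sysret fast path and iret slow path) */
    RESTORE_EXTRA_REGS       /* put the preserved callee-saved values back */

Because the extra registers are now saved once at syscall entry and restored on each exit path, the per-path SAVE_EXTRA_REGS/RESTORE_EXTRA_REGS pairs in the tracesys, int_ret_from_sys_call and FORK_LIKE stub paths become redundant and are removed in the hunks below.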
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Andy Whitcroft <apw@canonical.com>
(backported from commit 20018a1207a68ee311e9e080f8589e23a0e14852)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
parent ff2699c9
@@ -195,6 +195,15 @@ For 32-bit we have the following conventions - kernel is built with
subq $-(15*8+\addskip), %rsp
.endm
.macro CLEAR_EXTRA_REGS
xorq %r15, %r15
xorq %r14, %r14
xorq %r13, %r13
xorq %r12, %r12
xorq %rbp, %rbp
xorq %rbx, %rbx
.endm
.macro icebp
.byte 0xf1
.endm
@@ -172,9 +172,16 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
pushq %r9 /* pt_regs->r9 */
pushq %r10 /* pt_regs->r10 */
pushq %r11 /* pt_regs->r11 */
sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not used */
ENABLE_IBRS
/*
* Clear the unused extra regs for code hygiene.
* Will restore the callee saved extra regs at end of syscall.
*/
SAVE_EXTRA_REGS
CLEAR_EXTRA_REGS
STUFF_RSB
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -215,10 +222,13 @@ entry_SYSCALL_64_fastpath:
movq RIP(%rsp), %rcx
movq EFLAGS(%rsp), %r11
RESTORE_C_REGS_EXCEPT_RCX_R11
DISABLE_IBRS
RESTORE_EXTRA_REGS
RESTORE_C_REGS_EXCEPT_RCX_R11
/*
* This opens a window where we have a user CR3, but are
* running in the kernel. This makes using the CS
@@ -262,7 +272,6 @@ tracesys:
jmp entry_SYSCALL_64_fastpath /* and return to the fast path */
tracesys_phase2:
SAVE_EXTRA_REGS
movq %rsp, %rdi
movl $AUDIT_ARCH_X86_64, %esi
movq %rax, %rdx
@@ -274,7 +283,6 @@ tracesys_phase2:
* the value it wants us to use in the table lookup.
*/
RESTORE_C_REGS_EXCEPT_RAX
RESTORE_EXTRA_REGS
#if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max, %rax
#else
@@ -293,10 +301,8 @@ tracesys_phase2:
* Has correct iret frame.
*/
GLOBAL(int_ret_from_sys_call)
SAVE_EXTRA_REGS
movq %rsp, %rdi
call syscall_return_slowpath /* returns with IRQs disabled */
RESTORE_EXTRA_REGS
TRACE_IRQS_IRETQ /* we're about to change IF */
/*
@@ -364,6 +370,8 @@ GLOBAL(int_ret_from_sys_call)
syscall_return_via_sysret:
DISABLE_IBRS
RESTORE_EXTRA_REGS
/* rcx and r11 are already restored (see code above) */
RESTORE_C_REGS_EXCEPT_RCX_R11
/*
@@ -378,7 +386,6 @@ syscall_return_via_sysret:
USERGS_SYSRET64
opportunistic_sysret_failed:
/* XXX: might we need a DISABLE_IBRS */
/*
* This opens a window where we have a user CR3, but are
* running in the kernel. This makes using the CS
@@ -386,6 +393,9 @@ opportunistic_sysret_failed:
* switch CR3 in NMIs. Normal interrupts are OK because
* they are off here.
*/
DISABLE_IBRS
RESTORE_EXTRA_REGS
SWITCH_USER_CR3
SWAPGS
jmp restore_c_regs_and_iret
@@ -394,7 +404,6 @@ END(entry_SYSCALL_64)
.macro FORK_LIKE func
ENTRY(stub_\func)
SAVE_EXTRA_REGS 8
jmp sys_\func
END(stub_\func)
.endm