Commit d54e81f9 authored by Will Deacon

arm64: entry: avoid writing lr explicitly for constructing return paths

Using an explicit adr instruction to set the link register to point at
ret_fast_syscall/ret_to_user can defeat branch and return stack predictors.

Instead, use the standard calling instructions (bl, blr) and have an
unconditional branch as the following instruction.
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 44b82b77
...@@ -455,8 +455,8 @@ el0_da: ...@@ -455,8 +455,8 @@ el0_da:
bic x0, x26, #(0xff << 56) bic x0, x26, #(0xff << 56)
mov x1, x25 mov x1, x25
mov x2, sp mov x2, sp
adr lr, ret_to_user bl do_mem_abort
b do_mem_abort b ret_to_user
el0_ia: el0_ia:
/* /*
* Instruction abort handling * Instruction abort handling
...@@ -468,8 +468,8 @@ el0_ia: ...@@ -468,8 +468,8 @@ el0_ia:
mov x0, x26 mov x0, x26
orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts
mov x2, sp mov x2, sp
adr lr, ret_to_user bl do_mem_abort
b do_mem_abort b ret_to_user
el0_fpsimd_acc: el0_fpsimd_acc:
/* /*
* Floating Point or Advanced SIMD access * Floating Point or Advanced SIMD access
...@@ -478,8 +478,8 @@ el0_fpsimd_acc: ...@@ -478,8 +478,8 @@ el0_fpsimd_acc:
ct_user_exit ct_user_exit
mov x0, x25 mov x0, x25
mov x1, sp mov x1, sp
adr lr, ret_to_user bl do_fpsimd_acc
b do_fpsimd_acc b ret_to_user
el0_fpsimd_exc: el0_fpsimd_exc:
/* /*
* Floating Point or Advanced SIMD exception * Floating Point or Advanced SIMD exception
...@@ -488,8 +488,8 @@ el0_fpsimd_exc: ...@@ -488,8 +488,8 @@ el0_fpsimd_exc:
ct_user_exit ct_user_exit
mov x0, x25 mov x0, x25
mov x1, sp mov x1, sp
adr lr, ret_to_user bl do_fpsimd_exc
b do_fpsimd_exc b ret_to_user
el0_sp_pc: el0_sp_pc:
/* /*
* Stack or PC alignment exception handling * Stack or PC alignment exception handling
...@@ -500,8 +500,8 @@ el0_sp_pc: ...@@ -500,8 +500,8 @@ el0_sp_pc:
mov x0, x26 mov x0, x26
mov x1, x25 mov x1, x25
mov x2, sp mov x2, sp
adr lr, ret_to_user bl do_sp_pc_abort
b do_sp_pc_abort b ret_to_user
el0_undef: el0_undef:
/* /*
* Undefined instruction * Undefined instruction
...@@ -510,8 +510,8 @@ el0_undef: ...@@ -510,8 +510,8 @@ el0_undef:
enable_dbg_and_irq enable_dbg_and_irq
ct_user_exit ct_user_exit
mov x0, sp mov x0, sp
adr lr, ret_to_user bl do_undefinstr
b do_undefinstr b ret_to_user
el0_dbg: el0_dbg:
/* /*
* Debug exception handling * Debug exception handling
...@@ -530,8 +530,8 @@ el0_inv: ...@@ -530,8 +530,8 @@ el0_inv:
mov x0, sp mov x0, sp
mov x1, #BAD_SYNC mov x1, #BAD_SYNC
mrs x2, esr_el1 mrs x2, esr_el1
adr lr, ret_to_user bl bad_mode
b bad_mode b ret_to_user
ENDPROC(el0_sync) ENDPROC(el0_sync)
.align 6 .align 6
...@@ -653,14 +653,15 @@ el0_svc_naked: // compat entry point ...@@ -653,14 +653,15 @@ el0_svc_naked: // compat entry point
ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
tst x16, #_TIF_SYSCALL_WORK tst x16, #_TIF_SYSCALL_WORK
b.ne __sys_trace b.ne __sys_trace
adr lr, ret_fast_syscall // return address
cmp scno, sc_nr // check upper syscall limit cmp scno, sc_nr // check upper syscall limit
b.hs ni_sys b.hs ni_sys
ldr x16, [stbl, scno, lsl #3] // address in the syscall table ldr x16, [stbl, scno, lsl #3] // address in the syscall table
br x16 // call sys_* routine blr x16 // call sys_* routine
b ret_fast_syscall
ni_sys: ni_sys:
mov x0, sp mov x0, sp
b do_ni_syscall bl do_ni_syscall
b ret_fast_syscall
ENDPROC(el0_svc) ENDPROC(el0_svc)
/* /*
...@@ -670,17 +671,16 @@ ENDPROC(el0_svc) ...@@ -670,17 +671,16 @@ ENDPROC(el0_svc)
__sys_trace: __sys_trace:
mov x0, sp mov x0, sp
bl syscall_trace_enter bl syscall_trace_enter
adr lr, __sys_trace_return // return address
uxtw scno, w0 // syscall number (possibly new) uxtw scno, w0 // syscall number (possibly new)
mov x1, sp // pointer to regs mov x1, sp // pointer to regs
cmp scno, sc_nr // check upper syscall limit cmp scno, sc_nr // check upper syscall limit
b.hs ni_sys b.hs __ni_sys_trace
ldp x0, x1, [sp] // restore the syscall args ldp x0, x1, [sp] // restore the syscall args
ldp x2, x3, [sp, #S_X2] ldp x2, x3, [sp, #S_X2]
ldp x4, x5, [sp, #S_X4] ldp x4, x5, [sp, #S_X4]
ldp x6, x7, [sp, #S_X6] ldp x6, x7, [sp, #S_X6]
ldr x16, [stbl, scno, lsl #3] // address in the syscall table ldr x16, [stbl, scno, lsl #3] // address in the syscall table
br x16 // call sys_* routine blr x16 // call sys_* routine
__sys_trace_return: __sys_trace_return:
str x0, [sp] // save returned x0 str x0, [sp] // save returned x0
...@@ -688,6 +688,11 @@ __sys_trace_return: ...@@ -688,6 +688,11 @@ __sys_trace_return:
bl syscall_trace_exit bl syscall_trace_exit
b ret_to_user b ret_to_user
__ni_sys_trace:
mov x0, sp
bl do_ni_syscall
b __sys_trace_return
/* /*
* Special system call wrappers. * Special system call wrappers.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment