Commit 2da03d41 authored by Max Filippov

xtensa: use call instead of callx in assembly code

Now that xtensa assembly sources are compiled with -mlongcalls let the
assembler and linker relax call instructions into l32r + callx where
needed. This change makes the code cleaner and potentially a bit faster.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent f8f02ca7
...@@ -212,8 +212,7 @@ ENDPROC(coprocessor_restore) ...@@ -212,8 +212,7 @@ ENDPROC(coprocessor_restore)
ENTRY(fast_coprocessor_double) ENTRY(fast_coprocessor_double)
wsr a0, excsave1 wsr a0, excsave1
movi a0, unrecoverable_exception call0 unrecoverable_exception
callx0 a0
ENDPROC(fast_coprocessor_double) ENDPROC(fast_coprocessor_double)
......
...@@ -476,8 +476,7 @@ common_exception_return: ...@@ -476,8 +476,7 @@ common_exception_return:
1: 1:
irq_save a2, a3 irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
movi a4, trace_hardirqs_off call4 trace_hardirqs_off
callx4 a4
#endif #endif
/* Jump if we are returning from kernel exceptions. */ /* Jump if we are returning from kernel exceptions. */
...@@ -504,24 +503,20 @@ common_exception_return: ...@@ -504,24 +503,20 @@ common_exception_return:
/* Call do_signal() */ /* Call do_signal() */
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
movi a4, trace_hardirqs_on call4 trace_hardirqs_on
callx4 a4
#endif #endif
rsil a2, 0 rsil a2, 0
movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*)
mov a6, a1 mov a6, a1
callx4 a4 call4 do_notify_resume # int do_notify_resume(struct pt_regs*)
j 1b j 1b
3: /* Reschedule */ 3: /* Reschedule */
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
movi a4, trace_hardirqs_on call4 trace_hardirqs_on
callx4 a4
#endif #endif
rsil a2, 0 rsil a2, 0
movi a4, schedule # void schedule (void) call4 schedule # void schedule (void)
callx4 a4
j 1b j 1b
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
...@@ -532,8 +527,7 @@ common_exception_return: ...@@ -532,8 +527,7 @@ common_exception_return:
l32i a4, a2, TI_PRE_COUNT l32i a4, a2, TI_PRE_COUNT
bnez a4, 4f bnez a4, 4f
movi a4, preempt_schedule_irq call4 preempt_schedule_irq
callx4 a4
j 1b j 1b
#endif #endif
...@@ -546,23 +540,20 @@ common_exception_return: ...@@ -546,23 +540,20 @@ common_exception_return:
5: 5:
#ifdef CONFIG_HAVE_HW_BREAKPOINT #ifdef CONFIG_HAVE_HW_BREAKPOINT
_bbci.l a4, TIF_DB_DISABLED, 7f _bbci.l a4, TIF_DB_DISABLED, 7f
movi a4, restore_dbreak call4 restore_dbreak
callx4 a4
7: 7:
#endif #endif
#ifdef CONFIG_DEBUG_TLB_SANITY #ifdef CONFIG_DEBUG_TLB_SANITY
l32i a4, a1, PT_DEPC l32i a4, a1, PT_DEPC
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
movi a4, check_tlb_sanity call4 check_tlb_sanity
callx4 a4
#endif #endif
6: 6:
4: 4:
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
bgei a4, LOCKLEVEL, 1f bgei a4, LOCKLEVEL, 1f
movi a4, trace_hardirqs_on call4 trace_hardirqs_on
callx4 a4
1: 1:
#endif #endif
/* Restore optional registers. */ /* Restore optional registers. */
...@@ -938,10 +929,8 @@ ENTRY(unrecoverable_exception) ...@@ -938,10 +929,8 @@ ENTRY(unrecoverable_exception)
movi a0, 0 movi a0, 0
addi a1, a1, PT_REGS_OFFSET addi a1, a1, PT_REGS_OFFSET
movi a4, panic
movi a6, unrecoverable_text movi a6, unrecoverable_text
call4 panic
callx4 a4
1: j 1b 1: j 1b
...@@ -1078,8 +1067,7 @@ ENTRY(fast_syscall_unrecoverable) ...@@ -1078,8 +1067,7 @@ ENTRY(fast_syscall_unrecoverable)
xsr a2, depc # restore a2, depc xsr a2, depc # restore a2, depc
wsr a0, excsave1 wsr a0, excsave1
movi a0, unrecoverable_exception call0 unrecoverable_exception
callx0 a0
ENDPROC(fast_syscall_unrecoverable) ENDPROC(fast_syscall_unrecoverable)
...@@ -1418,14 +1406,12 @@ ENTRY(fast_syscall_spill_registers) ...@@ -1418,14 +1406,12 @@ ENTRY(fast_syscall_spill_registers)
rsync rsync
movi a6, SIGSEGV movi a6, SIGSEGV
movi a4, do_exit call4 do_exit
callx4 a4
/* shouldn't return, so panic */ /* shouldn't return, so panic */
wsr a0, excsave1 wsr a0, excsave1
movi a0, unrecoverable_exception call0 unrecoverable_exception # should not return
callx0 a0 # should not return
1: j 1b 1: j 1b
...@@ -1571,8 +1557,8 @@ ENDPROC(fast_syscall_spill_registers) ...@@ -1571,8 +1557,8 @@ ENDPROC(fast_syscall_spill_registers)
ENTRY(fast_second_level_miss_double_kernel) ENTRY(fast_second_level_miss_double_kernel)
1: movi a0, unrecoverable_exception 1:
callx0 a0 # should not return call0 unrecoverable_exception # should not return
1: j 1b 1: j 1b
ENDPROC(fast_second_level_miss_double_kernel) ENDPROC(fast_second_level_miss_double_kernel)
...@@ -1904,9 +1890,8 @@ ENTRY(system_call) ...@@ -1904,9 +1890,8 @@ ENTRY(system_call)
l32i a3, a2, PT_AREG2 l32i a3, a2, PT_AREG2
mov a6, a2 mov a6, a2
movi a4, do_syscall_trace_enter
s32i a3, a2, PT_SYSCALL s32i a3, a2, PT_SYSCALL
callx4 a4 call4 do_syscall_trace_enter
mov a3, a6 mov a3, a6
/* syscall = sys_call_table[syscall_nr] */ /* syscall = sys_call_table[syscall_nr] */
...@@ -1938,9 +1923,8 @@ ENTRY(system_call) ...@@ -1938,9 +1923,8 @@ ENTRY(system_call)
1: /* regs->areg[2] = return_value */ 1: /* regs->areg[2] = return_value */
s32i a6, a2, PT_AREG2 s32i a6, a2, PT_AREG2
movi a4, do_syscall_trace_leave
mov a6, a2 mov a6, a2
callx4 a4 call4 do_syscall_trace_leave
retw retw
ENDPROC(system_call) ENDPROC(system_call)
...@@ -2056,12 +2040,10 @@ ENTRY(ret_from_fork) ...@@ -2056,12 +2040,10 @@ ENTRY(ret_from_fork)
/* void schedule_tail (struct task_struct *prev) /* void schedule_tail (struct task_struct *prev)
* Note: prev is still in a6 (return value from fake call4 frame) * Note: prev is still in a6 (return value from fake call4 frame)
*/ */
movi a4, schedule_tail call4 schedule_tail
callx4 a4
movi a4, do_syscall_trace_leave
mov a6, a1 mov a6, a1
callx4 a4 call4 do_syscall_trace_leave
j common_exception_return j common_exception_return
......
...@@ -264,11 +264,8 @@ ENTRY(_startup) ...@@ -264,11 +264,8 @@ ENTRY(_startup)
/* init_arch kick-starts the linux kernel */ /* init_arch kick-starts the linux kernel */
movi a4, init_arch call4 init_arch
callx4 a4 call4 start_kernel
movi a4, start_kernel
callx4 a4
should_never_return: should_never_return:
j should_never_return j should_never_return
...@@ -294,8 +291,7 @@ should_never_return: ...@@ -294,8 +291,7 @@ should_never_return:
movi a6, 0 movi a6, 0
wsr a6, excsave1 wsr a6, excsave1
movi a4, secondary_start_kernel call4 secondary_start_kernel
callx4 a4
j should_never_return j should_never_return
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
......
...@@ -305,8 +305,7 @@ _DoubleExceptionVector_WindowUnderflow: ...@@ -305,8 +305,7 @@ _DoubleExceptionVector_WindowUnderflow:
.Lunrecoverable: .Lunrecoverable:
rsr a3, excsave1 rsr a3, excsave1
wsr a0, excsave1 wsr a0, excsave1
movi a0, unrecoverable_exception call0 unrecoverable_exception
callx0 a0
.Lfixup:/* Check for a fixup handler or if we were in a critical section. */ .Lfixup:/* Check for a fixup handler or if we were in a critical section. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment