Commit 43d399d2 authored by Martin Schwidefsky, committed by Martin Schwidefsky

[S390] cleanup sysc_work and io_work code

Clean up the #ifdef mess at io_work in entry[64].S and streamline the
TIF work code of the system call and io exit path.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 94038a99
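For orientation before the diff: the streamlined TIF dispatch that both entry.S and entry64.S now share can be sketched in C. This is an illustrative sketch only — the flag bit values and helper names below are invented for the example, and the authoritative logic is the assembly in the diff — but it shows the shape of the new code: the user-space check is done once up front, and every work handler returns to sysc_work_loop, so the bits are re-tested until none is left and the path falls through to sysc_return.

	/* sysc_work_sketch.c - illustrative only, not kernel code */
	#include <stdio.h>

	#define _TIF_MCCK_PENDING	(1 << 0)	/* bit values invented */
	#define _TIF_NEED_RESCHED	(1 << 1)
	#define _TIF_SIGPENDING		(1 << 2)
	#define _TIF_NOTIFY_RESUME	(1 << 3)
	/* _TIF_RESTART_SVC and _TIF_SINGLE_STEP omitted for brevity */

	static unsigned long tif_flags = _TIF_NEED_RESCHED | _TIF_SIGPENDING;

	static void handle(const char *what, unsigned long bit)
	{
		printf("handling %s\n", what);
		tif_flags &= ~bit;		/* each handler clears its bit */
	}

	int main(void)
	{
		/* sysc_work: kernel-mode returns skip straight to sysc_restore;
		 * only user-mode returns enter the dispatch below */
		/* sysc_work_loop: handlers come back here, so work that became
		 * pending while a handler ran is picked up before sysc_return */
		while (tif_flags) {
			if (tif_flags & _TIF_MCCK_PENDING)
				handle("mcck", _TIF_MCCK_PENDING);
			else if (tif_flags & _TIF_NEED_RESCHED)
				handle("resched", _TIF_NEED_RESCHED);
			else if (tif_flags & _TIF_SIGPENDING)
				handle("signal", _TIF_SIGPENDING);
			else if (tif_flags & _TIF_NOTIFY_RESUME)
				handle("notify_resume", _TIF_NOTIFY_RESUME);
		}
		return 0;			/* sysc_return */
	}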
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -301,31 +301,29 @@ sysc_restore_trace_psw:
 #endif
 #
-# recheck if there is more work to do
-#
-sysc_work_loop:
-	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
-	bz	BASED(sysc_restore)	# there is no work to do
-#
-# One of the work bits is on. Find out which one.
+# There is work to do, but first we need to check if we return to userspace.
 #
 sysc_work:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 	bno	BASED(sysc_restore)
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work_loop:
 	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
 	bo	BASED(sysc_mcck_pending)
 	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
 	bo	BASED(sysc_reschedule)
 	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
-	bnz	BASED(sysc_sigpending)
+	bo	BASED(sysc_sigpending)
 	tm	__TI_flags+3(%r9),_TIF_NOTIFY_RESUME
-	bnz	BASED(sysc_notify_resume)
+	bo	BASED(sysc_notify_resume)
 	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
 	bo	BASED(sysc_restart)
 	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
 	bo	BASED(sysc_singlestep)
-	b	BASED(sysc_restore)
-sysc_work_done:
+	b	BASED(sysc_return)	# beware of critical section cleanup
 #
 # _TIF_NEED_RESCHED is set, call schedule
@@ -386,7 +384,7 @@ sysc_singlestep:
 	mvi	SP_SVCNR+1(%r15),0xff
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
-	la	%r14,BASED(sysc_return)	# load adr. of system return
+	la	%r14,BASED(sysc_work_loop)	# load adr. of system return
 	br	%r1			# branch to do_single_step
 #
@@ -636,30 +634,36 @@ io_restore_trace_psw:
 #endif
 #
-# switch to kernel stack, then check the TIF bits
+# There is work todo, find out in which context we have been interrupted:
+# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 2) if we return to kernel code and preemptive scheduling is enabled check
+#    the preemption counter and if it is zero call preempt_schedule_irq
+# Before any work can be done, a switch to the kernel stack is required.
 #
 io_work:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-#ifndef CONFIG_PREEMPT
-	bno	BASED(io_restore)	# no-> skip resched & signal
-#else
-	bnz	BASED(io_work_user)	# no -> check for preemptive scheduling
+	bo	BASED(io_work_user)	# yes -> do resched & signal
+#ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r9)
 	bnz	BASED(io_restore)	# preemption disabled
+	# switch to kernel stack
 	l	%r1,SP_R15(%r15)
 	s	%r1,BASED(.Lc_spsize)
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
 	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lr	%r15,%r1
 io_resume_loop:
-	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
-	bno	BASED(io_restore)
 	l	%r1,BASED(.Lpreempt_schedule_irq)
 	la	%r14,BASED(io_resume_loop)
-	br	%r1			# call schedule
+	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
+	bor	%r1			# call preempt_schedule_irq
 #endif
+	b	BASED(io_restore)
+#
+# Need to do work before returning to userspace, switch to kernel stack
+#
 io_work_user:
 	l	%r1,__LC_KERNEL_STACK
 	s	%r1,BASED(.Lc_spsize)
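The collapsed #ifdef above reduces io_work to one question asked first — are we returning to user space? — with kernel preemption as the only remaining kernel-mode work. A C sketch of the decision (illustrative only; the variable and helper names are invented, see the assembly for the real thing):

	/* io_work_sketch.c - illustrative only, not kernel code */
	#include <stdio.h>
	#include <stdbool.h>

	#define CONFIG_PREEMPT 1		/* pretend-enabled for the sketch */

	static bool user_mode     = false;	/* SP_PSW user-mode bit at io_work */
	static int  preempt_count = 0;		/* __TI_precount(%r9) */
	static bool need_resched  = true;	/* _TIF_NEED_RESCHED */

	int main(void)
	{
		if (user_mode) {
			/* io_work_user: switch to the kernel stack, then run
			 * io_work_loop over all _TIF_WORK_INT bits */
			puts("do resched & signal work, then io_return");
			return 0;
		}
	#if CONFIG_PREEMPT
		if (preempt_count == 0) {
			/* switch to the kernel stack; io_resume_loop keeps
			 * re-entering preempt_schedule_irq() while the
			 * resched bit stays set */
			while (need_resched) {
				puts("preempt_schedule_irq()");
				need_resched = false;
			}
		}
	#endif
		puts("io_restore: back to the interrupted kernel code");
		return 0;
	}

Without CONFIG_PREEMPT the whole kernel-mode branch compiles away, and io_work falls through to io_restore — exactly what the single bo at the top plus the trailing unconditional branch achieve in the assembly.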
@@ -668,7 +672,7 @@ io_work_user:
 	lr	%r15,%r1
 #
 # One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED
+# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
 # and _TIF_MCCK_PENDING
 #
 io_work_loop:
@@ -677,11 +681,10 @@ io_work_loop:
 	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
 	bo	BASED(io_reschedule)
 	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
-	bnz	BASED(io_sigpending)
+	bo	BASED(io_sigpending)
 	tm	__TI_flags+3(%r9),_TIF_NOTIFY_RESUME
-	bnz	BASED(io_notify_resume)
-	b	BASED(io_restore)
-io_work_done:
+	bo	BASED(io_notify_resume)
+	b	BASED(io_return)	# beware of critical section cleanup
 
 #
 # _TIF_MCCK_PENDING is set, call handler
@@ -701,8 +704,6 @@ io_reschedule:
 	basr	%r14,%r1		# call scheduler
 	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	tm	__TI_flags+3(%r9),_TIF_WORK_INT
-	bz	BASED(io_restore)	# there is no work to do
 	b	BASED(io_work_loop)
 #
@@ -921,14 +922,10 @@ cleanup_table_sysc_return:
 	.long	sysc_return + 0x80000000, sysc_leave + 0x80000000
 cleanup_table_sysc_leave:
 	.long	sysc_leave + 0x80000000, sysc_done + 0x80000000
-cleanup_table_sysc_work_loop:
-	.long	sysc_work_loop + 0x80000000, sysc_work_done + 0x80000000
 cleanup_table_io_return:
 	.long	io_return + 0x80000000, io_leave + 0x80000000
 cleanup_table_io_leave:
 	.long	io_leave + 0x80000000, io_done + 0x80000000
-cleanup_table_io_work_loop:
-	.long	io_work_loop + 0x80000000, io_work_done + 0x80000000
 
 cleanup_critical:
 	clc	4(4,%r12),BASED(cleanup_table_system_call)
@@ -945,11 +942,6 @@ cleanup_critical:
 	bl	BASED(0f)
 	clc	4(4,%r12),BASED(cleanup_table_sysc_leave+4)
 	bl	BASED(cleanup_sysc_leave)
-0:
-	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop)
-	bl	BASED(0f)
-	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
-	bl	BASED(cleanup_sysc_return)
 0:
 	clc	4(4,%r12),BASED(cleanup_table_io_return)
 	bl	BASED(0f)
@@ -960,11 +952,6 @@
 	bl	BASED(0f)
 	clc	4(4,%r12),BASED(cleanup_table_io_leave+4)
 	bl	BASED(cleanup_io_leave)
-0:
-	clc	4(4,%r12),BASED(cleanup_table_io_work_loop)
-	bl	BASED(0f)
-	clc	4(4,%r12),BASED(cleanup_table_io_work_loop+4)
-	bl	BASED(cleanup_io_work_loop)
 0:
 	br	%r14
 
@@ -1043,12 +1030,6 @@ cleanup_io_return:
 	la	%r12,__LC_RETURN_PSW
 	br	%r14
 
-cleanup_io_work_loop:
-	mvc	__LC_RETURN_PSW(4),0(%r12)
-	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop)
-	la	%r12,__LC_RETURN_PSW
-	br	%r14
-
 cleanup_io_leave:
 	clc	4(4,%r12),BASED(cleanup_io_leave_insn)
 	be	BASED(2f)
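A note on the cleanup-table hunks above: the TIF work loop used to end in a branch to sysc_restore/io_restore, so an interrupt arriving inside the loop needed its own critical-section range (sysc_work_loop..sysc_work_done, io_work_loop..io_work_done) plus the cleanup_io_work_loop repair routine. The loop now exits through sysc_return/io_return instead (hence the new "beware of critical section cleanup" comments), which the remaining table entries already cover, so those ranges and helpers become dead code and are deleted here and, below, in the 64-bit file. The table test itself is a simple half-open range check; a sketch in C (hypothetical types and values, illustrative only):

	/* cleanup_range_sketch.c - illustrative only, values are made up */
	#include <stdio.h>

	struct range { unsigned long start, end; };

	/* like the paired clc/bl tests: does the old PSW address fall
	 * inside the critical range [start, end)? */
	static int in_critical(unsigned long psw, struct range r)
	{
		return psw >= r.start && psw < r.end;
	}

	int main(void)
	{
		struct range sysc_return_range = { 0x1000, 0x1080 }; /* fake */
		printf("%d\n", in_critical(0x1040, sysc_return_range)); /* 1 */
		return 0;
	}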
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -291,38 +291,36 @@ sysc_restore_trace_psw:
 #endif
 #
-# recheck if there is more work to do
-#
-sysc_work_loop:
-	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
-	jz	sysc_restore		# there is no work to do
-#
-# One of the work bits is on. Find out which one.
+# There is work to do, but first we need to check if we return to userspace.
 #
 sysc_work:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 	jno	sysc_restore
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work_loop:
 	tm	__TI_flags+7(%r9),_TIF_MCCK_PENDING
 	jo	sysc_mcck_pending
 	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
 	jo	sysc_reschedule
 	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
-	jnz	sysc_sigpending
+	jo	sysc_sigpending
 	tm	__TI_flags+7(%r9),_TIF_NOTIFY_RESUME
-	jnz	sysc_notify_resume
+	jo	sysc_notify_resume
 	tm	__TI_flags+7(%r9),_TIF_RESTART_SVC
 	jo	sysc_restart
 	tm	__TI_flags+7(%r9),_TIF_SINGLE_STEP
 	jo	sysc_singlestep
-	j	sysc_restore
-sysc_work_done:
+	j	sysc_return		# beware of critical section cleanup
 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
 sysc_reschedule:
 	larl	%r14,sysc_work_loop
-	jg	schedule		# return point is sysc_return
+	jg	schedule		# return point is sysc_work_loop
 #
 # _TIF_MCCK_PENDING is set, call handler
@@ -369,7 +367,7 @@ sysc_singlestep:
 	ni	__TI_flags+7(%r9),255-_TIF_SINGLE_STEP	# clear TIF_SINGLE_STEP
 	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)		# clear svc number
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	larl	%r14,sysc_return	# load adr. of system return
+	larl	%r14,sysc_work_loop	# load adr. of system return
 	jg	do_single_step		# branch to do_sigtrap
 #
@@ -605,37 +603,27 @@ io_restore_trace_psw:
 #endif
 #
-# There is work todo, we need to check if we return to userspace, then
-# check, if we are in SIE, if yes leave it
+# There is work todo, find out in which context we have been interrupted:
+# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 2) if we return to kernel code and kvm is enabled check if we need to
+#    modify the psw to leave SIE
+# 3) if we return to kernel code and preemptive scheduling is enabled check
+#    the preemption counter and if it is zero call preempt_schedule_irq
+# Before any work can be done, a switch to the kernel stack is required.
 #
 io_work:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-#ifndef CONFIG_PREEMPT
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-	jnz	io_work_user		# yes -> no need to check for SIE
-	la	%r1, BASED(sie_opcode)	# we return to kernel here
-	lg	%r2, SP_PSW+8(%r15)
-	clc	0(2,%r1), 0(%r2)	# is current instruction = SIE?
-	jne	io_restore		# no-> return to kernel
-	lg	%r1, SP_PSW+8(%r15)	# yes-> add 4 bytes to leave SIE
-	aghi	%r1, 4
-	stg	%r1, SP_PSW+8(%r15)
-	j	io_restore		# return to kernel
-#else
-	jno	io_restore		# no-> skip resched & signal
-#endif
-#else
-	jnz	io_work_user		# yes -> do resched & signal
+	jo	io_work_user		# yes -> do resched & signal
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-	la	%r1, BASED(sie_opcode)
-	lg	%r2, SP_PSW+8(%r15)
-	clc	0(2,%r1), 0(%r2)	# is current instruction = SIE?
+	lg	%r2,SP_PSW+8(%r15)	# check if current instruction is SIE
+	lh	%r1,0(%r2)
+	chi	%r1,-19948		# signed 16 bit compare with 0xb214
 	jne	0f			# no -> leave PSW alone
-	lg	%r1, SP_PSW+8(%r15)	# yes-> add 4 bytes to leave SIE
-	aghi	%r1, 4
-	stg	%r1, SP_PSW+8(%r15)
+	aghi	%r2,4			# yes-> add 4 bytes to leave SIE
+	stg	%r2,SP_PSW+8(%r15)
 0:
 #endif
+#ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r9)
 	jnz	io_restore		# preemption is disabled
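A note on the reworked SIE test above: the old code kept a sie_opcode literal in memory and compared against it with clc; the new code loads the halfword at the interrupted PSW address with lh, which sign-extends, and compares it with chi against -19948. That constant is simply the SIE opcode 0xb214 read as a signed 16-bit value: 0xb214 = 45588, and 45588 - 65536 = -19948. A quick check in C (illustrative):

	/* sie_opcode_check.c - verifies the chi operand */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int16_t op = (int16_t)0xb214;	/* what lh leaves in the register */
		printf("%d\n", op);		/* prints -19948 */
		return 0;
	}

This removes the need for the sie_opcode data word (deleted further down) and the extra base-register load.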
@@ -646,21 +634,25 @@ io_work:
 	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lgr	%r15,%r1
 io_resume_loop:
-	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
-	jno	io_restore
 	larl	%r14,io_resume_loop
-	jg	preempt_schedule_irq
+	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
+	jgo	preempt_schedule_irq
 #endif
+	j	io_restore
+#
+# Need to do work before returning to userspace, switch to kernel stack
+#
 io_work_user:
 	lg	%r1,__LC_KERNEL_STACK
 	aghi	%r1,-SP_SIZE
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
 	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lgr	%r15,%r1
 #
 # One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED
+# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
 # and _TIF_MCCK_PENDING
 #
 io_work_loop:
@@ -669,16 +661,10 @@ io_work_loop:
 	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
 	jo	io_reschedule
 	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
-	jnz	io_sigpending
+	jo	io_sigpending
 	tm	__TI_flags+7(%r9),_TIF_NOTIFY_RESUME
-	jnz	io_notify_resume
-	j	io_restore
-io_work_done:
-
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-sie_opcode:
-	.long	0xb2140000
-#endif
+	jo	io_notify_resume
+	j	io_return		# beware of critical section cleanup
 
 #
 # _TIF_MCCK_PENDING is set, call handler
@@ -696,8 +682,6 @@ io_reschedule:
 	brasl	%r14,schedule		# call scheduler
 	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	tm	__TI_flags+7(%r9),_TIF_WORK_INT
-	jz	io_restore		# there is no work to do
 	j	io_work_loop
 #
@@ -903,14 +887,10 @@ cleanup_table_sysc_return:
 	.quad	sysc_return, sysc_leave
 cleanup_table_sysc_leave:
 	.quad	sysc_leave, sysc_done
-cleanup_table_sysc_work_loop:
-	.quad	sysc_work_loop, sysc_work_done
 cleanup_table_io_return:
 	.quad	io_return, io_leave
 cleanup_table_io_leave:
 	.quad	io_leave, io_done
-cleanup_table_io_work_loop:
-	.quad	io_work_loop, io_work_done
 
 cleanup_critical:
 	clc	8(8,%r12),BASED(cleanup_table_system_call)
@@ -927,11 +907,6 @@ cleanup_critical:
 	jl	0f
 	clc	8(8,%r12),BASED(cleanup_table_sysc_leave+8)
 	jl	cleanup_sysc_leave
-0:
-	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop)
-	jl	0f
-	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
-	jl	cleanup_sysc_return
 0:
 	clc	8(8,%r12),BASED(cleanup_table_io_return)
 	jl	0f
@@ -942,11 +917,6 @@
 	jl	0f
 	clc	8(8,%r12),BASED(cleanup_table_io_leave+8)
 	jl	cleanup_io_leave
-0:
-	clc	8(8,%r12),BASED(cleanup_table_io_work_loop)
-	jl	0f
-	clc	8(8,%r12),BASED(cleanup_table_io_work_loop+8)
-	jl	cleanup_io_work_loop
 0:
 	br	%r14
 
@@ -1025,12 +995,6 @@ cleanup_io_return:
 	la	%r12,__LC_RETURN_PSW
 	br	%r14
 
-cleanup_io_work_loop:
-	mvc	__LC_RETURN_PSW(8),0(%r12)
-	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop)
-	la	%r12,__LC_RETURN_PSW
-	br	%r14
-
 cleanup_io_leave:
 	clc	8(8,%r12),BASED(cleanup_io_leave_insn)
 	je	3f