Commit 0b0ed657 authored by Sven Schnelle, committed by Vasily Gorbik

s390: remove critical section cleanup from entry.S

The current code is rather complex and has caused a lot of subtle
and hard-to-debug bugs in the past. Simplify the code by calling
the system_call handler with interrupts disabled, saving the
machine state, and re-enabling interrupts later.

This requires significant changes to the machine check handling code
as well. When a machine check interrupt arrives while the CPU is in
kernel mode, the new code signals pending machine checks with a SIGP
external call. When userspace was interrupted, the handler switches
to the kernel stack and executes s390_handle_mcck() directly.

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent 11886c19
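
The core of the machine check rework: s390_do_machine_check() now reports whether work is pending instead of setting CIF_MCCK_PENDING. If userspace was interrupted it returns 1 and entry.S switches to the kernel stack and calls s390_handle_mcck() directly; in kernel mode it calls schedule_mcck_handler(), which signals the local CPU with a SIGP external call. The smp.c side of that helper is not part of the hunks below; the sketch that follows is only an illustration and assumes the existing pcpu_ec_call() external-call machinery plus a new ec_mcck_pending bit (both assumptions, not confirmed by this diff):

    /*
     * Sketch, not taken from this commit: assumes smp.c provides
     * pcpu_ec_call() and grows an ec_mcck_pending bit in its
     * external-call bit mask.
     */
    void schedule_mcck_handler(void)
    {
            /* SIGP external call against the local CPU */
            pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
    }

    /* Receive side, in the external call interrupt handler (sketch): */
    static void smp_handle_ext_call(void)
    {
            unsigned long bits;

            /* handle bit signal external calls */
            bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
            if (test_bit(ec_mcck_pending, &bits))
                    s390_handle_mcck();
            /* ... remaining ec_* bits handled as before ... */
    }

This way the machine check handler itself only records state; the actual processing runs later from regular interrupt context with a sane machine state, which is what allows entry.S to drop the critical section cleanup.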
@@ -99,7 +99,7 @@ int nmi_alloc_per_cpu(struct lowcore *lc);
 void nmi_free_per_cpu(struct lowcore *lc);
 
 void s390_handle_mcck(void);
-void s390_do_machine_check(struct pt_regs *regs);
+int s390_do_machine_check(struct pt_regs *regs);
 
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_NMI_H */
@@ -14,17 +14,15 @@
 #include <linux/bits.h>
 
-#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
-#define CIF_ASCE_PRIMARY 1 /* primary asce needs fixup / uaccess */
-#define CIF_ASCE_SECONDARY 2 /* secondary asce needs fixup / uaccess */
-#define CIF_NOHZ_DELAY 3 /* delay HZ disable for a tick */
-#define CIF_FPU 4 /* restore FPU registers */
-#define CIF_IGNORE_IRQ 5 /* ignore interrupt (for udelay) */
-#define CIF_ENABLED_WAIT 6 /* in enabled wait state */
-#define CIF_MCCK_GUEST 7 /* machine check happening in guest */
-#define CIF_DEDICATED_CPU 8 /* this CPU is dedicated */
+#define CIF_ASCE_PRIMARY 0 /* primary asce needs fixup / uaccess */
+#define CIF_ASCE_SECONDARY 1 /* secondary asce needs fixup / uaccess */
+#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
+#define CIF_FPU 3 /* restore FPU registers */
+#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
+#define CIF_ENABLED_WAIT 5 /* in enabled wait state */
+#define CIF_MCCK_GUEST 6 /* machine check happening in guest */
+#define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */
 
-#define _CIF_MCCK_PENDING BIT(CIF_MCCK_PENDING)
 #define _CIF_ASCE_PRIMARY BIT(CIF_ASCE_PRIMARY)
 #define _CIF_ASCE_SECONDARY BIT(CIF_ASCE_SECONDARY)
 #define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)

...
@@ -55,14 +55,11 @@ _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
 _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 _TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
- _CIF_ASCE_SECONDARY | _CIF_FPU)
+_CIF_WORK = (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU)
 _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
 
 _LPP_OFFSET = __LC_LPP
 
-#define BASED(name) name-cleanup_critical(%r13)
-
 .macro TRACE_IRQS_ON
 #ifdef CONFIG_TRACE_IRQFLAGS
 basr %r2,%r0
@@ -116,17 +113,39 @@ _LPP_OFFSET = __LC_LPP
 .macro SWITCH_ASYNC savearea,timer
 tmhh %r8,0x0001 # interrupting from user ?
 jnz 2f
+#if IS_ENABLED(CONFIG_KVM)
 lgr %r14,%r9
-cghi %r14,__LC_RETURN_LPSWE
-je 0f
-slg %r14,BASED(.Lcritical_start)
-clg %r14,BASED(.Lcritical_length)
-jhe 1f
-0:
+larl %r13,.Lsie_gmap
+slgr %r14,%r13
+lghi %r13,.Lsie_done - .Lsie_gmap
+clgr %r14,%r13
+jhe 0f
 lghi %r11,\savearea # inside critical section, do cleanup
-brasl %r14,cleanup_critical
-tmhh %r8,0x0001 # retest problem state after cleanup
-jnz 2f
+brasl %r14,.Lcleanup_sie
+#endif
+0: larl %r13,.Lpsw_idle_exit
+cgr %r13,%r9
+jne 1f
+
+mvc __CLOCK_IDLE_EXIT(8,%r2), __LC_INT_CLOCK
+mvc __TIMER_IDLE_EXIT(8,%r2), __LC_ASYNC_ENTER_TIMER
+
+# account system time going idle
+ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
+
+lg %r13,__LC_STEAL_TIMER
+alg %r13,__CLOCK_IDLE_ENTER(%r2)
+slg %r13,__LC_LAST_UPDATE_CLOCK
+stg %r13,__LC_STEAL_TIMER
+
+mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
+
+lg %r13,__LC_SYSTEM_TIMER
+alg %r13,__LC_LAST_UPDATE_TIMER
+slg %r13,__TIMER_IDLE_ENTER(%r2)
+stg %r13,__LC_SYSTEM_TIMER
+mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
+
+nihh %r8,0xfcfd # clear wait state and irq bits
 1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
 slgr %r14,%r15
 srag %r14,%r14,STACK_SHIFT
@@ -152,12 +171,30 @@ _LPP_OFFSET = __LC_LPP
 mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
 .endm
 
-.macro REENABLE_IRQS
+.macro RESTORE_SM_CLEAR_PER
 stg %r8,__LC_RETURN_PSW
 ni __LC_RETURN_PSW,0xbf
 ssm __LC_RETURN_PSW
 .endm
 
+.macro ENABLE_INTS
+stosm __SF_EMPTY(%r15),3
+.endm
+
+.macro ENABLE_INTS_TRACE
+TRACE_IRQS_ON
+ENABLE_INTS
+.endm
+
+.macro DISABLE_INTS
+stnsm __SF_EMPTY(%r15),0xfc
+.endm
+
+.macro DISABLE_INTS_TRACE
+DISABLE_INTS
+TRACE_IRQS_OFF
+.endm
+
 .macro STCK savearea
 #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
 .insn s,0xb27c0000,\savearea # store clock fast
@@ -254,8 +291,6 @@ ENTRY(__switch_to)
 BR_EX %r14
 ENDPROC(__switch_to)
 
-.L__critical_start:
-
 #if IS_ENABLED(CONFIG_KVM)
 /*
 * sie64a calling convention:
@@ -288,7 +323,6 @@ ENTRY(sie64a)
 BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 .Lsie_entry:
 sie 0(%r14)
-.Lsie_exit:
 BPOFF
 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 .Lsie_skip:
@@ -341,7 +375,6 @@ EXPORT_SYMBOL(sie_exit)
 
 ENTRY(system_call)
 stpt __LC_SYNC_ENTER_TIMER
-.Lsysc_stmg:
 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
 BPOFF
 lg %r12,__LC_CURRENT
@@ -350,7 +383,6 @@ ENTRY(system_call)
 .Lsysc_per:
 lg %r15,__LC_KERNEL_STACK
 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
-.Lsysc_vtime:
 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 stmg %r0,%r7,__PT_R0(%r11)
@@ -358,6 +390,7 @@ ENTRY(system_call)
 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
 stg %r14,__PT_FLAGS(%r11)
+ENABLE_INTS
 .Lsysc_do_svc:
 # clear user controlled register to prevent speculative use
 xgr %r0,%r0
@@ -393,26 +426,26 @@ ENTRY(system_call)
 jnz .Lsysc_work
 TSTMSK __TI_flags(%r12),_TIF_WORK
 jnz .Lsysc_work # check for work
-TSTMSK __LC_CPU_FLAGS,_CIF_WORK
+TSTMSK __LC_CPU_FLAGS,(_CIF_WORK-_CIF_FPU)
 jnz .Lsysc_work
 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
 .Lsysc_restore:
+DISABLE_INTS
+TSTMSK __LC_CPU_FLAGS, _CIF_FPU
+jz .Lsysc_skip_fpu
+brasl %r14,load_fpu_regs
+.Lsysc_skip_fpu:
 lg %r14,__LC_VDSO_PER_CPU
-lmg %r0,%r10,__PT_R0(%r11)
 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
-.Lsysc_exit_timer:
 stpt __LC_EXIT_TIMER
 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
-lmg %r11,%r15,__PT_R11(%r11)
-b __LC_RETURN_LPSWE(%r0)
-.Lsysc_done:
+lmg %r0,%r15,__PT_R0(%r11)
+b __LC_RETURN_LPSWE
 
 #
 # One of the work bits is on. Find out which one.
 #
 .Lsysc_work:
-TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
-jo .Lsysc_mcck_pending
 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
 jo .Lsysc_reschedule
 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
@@ -436,11 +469,9 @@ ENTRY(system_call)
 jo .Lsysc_sigpending
 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
 jo .Lsysc_notify_resume
-TSTMSK __LC_CPU_FLAGS,_CIF_FPU
-jo .Lsysc_vxrs
 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
 jnz .Lsysc_asce
-j .Lsysc_return # beware of critical section cleanup
+j .Lsysc_return
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
@@ -449,13 +480,6 @@ ENTRY(system_call)
 larl %r14,.Lsysc_return
 jg schedule
 
-#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lsysc_mcck_pending:
-larl %r14,.Lsysc_return
-jg s390_handle_mcck # TIF bit will be cleared by handler
-
 #
 # _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
 #
@@ -475,12 +499,6 @@ ENTRY(system_call)
 larl %r14,.Lsysc_return
 jg set_fs_fixup
 
-#
-# CIF_FPU is set, restore floating-point controls and floating-point registers.
-#
-.Lsysc_vxrs:
-larl %r14,.Lsysc_return
-jg load_fpu_regs
-
 #
 # _TIF_SIGPENDING is set, call do_signal
@@ -564,7 +582,6 @@ ENTRY(system_call)
 jnh .Lsysc_tracenogo
 sllg %r8,%r2,3
 lg %r9,0(%r8,%r10)
-.Lsysc_tracego:
 lmg %r3,%r7,__PT_R3(%r11)
 stg %r7,STACK_FRAME_OVERHEAD(%r15)
 lg %r2,__PT_ORIG_GPR2(%r11)
@@ -585,8 +602,6 @@ ENTRY(ret_from_fork)
 la %r11,STACK_FRAME_OVERHEAD(%r15)
 lg %r12,__LC_CURRENT
 brasl %r14,schedule_tail
-TRACE_IRQS_ON
-ssm __LC_SVC_NEW_PSW # reenable interrupts
 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
 jne .Lsysc_tracenogo
 # it's a kernel thread
@@ -620,15 +635,16 @@ ENTRY(pgm_check_handler)
 lghi %r10,1
 0: lg %r12,__LC_CURRENT
 lghi %r11,0
-larl %r13,cleanup_critical
 lmg %r8,%r9,__LC_PGM_OLD_PSW
 tmhh %r8,0x0001 # test problem state bit
 jnz 3f # -> fault in user space
 #if IS_ENABLED(CONFIG_KVM)
 # cleanup critical section for program checks in sie64a
 lgr %r14,%r9
-slg %r14,BASED(.Lsie_critical_start)
-clg %r14,BASED(.Lsie_critical_length)
+larl %r13,.Lsie_gmap
+slgr %r14,%r13
+lghi %r13,.Lsie_done - .Lsie_gmap
+clgr %r14,%r13
 jhe 1f
 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
@@ -680,7 +696,7 @@ ENTRY(pgm_check_handler)
 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
 mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-6: REENABLE_IRQS
+6: RESTORE_SM_CLEAR_PER
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 larl %r1,pgm_check_table
 llgh %r10,__PT_INT_CODE+2(%r11)
@@ -702,7 +718,7 @@ ENTRY(pgm_check_handler)
 # PER event in supervisor state, must be kprobes
 #
 .Lpgm_kprobe:
-REENABLE_IRQS
+RESTORE_SM_CLEAR_PER
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 lgr %r2,%r11 # pass pointer to pt_regs
 brasl %r14,do_per_trap
@@ -713,11 +729,10 @@ ENTRY(pgm_check_handler)
 #
 .Lpgm_svcper:
 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
-lghi %r13,__TASK_thread
 larl %r14,.Lsysc_per
 stg %r14,__LC_RETURN_PSW+8
 lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
-lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
+lpswe __LC_RETURN_PSW # branch to .Lsysc_per
 ENDPROC(pgm_check_handler)
 
 /*
@@ -729,7 +744,6 @@ ENTRY(io_int_handler)
 BPOFF
 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
 lg %r12,__LC_CURRENT
-larl %r13,cleanup_critical
 lmg %r8,%r9,__LC_IO_OLD_PSW
 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 stmg %r0,%r7,__PT_R0(%r11)
@@ -749,7 +763,12 @@ ENTRY(io_int_handler)
 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
 jo .Lio_restore
+#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+tmhh %r8,0x300
+jz 1f
 TRACE_IRQS_OFF
+1:
+#endif
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 .Lio_loop:
 lgr %r2,%r11 # pass pointer to pt_regs
@@ -767,25 +786,27 @@ ENTRY(io_int_handler)
 j .Lio_loop
 
 .Lio_return:
 LOCKDEP_SYS_EXIT
-TRACE_IRQS_ON
-.Lio_tif:
 TSTMSK __TI_flags(%r12),_TIF_WORK
 jnz .Lio_work # there is work to do (signals etc.)
 TSTMSK __LC_CPU_FLAGS,_CIF_WORK
 jnz .Lio_work
 .Lio_restore:
+#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+tm __PT_PSW(%r11),3
+jno 0f
+TRACE_IRQS_ON
+0:
+#endif
 lg %r14,__LC_VDSO_PER_CPU
-lmg %r0,%r10,__PT_R0(%r11)
 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
 tm __PT_PSW+1(%r11),0x01 # returning to user ?
 jno .Lio_exit_kernel
 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
-.Lio_exit_timer:
 stpt __LC_EXIT_TIMER
 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 .Lio_exit_kernel:
-lmg %r11,%r15,__PT_R11(%r11)
-b __LC_RETURN_LPSWE(%r0)
+lmg %r0,%r15,__PT_R0(%r11)
+b __LC_RETURN_LPSWE
 .Lio_done:
 
 #
@@ -813,9 +834,6 @@ ENTRY(io_int_handler)
 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 la %r11,STACK_FRAME_OVERHEAD(%r1)
 lgr %r15,%r1
-# TRACE_IRQS_ON already done at .Lio_return, call
-# TRACE_IRQS_OFF to keep things symmetrical
-TRACE_IRQS_OFF
 brasl %r14,preempt_schedule_irq
 j .Lio_return
 #else
@@ -835,9 +853,6 @@ ENTRY(io_int_handler)
 #
 # One of the work bits is on. Find out which one.
 #
-.Lio_work_tif:
-TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
-jo .Lio_mcck_pending
 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
 jo .Lio_reschedule
 #ifdef CONFIG_LIVEPATCH
@@ -854,15 +869,6 @@ ENTRY(io_int_handler)
 jo .Lio_vxrs
 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
 jnz .Lio_asce
-j .Lio_return # beware of critical section cleanup
-
-#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lio_mcck_pending:
-# TRACE_IRQS_ON already done at .Lio_return
-brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
-TRACE_IRQS_OFF
 j .Lio_return
 
 #
@@ -895,23 +901,19 @@ ENTRY(io_int_handler)
 # _TIF_GUARDED_STORAGE is set, call guarded_storage_load
 #
 .Lio_guarded_storage:
-# TRACE_IRQS_ON already done at .Lio_return
-ssm __LC_SVC_NEW_PSW # reenable interrupts
+ENABLE_INTS_TRACE
 lgr %r2,%r11 # pass pointer to pt_regs
 brasl %r14,gs_load_bc_cb
-ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
-TRACE_IRQS_OFF
+DISABLE_INTS_TRACE
 j .Lio_return
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
 .Lio_reschedule:
-# TRACE_IRQS_ON already done at .Lio_return
-ssm __LC_SVC_NEW_PSW # reenable interrupts
+ENABLE_INTS_TRACE
 brasl %r14,schedule # call scheduler
-ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
-TRACE_IRQS_OFF
+DISABLE_INTS_TRACE
 j .Lio_return
 
 #
@@ -928,24 +930,20 @@ ENTRY(io_int_handler)
 # _TIF_SIGPENDING or is set, call do_signal
 #
 .Lio_sigpending:
-# TRACE_IRQS_ON already done at .Lio_return
-ssm __LC_SVC_NEW_PSW # reenable interrupts
+ENABLE_INTS_TRACE
 lgr %r2,%r11 # pass pointer to pt_regs
 brasl %r14,do_signal
-ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
-TRACE_IRQS_OFF
+DISABLE_INTS_TRACE
 j .Lio_return
 
 #
 # _TIF_NOTIFY_RESUME or is set, call do_notify_resume
 #
 .Lio_notify_resume:
-# TRACE_IRQS_ON already done at .Lio_return
-ssm __LC_SVC_NEW_PSW # reenable interrupts
+ENABLE_INTS_TRACE
 lgr %r2,%r11 # pass pointer to pt_regs
 brasl %r14,do_notify_resume
-ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
-TRACE_IRQS_OFF
+DISABLE_INTS_TRACE
 j .Lio_return
 ENDPROC(io_int_handler)
@@ -958,7 +956,6 @@ ENTRY(ext_int_handler)
 BPOFF
 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
 lg %r12,__LC_CURRENT
-larl %r13,cleanup_critical
 lmg %r8,%r9,__LC_EXT_OLD_PSW
 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 stmg %r0,%r7,__PT_R0(%r11)
@@ -981,7 +978,12 @@ ENTRY(ext_int_handler)
 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
 jo .Lio_restore
+#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+tmhh %r8,0x300
+jz 1f
 TRACE_IRQS_OFF
+1:
+#endif
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 lgr %r2,%r11 # pass pointer to pt_regs
 lghi %r3,EXT_INTERRUPT
@@ -990,11 +992,11 @@ ENTRY(ext_int_handler)
 ENDPROC(ext_int_handler)
 
 /*
- * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
+ * Load idle PSW.
 */
 ENTRY(psw_idle)
 stg %r3,__SF_EMPTY(%r15)
-larl %r1,.Lpsw_idle_lpsw+4
+larl %r1,.Lpsw_idle_exit
 stg %r1,__SF_EMPTY+8(%r15)
 larl %r1,smp_cpu_mtid
 llgf %r1,0(%r1)
@@ -1006,10 +1008,9 @@ ENTRY(psw_idle)
 BPON
 STCK __CLOCK_IDLE_ENTER(%r2)
 stpt __TIMER_IDLE_ENTER(%r2)
-.Lpsw_idle_lpsw:
 lpswe __SF_EMPTY(%r15)
+.Lpsw_idle_exit:
 BR_EX %r14
-.Lpsw_idle_end:
 ENDPROC(psw_idle)
 
 /*
@@ -1020,6 +1021,7 @@ ENDPROC(psw_idle)
 * of the register contents at return from io or a system call.
 */
 ENTRY(save_fpu_regs)
+stnsm __SF_EMPTY(%r15),0xfc
 lg %r2,__LC_CURRENT
 aghi %r2,__TASK_thread
 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
@@ -1051,6 +1053,7 @@ ENTRY(save_fpu_regs)
 .Lsave_fpu_regs_done:
 oi __LC_CPU_FLAGS+7,_CIF_FPU
 .Lsave_fpu_regs_exit:
+ssm __SF_EMPTY(%r15)
 BR_EX %r14
 .Lsave_fpu_regs_end:
 ENDPROC(save_fpu_regs)
@@ -1102,8 +1105,6 @@ load_fpu_regs:
 .Lload_fpu_regs_end:
 ENDPROC(load_fpu_regs)
 
-.L__critical_end:
-
 /*
 * Machine check handler routines
 */
@@ -1116,7 +1117,6 @@ ENTRY(mcck_int_handler)
 lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
 lg %r12,__LC_CURRENT
-larl %r13,cleanup_critical
 lmg %r8,%r9,__LC_MCK_OLD_PSW
 TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
 jo .Lmcck_panic # yes -> rest of mcck code invalid
@@ -1202,15 +1202,13 @@ ENTRY(mcck_int_handler)
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 lgr %r2,%r11 # pass pointer to pt_regs
 brasl %r14,s390_do_machine_check
-tm __PT_PSW+1(%r11),0x01 # returning to user ?
-jno .Lmcck_return
+cghi %r2,0
+je .Lmcck_return
 lg %r1,__LC_KERNEL_STACK # switch to kernel stack
 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 la %r11,STACK_FRAME_OVERHEAD(%r1)
 lgr %r15,%r1
-TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
-jno .Lmcck_return
 TRACE_IRQS_OFF
 brasl %r14,s390_handle_mcck
 TRACE_IRQS_ON
@@ -1280,265 +1278,23 @@ ENTRY(stack_overflow)
 ENDPROC(stack_overflow)
 #endif
 
-ENTRY(cleanup_critical)
-cghi %r9,__LC_RETURN_LPSWE
-je .Lcleanup_lpswe
-#if IS_ENABLED(CONFIG_KVM)
-clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
-jl 0f
-clg %r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
-jl .Lcleanup_sie
-#endif
-clg %r9,BASED(.Lcleanup_table) # system_call
-jl 0f
-clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
-jl .Lcleanup_system_call
-clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
-jl 0f
-clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
-jl .Lcleanup_sysc_tif
-clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
-jl .Lcleanup_sysc_restore
-clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
-jl 0f
-clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
-jl .Lcleanup_io_tif
-clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
-jl .Lcleanup_io_restore
-clg %r9,BASED(.Lcleanup_table+64) # psw_idle
-jl 0f
-clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
-jl .Lcleanup_idle
-clg %r9,BASED(.Lcleanup_table+80) # save_fpu_regs
-jl 0f
-clg %r9,BASED(.Lcleanup_table+88) # .Lsave_fpu_regs_end
-jl .Lcleanup_save_fpu_regs
-clg %r9,BASED(.Lcleanup_table+96) # load_fpu_regs
-jl 0f
-clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
-jl .Lcleanup_load_fpu_regs
-0: BR_EX %r14,%r11
-ENDPROC(cleanup_critical)
-
-.align 8
-.Lcleanup_table:
-.quad system_call
-.quad .Lsysc_do_svc
-.quad .Lsysc_tif
-.quad .Lsysc_restore
-.quad .Lsysc_done
-.quad .Lio_tif
-.quad .Lio_restore
-.quad .Lio_done
-.quad psw_idle
-.quad .Lpsw_idle_end
-.quad save_fpu_regs
-.quad .Lsave_fpu_regs_end
-.quad load_fpu_regs
-.quad .Lload_fpu_regs_end
-
 #if IS_ENABLED(CONFIG_KVM)
-.Lcleanup_table_sie:
-.quad .Lsie_gmap
-.quad .Lsie_done
-
 .Lcleanup_sie:
 cghi %r11,__LC_SAVE_AREA_ASYNC #Is this in normal interrupt?
 je 1f
-slg %r9,BASED(.Lsie_crit_mcck_start)
-clg %r9,BASED(.Lsie_crit_mcck_length)
+larl %r13,.Lsie_entry
+slgr %r9,%r13
+larl %r13,.Lsie_skip
+clgr %r9,%r13
 jh 1f
 oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
 1: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
 larl %r9,sie_exit # skip forward to sie_exit
 BR_EX %r14,%r11
-#endif
-
-.Lcleanup_system_call:
-# check if stpt has been executed
-clg %r9,BASED(.Lcleanup_system_call_insn)
-jh 0f
-mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
-cghi %r11,__LC_SAVE_AREA_ASYNC
-je 0f
-mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: # check if stmg has been executed
-clg %r9,BASED(.Lcleanup_system_call_insn+8)
-jh 0f
-mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
-0: # check if base register setup + TIF bit load has been done
-clg %r9,BASED(.Lcleanup_system_call_insn+16)
-jhe 0f
-# set up saved register r12 task struct pointer
-stg %r12,32(%r11)
-# set up saved register r13 __TASK_thread offset
-mvc 40(8,%r11),BASED(.Lcleanup_system_call_const)
-0: # check if the user time update has been done
-clg %r9,BASED(.Lcleanup_system_call_insn+24)
-jh 0f
-lg %r15,__LC_EXIT_TIMER
-slg %r15,__LC_SYNC_ENTER_TIMER
-alg %r15,__LC_USER_TIMER
-stg %r15,__LC_USER_TIMER
-0: # check if the system time update has been done
-clg %r9,BASED(.Lcleanup_system_call_insn+32)
-jh 0f
-lg %r15,__LC_LAST_UPDATE_TIMER
-slg %r15,__LC_EXIT_TIMER
-alg %r15,__LC_SYSTEM_TIMER
-stg %r15,__LC_SYSTEM_TIMER
-0: # update accounting time stamp
-mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
-# set up saved register r11
-lg %r15,__LC_KERNEL_STACK
-la %r9,STACK_FRAME_OVERHEAD(%r15)
-stg %r9,24(%r11) # r11 pt_regs pointer
-# fill pt_regs
-mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
-stmg %r0,%r7,__PT_R0(%r9)
-mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
-mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
-xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
-mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
-# setup saved register r15
-stg %r15,56(%r11) # r15 stack pointer
-# set new psw address and exit
-larl %r9,.Lsysc_do_svc
-BR_EX %r14,%r11
-.Lcleanup_system_call_insn:
-.quad system_call
-.quad .Lsysc_stmg
-.quad .Lsysc_per
-.quad .Lsysc_vtime+36
-.quad .Lsysc_vtime+42
-.Lcleanup_system_call_const:
-.quad __TASK_thread
-
-.Lcleanup_sysc_tif:
-larl %r9,.Lsysc_tif
-BR_EX %r14,%r11
-
-.Lcleanup_sysc_restore:
-# check if stpt has been executed
-clg %r9,BASED(.Lcleanup_sysc_restore_insn)
-jh 0f
-mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
-cghi %r11,__LC_SAVE_AREA_ASYNC
-je 0f
-mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8)
-je 1f
-lg %r9,24(%r11) # get saved pointer to pt_regs
-mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
-mvc 0(64,%r11),__PT_R8(%r9)
-lmg %r0,%r7,__PT_R0(%r9)
-.Lcleanup_lpswe:
-1: lmg %r8,%r9,__LC_RETURN_PSW
-BR_EX %r14,%r11
-.Lcleanup_sysc_restore_insn:
-.quad .Lsysc_exit_timer
-.quad .Lsysc_done - 4
-
-.Lcleanup_io_tif:
-larl %r9,.Lio_tif
-BR_EX %r14,%r11
-
-.Lcleanup_io_restore:
-# check if stpt has been executed
-clg %r9,BASED(.Lcleanup_io_restore_insn)
-jh 0f
-mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: clg %r9,BASED(.Lcleanup_io_restore_insn+8)
-je 1f
-lg %r9,24(%r11) # get saved r11 pointer to pt_regs
-mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
-mvc 0(64,%r11),__PT_R8(%r9)
-lmg %r0,%r7,__PT_R0(%r9)
-1: lmg %r8,%r9,__LC_RETURN_PSW
-BR_EX %r14,%r11
-.Lcleanup_io_restore_insn:
-.quad .Lio_exit_timer
-.quad .Lio_done - 4
-
-.Lcleanup_idle:
-ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
-# copy interrupt clock & cpu timer
-mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
-mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
-cghi %r11,__LC_SAVE_AREA_ASYNC
-je 0f
-mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
-mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
-0: # check if stck & stpt have been executed
-clg %r9,BASED(.Lcleanup_idle_insn)
-jhe 1f
-mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
-mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1: # calculate idle cycles
-clg %r9,BASED(.Lcleanup_idle_insn)
-jl 3f
-larl %r1,smp_cpu_mtid
-llgf %r1,0(%r1)
-ltgr %r1,%r1
-jz 3f
-.insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
-larl %r3,mt_cycles
-ag %r3,__LC_PERCPU_OFFSET
-la %r4,__SF_EMPTY+16(%r15)
-2: lg %r0,0(%r3)
-slg %r0,0(%r4)
-alg %r0,64(%r4)
-stg %r0,0(%r3)
-la %r3,8(%r3)
-la %r4,8(%r4)
-brct %r1,2b
-3: # account system time going idle
-lg %r9,__LC_STEAL_TIMER
-alg %r9,__CLOCK_IDLE_ENTER(%r2)
-slg %r9,__LC_LAST_UPDATE_CLOCK
-stg %r9,__LC_STEAL_TIMER
-mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
-lg %r9,__LC_SYSTEM_TIMER
-alg %r9,__LC_LAST_UPDATE_TIMER
-slg %r9,__TIMER_IDLE_ENTER(%r2)
-stg %r9,__LC_SYSTEM_TIMER
-mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
-# prepare return psw
-nihh %r8,0xfcfd # clear irq & wait state bits
-lg %r9,48(%r11) # return from psw_idle
-BR_EX %r14,%r11
-.Lcleanup_idle_insn:
-.quad .Lpsw_idle_lpsw
-
-.Lcleanup_save_fpu_regs:
-larl %r9,save_fpu_regs
-BR_EX %r14,%r11
-
-.Lcleanup_load_fpu_regs:
-larl %r9,load_fpu_regs
-BR_EX %r14,%r11
-
-/*
-* Integer constants
-*/
-.align 8
-.Lcritical_start:
-.quad .L__critical_start
-.Lcritical_length:
-.quad .L__critical_end - .L__critical_start
-#if IS_ENABLED(CONFIG_KVM)
-.Lsie_critical_start:
-.quad .Lsie_gmap
-.Lsie_critical_length:
-.quad .Lsie_done - .Lsie_gmap
-.Lsie_crit_mcck_start:
-.quad .Lsie_entry
-.Lsie_crit_mcck_length:
-.quad .Lsie_skip - .Lsie_entry
 #endif
 
 .section .rodata, "a"
 #define SYSCALL(esame,emu) .quad __s390x_ ## esame

...
@@ -24,19 +24,19 @@ void enabled_wait(void)
 {
 struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
 unsigned long long idle_time;
-unsigned long psw_mask;
-
-trace_hardirqs_on();
+unsigned long psw_mask, flags;
 
 /* Wait for external, I/O or machine check interrupt. */
 psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 clear_cpu_flag(CIF_NOHZ_DELAY);
 
+local_irq_save(flags);
 /* Call the assembler magic in entry.S */
 psw_idle(idle, psw_mask);
+local_irq_restore(flags);
 
-trace_hardirqs_off();
 /* Account time spent with enabled wait psw loaded as idle time. */
 write_seqcount_begin(&idle->seqcount);
@@ -118,22 +118,16 @@ u64 arch_cpu_idle_time(int cpu)
 
 void arch_cpu_idle_enter(void)
 {
-local_mcck_disable();
 }
 
 void arch_cpu_idle(void)
 {
-if (!test_cpu_flag(CIF_MCCK_PENDING))
-/* Halt the cpu and keep track of cpu time accounting. */
-enabled_wait();
+enabled_wait();
 local_irq_enable();
 }
 
 void arch_cpu_idle_exit(void)
 {
-local_mcck_enable();
-if (test_cpu_flag(CIF_MCCK_PENDING))
-s390_handle_mcck();
 }
 
 void arch_cpu_idle_dead(void)

...
@@ -148,7 +148,6 @@ void s390_handle_mcck(void)
 local_mcck_disable();
 mcck = *this_cpu_ptr(&cpu_mcck);
 memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
-clear_cpu_flag(CIF_MCCK_PENDING);
 local_mcck_enable();
 local_irq_restore(flags);
 
@@ -333,7 +332,7 @@ NOKPROBE_SYMBOL(s390_backup_mcck_info);
 /*
 * machine check handler.
 */
-void notrace s390_do_machine_check(struct pt_regs *regs)
+int notrace s390_do_machine_check(struct pt_regs *regs)
 {
 static int ipd_count;
 static DEFINE_SPINLOCK(ipd_lock);
@@ -342,6 +341,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 unsigned long long tmp;
 union mci mci;
 unsigned long mcck_dam_code;
+int mcck_pending = 0;
 
 nmi_enter();
 inc_irq_stat(NMI_NMI);
@@ -400,7 +400,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 */
 mcck->kill_task = 1;
 mcck->mcck_code = mci.val;
-set_cpu_flag(CIF_MCCK_PENDING);
+mcck_pending = 1;
 }
 
 /*
@@ -420,8 +420,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 mcck->stp_queue |= stp_sync_check();
 if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
 mcck->stp_queue |= stp_island_check();
-if (mcck->stp_queue)
-set_cpu_flag(CIF_MCCK_PENDING);
+mcck_pending = 1;
 }
 
 /*
@@ -442,12 +441,12 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 if (mci.cp) {
 /* Channel report word pending */
 mcck->channel_report = 1;
-set_cpu_flag(CIF_MCCK_PENDING);
+mcck_pending = 1;
 }
 if (mci.w) {
 /* Warning pending */
 mcck->warning = 1;
-set_cpu_flag(CIF_MCCK_PENDING);
+mcck_pending = 1;
 }
 
 /*
@@ -462,7 +461,17 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 *((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
 }
 clear_cpu_flag(CIF_MCCK_GUEST);
+
+if (user_mode(regs) && mcck_pending) {
+nmi_exit();
+return 1;
+}
+
+if (mcck_pending)
+schedule_mcck_handler();
+
 nmi_exit();
+return 0;
 }
 NOKPROBE_SYMBOL(s390_do_machine_check);

...
@@ -384,8 +384,7 @@ static void __init setup_lowcore_dat_off(void)
 lc->restart_psw.addr = (unsigned long) restart_int_handler;
 lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 lc->external_new_psw.addr = (unsigned long) ext_int_handler;
-lc->svc_new_psw.mask = PSW_KERNEL_BITS |
-PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 lc->svc_new_psw.addr = (unsigned long) system_call;
 lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 lc->program_new_psw.addr = (unsigned long) pgm_check_handler;

...
@@ -3995,9 +3995,6 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 if (need_resched())
 schedule();
 
-if (test_cpu_flag(CIF_MCCK_PENDING))
-s390_handle_mcck();
-
 if (!kvm_is_ucontrol(vcpu->kvm)) {
 rc = kvm_s390_deliver_pending_interrupts(vcpu);
 if (rc)

...
@@ -1002,8 +1002,6 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 if (need_resched())
 schedule();
-if (test_cpu_flag(CIF_MCCK_PENDING))
-s390_handle_mcck();
 
 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

...
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(__delay);
 
 static void __udelay_disabled(unsigned long long usecs)
 {
-unsigned long cr0, cr0_new, psw_mask;
+unsigned long cr0, cr0_new, psw_mask, flags;
 struct s390_idle_data idle;
 u64 end;
 
@@ -45,7 +45,9 @@ static void __udelay_disabled(unsigned long long usecs)
 psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
 set_clock_comparator(end);
 set_cpu_flag(CIF_IGNORE_IRQ);
+local_irq_save(flags);
 psw_idle(&idle, psw_mask);
+local_irq_restore(flags);
 clear_cpu_flag(CIF_IGNORE_IRQ);
 set_clock_comparator(S390_lowcore.clock_comparator);
 __ctl_load(cr0, 0, 0);

...