Commit c5328901 authored by Martin Schwidefsky

[S390] entry[64].S improvements

Another round of cleanup for entry[64].S; in particular, the program check
handler looks more reasonable now. The code size for the 31 bit kernel
has been reduced by 616 bytes and by 528 bytes for the 64 bit version.
Even better, the code is a bit faster as well.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 3b7f9933
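
For orientation before the diff: the central data-structure change is that the single lowcore save_area[16] is split into dedicated sync, async and restart save areas, so the synchronous entry paths (svc, program check) and the asynchronous ones (I/O, external, machine check) no longer share one scratch region. Below is a minimal C sketch of the new 31-bit layout, for illustration only; it uses plain stdint types rather than the kernel's __u32/__u8, and the offsets are simply copied from the lowcore.h hunk that follows.

	#include <stdint.h>

	/* Illustration only -- mirrors the reworked 31-bit lowcore save areas
	 * introduced by this commit; it is not the kernel header itself. */
	struct lowcore_save_areas {
		uint32_t save_area_sync[8];		/* 0x0200: svc / program check entry */
		uint32_t save_area_async[8];		/* 0x0220: I/O, external and mcck entry */
		uint32_t save_area_restart[1];		/* 0x0240: PSW restart handler */
		uint8_t  pad_0x0244[0x0248 - 0x0244];	/* 0x0244: pad up to return_psw */
	};
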
...@@ -97,47 +97,52 @@ struct _lowcore { ...@@ -97,47 +97,52 @@ struct _lowcore {
__u32 gpregs_save_area[16]; /* 0x0180 */ __u32 gpregs_save_area[16]; /* 0x0180 */
__u32 cregs_save_area[16]; /* 0x01c0 */ __u32 cregs_save_area[16]; /* 0x01c0 */
/* Save areas. */
__u32 save_area_sync[8]; /* 0x0200 */
__u32 save_area_async[8]; /* 0x0220 */
__u32 save_area_restart[1]; /* 0x0240 */
__u8 pad_0x0244[0x0248-0x0244]; /* 0x0244 */
/* Return psws. */ /* Return psws. */
__u32 save_area[16]; /* 0x0200 */ psw_t return_psw; /* 0x0248 */
psw_t return_psw; /* 0x0240 */ psw_t return_mcck_psw; /* 0x0250 */
psw_t return_mcck_psw; /* 0x0248 */
/* CPU time accounting values */ /* CPU time accounting values */
__u64 sync_enter_timer; /* 0x0250 */ __u64 sync_enter_timer; /* 0x0258 */
__u64 async_enter_timer; /* 0x0258 */ __u64 async_enter_timer; /* 0x0260 */
__u64 mcck_enter_timer; /* 0x0260 */ __u64 mcck_enter_timer; /* 0x0268 */
__u64 exit_timer; /* 0x0268 */ __u64 exit_timer; /* 0x0270 */
__u64 user_timer; /* 0x0270 */ __u64 user_timer; /* 0x0278 */
__u64 system_timer; /* 0x0278 */ __u64 system_timer; /* 0x0280 */
__u64 steal_timer; /* 0x0280 */ __u64 steal_timer; /* 0x0288 */
__u64 last_update_timer; /* 0x0288 */ __u64 last_update_timer; /* 0x0290 */
__u64 last_update_clock; /* 0x0290 */ __u64 last_update_clock; /* 0x0298 */
/* Current process. */ /* Current process. */
__u32 current_task; /* 0x0298 */ __u32 current_task; /* 0x02a0 */
__u32 thread_info; /* 0x029c */ __u32 thread_info; /* 0x02a4 */
__u32 kernel_stack; /* 0x02a0 */ __u32 kernel_stack; /* 0x02a8 */
/* Interrupt and panic stack. */ /* Interrupt and panic stack. */
__u32 async_stack; /* 0x02a4 */ __u32 async_stack; /* 0x02ac */
__u32 panic_stack; /* 0x02a8 */ __u32 panic_stack; /* 0x02b0 */
/* Address space pointer. */ /* Address space pointer. */
__u32 kernel_asce; /* 0x02ac */ __u32 kernel_asce; /* 0x02b4 */
__u32 user_asce; /* 0x02b0 */ __u32 user_asce; /* 0x02b8 */
__u32 current_pid; /* 0x02b4 */ __u32 current_pid; /* 0x02bc */
/* SMP info area */ /* SMP info area */
__u32 cpu_nr; /* 0x02b8 */ __u32 cpu_nr; /* 0x02c0 */
__u32 softirq_pending; /* 0x02bc */ __u32 softirq_pending; /* 0x02c4 */
__u32 percpu_offset; /* 0x02c0 */ __u32 percpu_offset; /* 0x02c8 */
__u32 ext_call_fast; /* 0x02c4 */ __u32 ext_call_fast; /* 0x02cc */
__u64 int_clock; /* 0x02c8 */ __u64 int_clock; /* 0x02d0 */
__u64 mcck_clock; /* 0x02d0 */ __u64 mcck_clock; /* 0x02d8 */
__u64 clock_comparator; /* 0x02d8 */ __u64 clock_comparator; /* 0x02e0 */
__u32 machine_flags; /* 0x02e0 */ __u32 machine_flags; /* 0x02e8 */
__u32 ftrace_func; /* 0x02e4 */ __u32 ftrace_func; /* 0x02ec */
__u8 pad_0x02e8[0x0300-0x02e8]; /* 0x02e8 */ __u8 pad_0x02f8[0x0300-0x02f0]; /* 0x02f0 */
/* Interrupt response block */ /* Interrupt response block */
__u8 irb[64]; /* 0x0300 */ __u8 irb[64]; /* 0x0300 */
...@@ -229,57 +234,62 @@ struct _lowcore { ...@@ -229,57 +234,62 @@ struct _lowcore {
psw_t mcck_new_psw; /* 0x01e0 */ psw_t mcck_new_psw; /* 0x01e0 */
psw_t io_new_psw; /* 0x01f0 */ psw_t io_new_psw; /* 0x01f0 */
/* Entry/exit save area & return psws. */ /* Save areas. */
__u64 save_area[16]; /* 0x0200 */ __u64 save_area_sync[8]; /* 0x0200 */
psw_t return_psw; /* 0x0280 */ __u64 save_area_async[8]; /* 0x0240 */
psw_t return_mcck_psw; /* 0x0290 */ __u64 save_area_restart[1]; /* 0x0280 */
__u8 pad_0x0288[0x0290-0x0288]; /* 0x0288 */
/* Return psws. */
psw_t return_psw; /* 0x0290 */
psw_t return_mcck_psw; /* 0x02a0 */
/* CPU accounting and timing values. */ /* CPU accounting and timing values. */
__u64 sync_enter_timer; /* 0x02a0 */ __u64 sync_enter_timer; /* 0x02b0 */
__u64 async_enter_timer; /* 0x02a8 */ __u64 async_enter_timer; /* 0x02b8 */
__u64 mcck_enter_timer; /* 0x02b0 */ __u64 mcck_enter_timer; /* 0x02c0 */
__u64 exit_timer; /* 0x02b8 */ __u64 exit_timer; /* 0x02c8 */
__u64 user_timer; /* 0x02c0 */ __u64 user_timer; /* 0x02d0 */
__u64 system_timer; /* 0x02c8 */ __u64 system_timer; /* 0x02d8 */
__u64 steal_timer; /* 0x02d0 */ __u64 steal_timer; /* 0x02e0 */
__u64 last_update_timer; /* 0x02d8 */ __u64 last_update_timer; /* 0x02e8 */
__u64 last_update_clock; /* 0x02e0 */ __u64 last_update_clock; /* 0x02f0 */
/* Current process. */ /* Current process. */
__u64 current_task; /* 0x02e8 */ __u64 current_task; /* 0x02f8 */
__u64 thread_info; /* 0x02f0 */ __u64 thread_info; /* 0x0300 */
__u64 kernel_stack; /* 0x02f8 */ __u64 kernel_stack; /* 0x0308 */
/* Interrupt and panic stack. */ /* Interrupt and panic stack. */
__u64 async_stack; /* 0x0300 */ __u64 async_stack; /* 0x0310 */
__u64 panic_stack; /* 0x0308 */ __u64 panic_stack; /* 0x0318 */
/* Address space pointer. */ /* Address space pointer. */
__u64 kernel_asce; /* 0x0310 */ __u64 kernel_asce; /* 0x0320 */
__u64 user_asce; /* 0x0318 */ __u64 user_asce; /* 0x0328 */
__u64 current_pid; /* 0x0320 */ __u64 current_pid; /* 0x0330 */
/* SMP info area */ /* SMP info area */
__u32 cpu_nr; /* 0x0328 */ __u32 cpu_nr; /* 0x0338 */
__u32 softirq_pending; /* 0x032c */ __u32 softirq_pending; /* 0x033c */
__u64 percpu_offset; /* 0x0330 */ __u64 percpu_offset; /* 0x0340 */
__u64 ext_call_fast; /* 0x0338 */ __u64 ext_call_fast; /* 0x0348 */
__u64 int_clock; /* 0x0340 */ __u64 int_clock; /* 0x0350 */
__u64 mcck_clock; /* 0x0348 */ __u64 mcck_clock; /* 0x0358 */
__u64 clock_comparator; /* 0x0350 */ __u64 clock_comparator; /* 0x0360 */
__u64 vdso_per_cpu_data; /* 0x0358 */ __u64 vdso_per_cpu_data; /* 0x0368 */
__u64 machine_flags; /* 0x0360 */ __u64 machine_flags; /* 0x0370 */
__u64 ftrace_func; /* 0x0368 */ __u64 ftrace_func; /* 0x0378 */
__u64 gmap; /* 0x0370 */ __u64 gmap; /* 0x0380 */
__u8 pad_0x0378[0x0380-0x0378]; /* 0x0378 */ __u8 pad_0x0388[0x0400-0x0388]; /* 0x0388 */
/* Interrupt response block. */ /* Interrupt response block. */
__u8 irb[64]; /* 0x0380 */ __u8 irb[64]; /* 0x0400 */
/* Per cpu primary space access list */ /* Per cpu primary space access list */
__u32 paste[16]; /* 0x03c0 */ __u32 paste[16]; /* 0x0440 */
__u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */ __u8 pad_0x0480[0x0e00-0x0480]; /* 0x0480 */
/* /*
* 0xe00 contains the address of the IPL Parameter Information * 0xe00 contains the address of the IPL Parameter Information
......
...@@ -108,7 +108,9 @@ int main(void) ...@@ -108,7 +108,9 @@ int main(void)
DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw)); DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw)); DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw)); DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area)); DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw)); DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw)); DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer)); DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
......
...@@ -33,7 +33,7 @@ s390_base_mcck_handler_fn: ...@@ -33,7 +33,7 @@ s390_base_mcck_handler_fn:
.previous .previous
ENTRY(s390_base_ext_handler) ENTRY(s390_base_ext_handler)
stmg %r0,%r15,__LC_SAVE_AREA stmg %r0,%r15,__LC_SAVE_AREA_ASYNC
basr %r13,0 basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD 0: aghi %r15,-STACK_FRAME_OVERHEAD
larl %r1,s390_base_ext_handler_fn larl %r1,s390_base_ext_handler_fn
...@@ -41,7 +41,7 @@ ENTRY(s390_base_ext_handler) ...@@ -41,7 +41,7 @@ ENTRY(s390_base_ext_handler)
ltgr %r1,%r1 ltgr %r1,%r1
jz 1f jz 1f
basr %r14,%r1 basr %r14,%r1
1: lmg %r0,%r15,__LC_SAVE_AREA 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
lpswe __LC_EXT_OLD_PSW lpswe __LC_EXT_OLD_PSW
...@@ -53,7 +53,7 @@ s390_base_ext_handler_fn: ...@@ -53,7 +53,7 @@ s390_base_ext_handler_fn:
.previous .previous
ENTRY(s390_base_pgm_handler) ENTRY(s390_base_pgm_handler)
stmg %r0,%r15,__LC_SAVE_AREA stmg %r0,%r15,__LC_SAVE_AREA_SYNC
basr %r13,0 basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD 0: aghi %r15,-STACK_FRAME_OVERHEAD
larl %r1,s390_base_pgm_handler_fn larl %r1,s390_base_pgm_handler_fn
...@@ -61,7 +61,7 @@ ENTRY(s390_base_pgm_handler) ...@@ -61,7 +61,7 @@ ENTRY(s390_base_pgm_handler)
ltgr %r1,%r1 ltgr %r1,%r1
jz 1f jz 1f
basr %r14,%r1 basr %r14,%r1
lmg %r0,%r15,__LC_SAVE_AREA lmg %r0,%r15,__LC_SAVE_AREA_SYNC
lpswe __LC_PGM_OLD_PSW lpswe __LC_PGM_OLD_PSW
1: lpswe disabled_wait_psw-0b(%r13) 1: lpswe disabled_wait_psw-0b(%r13)
...@@ -142,7 +142,7 @@ s390_base_mcck_handler_fn: ...@@ -142,7 +142,7 @@ s390_base_mcck_handler_fn:
.previous .previous
ENTRY(s390_base_ext_handler) ENTRY(s390_base_ext_handler)
stm %r0,%r15,__LC_SAVE_AREA stm %r0,%r15,__LC_SAVE_AREA_ASYNC
basr %r13,0 basr %r13,0
0: ahi %r15,-STACK_FRAME_OVERHEAD 0: ahi %r15,-STACK_FRAME_OVERHEAD
l %r1,2f-0b(%r13) l %r1,2f-0b(%r13)
...@@ -150,7 +150,7 @@ ENTRY(s390_base_ext_handler) ...@@ -150,7 +150,7 @@ ENTRY(s390_base_ext_handler)
ltr %r1,%r1 ltr %r1,%r1
jz 1f jz 1f
basr %r14,%r1 basr %r14,%r1
1: lm %r0,%r15,__LC_SAVE_AREA 1: lm %r0,%r15,__LC_SAVE_AREA_ASYNC
ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
lpsw __LC_EXT_OLD_PSW lpsw __LC_EXT_OLD_PSW
...@@ -164,7 +164,7 @@ s390_base_ext_handler_fn: ...@@ -164,7 +164,7 @@ s390_base_ext_handler_fn:
.previous .previous
ENTRY(s390_base_pgm_handler) ENTRY(s390_base_pgm_handler)
stm %r0,%r15,__LC_SAVE_AREA stm %r0,%r15,__LC_SAVE_AREA_SYNC
basr %r13,0 basr %r13,0
0: ahi %r15,-STACK_FRAME_OVERHEAD 0: ahi %r15,-STACK_FRAME_OVERHEAD
l %r1,2f-0b(%r13) l %r1,2f-0b(%r13)
...@@ -172,7 +172,7 @@ ENTRY(s390_base_pgm_handler) ...@@ -172,7 +172,7 @@ ENTRY(s390_base_pgm_handler)
ltr %r1,%r1 ltr %r1,%r1
jz 1f jz 1f
basr %r14,%r1 basr %r14,%r1
lm %r0,%r15,__LC_SAVE_AREA lm %r0,%r15,__LC_SAVE_AREA_SYNC
lpsw __LC_PGM_OLD_PSW lpsw __LC_PGM_OLD_PSW
1: lpsw disabled_wait_psw-0b(%r13) 1: lpsw disabled_wait_psw-0b(%r13)
......
...@@ -19,32 +19,22 @@ ...@@ -19,32 +19,22 @@
#include <asm/unistd.h> #include <asm/unistd.h>
#include <asm/page.h> #include <asm/page.h>
/* __PT_R0 = __PT_GPRS
* Stack layout for the system_call stack entry. __PT_R1 = __PT_GPRS + 4
* The first few entries are identical to the user_regs_struct. __PT_R2 = __PT_GPRS + 8
*/ __PT_R3 = __PT_GPRS + 12
SP_PTREGS = STACK_FRAME_OVERHEAD __PT_R4 = __PT_GPRS + 16
SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS __PT_R5 = __PT_GPRS + 20
SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW __PT_R6 = __PT_GPRS + 24
SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS __PT_R7 = __PT_GPRS + 28
SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4 __PT_R8 = __PT_GPRS + 32
SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8 __PT_R9 = __PT_GPRS + 36
SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12 __PT_R10 = __PT_GPRS + 40
SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16 __PT_R11 = __PT_GPRS + 44
SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20 __PT_R12 = __PT_GPRS + 48
SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24 __PT_R13 = __PT_GPRS + 52
SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28 __PT_R14 = __PT_GPRS + 56
SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32 __PT_R15 = __PT_GPRS + 60
SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36
SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44
SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52
SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE
SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_PER_TRAP ) _TIF_MCCK_PENDING | _TIF_PER_TRAP )
...@@ -58,133 +48,91 @@ STACK_SIZE = 1 << STACK_SHIFT ...@@ -58,133 +48,91 @@ STACK_SIZE = 1 << STACK_SHIFT
#define BASED(name) name-system_call(%r13) #define BASED(name) name-system_call(%r13)
#ifdef CONFIG_TRACE_IRQFLAGS
.macro TRACE_IRQS_ON .macro TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0 basr %r2,%r0
l %r1,BASED(.Ltrace_irq_on_caller) l %r1,BASED(.Lhardirqs_on)
basr %r14,%r1 basr %r14,%r1 # call trace_hardirqs_on_caller
#endif
.endm .endm
.macro TRACE_IRQS_OFF .macro TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0 basr %r2,%r0
l %r1,BASED(.Ltrace_irq_off_caller) l %r1,BASED(.Lhardirqs_off)
basr %r14,%r1 basr %r14,%r1 # call trace_hardirqs_off_caller
.endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
#endif #endif
.endm
#ifdef CONFIG_LOCKDEP
.macro LOCKDEP_SYS_EXIT .macro LOCKDEP_SYS_EXIT
tm SP_PSW+1(%r15),0x01 # returning to user ? #ifdef CONFIG_LOCKDEP
jz 0f tm __PT_PSW+1(%r11),0x01 # returning to user ?
jz .+10
l %r1,BASED(.Llockdep_sys_exit) l %r1,BASED(.Llockdep_sys_exit)
basr %r14,%r1 basr %r14,%r1 # call lockdep_sys_exit
0:
.endm
#else
#define LOCKDEP_SYS_EXIT
#endif #endif
/*
* Register usage in interrupt handlers:
* R9 - pointer to current task structure
* R13 - pointer to literal pool
* R14 - return register for function calls
* R15 - kernel stack pointer
*/
.macro UPDATE_VTIME lc_from,lc_to,lc_sum
lm %r10,%r11,\lc_from
sl %r10,\lc_to
sl %r11,\lc_to+4
bc 3,BASED(0f)
sl %r10,BASED(.Lc_1)
0: al %r10,\lc_sum
al %r11,\lc_sum+4
bc 12,BASED(1f)
al %r10,BASED(.Lc_1)
1: stm %r10,%r11,\lc_sum
.endm
.macro SAVE_ALL_SVC psworg,savearea
stm %r12,%r15,\savearea
l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
l %r15,__LC_KERNEL_STACK # problem state -> load ksp
s %r15,BASED(.Lc_spsize) # make room for registers & psw
.endm
.macro SAVE_ALL_BASE savearea
stm %r12,%r15,\savearea
l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
.endm .endm
.macro SAVE_ALL_PGM psworg,savearea .macro CHECK_STACK stacksize,savearea
tm \psworg+1,0x01 # test problem state bit
#ifdef CONFIG_CHECK_STACK #ifdef CONFIG_CHECK_STACK
bnz BASED(1f) tml %r15,\stacksize - CONFIG_STACK_GUARD
tml %r15,STACK_SIZE - CONFIG_STACK_GUARD la %r14,\savearea
bnz BASED(2f) jz stack_overflow
la %r12,\psworg
b BASED(stack_overflow)
#else
bz BASED(2f)
#endif #endif
1: l %r15,__LC_KERNEL_STACK # problem state -> load ksp
2: s %r15,BASED(.Lc_spsize) # make room for registers & psw
.endm .endm
.macro SAVE_ALL_ASYNC psworg,savearea .macro SWITCH_ASYNC savearea,stack,shift
stm %r12,%r15,\savearea tmh %r8,0x0001 # interrupting from user ?
l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13 jnz 1f
la %r12,\psworg lr %r14,%r9
tm \psworg+1,0x01 # test problem state bit sl %r14,BASED(.Lcritical_start)
bnz BASED(1f) # from user -> load async stack cl %r14,BASED(.Lcritical_length)
clc \psworg+4(4),BASED(.Lcritical_end) jhe 0f
bhe BASED(0f) la %r11,\savearea # inside critical section, do cleanup
clc \psworg+4(4),BASED(.Lcritical_start) bras %r14,cleanup_critical
bl BASED(0f) tmh %r8,0x0001 # retest problem state after cleanup
l %r14,BASED(.Lcleanup_critical) jnz 1f
basr %r14,%r14 0: l %r14,\stack # are we already on the target stack?
tm 1(%r12),0x01 # retest problem state after cleanup
bnz BASED(1f)
0: l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
slr %r14,%r15 slr %r14,%r15
sra %r14,STACK_SHIFT sra %r14,\shift
#ifdef CONFIG_CHECK_STACK jnz 1f
bnz BASED(1f) CHECK_STACK 1<<\shift,\savearea
tml %r15,STACK_SIZE - CONFIG_STACK_GUARD j 2f
bnz BASED(2f) 1: l %r15,\stack # load target stack
b BASED(stack_overflow) 2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
#else la %r11,STACK_FRAME_OVERHEAD(%r15)
bz BASED(2f)
#endif
1: l %r15,__LC_ASYNC_STACK
2: s %r15,BASED(.Lc_spsize) # make room for registers & psw
.endm .endm
.macro CREATE_STACK_FRAME savearea .macro ADD64 high,low,timer
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) al \high,\timer
st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 al \low,\timer+4
mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack brc 12,.+8
stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack ahi \high,1
.endm .endm
.macro RESTORE_ALL psworg,sync .macro SUB64 high,low,timer
mvc \psworg(8),SP_PSW(%r15) # move user PSW to lowcore sl \high,\timer
.if !\sync sl \low,\timer+4
ni \psworg+1,0xfd # clear wait state bit brc 3,.+8
.endif ahi \high,-1
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user .endm
stpt __LC_EXIT_TIMER
lpsw \psworg # back to caller .macro UPDATE_VTIME high,low,enter_timer
lm \high,\low,__LC_EXIT_TIMER
SUB64 \high,\low,\enter_timer
ADD64 \high,\low,__LC_USER_TIMER
stm \high,\low,__LC_USER_TIMER
lm \high,\low,__LC_LAST_UPDATE_TIMER
SUB64 \high,\low,__LC_EXIT_TIMER
ADD64 \high,\low,__LC_SYSTEM_TIMER
stm \high,\low,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
.endm .endm
.macro REENABLE_IRQS .macro REENABLE_IRQS
mvc __SF_EMPTY(1,%r15),SP_PSW(%r15) st %r8,__LC_RETURN_PSW
ni __SF_EMPTY(%r15),0xbf ni __LC_RETURN_PSW,0xbf
ssm __SF_EMPTY(%r15) ssm __LC_RETURN_PSW
.endm .endm
.section .kprobes.text, "ax" .section .kprobes.text, "ax"
...@@ -197,14 +145,13 @@ STACK_SIZE = 1 << STACK_SHIFT ...@@ -197,14 +145,13 @@ STACK_SIZE = 1 << STACK_SHIFT
* gpr2 = prev * gpr2 = prev
*/ */
ENTRY(__switch_to) ENTRY(__switch_to)
basr %r1,0 l %r4,__THREAD_info(%r2) # get thread_info of prev
0: l %r4,__THREAD_info(%r2) # get thread_info of prev
l %r5,__THREAD_info(%r3) # get thread_info of next l %r5,__THREAD_info(%r3) # get thread_info of next
tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
bz 1f-0b(%r1) jz 0f
ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next
1: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 0: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
st %r15,__THREAD_ksp(%r2) # store kernel stack of prev st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
l %r15,__THREAD_ksp(%r3) # load kernel stack of next l %r15,__THREAD_ksp(%r3) # load kernel stack of next
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
...@@ -224,48 +171,55 @@ __critical_start: ...@@ -224,48 +171,55 @@ __critical_start:
ENTRY(system_call) ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER stpt __LC_SYNC_ENTER_TIMER
sysc_saveall: sysc_stm:
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA stm %r8,%r15,__LC_SAVE_AREA_SYNC
CREATE_STACK_FRAME __LC_SAVE_AREA l %r12,__LC_THREAD_INFO
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct l %r13,__LC_SVC_NEW_PSW+4
mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW sysc_per:
mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC l %r15,__LC_KERNEL_STACK
oi __TI_flags+3(%r12),_TIF_SYSCALL ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime: sysc_vtime:
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
sysc_stime: stm %r0,%r7,__PT_R0(%r11)
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
sysc_update: mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __PT_SVC_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc: sysc_do_svc:
xr %r7,%r7 oi __TI_flags+3(%r12),_TIF_SYSCALL
icm %r7,3,SP_SVC_CODE+2(%r15)# load svc number and test for svc 0 lh %r8,__PT_SVC_CODE+2(%r11)
bnz BASED(sysc_nr_ok) # svc number > 0 sla %r8,2 # shift and test for svc0
jnz sysc_nr_ok
# svc 0: system call number in %r1 # svc 0: system call number in %r1
cl %r1,BASED(.Lnr_syscalls) cl %r1,BASED(.Lnr_syscalls)
bnl BASED(sysc_nr_ok) jnl sysc_nr_ok
sth %r1,SP_SVC_CODE+2(%r15) sth %r1,__PT_SVC_CODE+2(%r11)
lr %r7,%r1 # copy svc number to %r7 lr %r8,%r1
sla %r8,2
sysc_nr_ok: sysc_nr_ok:
sll %r7,2 # svc number *4 l %r10,BASED(.Lsys_call_table) # 31 bit system call table
l %r10,BASED(.Lsysc_table) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
st %r2,__PT_ORIG_GPR2(%r11)
st %r7,STACK_FRAME_OVERHEAD(%r15)
l %r9,0(%r8,%r10) # get system call addr.
tm __TI_flags+2(%r12),_TIF_TRACE >> 8 tm __TI_flags+2(%r12),_TIF_TRACE >> 8
mvc SP_ARGS(4,%r15),SP_R7(%r15) jnz sysc_tracesys
l %r8,0(%r7,%r10) # get system call addr. basr %r14,%r9 # call sys_xxxx
bnz BASED(sysc_tracesys) st %r2,__PT_R2(%r11) # store return value
basr %r14,%r8 # call sys_xxxx
st %r2,SP_R2(%r15) # store return value (change R2 on stack)
sysc_return: sysc_return:
LOCKDEP_SYS_EXIT LOCKDEP_SYS_EXIT
sysc_tif: sysc_tif:
tm SP_PSW+1(%r15),0x01 # returning to user ? tm __PT_PSW+1(%r11),0x01 # returning to user ?
bno BASED(sysc_restore) jno sysc_restore
tm __TI_flags+3(%r12),_TIF_WORK_SVC tm __TI_flags+3(%r12),_TIF_WORK_SVC
bnz BASED(sysc_work) # there is work to do (signals etc.) jnz sysc_work # check for work
ni __TI_flags+3(%r12),255-_TIF_SYSCALL ni __TI_flags+3(%r12),255-_TIF_SYSCALL
sysc_restore: sysc_restore:
RESTORE_ALL __LC_RETURN_PSW,1 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER
lm %r0,%r15,__PT_R0(%r11)
lpsw __LC_RETURN_PSW
sysc_done: sysc_done:
# #
...@@ -273,16 +227,16 @@ sysc_done: ...@@ -273,16 +227,16 @@ sysc_done:
# #
sysc_work: sysc_work:
tm __TI_flags+3(%r12),_TIF_MCCK_PENDING tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
bo BASED(sysc_mcck_pending) jo sysc_mcck_pending
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
bo BASED(sysc_reschedule) jo sysc_reschedule
tm __TI_flags+3(%r12),_TIF_SIGPENDING tm __TI_flags+3(%r12),_TIF_SIGPENDING
bo BASED(sysc_sigpending) jo sysc_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
bo BASED(sysc_notify_resume) jo sysc_notify_resume
tm __TI_flags+3(%r12),_TIF_PER_TRAP tm __TI_flags+3(%r12),_TIF_PER_TRAP
bo BASED(sysc_singlestep) jo sysc_singlestep
b BASED(sysc_return) # beware of critical section cleanup j sysc_return # beware of critical section cleanup
# #
# _TIF_NEED_RESCHED is set, call schedule # _TIF_NEED_RESCHED is set, call schedule
...@@ -290,13 +244,13 @@ sysc_work: ...@@ -290,13 +244,13 @@ sysc_work:
sysc_reschedule: sysc_reschedule:
l %r1,BASED(.Lschedule) l %r1,BASED(.Lschedule)
la %r14,BASED(sysc_return) la %r14,BASED(sysc_return)
br %r1 # call scheduler br %r1 # call schedule
# #
# _TIF_MCCK_PENDING is set, call handler # _TIF_MCCK_PENDING is set, call handler
# #
sysc_mcck_pending: sysc_mcck_pending:
l %r1,BASED(.Ls390_handle_mcck) l %r1,BASED(.Lhandle_mcck)
la %r14,BASED(sysc_return) la %r14,BASED(sysc_return)
br %r1 # TIF bit will be cleared by handler br %r1 # TIF bit will be cleared by handler
...@@ -305,23 +259,24 @@ sysc_mcck_pending: ...@@ -305,23 +259,24 @@ sysc_mcck_pending:
# #
sysc_sigpending: sysc_sigpending:
ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
la %r2,SP_PTREGS(%r15) # load pt_regs lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Ldo_signal) l %r1,BASED(.Ldo_signal)
basr %r14,%r1 # call do_signal basr %r14,%r1 # call do_signal
tm __TI_flags+3(%r12),_TIF_SYSCALL tm __TI_flags+3(%r12),_TIF_SYSCALL
bno BASED(sysc_return) jno sysc_return
lm %r2,%r6,SP_R2(%r15) # load svc arguments lm %r2,%r7,__PT_R2(%r11) # load svc arguments
xr %r7,%r7 # svc 0 returns -ENOSYS xr %r8,%r8 # svc 0 returns -ENOSYS
clc SP_SVC_CODE+2(2,%r15),BASED(.Lnr_syscalls+2) clc __PT_SVC_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
bnl BASED(sysc_nr_ok) # invalid svc number -> do svc 0 jnl sysc_nr_ok # invalid svc number -> do svc 0
icm %r7,3,SP_SVC_CODE+2(%r15)# load new svc number lh %r8,__PT_SVC_CODE+2(%r11) # load new svc number
b BASED(sysc_nr_ok) # restart svc sla %r8,2
j sysc_nr_ok # restart svc
# #
# _TIF_NOTIFY_RESUME is set, call do_notify_resume # _TIF_NOTIFY_RESUME is set, call do_notify_resume
# #
sysc_notify_resume: sysc_notify_resume:
la %r2,SP_PTREGS(%r15) # load pt_regs lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Ldo_notify_resume) l %r1,BASED(.Ldo_notify_resume)
la %r14,BASED(sysc_return) la %r14,BASED(sysc_return)
br %r1 # call do_notify_resume br %r1 # call do_notify_resume
...@@ -331,56 +286,57 @@ sysc_notify_resume: ...@@ -331,56 +286,57 @@ sysc_notify_resume:
# #
sysc_singlestep: sysc_singlestep:
ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
la %r2,SP_PTREGS(%r15) # address of register-save area lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Lhandle_per) # load adr. of per handler l %r1,BASED(.Ldo_per_trap)
la %r14,BASED(sysc_return) # load adr. of system return la %r14,BASED(sysc_return)
br %r1 # branch to do_per_trap br %r1 # call do_per_trap
# #
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call # and after the system call
# #
sysc_tracesys: sysc_tracesys:
l %r1,BASED(.Ltrace_entry) l %r1,BASED(.Ltrace_enter)
la %r2,SP_PTREGS(%r15) # load pt_regs lr %r2,%r11 # pass pointer to pt_regs
la %r3,0 la %r3,0
xr %r0,%r0 xr %r0,%r0
icm %r0,3,SP_SVC_CODE(%r15) icm %r0,3,__PT_SVC_CODE+2(%r11)
st %r0,SP_R2(%r15) st %r0,__PT_R2(%r11)
basr %r14,%r1 basr %r14,%r1 # call do_syscall_trace_enter
cl %r2,BASED(.Lnr_syscalls) cl %r2,BASED(.Lnr_syscalls)
bnl BASED(sysc_tracenogo) jnl sysc_tracenogo
lr %r7,%r2 lr %r8,%r2
sll %r7,2 # svc number *4 sll %r8,2
l %r8,0(%r7,%r10) l %r9,0(%r8,%r10)
sysc_tracego: sysc_tracego:
lm %r3,%r6,SP_R3(%r15) lm %r3,%r7,__PT_R3(%r11)
mvc SP_ARGS(4,%r15),SP_R7(%r15) st %r7,STACK_FRAME_OVERHEAD(%r15)
l %r2,SP_ORIG_R2(%r15) l %r2,__PT_ORIG_GPR2(%r11)
basr %r14,%r8 # call sys_xxx basr %r14,%r9 # call sys_xxx
st %r2,SP_R2(%r15) # store return value st %r2,__PT_R2(%r11) # store return value
sysc_tracenogo: sysc_tracenogo:
tm __TI_flags+2(%r12),_TIF_TRACE >> 8 tm __TI_flags+2(%r12),_TIF_TRACE >> 8
bz BASED(sysc_return) jz sysc_return
l %r1,BASED(.Ltrace_exit) l %r1,BASED(.Ltrace_exit)
la %r2,SP_PTREGS(%r15) # load pt_regs lr %r2,%r11 # pass pointer to pt_regs
la %r14,BASED(sysc_return) la %r14,BASED(sysc_return)
br %r1 br %r1 # call do_syscall_trace_exit
# #
# a new process exits the kernel with ret_from_fork # a new process exits the kernel with ret_from_fork
# #
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
la %r11,STACK_FRAME_OVERHEAD(%r15)
l %r12,__LC_THREAD_INFO
l %r13,__LC_SVC_NEW_PSW+4 l %r13,__LC_SVC_NEW_PSW+4
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? jo 0f
bo BASED(0f) st %r15,__PT_R15(%r11) # store stack pointer for new kthread
st %r15,SP_R15(%r15) # store stack pointer for new kthread 0: l %r1,BASED(.Lschedule_tail)
0: l %r1,BASED(.Lschedtail) basr %r14,%r1 # call schedule_tail
basr %r14,%r1
TRACE_IRQS_ON TRACE_IRQS_ON
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts ssm __LC_SVC_NEW_PSW # reenable interrupts
b BASED(sysc_tracenogo) j sysc_tracenogo
# #
# kernel_execve function needs to deal with pt_regs that is not # kernel_execve function needs to deal with pt_regs that is not
...@@ -390,153 +346,98 @@ ENTRY(kernel_execve) ...@@ -390,153 +346,98 @@ ENTRY(kernel_execve)
stm %r12,%r15,48(%r15) stm %r12,%r15,48(%r15)
lr %r14,%r15 lr %r14,%r15
l %r13,__LC_SVC_NEW_PSW+4 l %r13,__LC_SVC_NEW_PSW+4
s %r15,BASED(.Lc_spsize) ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
st %r14,__SF_BACKCHAIN(%r15) st %r14,__SF_BACKCHAIN(%r15)
la %r12,SP_PTREGS(%r15) la %r12,STACK_FRAME_OVERHEAD(%r15)
xc 0(__PT_SIZE,%r12),0(%r12) xc 0(__PT_SIZE,%r12),0(%r12)
l %r1,BASED(.Ldo_execve) l %r1,BASED(.Ldo_execve)
lr %r5,%r12 lr %r5,%r12
basr %r14,%r1 basr %r14,%r1 # call do_execve
ltr %r2,%r2 ltr %r2,%r2
be BASED(0f) je 0f
a %r15,BASED(.Lc_spsize) ahi %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE)
lm %r12,%r15,48(%r15) lm %r12,%r15,48(%r15)
br %r14 br %r14
# execve succeeded. # execve succeeded.
0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts 0: ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
l %r15,__LC_KERNEL_STACK # load ksp l %r15,__LC_KERNEL_STACK # load ksp
s %r15,BASED(.Lc_spsize) # make room for registers & psw ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs la %r11,STACK_FRAME_OVERHEAD(%r15)
mvc 0(__PT_SIZE,%r11),0(%r12) # copy pt_regs
l %r12,__LC_THREAD_INFO l %r12,__LC_THREAD_INFO
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts ssm __LC_SVC_NEW_PSW # reenable interrupts
l %r1,BASED(.Lexecve_tail) l %r1,BASED(.Lexecve_tail)
basr %r14,%r1 basr %r14,%r1 # call execve_tail
b BASED(sysc_return) j sysc_return
/* /*
* Program check handler routine * Program check handler routine
*/ */
ENTRY(pgm_check_handler) ENTRY(pgm_check_handler)
/*
* First we need to check for a special case:
* Single stepping an instruction that disables the PER event mask will
* cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
* For a single stepped SVC the program check handler gets control after
* the SVC new PSW has been loaded. But we want to execute the SVC first and
* then handle the PER event. Therefore we update the SVC old PSW to point
* to the pgm_check_handler and branch to the SVC handler after we checked
* if we have to load the kernel stack register.
* For every other possible cause for PER event without the PER mask set
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
stpt __LC_SYNC_ENTER_TIMER stpt __LC_SYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA stm %r8,%r15,__LC_SAVE_AREA_SYNC
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception l %r12,__LC_THREAD_INFO
bnz BASED(pgm_per) # got per exception -> special case l %r13,__LC_SVC_NEW_PSW+4
SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA lm %r8,%r9,__LC_PGM_OLD_PSW
CREATE_STACK_FRAME __LC_SAVE_AREA tmh %r8,0x0001 # test problem state bit
mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW jnz 1f # -> fault in user space
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct tmh %r8,0x4000 # PER bit set in old PSW ?
tm SP_PSW+1(%r15),0x01 # interrupting from user ? jnz 0f # -> enabled, can't be a double fault
bz BASED(pgm_no_vtime) tm __LC_PGM_ILC+3,0x80 # check for per exception
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER jnz pgm_svcper # -> single stepped svc
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER j 2f
pgm_no_vtime: 1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
l %r3,__LC_PGM_ILC # load program interruption code l %r15,__LC_KERNEL_STACK
l %r4,__LC_TRANS_EXC_CODE 2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
REENABLE_IRQS la %r11,STACK_FRAME_OVERHEAD(%r15)
la %r8,0x7f stm %r0,%r7,__PT_R0(%r11)
nr %r8,%r3 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
sll %r8,2 stm %r8,%r9,__PT_PSW(%r11)
l %r1,BASED(.Ljump_table) tm __LC_PGM_ILC+3,0x80 # check for per exception
l %r1,0(%r8,%r1) # load address of handler routine jz 0f
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r1 # branch to interrupt-handler
pgm_exit:
b BASED(sysc_return)
#
# handle per exception
#
pgm_per:
tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
bnz BASED(pgm_per_std) # ok, normal per event from user space
# ok its one of the special cases, now we need to find out which one
clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
be BASED(pgm_svcper)
# no interesting special case, ignore PER event
lm %r12,%r15,__LC_SAVE_AREA
lpsw 0x28
#
# Normal per exception
#
pgm_per_std:
SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(pgm_no_vtime2)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
l %r1,__TI_task(%r12) l %r1,__TI_task(%r12)
tm SP_PSW+1(%r15),0x01 # kernel per event ? tmh %r8,0x0001 # kernel per event ?
bz BASED(kernel_per) jz pgm_kprobe
mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE oi __TI_flags+3(%r12),_TIF_PER_TRAP
mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP 0: l %r3,__LC_PGM_ILC # load program interruption code
l %r3,__LC_PGM_ILC # load program interruption code
l %r4,__LC_TRANS_EXC_CODE l %r4,__LC_TRANS_EXC_CODE
REENABLE_IRQS REENABLE_IRQS
la %r8,0x7f xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
nr %r8,%r3 # clear per-event-bit and ilc
be BASED(pgm_exit2) # only per or per+check ?
sll %r8,2
l %r1,BASED(.Ljump_table) l %r1,BASED(.Ljump_table)
l %r1,0(%r8,%r1) # load address of handler routine la %r10,0x7f
la %r2,SP_PTREGS(%r15) # address of register-save area nr %r10,%r3
je sysc_return
sll %r10,2
l %r1,0(%r10,%r1) # load address of handler routine
lr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # branch to interrupt-handler basr %r14,%r1 # branch to interrupt-handler
pgm_exit2: j sysc_return
b BASED(sysc_return)
# #
# it was a single stepped SVC that is causing all the trouble # PER event in supervisor state, must be kprobes
# #
pgm_svcper: pgm_kprobe:
SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA REENABLE_IRQS
CREATE_STACK_FRAME __LC_SAVE_AREA xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct l %r1,BASED(.Ldo_per_trap)
mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW lr %r2,%r11 # pass pointer to pt_regs
mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC basr %r14,%r1 # call do_per_trap
oi __TI_flags+3(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP) j sysc_return
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
l %r8,__TI_task(%r12)
mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS
mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
lm %r2,%r6,SP_R2(%r15) # load svc arguments
b BASED(sysc_do_svc)
# #
# per was called from kernel, must be kprobes # single stepped system call
# #
kernel_per: pgm_svcper:
REENABLE_IRQS oi __TI_flags+3(%r12),_TIF_PER_TRAP
la %r2,SP_PTREGS(%r15) # address of register-save area mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
l %r1,BASED(.Lhandle_per) # load adr. of per handler mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
basr %r14,%r1 # branch to do_single_step lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs
b BASED(pgm_exit)
/* /*
* IO interrupt handler routine * IO interrupt handler routine
...@@ -545,28 +446,35 @@ kernel_per: ...@@ -545,28 +446,35 @@ kernel_per:
ENTRY(io_int_handler) ENTRY(io_int_handler)
stck __LC_INT_CLOCK stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 stm %r8,%r15,__LC_SAVE_AREA_ASYNC
CREATE_STACK_FRAME __LC_SAVE_AREA+16 l %r12,__LC_THREAD_INFO
mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack l %r13,__LC_SVC_NEW_PSW+4
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct lm %r8,%r9,__LC_IO_OLD_PSW
tm SP_PSW+1(%r15),0x01 # interrupting from user ? tmh %r8,0x0001 # interrupting from user ?
bz BASED(io_no_vtime) jz io_skip
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER io_skip:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
io_no_vtime: stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
stm %r8,%r9,__PT_PSW(%r11)
TRACE_IRQS_OFF TRACE_IRQS_OFF
l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
la %r2,SP_PTREGS(%r15) # address of register-save area l %r1,BASED(.Ldo_IRQ)
basr %r14,%r1 # branch to standard irq handler lr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # call do_IRQ
io_return: io_return:
LOCKDEP_SYS_EXIT LOCKDEP_SYS_EXIT
TRACE_IRQS_ON TRACE_IRQS_ON
io_tif: io_tif:
tm __TI_flags+3(%r12),_TIF_WORK_INT tm __TI_flags+3(%r12),_TIF_WORK_INT
bnz BASED(io_work) # there is work to do (signals etc.) jnz io_work # there is work to do (signals etc.)
io_restore: io_restore:
RESTORE_ALL __LC_RETURN_PSW,0 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
ni __LC_RETURN_PSW+1,0xfd # clean wait state bit
stpt __LC_EXIT_TIMER
lm %r0,%r15,__PT_R0(%r11)
lpsw __LC_RETURN_PSW
io_done: io_done:
# #
...@@ -577,28 +485,29 @@ io_done: ...@@ -577,28 +485,29 @@ io_done:
# Before any work can be done, a switch to the kernel stack is required. # Before any work can be done, a switch to the kernel stack is required.
# #
io_work: io_work:
tm SP_PSW+1(%r15),0x01 # returning to user ? tm __PT_PSW+1(%r11),0x01 # returning to user ?
bo BASED(io_work_user) # yes -> do resched & signal jo io_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
# check for preemptive scheduling # check for preemptive scheduling
icm %r0,15,__TI_precount(%r12) icm %r0,15,__TI_precount(%r12)
bnz BASED(io_restore) # preemption disabled jnz io_restore # preemption disabled
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
bno BASED(io_restore) jno io_restore
# switch to kernel stack # switch to kernel stack
l %r1,SP_R15(%r15) l %r1,__PT_R15(%r11)
s %r1,BASED(.Lc_spsize) ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lr %r15,%r1 lr %r15,%r1
# TRACE_IRQS_ON already done at io_return, call # TRACE_IRQS_ON already done at io_return, call
# TRACE_IRQS_OFF to keep things symmetrical # TRACE_IRQS_OFF to keep things symmetrical
TRACE_IRQS_OFF TRACE_IRQS_OFF
l %r1,BASED(.Lpreempt_schedule_irq) l %r1,BASED(.Lpreempt_irq)
basr %r14,%r1 # call preempt_schedule_irq basr %r14,%r1 # call preempt_schedule_irq
b BASED(io_return) j io_return
#else #else
b BASED(io_restore) j io_restore
#endif #endif
# #
...@@ -606,9 +515,10 @@ io_work: ...@@ -606,9 +515,10 @@ io_work:
# #
io_work_user: io_work_user:
l %r1,__LC_KERNEL_STACK l %r1,__LC_KERNEL_STACK
s %r1,BASED(.Lc_spsize) ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lr %r15,%r1 lr %r15,%r1
# #
...@@ -618,24 +528,24 @@ io_work_user: ...@@ -618,24 +528,24 @@ io_work_user:
# #
io_work_tif: io_work_tif:
tm __TI_flags+3(%r12),_TIF_MCCK_PENDING tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
bo BASED(io_mcck_pending) jo io_mcck_pending
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
bo BASED(io_reschedule) jo io_reschedule
tm __TI_flags+3(%r12),_TIF_SIGPENDING tm __TI_flags+3(%r12),_TIF_SIGPENDING
bo BASED(io_sigpending) jo io_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
bo BASED(io_notify_resume) jo io_notify_resume
b BASED(io_return) # beware of critical section cleanup j io_return # beware of critical section cleanup
# #
# _TIF_MCCK_PENDING is set, call handler # _TIF_MCCK_PENDING is set, call handler
# #
io_mcck_pending: io_mcck_pending:
# TRACE_IRQS_ON already done at io_return # TRACE_IRQS_ON already done at io_return
l %r1,BASED(.Ls390_handle_mcck) l %r1,BASED(.Lhandle_mcck)
basr %r14,%r1 # TIF bit will be cleared by handler basr %r14,%r1 # TIF bit will be cleared by handler
TRACE_IRQS_OFF TRACE_IRQS_OFF
b BASED(io_return) j io_return
# #
# _TIF_NEED_RESCHED is set, call schedule # _TIF_NEED_RESCHED is set, call schedule
...@@ -643,37 +553,37 @@ io_mcck_pending: ...@@ -643,37 +553,37 @@ io_mcck_pending:
io_reschedule: io_reschedule:
# TRACE_IRQS_ON already done at io_return # TRACE_IRQS_ON already done at io_return
l %r1,BASED(.Lschedule) l %r1,BASED(.Lschedule)
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts ssm __LC_SVC_NEW_PSW # reenable interrupts
basr %r14,%r1 # call scheduler basr %r14,%r1 # call scheduler
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF TRACE_IRQS_OFF
b BASED(io_return) j io_return
# #
# _TIF_SIGPENDING is set, call do_signal # _TIF_SIGPENDING is set, call do_signal
# #
io_sigpending: io_sigpending:
# TRACE_IRQS_ON already done at io_return # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ldo_signal) l %r1,BASED(.Ldo_signal)
ssm __LC_SVC_NEW_PSW # reenable interrupts
lr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # call do_signal basr %r14,%r1 # call do_signal
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF TRACE_IRQS_OFF
b BASED(io_return) j io_return
# #
# _TIF_SIGPENDING is set, call do_signal # _TIF_SIGPENDING is set, call do_signal
# #
io_notify_resume: io_notify_resume:
# TRACE_IRQS_ON already done at io_return # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ldo_notify_resume) l %r1,BASED(.Ldo_notify_resume)
basr %r14,%r1 # call do_signal ssm __LC_SVC_NEW_PSW # reenable interrupts
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts lr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # call do_notify_resume
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF TRACE_IRQS_OFF
b BASED(io_return) j io_return
/* /*
* External interrupt handler routine * External interrupt handler routine
...@@ -682,23 +592,25 @@ io_notify_resume: ...@@ -682,23 +592,25 @@ io_notify_resume:
ENTRY(ext_int_handler) ENTRY(ext_int_handler)
stck __LC_INT_CLOCK stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 stm %r8,%r15,__LC_SAVE_AREA_ASYNC
CREATE_STACK_FRAME __LC_SAVE_AREA+16 l %r12,__LC_THREAD_INFO
mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack l %r13,__LC_SVC_NEW_PSW+4
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct lm %r8,%r9,__LC_EXT_OLD_PSW
tm SP_PSW+1(%r15),0x01 # interrupting from user ? tmh %r8,0x0001 # interrupting from user ?
bz BASED(ext_no_vtime) jz ext_skip
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER ext_skip:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
ext_no_vtime: stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
stm %r8,%r9,__PT_PSW(%r11)
TRACE_IRQS_OFF TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area lr %r2,%r11 # pass pointer to pt_regs
l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
l %r4,__LC_EXT_PARAMS # get external parameters l %r4,__LC_EXT_PARAMS # get external parameters
l %r1,BASED(.Ldo_extint) l %r1,BASED(.Ldo_extint)
basr %r14,%r1 basr %r14,%r1 # call do_extint
b BASED(io_return) j io_return
__critical_end: __critical_end:
...@@ -710,82 +622,74 @@ ENTRY(mcck_int_handler) ...@@ -710,82 +622,74 @@ ENTRY(mcck_int_handler)
stck __LC_MCCK_CLOCK stck __LC_MCCK_CLOCK
spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
SAVE_ALL_BASE __LC_SAVE_AREA+32 l %r12,__LC_THREAD_INFO
la %r12,__LC_MCK_OLD_PSW l %r13,__LC_SVC_NEW_PSW+4
lm %r8,%r9,__LC_MCK_OLD_PSW
tm __LC_MCCK_CODE,0x80 # system damage? tm __LC_MCCK_CODE,0x80 # system damage?
bo BASED(mcck_int_main) # yes -> rest of mcck code invalid jo mcck_panic # yes -> rest of mcck code invalid
mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA la %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
bo BASED(1f) jo 3f
la %r14,__LC_SYNC_ENTER_TIMER la %r14,__LC_SYNC_ENTER_TIMER
clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
bl BASED(0f) jl 0f
la %r14,__LC_ASYNC_ENTER_TIMER la %r14,__LC_ASYNC_ENTER_TIMER
0: clc 0(8,%r14),__LC_EXIT_TIMER 0: clc 0(8,%r14),__LC_EXIT_TIMER
bl BASED(0f) jl 1f
la %r14,__LC_EXIT_TIMER la %r14,__LC_EXIT_TIMER
0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
bl BASED(0f) jl 2f
la %r14,__LC_LAST_UPDATE_TIMER la %r14,__LC_LAST_UPDATE_TIMER
0: spt 0(%r14) 2: spt 0(%r14)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
bno BASED(mcck_int_main) # no -> skip cleanup critical jno mcck_panic # no -> skip cleanup critical
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit tm %r8,0x0001 # interrupting from user ?
bnz BASED(mcck_int_main) # from user -> load async stack jz mcck_skip
clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end) UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
bhe BASED(mcck_int_main) mcck_skip:
clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start) SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
bl BASED(mcck_int_main) mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA
l %r14,BASED(.Lcleanup_critical) stm %r8,%r9,__PT_PSW(%r11)
basr %r14,%r14 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
mcck_int_main: l %r1,BASED(.Ldo_machine_check)
l %r14,__LC_PANIC_STACK # are we already on the panic stack? lr %r2,%r11 # pass pointer to pt_regs
slr %r14,%r15 basr %r14,%r1 # call s390_do_machine_check
sra %r14,PAGE_SHIFT tm __PT_PSW+1(%r11),0x01 # returning to user ?
be BASED(0f) jno mcck_return
l %r15,__LC_PANIC_STACK # load panic stack
0: s %r15,BASED(.Lc_spsize) # make room for registers & psw
CREATE_STACK_FRAME __LC_SAVE_AREA+32
mvc SP_PSW(8,%r15),0(%r12)
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
bno BASED(mcck_no_vtime) # no -> skip cleanup critical
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(mcck_no_vtime)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
mcck_no_vtime:
la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ls390_mcck)
basr %r14,%r1 # call machine check handler
tm SP_PSW+1(%r15),0x01 # returning to user ?
bno BASED(mcck_return)
l %r1,__LC_KERNEL_STACK # switch to kernel stack l %r1,__LC_KERNEL_STACK # switch to kernel stack
s %r1,BASED(.Lc_spsize) ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r15)
lr %r15,%r1 lr %r15,%r1
stosm __SF_EMPTY(%r15),0x04 # turn dat on ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
tm __TI_flags+3(%r12),_TIF_MCCK_PENDING tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
bno BASED(mcck_return) jno mcck_return
TRACE_IRQS_OFF TRACE_IRQS_OFF
l %r1,BASED(.Ls390_handle_mcck) l %r1,BASED(.Lhandle_mcck)
basr %r14,%r1 # call machine check handler basr %r14,%r1 # call s390_handle_mcck
TRACE_IRQS_ON TRACE_IRQS_ON
mcck_return: mcck_return:
mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
bno BASED(0f) jno 0f
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 lm %r0,%r15,__PT_R0(%r11)
stpt __LC_EXIT_TIMER stpt __LC_EXIT_TIMER
lpsw __LC_RETURN_MCCK_PSW # back to caller lpsw __LC_RETURN_MCCK_PSW
0: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 0: lm %r0,%r15,__PT_R0(%r11)
lpsw __LC_RETURN_MCCK_PSW # back to caller lpsw __LC_RETURN_MCCK_PSW
RESTORE_ALL __LC_RETURN_MCCK_PSW,0 mcck_panic:
l %r14,__LC_PANIC_STACK
slr %r14,%r15
sra %r14,PAGE_SHIFT
jz 0f
l %r15,__LC_PANIC_STACK
0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j mcck_skip
/* /*
* Restart interruption handler, kick starter for additional CPUs * Restart interruption handler, kick starter for additional CPUs
...@@ -799,18 +703,18 @@ restart_base: ...@@ -799,18 +703,18 @@ restart_base:
stck __LC_LAST_UPDATE_CLOCK stck __LC_LAST_UPDATE_CLOCK
mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
l %r15,__LC_SAVE_AREA+60 # load ksp l %r15,__LC_GPREGS_SAVE_AREA+60 # load ksp
lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
lam %a0,%a15,__LC_AREGS_SAVE_AREA lam %a0,%a15,__LC_AREGS_SAVE_AREA
lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone lm %r6,%r15,__SF_GPRS(%r15)# load registers from clone
l %r1,__LC_THREAD_INFO l %r1,__LC_THREAD_INFO
mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
basr %r14,0 basr %r14,0
l %r14,restart_addr-.(%r14) l %r14,restart_addr-.(%r14)
basr %r14,%r14 # branch to start_secondary basr %r14,%r14 # call start_secondary
restart_addr: restart_addr:
.long start_secondary .long start_secondary
.align 8 .align 8
...@@ -835,19 +739,19 @@ restart_go: ...@@ -835,19 +739,19 @@ restart_go:
# PSW restart interrupt handler # PSW restart interrupt handler
# #
ENTRY(psw_restart_int_handler) ENTRY(psw_restart_int_handler)
st %r15,__LC_SAVE_AREA+48(%r0) # save r15 st %r15,__LC_SAVE_AREA_RESTART
basr %r15,0 basr %r15,0
0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack
l %r15,0(%r15) l %r15,0(%r15)
ahi %r15,-SP_SIZE # make room for pt_regs ahi %r15,-__PT_SIZE # create pt_regs on stack
stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack stm %r0,%r14,__PT_R0(%r15)
mvc SP_R15(4,%r15),__LC_SAVE_AREA+48(%r0)# store saved %r15 to stack mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 ahi %r15,-STACK_FRAME_OVERHEAD
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
basr %r14,0 basr %r14,0
1: l %r14,.Ldo_restart-1b(%r14) 1: l %r14,.Ldo_restart-1b(%r14)
basr %r14,%r14 basr %r14,%r14
basr %r14,0 # load disabled wait PSW if basr %r14,0 # load disabled wait PSW if
2: lpsw restart_psw_crash-2b(%r14) # do_restart returns 2: lpsw restart_psw_crash-2b(%r14) # do_restart returns
.align 4 .align 4
...@@ -869,215 +773,174 @@ restart_psw_crash: ...@@ -869,215 +773,174 @@ restart_psw_crash:
*/ */
stack_overflow: stack_overflow:
l %r15,__LC_PANIC_STACK # change to panic stack l %r15,__LC_PANIC_STACK # change to panic stack
sl %r15,BASED(.Lc_spsize) ahi %r15,-__PT_SIZE # create pt_regs
mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack stm %r0,%r7,__PT_R0(%r15)
stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack stm %r8,%r9,__PT_PSW(%r15)
la %r1,__LC_SAVE_AREA mvc __PT_R8(32,%r11),0(%r14)
ch %r12,BASED(.L0x020) # old psw addr == __LC_SVC_OLD_PSW ? lr %r15,%r11
be BASED(0f) ahi %r15,-STACK_FRAME_OVERHEAD
ch %r12,BASED(.L0x028) # old psw addr == __LC_PGM_OLD_PSW ? l %r1,BASED(1f)
be BASED(0f) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
la %r1,__LC_SAVE_AREA+16 lr %r2,%r11 # pass pointer to pt_regs
0: mvc SP_R12(16,%r15),0(%r1) # move %r12-%r15 to stack br %r1 # branch to kernel_stack_overflow
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
l %r1,BASED(1f) # branch to kernel_stack_overflow
la %r2,SP_PTREGS(%r15) # load pt_regs
br %r1
1: .long kernel_stack_overflow 1: .long kernel_stack_overflow
#endif #endif
cleanup_table_system_call: cleanup_table:
.long system_call + 0x80000000, sysc_do_svc + 0x80000000 .long system_call + 0x80000000
cleanup_table_sysc_tif: .long sysc_do_svc + 0x80000000
.long sysc_tif + 0x80000000, sysc_restore + 0x80000000 .long sysc_tif + 0x80000000
cleanup_table_sysc_restore: .long sysc_restore + 0x80000000
.long sysc_restore + 0x80000000, sysc_done + 0x80000000 .long sysc_done + 0x80000000
cleanup_table_io_tif: .long io_tif + 0x80000000
.long io_tif + 0x80000000, io_restore + 0x80000000 .long io_restore + 0x80000000
cleanup_table_io_restore: .long io_done + 0x80000000
.long io_restore + 0x80000000, io_done + 0x80000000
cleanup_critical: cleanup_critical:
clc 4(4,%r12),BASED(cleanup_table_system_call) cl %r9,BASED(cleanup_table) # system_call
bl BASED(0f) jl 0f
clc 4(4,%r12),BASED(cleanup_table_system_call+4) cl %r9,BASED(cleanup_table+4) # sysc_do_svc
bl BASED(cleanup_system_call) jl cleanup_system_call
0: cl %r9,BASED(cleanup_table+8) # sysc_tif
clc 4(4,%r12),BASED(cleanup_table_sysc_tif) jl 0f
bl BASED(0f) cl %r9,BASED(cleanup_table+12) # sysc_restore
clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4) jl cleanup_sysc_tif
bl BASED(cleanup_sysc_tif) cl %r9,BASED(cleanup_table+16) # sysc_done
0: jl cleanup_sysc_restore
clc 4(4,%r12),BASED(cleanup_table_sysc_restore) cl %r9,BASED(cleanup_table+20) # io_tif
bl BASED(0f) jl 0f
clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4) cl %r9,BASED(cleanup_table+24) # io_restore
bl BASED(cleanup_sysc_restore) jl cleanup_io_tif
0: cl %r9,BASED(cleanup_table+28) # io_done
clc 4(4,%r12),BASED(cleanup_table_io_tif) jl cleanup_io_restore
bl BASED(0f) 0: br %r14
clc 4(4,%r12),BASED(cleanup_table_io_tif+4)
bl BASED(cleanup_io_tif)
0:
clc 4(4,%r12),BASED(cleanup_table_io_restore)
bl BASED(0f)
clc 4(4,%r12),BASED(cleanup_table_io_restore+4)
bl BASED(cleanup_io_restore)
0:
br %r14
cleanup_system_call: cleanup_system_call:
mvc __LC_RETURN_PSW(8),0(%r12) # check if stpt has been executed
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) cl %r9,BASED(cleanup_system_call_insn)
bh BASED(0f) jh 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
c %r12,BASED(.Lmck_old_psw)
be BASED(0f)
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: c %r12,BASED(.Lmck_old_psw) chi %r11,__LC_SAVE_AREA_ASYNC
la %r12,__LC_SAVE_AREA+32 je 0f
be BASED(0f) mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
la %r12,__LC_SAVE_AREA+16 0: # check if stm has been executed
0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) cl %r9,BASED(cleanup_system_call_insn+4)
bhe BASED(cleanup_vtime) jh 0f
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) mvc __LC_SAVE_AREA_SYNC(32),0(%r11)
bh BASED(0f) 0: # set up saved registers r12, and r13
mvc __LC_SAVE_AREA(16),0(%r12) st %r12,16(%r11) # r12 thread-info pointer
0: st %r13,4(%r12) st %r13,20(%r11) # r13 literal-pool pointer
l %r15,__LC_KERNEL_STACK # problem state -> load ksp # check if the user time calculation has been done
s %r15,BASED(.Lc_spsize) # make room for registers & psw cl %r9,BASED(cleanup_system_call_insn+8)
st %r15,12(%r12) jh 0f
CREATE_STACK_FRAME __LC_SAVE_AREA l %r10,__LC_EXIT_TIMER
mvc 0(4,%r12),__LC_THREAD_INFO l %r15,__LC_EXIT_TIMER+4
l %r12,__LC_THREAD_INFO SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER
mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW ADD64 %r10,%r15,__LC_USER_TIMER
mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC st %r10,__LC_USER_TIMER
oi __TI_flags+3(%r12),_TIF_SYSCALL st %r15,__LC_USER_TIMER+4
cleanup_vtime: 0: # check if the system time calculation has been done
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) cl %r9,BASED(cleanup_system_call_insn+12)
bhe BASED(cleanup_stime) jh 0f
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER l %r10,__LC_LAST_UPDATE_TIMER
cleanup_stime: l %r15,__LC_LAST_UPDATE_TIMER+4
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16) SUB64 %r10,%r15,__LC_EXIT_TIMER
bh BASED(cleanup_update) ADD64 %r10,%r15,__LC_SYSTEM_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER st %r10,__LC_SYSTEM_TIMER
cleanup_update: st %r15,__LC_SYSTEM_TIMER+4
0: # update accounting time stamp
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) # set up saved register 11
la %r12,__LC_RETURN_PSW l %r15,__LC_KERNEL_STACK
ahi %r15,-__PT_SIZE
st %r15,12(%r11) # r11 pt_regs pointer
# fill pt_regs
mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC
stm %r0,%r7,__PT_R0(%r15)
mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW
mvc __PT_SVC_CODE(4,%r15),__LC_SVC_ILC
# setup saved register 15
ahi %r15,-STACK_FRAME_OVERHEAD
st %r15,28(%r11) # r15 stack pointer
# set new psw address and exit
l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
br %r14 br %r14
cleanup_system_call_insn: cleanup_system_call_insn:
.long sysc_saveall + 0x80000000
.long system_call + 0x80000000 .long system_call + 0x80000000
.long sysc_vtime + 0x80000000 .long sysc_stm + 0x80000000
.long sysc_stime + 0x80000000 .long sysc_vtime + 0x80000000 + 36
.long sysc_update + 0x80000000 .long sysc_vtime + 0x80000000 + 76
cleanup_sysc_tif: cleanup_sysc_tif:
mvc __LC_RETURN_PSW(4),0(%r12) l %r9,BASED(cleanup_table+8) # sysc_tif + 0x80000000
mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
la %r12,__LC_RETURN_PSW
br %r14 br %r14
cleanup_sysc_restore: cleanup_sysc_restore:
clc 4(4,%r12),BASED(cleanup_sysc_restore_insn) cl %r9,BASED(cleanup_sysc_restore_insn)
be BASED(2f) jhe 0f
mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER l %r9,12(%r11) # get saved pointer to pt_regs
c %r12,BASED(.Lmck_old_psw) mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
be BASED(0f) mvc 0(32,%r11),__PT_R8(%r9)
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER lm %r0,%r7,__PT_R0(%r9)
0: clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4) 0: lm %r8,%r9,__LC_RETURN_PSW
be BASED(2f)
mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
c %r12,BASED(.Lmck_old_psw)
la %r12,__LC_SAVE_AREA+32
be BASED(1f)
la %r12,__LC_SAVE_AREA+16
1: mvc 0(16,%r12),SP_R12(%r15)
lm %r0,%r11,SP_R0(%r15)
l %r15,SP_R15(%r15)
2: la %r12,__LC_RETURN_PSW
br %r14 br %r14
cleanup_sysc_restore_insn: cleanup_sysc_restore_insn:
.long sysc_done - 4 + 0x80000000 .long sysc_done - 4 + 0x80000000
.long sysc_done - 8 + 0x80000000
cleanup_io_tif: cleanup_io_tif:
mvc __LC_RETURN_PSW(4),0(%r12) l %r9,BASED(cleanup_table+20) # io_tif + 0x80000000
mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
la %r12,__LC_RETURN_PSW
br %r14 br %r14
cleanup_io_restore: cleanup_io_restore:
clc 4(4,%r12),BASED(cleanup_io_restore_insn) cl %r9,BASED(cleanup_io_restore_insn)
be BASED(1f) jhe 0f
mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER l %r9,12(%r11) # get saved r11 pointer to pt_regs
clc 4(4,%r12),BASED(cleanup_io_restore_insn+4) mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
be BASED(1f) ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
mvc __LC_RETURN_PSW(8),SP_PSW(%r15) mvc 0(32,%r11),__PT_R8(%r9)
mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) lm %r0,%r7,__PT_R0(%r9)
lm %r0,%r11,SP_R0(%r15) 0: lm %r8,%r9,__LC_RETURN_PSW
l %r15,SP_R15(%r15)
1: la %r12,__LC_RETURN_PSW
br %r14 br %r14
cleanup_io_restore_insn: cleanup_io_restore_insn:
.long io_done - 4 + 0x80000000 .long io_done - 4 + 0x80000000
.long io_done - 8 + 0x80000000
/* /*
* Integer constants * Integer constants
*/ */
.align 4 .align 4
.Lc_spsize: .long SP_SIZE .Lnr_syscalls: .long NR_syscalls
.Lc_overhead: .long STACK_FRAME_OVERHEAD
.Lnr_syscalls: .long NR_syscalls
.L0x018: .short 0x018
.L0x020: .short 0x020
.L0x028: .short 0x028
.L0x030: .short 0x030
.L0x038: .short 0x038
.Lc_1: .long 1
/* /*
* Symbol constants * Symbol constants
*/ */
.Ls390_mcck: .long s390_do_machine_check .Ldo_machine_check: .long s390_do_machine_check
.Ls390_handle_mcck: .Lhandle_mcck: .long s390_handle_mcck
.long s390_handle_mcck .Ldo_IRQ: .long do_IRQ
.Lmck_old_psw: .long __LC_MCK_OLD_PSW .Ldo_extint: .long do_extint
.Ldo_IRQ: .long do_IRQ .Ldo_signal: .long do_signal
.Ldo_extint: .long do_extint .Ldo_notify_resume: .long do_notify_resume
.Ldo_signal: .long do_signal .Ldo_per_trap: .long do_per_trap
.Ldo_notify_resume: .Ldo_execve: .long do_execve
.long do_notify_resume .Lexecve_tail: .long execve_tail
.Lhandle_per: .long do_per_trap .Ljump_table: .long pgm_check_table
.Ldo_execve: .long do_execve .Lschedule: .long schedule
.Lexecve_tail: .long execve_tail
.Ljump_table: .long pgm_check_table
.Lschedule: .long schedule
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
.Lpreempt_schedule_irq: .Lpreempt_irq: .long preempt_schedule_irq
.long preempt_schedule_irq
#endif #endif
.Ltrace_entry: .long do_syscall_trace_enter .Ltrace_enter: .long do_syscall_trace_enter
.Ltrace_exit: .long do_syscall_trace_exit .Ltrace_exit: .long do_syscall_trace_exit
.Lschedtail: .long schedule_tail .Lschedule_tail: .long schedule_tail
.Lsysc_table: .long sys_call_table .Lsys_call_table: .long sys_call_table
.Lsysc_per: .long sysc_per + 0x80000000
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
.Ltrace_irq_on_caller: .Lhardirqs_on: .long trace_hardirqs_on_caller
.long trace_hardirqs_on_caller .Lhardirqs_off: .long trace_hardirqs_off_caller
.Ltrace_irq_off_caller:
.long trace_hardirqs_off_caller
#endif #endif
#ifdef CONFIG_LOCKDEP #ifdef CONFIG_LOCKDEP
.Llockdep_sys_exit: .Llockdep_sys_exit: .long lockdep_sys_exit
.long lockdep_sys_exit
#endif #endif
.Lcritical_start: .Lcritical_start: .long __critical_start + 0x80000000
.long __critical_start + 0x80000000 .Lcritical_length: .long __critical_end - __critical_start
.Lcritical_end:
.long __critical_end + 0x80000000
.Lcleanup_critical:
.long cleanup_critical
.section .rodata, "a" .section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esa #define SYSCALL(esa,esame,emu) .long esa
...
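The timer fixup in cleanup_system_call above, like the UPDATE_VTIME macro in the 64-bit version further down, charges CPU time by differencing successive CPU-timer samples: the interval from the last kernel exit to the current entry is added to user time, the interval from the previous accounting stamp to that exit is added to system time, and the stamp is then advanced to the entry sample. A minimal C sketch of that bookkeeping (illustrative only; the struct and field names are hypothetical stand-ins for the lowcore timer fields):

struct vtime_acct {
	unsigned long long exit_timer;		/* CPU timer at last kernel exit */
	unsigned long long last_update_timer;	/* CPU timer at last accounting stamp */
	unsigned long long user_timer;		/* accumulated user time */
	unsigned long long system_timer;	/* accumulated system time */
};

/* enter_timer is the CPU timer sampled on the current kernel entry (stpt).
 * The s390 CPU timer counts down, so "older sample - newer sample" is the
 * elapsed interval, which matches the slg/alg (SUB64/ADD64) pairs in the patch. */
static void update_vtime(struct vtime_acct *t, unsigned long long enter_timer)
{
	t->user_timer += t->exit_timer - enter_timer;		  /* exit -> entry */
	t->system_timer += t->last_update_timer - t->exit_timer; /* stamp -> exit */
	t->last_update_timer = enter_timer;			  /* new stamp */
}

The cleanup code only replays whichever of these steps had not yet run when the interrupt hit, which is why each block is guarded by a compare against an address recorded in cleanup_system_call_insn.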
/* /*
* arch/s390/kernel/entry64.S * arch/s390/kernel/entry64.S
* S390 low-level entry points. * S390 low-level entry points.
...@@ -19,32 +20,22 @@
#include <asm/unistd.h> #include <asm/unistd.h>
#include <asm/page.h> #include <asm/page.h>
/* __PT_R0 = __PT_GPRS
* Stack layout for the system_call stack entry. __PT_R1 = __PT_GPRS + 8
* The first few entries are identical to the user_regs_struct. __PT_R2 = __PT_GPRS + 16
*/ __PT_R3 = __PT_GPRS + 24
SP_PTREGS = STACK_FRAME_OVERHEAD __PT_R4 = __PT_GPRS + 32
SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS __PT_R5 = __PT_GPRS + 40
SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW __PT_R6 = __PT_GPRS + 48
SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS __PT_R7 = __PT_GPRS + 56
SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8 __PT_R8 = __PT_GPRS + 64
SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16 __PT_R9 = __PT_GPRS + 72
SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24 __PT_R10 = __PT_GPRS + 80
SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32 __PT_R11 = __PT_GPRS + 88
SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40 __PT_R12 = __PT_GPRS + 96
SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48 __PT_R13 = __PT_GPRS + 104
SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 __PT_R14 = __PT_GPRS + 112
SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 64 __PT_R15 = __PT_GPRS + 120
SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 72
SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 80
SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 88
SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 96
SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104
SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112
SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120
SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE
SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT STACK_SIZE = 1 << STACK_SHIFT
...@@ -59,154 +50,103 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
#define BASED(name) name-system_call(%r13) #define BASED(name) name-system_call(%r13)
.macro SPP newpp
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
jz .+8
.insn s,0xb2800000,\newpp
#endif
.endm
.macro HANDLE_SIE_INTERCEPT
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
tm __TI_flags+6(%r12),_TIF_SIE>>8
jz 0f
SPP BASED(.Lhost_id) # set host id
clc SP_PSW+8(8,%r15),BASED(.Lsie_loop)
jl 0f
clc SP_PSW+8(8,%r15),BASED(.Lsie_done)
jhe 0f
mvc SP_PSW+8(8,%r15),BASED(.Lsie_loop)
0:
#endif
.endm
#ifdef CONFIG_TRACE_IRQFLAGS
.macro TRACE_IRQS_ON .macro TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0 basr %r2,%r0
brasl %r14,trace_hardirqs_on_caller brasl %r14,trace_hardirqs_on_caller
#endif
.endm .endm
.macro TRACE_IRQS_OFF .macro TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0 basr %r2,%r0
brasl %r14,trace_hardirqs_off_caller brasl %r14,trace_hardirqs_off_caller
.endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
#endif #endif
.endm
#ifdef CONFIG_LOCKDEP
.macro LOCKDEP_SYS_EXIT .macro LOCKDEP_SYS_EXIT
tm SP_PSW+1(%r15),0x01 # returning to user ? #ifdef CONFIG_LOCKDEP
jz 0f tm __PT_PSW+1(%r11),0x01 # returning to user ?
jz .+10
brasl %r14,lockdep_sys_exit brasl %r14,lockdep_sys_exit
0:
.endm
#else
#define LOCKDEP_SYS_EXIT
#endif #endif
.macro UPDATE_VTIME lc_from,lc_to,lc_sum
lg %r10,\lc_from
slg %r10,\lc_to
alg %r10,\lc_sum
stg %r10,\lc_sum
.endm .endm
/* .macro SPP newpp
* Register usage in interrupt handlers: #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
* R9 - pointer to current task structure tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
* R13 - pointer to literal pool jz .+8
* R14 - return register for function calls .insn s,0xb2800000,\newpp
* R15 - kernel stack pointer #endif
*/ .endm
.macro SAVE_ALL_SVC psworg,savearea .macro HANDLE_SIE_INTERCEPT scratch
stmg %r11,%r15,\savearea #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
lg %r15,__LC_KERNEL_STACK # problem state -> load ksp tm __TI_flags+6(%r12),_TIF_SIE>>8
aghi %r15,-SP_SIZE # make room for registers & psw jz .+42
lg %r11,__LC_LAST_BREAK tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
jz .+8
.insn s,0xb2800000,BASED(.Lhost_id) # set host id
lgr \scratch,%r9
slg \scratch,BASED(.Lsie_loop)
clg \scratch,BASED(.Lsie_length)
jhe .+10
lg %r9,BASED(.Lsie_loop)
#endif
.endm .endm
.macro SAVE_ALL_PGM psworg,savearea .macro CHECK_STACK stacksize,savearea
stmg %r11,%r15,\savearea
tm \psworg+1,0x01 # test problem state bit
#ifdef CONFIG_CHECK_STACK #ifdef CONFIG_CHECK_STACK
jnz 1f tml %r15,\stacksize - CONFIG_STACK_GUARD
tml %r15,STACK_SIZE - CONFIG_STACK_GUARD lghi %r14,\savearea
jnz 2f jz stack_overflow
la %r12,\psworg
j stack_overflow
#else
jz 2f
#endif #endif
1: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
2: aghi %r15,-SP_SIZE # make room for registers & psw
larl %r13,system_call
lg %r11,__LC_LAST_BREAK
.endm .endm
.macro SAVE_ALL_ASYNC psworg,savearea .macro SWITCH_ASYNC savearea,stack,shift
stmg %r11,%r15,\savearea tmhh %r8,0x0001 # interrupting from user ?
larl %r13,system_call jnz 1f
lg %r11,__LC_LAST_BREAK lgr %r14,%r9
la %r12,\psworg slg %r14,BASED(.Lcritical_start)
tm \psworg+1,0x01 # test problem state bit clg %r14,BASED(.Lcritical_length)
jnz 1f # from user -> load kernel stack
clc \psworg+8(8),BASED(.Lcritical_end)
jhe 0f jhe 0f
clc \psworg+8(8),BASED(.Lcritical_start) lghi %r11,\savearea # inside critical section, do cleanup
jl 0f
brasl %r14,cleanup_critical brasl %r14,cleanup_critical
tm 1(%r12),0x01 # retest problem state after cleanup tmhh %r8,0x0001 # retest problem state after cleanup
jnz 1f jnz 1f
0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ? 0: lg %r14,\stack # are we already on the target stack?
slgr %r14,%r15 slgr %r14,%r15
srag %r14,%r14,STACK_SHIFT srag %r14,%r14,\shift
#ifdef CONFIG_CHECK_STACK
jnz 1f jnz 1f
tml %r15,STACK_SIZE - CONFIG_STACK_GUARD CHECK_STACK 1<<\shift,\savearea
jnz 2f j 2f
j stack_overflow 1: lg %r15,\stack # load target stack
#else 2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
jz 2f la %r11,STACK_FRAME_OVERHEAD(%r15)
#endif
1: lg %r15,__LC_ASYNC_STACK # load async stack
2: aghi %r15,-SP_SIZE # make room for registers & psw
.endm
.macro CREATE_STACK_FRAME savearea
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
mvc SP_R11(40,%r15),\savearea # move %r11-%r15 to stack
stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
.endm .endm
.macro RESTORE_ALL psworg,sync .macro UPDATE_VTIME scratch,enter_timer
mvc \psworg(16),SP_PSW(%r15) # move user PSW to lowcore lg \scratch,__LC_EXIT_TIMER
.if !\sync slg \scratch,\enter_timer
ni \psworg+1,0xfd # clear wait state bit alg \scratch,__LC_USER_TIMER
.endif stg \scratch,__LC_USER_TIMER
lg %r14,__LC_VDSO_PER_CPU lg \scratch,__LC_LAST_UPDATE_TIMER
lmg %r0,%r13,SP_R0(%r15) # load gprs 0-13 of user slg \scratch,__LC_EXIT_TIMER
stpt __LC_EXIT_TIMER alg \scratch,__LC_SYSTEM_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER stg \scratch,__LC_SYSTEM_TIMER
lmg %r14,%r15,SP_R14(%r15) # load grps 14-15 of user mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
lpswe \psworg # back to caller
.endm .endm
.macro LAST_BREAK .macro LAST_BREAK scratch
srag %r10,%r11,23 srag \scratch,%r10,23
jz 0f jz .+10
stg %r11,__TI_last_break(%r12) stg %r10,__TI_last_break(%r12)
0:
.endm .endm
.macro REENABLE_IRQS .macro REENABLE_IRQS
mvc __SF_EMPTY(1,%r15),SP_PSW(%r15) stg %r8,__LC_RETURN_PSW
ni __SF_EMPTY(%r15),0xbf ni __LC_RETURN_PSW,0xbf
ssm __SF_EMPTY(%r15) ssm __LC_RETURN_PSW
.endm .endm
.section .kprobes.text, "ax" .section .kprobes.text, "ax"
...@@ -245,55 +185,66 @@ __critical_start:
ENTRY(system_call) ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER stpt __LC_SYNC_ENTER_TIMER
sysc_saveall: sysc_stmg:
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA stmg %r8,%r15,__LC_SAVE_AREA_SYNC
CREATE_STACK_FRAME __LC_SAVE_AREA lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct lg %r12,__LC_THREAD_INFO
mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW larl %r13,system_call
mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC sysc_per:
oi __TI_flags+7(%r12),_TIF_SYSCALL lg %r15,__LC_KERNEL_STACK
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime: sysc_vtime:
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
sysc_stime: LAST_BREAK %r13
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER stmg %r0,%r7,__PT_R0(%r11)
sysc_update: mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
LAST_BREAK mvc __PT_SVC_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc: sysc_do_svc:
llgh %r7,SP_SVC_CODE+2(%r15) oi __TI_flags+7(%r12),_TIF_SYSCALL
slag %r7,%r7,2 # shift and test for svc 0 llgh %r8,__PT_SVC_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
jnz sysc_nr_ok jnz sysc_nr_ok
# svc 0: system call number in %r1 # svc 0: system call number in %r1
llgfr %r1,%r1 # clear high word in r1 llgfr %r1,%r1 # clear high word in r1
cghi %r1,NR_syscalls cghi %r1,NR_syscalls
jnl sysc_nr_ok jnl sysc_nr_ok
sth %r1,SP_SVC_CODE+2(%r15) sth %r1,__PT_SVC_CODE+2(%r11)
slag %r7,%r1,2 # shift and test for svc 0 slag %r8,%r1,2
sysc_nr_ok: sysc_nr_ok:
larl %r10,sys_call_table larl %r10,sys_call_table # 64 bit system call table
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
tm __TI_flags+5(%r12),(_TIF_31BIT>>16) # running in 31 bit mode ? tm __TI_flags+5(%r12),(_TIF_31BIT>>16)
jno sysc_noemu jno sysc_noemu
larl %r10,sys_call_table_emu # use 31 bit emulation system calls larl %r10,sys_call_table_emu # 31 bit system call table
sysc_noemu: sysc_noemu:
#endif #endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,__PT_ORIG_GPR2(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lgf %r9,0(%r8,%r10) # get system call add.
tm __TI_flags+6(%r12),_TIF_TRACE >> 8 tm __TI_flags+6(%r12),_TIF_TRACE >> 8
mvc SP_ARGS(8,%r15),SP_R7(%r15)
lgf %r8,0(%r7,%r10) # load address of system call routine
jnz sysc_tracesys jnz sysc_tracesys
basr %r14,%r8 # call sys_xxxx basr %r14,%r9 # call sys_xxxx
stg %r2,SP_R2(%r15) # store return value (change R2 on stack) stg %r2,__PT_R2(%r11) # store return value
sysc_return: sysc_return:
LOCKDEP_SYS_EXIT LOCKDEP_SYS_EXIT
sysc_tif: sysc_tif:
tm SP_PSW+1(%r15),0x01 # returning to user ? tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno sysc_restore jno sysc_restore
tm __TI_flags+7(%r12),_TIF_WORK_SVC tm __TI_flags+7(%r12),_TIF_WORK_SVC
jnz sysc_work # there is work to do (signals etc.) jnz sysc_work # check for work
ni __TI_flags+7(%r12),255-_TIF_SYSCALL ni __TI_flags+7(%r12),255-_TIF_SYSCALL
sysc_restore: sysc_restore:
RESTORE_ALL __LC_RETURN_PSW,1 lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_PSW
sysc_done: sysc_done:
# #
...@@ -317,7 +268,7 @@ sysc_work:
# #
sysc_reschedule: sysc_reschedule:
larl %r14,sysc_return larl %r14,sysc_return
jg schedule # return point is sysc_return jg schedule
# #
# _TIF_MCCK_PENDING is set, call handler # _TIF_MCCK_PENDING is set, call handler
...@@ -331,33 +282,33 @@ sysc_mcck_pending:
# #
sysc_sigpending: sysc_sigpending:
ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
la %r2,SP_PTREGS(%r15) # load pt_regs lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_signal # call do_signal brasl %r14,do_signal
tm __TI_flags+7(%r12),_TIF_SYSCALL tm __TI_flags+7(%r12),_TIF_SYSCALL
jno sysc_return jno sysc_return
lmg %r2,%r6,SP_R2(%r15) # load svc arguments lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
lghi %r7,0 # svc 0 returns -ENOSYS lghi %r8,0 # svc 0 returns -ENOSYS
lh %r1,SP_SVC_CODE+2(%r15) # load new svc number lh %r1,__PT_SVC_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls cghi %r1,NR_syscalls
jnl sysc_nr_ok # invalid svc number -> do svc 0 jnl sysc_nr_ok # invalid svc number -> do svc 0
slag %r7,%r1,2 slag %r8,%r1,2
j sysc_nr_ok # restart svc j sysc_nr_ok # restart svc
# #
# _TIF_NOTIFY_RESUME is set, call do_notify_resume # _TIF_NOTIFY_RESUME is set, call do_notify_resume
# #
sysc_notify_resume: sysc_notify_resume:
la %r2,SP_PTREGS(%r15) # load pt_regs lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,sysc_return larl %r14,sysc_return
jg do_notify_resume # call do_notify_resume jg do_notify_resume
# #
# _TIF_PER_TRAP is set, call do_per_trap # _TIF_PER_TRAP is set, call do_per_trap
# #
sysc_singlestep: sysc_singlestep:
ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
la %r2,SP_PTREGS(%r15) # address of register-save area lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,sysc_return # load adr. of system return larl %r14,sysc_return
jg do_per_trap jg do_per_trap
# #
...@@ -365,41 +316,41 @@ sysc_singlestep:
# and after the system call # and after the system call
# #
sysc_tracesys: sysc_tracesys:
la %r2,SP_PTREGS(%r15) # load pt_regs lgr %r2,%r11 # pass pointer to pt_regs
la %r3,0 la %r3,0
llgh %r0,SP_SVC_CODE+2(%r15) llgh %r0,__PT_SVC_CODE+2(%r11)
stg %r0,SP_R2(%r15) stg %r0,__PT_R2(%r11)
brasl %r14,do_syscall_trace_enter brasl %r14,do_syscall_trace_enter
lghi %r0,NR_syscalls lghi %r0,NR_syscalls
clgr %r0,%r2 clgr %r0,%r2
jnh sysc_tracenogo jnh sysc_tracenogo
sllg %r7,%r2,2 # svc number *4 sllg %r8,%r2,2
lgf %r8,0(%r7,%r10) lgf %r9,0(%r8,%r10)
sysc_tracego: sysc_tracego:
lmg %r3,%r6,SP_R3(%r15) lmg %r3,%r7,__PT_R3(%r11)
mvc SP_ARGS(8,%r15),SP_R7(%r15) stg %r7,STACK_FRAME_OVERHEAD(%r15)
lg %r2,SP_ORIG_R2(%r15) lg %r2,__PT_ORIG_GPR2(%r11)
basr %r14,%r8 # call sys_xxx basr %r14,%r9 # call sys_xxx
stg %r2,SP_R2(%r15) # store return value stg %r2,__PT_R2(%r11) # store return value
sysc_tracenogo: sysc_tracenogo:
tm __TI_flags+6(%r12),_TIF_TRACE >> 8 tm __TI_flags+6(%r12),_TIF_TRACE >> 8
jz sysc_return jz sysc_return
la %r2,SP_PTREGS(%r15) # load pt_regs lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,sysc_return # return point is sysc_return larl %r14,sysc_return
jg do_syscall_trace_exit jg do_syscall_trace_exit
# #
# a new process exits the kernel with ret_from_fork # a new process exits the kernel with ret_from_fork
# #
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
lg %r13,__LC_SVC_NEW_PSW+8 la %r11,STACK_FRAME_OVERHEAD(%r15)
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct lg %r12,__LC_THREAD_INFO
tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
jo 0f jo 0f
stg %r15,SP_R15(%r15) # store stack pointer for new kthread stg %r15,__PT_R15(%r11) # store stack pointer for new kthread
0: brasl %r14,schedule_tail 0: brasl %r14,schedule_tail
TRACE_IRQS_ON TRACE_IRQS_ON
stosm 24(%r15),0x03 # reenable interrupts ssm __LC_SVC_NEW_PSW # reenable interrupts
j sysc_tracenogo j sysc_tracenogo
# #
...@@ -409,26 +360,26 @@ ENTRY(ret_from_fork)
ENTRY(kernel_execve) ENTRY(kernel_execve)
stmg %r12,%r15,96(%r15) stmg %r12,%r15,96(%r15)
lgr %r14,%r15 lgr %r14,%r15
aghi %r15,-SP_SIZE aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
stg %r14,__SF_BACKCHAIN(%r15) stg %r14,__SF_BACKCHAIN(%r15)
la %r12,SP_PTREGS(%r15) la %r12,STACK_FRAME_OVERHEAD(%r15)
xc 0(__PT_SIZE,%r12),0(%r12) xc 0(__PT_SIZE,%r12),0(%r12)
lgr %r5,%r12 lgr %r5,%r12
brasl %r14,do_execve brasl %r14,do_execve
ltgfr %r2,%r2 ltgfr %r2,%r2
je 0f je 0f
aghi %r15,SP_SIZE aghi %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE)
lmg %r12,%r15,96(%r15) lmg %r12,%r15,96(%r15)
br %r14 br %r14
# execve succeeded. # execve succeeded.
0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts 0: ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
lg %r15,__LC_KERNEL_STACK # load ksp lg %r15,__LC_KERNEL_STACK # load ksp
aghi %r15,-SP_SIZE # make room for registers & psw aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
lg %r13,__LC_SVC_NEW_PSW+8 la %r11,STACK_FRAME_OVERHEAD(%r15)
mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs mvc 0(__PT_SIZE,%r11),0(%r12) # copy pt_regs
lg %r12,__LC_THREAD_INFO lg %r12,__LC_THREAD_INFO
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts ssm __LC_SVC_NEW_PSW # reenable interrupts
brasl %r14,execve_tail brasl %r14,execve_tail
j sysc_return j sysc_return
...@@ -437,127 +388,72 @@ ENTRY(kernel_execve)
*/ */
ENTRY(pgm_check_handler) ENTRY(pgm_check_handler)
/*
* First we need to check for a special case:
* Single stepping an instruction that disables the PER event mask will
* cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
* For a single stepped SVC the program check handler gets control after
* the SVC new PSW has been loaded. But we want to execute the SVC first and
* then handle the PER event. Therefore we update the SVC old PSW to point
* to the pgm_check_handler and branch to the SVC handler after we checked
* if we have to load the kernel stack register.
* For every other possible cause for PER event without the PER mask set
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
stpt __LC_SYNC_ENTER_TIMER stpt __LC_SYNC_ENTER_TIMER
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception stmg %r8,%r15,__LC_SAVE_AREA_SYNC
jnz pgm_per # got per exception -> special case lg %r10,__LC_LAST_BREAK
SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA lg %r12,__LC_THREAD_INFO
CREATE_STACK_FRAME __LC_SAVE_AREA larl %r13,system_call
mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW lmg %r8,%r9,__LC_PGM_OLD_PSW
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct HANDLE_SIE_INTERCEPT %r14
HANDLE_SIE_INTERCEPT tmhh %r8,0x0001 # test problem state bit
tm SP_PSW+1(%r15),0x01 # interrupting from user ? jnz 1f # -> fault in user space
jz pgm_no_vtime tmhh %r8,0x4000 # PER bit set in old PSW ?
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER jnz 0f # -> enabled, can't be a double fault
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER tm __LC_PGM_ILC+3,0x80 # check for per exception
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER jnz pgm_svcper # -> single stepped svc
LAST_BREAK 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
pgm_no_vtime: j 2f
stg %r11,SP_ARGS(%r15) 1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
lgf %r3,__LC_PGM_ILC # load program interruption code LAST_BREAK %r14
lg %r4,__LC_TRANS_EXC_CODE lg %r15,__LC_KERNEL_STACK
REENABLE_IRQS 2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
lghi %r8,0x7f la %r11,STACK_FRAME_OVERHEAD(%r15)
ngr %r8,%r3 stmg %r0,%r7,__PT_R0(%r11)
sll %r8,3 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
larl %r1,pgm_check_table stmg %r8,%r9,__PT_PSW(%r11)
lg %r1,0(%r8,%r1) # load address of handler routine stg %r10,__PT_ARGS(%r11)
la %r2,SP_PTREGS(%r15) # address of register-save area tm __LC_PGM_ILC+3,0x80 # check for per exception
basr %r14,%r1 # branch to interrupt-handler jz 0f
pgm_exit:
j sysc_return
#
# handle per exception
#
pgm_per:
tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
jnz pgm_per_std # ok, normal per event from user space
# ok its one of the special cases, now we need to find out which one
clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
je pgm_svcper
# no interesting special case, ignore PER event
lpswe __LC_PGM_OLD_PSW
#
# Normal per exception
#
pgm_per_std:
SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
HANDLE_SIE_INTERCEPT
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime2
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK
pgm_no_vtime2:
lg %r1,__TI_task(%r12) lg %r1,__TI_task(%r12)
tm SP_PSW+1(%r15),0x01 # kernel per event ? tmhh %r8,0x0001 # kernel per event ?
jz kernel_per jz pgm_kprobe
mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE oi __TI_flags+7(%r12),_TIF_PER_TRAP
mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP 0: lgf %r3,__LC_PGM_ILC # load program interruption code
lgf %r3,__LC_PGM_ILC # load program interruption code
lg %r4,__LC_TRANS_EXC_CODE lg %r4,__LC_TRANS_EXC_CODE
REENABLE_IRQS REENABLE_IRQS
lghi %r8,0x7f xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
ngr %r8,%r3 # clear per-event-bit and ilc lghi %r10,0x7f
je pgm_exit2 ngr %r10,%r3
sll %r8,3 je sysc_return
sll %r10,3
larl %r1,pgm_check_table larl %r1,pgm_check_table
lg %r1,0(%r8,%r1) # load address of handler routine lg %r1,0(%r10,%r1) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area lgr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # branch to interrupt-handler basr %r14,%r1 # branch to interrupt-handler
pgm_exit2:
j sysc_return j sysc_return
# #
# it was a single stepped SVC that is causing all the trouble # PER event in supervisor state, must be kprobes
# #
pgm_svcper: pgm_kprobe:
SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA REENABLE_IRQS
CREATE_STACK_FRAME __LC_SAVE_AREA xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct lgr %r2,%r11 # pass pointer to pt_regs
mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW brasl %r14,do_per_trap
mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC j sysc_return
oi __TI_flags+7(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK
lg %r8,__TI_task(%r12)
mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
lmg %r2,%r6,SP_R2(%r15) # load svc arguments
j sysc_do_svc
# #
# per was called from kernel, must be kprobes # single stepped system call
# #
kernel_per: pgm_svcper:
REENABLE_IRQS oi __TI_flags+7(%r12),_TIF_PER_TRAP
la %r2,SP_PTREGS(%r15) # address of register-save area mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
brasl %r14,do_per_trap larl %r14,sysc_per
j pgm_exit stg %r14,__LC_RETURN_PSW+8
lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs
/* /*
* IO interrupt handler routine * IO interrupt handler routine
...@@ -565,21 +461,25 @@ kernel_per:
ENTRY(io_int_handler) ENTRY(io_int_handler)
stck __LC_INT_CLOCK stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
CREATE_STACK_FRAME __LC_SAVE_AREA+40 lg %r10,__LC_LAST_BREAK
mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack lg %r12,__LC_THREAD_INFO
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct larl %r13,system_call
HANDLE_SIE_INTERCEPT lmg %r8,%r9,__LC_IO_OLD_PSW
tm SP_PSW+1(%r15),0x01 # interrupting from user ? HANDLE_SIE_INTERCEPT %r14
jz io_no_vtime SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER tmhh %r8,0x0001 # interrupting from user?
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER jz io_skip
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
LAST_BREAK LAST_BREAK %r14
io_no_vtime: io_skip:
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
TRACE_IRQS_OFF TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
brasl %r14,do_IRQ # call standard irq handler lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_IRQ
io_return: io_return:
LOCKDEP_SYS_EXIT LOCKDEP_SYS_EXIT
TRACE_IRQS_ON TRACE_IRQS_ON
...@@ -587,7 +487,14 @@ io_tif:
tm __TI_flags+7(%r12),_TIF_WORK_INT tm __TI_flags+7(%r12),_TIF_WORK_INT
jnz io_work # there is work to do (signals etc.) jnz io_work # there is work to do (signals etc.)
io_restore: io_restore:
RESTORE_ALL __LC_RETURN_PSW,0 lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_PSW
io_done: io_done:
# #
...@@ -600,7 +507,7 @@ io_done:
# Before any work can be done, a switch to the kernel stack is required. # Before any work can be done, a switch to the kernel stack is required.
# #
io_work: io_work:
tm SP_PSW+1(%r15),0x01 # returning to user ? tm __PT_PSW+1(%r11),0x01 # returning to user ?
jo io_work_user # yes -> do resched & signal jo io_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
# check for preemptive scheduling # check for preemptive scheduling
...@@ -609,10 +516,11 @@ io_work:
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jno io_restore jno io_restore
# switch to kernel stack # switch to kernel stack
lg %r1,SP_R15(%r15) lg %r1,__PT_R15(%r11)
aghi %r1,-SP_SIZE aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1 lgr %r15,%r1
# TRACE_IRQS_ON already done at io_return, call # TRACE_IRQS_ON already done at io_return, call
# TRACE_IRQS_OFF to keep things symmetrical # TRACE_IRQS_OFF to keep things symmetrical
...@@ -628,9 +536,10 @@ io_work:
# #
io_work_user: io_work_user:
lg %r1,__LC_KERNEL_STACK lg %r1,__LC_KERNEL_STACK
aghi %r1,-SP_SIZE aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1 lgr %r15,%r1
# #
...@@ -663,9 +572,9 @@ io_mcck_pending:
# #
io_reschedule: io_reschedule:
# TRACE_IRQS_ON already done at io_return # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts ssm __LC_SVC_NEW_PSW # reenable interrupts
brasl %r14,schedule # call scheduler brasl %r14,schedule # call scheduler
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF TRACE_IRQS_OFF
j io_return j io_return
...@@ -674,10 +583,10 @@ io_reschedule:
# #
io_sigpending: io_sigpending:
# TRACE_IRQS_ON already done at io_return # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts ssm __LC_SVC_NEW_PSW # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_signal # call do_signal brasl %r14,do_signal
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF TRACE_IRQS_OFF
j io_return j io_return
...@@ -686,10 +595,10 @@ io_sigpending:
# #
io_notify_resume: io_notify_resume:
# TRACE_IRQS_ON already done at io_return # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts ssm __LC_SVC_NEW_PSW # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_notify_resume # call do_notify_resume brasl %r14,do_notify_resume
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF TRACE_IRQS_OFF
j io_return j io_return
...@@ -699,21 +608,24 @@ io_notify_resume:
ENTRY(ext_int_handler) ENTRY(ext_int_handler)
stck __LC_INT_CLOCK stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
CREATE_STACK_FRAME __LC_SAVE_AREA+40 lg %r10,__LC_LAST_BREAK
mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack lg %r12,__LC_THREAD_INFO
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct larl %r13,system_call
HANDLE_SIE_INTERCEPT lmg %r8,%r9,__LC_EXT_OLD_PSW
tm SP_PSW+1(%r15),0x01 # interrupting from user ? HANDLE_SIE_INTERCEPT %r14
jz ext_no_vtime SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER tmhh %r8,0x0001 # interrupting from user ?
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER jz ext_skip
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
LAST_BREAK LAST_BREAK %r14
ext_no_vtime: ext_skip:
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
TRACE_IRQS_OFF TRACE_IRQS_OFF
lghi %r1,4096 lghi %r1,4096
la %r2,SP_PTREGS(%r15) # address of register-save area lgr %r2,%r11 # pass pointer to pt_regs
llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
llgf %r4,__LC_EXT_PARAMS # get external parameter llgf %r4,__LC_EXT_PARAMS # get external parameter
lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter
...@@ -730,81 +642,77 @@ ENTRY(mcck_int_handler)
la %r1,4095 # revalidate r1 la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
stmg %r11,%r15,__LC_SAVE_AREA+80 lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
larl %r13,system_call larl %r13,system_call
lg %r11,__LC_LAST_BREAK lmg %r8,%r9,__LC_MCK_OLD_PSW
la %r12,__LC_MCK_OLD_PSW HANDLE_SIE_INTERCEPT %r14
tm __LC_MCCK_CODE,0x80 # system damage? tm __LC_MCCK_CODE,0x80 # system damage?
jo mcck_int_main # yes -> rest of mcck code invalid jo mcck_panic # yes -> rest of mcck code invalid
la %r14,4095 lghi %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14) mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
jo 1f jo 3f
la %r14,__LC_SYNC_ENTER_TIMER la %r14,__LC_SYNC_ENTER_TIMER
clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
jl 0f jl 0f
la %r14,__LC_ASYNC_ENTER_TIMER la %r14,__LC_ASYNC_ENTER_TIMER
0: clc 0(8,%r14),__LC_EXIT_TIMER 0: clc 0(8,%r14),__LC_EXIT_TIMER
jl 0f jl 1f
la %r14,__LC_EXIT_TIMER la %r14,__LC_EXIT_TIMER
0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
jl 0f jl 2f
la %r14,__LC_LAST_UPDATE_TIMER la %r14,__LC_LAST_UPDATE_TIMER
0: spt 0(%r14) 2: spt 0(%r14)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
jno mcck_int_main # no -> skip cleanup critical jno mcck_panic # no -> skip cleanup critical
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
jnz mcck_int_main # from user -> load kernel stack tm %r8,0x0001 # interrupting from user ?
clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end) jz mcck_skip
jhe mcck_int_main UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start) LAST_BREAK %r14
jl mcck_int_main mcck_skip:
brasl %r14,cleanup_critical lghi %r14,__LC_GPREGS_SAVE_AREA
mcck_int_main: mvc __PT_R0(128,%r11),0(%r14)
lg %r14,__LC_PANIC_STACK # are we already on the panic stack? stmg %r8,%r9,__PT_PSW(%r11)
slgr %r14,%r15 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
srag %r14,%r14,PAGE_SHIFT lgr %r2,%r11 # pass pointer to pt_regs
jz 0f
lg %r15,__LC_PANIC_STACK # load panic stack
0: aghi %r15,-SP_SIZE # make room for registers & psw
CREATE_STACK_FRAME __LC_SAVE_AREA+80
mvc SP_PSW(16,%r15),0(%r12)
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
jno mcck_no_vtime # no -> no timer update
HANDLE_SIE_INTERCEPT
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz mcck_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
LAST_BREAK
mcck_no_vtime:
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,s390_do_machine_check brasl %r14,s390_do_machine_check
tm SP_PSW+1(%r15),0x01 # returning to user ? tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno mcck_return jno mcck_return
lg %r1,__LC_KERNEL_STACK # switch to kernel stack lg %r1,__LC_KERNEL_STACK # switch to kernel stack
aghi %r1,-SP_SIZE aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1 lgr %r15,%r1
stosm __SF_EMPTY(%r15),0x04 # turn dat on ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
tm __TI_flags+7(%r12),_TIF_MCCK_PENDING tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
jno mcck_return jno mcck_return
TRACE_IRQS_OFF TRACE_IRQS_OFF
brasl %r14,s390_handle_mcck brasl %r14,s390_handle_mcck
TRACE_IRQS_ON TRACE_IRQS_ON
mcck_return: mcck_return:
mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f jno 0f
stpt __LC_EXIT_TIMER stpt __LC_EXIT_TIMER
0: lpswe __LC_RETURN_MCCK_PSW # back to caller mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
mcck_done: 0: lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_MCCK_PSW
mcck_panic:
lg %r14,__LC_PANIC_STACK
slgr %r14,%r15
srag %r14,%r14,PAGE_SHIFT
jz 0f
lg %r15,__LC_PANIC_STACK
0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j mcck_skip
/* /*
* Restart interruption handler, kick starter for additional CPUs * Restart interruption handler, kick starter for additional CPUs
...@@ -818,17 +726,18 @@ restart_base:
stck __LC_LAST_UPDATE_CLOCK stck __LC_LAST_UPDATE_CLOCK
mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
lg %r15,__LC_SAVE_AREA+120 # load ksp lghi %r10,__LC_GPREGS_SAVE_AREA
lg %r15,120(%r10) # load ksp
lghi %r10,__LC_CREGS_SAVE_AREA lghi %r10,__LC_CREGS_SAVE_AREA
lctlg %c0,%c15,0(%r10) # get new ctl regs lctlg %c0,%c15,0(%r10) # get new ctl regs
lghi %r10,__LC_AREGS_SAVE_AREA lghi %r10,__LC_AREGS_SAVE_AREA
lam %a0,%a15,0(%r10) lam %a0,%a15,0(%r10)
lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone lmg %r6,%r15,__SF_GPRS(%r15)# load registers from clone
lg %r1,__LC_THREAD_INFO lg %r1,__LC_THREAD_INFO
mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
brasl %r14,start_secondary brasl %r14,start_secondary
.align 8 .align 8
restart_vtime: restart_vtime:
...@@ -852,16 +761,16 @@ restart_go:
# PSW restart interrupt handler # PSW restart interrupt handler
# #
ENTRY(psw_restart_int_handler) ENTRY(psw_restart_int_handler)
stg %r15,__LC_SAVE_AREA+120(%r0) # save r15 stg %r15,__LC_SAVE_AREA_RESTART
larl %r15,restart_stack # load restart stack larl %r15,restart_stack # load restart stack
lg %r15,0(%r15) lg %r15,0(%r15)
aghi %r15,-SP_SIZE # make room for pt_regs aghi %r15,-__PT_SIZE # create pt_regs on stack
stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack stmg %r0,%r14,__PT_R0(%r15)
mvc SP_R15(8,%r15),__LC_SAVE_AREA+120(%r0)# store saved %r15 to stack mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 aghi %r15,-STACK_FRAME_OVERHEAD
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
brasl %r14,do_restart brasl %r14,do_restart
larl %r14,restart_psw_crash # load disabled wait PSW if larl %r14,restart_psw_crash # load disabled wait PSW if
lpswe 0(%r14) # do_restart returns lpswe 0(%r14) # do_restart returns
.align 8 .align 8
...@@ -877,172 +786,153 @@ restart_psw_crash:
* Setup a pt_regs so that show_trace can provide a good call trace. * Setup a pt_regs so that show_trace can provide a good call trace.
*/ */
stack_overflow: stack_overflow:
lg %r15,__LC_PANIC_STACK # change to panic stack lg %r11,__LC_PANIC_STACK # change to panic stack
aghi %r15,-SP_SIZE aghi %r11,-__PT_SIZE # create pt_regs
mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack stmg %r0,%r7,__PT_R0(%r11)
stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack stmg %r8,%r9,__PT_PSW(%r11)
la %r1,__LC_SAVE_AREA mvc __PT_R8(64,%r11),0(%r14)
chi %r12,__LC_SVC_OLD_PSW stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
je 0f lgr %r15,%r11
chi %r12,__LC_PGM_OLD_PSW aghi %r15,-STACK_FRAME_OVERHEAD
je 0f xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
la %r1,__LC_SAVE_AREA+40 lgr %r2,%r11 # pass pointer to pt_regs
0: mvc SP_R11(40,%r15),0(%r1) # move %r11-%r15 to stack
mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
la %r2,SP_PTREGS(%r15) # load pt_regs
jg kernel_stack_overflow jg kernel_stack_overflow
#endif #endif
cleanup_table_system_call: .align 8
.quad system_call, sysc_do_svc cleanup_table:
cleanup_table_sysc_tif: .quad system_call
.quad sysc_tif, sysc_restore .quad sysc_do_svc
cleanup_table_sysc_restore: .quad sysc_tif
.quad sysc_restore, sysc_done .quad sysc_restore
cleanup_table_io_tif: .quad sysc_done
.quad io_tif, io_restore .quad io_tif
cleanup_table_io_restore: .quad io_restore
.quad io_restore, io_done .quad io_done
cleanup_critical: cleanup_critical:
clc 8(8,%r12),BASED(cleanup_table_system_call) clg %r9,BASED(cleanup_table) # system_call
jl 0f jl 0f
clc 8(8,%r12),BASED(cleanup_table_system_call+8) clg %r9,BASED(cleanup_table+8) # sysc_do_svc
jl cleanup_system_call jl cleanup_system_call
0: clg %r9,BASED(cleanup_table+16) # sysc_tif
clc 8(8,%r12),BASED(cleanup_table_sysc_tif)
jl 0f jl 0f
clc 8(8,%r12),BASED(cleanup_table_sysc_tif+8) clg %r9,BASED(cleanup_table+24) # sysc_restore
jl cleanup_sysc_tif jl cleanup_sysc_tif
0: clg %r9,BASED(cleanup_table+32) # sysc_done
clc 8(8,%r12),BASED(cleanup_table_sysc_restore)
jl 0f
clc 8(8,%r12),BASED(cleanup_table_sysc_restore+8)
jl cleanup_sysc_restore jl cleanup_sysc_restore
0: clg %r9,BASED(cleanup_table+40) # io_tif
clc 8(8,%r12),BASED(cleanup_table_io_tif)
jl 0f jl 0f
clc 8(8,%r12),BASED(cleanup_table_io_tif+8) clg %r9,BASED(cleanup_table+48) # io_restore
jl cleanup_io_tif jl cleanup_io_tif
0: clg %r9,BASED(cleanup_table+56) # io_done
clc 8(8,%r12),BASED(cleanup_table_io_restore)
jl 0f
clc 8(8,%r12),BASED(cleanup_table_io_restore+8)
jl cleanup_io_restore jl cleanup_io_restore
0: 0: br %r14
br %r14
cleanup_system_call: cleanup_system_call:
mvc __LC_RETURN_PSW(16),0(%r12) # check if stpt has been executed
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) clg %r9,BASED(cleanup_system_call_insn)
jh 0f jh 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
cghi %r12,__LC_MCK_OLD_PSW
je 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: cghi %r12,__LC_MCK_OLD_PSW cghi %r11,__LC_SAVE_AREA_ASYNC
la %r12,__LC_SAVE_AREA+80
je 0f je 0f
la %r12,__LC_SAVE_AREA+40 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) 0: # check if stmg has been executed
jhe cleanup_vtime clg %r9,BASED(cleanup_system_call_insn+8)
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
jh 0f jh 0f
mvc __LC_SAVE_AREA(40),0(%r12) mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
0: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp 0: # check if base register setup + TIF bit load has been done
aghi %r15,-SP_SIZE # make room for registers & psw clg %r9,BASED(cleanup_system_call_insn+16)
stg %r15,32(%r12) jhe 0f
stg %r11,0(%r12) # set up saved registers r10 and r12
CREATE_STACK_FRAME __LC_SAVE_AREA stg %r10,16(%r11) # r10 last break
mvc 8(8,%r12),__LC_THREAD_INFO stg %r12,32(%r11) # r12 thread-info pointer
lg %r12,__LC_THREAD_INFO 0: # check if the user time update has been done
mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW clg %r9,BASED(cleanup_system_call_insn+24)
mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC jh 0f
oi __TI_flags+7(%r12),_TIF_SYSCALL lg %r15,__LC_EXIT_TIMER
cleanup_vtime: slg %r15,__LC_SYNC_ENTER_TIMER
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) alg %r15,__LC_USER_TIMER
jhe cleanup_stime stg %r15,__LC_USER_TIMER
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 0: # check if the system time update has been done
cleanup_stime: clg %r9,BASED(cleanup_system_call_insn+32)
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32) jh 0f
jh cleanup_update lg %r15,__LC_LAST_UPDATE_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER slg %r15,__LC_EXIT_TIMER
cleanup_update: alg %r15,__LC_SYSTEM_TIMER
stg %r15,__LC_SYSTEM_TIMER
0: # update accounting time stamp
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
srag %r12,%r11,23 # do LAST_BREAK
lg %r12,__LC_THREAD_INFO lg %r9,16(%r11)
srag %r9,%r9,23
jz 0f jz 0f
stg %r11,__TI_last_break(%r12) mvc __TI_last_break(8,%r12),16(%r11)
0: mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) 0: # set up saved register r11
la %r12,__LC_RETURN_PSW lg %r15,__LC_KERNEL_STACK
aghi %r15,-__PT_SIZE
stg %r15,24(%r11) # r11 pt_regs pointer
# fill pt_regs
mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC
stmg %r0,%r7,__PT_R0(%r15)
mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW
mvc __PT_SVC_CODE(4,%r15),__LC_SVC_ILC
# setup saved register r15
aghi %r15,-STACK_FRAME_OVERHEAD
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
larl %r9,sysc_do_svc
br %r14 br %r14
cleanup_system_call_insn: cleanup_system_call_insn:
.quad sysc_saveall
.quad system_call .quad system_call
.quad sysc_vtime .quad sysc_stmg
.quad sysc_stime .quad sysc_per
.quad sysc_update .quad sysc_vtime+18
.quad sysc_vtime+42
cleanup_sysc_tif: cleanup_sysc_tif:
mvc __LC_RETURN_PSW(8),0(%r12) larl %r9,sysc_tif
mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif)
la %r12,__LC_RETURN_PSW
br %r14 br %r14
cleanup_sysc_restore: cleanup_sysc_restore:
clc 8(8,%r12),BASED(cleanup_sysc_restore_insn) clg %r9,BASED(cleanup_sysc_restore_insn)
je 2f
clc 8(8,%r12),BASED(cleanup_sysc_restore_insn+8)
jhe 0f
mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
cghi %r12,__LC_MCK_OLD_PSW
je 0f je 0f
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER lg %r9,24(%r11) # get saved pointer to pt_regs
0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
cghi %r12,__LC_MCK_OLD_PSW mvc 0(64,%r11),__PT_R8(%r9)
la %r12,__LC_SAVE_AREA+80 lmg %r0,%r7,__PT_R0(%r9)
je 1f 0: lmg %r8,%r9,__LC_RETURN_PSW
la %r12,__LC_SAVE_AREA+40
1: mvc 0(40,%r12),SP_R11(%r15)
lmg %r0,%r10,SP_R0(%r15)
lg %r15,SP_R15(%r15)
2: la %r12,__LC_RETURN_PSW
br %r14 br %r14
cleanup_sysc_restore_insn: cleanup_sysc_restore_insn:
.quad sysc_done - 4 .quad sysc_done - 4
.quad sysc_done - 16
cleanup_io_tif: cleanup_io_tif:
mvc __LC_RETURN_PSW(8),0(%r12) larl %r9,io_tif
mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif)
la %r12,__LC_RETURN_PSW
br %r14 br %r14
cleanup_io_restore: cleanup_io_restore:
clc 8(8,%r12),BASED(cleanup_io_restore_insn) clg %r9,BASED(cleanup_io_restore_insn)
je 1f je 0f
clc 8(8,%r12),BASED(cleanup_io_restore_insn+8) lg %r9,24(%r11) # get saved r11 pointer to pt_regs
jhe 0f mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) mvc 0(64,%r11),__PT_R8(%r9)
mvc __LC_SAVE_AREA+80(40),SP_R11(%r15) lmg %r0,%r7,__PT_R0(%r9)
lmg %r0,%r10,SP_R0(%r15) 0: lmg %r8,%r9,__LC_RETURN_PSW
lg %r15,SP_R15(%r15)
1: la %r12,__LC_RETURN_PSW
br %r14 br %r14
cleanup_io_restore_insn: cleanup_io_restore_insn:
.quad io_done - 4 .quad io_done - 4
.quad io_done - 16
/* /*
* Integer constants * Integer constants
*/ */
.align 4 .align 8
.Lcritical_start: .Lcritical_start:
.quad __critical_start .quad __critical_start
.Lcritical_end: .Lcritical_length:
.quad __critical_end .quad __critical_end - __critical_start
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
/* /*
...@@ -1094,8 +984,8 @@ sie_fault:
.align 8 .align 8
.Lsie_loop: .Lsie_loop:
.quad sie_loop .quad sie_loop
.Lsie_done: .Lsie_length:
.quad sie_done .quad sie_done - sie_loop
.Lhost_id: .Lhost_id:
.quad 0 .quad 0
...
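cleanup_critical above no longer carries a pair of addresses per label: it compares the interrupted instruction address in %r9 against a single ordered cleanup_table of boundary addresses and branches to the cleanup routine of the first range that matches, while ranges that need no fixup simply return. A minimal C sketch of that lookup (illustrative only; names are hypothetical):

typedef void (*cleanup_fn)(void);

struct cleanup_range {
	unsigned long end;	/* first code address past this range */
	cleanup_fn fixup;	/* NULL: nothing to undo for this range */
};

/* table entries must be sorted by address, like cleanup_table in the patch */
static cleanup_fn pick_cleanup(const struct cleanup_range *table,
			       unsigned long n, unsigned long addr)
{
	unsigned long i;

	for (i = 0; i < n; i++)
		if (addr < table[i].end)
			return table[i].fixup;
	return NULL;	/* past the last boundary: no cleanup needed */
}

Because the entry-path labels are laid out consecutively in memory, keeping the table sorted lets one chain of compare-and-branch-low tests replace the old per-label compare pairs.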
...@@ -329,8 +329,8 @@ iplstart:
# #
# reset files in VM reader # reset files in VM reader
# #
stidp __LC_SAVE_AREA # store cpuid stidp __LC_SAVE_AREA_SYNC # store cpuid
tm __LC_SAVE_AREA,0xff # running VM ? tm __LC_SAVE_AREA_SYNC,0xff# running VM ?
bno .Lnoreset bno .Lnoreset
la %r2,.Lreset la %r2,.Lreset
lhi %r3,26 lhi %r3,26
...
...@@ -17,11 +17,11 @@
# #
ENTRY(store_status) ENTRY(store_status)
/* Save register one and load save area base */ /* Save register one and load save area base */
stg %r1,__LC_SAVE_AREA+120(%r0) stg %r1,__LC_SAVE_AREA_RESTART
lghi %r1,SAVE_AREA_BASE lghi %r1,SAVE_AREA_BASE
/* General purpose registers */ /* General purpose registers */
stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
lg %r2,__LC_SAVE_AREA+120(%r0) lg %r2,__LC_SAVE_AREA_RESTART
stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
/* Control registers */ /* Control registers */
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
...
...@@ -654,7 +654,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
- sizeof(struct stack_frame)); - sizeof(struct stack_frame));
memset(sf, 0, sizeof(struct stack_frame)); memset(sf, 0, sizeof(struct stack_frame));
sf->gprs[9] = (unsigned long) sf; sf->gprs[9] = (unsigned long) sf;
cpu_lowcore->save_area[15] = (unsigned long) sf; cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf;
__ctl_store(cpu_lowcore->cregs_save_area, 0, 15); __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
atomic_inc(&init_mm.context.attach_count); atomic_inc(&init_mm.context.attach_count);
asm volatile( asm volatile(
...