Commit d3a73acb authored by Martin Schwidefsky

s390: split TIF bits into CIF, PIF and TIF bits

The oi and ni instructions used in entry[64].S to set and clear bits
in the thread-flags are not guaranteed to be atomic in regard to other
CPUs. Split the TIF bits into CPU, pt_regs and thread-info specific
bits. Updates on the TIF bits are done with atomic instructions,
updates on CPU and pt_regs bits are done with non-atomic instructions.
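The rationale can be condensed into a small standalone C sketch (illustrative only, not part of the patch; it uses GCC's __atomic builtins instead of the kernel's bitops, and the helper names set_flag_atomic/set_flag_local are invented for the example): a flag word that other CPUs may modify concurrently, like the TIF bits in thread_info->flags, needs an interlocked read-modify-write, while a flag word only ever touched by the owning CPU or task, like the new lowcore cpu_flags (CIF) and pt_regs->flags (PIF), can be updated with a plain or/and, which is what the oi/ni instructions do.

#include <stdio.h>

/* Flag word shared with other CPUs (like thread_info->flags, the TIF bits):
 * concurrent updates are possible, so the read-modify-write must be atomic. */
static void set_flag_atomic(unsigned long *flags, int bit)
{
	__atomic_fetch_or(flags, 1UL << bit, __ATOMIC_SEQ_CST);
}

static void clear_flag_atomic(unsigned long *flags, int bit)
{
	__atomic_fetch_and(flags, ~(1UL << bit), __ATOMIC_SEQ_CST);
}

/* Flag word private to one CPU or task (like lowcore cpu_flags for the CIF
 * bits or pt_regs->flags for the PIF bits): never touched from another CPU,
 * so a plain or/and-immediate (the oi/ni instructions) is sufficient. */
static void set_flag_local(unsigned long *flags, int bit)
{
	*flags |= 1UL << bit;
}

static void clear_flag_local(unsigned long *flags, int bit)
{
	*flags &= ~(1UL << bit);
}

int main(void)
{
	unsigned long tif = 0, cif = 0;

	set_flag_atomic(&tif, 1);	/* e.g. TIF_SIGPENDING set by another CPU */
	set_flag_local(&cif, 0);	/* e.g. CIF_MCCK_PENDING on this CPU */
	clear_flag_local(&cif, 0);
	clear_flag_atomic(&tif, 1);

	printf("tif=%#lx cif=%#lx\n", tif, cif);
	return 0;
}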
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent beef560b
...@@ -93,7 +93,9 @@ struct _lowcore { ...@@ -93,7 +93,9 @@ struct _lowcore {
__u32 save_area_sync[8]; /* 0x0200 */ __u32 save_area_sync[8]; /* 0x0200 */
__u32 save_area_async[8]; /* 0x0220 */ __u32 save_area_async[8]; /* 0x0220 */
__u32 save_area_restart[1]; /* 0x0240 */ __u32 save_area_restart[1]; /* 0x0240 */
__u8 pad_0x0244[0x0248-0x0244]; /* 0x0244 */
/* CPU flags. */
__u32 cpu_flags; /* 0x0244 */
/* Return psws. */ /* Return psws. */
psw_t return_psw; /* 0x0248 */ psw_t return_psw; /* 0x0248 */
...@@ -237,7 +239,9 @@ struct _lowcore { ...@@ -237,7 +239,9 @@ struct _lowcore {
__u64 save_area_sync[8]; /* 0x0200 */ __u64 save_area_sync[8]; /* 0x0200 */
__u64 save_area_async[8]; /* 0x0240 */ __u64 save_area_async[8]; /* 0x0240 */
__u64 save_area_restart[1]; /* 0x0280 */ __u64 save_area_restart[1]; /* 0x0280 */
__u8 pad_0x0288[0x0290-0x0288]; /* 0x0288 */
/* CPU flags. */
__u64 cpu_flags; /* 0x0288 */
/* Return psws. */ /* Return psws. */
psw_t return_psw; /* 0x0290 */ psw_t return_psw; /* 0x0290 */
......
...@@ -36,7 +36,7 @@ static inline void set_user_asce(struct mm_struct *mm) ...@@ -36,7 +36,7 @@ static inline void set_user_asce(struct mm_struct *mm)
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
set_fs(current->thread.mm_segment); set_fs(current->thread.mm_segment);
set_thread_flag(TIF_ASCE); set_cpu_flag(CIF_ASCE);
} }
static inline void clear_user_asce(void) static inline void clear_user_asce(void)
...@@ -54,7 +54,7 @@ static inline void load_kernel_asce(void) ...@@ -54,7 +54,7 @@ static inline void load_kernel_asce(void)
__ctl_store(asce, 1, 1); __ctl_store(asce, 1, 1);
if (asce != S390_lowcore.kernel_asce) if (asce != S390_lowcore.kernel_asce)
__ctl_load(S390_lowcore.kernel_asce, 1, 1); __ctl_load(S390_lowcore.kernel_asce, 1, 1);
set_thread_flag(TIF_ASCE); set_cpu_flag(CIF_ASCE);
} }
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
...@@ -70,7 +70,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, ...@@ -70,7 +70,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
__ctl_load(S390_lowcore.kernel_asce, 1, 1); __ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7); __ctl_load(S390_lowcore.kernel_asce, 7, 7);
/* Delay loading of the new ASCE to control registers CR1 & CR7 */ /* Delay loading of the new ASCE to control registers CR1 & CR7 */
set_thread_flag(TIF_ASCE); set_cpu_flag(CIF_ASCE);
atomic_inc(&next->context.attach_count); atomic_inc(&next->context.attach_count);
atomic_dec(&prev->context.attach_count); atomic_dec(&prev->context.attach_count);
if (MACHINE_HAS_TLB_LC) if (MACHINE_HAS_TLB_LC)
......
...@@ -11,6 +11,13 @@ ...@@ -11,6 +11,13 @@
#ifndef __ASM_S390_PROCESSOR_H #ifndef __ASM_S390_PROCESSOR_H
#define __ASM_S390_PROCESSOR_H #define __ASM_S390_PROCESSOR_H
#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING)
#define _CIF_ASCE (1<<CIF_ASCE)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/linkage.h> #include <linux/linkage.h>
...@@ -21,6 +28,21 @@ ...@@ -21,6 +28,21 @@
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/runtime_instr.h> #include <asm/runtime_instr.h>
static inline void set_cpu_flag(int flag)
{
S390_lowcore.cpu_flags |= (1U << flag);
}
static inline void clear_cpu_flag(int flag)
{
S390_lowcore.cpu_flags &= ~(1U << flag);
}
static inline int test_cpu_flag(int flag)
{
return !!(S390_lowcore.cpu_flags & (1U << flag));
}
/* /*
* Default implementation of macro that returns current * Default implementation of macro that returns current
* instruction pointer ("program counter"). * instruction pointer ("program counter").
......
...@@ -8,6 +8,12 @@ ...@@ -8,6 +8,12 @@
#include <uapi/asm/ptrace.h> #include <uapi/asm/ptrace.h>
#define PIF_SYSCALL 0 /* inside a system call */
#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */
#define _PIF_SYSCALL (1<<PIF_SYSCALL)
#define _PIF_PER_TRAP (1<<PIF_PER_TRAP)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \ #define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
...@@ -29,6 +35,7 @@ struct pt_regs ...@@ -29,6 +35,7 @@ struct pt_regs
unsigned int int_code; unsigned int int_code;
unsigned int int_parm; unsigned int int_parm;
unsigned long int_parm_long; unsigned long int_parm_long;
unsigned long flags;
}; };
/* /*
...@@ -79,6 +86,21 @@ struct per_struct_kernel { ...@@ -79,6 +86,21 @@ struct per_struct_kernel {
#define PER_CONTROL_SUSPENSION 0x00400000UL #define PER_CONTROL_SUSPENSION 0x00400000UL
#define PER_CONTROL_ALTERATION 0x00200000UL #define PER_CONTROL_ALTERATION 0x00200000UL
static inline void set_pt_regs_flag(struct pt_regs *regs, int flag)
{
regs->flags |= (1U << flag);
}
static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag)
{
regs->flags &= ~(1U << flag);
}
static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
{
return !!(regs->flags & (1U << flag));
}
/* /*
* These are defined as per linux/ptrace.h, which see. * These are defined as per linux/ptrace.h, which see.
*/ */
......
...@@ -28,7 +28,7 @@ extern const unsigned int sys_call_table_emu[]; ...@@ -28,7 +28,7 @@ extern const unsigned int sys_call_table_emu[];
static inline long syscall_get_nr(struct task_struct *task, static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs) struct pt_regs *regs)
{ {
return test_tsk_thread_flag(task, TIF_SYSCALL) ? return test_pt_regs_flag(regs, PIF_SYSCALL) ?
(regs->int_code & 0xffff) : -1; (regs->int_code & 0xffff) : -1;
} }
......
...@@ -77,30 +77,22 @@ static inline struct thread_info *current_thread_info(void) ...@@ -77,30 +77,22 @@ static inline struct thread_info *current_thread_info(void)
/* /*
* thread information flags bit numbers * thread information flags bit numbers
*/ */
#define TIF_SYSCALL 0 /* inside a system call */ #define TIF_NOTIFY_RESUME 0 /* callback before returning to user */
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ #define TIF_SIGPENDING 1 /* signal pending */
#define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SYSCALL_TRACE 3 /* syscall trace active */
#define TIF_ASCE 5 /* user asce needs fixup / uaccess */ #define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
#define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */ #define TIF_SECCOMP 5 /* secure computing */
#define TIF_MCCK_PENDING 7 /* machine check handling is pending */ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_31BIT 16 /* 32bit process */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_SECCOMP 10 /* secure computing */ #define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ #define TIF_SINGLE_STEP 19 /* This task is single stepped */
#define TIF_31BIT 17 /* 32bit process */ #define TIF_BLOCK_STEP 20 /* This task is block stepped */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
#define TIF_SINGLE_STEP 20 /* This task is single stepped */
#define TIF_BLOCK_STEP 21 /* This task is block stepped */
#define _TIF_SYSCALL (1<<TIF_SYSCALL)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_ASCE (1<<TIF_ASCE)
#define _TIF_PER_TRAP (1<<TIF_PER_TRAP)
#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING)
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_SECCOMP (1<<TIF_SECCOMP)
......
...@@ -50,6 +50,7 @@ int main(void) ...@@ -50,6 +50,7 @@ int main(void)
DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code)); DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code));
DEFINE(__PT_INT_PARM, offsetof(struct pt_regs, int_parm)); DEFINE(__PT_INT_PARM, offsetof(struct pt_regs, int_parm));
DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long)); DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long));
DEFINE(__PT_FLAGS, offsetof(struct pt_regs, flags));
DEFINE(__PT_SIZE, sizeof(struct pt_regs)); DEFINE(__PT_SIZE, sizeof(struct pt_regs));
BLANK(); BLANK();
DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
...@@ -115,6 +116,7 @@ int main(void) ...@@ -115,6 +116,7 @@ int main(void)
DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync)); DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async)); DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart)); DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
DEFINE(__LC_CPU_FLAGS, offsetof(struct _lowcore, cpu_flags));
DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw)); DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw)); DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer)); DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
......
...@@ -213,7 +213,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) ...@@ -213,7 +213,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
sizeof(current->thread.fp_regs)); sizeof(current->thread.fp_regs));
restore_fp_regs(current->thread.fp_regs.fprs); restore_fp_regs(current->thread.fp_regs.fprs);
clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
return 0; return 0;
} }
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
...@@ -37,18 +38,16 @@ __PT_R13 = __PT_GPRS + 524 ...@@ -37,18 +38,16 @@ __PT_R13 = __PT_GPRS + 524
__PT_R14 = __PT_GPRS + 56 __PT_R14 = __PT_GPRS + 56
__PT_R15 = __PT_GPRS + 60 __PT_R15 = __PT_GPRS + 60
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE)
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_ASCE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_ASCE)
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
_PIF_WORK = (_PIF_PER_TRAP)
#define BASED(name) name-system_call(%r13) #define BASED(name) name-system_call(%r13)
.macro TRACE_IRQS_ON .macro TRACE_IRQS_ON
...@@ -160,13 +159,7 @@ ENTRY(__switch_to) ...@@ -160,13 +159,7 @@ ENTRY(__switch_to)
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
l %r15,__THREAD_ksp(%r3) # load kernel stack of next l %r15,__THREAD_ksp(%r3) # load kernel stack of next
lhi %r6,_TIF_TRANSFER # transfer TIF bits lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
n %r6,__TI_flags(%r4) # isolate TIF bits
jz 0f
o %r6,__TI_flags(%r5) # set TIF bits of next
st %r6,__TI_flags(%r5)
ni __TI_flags+3(%r4),255-_TIF_TRANSFER # clear TIF bits of prev
0: lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
br %r14 br %r14
__critical_start: __critical_start:
...@@ -181,6 +174,7 @@ sysc_stm: ...@@ -181,6 +174,7 @@ sysc_stm:
stm %r8,%r15,__LC_SAVE_AREA_SYNC stm %r8,%r15,__LC_SAVE_AREA_SYNC
l %r12,__LC_THREAD_INFO l %r12,__LC_THREAD_INFO
l %r13,__LC_SVC_NEW_PSW+4 l %r13,__LC_SVC_NEW_PSW+4
lhi %r14,_PIF_SYSCALL
sysc_per: sysc_per:
l %r15,__LC_KERNEL_STACK l %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
...@@ -190,8 +184,8 @@ sysc_vtime: ...@@ -190,8 +184,8 @@ sysc_vtime:
mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
st %r14,__PT_FLAGS(%r11)
sysc_do_svc: sysc_do_svc:
oi __TI_flags+3(%r12),_TIF_SYSCALL
l %r10,__TI_sysc_table(%r12) # 31 bit system call table l %r10,__TI_sysc_table(%r12) # 31 bit system call table
lh %r8,__PT_INT_CODE+2(%r11) lh %r8,__PT_INT_CODE+2(%r11)
sla %r8,2 # shift and test for svc0 sla %r8,2 # shift and test for svc0
...@@ -207,7 +201,7 @@ sysc_nr_ok: ...@@ -207,7 +201,7 @@ sysc_nr_ok:
st %r2,__PT_ORIG_GPR2(%r11) st %r2,__PT_ORIG_GPR2(%r11)
st %r7,STACK_FRAME_OVERHEAD(%r15) st %r7,STACK_FRAME_OVERHEAD(%r15)
l %r9,0(%r8,%r10) # get system call addr. l %r9,0(%r8,%r10) # get system call addr.
tm __TI_flags+2(%r12),_TIF_TRACE >> 8 tm __TI_flags+3(%r12),_TIF_TRACE
jnz sysc_tracesys jnz sysc_tracesys
basr %r14,%r9 # call sys_xxxx basr %r14,%r9 # call sys_xxxx
st %r2,__PT_R2(%r11) # store return value st %r2,__PT_R2(%r11) # store return value
...@@ -217,9 +211,12 @@ sysc_return: ...@@ -217,9 +211,12 @@ sysc_return:
sysc_tif: sysc_tif:
tm __PT_PSW+1(%r11),0x01 # returning to user ? tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno sysc_restore jno sysc_restore
tm __TI_flags+3(%r12),_TIF_WORK_SVC tm __PT_FLAGS+3(%r11),_PIF_WORK
jnz sysc_work # check for work jnz sysc_work
ni __TI_flags+3(%r12),255-_TIF_SYSCALL tm __TI_flags+3(%r12),_TIF_WORK
jnz sysc_work # check for thread work
tm __LC_CPU_FLAGS+3,_CIF_WORK
jnz sysc_work
sysc_restore: sysc_restore:
mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER stpt __LC_EXIT_TIMER
...@@ -231,17 +228,17 @@ sysc_done: ...@@ -231,17 +228,17 @@ sysc_done:
# One of the work bits is on. Find out which one. # One of the work bits is on. Find out which one.
# #
sysc_work: sysc_work:
tm __TI_flags+3(%r12),_TIF_MCCK_PENDING tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
jo sysc_mcck_pending jo sysc_mcck_pending
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule jo sysc_reschedule
tm __TI_flags+3(%r12),_TIF_PER_TRAP tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP
jo sysc_singlestep jo sysc_singlestep
tm __TI_flags+3(%r12),_TIF_SIGPENDING tm __TI_flags+3(%r12),_TIF_SIGPENDING
jo sysc_sigpending jo sysc_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume jo sysc_notify_resume
tm __TI_flags+3(%r12),_TIF_ASCE tm __LC_CPU_FLAGS+3,_CIF_ASCE
jo sysc_uaccess jo sysc_uaccess
j sysc_return # beware of critical section cleanup j sysc_return # beware of critical section cleanup
...@@ -254,7 +251,7 @@ sysc_reschedule: ...@@ -254,7 +251,7 @@ sysc_reschedule:
br %r1 # call schedule br %r1 # call schedule
# #
# _TIF_MCCK_PENDING is set, call handler # _CIF_MCCK_PENDING is set, call handler
# #
sysc_mcck_pending: sysc_mcck_pending:
l %r1,BASED(.Lhandle_mcck) l %r1,BASED(.Lhandle_mcck)
...@@ -262,10 +259,10 @@ sysc_mcck_pending: ...@@ -262,10 +259,10 @@ sysc_mcck_pending:
br %r1 # TIF bit will be cleared by handler br %r1 # TIF bit will be cleared by handler
# #
# _TIF_ASCE is set, load user space asce # _CIF_ASCE is set, load user space asce
# #
sysc_uaccess: sysc_uaccess:
ni __TI_flags+3(%r12),255-_TIF_ASCE ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
lctl %c1,%c1,__LC_USER_ASCE # load primary asce lctl %c1,%c1,__LC_USER_ASCE # load primary asce
j sysc_return j sysc_return
...@@ -276,7 +273,7 @@ sysc_sigpending: ...@@ -276,7 +273,7 @@ sysc_sigpending:
lr %r2,%r11 # pass pointer to pt_regs lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Ldo_signal) l %r1,BASED(.Ldo_signal)
basr %r14,%r1 # call do_signal basr %r14,%r1 # call do_signal
tm __TI_flags+3(%r12),_TIF_SYSCALL tm __PT_FLAGS+3(%r11),_PIF_SYSCALL
jno sysc_return jno sysc_return
lm %r2,%r7,__PT_R2(%r11) # load svc arguments lm %r2,%r7,__PT_R2(%r11) # load svc arguments
l %r10,__TI_sysc_table(%r12) # 31 bit system call table l %r10,__TI_sysc_table(%r12) # 31 bit system call table
...@@ -297,10 +294,10 @@ sysc_notify_resume: ...@@ -297,10 +294,10 @@ sysc_notify_resume:
br %r1 # call do_notify_resume br %r1 # call do_notify_resume
# #
# _TIF_PER_TRAP is set, call do_per_trap # _PIF_PER_TRAP is set, call do_per_trap
# #
sysc_singlestep: sysc_singlestep:
ni __TI_flags+3(%r12),255-_TIF_PER_TRAP ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
lr %r2,%r11 # pass pointer to pt_regs lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Ldo_per_trap) l %r1,BASED(.Ldo_per_trap)
la %r14,BASED(sysc_return) la %r14,BASED(sysc_return)
...@@ -330,7 +327,7 @@ sysc_tracego: ...@@ -330,7 +327,7 @@ sysc_tracego:
basr %r14,%r9 # call sys_xxx basr %r14,%r9 # call sys_xxx
st %r2,__PT_R2(%r11) # store return value st %r2,__PT_R2(%r11) # store return value
sysc_tracenogo: sysc_tracenogo:
tm __TI_flags+2(%r12),_TIF_TRACE >> 8 tm __TI_flags+3(%r12),_TIF_TRACE
jz sysc_return jz sysc_return
l %r1,BASED(.Ltrace_exit) l %r1,BASED(.Ltrace_exit)
lr %r2,%r11 # pass pointer to pt_regs lr %r2,%r11 # pass pointer to pt_regs
...@@ -384,12 +381,13 @@ ENTRY(pgm_check_handler) ...@@ -384,12 +381,13 @@ ENTRY(pgm_check_handler)
stm %r8,%r9,__PT_PSW(%r11) stm %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception tm __LC_PGM_ILC+3,0x80 # check for per exception
jz 0f jz 0f
l %r1,__TI_task(%r12) l %r1,__TI_task(%r12)
tmh %r8,0x0001 # kernel per event ? tmh %r8,0x0001 # kernel per event ?
jz pgm_kprobe jz pgm_kprobe
oi __TI_flags+3(%r12),_TIF_PER_TRAP oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP
mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
...@@ -420,9 +418,9 @@ pgm_kprobe: ...@@ -420,9 +418,9 @@ pgm_kprobe:
# single stepped system call # single stepped system call
# #
pgm_svcper: pgm_svcper:
oi __TI_flags+3(%r12),_TIF_PER_TRAP
mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per) mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs
/* /*
...@@ -445,6 +443,7 @@ io_skip: ...@@ -445,6 +443,7 @@ io_skip:
mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
stm %r8,%r9,__PT_PSW(%r11) stm %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF TRACE_IRQS_OFF
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
io_loop: io_loop:
...@@ -466,8 +465,10 @@ io_return: ...@@ -466,8 +465,10 @@ io_return:
LOCKDEP_SYS_EXIT LOCKDEP_SYS_EXIT
TRACE_IRQS_ON TRACE_IRQS_ON
io_tif: io_tif:
tm __TI_flags+3(%r12),_TIF_WORK_INT tm __TI_flags+3(%r12),_TIF_WORK
jnz io_work # there is work to do (signals etc.) jnz io_work # there is work to do (signals etc.)
tm __LC_CPU_FLAGS+3,_CIF_WORK
jnz io_work
io_restore: io_restore:
mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER stpt __LC_EXIT_TIMER
...@@ -477,7 +478,7 @@ io_done: ...@@ -477,7 +478,7 @@ io_done:
# #
# There is work todo, find out in which context we have been interrupted: # There is work todo, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK_INT work # 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and preemptive scheduling is enabled check # 2) if we return to kernel code and preemptive scheduling is enabled check
# the preemption counter and if it is zero call preempt_schedule_irq # the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required. # Before any work can be done, a switch to the kernel stack is required.
...@@ -520,11 +521,9 @@ io_work_user: ...@@ -520,11 +521,9 @@ io_work_user:
# #
# One of the work bits is on. Find out which one. # One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
# and _TIF_MCCK_PENDING
# #
io_work_tif: io_work_tif:
tm __TI_flags+3(%r12),_TIF_MCCK_PENDING tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING
jo io_mcck_pending jo io_mcck_pending
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
jo io_reschedule jo io_reschedule
...@@ -532,12 +531,12 @@ io_work_tif: ...@@ -532,12 +531,12 @@ io_work_tif:
jo io_sigpending jo io_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
jo io_notify_resume jo io_notify_resume
tm __TI_flags+3(%r12),_TIF_ASCE tm __LC_CPU_FLAGS+3,_CIF_ASCE
jo io_uaccess jo io_uaccess
j io_return # beware of critical section cleanup j io_return # beware of critical section cleanup
# #
# _TIF_MCCK_PENDING is set, call handler # _CIF_MCCK_PENDING is set, call handler
# #
io_mcck_pending: io_mcck_pending:
# TRACE_IRQS_ON already done at io_return # TRACE_IRQS_ON already done at io_return
...@@ -547,10 +546,10 @@ io_mcck_pending: ...@@ -547,10 +546,10 @@ io_mcck_pending:
j io_return j io_return
# #
# _TIF_ASCE is set, load user space asce # _CIF_ASCE is set, load user space asce
# #
io_uaccess: io_uaccess:
ni __TI_flags+3(%r12),255-_TIF_ASCE ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
lctl %c1,%c1,__LC_USER_ASCE # load primary asce lctl %c1,%c1,__LC_USER_ASCE # load primary asce
j io_return j io_return
...@@ -613,6 +612,7 @@ ext_skip: ...@@ -613,6 +612,7 @@ ext_skip:
stm %r8,%r9,__PT_PSW(%r11) stm %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF TRACE_IRQS_OFF
l %r1,BASED(.Ldo_IRQ) l %r1,BASED(.Ldo_IRQ)
lr %r2,%r11 # pass pointer to pt_regs lr %r2,%r11 # pass pointer to pt_regs
...@@ -677,6 +677,7 @@ mcck_skip: ...@@ -677,6 +677,7 @@ mcck_skip:
stm %r0,%r7,__PT_R0(%r11) stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
stm %r8,%r9,__PT_PSW(%r11) stm %r8,%r9,__PT_PSW(%r11)
xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
l %r1,BASED(.Ldo_machine_check) l %r1,BASED(.Ldo_machine_check)
lr %r2,%r11 # pass pointer to pt_regs lr %r2,%r11 # pass pointer to pt_regs
...@@ -689,7 +690,7 @@ mcck_skip: ...@@ -689,7 +690,7 @@ mcck_skip:
la %r11,STACK_FRAME_OVERHEAD(%r15) la %r11,STACK_FRAME_OVERHEAD(%r15)
lr %r15,%r1 lr %r15,%r1
ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
tm __TI_flags+3(%r12),_TIF_MCCK_PENDING tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
jno mcck_return jno mcck_return
TRACE_IRQS_OFF TRACE_IRQS_OFF
l %r1,BASED(.Lhandle_mcck) l %r1,BASED(.Lhandle_mcck)
...@@ -842,6 +843,8 @@ cleanup_system_call: ...@@ -842,6 +843,8 @@ cleanup_system_call:
stm %r0,%r7,__PT_R0(%r9) stm %r0,%r7,__PT_R0(%r9)
mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
xc __PT_FLAGS(4,%r9),__PT_FLAGS(%r9)
mvi __PT_FLAGS+3(%r9),_PIF_SYSCALL
# setup saved register 15 # setup saved register 15
st %r15,28(%r11) # r15 stack pointer st %r15,28(%r11) # r15 stack pointer
# set new psw address and exit # set new psw address and exit
......
...@@ -42,13 +42,11 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER ...@@ -42,13 +42,11 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
_TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE) _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_SYSCALL_TRACEPOINT)
_TIF_MCCK_PENDING | _TIF_ASCE) _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ _PIF_WORK = (_PIF_PER_TRAP)
_TIF_SYSCALL_TRACEPOINT)
_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_ASCE)
#define BASED(name) name-system_call(%r13) #define BASED(name) name-system_call(%r13)
...@@ -190,13 +188,7 @@ ENTRY(__switch_to) ...@@ -190,13 +188,7 @@ ENTRY(__switch_to)
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
lg %r15,__THREAD_ksp(%r3) # load kernel stack of next lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
llill %r6,_TIF_TRANSFER # transfer TIF bits lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
ng %r6,__TI_flags(%r4) # isolate TIF bits
jz 0f
og %r6,__TI_flags(%r5) # set TIF bits of next
stg %r6,__TI_flags(%r5)
ni __TI_flags+7(%r4),255-_TIF_TRANSFER # clear TIF bits of prev
0: lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
br %r14 br %r14
__critical_start: __critical_start:
...@@ -211,6 +203,7 @@ sysc_stmg: ...@@ -211,6 +203,7 @@ sysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO lg %r12,__LC_THREAD_INFO
lghi %r14,_PIF_SYSCALL
sysc_per: sysc_per:
lg %r15,__LC_KERNEL_STACK lg %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
...@@ -221,8 +214,8 @@ sysc_vtime: ...@@ -221,8 +214,8 @@ sysc_vtime:
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
sysc_do_svc: sysc_do_svc:
oi __TI_flags+7(%r12),_TIF_SYSCALL
lg %r10,__TI_sysc_table(%r12) # address of system call table lg %r10,__TI_sysc_table(%r12) # address of system call table
llgh %r8,__PT_INT_CODE+2(%r11) llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0 slag %r8,%r8,2 # shift and test for svc 0
...@@ -238,7 +231,7 @@ sysc_nr_ok: ...@@ -238,7 +231,7 @@ sysc_nr_ok:
stg %r2,__PT_ORIG_GPR2(%r11) stg %r2,__PT_ORIG_GPR2(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15) stg %r7,STACK_FRAME_OVERHEAD(%r15)
lgf %r9,0(%r8,%r10) # get system call add. lgf %r9,0(%r8,%r10) # get system call add.
tm __TI_flags+6(%r12),_TIF_TRACE >> 8 tm __TI_flags+7(%r12),_TIF_TRACE
jnz sysc_tracesys jnz sysc_tracesys
basr %r14,%r9 # call sys_xxxx basr %r14,%r9 # call sys_xxxx
stg %r2,__PT_R2(%r11) # store return value stg %r2,__PT_R2(%r11) # store return value
...@@ -248,9 +241,12 @@ sysc_return: ...@@ -248,9 +241,12 @@ sysc_return:
sysc_tif: sysc_tif:
tm __PT_PSW+1(%r11),0x01 # returning to user ? tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno sysc_restore jno sysc_restore
tm __TI_flags+7(%r12),_TIF_WORK_SVC tm __PT_FLAGS+7(%r11),_PIF_WORK
jnz sysc_work
tm __TI_flags+7(%r12),_TIF_WORK
jnz sysc_work # check for work jnz sysc_work # check for work
ni __TI_flags+7(%r12),255-_TIF_SYSCALL tm __LC_CPU_FLAGS+7,_CIF_WORK
jnz sysc_work
sysc_restore: sysc_restore:
lg %r14,__LC_VDSO_PER_CPU lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11) lmg %r0,%r10,__PT_R0(%r11)
...@@ -265,17 +261,17 @@ sysc_done: ...@@ -265,17 +261,17 @@ sysc_done:
# One of the work bits is on. Find out which one. # One of the work bits is on. Find out which one.
# #
sysc_work: sysc_work:
tm __TI_flags+7(%r12),_TIF_MCCK_PENDING tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
jo sysc_mcck_pending jo sysc_mcck_pending
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule jo sysc_reschedule
tm __TI_flags+7(%r12),_TIF_PER_TRAP tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
jo sysc_singlestep jo sysc_singlestep
tm __TI_flags+7(%r12),_TIF_SIGPENDING tm __TI_flags+7(%r12),_TIF_SIGPENDING
jo sysc_sigpending jo sysc_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume jo sysc_notify_resume
tm __TI_flags+7(%r12),_TIF_ASCE tm __LC_CPU_FLAGS+7,_CIF_ASCE
jo sysc_uaccess jo sysc_uaccess
j sysc_return # beware of critical section cleanup j sysc_return # beware of critical section cleanup
...@@ -287,17 +283,17 @@ sysc_reschedule: ...@@ -287,17 +283,17 @@ sysc_reschedule:
jg schedule jg schedule
# #
# _TIF_MCCK_PENDING is set, call handler # _CIF_MCCK_PENDING is set, call handler
# #
sysc_mcck_pending: sysc_mcck_pending:
larl %r14,sysc_return larl %r14,sysc_return
jg s390_handle_mcck # TIF bit will be cleared by handler jg s390_handle_mcck # TIF bit will be cleared by handler
# #
# _TIF_ASCE is set, load user space asce # _CIF_ASCE is set, load user space asce
# #
sysc_uaccess: sysc_uaccess:
ni __TI_flags+7(%r12),255-_TIF_ASCE ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j sysc_return j sysc_return
...@@ -307,7 +303,7 @@ sysc_uaccess: ...@@ -307,7 +303,7 @@ sysc_uaccess:
sysc_sigpending: sysc_sigpending:
lgr %r2,%r11 # pass pointer to pt_regs lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_signal brasl %r14,do_signal
tm __TI_flags+7(%r12),_TIF_SYSCALL tm __PT_FLAGS+7(%r11),_PIF_SYSCALL
jno sysc_return jno sysc_return
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
lg %r10,__TI_sysc_table(%r12) # address of system call table lg %r10,__TI_sysc_table(%r12) # address of system call table
...@@ -327,10 +323,10 @@ sysc_notify_resume: ...@@ -327,10 +323,10 @@ sysc_notify_resume:
jg do_notify_resume jg do_notify_resume
# #
# _TIF_PER_TRAP is set, call do_per_trap # _PIF_PER_TRAP is set, call do_per_trap
# #
sysc_singlestep: sysc_singlestep:
ni __TI_flags+7(%r12),255-_TIF_PER_TRAP ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
lgr %r2,%r11 # pass pointer to pt_regs lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,sysc_return larl %r14,sysc_return
jg do_per_trap jg do_per_trap
...@@ -357,7 +353,7 @@ sysc_tracego: ...@@ -357,7 +353,7 @@ sysc_tracego:
basr %r14,%r9 # call sys_xxx basr %r14,%r9 # call sys_xxx
stg %r2,__PT_R2(%r11) # store return value stg %r2,__PT_R2(%r11) # store return value
sysc_tracenogo: sysc_tracenogo:
tm __TI_flags+6(%r12),_TIF_TRACE >> 8 tm __TI_flags+7(%r12),_TIF_TRACE
jz sysc_return jz sysc_return
lgr %r2,%r11 # pass pointer to pt_regs lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,sysc_return larl %r14,sysc_return
...@@ -416,12 +412,13 @@ ENTRY(pgm_check_handler) ...@@ -416,12 +412,13 @@ ENTRY(pgm_check_handler)
stmg %r8,%r9,__PT_PSW(%r11) stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
stg %r10,__PT_ARGS(%r11) stg %r10,__PT_ARGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception tm __LC_PGM_ILC+3,0x80 # check for per exception
jz 0f jz 0f
tmhh %r8,0x0001 # kernel per event ? tmhh %r8,0x0001 # kernel per event ?
jz pgm_kprobe jz pgm_kprobe
oi __TI_flags+7(%r12),_TIF_PER_TRAP oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r14),__LC_PER_CAUSE mvc __THREAD_per_cause(2,%r14),__LC_PER_CAUSE
mvc __THREAD_per_paid(1,%r14),__LC_PER_PAID mvc __THREAD_per_paid(1,%r14),__LC_PER_PAID
...@@ -451,10 +448,10 @@ pgm_kprobe: ...@@ -451,10 +448,10 @@ pgm_kprobe:
# single stepped system call # single stepped system call
# #
pgm_svcper: pgm_svcper:
oi __TI_flags+7(%r12),_TIF_PER_TRAP
mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
larl %r14,sysc_per larl %r14,sysc_per
stg %r14,__LC_RETURN_PSW+8 stg %r14,__LC_RETURN_PSW+8
lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs
/* /*
...@@ -479,6 +476,7 @@ io_skip: ...@@ -479,6 +476,7 @@ io_skip:
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11) stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF TRACE_IRQS_OFF
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
io_loop: io_loop:
...@@ -499,8 +497,10 @@ io_return: ...@@ -499,8 +497,10 @@ io_return:
LOCKDEP_SYS_EXIT LOCKDEP_SYS_EXIT
TRACE_IRQS_ON TRACE_IRQS_ON
io_tif: io_tif:
tm __TI_flags+7(%r12),_TIF_WORK_INT tm __TI_flags+7(%r12),_TIF_WORK
jnz io_work # there is work to do (signals etc.) jnz io_work # there is work to do (signals etc.)
tm __LC_CPU_FLAGS+7,_CIF_WORK
jnz io_work
io_restore: io_restore:
lg %r14,__LC_VDSO_PER_CPU lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11) lmg %r0,%r10,__PT_R0(%r11)
...@@ -513,7 +513,7 @@ io_done: ...@@ -513,7 +513,7 @@ io_done:
# #
# There is work todo, find out in which context we have been interrupted: # There is work todo, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK_INT work # 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to # 2) if we return to kernel code and kvm is enabled check if we need to
# modify the psw to leave SIE # modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check # 3) if we return to kernel code and preemptive scheduling is enabled check
...@@ -557,11 +557,9 @@ io_work_user: ...@@ -557,11 +557,9 @@ io_work_user:
# #
# One of the work bits is on. Find out which one. # One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
# and _TIF_MCCK_PENDING
# #
io_work_tif: io_work_tif:
tm __TI_flags+7(%r12),_TIF_MCCK_PENDING tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
jo io_mcck_pending jo io_mcck_pending
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo io_reschedule jo io_reschedule
...@@ -569,12 +567,12 @@ io_work_tif: ...@@ -569,12 +567,12 @@ io_work_tif:
jo io_sigpending jo io_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo io_notify_resume jo io_notify_resume
tm __TI_flags+7(%r12),_TIF_ASCE tm __LC_CPU_FLAGS+7,_CIF_ASCE
jo io_uaccess jo io_uaccess
j io_return # beware of critical section cleanup j io_return # beware of critical section cleanup
# #
# _TIF_MCCK_PENDING is set, call handler # _CIF_MCCK_PENDING is set, call handler
# #
io_mcck_pending: io_mcck_pending:
# TRACE_IRQS_ON already done at io_return # TRACE_IRQS_ON already done at io_return
...@@ -583,10 +581,10 @@ io_mcck_pending: ...@@ -583,10 +581,10 @@ io_mcck_pending:
j io_return j io_return
# #
# _TIF_ASCE is set, load user space asce # _CIF_ASCE is set, load user space asce
# #
io_uaccess: io_uaccess:
ni __TI_flags+7(%r12),255-_TIF_ASCE ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j io_return j io_return
...@@ -650,6 +648,7 @@ ext_skip: ...@@ -650,6 +648,7 @@ ext_skip:
mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF TRACE_IRQS_OFF
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs lgr %r2,%r11 # pass pointer to pt_regs
...@@ -716,6 +715,7 @@ mcck_skip: ...@@ -716,6 +715,7 @@ mcck_skip:
stmg %r0,%r7,__PT_R0(%r11) stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),0(%r14) mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11) stmg %r8,%r9,__PT_PSW(%r11)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,s390_do_machine_check brasl %r14,s390_do_machine_check
...@@ -727,7 +727,7 @@ mcck_skip: ...@@ -727,7 +727,7 @@ mcck_skip:
la %r11,STACK_FRAME_OVERHEAD(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1 lgr %r15,%r1
ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
tm __TI_flags+7(%r12),_TIF_MCCK_PENDING tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
jno mcck_return jno mcck_return
TRACE_IRQS_OFF TRACE_IRQS_OFF
brasl %r14,s390_handle_mcck brasl %r14,s390_handle_mcck
...@@ -884,6 +884,8 @@ cleanup_system_call: ...@@ -884,6 +884,8 @@ cleanup_system_call:
stmg %r0,%r7,__PT_R0(%r9) stmg %r0,%r7,__PT_R0(%r9)
mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
# setup saved register r15 # setup saved register r15
stg %r15,56(%r11) # r15 stack pointer stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit # set new psw address and exit
......
...@@ -55,7 +55,7 @@ void s390_handle_mcck(void) ...@@ -55,7 +55,7 @@ void s390_handle_mcck(void)
local_mcck_disable(); local_mcck_disable();
mcck = __get_cpu_var(cpu_mcck); mcck = __get_cpu_var(cpu_mcck);
memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct)); memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct));
clear_thread_flag(TIF_MCCK_PENDING); clear_cpu_flag(CIF_MCCK_PENDING);
local_mcck_enable(); local_mcck_enable();
local_irq_restore(flags); local_irq_restore(flags);
...@@ -313,7 +313,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) ...@@ -313,7 +313,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
*/ */
mcck->kill_task = 1; mcck->kill_task = 1;
mcck->mcck_code = *(unsigned long long *) mci; mcck->mcck_code = *(unsigned long long *) mci;
set_thread_flag(TIF_MCCK_PENDING); set_cpu_flag(CIF_MCCK_PENDING);
} else { } else {
/* /*
* Couldn't restore all register contents while in * Couldn't restore all register contents while in
...@@ -352,12 +352,12 @@ void notrace s390_do_machine_check(struct pt_regs *regs) ...@@ -352,12 +352,12 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
if (mci->cp) { if (mci->cp) {
/* Channel report word pending */ /* Channel report word pending */
mcck->channel_report = 1; mcck->channel_report = 1;
set_thread_flag(TIF_MCCK_PENDING); set_cpu_flag(CIF_MCCK_PENDING);
} }
if (mci->w) { if (mci->w) {
/* Warning pending */ /* Warning pending */
mcck->warning = 1; mcck->warning = 1;
set_thread_flag(TIF_MCCK_PENDING); set_cpu_flag(CIF_MCCK_PENDING);
} }
nmi_exit(); nmi_exit();
} }
......
...@@ -64,7 +64,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk) ...@@ -64,7 +64,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
void arch_cpu_idle(void) void arch_cpu_idle(void)
{ {
local_mcck_disable(); local_mcck_disable();
if (test_thread_flag(TIF_MCCK_PENDING)) { if (test_cpu_flag(CIF_MCCK_PENDING)) {
local_mcck_enable(); local_mcck_enable();
local_irq_enable(); local_irq_enable();
return; return;
...@@ -76,7 +76,7 @@ void arch_cpu_idle(void) ...@@ -76,7 +76,7 @@ void arch_cpu_idle(void)
void arch_cpu_idle_exit(void) void arch_cpu_idle_exit(void)
{ {
if (test_thread_flag(TIF_MCCK_PENDING)) if (test_cpu_flag(CIF_MCCK_PENDING))
s390_handle_mcck(); s390_handle_mcck();
} }
...@@ -123,7 +123,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, ...@@ -123,7 +123,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP); clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
clear_tsk_thread_flag(p, TIF_PER_TRAP);
/* Initialize per thread user and system timer values */ /* Initialize per thread user and system timer values */
ti = task_thread_info(p); ti = task_thread_info(p);
ti->user_timer = 0; ti->user_timer = 0;
...@@ -152,6 +151,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, ...@@ -152,6 +151,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
} }
frame->childregs = *current_pt_regs(); frame->childregs = *current_pt_regs();
frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */ frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */
frame->childregs.flags = 0;
if (new_stackp) if (new_stackp)
frame->childregs.gprs[15] = new_stackp; frame->childregs.gprs[15] = new_stackp;
......
...@@ -136,7 +136,7 @@ void ptrace_disable(struct task_struct *task) ...@@ -136,7 +136,7 @@ void ptrace_disable(struct task_struct *task)
memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
clear_tsk_thread_flag(task, TIF_SINGLE_STEP); clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
clear_tsk_thread_flag(task, TIF_PER_TRAP); clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
task->thread.per_flags = 0; task->thread.per_flags = 0;
} }
...@@ -813,7 +813,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) ...@@ -813,7 +813,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
* debugger stored an invalid system call number. Skip * debugger stored an invalid system call number. Skip
* the system call and the system call restart handling. * the system call and the system call restart handling.
*/ */
clear_thread_flag(TIF_SYSCALL); clear_pt_regs_flag(regs, PIF_SYSCALL);
ret = -1; ret = -1;
} }
......
...@@ -113,7 +113,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) ...@@ -113,7 +113,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
sizeof(current->thread.fp_regs)); sizeof(current->thread.fp_regs));
restore_fp_regs(current->thread.fp_regs.fprs); restore_fp_regs(current->thread.fp_regs.fprs);
clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
return 0; return 0;
} }
...@@ -356,7 +356,7 @@ void do_signal(struct pt_regs *regs) ...@@ -356,7 +356,7 @@ void do_signal(struct pt_regs *regs)
* call information. * call information.
*/ */
current_thread_info()->system_call = current_thread_info()->system_call =
test_thread_flag(TIF_SYSCALL) ? regs->int_code : 0; test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;
signr = get_signal_to_deliver(&info, &ka, regs, NULL); signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) { if (signr > 0) {
...@@ -384,7 +384,7 @@ void do_signal(struct pt_regs *regs) ...@@ -384,7 +384,7 @@ void do_signal(struct pt_regs *regs)
} }
} }
/* No longer in a system call */ /* No longer in a system call */
clear_thread_flag(TIF_SYSCALL); clear_pt_regs_flag(regs, PIF_SYSCALL);
if (is_compat_task()) if (is_compat_task())
handle_signal32(signr, &ka, &info, oldset, regs); handle_signal32(signr, &ka, &info, oldset, regs);
...@@ -394,7 +394,7 @@ void do_signal(struct pt_regs *regs) ...@@ -394,7 +394,7 @@ void do_signal(struct pt_regs *regs)
} }
/* No handlers present - check for system call restart */ /* No handlers present - check for system call restart */
clear_thread_flag(TIF_SYSCALL); clear_pt_regs_flag(regs, PIF_SYSCALL);
if (current_thread_info()->system_call) { if (current_thread_info()->system_call) {
regs->int_code = current_thread_info()->system_call; regs->int_code = current_thread_info()->system_call;
switch (regs->gprs[2]) { switch (regs->gprs[2]) {
...@@ -407,9 +407,9 @@ void do_signal(struct pt_regs *regs) ...@@ -407,9 +407,9 @@ void do_signal(struct pt_regs *regs)
case -ERESTARTNOINTR: case -ERESTARTNOINTR:
/* Restart system call with magic TIF bit. */ /* Restart system call with magic TIF bit. */
regs->gprs[2] = regs->orig_gpr2; regs->gprs[2] = regs->orig_gpr2;
set_thread_flag(TIF_SYSCALL); set_pt_regs_flag(regs, PIF_SYSCALL);
if (test_thread_flag(TIF_SINGLE_STEP)) if (test_thread_flag(TIF_SINGLE_STEP))
set_thread_flag(TIF_PER_TRAP); clear_pt_regs_flag(regs, PIF_PER_TRAP);
break; break;
} }
} }
......
...@@ -906,7 +906,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu) ...@@ -906,7 +906,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
if (need_resched()) if (need_resched())
schedule(); schedule();
if (test_thread_flag(TIF_MCCK_PENDING)) if (test_cpu_flag(CIF_MCCK_PENDING))
s390_handle_mcck(); s390_handle_mcck();
if (!kvm_is_ucontrol(vcpu->kvm)) if (!kvm_is_ucontrol(vcpu->kvm))
......
...@@ -415,7 +415,7 @@ static inline int do_exception(struct pt_regs *regs, int access) ...@@ -415,7 +415,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
* The instruction that caused the program check has * The instruction that caused the program check has
* been nullified. Don't signal single step via SIGTRAP. * been nullified. Don't signal single step via SIGTRAP.
*/ */
clear_tsk_thread_flag(tsk, TIF_PER_TRAP); clear_pt_regs_flag(regs, PIF_PER_TRAP);
if (notify_page_fault(regs)) if (notify_page_fault(regs))
return 0; return 0;
......