Commit 1e54622e authored by Martin Schwidefsky, committed by Martin Schwidefsky

[S390] cleanup lowcore access from program checks

Read all required fields for program checks from the lowcore in the
first level interrupt handler in entry[64].S. If the context that
caused the fault was enabled for interrupts, we can now re-enable the
irqs in entry[64].S.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 84afdcee
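
Before the per-file diffs, a brief orientation: with this patch the program check handlers no longer read the translation exception code from the lowcore themselves; entry[64].S loads __LC_PGM_ILC and __LC_TRANS_EXC_CODE once and passes both values to the handler selected from pgm_check_table. The standalone C sketch below only models that calling convention; struct pt_regs, the table contents, and the dispatch in main() are simplified stand-ins for illustration, not kernel code.

/*
 * Standalone illustration only: it models the new convention in which
 * entry[64].S passes the program interruption code and the translation
 * exception code to the handler, instead of each handler reading
 * S390_lowcore.trans_exc_code itself. pt_regs, the table and the
 * dispatch below are mocks, not kernel code.
 */
#include <stdio.h>

struct pt_regs {
	unsigned long psw_addr;		/* mock: just enough for the example */
};

/* New handler signature from this patch (see the typedef change below). */
typedef void pgm_check_handler_t(struct pt_regs *, long, unsigned long);

static void mock_fault_handler(struct pt_regs *regs, long pgm_int_code,
			       unsigned long trans_exc_code)
{
	/* mirrors address = trans_exc_code & __FAIL_ADDR_MASK in the fault code */
	unsigned long address = trans_exc_code & ~0xfffUL;

	printf("pgm_int_code=%#lx, fault address=%#lx\n",
	       (unsigned long) pgm_int_code, address);
	(void) regs;
}

/* 128 slots, indexed by the low 7 bits of the interruption code,
 * matching the "la %r8,0x7f / nr %r8,%r3" masking in entry[64].S. */
static pgm_check_handler_t *mock_pgm_check_table[128] = {
	[0x04] = mock_fault_handler,	/* 0x04 = protection exception */
};

int main(void)
{
	struct pt_regs regs = { .psw_addr = 0x10000 };
	/* entry[64].S reads both values from the lowcore exactly once... */
	long pgm_int_code = 0x0004;
	unsigned long trans_exc_code = 0x7ffff000 | 4;

	/* ...and hands them to the handler picked from the table. */
	mock_pgm_check_table[pgm_int_code & 0x7f](&regs, pgm_int_code,
						  trans_exc_code);
	return 0;
}

The real dispatch happens in assembler (the 0x7f masking followed by the indexed branch), and the handlers with the new three-argument signature are the ones updated in the fault handling hunks at the end of this diff.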
@@ -84,6 +84,7 @@ int main(void)
 DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
 DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
 DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
+DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
 DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid));
 DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
 DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
...
@@ -72,25 +72,9 @@ STACK_SIZE = 1 << STACK_SHIFT
 l %r1,BASED(.Ltrace_irq_off_caller)
 basr %r14,%r1
 .endm
-.macro TRACE_IRQS_CHECK_ON
-tm SP_PSW(%r15),0x03 # irqs enabled?
-bz BASED(0f)
-TRACE_IRQS_ON
-0:
-.endm
-.macro TRACE_IRQS_CHECK_OFF
-tm SP_PSW(%r15),0x03 # irqs enabled?
-bz BASED(0f)
-TRACE_IRQS_OFF
-0:
-.endm
 #else
 #define TRACE_IRQS_ON
 #define TRACE_IRQS_OFF
-#define TRACE_IRQS_CHECK_ON
-#define TRACE_IRQS_CHECK_OFF
 #endif
 #ifdef CONFIG_LOCKDEP
@@ -198,6 +182,12 @@ STACK_SIZE = 1 << STACK_SHIFT
 lpsw \psworg # back to caller
 .endm
+.macro REENABLE_IRQS
+mvc __SF_EMPTY(1,%r15),SP_PSW(%r15)
+ni __SF_EMPTY(%r15),0xbf
+ssm __SF_EMPTY(%r15)
+.endm
 /*
 * Scheduler resume function, called by switch_to
 * gpr2 = (task_struct *) prev
@@ -440,13 +430,11 @@ kernel_execve:
 br %r14
 # execve succeeded.
 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
-TRACE_IRQS_OFF
 l %r15,__LC_KERNEL_STACK # load ksp
 s %r15,BASED(.Lc_spsize) # make room for registers & psw
 l %r9,__LC_THREAD_INFO
 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-TRACE_IRQS_ON
 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
 l %r1,BASED(.Lexecve_tail)
 basr %r14,%r1
@@ -483,9 +471,10 @@ pgm_check_handler:
 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime:
-TRACE_IRQS_CHECK_OFF
 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
 l %r3,__LC_PGM_ILC # load program interruption code
+l %r4,__LC_TRANS_EXC_CODE
+REENABLE_IRQS
 la %r8,0x7f
 nr %r8,%r3
 pgm_do_call:
@@ -495,7 +484,6 @@ pgm_do_call:
 la %r2,SP_PTREGS(%r15) # address of register-save area
 basr %r14,%r7 # branch to interrupt-handler
 pgm_exit:
-TRACE_IRQS_CHECK_ON
 b BASED(sysc_return)
 #
@@ -523,7 +511,6 @@ pgm_per_std:
 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime2:
-TRACE_IRQS_CHECK_OFF
 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
 l %r1,__TI_task(%r9)
 tm SP_PSW+1(%r15),0x01 # kernel per event ?
@@ -533,6 +520,8 @@ pgm_no_vtime2:
 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
 l %r3,__LC_PGM_ILC # load program interruption code
+l %r4,__LC_TRANS_EXC_CODE
+REENABLE_IRQS
 la %r8,0x7f
 nr %r8,%r3 # clear per-event-bit and ilc
 be BASED(pgm_exit2) # only per or per+check ?
@@ -542,8 +531,6 @@ pgm_no_vtime2:
 la %r2,SP_PTREGS(%r15) # address of register-save area
 basr %r14,%r7 # branch to interrupt-handler
 pgm_exit2:
-TRACE_IRQS_ON
-stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
 b BASED(sysc_return)
 #
@@ -557,13 +544,11 @@ pgm_svcper:
 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 lh %r7,0x8a # get svc number from lowcore
 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
-TRACE_IRQS_OFF
 l %r8,__TI_task(%r9)
 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
 mvc __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS
 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
-TRACE_IRQS_ON
 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
 lm %r2,%r6,SP_R2(%r15) # load svc arguments
 b BASED(sysc_do_svc)
...
@@ -5,7 +5,7 @@
 #include <linux/signal.h>
 #include <asm/ptrace.h>
-typedef void pgm_check_handler_t(struct pt_regs *, long);
+typedef void pgm_check_handler_t(struct pt_regs *, long, unsigned long);
 extern pgm_check_handler_t *pgm_check_table[128];
 pgm_check_handler_t do_protection_exception;
 pgm_check_handler_t do_dat_exception;
...
@@ -79,25 +79,9 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
 basr %r2,%r0
 brasl %r14,trace_hardirqs_off_caller
 .endm
-.macro TRACE_IRQS_CHECK_ON
-tm SP_PSW(%r15),0x03 # irqs enabled?
-jz 0f
-TRACE_IRQS_ON
-0:
-.endm
-.macro TRACE_IRQS_CHECK_OFF
-tm SP_PSW(%r15),0x03 # irqs enabled?
-jz 0f
-TRACE_IRQS_OFF
-0:
-.endm
 #else
 #define TRACE_IRQS_ON
 #define TRACE_IRQS_OFF
-#define TRACE_IRQS_CHECK_ON
-#define TRACE_IRQS_CHECK_OFF
 #endif
 #ifdef CONFIG_LOCKDEP
@@ -207,6 +191,12 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
 0:
 .endm
+.macro REENABLE_IRQS
+mvc __SF_EMPTY(1,%r15),SP_PSW(%r15)
+ni __SF_EMPTY(%r15),0xbf
+ssm __SF_EMPTY(%r15)
+.endm
 /*
 * Scheduler resume function, called by switch_to
 * gpr2 = (task_struct *) prev
@@ -443,14 +433,12 @@ kernel_execve:
 br %r14
 # execve succeeded.
 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
-# TRACE_IRQS_OFF
 lg %r15,__LC_KERNEL_STACK # load ksp
 aghi %r15,-SP_SIZE # make room for registers & psw
 lg %r13,__LC_SVC_NEW_PSW+8
 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
 lg %r12,__LC_THREAD_INFO
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-# TRACE_IRQS_ON
 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
 brasl %r14,execve_tail
 j sysc_return
@@ -490,19 +478,18 @@ pgm_check_handler:
 LAST_BREAK
 pgm_no_vtime:
 HANDLE_SIE_INTERCEPT
-TRACE_IRQS_CHECK_OFF
 stg %r11,SP_ARGS(%r15)
 lgf %r3,__LC_PGM_ILC # load program interruption code
+lg %r4,__LC_TRANS_EXC_CODE
+REENABLE_IRQS
 lghi %r8,0x7f
 ngr %r8,%r3
-pgm_do_call:
 sll %r8,3
 larl %r1,pgm_check_table
 lg %r1,0(%r8,%r1) # load address of handler routine
 la %r2,SP_PTREGS(%r15) # address of register-save area
 basr %r14,%r1 # branch to interrupt-handler
 pgm_exit:
-TRACE_IRQS_CHECK_ON
 j sysc_return
 #
@@ -533,7 +520,6 @@ pgm_per_std:
 LAST_BREAK
 pgm_no_vtime2:
 HANDLE_SIE_INTERCEPT
-TRACE_IRQS_CHECK_OFF
 lg %r1,__TI_task(%r12)
 tm SP_PSW+1(%r15),0x01 # kernel per event ?
 jz kernel_per
@@ -542,6 +528,8 @@ pgm_no_vtime2:
 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
 lgf %r3,__LC_PGM_ILC # load program interruption code
+lg %r4,__LC_TRANS_EXC_CODE
+REENABLE_IRQS
 lghi %r8,0x7f
 ngr %r8,%r3 # clear per-event-bit and ilc
 je pgm_exit2
@@ -551,8 +539,6 @@ pgm_no_vtime2:
 la %r2,SP_PTREGS(%r15) # address of register-save area
 basr %r14,%r1 # branch to interrupt-handler
 pgm_exit2:
-TRACE_IRQS_ON
-stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
 j sysc_return
 #
@@ -568,13 +554,11 @@ pgm_svcper:
 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 LAST_BREAK
-TRACE_IRQS_OFF
 lg %r8,__TI_task(%r12)
 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
 mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
-TRACE_IRQS_ON
 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
 j sysc_do_svc
...
This diff is collapsed.
@@ -333,12 +333,6 @@ static inline int do_exception(struct pt_regs *regs, int access,
 goto out;
 address = trans_exc_code & __FAIL_ADDR_MASK;
-/*
- * When we get here, the fault happened in the current
- * task's user address space, so we can switch on the
- * interrupts again and then search the VMAs
- */
-local_irq_enable();
 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 down_read(&mm->mmap_sem);
@@ -397,20 +391,20 @@ static inline int do_exception(struct pt_regs *regs, int access,
 return fault;
 }
-void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
+unsigned long trans_exc_code)
 {
-unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
 int fault;
 /* Protection exception is supressing, decrement psw address. */
-regs->psw.addr -= (int_code >> 16);
+regs->psw.addr -= (pgm_int_code >> 16);
 /*
 * Check for low-address protection. This needs to be treated
 * as a special case because the translation exception code
 * field is not guaranteed to contain valid data in this case.
 */
 if (unlikely(!(trans_exc_code & 4))) {
-do_low_address(regs, int_code, trans_exc_code);
+do_low_address(regs, pgm_int_code, trans_exc_code);
 return;
 }
 fault = do_exception(regs, VM_WRITE, trans_exc_code);
@@ -418,9 +412,9 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
 do_fault_error(regs, 4, trans_exc_code, fault);
 }
-void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
+unsigned long trans_exc_code)
 {
-unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
 int access, fault;
 access = VM_READ | VM_EXEC | VM_WRITE;
@@ -431,21 +425,19 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
 #endif
 fault = do_exception(regs, access, trans_exc_code);
 if (unlikely(fault))
-do_fault_error(regs, int_code & 255, trans_exc_code, fault);
+do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
 }
 #ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
+unsigned long trans_exc_code)
 {
-unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
 struct mm_struct *mm = current->mm;
 struct vm_area_struct *vma;
 if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
 goto no_context;
-local_irq_enable();
 down_read(&mm->mmap_sem);
 vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
 up_read(&mm->mmap_sem);
@@ -457,16 +449,16 @@ void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
 /* User mode accesses just cause a SIGSEGV */
 if (regs->psw.mask & PSW_MASK_PSTATE) {
-do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
+do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code);
 return;
 }
 no_context:
-do_no_context(regs, int_code, trans_exc_code);
+do_no_context(regs, pgm_int_code, trans_exc_code);
 }
 #endif
-int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
+int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 {
 struct pt_regs regs;
 int access, fault;
@@ -477,14 +469,14 @@ int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
 regs.psw.addr = (unsigned long) __builtin_return_address(0);
 regs.psw.addr |= PSW_ADDR_AMODE;
 uaddr &= PAGE_MASK;
-access = write_user ? VM_WRITE : VM_READ;
+access = write ? VM_WRITE : VM_READ;
 fault = do_exception(&regs, access, uaddr | 2);
 if (unlikely(fault)) {
 if (fault & VM_FAULT_OOM) {
 pagefault_out_of_memory();
 fault = 0;
 } else if (fault & VM_FAULT_SIGBUS)
-do_sigbus(&regs, int_code, uaddr);
+do_sigbus(&regs, pgm_int_code, uaddr);
 }
 return fault ? -EFAULT : 0;
 }
...