Commit 0c082819 authored by David Mosberger

ia64: Make fsyscalls work again. They broke because the streamlined syscall path

	didn't preserve b6 and r11 anymore.  Unfortunately, preserving them costs
	a few cycles (~5 cycles in the cached case).  The uncached case is hopefully
	mostly unaffected because the number of cache-lines touched is the same
	(without preserving b6 and r11, the entry-path _almost_ got away with
	touching a single 128-byte cacheline, but not quite, because r8 also
	had to be initialized).
parent cc25b4d7
@@ -602,7 +602,8 @@ END(ia64_ret_from_syscall)
* f6-f11: cleared
* pr: restored (user-level pr)
* b0: restored (user-level rp)
* b6-b7: cleared
* b6: restored
* b7: cleared
* ar.unat: restored (user-level ar.unat)
* ar.pfs: restored (user-level ar.pfs)
* ar.rsc: restored (user-level ar.rsc)
@@ -615,6 +616,10 @@ END(ia64_ret_from_syscall)
*/
GLOBAL_ENTRY(ia64_leave_syscall)
PT_REGS_UNWIND_INFO(0)
/*
* work.need_resched etc. mustn't get changed by this CPU before it returns to
* user- or fsys-mode, hence we disable interrupts early on:
*/
#ifdef CONFIG_PREEMPT
rsm psr.i // disable interrupts
#else
@@ -622,46 +627,37 @@ GLOBAL_ENTRY(ia64_leave_syscall)
#endif
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
/*
* work.need_resched etc. mustn't get changed by this CPU before it returns to
* user- or fsys-mode, hence we disable interrupts early on:
*/
.work_processed_syscall:
#ifdef CONFIG_PREEMPT
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
(pKStk) ld4 r21=[r20] // preempt_count ->r21
;;
(pKStk) cmp4.eq p6,p0=r21,r0 // p6 <- preempt_count == 0
;;
#else /* CONFIG_PREEMPT */
;;
(pUStk) adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
.pred.rel.mutex pUStk,pKStk
(pKStk) ld4 r21=[r20] // r21 <- preempt_count
(pUStk) mov r21=0 // r21 <- 0
;;
(p6) cmp.eq.unc p6,p0=r21,r0 // p6 <- p6 && (r21 == 0)
#endif /* CONFIG_PREEMPT */
.work_processed_syscall:
(p6) ld4 r31=[r17] // load current_thread_info()->flags
adds r16=PT(LOADRS)+16,r12
adds r18=PT(AR_BSPSTORE)+16,r12
adds r17=PT(AR_BSPSTORE)+16,r12
adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
;;
(p6) ld4 r31=[r18] // load current_thread_info()->flags
ld8 r19=[r16],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
nop.i 0
;;
ld8 r19=[r16] // load ar.rsc value for "loadrs"
ld8 r23=[r18],16 // load ar.bspstore (may be garbage)
ld8 r23=[r17],PT(R9)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
ld8 r22=[r16],PT(R8)-PT(B6) // load b6
(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
;;
mov ar.ssd=r0
mov f6=f0 // clear f6
mov.m ar.ccv=r0 // clear ar.ccv
(p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending?
;;
mov ar.ccv=r0 // clear ar.ccv
mov b6=r0 // clear b6
(p6) br.cond.spnt .work_pending
;;
adds r16=PT(R8)+16,r12
adds r17=PT(R9)+16,r12
;;
// start restoring the state saved on the kernel stack (struct pt_regs):
ld8.fill r8=[r16],16
ld8.fill r9=[r17],16
mov f6=f0 // clear f6
;;
ld8.fill r10=[r16],16
ld8.fill r11=[r17],16
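Stepping back from the diff for a moment: the work-pending gate at the top of ia64_leave_syscall (interrupts off, optional preempt_count test, TIF_WORK_MASK check, branch to .work_pending) is dense predicated code. The following C sketch renders the same decision; the helper names are hypothetical, and TIF_WORK_MASK/CONFIG_PREEMPT are the kernel's own symbols, so this is illustration rather than compilable kernel code:

/* Sketch: should this exit path look at current_thread_info()->flags at all?
 * Mirrors the p6 predicate computed above. */
static int must_check_work(int returning_to_user, int preempt_count)
{
#ifdef CONFIG_PREEMPT
	/* user returns always; kernel returns only when preemptible */
	return returning_to_user || preempt_count == 0;
#else
	return returning_to_user;	/* !CONFIG_PREEMPT: only user returns */
#endif
}

/* ...and if so, "(p6) and r15=TIF_WORK_MASK,r31" plus the branch amount to: */
static int work_pending(unsigned long tif_flags)
{
	return (tif_flags & TIF_WORK_MASK) != 0;	/* -> br .work_pending */
}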
@@ -669,6 +665,7 @@ GLOBAL_ENTRY(ia64_leave_syscall)
;;
ld8 r29=[r16],16 // load cr.ipsr
ld8 r28=[r17],16 // load cr.iip
mov f8=f0 // clear f8
;;
ld8 r30=[r16],16 // load cr.ifs
ld8 r25=[r17],16 // load ar.unat
@@ -676,18 +673,19 @@ GLOBAL_ENTRY(ia64_leave_syscall)
;;
rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
invala // invalidate ALAT
mov f8=f0 // clear f8
mov ar.csd=r0 // clear ar.csd
mov f9=f0 // clear f9
mov.m ar.ssd=r0 // clear ar.ssd
mov.m ar.csd=r0 // clear ar.csd
mov f10=f0 // clear f10
;;
ld8 r26=[r16],16 // load ar.pfs
ld8 r27=[r17],PT(PR)-PT(AR_RSC) // load ar.rsc
mov f10=f0 // clear f10
mov f11=f0 // clear f11
;;
ld8 r24=[r16],PT(B0)-PT(AR_RNAT) // load ar.rnat (may be garbage)
ld8 r31=[r17],PT(R1)-PT(PR) // load predicates
mov f11=f0 // clear f11
mov b6=r22 // restore b6
;;
ld8 r21=[r16],PT(R12)-PT(B0) // load b0
ld8.fill r1=[r17],16 // load r1
@@ -703,7 +701,6 @@ GLOBAL_ENTRY(ia64_leave_syscall)
;;
mov r16=ar.bsp // get existing backing store pointer
srlz.i // ensure interruption collection is off
mov r22=r0 // clear r22
(pUStk) mov r3=1
;;
(pUStk) st1 [r14]=r3
@@ -729,41 +726,44 @@ END(ia64_ret_from_execve_syscall)
// fall through
GLOBAL_ENTRY(ia64_leave_kernel)
PT_REGS_UNWIND_INFO(0)
cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
/*
* work.need_resched etc. mustn't get changed by this CPU before it returns to
* user- or fsys-mode, hence we disable interrupts early on:
*/
#ifdef CONFIG_PREEMPT
rsm psr.i // disable interrupts
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
(pKStk) ld4 r21=[r20] // preempt_count ->r21
;;
(pKStk) cmp4.eq p6,p0=r21,r0 // p6 <- preempt_count == 0
;;
#else /* CONFIG_PREEMPT */
#else
(pUStk) rsm psr.i
#endif
cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
;;
(pUStk) adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
.work_processed_kernel:
#ifdef CONFIG_PREEMPT
adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
.pred.rel.mutex pUStk,pKStk
(pKStk) ld4 r21=[r20] // r21 <- preempt_count
(pUStk) mov r21=0 // r21 <- 0
;;
(p6) cmp.eq.unc p6,p0=r21,r0 // p6 <- p6 && (r21 == 0)
#endif /* CONFIG_PREEMPT */
.work_processed_kernel:
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
;;
(p6) ld4 r31=[r17] // load current_thread_info()->flags
adds r22=PT(CR_IPSR)+16,r12
adds r21=PT(PR)+16,r12
;;
lfetch [r21],PT(CR_IPSR)-PT(PR)
adds r2=PT(B6)+16,r12
adds r3=PT(R16)+16,r12
;;
ld8 r28=[r2],8 // b6
ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
lfetch [r21]
ld8 r28=[r2],8 // load b6
adds r29=PT(R24)+16,r12
ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
adds r30=PT(AR_CCV)+16,r12
;;
lfetch [r22]
lfetch [r21]
(p6) and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
;;
ld8.fill r24=[r29]
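The lfetch instructions above warm the pt_regs cache lines that the restore sequence is about to walk. The closest C rendering uses a builtin prefetch hint; the offsets below are assumptions for illustration, not the real pt_regs layout:

/* Cache-warming hint in C (sketch; pt_regs treated as opaque,
 * 0 and 128 are stand-in offsets for the two 128-byte lines). */
struct pt_regs;

static inline void prefetch_restore_lines(struct pt_regs *regs)
{
	__builtin_prefetch((char *)regs);		/* first line of saved state */
	__builtin_prefetch((char *)regs + 128);	/* second line (pr/cr.ipsr area) */
}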
@@ -1008,7 +1008,6 @@ skip_rbs_switch:
.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1
rsm psr.i // disable interrupts
;;
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
#ifdef CONFIG_PREEMPT
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
@@ -74,8 +74,15 @@ GLOBAL_ENTRY(syscall_via_break)
.prologue
.altrp b6
.body
/*
* Note: for (fast) syscall restart to work, the break instruction must be
* the first one in the bundle addressed by syscall_via_break.
*/
{ .mib
break 0x100000
nop.i 0
br.ret.sptk.many b6
}
END(syscall_via_break)
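The restart note above is easy to miss: the restart path re-enters at the bundle address of syscall_via_break, i.e. at slot 0, so the break must occupy that slot or a different instruction would execute first. A minimal C sketch of the slot-0 rewind, assuming ipsr.ri ("ei") sits in bits 41-42 as the code below extracts it (helper name hypothetical):

/* Sketch: point execution back at slot 0 so the break re-executes. */
static void rewind_to_break(unsigned long *cr_ipsr)
{
	*cr_ipsr &= ~(3UL << 41);	/* ipsr.ri = 0 -> slot 0 of the bundle */
}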
GLOBAL_ENTRY(fsys_fallback_syscall)
@@ -645,7 +645,7 @@ ENTRY(break_fault)
* Registers to be saved & restored:
* CR registers: cr.ipsr, cr.iip, cr.ifs
* AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
* others: pr, b0, loadrs, r1, r12, r13, r15
* others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
* Registers to be restored only:
* r8-r11: output value from the system call.
*
@@ -658,12 +658,13 @@ ENTRY(break_fault)
mov r18=__IA64_BREAK_SYSCALL
mov r21=ar.fpsr
mov r29=cr.ipsr
mov r20=r1
mov r19=b6
mov r25=ar.unat
mov r27=ar.rsc
mov r26=ar.pfs
mov r28=cr.iip
mov r31=pr // prepare to save predicates
mov r20=r1
;;
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
cmp.eq p0,p7=r18,r17 // is this a system call? (p7 <- false, if so)
@@ -672,53 +673,51 @@ ENTRY(break_fault)
ld1 r17=[r16] // load current->thread.on_ustack flag
st1 [r16]=r0 // clear current->thread.on_ustack flag
adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // set r1 for MINSTATE_START_SAVE_MIN_VIRT
// switch from user to kernel RBS:
;;
invala
cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already?
;;
mov r30=r0
// switch from user to kernel RBS:
MINSTATE_START_SAVE_MIN_VIRT
br.call.sptk.many b7=setup_syscall_via_break
;;
mov r3=255
adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
;;
cmp.geu p6,p7=r3,r15 // (syscall > 0 && syscall <= 1024+255) ?
movl r16=sys_call_table
adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
movl r2=ia64_ret_from_syscall
;;
(p6) shladd r16=r15,3,r16
movl r15=ia64_ret_from_syscall
(p7) adds r16=(__NR_ni_syscall-1024)*8,r16 // force __NR_ni_syscall
shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
cmp.geu p0,p7=r3,r15 // (syscall > 0 && syscall <= 1024+255) ?
mov rp=r2 // set the real return addr
;;
ld8 r16=[r16] // load address of syscall entry point
mov rp=r15 // set the real return addr
(p7) add r20=(__NR_ni_syscall-1024)*8,r16 // force __NR_ni_syscall
add r16=PT(CR_IPSR)+16,sp // get pointer to cr_ipsr
add r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
mov b6=r16
ld8 r20=[r20] // load address of syscall entry point
ld8 r18=[r16] // fetch cr_ipsr
// arrange things so we skip over break instruction when returning:
adds r16=PT(CR_IPSR)+16,sp // get pointer to cr_ipsr
adds r17=PT(CR_IIP)+16,sp // get pointer to cr_iip
add r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld8 r18=[r16] // fetch cr_ipsr
ld4 r2=[r2] // r2 = current_thread_info()->flags
;;
ld8 r19=[r17] // fetch cr_iip
mov b6=r20
;;
extr.u r20=r18,41,2 // extract ei field
adds r19=16,r19 // compute address of next bundle
;;
cmp.eq p6,p7=2,r20 // isr.ei==2?
adds r19=16,r19 // compute address of next bundle
;;
(p6) mov r20=0 // clear ei to 0
(p7) adds r20=1,r20 // increment ei to next slot
;;
(p6) st8 [r17]=r19 // store new cr.iip if cr.isr.ei wrapped around
dep r18=r20,r18,41,2 // insert new ei into cr.isr
(p6) st8 [r17]=r19 // store new cr.iip if cr.ipsr.ei wrapped around
dep r18=r20,r18,41,2 // insert new ei into cr.ipsr
tbit.z p8,p0=r2,TIF_SYSCALL_TRACE
;;
st8 [r16]=r18 // store new value for cr.isr
st8 [r16]=r18 // store new value for cr.ipsr
(p8) br.call.sptk.many b6=b6 // ignore this return addr
br.cond.sptk ia64_trace_syscall
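Two idioms in the dispatch code above deserve a plain-C rendering. First, the cmp.geu against 255 after the -1024 bias performs both bounds checks with a single unsigned compare. Second, the ei/iip dance arranges to skip the break on return: bump the 2-bit slot index and, on wrap-around, step cr.iip to the next 16-byte bundle. A sketch with hypothetical struct and helper names:

/* 1) one unsigned compare checks 1024 <= nr <= 1024+255
 *    ("cmp.geu" with r3=255, r15=nr-1024) */
static int syscall_number_ok(unsigned long nr)
{
	return nr - 1024 <= 255;	/* wraps to a huge value when nr < 1024 */
}

/* 2) skip over the break: advance ipsr.ri, wrapping into the next bundle */
struct regs { unsigned long cr_ipsr, cr_iip; };

static void skip_break_slot(struct regs *r)
{
	unsigned long ei = (r->cr_ipsr >> 41) & 3;	/* extr.u r20=r18,41,2 */

	if (ei == 2) {			/* slot 2: wrap to slot 0 of next bundle */
		ei = 0;
		r->cr_iip += 16;	/* "(p6) st8 [r17]=r19" above */
	} else
		ei++;			/* "(p7) adds r20=1,r20" */
	r->cr_ipsr = (r->cr_ipsr & ~(3UL << 41)) | (ei << 41);	/* dep ...,41,2 */
}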
@@ -774,80 +773,88 @@ END(interrupt)
* potential NaT values from the input registers.
*/
ENTRY(setup_syscall_via_break)
alloc r19=ar.pfs,8,0,0,0
tnat.nz p8,p0=in0
add r16=PT(CR_IPSR),r1 /* initialize first base pointer */
;;
st8 [r16]=r29,16 /* save cr.ipsr */
adds r17=PT(CR_IIP),r1 /* initialize second base pointer */
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
#endif
st8 [r1]=r19 // save b6
add r16=PT(CR_IPSR),r1 // initialize first base pointer
add r17=PT(R11),r1 // initialize second base pointer
;;
(p8) mov in0=-1
alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
st8 [r16]=r29,PT(CR_IFS)-PT(CR_IPSR) // save cr.ipsr
tnat.nz p8,p0=in0
st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
tnat.nz p9,p0=in1
st8 [r17]=r28,16 /* save cr.iip */
(pKStk) mov r18=r0 // make sure r18 isn't NaT
;;
st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
mov r28=b0
(pKStk) mov r18=r0 /* make sure r18 isn't NaT */
(p8) mov in0=-1
;;
st8 [r16]=r0,PT(AR_PFS)-PT(CR_IFS) // clear cr.ifs
st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
(p9) mov in1=-1
tnat.nz p10,p0=in2
st8 [r16]=r30,16 /* save cr.ifs */
st8 [r17]=r25,16 /* save ar.unat */
(pUStk) sub r18=r18,r22 /* r18=RSE.ndirty*8 */
;;
st8 [r16]=r26,16 /* save ar.pfs */
st8 [r17]=r27,16 /* save ar.rsc */
st8 [r16]=r26,PT(AR_RNAT)-PT(AR_PFS) // save ar.pfs
st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
tnat.nz p10,p0=in2
(pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
tbit.nz p15,p0=r29,IA64_PSR_I_BIT
;; /* avoid RAW on r16 & r17 */
(p10) mov in2=-1
nop.f 0
tnat.nz p11,p0=in3
(pKStk) adds r16=16,r16 /* skip over ar_rnat field */
(pKStk) adds r17=16,r17 /* skip over ar_bspstore field */
shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
;;
(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
(p10) mov in2=-1
(p11) mov in3=-1
tnat.nz p12,p0=in4
(pUStk) st8 [r16]=r24,16 /* save ar.rnat */
(pUStk) st8 [r17]=r23,16 /* save ar.bspstore */
;;
(p12) mov in4=-1
tnat.nz p13,p0=in5
st8 [r16]=r31,16 /* save predicates */
st8 [r17]=r28,16 /* save b0 */
dep r14=-1,r0,61,3
;;
st8 [r16]=r18,16 /* save ar.rsc value for "loadrs" */
st8.spill [r17]=r20,16 /* save original r1 */
adds r2=IA64_PT_REGS_R16_OFFSET,r1
(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
;;
st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
st8 [r17]=r28,PT(R1)-PT(B0) // save b0
(p12) mov in4=-1
;;
st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
(p13) mov in5=-1
;;
.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
tnat.nz p14,p0=in6
.mem.offset 0,0; st8.spill [r16]=r12,16
.mem.offset 8,0; st8.spill [r17]=r13,16
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */
;;
(p14) mov in6=-1
st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
st8.spill [r17]=r15 // save r15
tnat.nz p8,p0=in7
.mem.offset 0,0; st8 [r16]=r21,16 /* ar.fpsr */
.mem.offset 8,0; st8.spill [r17]=r15,16
adds r12=-16,r1 /* switch to kernel memory stack (with 16 bytes of scratch) */
;;
mov r13=IA64_KR(CURRENT) /* establish `current' */
movl r1=__gp /* establish kernel global pointer */
;;
MINSTATE_END_SAVE_MIN_VIRT
stf8 [r16]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
(p14) mov in6=-1
tnat.nz p9,p0=r15
mov r13=IA64_KR(CURRENT) // establish `current'
movl r1=__gp // establish kernel global pointer
;;
(p8) mov in7=-1
tnat.nz p9,p0=r15
MINSTATE_END_SAVE_MIN_VIRT // switch to bank 1
ssm psr.ic | PSR_DEFAULT_BITS
movl r17=FPSR_DEFAULT
adds r8=(IA64_PT_REGS_R8_OFFSET-IA64_PT_REGS_R16_OFFSET),r2
;;
srlz.i // guarantee that interruption collection is on
cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
(p9) mov r15=-1
(p15) ssm psr.i // restore psr.i
mov.m ar.fpsr=r17
stf8 [r8]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p15) ssm psr.i // restore psr.i
mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
br.ret.sptk.many b7
END(setup_syscall_via_break)
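Besides scrubbing NaT bits out of in0-in7 (the tnat.nz / mov -1 pairs), a key job of this routine is computing the value later fed to "loadrs": the dirty RSE partition size in bytes, positioned in the loadrs field of ar.rsc. A C sketch of that math, under the assumption (from the MINSTATE macros) that r18 and r22 arrive holding ar.bsp and ar.bspstore:

/* Sketch of the "loadrs" computation above. */
static unsigned long rsc_for_loadrs(unsigned long bsp, unsigned long bspstore)
{
	unsigned long ndirty_bytes = bsp - bspstore;	/* "sub r18=r18,r22" */

	return ndirty_bytes << 16;	/* loadrs field of ar.rsc: "shl r18=r18,16" */
}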
@@ -108,8 +108,7 @@ restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr)
long err;
/* restore scratch that always gets updated during signal delivery: */
err = __get_user(flags, &sc->sc_flags);
err |= __get_user(nat, &sc->sc_nat);
err |= __get_user(ip, &sc->sc_ip); /* instruction pointer */
err |= __get_user(cfm, &sc->sc_cfm);
@@ -120,21 +119,12 @@ restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr)
err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
err |= __get_user(scr->pt.pr, &sc->sc_pr); /* predicates */
err |= __get_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */
err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8); /* r1 */
err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8); /* r8-r11 */
err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8); /* r12-r13 */
err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */
if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
/* Restore most scratch-state only when not in syscall. */
err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
err |= __copy_from_user(&scr->pt.b6, &sc->sc_br[6], 2*8); /* b6-b7 */
err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8); /* r2-r3 */
err |= __copy_from_user(&scr->pt.r14, &sc->sc_gr[14], 8); /* r14 */
err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
}
scr->pt.cr_ifs = cfm | (1UL << 63);
/* establish new instruction pointer: */
@@ -144,6 +134,16 @@ restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr)
scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat);
if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
/* Restore most scratch-state only when not in syscall. */
err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */
err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
err |= __get_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */
err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8); /* r2-r3 */
err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
}
if ((flags & IA64_SC_FLAG_FPH_VALID) != 0) {
struct ia64_psr *psr = ia64_psr(&scr->pt);
@@ -358,7 +358,6 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr)
nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat);
err = __put_user(flags, &sc->sc_flags);
err |= __put_user(nat, &sc->sc_nat);
err |= PUT_SIGSET(mask, &sc->sc_mask);
err |= __put_user(cfm, &sc->sc_cfm);
@@ -369,6 +368,7 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr)
err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
err |= __put_user(scr->pt.pr, &sc->sc_pr); /* predicates */
err |= __put_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */
err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8); /* r1 */
err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8); /* r8-r11 */
err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8); /* r12-r13 */
@@ -377,19 +377,19 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr)
if (flags & IA64_SC_FLAG_IN_SYSCALL) {
/* Clear scratch registers if the signal interrupted a system call. */
err |= __clear_user(&sc->sc_ar_ccv, 8);
err |= __put_user(0, &sc->sc_ar_ccv); /* ar.ccv */
err |= __put_user(0, &sc->sc_br[7]); /* b7 */
err |= __put_user(0, &sc->sc_gr[14]); /* r14 */
err |= __clear_user(&sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
err |= __clear_user(&sc->sc_br[6], 2*8); /* b6-b7 */
err |= __clear_user(&sc->sc_gr[2], 2*8); /* r2-r3 */
err |= __clear_user(&sc->sc_gr[14], 8); /* r14 */
err |= __clear_user(&sc->sc_gr[16], 16*8); /* r16-r31 */
} else {
/* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */
err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */
err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
err |= __put_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */
err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8); /* ar.csd & ar.ssd */
err |= __copy_to_user(&sc->sc_br[6], &scr->pt.b6, 2*8); /* b6-b7 */
err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8); /* r2-r3 */
err |= __copy_to_user(&sc->sc_gr[14], &scr->pt.r14, 8); /* r14 */
err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */
}
return err;