Commit e37e37be authored by David Mosberger

ia64: Lots of formatting fixes for the optimized syscall paths.

	Fix setting of current->thread.on_ustack flag in optimized syscall exit path.
	Tune break_fault for syscall execution.
	Break ia32_execve: the ia64_execve() hack that was there is too ugly for
	words; surely we can do better...
parent 4776e929
......@@ -53,10 +53,7 @@
ENTRY(ia64_execve)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3)
alloc loc1=ar.pfs,3,2,4,0
/* Return with pt_reg saved. This is special because ia32 application needs scratch registers
* after return from execve.
*/
movl loc0=ia64_ret_from_execve_syscall // return with pt_reg saved. This is special because.
mov loc0=rp
.body
mov out0=in0 // filename
;; // stop bit between alloc and call
......@@ -80,7 +77,7 @@ ENTRY(ia64_execve)
* this executes in less than 20 cycles even on Itanium, so it's not worth
* optimizing for...).
*/
mov ar.unat=0; mov ar.lc=0;
mov ar.unat=0; mov ar.lc=0
mov r4=0; mov f2=f0; mov b1=r0
mov r5=0; mov f3=f0; mov b2=r0
mov r6=0; mov f4=f0; mov b3=r0
......@@ -226,14 +223,12 @@ GLOBAL_ENTRY(ia64_switch_to)
END(ia64_switch_to)
/*
* Note that interrupts are enabled during save_switch_stack and
* load_switch_stack. This means that we may get an interrupt with
* "sp" pointing to the new kernel stack while ar.bspstore is still
* pointing to the old kernel backing store area. Since ar.rsc,
* ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts,
* this is not a problem. Also, we don't need to specify unwind
* information for preserved registers that are not modified in
* save_switch_stack as the right unwind information is already
* Note that interrupts are enabled during save_switch_stack and load_switch_stack. This
* means that we may get an interrupt with "sp" pointing to the new kernel stack while
* ar.bspstore is still pointing to the old kernel backing store area. Since ar.rsc,
* ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
* problem. Also, we don't need to specify unwind information for preserved registers
* that are not modified in save_switch_stack as the right unwind information is already
* specified at the call-site of save_switch_stack.
*/
......@@ -526,7 +521,7 @@ strace_save_retval:
.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
ia64_strace_leave_kernel:
br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch return value
.rety: br.cond.sptk ia64_leave_from_syscall
.rety: br.cond.sptk ia64_leave_syscall
strace_error:
ld8 r3=[r2] // load pt_regs.r8
......@@ -572,21 +567,66 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
;;
.mem.offset 0,0
(p6) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
.mem.offset 8,0
(p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
.mem.offset 0,0; (p6) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
.mem.offset 8,0; (p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
END(ia64_ret_from_syscall)
// fall through
GLOBAL_ENTRY(ia64_leave_from_syscall)
/*
* ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
* need to switch to bank 0 and doesn't restore the scratch registers.
* To avoid leaking kernel bits, the scratch registers are set to
* the following known-to-be-safe values:
*
* r1: restored (global pointer)
* r2: cleared
* r3: 1 (when returning to user-level)
* r8-r11: restored (syscall return value(s))
* r12: restored (user-level stack pointer)
* r13: restored (user-level thread pointer)
* r14: cleared
* r15: restored (syscall #)
* r16-r19: cleared
* r20: user-level ar.fpsr
* r21: user-level b0
* r22: cleared
* r23: user-level ar.bspstore
* r24: user-level ar.rnat
* r25: user-level ar.unat
* r26: user-level ar.pfs
* r27: user-level ar.rsc
* r28: user-level ip
* r29: user-level psr
* r30: user-level cfm
* r31: user-level pr
* f6-f11: cleared
* pr: restored (user-level pr)
* b0: restored (user-level rp)
* b6-b7: cleared
* ar.unat: restored (user-level ar.unat)
* ar.pfs: restored (user-level ar.pfs)
* ar.rsc: restored (user-level ar.rsc)
* ar.rnat: restored (user-level ar.rnat)
* ar.bspstore: restored (user-level ar.bspstore)
* ar.fpsr: restored (user-level ar.fpsr)
* ar.ccv: cleared
* ar.csd: cleared
* ar.ssd: cleared
*/
GLOBAL_ENTRY(ia64_leave_syscall)
PT_REGS_UNWIND_INFO(0)
#ifdef CONFIG_PREEMPT
rsm psr.i // disable interrupts
#else
(pUStk) rsm psr.i
#endif
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
// work.need_resched etc. mustn't get changed by this CPU before it returns to
// user- or fsys-mode:
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
/*
* work.need_resched etc. mustn't get changed by this CPU before it returns to
* user- or fsys-mode, hence we disable interrupts early on:
*/
#ifdef CONFIG_PREEMPT
rsm psr.i // disable interrupts
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
......@@ -595,7 +635,6 @@ GLOBAL_ENTRY(ia64_leave_from_syscall)
(pKStk) cmp4.eq p6,p0=r21,r0 // p6 <- preempt_count == 0
;;
#else /* CONFIG_PREEMPT */
(pUStk) rsm psr.i
;;
(pUStk) adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
;;
......@@ -603,18 +642,18 @@ GLOBAL_ENTRY(ia64_leave_from_syscall)
.work_processed_syscall:
(p6) ld4 r31=[r17] // load current_thread_info()->flags
adds r16=PT(LOADRS)+16,r12
adds r18=PT(AR_BSPSTORE)+16, r12
adds r18=PT(AR_BSPSTORE)+16,r12
;;
ld8 r19=[r16] // load ar.rsc value for "loadrs"
ld8 rARBSPSTORE=[r18],16// load ar.bspstore (may be garbage)
ld8 r23=[r18],16 // load ar.bspstore (may be garbage)
(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
;;
mov r3=r12
mov ar.ssd=r0
mov f6=f0 // clear f6
(p6) cmp4.ne.unc p6,p0=r15,r0 // any special work pending?
;;
mov ar.ccv=r0 // clear ar.ccv
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET+16+IA64_PT_REGS_SIZE-IA64_STK_OFFSET,r3
mov b6=r0 // clear b6
(p6) br.cond.spnt .work_pending
;;
adds r16=PT(R8)+16,r12
......@@ -623,75 +662,59 @@ GLOBAL_ENTRY(ia64_leave_from_syscall)
// start restoring the state saved on the kernel stack (struct pt_regs):
ld8.fill r8=[r16],16
ld8.fill r9=[r17],16
(pUStk) mov r3=1
;;
ld8.fill r10=[r16],16
ld8.fill r11=[r17],16
mov f7=f0 // clear f7
;;
ld8 rCRIPSR=[r16],16 // load cr.ipsr
ld8 rCRIIP=[r17],16 // load cr.iip
ld8 r29=[r16],16 // load cr.ipsr
ld8 r28=[r17],16 // load cr.iip
;;
ld8 rCRIFS=[r16],16 // load cr.ifs
ld8 rARUNAT=[r17],16 // load ar.unat
ld8 r30=[r16],16 // load cr.ifs
ld8 r25=[r17],16 // load ar.unat
cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
;;
rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
invala // invalidate ALAT
mov f9=f0 // clear f9
(pUStk) st1 [r14]=r3
mov ar.csd=r0
mov f8=f0 // clear f8
mov ar.csd=r0 // clear ar.csd
mov f9=f0 // clear f9
;;
ld8 rARPFS=[r16],16 // load ar.pfs
ld8 rARRSC=[r17],PT(PR)-PT(AR_RSC) // load ar.rsc
ld8 r26=[r16],16 // load ar.pfs
ld8 r27=[r17],PT(PR)-PT(AR_RSC) // load ar.rsc
mov f10=f0 // clear f10
;;
ld8 rARRNAT=[r16],PT(B0)-PT(AR_RNAT) // load ar.rnat (may be garbage)
ld8 rARPR=[r17],PT(R1)-PT(PR) // load predicates
ld8 r24=[r16],PT(B0)-PT(AR_RNAT) // load ar.rnat (may be garbage)
ld8 r31=[r17],PT(R1)-PT(PR) // load predicates
mov f11=f0 // clear f11
;;
ld8 rB0=[r16],PT(R12)-PT(B0) // load b0
ld8 r21=[r16],PT(R12)-PT(B0) // load b0
ld8.fill r1=[r17],16 // load r1
mov b6=r0 // clear b6
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
;;
ld8.fill r12=[r16],16
ld8.fill r13=[r17],16
mov r2=r0 // clear r2
;;
ld8 rR1=[r16] // ar.fpsr
ld8 r20=[r16] // load ar.fpsr
ld8.fill r15=[r17] // load r15
mov b7=r0 // clear b7
;;
mov r16=ar.bsp // get existing backing store pointer
srlz.i // ensure interruption collection is off
mov r22=r0 // clear r22
(pUStk) mov r3=1
;;
mov ar.ssd=r0
(pUStk) st1 [r14]=r3
movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
;;
ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
mov r14=r0 // clear r14
;;
(pKStk) br.cond.dpnt skip_rbs_switch
/*
* Restore user backing store.
*
* NOTE: alloc, loadrs, and cover can't be predicated.
*/
cover // add current frame into dirty partition
;;
shr.u r18=r19,16 // get byte size of existing "dirty" partition
;;
mov r19=ar.bsp // get new backing store pointer
sub r16=r16,r18 // krbs = old bsp - size of dirty partition
cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs
;;
sub r19=r19,r16 // calculate total byte size of dirty partition
add r18=64,r18 // don't force in0-in7 into memory...
;;
shl r19=r19,16 // shift size of dirty partition into loadrs position
br.few dont_preserve_current_frame
END(ia64_leave_from_syscall)
(pKStk) br.cond.dpnt.many skip_rbs_switch
br.cond.sptk.many rbs_switch
END(ia64_leave_syscall)
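The "loadrs" arithmetic this exit path relies on (the value stored at PT(LOADRS), the shr.u/shl by 16, and the dirty-partition subtraction now shared via rbs_switch) can be summarized in C. A minimal sketch, illustrative rather than kernel code:

	/* Illustrative only (not kernel code): the dirty-partition arithmetic
	 * around rbs_switch. "loadrs" reloads exactly ndirty bytes from the
	 * kernel RBS when ar.rsc{29:16} holds that byte count. */
	static unsigned long rsc_for_loadrs(unsigned long new_bsp, unsigned long krbs)
	{
		unsigned long ndirty = new_bsp - krbs;	/* sub r19=r19,r16 */

		return ndirty << 16;			/* shl r19=r19,16 */
	}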
GLOBAL_ENTRY(ia64_ret_from_execve_syscall)
PT_REGS_UNWIND_INFO(0)
......@@ -699,19 +722,19 @@ GLOBAL_ENTRY(ia64_ret_from_execve_syscall)
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
;;
.mem.offset 0,0
(p6) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
.mem.offset 8,0
(p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
.mem.offset 0,0; (p6) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
.mem.offset 8,0; (p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
END(ia64_ret_from_execve_syscall)
// fall through
GLOBAL_ENTRY(ia64_leave_kernel)
PT_REGS_UNWIND_INFO(0)
cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
// work.need_resched etc. mustn't get changed by this CPU before it returns to
// user- or fsys-mode:
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
/*
* work.need_resched etc. mustn't get changed by this CPU before it returns to
* user- or fsys-mode, hence we disable interrupts early on:
*/
#ifdef CONFIG_PREEMPT
rsm psr.i // disable interrupts
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
......@@ -744,14 +767,14 @@ GLOBAL_ENTRY(ia64_leave_kernel)
(p6) and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
;;
ld8.fill r24=[r29]
ld8 r15=[r30] //ar.ccv
ld8 r15=[r30] // load ar.ccv
(p6) cmp4.ne.unc p6,p0=r19,r0 // any special work pending?
;;
ld8 r29=[r2],16 // b7
ld8 r30=[r3],16 // ar.csd
ld8 r29=[r2],16 // load b7
ld8 r30=[r3],16 // load ar.csd
(p6) br.cond.spnt .work_pending
;;
ld8 r31=[r2],16 // ar.ssd
ld8 r31=[r2],16 // load ar.ssd
ld8.fill r8=[r3],16
;;
ld8.fill r9=[r2],16
......@@ -806,21 +829,21 @@ GLOBAL_ENTRY(ia64_leave_kernel)
adds r16=PT(CR_IPSR)+16,r12
adds r17=PT(CR_IIP)+16,r12
;;
ld8 rCRIPSR=[r16],16 // load cr.ipsr
ld8 rCRIIP=[r17],16 // load cr.iip
ld8 r29=[r16],16 // load cr.ipsr
ld8 r28=[r17],16 // load cr.iip
;;
ld8 rCRIFS=[r16],16 // load cr.ifs
ld8 rARUNAT=[r17],16 // load ar.unat
ld8 r30=[r16],16 // load cr.ifs
ld8 r25=[r17],16 // load ar.unat
;;
ld8 rARPFS=[r16],16 // load ar.pfs
ld8 rARRSC=[r17],16 // load ar.rsc
ld8 r26=[r16],16 // load ar.pfs
ld8 r27=[r17],16 // load ar.rsc
cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
;;
ld8 rARRNAT=[r16],16 // load ar.rnat (may be garbage)
ld8 rARBSPSTORE=[r17],16// load ar.bspstore (may be garbage)
ld8 r24=[r16],16 // load ar.rnat (may be garbage)
ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
;;
ld8 rARPR=[r16],16 // load predicates
ld8 rB0=[r17],16 // load b0
ld8 r31=[r16],16 // load predicates
ld8 r21=[r17],16 // load b0
;;
ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
ld8.fill r1=[r17],16 // load r1
......@@ -829,7 +852,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
ld8.fill r13=[r17],16
(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
;;
ld8 rR1=[r16],16 // ar.fpsr
ld8 r20=[r16],16 // load ar.fpsr
ld8.fill r15=[r17],16
;;
ld8.fill r14=[r16],16
......@@ -852,6 +875,8 @@ GLOBAL_ENTRY(ia64_leave_kernel)
* NOTE: alloc, loadrs, and cover can't be predicated.
*/
(pNonSys) br.cond.dpnt dont_preserve_current_frame
rbs_switch:
cover // add current frame into dirty partition and set cr.ifs
;;
mov r19=ar.bsp // get new backing store pointer
......@@ -945,24 +970,31 @@ rse_clear_invalid:
loadrs
;;
skip_rbs_switch:
(pSys) mov r19=r0 // clear r19
mov b0=rB0
mov ar.pfs=rARPFS
(pUStk) mov ar.bspstore=rARBSPSTORE
(p9) mov cr.ifs=rCRIFS
(pSys) mov r16=r0 // clear r16
mov cr.ipsr=rCRIPSR
mov ar.fpsr=rR1
(pSys) mov r17=r0 // clear r17
mov cr.iip=rCRIIP
;;
(pUStk) mov ar.rnat=rARRNAT // must happen with RSE in lazy mode
(pSys) mov r18=r0 // clear r18
mov ar.rsc=rARRSC
mov ar.unat=rARUNAT
mov pr=rARPR,-1
(pSys) mov r19=r0 // clear r19 for leave_syscall, no-op otherwise
mov b0=r21
mov ar.pfs=r26
(pUStk) mov ar.bspstore=r23
(p9) mov cr.ifs=r30
(pSys) mov r16=r0 // clear r16 for leave_syscall, no-op otherwise
mov cr.ipsr=r29
mov ar.fpsr=r20
(pSys) mov r17=r0 // clear r17 for leave_syscall, no-op otherwise
mov cr.iip=r28
;;
(pUStk) mov ar.rnat=r24 // must happen with RSE in lazy mode
(pSys) mov r18=r0 // clear r18 for leave_syscall, no-op otherwise
mov ar.rsc=r27
mov ar.unat=r25
mov pr=r31,-1
rfi
/*
* On entry:
* r20 = &current->thread_info->preempt_count (if CONFIG_PREEMPT)
* r31 = current->thread_info->flags
* On exit:
* p6 = TRUE if work-pending-check needs to be redone
*/
.work_pending:
tbit.z p6,p0=r31,TIF_NEED_RESCHED // current_thread_info()->need_resched==0?
(p6) br.cond.sptk.few .notify
......@@ -972,7 +1004,6 @@ skip_rbs_switch:
(pKStk) st4 [r20]=r21
ssm psr.i // enable interrupts
#endif
br.call.spnt.many rp=schedule
.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1
rsm psr.i // disable interrupts
......@@ -995,12 +1026,10 @@ END(ia64_leave_kernel)
ENTRY(handle_syscall_error)
/*
* Some system calls (e.g., ptrace, mmap) can return arbitrary
* values which could lead us to mistake a negative return
* value as a failed syscall. Those syscall must deposit
* a non-zero value in pt_regs.r8 to indicate an error.
* If pt_regs.r8 is zero, we assume that the call completed
* successfully.
* Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
* lead us to mistake a negative return value for a failed syscall. Those syscalls
* must deposit a non-zero value in pt_regs.r8 to indicate an error. If
* pt_regs.r8 is zero, we assume that the call completed successfully.
*/
PT_REGS_UNWIND_INFO(0)
ld8 r3=[r2] // load pt_regs.r8
......@@ -1015,7 +1044,7 @@ ENTRY(handle_syscall_error)
;;
.mem.offset 0,0; st8.spill [r2]=r9 // store errno in pt_regs.r8 and set unat bit
.mem.offset 8,0; st8.spill [r3]=r10 // store error indication in pt_regs.r10 and set unat bit
br.cond.sptk ia64_leave_from_syscall
br.cond.sptk ia64_leave_syscall
END(handle_syscall_error)
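For context, the user-level half of this convention can be sketched in C; this is a hedged illustration of how a libc wrapper interprets r8/r10 on ia64, not code from this patch:

	#include <errno.h>

	/* On ia64, r10 is -1 iff the syscall failed, in which case r8 holds
	 * the positive errno deposited by handle_syscall_error; otherwise
	 * r8 is the return value. */
	static long syscall_result(long r8, long r10)
	{
		if (r10 == -1) {
			errno = r8;
			return -1;
		}
		return r8;
	}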
/*
......@@ -1101,10 +1130,11 @@ ENTRY(sys_rt_sigreturn)
.body
cmp.eq pNonSys,pSys=r0,r0 // sigreturn isn't a normal syscall...
;;
/* After signal handler, live registers f6-f11 are restored to the previous
* executing context values for synchronous signals (from exceptions); or they
* are cleared to 0 for asynchronous signals (from syscalls). These live registers
* will be put into pt_regs to return back to user space.
/*
* leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
* syscall-entry path does not save them we save them here instead. Note: we
* don't need to save any other registers that are not saved by the streamlined
* syscall path, because restore_sigcontext() restores them.
*/
adds r16=PT(F6)+32,sp
adds r17=PT(F7)+32,sp
......
......@@ -5,8 +5,8 @@
* careful not to step on these!
*/
#define pLvSys p1 /* set 1 if leave from syscall; otherwise, set 0 */
#define pKStk p2 /* will leave_kernel return to kernel-stacks? */
#define pUStk p3 /* will leave_kernel return to user-stacks? */
#define pKStk p2 /* will leave_{kernel,syscall} return to kernel-stacks? */
#define pUStk p3 /* will leave_{kernel,syscall} return to user-stacks? */
#define pSys p4 /* are we processing a (synchronous) system call? */
#define pNonSys p5 /* complement of pSys */
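An instruction guarded by one of these predicates only takes effect when the predicate is 1. A rough C equivalent of "(pUStk) rsm psr.i" (illustrative only; psr_i stands in for the interrupt-enable bit):

	static int psr_i = 1;			/* stand-in for the psr.i bit */

	static void maybe_disable_irqs(int pUStk)
	{
		if (pUStk)			/* (pUStk) rsm psr.i */
			psr_i = 0;		/* interrupts off */
	}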
......
/*
* arch/ia64/kernel/ivt.S
*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger <davidm@hpl.hp.com>
* Copyright (C) 2000, 2002-2003 Intel Co
......@@ -637,52 +637,49 @@ END(daccess_bit)
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
/* System call entry/exit only saves/restores part of pt_regs, i.e. no scratch registers
* are saved/restored except r15 which contains syscall number and needs to be saved in the
* entry. This optimization is based on the assumption that applications only call glibc
* system call interface which doesn't use scratch registers after break into kernel.
* Registers saved/restored during system call entry/exit are listed as follows:
/*
* The streamlined system call entry/exit paths only save/restore the initial part
* of pt_regs. This implies that the callers of system calls must adhere to the
* normal procedure calling conventions.
*
* Registers to be saved & restored:
* CR registers: cr_ipsr, cr_iip, cr_ifs
* AR registers: ar_unat, ar_pfs, ar_rsc, ar_rnat, ar_bspstore, ar_fpsr
* CR registers: cr.ipsr, cr.iip, cr.ifs
* AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
* others: pr, b0, loadrs, r1, r12, r13, r15
* Registers to be restored only:
* r8~r11: output value from the system call.
* r8-r11: output value(s) from the system call.
*
* During system call exit, scratch registers (including r15) are modified/cleared to
* prevent leaking bits from kernel to user level.
* During system call exit, scratch registers (including r15) are modified/cleared
* to prevent leaking bits from kernel to user level.
*/
DBG_FAULT(11)
mov r16=cr.iim
mov r17=__IA64_BREAK_SYSCALL
mov r16=IA64_KR(CURRENT) // r16 = current (physical); 12 cycle read lat.
mov r17=cr.iim
mov r18=__IA64_BREAK_SYSCALL
mov r21=ar.fpsr
mov r29=cr.ipsr
mov r20=r1
mov r25=ar.unat
mov r27=ar.rsc
mov r26=ar.pfs
mov r28=cr.iip
mov r31=pr // prepare to save predicates
;;
cmp.eq p0,p7=r16,r17 // is this a system call? (p7 <- false, if so)
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
cmp.eq p0,p7=r18,r17 // is this a system call? (p7 <- false, if so)
(p7) br.cond.spnt non_syscall
;;
mov r21=ar.fpsr;
mov rCRIPSR=cr.ipsr;
mov rR1=r1;
mov rARUNAT=ar.unat;
mov rARRSC=ar.rsc;
mov rARPFS=ar.pfs;
mov rCRIIP=cr.iip;
mov r1=IA64_KR(CURRENT); /* r1 = current (physical) */
;;
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r1;
ld1 r17=[r16] // load current->thread.on_ustack flag
st1 [r16]=r0 // clear current->thread.on_ustack flag
adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // set r1 for MINSTATE_START_SAVE_MIN_VIRT
// switch from user to kernel RBS:
;;
ld1 r17=[r16]; /* load current->thread.on_ustack flag */
st1 [r16]=r0; /* clear current->thread.on_ustack flag */
/* switch from user to kernel RBS: */
invala
cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already?
;;
invala;
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? (psr.cpl==0) */
;;
mov rCRIFS=r0
mov r30=r0
MINSTATE_START_SAVE_MIN_VIRT
br.call.sptk.many b7=break_fault_setup
br.call.sptk.many b7=setup_syscall_via_break
;;
mov r3=255
adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
......@@ -724,7 +721,6 @@ ENTRY(break_fault)
st8 [r16]=r18 // store new value for cr.isr
(p8) br.call.sptk.many b6=b6 // ignore this return addr
br.cond.sptk ia64_trace_syscall
// NOT REACHED
END(break_fault)
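A hypothetical C rendering of the dispatch arithmetic above (ia64 syscall numbers begin at 1024; the bound of 255 is taken from "mov r3=255", and sys_call_table is assumed as in the real kernel):

	#include <errno.h>

	typedef long (*syscall_fn)(long, long, long, long, long, long);
	extern syscall_fn sys_call_table[];	/* assumed */

	static long dispatch_syscall(unsigned long r15, long a0, long a1, long a2)
	{
		unsigned long nr = r15 - 1024;	/* adds r15=-1024,r15 */

		if (nr > 255)			/* outside the table */
			return -ENOSYS;
		return sys_call_table[nr](a0, a1, a2, 0, 0, 0);
	}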
......@@ -772,67 +768,71 @@ END(interrupt)
* there happens to be space here that would go unused otherwise. If this
* fault ever gets "unreserved", simply move the following code to a more
* suitable spot...
*
* setup_syscall_via_break() is a separate subroutine so that it can
* allocate stacked registers, which it needs to safely demine any
* potential NaT values from the input registers.
*/
ENTRY(break_fault_setup)
ENTRY(setup_syscall_via_break)
alloc r19=ar.pfs,8,0,0,0
tnat.nz p8,p0=in0
add r16=PT(CR_IPSR),r1 /* initialize first base pointer */
;;
st8 [r16]=rCRIPSR,16; /* save cr.ipsr */
adds r17=PT(CR_IIP),r1; /* initialize second base pointer */
st8 [r16]=r29,16 /* save cr.ipsr */
adds r17=PT(CR_IIP),r1 /* initialize second base pointer */
;;
(p8) mov in0=-1
tnat.nz p9,p0=in1
st8 [r17]=rCRIIP,16; /* save cr.iip */
mov rCRIIP=b0;
(pKStk) mov r18=r0; /* make sure r18 isn't NaT */
st8 [r17]=r28,16 /* save cr.iip */
mov r28=b0
(pKStk) mov r18=r0 /* make sure r18 isn't NaT */
;;
(p9) mov in1=-1
tnat.nz p10,p0=in2
st8 [r16]=rCRIFS,16; /* save cr.ifs */
st8 [r17]=rARUNAT,16; /* save ar.unat */
(pUStk) sub r18=r18,rKRBS; /* r18=RSE.ndirty*8 */
st8 [r16]=r30,16 /* save cr.ifs */
st8 [r17]=r25,16 /* save ar.unat */
(pUStk) sub r18=r18,r22 /* r18=RSE.ndirty*8 */
;;
st8 [r16]=rARPFS,16; /* save ar.pfs */
st8 [r17]=rARRSC,16; /* save ar.rsc */
tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT
st8 [r16]=r26,16 /* save ar.pfs */
st8 [r17]=r27,16 /* save ar.rsc */
tbit.nz p15,p0=r29,IA64_PSR_I_BIT
;; /* avoid RAW on r16 & r17 */
(p10) mov in2=-1
nop.f 0
tnat.nz p11,p0=in3
(pKStk) adds r16=16,r16; /* skip over ar_rnat field */
(pKStk) adds r17=16,r17; /* skip over ar_bspstore field */
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */
(pKStk) adds r16=16,r16 /* skip over ar_rnat field */
(pKStk) adds r17=16,r17 /* skip over ar_bspstore field */
shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
;;
(p11) mov in3=-1
tnat.nz p12,p0=in4
(pUStk) st8 [r16]=rARRNAT,16; /* save ar.rnat */
(pUStk) st8 [r17]=rARBSPSTORE,16; /* save ar.bspstore */
(pUStk) st8 [r16]=r24,16 /* save ar.rnat */
(pUStk) st8 [r17]=r23,16 /* save ar.bspstore */
;;
(p12) mov in4=-1
tnat.nz p13,p0=in5
st8 [r16]=rARPR,16; /* save predicates */
st8 [r17]=rCRIIP,16; /* save b0 */
dep r14=-1,r0,61,3;
st8 [r16]=r31,16 /* save predicates */
st8 [r17]=r28,16 /* save b0 */
dep r14=-1,r0,61,3
;;
st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */
st8.spill [r17]=rR1,16; /* save original r1 */
adds r2=IA64_PT_REGS_R16_OFFSET,r1;
st8 [r16]=r18,16 /* save ar.rsc value for "loadrs" */
st8.spill [r17]=r20,16 /* save original r1 */
adds r2=IA64_PT_REGS_R16_OFFSET,r1
;;
(p13) mov in5=-1
tnat.nz p14,p0=in6
.mem.offset 0,0; st8.spill [r16]=r12,16;
.mem.offset 8,0; st8.spill [r17]=r13,16;
.mem.offset 0,0; st8.spill [r16]=r12,16
.mem.offset 8,0; st8.spill [r17]=r13,16
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */
;;
(p14) mov in6=-1
tnat.nz p8,p0=in7
.mem.offset 0,0; st8 [r16]=r21,16; /* ar.fpsr */
.mem.offset 8,0; st8.spill [r17]=r15,16;
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */
.mem.offset 0,0; st8 [r16]=r21,16 /* ar.fpsr */
.mem.offset 8,0; st8.spill [r17]=r15,16
adds r12=-16,r1 /* switch to kernel memory stack (with 16 bytes of scratch) */
;;
mov r13=IA64_KR(CURRENT); /* establish `current' */
movl r1=__gp; /* establish kernel global pointer */
mov r13=IA64_KR(CURRENT) /* establish `current' */
movl r1=__gp /* establish kernel global pointer */
;;
MINSTATE_END_SAVE_MIN_VIRT
......@@ -849,7 +849,7 @@ ENTRY(break_fault_setup)
mov.m ar.fpsr=r17
stf8 [r8]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
br.ret.sptk.many b7
END(break_fault_setup)
END(setup_syscall_via_break)
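A conceptual C model of the demining performed above (not real kernel code; the assembly tests each input register with tnat.nz and forces it to -1 under the resulting predicate):

	/* Any syscall argument whose register carries a NaT bit is forced to
	 * -1 so C-level syscall handlers never consume a NaT value. */
	static void demine_syscall_args(long arg[8], const int is_nat[8])
	{
		int i;

		for (i = 0; i < 8; i++)
			if (is_nat[i])		/* tnat.nz pN,p0=inN */
				arg[i] = -1;	/* (pN) mov inN=-1 */
	}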
.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
......
......@@ -4,43 +4,25 @@
#include "entry.h"
/*
* A couple of convenience macros that make writing and reading
* SAVE_MIN and SAVE_REST easier.
*/
#define rARPR r31
#define rCRIFS r30
#define rCRIPSR r29
#define rCRIIP r28
#define rARRSC r27
#define rARPFS r26
#define rARUNAT r25
#define rARRNAT r24
#define rARBSPSTORE r23
#define rKRBS r22
#define rB0 r21
#define rR1 r20
/*
* Here start the source dependent macros.
*/
/*
* For ivt.s we want to access the stack virtually so we don't have to disable translation
* on interrupts.
*
* On entry:
* r1: pointer to current task (ar.k6)
*/
#define MINSTATE_START_SAVE_MIN_VIRT \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
;; \
(pUStk) mov.m rARRNAT=ar.rnat; \
(pUStk) addl rKRBS=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
(pUStk) mov.m r24=ar.rnat; \
(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
(pKStk) mov r1=sp; /* get sp */ \
;; \
(pUStk) lfetch.fault.excl.nt1 [rKRBS]; \
(pUStk) lfetch.fault.excl.nt1 [r22]; \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
(pUStk) mov rARBSPSTORE=ar.bspstore; /* save ar.bspstore */ \
(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
;; \
(pUStk) mov ar.bspstore=rKRBS; /* switch to kernel RBS */ \
(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
;; \
(pUStk) mov r18=ar.bsp; \
......@@ -57,16 +39,16 @@
#define MINSTATE_START_SAVE_MIN_PHYS \
(pKStk) movl sp=ia64_init_stack+IA64_STK_OFFSET-IA64_PT_REGS_SIZE; \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
(pUStk) addl rKRBS=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
;; \
(pUStk) mov rARRNAT=ar.rnat; \
(pUStk) mov r24=ar.rnat; \
(pKStk) dep r1=0,sp,61,3; /* compute physical addr of sp */ \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
(pUStk) mov rARBSPSTORE=ar.bspstore; /* save ar.bspstore */ \
(pUStk) dep rKRBS=-1,rKRBS,61,3; /* compute kernel virtual addr of RBS */\
(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
(pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \
;; \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
(pUStk) mov ar.bspstore=rKRBS; /* switch to kernel RBS */ \
(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
;; \
(pUStk) mov r18=ar.bsp; \
(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
......@@ -100,10 +82,14 @@
* Upon exit, the state is as follows:
* psr.ic: off
* r2 = points to &pt_regs.r16
* r8 = contents of ar.ccv
* r9 = contents of ar.csd
* r10 = contents of ar.ssd
* r11 = FPSR_DEFAULT
* r12 = kernel sp (kernel virtual address)
* r13 = points to current task_struct (kernel virtual address)
* p15 = TRUE if psr.i is set in cr.ipsr
* predicate registers (other than p2, p3, and p15), b6, r3, r8, r9, r10, r11, r14, r15:
* predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
* preserved
*
* Note that psr.ic is NOT turned on by this macro. This is so that
......@@ -111,12 +97,12 @@
*/
#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
mov rARRSC=ar.rsc; /* M */ \
mov rR1=r1; /* A */ \
mov rARUNAT=ar.unat; /* M */ \
mov rCRIPSR=cr.ipsr; /* M */ \
mov rARPFS=ar.pfs; /* I */ \
mov rCRIIP=cr.iip; /* M */ \
mov r27=ar.rsc; /* M */ \
mov r20=r1; /* A */ \
mov r25=ar.unat; /* M */ \
mov r29=cr.ipsr; /* M */ \
mov r26=ar.pfs; /* I */ \
mov r28=cr.iip; /* M */ \
mov r21=ar.fpsr; /* M */ \
COVER; /* B;; (or nothing) */ \
;; \
......@@ -129,18 +115,18 @@
;; \
invala; /* M */ \
SAVE_IFS; \
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? (psr.cpl==0) */ \
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
;; \
MINSTATE_START_SAVE_MIN \
adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
adds r16=PT(CR_IPSR),r1; \
;; \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
st8 [r16]=rCRIPSR; /* save cr.ipsr */ \
st8 [r16]=r29; /* save cr.ipsr */ \
;; \
lfetch.fault.excl.nt1 [r17]; \
tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT; \
mov rCRIPSR=b0 \
tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
mov r29=b0 \
;; \
adds r16=PT(R8),r1; /* initialize first base pointer */ \
adds r17=PT(R9),r1; /* initialize second base pointer */ \
......@@ -152,31 +138,31 @@
.mem.offset 0,0; st8.spill [r16]=r10,24; \
.mem.offset 8,0; st8.spill [r17]=r11,24; \
;; \
st8 [r16]=rCRIIP,16; /* save cr.iip */ \
st8 [r17]=rCRIFS,16; /* save cr.ifs */ \
(pUStk) sub r18=r18,rKRBS; /* r18=RSE.ndirty*8 */ \
st8 [r16]=r28,16; /* save cr.iip */ \
st8 [r17]=r30,16; /* save cr.ifs */ \
(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
mov r8=ar.ccv; \
mov r9=ar.csd; \
mov r10=ar.ssd; \
movl r11=FPSR_DEFAULT; /* L-unit */ \
;; \
st8 [r16]=rARUNAT,16; /* save ar.unat */ \
st8 [r17]=rARPFS,16; /* save ar.pfs */ \
st8 [r16]=r25,16; /* save ar.unat */ \
st8 [r17]=r26,16; /* save ar.pfs */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
;; \
st8 [r16]=rARRSC,16; /* save ar.rsc */ \
(pUStk) st8 [r17]=rARRNAT,16; /* save ar.rnat */ \
st8 [r16]=r27,16; /* save ar.rsc */ \
(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
;; /* avoid RAW on r16 & r17 */ \
(pUStk) st8 [r16]=rARBSPSTORE,16; /* save ar.bspstore */ \
st8 [r17]=rARPR,16; /* save predicates */ \
(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
st8 [r17]=r31,16; /* save predicates */ \
(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
;; \
st8 [r16]=rCRIPSR,16; /* save b0 */ \
st8 [r16]=r29,16; /* save b0 */ \
st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
;; \
.mem.offset 0,0; st8.spill [r16]=rR1,16; /* save original r1 */ \
.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
.mem.offset 8,0; st8.spill [r17]=r12,16; \
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
;; \
......@@ -204,6 +190,12 @@
* psr.ic: on
* r2: points to &pt_regs.r16
* r3: points to &pt_regs.r17
* r8: contents of ar.ccv
* r9: contents of ar.csd
* r10: contents of ar.ssd
* r11: FPSR_DEFAULT
*
* Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
*/
#define SAVE_REST \
.mem.offset 0,0; st8.spill [r2]=r16,16; \
......@@ -233,7 +225,7 @@
.mem.offset 8,0; st8.spill [r3]=r31,16; \
;; \
mov ar.fpsr=r11; /* M-unit */ \
st8 [r2]=r8,8; /* ar_ccv */ \
st8 [r2]=r8,8; /* ar.ccv */ \
adds r3=16,r3; \
;; \
stf.spill [r2]=f6,32; \
......@@ -254,6 +246,6 @@
st8 [r25]=r10; /* ar.ssd */ \
;;
#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs, mov r15=r19)
#define SAVE_MIN DO_SAVE_MIN( , mov rCRIFS=r0, )
#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, )
......@@ -29,8 +29,6 @@
#include <asm/perfmon.h>
#endif
#define offsetof(type,field) ((unsigned long) &((type *) 0)->field)
/*
* Bits in the PSR that we allow ptrace() to change:
* be, up, ac, mfl, mfh (the user mask; five bits total)
......
......@@ -125,14 +125,11 @@ restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr)
err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8); /* r12-r13 */
err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */
if ((flags & IA64_SC_FLAG_IN_SYSCALL)==0)
{
if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
/* Restore most scratch-state only when not in syscall. */
err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __get_user(scr->pt.ar_csd, &sc->sc_ar25); /* ar.csd */
err |= __get_user(scr->pt.ar_ssd, &sc->sc_ar26); /* ar.ssd */
err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
err |= __copy_from_user(&scr->pt.b6, &sc->sc_br[6], 2*8); /* b6-b7 */
err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8); /* r2-r3 */
err |= __copy_from_user(&scr->pt.r14, &sc->sc_gr[14], 8); /* r14 */
err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
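These 2*8-byte copies (and the matching ones in setup_sigcontext further down) are valid only because the paired fields are adjacent: ar_csd/ar_ssd in struct pt_regs, and sc_ar25/sc_ar26 and sc_br[6]/sc_br[7] in struct sigcontext. A hypothetical compile-time check of that layout assumption (LAYOUT_CHECK is a stand-in, not a kernel macro):

	#include <stddef.h>

	/* Fails to compile when its condition is false. */
	#define LAYOUT_CHECK(cond)	((void) sizeof(char[1 - 2*!(cond)]))

	static inline void check_sigscratch_layout (void)
	{
		LAYOUT_CHECK(offsetof(struct pt_regs, ar_ssd) == offsetof(struct pt_regs, ar_csd) + 8);
		LAYOUT_CHECK(offsetof(struct pt_regs, b7) == offsetof(struct pt_regs, b6) + 8);
	}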
......@@ -176,11 +173,10 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
int err;
/*
* If you change siginfo_t structure, please be sure
* this code is fixed accordingly. It should never
* copy any pad contained in the structure to avoid
* security leaks, but must copy the generic 3 ints
* plus the relevant union member.
* If you change siginfo_t structure, please be sure this code is fixed
* accordingly. It should never copy any pad contained in the structure
* to avoid security leaks, but must copy the generic 3 ints plus the
* relevant union member.
*/
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
......@@ -379,27 +375,19 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr)
err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8); /* r15 */
err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);
if (flags & IA64_SC_FLAG_IN_SYSCALL)
{
if (flags & IA64_SC_FLAG_IN_SYSCALL) {
/* Clear scratch registers if the signal interrupted a system call. */
err |= __clear_user(&sc->sc_ar_ccv, 8);
err |= __clear_user(&sc->sc_ar25,8); /* ar.csd */
err |= __clear_user(&sc->sc_ar26,8); /* ar.ssd */
err |= __clear_user(&sc->sc_br[6],8); /* b6 */
err |= __clear_user(&sc->sc_br[7],8); /* b7 */
err |= __clear_user(&sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
err |= __clear_user(&sc->sc_br[6], 2*8); /* b6-b7 */
err |= __clear_user(&sc->sc_gr[2], 2*8); /* r2-r3 */
err |= __clear_user(&sc->sc_gr[14],8); /* r14 */
err |= __clear_user(&sc->sc_gr[16],16*8); /* r16-r31 */
} else
{
/* Copy scratch registers to sigcontext if the signal did not interrupt a syscall. */
err |= __clear_user(&sc->sc_gr[14], 8); /* r14 */
err |= __clear_user(&sc->sc_gr[16], 16*8); /* r16-r31 */
} else {
/* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */
err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __put_user(scr->pt.ar_csd, &sc->sc_ar25); /* ar.csd */
err |= __put_user(scr->pt.ar_ssd, &sc->sc_ar26); /* ar.ssd */
err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8); /* ar.csd & ar.ssd */
err |= __copy_to_user(&sc->sc_br[6], &scr->pt.b6, 2*8); /* b6-b7 */
err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8); /* r2-r3 */
err |= __copy_to_user(&sc->sc_gr[14], &scr->pt.r14, 8); /* r14 */
err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */
......
......@@ -86,8 +86,6 @@
typedef unsigned long unw_word;
typedef unsigned char unw_hash_index_t;
#define struct_offset(str,fld) ((char *)&((str *)NULL)->fld - (char *) 0)
static struct {
spinlock_t lock; /* spinlock for unwind data */
......@@ -106,6 +104,8 @@ static struct {
/* index into unw_frame_info for preserved register i */
unsigned short preg_index[UNW_NUM_REGS];
short pt_regs_offsets[32];
/* unwind table for the kernel: */
struct unw_table kernel_table;
......@@ -155,47 +155,78 @@ static struct {
UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
},
.preg_index = {
struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
struct_offset(struct unw_frame_info, bsp_loc)/8,
struct_offset(struct unw_frame_info, bspstore_loc)/8,
struct_offset(struct unw_frame_info, pfs_loc)/8,
struct_offset(struct unw_frame_info, rnat_loc)/8,
struct_offset(struct unw_frame_info, psp)/8,
struct_offset(struct unw_frame_info, rp_loc)/8,
struct_offset(struct unw_frame_info, r4)/8,
struct_offset(struct unw_frame_info, r5)/8,
struct_offset(struct unw_frame_info, r6)/8,
struct_offset(struct unw_frame_info, r7)/8,
struct_offset(struct unw_frame_info, unat_loc)/8,
struct_offset(struct unw_frame_info, pr_loc)/8,
struct_offset(struct unw_frame_info, lc_loc)/8,
struct_offset(struct unw_frame_info, fpsr_loc)/8,
struct_offset(struct unw_frame_info, b1_loc)/8,
struct_offset(struct unw_frame_info, b2_loc)/8,
struct_offset(struct unw_frame_info, b3_loc)/8,
struct_offset(struct unw_frame_info, b4_loc)/8,
struct_offset(struct unw_frame_info, b5_loc)/8,
struct_offset(struct unw_frame_info, f2_loc)/8,
struct_offset(struct unw_frame_info, f3_loc)/8,
struct_offset(struct unw_frame_info, f4_loc)/8,
struct_offset(struct unw_frame_info, f5_loc)/8,
struct_offset(struct unw_frame_info, fr_loc[16 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[17 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[18 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[19 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[20 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[21 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[22 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[23 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[24 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[25 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[26 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[27 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[28 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[29 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[30 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[31 - 16])/8,
offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
offsetof(struct unw_frame_info, bsp_loc)/8,
offsetof(struct unw_frame_info, bspstore_loc)/8,
offsetof(struct unw_frame_info, pfs_loc)/8,
offsetof(struct unw_frame_info, rnat_loc)/8,
offsetof(struct unw_frame_info, psp)/8,
offsetof(struct unw_frame_info, rp_loc)/8,
offsetof(struct unw_frame_info, r4)/8,
offsetof(struct unw_frame_info, r5)/8,
offsetof(struct unw_frame_info, r6)/8,
offsetof(struct unw_frame_info, r7)/8,
offsetof(struct unw_frame_info, unat_loc)/8,
offsetof(struct unw_frame_info, pr_loc)/8,
offsetof(struct unw_frame_info, lc_loc)/8,
offsetof(struct unw_frame_info, fpsr_loc)/8,
offsetof(struct unw_frame_info, b1_loc)/8,
offsetof(struct unw_frame_info, b2_loc)/8,
offsetof(struct unw_frame_info, b3_loc)/8,
offsetof(struct unw_frame_info, b4_loc)/8,
offsetof(struct unw_frame_info, b5_loc)/8,
offsetof(struct unw_frame_info, f2_loc)/8,
offsetof(struct unw_frame_info, f3_loc)/8,
offsetof(struct unw_frame_info, f4_loc)/8,
offsetof(struct unw_frame_info, f5_loc)/8,
offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
},
.pt_regs_offsets = {
[0] = -1,
offsetof(struct pt_regs, r1),
offsetof(struct pt_regs, r2),
offsetof(struct pt_regs, r3),
[4] = -1, [5] = -1, [6] = -1, [7] = -1,
offsetof(struct pt_regs, r8),
offsetof(struct pt_regs, r9),
offsetof(struct pt_regs, r10),
offsetof(struct pt_regs, r11),
offsetof(struct pt_regs, r12),
offsetof(struct pt_regs, r13),
offsetof(struct pt_regs, r14),
offsetof(struct pt_regs, r15),
offsetof(struct pt_regs, r16),
offsetof(struct pt_regs, r17),
offsetof(struct pt_regs, r18),
offsetof(struct pt_regs, r19),
offsetof(struct pt_regs, r20),
offsetof(struct pt_regs, r21),
offsetof(struct pt_regs, r22),
offsetof(struct pt_regs, r23),
offsetof(struct pt_regs, r24),
offsetof(struct pt_regs, r25),
offsetof(struct pt_regs, r26),
offsetof(struct pt_regs, r27),
offsetof(struct pt_regs, r28),
offsetof(struct pt_regs, r29),
offsetof(struct pt_regs, r30),
offsetof(struct pt_regs, r31),
},
.hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
#ifdef UNW_DEBUG
......@@ -211,10 +242,6 @@ static struct {
#endif
};
#define OFF_CASE(reg, reg_num) \
case reg: \
off = struct_offset(struct pt_regs, reg_num); \
break;
/* Unwind accessors. */
/*
......@@ -223,42 +250,16 @@ static struct {
static inline unsigned long
pt_regs_off (unsigned long reg)
{
unsigned long off =0;
short off = -1;
switch (reg)
{
OFF_CASE(1,r1)
OFF_CASE(2,r2)
OFF_CASE(3,r3)
OFF_CASE(8,r8)
OFF_CASE(9,r9)
OFF_CASE(10,r10)
OFF_CASE(11,r11)
OFF_CASE(12,r12)
OFF_CASE(13,r13)
OFF_CASE(14,r14)
OFF_CASE(15,r15)
OFF_CASE(16,r16)
OFF_CASE(17,r17)
OFF_CASE(18,r18)
OFF_CASE(19,r19)
OFF_CASE(20,r20)
OFF_CASE(21,r21)
OFF_CASE(22,r22)
OFF_CASE(23,r23)
OFF_CASE(24,r24)
OFF_CASE(25,r25)
OFF_CASE(26,r26)
OFF_CASE(27,r27)
OFF_CASE(28,r28)
OFF_CASE(29,r29)
OFF_CASE(30,r30)
OFF_CASE(31,r31)
default:
if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
off = unw.pt_regs_offsets[reg];
if (off < 0) {
UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
break;
off = 0;
}
return off;
return (unsigned long) off;
}
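A hypothetical caller, mirroring how the unwinder resolves scratch registers of an interrupted frame elsewhere in this file:

	/* Locate scratch register `reg' within the pt_regs of the interrupted
	 * frame, whose address the unwinder keeps in info->pt. */
	static unsigned long *
	scratch_reg_loc (struct unw_frame_info *info, unsigned long reg)
	{
		return (unsigned long *) (info->pt + pt_regs_off(reg));
	}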
static inline struct pt_regs *
......@@ -1416,7 +1417,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
else {
opc = UNW_INSN_MOVE_SCRATCH;
if (rval <= 11)
val = struct_offset(struct pt_regs, f6) + 16*(rval - 6);
val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
else
UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
__FUNCTION__, rval);
......@@ -1429,11 +1430,11 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
else {
opc = UNW_INSN_MOVE_SCRATCH;
if (rval == 0)
val = struct_offset(struct pt_regs, b0);
val = offsetof(struct pt_regs, b0);
else if (rval == 6)
val = struct_offset(struct pt_regs, b6);
val = offsetof(struct pt_regs, b6);
else
val = struct_offset(struct pt_regs, b7);
val = offsetof(struct pt_regs, b7);
}
break;
......@@ -1633,7 +1634,7 @@ build_script (struct unw_frame_info *info)
&& sr.curr.reg[UNW_REG_PSP].val != 0) {
/* new psp is sp plus frame size */
insn.opc = UNW_INSN_ADD;
insn.dst = struct_offset(struct unw_frame_info, psp)/8;
insn.dst = offsetof(struct unw_frame_info, psp)/8;
insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
script_emit(script, insn);
}
......@@ -1767,14 +1768,13 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
lazy_init:
off = unw.sw_off[val];
s[val] = (unsigned long) state->sw + off;
if (off >= struct_offset(struct switch_stack, r4)
&& off <= struct_offset(struct switch_stack, r7))
if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
/*
* We're initializing a general register: init NaT info, too. Note that
* the offset is a multiple of 8 which gives us the 3 bits needed for
* the type field.
*/
s[val+1] = (struct_offset(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
goto redo;
}
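Unpacking follows the same convention; a minimal sketch (names assumed from this file):

	/* The low 3 bits of the packed word carry the UNW_NAT_* type; the
	 * remaining bits are the 8-byte-aligned byte offset. */
	static void unpack_nat_info(unsigned long word, unsigned long *off, unsigned long *type)
	{
		*off  = word & ~0x7UL;
		*type = word &  0x7UL;
	}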
......@@ -1864,7 +1864,7 @@ unw_unwind (struct unw_frame_info *info)
if ((pr & (1UL << pNonSys)) != 0)
num_regs = *info->cfm_loc & 0x7f; /* size of frame */
info->pfs_loc =
(unsigned long *) (info->pt + struct_offset(struct pt_regs, ar_pfs));
(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
} else
num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
......
......@@ -130,11 +130,13 @@ struct pt_regs {
unsigned long ar_fpsr; /* floating point status (preserved) */
unsigned long r15; /* scratch */
/* The remaining registers are NOT saved for system calls. */
unsigned long r14; /* scratch */
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */
/* The following registers are saved by SAVE_REST: */
unsigned long r16; /* scratch */
unsigned long r17; /* scratch */
unsigned long r18; /* scratch */
......@@ -155,8 +157,7 @@ struct pt_regs {
unsigned long ar_ccv; /* compare/exchange value (scratch) */
/*
* Floating point registers that the kernel considers
* scratch:
* Floating point registers that the kernel considers scratch:
*/
struct ia64_fpreg f6; /* scratch */
struct ia64_fpreg f7; /* scratch */
......
......@@ -2,8 +2,8 @@
#define _ASM_IA64_PTRACE_OFFSETS_H
/*
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
* The "uarea" that can be accessed via PEEKUSER and POKEUSER is a
......@@ -14,20 +14,16 @@
* unsigned long nat_bits;
* unsigned long empty1;
* struct ia64_fpreg f2; // f2-f5
* .
* .
* :
* struct ia64_fpreg f5;
* struct ia64_fpreg f10; // f10-f31
* .
* .
* :
* struct ia64_fpreg f31;
* unsigned long r4; // r4-r7
* .
* .
* :
* unsigned long r7;
* unsigned long b1; // b1-b5
* .
* .
* :
* unsigned long b5;
* unsigned long ar_ec;
* unsigned long ar_lc;
......@@ -55,8 +51,7 @@
* unsigned long r10;
* unsigned long r11;
* unsigned long r16;
* .
* .
* :
* unsigned long r31;
* unsigned long ar_ccv;
* unsigned long ar_fpsr;
......