Commit e37e37be authored by David Mosberger

ia64: Lots of formatting fixes for the optimized syscall paths.

	Fix setting of current->thread.on_ustack flag in optimized syscall exit path.
	Tune break_fault for syscall execution.
	Break ia32_execve: the ia64_execve() hack that was there is too ugly for
	words; surely we can do better...
parent 4776e929
@@ -5,8 +5,8 @@
* careful not to step on these!
*/
#define pLvSys p1 /* set 1 if leave from syscall; otherwise, set 0*/
#define pKStk p2 /* will leave_kernel return to kernel-stacks? */
#define pUStk p3 /* will leave_kernel return to user-stacks? */
#define pKStk p2 /* will leave_{kernel,syscall} return to kernel-stacks? */
#define pUStk p3 /* will leave_{kernel,syscall} return to user-stacks? */
#define pSys p4 /* are we processing a (synchronous) system call? */
#define pNonSys p5 /* complement of pSys */
/*
* arch/ia64/kernel/ivt.S
*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger <davidm@hpl.hp.com>
* Copyright (C) 2000, 2002-2003 Intel Co
@@ -637,52 +637,49 @@ END(daccess_bit)
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
/* System call entry/exit only saves/restores part of pt_regs, i.e. no scratch registers
* are saved/restored except r15 which contains syscall number and needs to be saved in the
* entry. This optimization is based on the assumption that applications only call glibc
* system call interface which doesn't use scratch registers after break into kernel.
* Registers saved/restored during system call entry/exit are listed as follows:
*
* Registers to be saved & restored:
* CR registers: cr_ipsr, cr_iip, cr_ifs
* AR registers: ar_unat, ar_pfs, ar_rsc, ar_rnat, ar_bspstore, ar_fpsr
* others: pr, b0, loadrs, r1, r12, r13, r15
* Registers to be restored only:
* r8~r11: output value from the system call.
*
* During system call exit, scratch registers (including r15) are modified/cleared to
* prevent leaking bits from kernel to user level.
*/
/*
* The streamlined system call entry/exit paths only save/restore the initial part
* of pt_regs. This implies that the callers of system-calls must adhere to the
* normal procedure calling conventions.
*
* Registers to be saved & restored:
* CR registers: cr.ipsr, cr.iip, cr.ifs
* AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
* others: pr, b0, loadrs, r1, r12, r13, r15
* Registers to be restored only:
* r8-r11: output value from the system call.
*
* During system call exit, scratch registers (including r15) are modified/cleared
* to prevent leaking bits from kernel to user level.
*/
DBG_FAULT(11)
mov r16=cr.iim
mov r17=__IA64_BREAK_SYSCALL
mov r31=pr // prepare to save predicates
;;
cmp.eq p0,p7=r16,r17 // is this a system call? (p7 <- false, if so)
mov r16=IA64_KR(CURRENT) // r16 = current (physical); 12 cycle read lat.
mov r17=cr.iim
mov r18=__IA64_BREAK_SYSCALL
mov r21=ar.fpsr
mov r29=cr.ipsr
mov r20=r1
mov r25=ar.unat
mov r27=ar.rsc
mov r26=ar.pfs
mov r28=cr.iip
mov r31=pr // prepare to save predicates
;;
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
cmp.eq p0,p7=r18,r17 // is this a system call? (p7 <- false, if so)
(p7) br.cond.spnt non_syscall
;;
mov r21=ar.fpsr;
mov rCRIPSR=cr.ipsr;
mov rR1=r1;
mov rARUNAT=ar.unat;
mov rARRSC=ar.rsc;
mov rARPFS=ar.pfs;
mov rCRIIP=cr.iip;
mov r1=IA64_KR(CURRENT); /* r1 = current (physical) */
;;
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r1;
ld1 r17=[r16] // load current->thread.on_ustack flag
st1 [r16]=r0 // clear current->thread.on_ustack flag
adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // set r1 for MINSTATE_START_SAVE_MIN_VIRT
// switch from user to kernel RBS:
;;
ld1 r17=[r16]; /* load current->thread.on_ustack flag */
st1 [r16]=r0; /* clear current->thread.on_ustack flag */
/* switch from user to kernel RBS: */
invala
cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already?
;;
invala;
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? (psr.cpl==0) */
;;
mov rCRIFS=r0
mov r30=r0
MINSTATE_START_SAVE_MIN_VIRT
br.call.sptk.many b7=break_fault_setup
br.call.sptk.many b7=setup_syscall_via_break
;;
mov r3=255
adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
@@ -724,7 +721,6 @@ ENTRY(break_fault)
st8 [r16]=r18 // store new value for cr.isr
(p8) br.call.sptk.many b6=b6 // ignore this return addr
br.cond.sptk ia64_trace_syscall
// NOT REACHED
END(break_fault)
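
The on_ustack handling in the entry path above is compact and easy to miss. As a rough C-level sketch of what the ld1/st1/cmp.eq sequence does (illustrative only -- names and types are made up, the real logic is the ia64 assembly in break_fault, and the exit-path counterpart is what the commit message says was fixed):

#include <stdbool.h>

/* simplified stand-in for current->thread */
struct thread_sketch {
	unsigned char on_ustack;	/* 1 while the task runs on its user stacks */
};

/* mirrors: ld1 r17=[r16]; st1 [r16]=r0; cmp.eq pKStk,pUStk=r0,r17 */
static bool enter_on_kernel_stacks(struct thread_sketch *t)
{
	unsigned char was_on_ustack = t->on_ustack;	/* ld1 r17=[r16] */

	t->on_ustack = 0;				/* st1 [r16]=r0 */
	return was_on_ustack == 0;			/* pKStk <- (r17 == 0) */
}

/* the streamlined exit path must set the flag again before returning to
 * user level; per the commit message, that is the part that was broken */
static void leave_to_user_stacks(struct thread_sketch *t)
{
	t->on_ustack = 1;
}

int main(void)
{
	struct thread_sketch t = { .on_ustack = 1 };
	bool kstk = enter_on_kernel_stacks(&t);	/* false: we came from user stacks */

	leave_to_user_stacks(&t);
	return kstk ? 1 : 0;
}
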
@@ -748,7 +744,7 @@ ENTRY(interrupt)
mov out0=cr.ivr // pass cr.ivr as first arg
add out1=16,sp // pass pointer to pt_regs as second arg
;;
srlz.d // make sure we see the effect of cr.ivr
srlz.d // make sure we see the effect of cr.ivr
movl r14=ia64_leave_kernel
;;
mov rp=r14
@@ -772,67 +768,71 @@ END(interrupt)
* there happens to be space here that would go unused otherwise. If this
* fault ever gets "unreserved", simply move the following code to a more
* suitable spot...
*
* setup_syscall_via_break() is a separate subroutine so that it can
* allocate stacked registers so it can safely demine any
* potential NaT values from the input registers.
*/
ENTRY(break_fault_setup)
ENTRY(setup_syscall_via_break)
alloc r19=ar.pfs,8,0,0,0
tnat.nz p8,p0=in0
add r16=PT(CR_IPSR),r1 /* initialize first base pointer */
;;
st8 [r16]=rCRIPSR,16; /* save cr.ipsr */
adds r17=PT(CR_IIP),r1; /* initialize second base pointer */
add r16=PT(CR_IPSR),r1 /* initialize first base pointer */
;;
st8 [r16]=r29,16 /* save cr.ipsr */
adds r17=PT(CR_IIP),r1 /* initialize second base pointer */
;;
(p8) mov in0=-1
tnat.nz p9,p0=in1
st8 [r17]=rCRIIP,16; /* save cr.iip */
mov rCRIIP=b0;
(pKStk) mov r18=r0; /* make sure r18 isn't NaT */
st8 [r17]=r28,16 /* save cr.iip */
mov r28=b0
(pKStk) mov r18=r0 /* make sure r18 isn't NaT */
;;
(p9) mov in1=-1
tnat.nz p10,p0=in2
st8 [r16]=rCRIFS,16; /* save cr.ifs */
st8 [r17]=rARUNAT,16; /* save ar.unat */
(pUStk) sub r18=r18,rKRBS; /* r18=RSE.ndirty*8 */
;;
st8 [r16]=rARPFS,16; /* save ar.pfs */
st8 [r17]=rARRSC,16; /* save ar.rsc */
tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT
;; /* avoid RAW on r16 & r17 */
st8 [r16]=r30,16 /* save cr.ifs */
st8 [r17]=r25,16 /* save ar.unat */
(pUStk) sub r18=r18,r22 /* r18=RSE.ndirty*8 */
;;
st8 [r16]=r26,16 /* save ar.pfs */
st8 [r17]=r27,16 /* save ar.rsc */
tbit.nz p15,p0=r29,IA64_PSR_I_BIT
;; /* avoid RAW on r16 & r17 */
(p10) mov in2=-1
nop.f 0
tnat.nz p11,p0=in3
(pKStk) adds r16=16,r16; /* skip over ar_rnat field */
(pKStk) adds r17=16,r17; /* skip over ar_bspstore field */
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */
(pKStk) adds r16=16,r16 /* skip over ar_rnat field */
(pKStk) adds r17=16,r17 /* skip over ar_bspstore field */
shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
;;
(p11) mov in3=-1
tnat.nz p12,p0=in4
(pUStk) st8 [r16]=rARRNAT,16; /* save ar.rnat */
(pUStk) st8 [r17]=rARBSPSTORE,16; /* save ar.bspstore */
(pUStk) st8 [r16]=r24,16 /* save ar.rnat */
(pUStk) st8 [r17]=r23,16 /* save ar.bspstore */
;;
(p12) mov in4=-1
tnat.nz p13,p0=in5
st8 [r16]=rARPR,16; /* save predicates */
st8 [r17]=rCRIIP,16; /* save b0 */
dep r14=-1,r0,61,3;
st8 [r16]=r31,16 /* save predicates */
st8 [r17]=r28,16 /* save b0 */
dep r14=-1,r0,61,3
;;
st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */
st8.spill [r17]=rR1,16; /* save original r1 */
adds r2=IA64_PT_REGS_R16_OFFSET,r1;
st8 [r16]=r18,16 /* save ar.rsc value for "loadrs" */
st8.spill [r17]=r20,16 /* save original r1 */
adds r2=IA64_PT_REGS_R16_OFFSET,r1
;;
(p13) mov in5=-1
tnat.nz p14,p0=in6
.mem.offset 0,0; st8.spill [r16]=r12,16;
.mem.offset 8,0; st8.spill [r17]=r13,16;
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */
.mem.offset 0,0; st8.spill [r16]=r12,16
.mem.offset 8,0; st8.spill [r17]=r13,16
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */
;;
(p14) mov in6=-1
tnat.nz p8,p0=in7
.mem.offset 0,0; st8 [r16]=r21,16; /* ar.fpsr */
.mem.offset 8,0; st8.spill [r17]=r15,16;
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */
.mem.offset 0,0; st8 [r16]=r21,16 /* ar.fpsr */
.mem.offset 8,0; st8.spill [r17]=r15,16
adds r12=-16,r1 /* switch to kernel memory stack (with 16 bytes of scratch) */
;;
mov r13=IA64_KR(CURRENT); /* establish `current' */
movl r1=__gp; /* establish kernel global pointer */
mov r13=IA64_KR(CURRENT) /* establish `current' */
movl r1=__gp /* establish kernel global pointer */
;;
MINSTATE_END_SAVE_MIN_VIRT
@@ -842,14 +842,14 @@ ENTRY(break_fault_setup)
movl r17=FPSR_DEFAULT
adds r8=(IA64_PT_REGS_R8_OFFSET-IA64_PT_REGS_R16_OFFSET),r2
;;
srlz.i // guarantee that interruption collection is on
cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
srlz.i // guarantee that interruption collection is on
cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
(p9) mov r15=-1
(p15) ssm psr.i // restore psr.i
(p15) ssm psr.i // restore psr.i
mov.m ar.fpsr=r17
stf8 [r8]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
stf8 [r8]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
br.ret.sptk.many b7
END(break_fault_setup)
END(setup_syscall_via_break)
.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
@@ -1458,12 +1458,12 @@ ENTRY(dispatch_to_ia32_handler)
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i
adds r3=8,r2 // Base pointer for SAVE_REST
adds r3=8,r2 // Base pointer for SAVE_REST
;;
SAVE_REST
;;
mov r15=0x80
shr r14=r14,16 // Get interrupt number
shr r14=r14,16 // Get interrupt number
;;
cmp.ne p6,p0=r14,r15
(p6) br.call.dpnt.many b6=non_ia32_syscall
@@ -4,43 +4,25 @@
#include "entry.h"
/*
* A couple of convenience macros that make writing and reading
* SAVE_MIN and SAVE_REST easier.
*/
#define rARPR r31
#define rCRIFS r30
#define rCRIPSR r29
#define rCRIIP r28
#define rARRSC r27
#define rARPFS r26
#define rARUNAT r25
#define rARRNAT r24
#define rARBSPSTORE r23
#define rKRBS r22
#define rB0 r21
#define rR1 r20
/*
* Here start the source dependent macros.
*/
/*
* For ivt.s we want to access the stack virtually so we don't have to disable translation
* on interrupts.
*
* On entry:
* r1: pointer to current task (ar.k6)
*/
#define MINSTATE_START_SAVE_MIN_VIRT \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
;; \
(pUStk) mov.m rARRNAT=ar.rnat; \
(pUStk) addl rKRBS=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
(pUStk) mov.m r24=ar.rnat; \
(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
(pKStk) mov r1=sp; /* get sp */ \
;; \
(pUStk) lfetch.fault.excl.nt1 [rKRBS]; \
(pUStk) lfetch.fault.excl.nt1 [r22]; \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
(pUStk) mov rARBSPSTORE=ar.bspstore; /* save ar.bspstore */ \
(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
;; \
(pUStk) mov ar.bspstore=rKRBS; /* switch to kernel RBS */ \
(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
;; \
(pUStk) mov r18=ar.bsp; \
@@ -57,16 +39,16 @@
#define MINSTATE_START_SAVE_MIN_PHYS \
(pKStk) movl sp=ia64_init_stack+IA64_STK_OFFSET-IA64_PT_REGS_SIZE; \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
(pUStk) addl rKRBS=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
;; \
(pUStk) mov rARRNAT=ar.rnat; \
(pUStk) mov r24=ar.rnat; \
(pKStk) dep r1=0,sp,61,3; /* compute physical addr of sp */ \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
(pUStk) mov rARBSPSTORE=ar.bspstore; /* save ar.bspstore */ \
(pUStk) dep rKRBS=-1,rKRBS,61,3; /* compute kernel virtual addr of RBS */\
(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
(pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \
;; \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
(pUStk) mov ar.bspstore=rKRBS; /* switch to kernel RBS */ \
(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
;; \
(pUStk) mov r18=ar.bsp; \
(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
@@ -99,11 +81,15 @@
*
* Upon exit, the state is as follows:
* psr.ic: off
* r2 = points to &pt_regs.r16
* r2 = points to &pt_regs.r16
* r8 = contents of ar.ccv
* r9 = contents of ar.csd
* r10 = contents of ar.ssd
* r11 = FPSR_DEFAULT
* r12 = kernel sp (kernel virtual address)
* r13 = points to current task_struct (kernel virtual address)
* p15 = TRUE if psr.i is set in cr.ipsr
* predicate registers (other than p2, p3, and p15), b6, r3, r8, r9, r10, r11, r14, r15:
* predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
* preserved
*
* Note that psr.ic is NOT turned on by this macro. This is so that
@@ -111,12 +97,12 @@
*/
#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
mov rARRSC=ar.rsc; /* M */ \
mov rR1=r1; /* A */ \
mov rARUNAT=ar.unat; /* M */ \
mov rCRIPSR=cr.ipsr; /* M */ \
mov rARPFS=ar.pfs; /* I */ \
mov rCRIIP=cr.iip; /* M */ \
mov r27=ar.rsc; /* M */ \
mov r20=r1; /* A */ \
mov r25=ar.unat; /* M */ \
mov r29=cr.ipsr; /* M */ \
mov r26=ar.pfs; /* I */ \
mov r28=cr.iip; /* M */ \
mov r21=ar.fpsr; /* M */ \
COVER; /* B;; (or nothing) */ \
;; \
@@ -129,18 +115,18 @@
;; \
invala; /* M */ \
SAVE_IFS; \
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? (psr.cpl==0) */ \
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
;; \
MINSTATE_START_SAVE_MIN \
adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
adds r16=PT(CR_IPSR),r1; \
;; \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
st8 [r16]=rCRIPSR; /* save cr.ipsr */ \
st8 [r16]=r29; /* save cr.ipsr */ \
;; \
lfetch.fault.excl.nt1 [r17]; \
tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT; \
mov rCRIPSR=b0 \
tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
mov r29=b0 \
;; \
adds r16=PT(R8),r1; /* initialize first base pointer */ \
adds r17=PT(R9),r1; /* initialize second base pointer */ \
@@ -152,31 +138,31 @@
.mem.offset 0,0; st8.spill [r16]=r10,24; \
.mem.offset 8,0; st8.spill [r17]=r11,24; \
;; \
st8 [r16]=rCRIIP,16; /* save cr.iip */ \
st8 [r17]=rCRIFS,16; /* save cr.ifs */ \
(pUStk) sub r18=r18,rKRBS; /* r18=RSE.ndirty*8 */ \
st8 [r16]=r28,16; /* save cr.iip */ \
st8 [r17]=r30,16; /* save cr.ifs */ \
(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
mov r8=ar.ccv; \
mov r9=ar.csd; \
mov r10=ar.ssd; \
movl r11=FPSR_DEFAULT; /* L-unit */ \
;; \
st8 [r16]=rARUNAT,16; /* save ar.unat */ \
st8 [r17]=rARPFS,16; /* save ar.pfs */ \
st8 [r16]=r25,16; /* save ar.unat */ \
st8 [r17]=r26,16; /* save ar.pfs */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
;; \
st8 [r16]=rARRSC,16; /* save ar.rsc */ \
(pUStk) st8 [r17]=rARRNAT,16; /* save ar.rnat */ \
st8 [r16]=r27,16; /* save ar.rsc */ \
(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
;; /* avoid RAW on r16 & r17 */ \
(pUStk) st8 [r16]=rARBSPSTORE,16; /* save ar.bspstore */ \
st8 [r17]=rARPR,16; /* save predicates */ \
(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
st8 [r17]=r31,16; /* save predicates */ \
(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
;; \
st8 [r16]=rCRIPSR,16; /* save b0 */ \
st8 [r16]=r29,16; /* save b0 */ \
st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
;; \
.mem.offset 0,0; st8.spill [r16]=rR1,16; /* save original r1 */ \
.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
.mem.offset 8,0; st8.spill [r17]=r12,16; \
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
;; \
@@ -204,6 +190,12 @@
* psr.ic: on
* r2: points to &pt_regs.r16
* r3: points to &pt_regs.r17
* r8: contents of ar.ccv
* r9: contents of ar.csd
* r10: contents of ar.ssd
* r11: FPSR_DEFAULT
*
* Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
*/
#define SAVE_REST \
.mem.offset 0,0; st8.spill [r2]=r16,16; \
@@ -232,8 +224,8 @@
.mem.offset 0,0; st8.spill [r2]=r30,16; \
.mem.offset 8,0; st8.spill [r3]=r31,16; \
;; \
mov ar.fpsr=r11; /* M-unit */ \
st8 [r2]=r8,8; /* ar_ccv */ \
mov ar.fpsr=r11; /* M-unit */ \
st8 [r2]=r8,8; /* ar.ccv */ \
adds r3=16,r3; \
;; \
stf.spill [r2]=f6,32; \
@@ -254,6 +246,6 @@
st8 [r25]=r10; /* ar.ssd */ \
;;
#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs, mov r15=r19)
#define SAVE_MIN DO_SAVE_MIN( , mov rCRIFS=r0, )
#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, )
@@ -110,7 +110,7 @@ show_regs (struct pt_regs *regs)
printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
regs->f8.u.bits[1], regs->f8.u.bits[0],
regs->f9.u.bits[1], regs->f9.u.bits[0]);
printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
regs->f10.u.bits[1], regs->f10.u.bits[0],
regs->f11.u.bits[1], regs->f11.u.bits[0]);
@@ -29,8 +29,6 @@
#include <asm/perfmon.h>
#endif
#define offsetof(type,field) ((unsigned long) &((type *) 0)->field)
/*
* Bits in the PSR that we allow ptrace() to change:
* be, up, ac, mfl, mfh (the user mask; five bits total)
@@ -125,14 +125,11 @@ restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr)
err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8); /* r12-r13 */
err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */
if ((flags & IA64_SC_FLAG_IN_SYSCALL)==0)
{
if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
/* Restore most scratch-state only when not in syscall. */
err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __get_user(scr->pt.ar_csd, &sc->sc_ar25); /* ar.csd */
err |= __get_user(scr->pt.ar_ssd, &sc->sc_ar26); /* ar.ssd */
err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
err |= __copy_from_user(&scr->pt.b6, &sc->sc_br[6], 2*8); /* b6-b7 */
err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8); /* r2-r3 */
err |= __copy_from_user(&scr->pt.r14, &sc->sc_gr[14], 8); /* r14 */
err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
@@ -176,11 +173,10 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
int err;
/*
* If you change siginfo_t structure, please be sure
* this code is fixed accordingly. It should never
* copy any pad contained in the structure to avoid
* security leaks, but must copy the generic 3 ints
* plus the relevant union member.
* If you change siginfo_t structure, please be sure this code is fixed
* accordingly. It should never copy any pad contained in the structure
* to avoid security leaks, but must copy the generic 3 ints plus the
* relevant union member.
*/
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
@@ -379,27 +375,19 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr)
err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8); /* r15 */
err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);
if (flags & IA64_SC_FLAG_IN_SYSCALL)
{
if (flags & IA64_SC_FLAG_IN_SYSCALL) {
/* Clear scratch registers if the signal interrupted a system call. */
err |= __clear_user(&sc->sc_ar_ccv, 8);
err |= __clear_user(&sc->sc_ar25,8); /* ar.csd */
err |= __clear_user(&sc->sc_ar26,8); /* ar.ssd */
err |= __clear_user(&sc->sc_br[6],8); /* b6 */
err |= __clear_user(&sc->sc_br[7],8); /* b7 */
err |= __clear_user(&sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
err |= __clear_user(&sc->sc_br[6], 2*8); /* b6-b7 */
err |= __clear_user(&sc->sc_gr[2], 2*8); /* r2-r3 */
err |= __clear_user(&sc->sc_gr[14],8); /* r14 */
err |= __clear_user(&sc->sc_gr[16],16*8); /* r16-r31 */
} else
{
/* Copy scratch registers to sigcontext if the signal did not interrupt a syscall. */
err |= __clear_user(&sc->sc_gr[14], 8); /* r14 */
err |= __clear_user(&sc->sc_gr[16], 16*8); /* r16-r31 */
} else {
/* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */
err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __put_user(scr->pt.ar_csd, &sc->sc_ar25); /* ar.csd */
err |= __put_user(scr->pt.ar_ssd, &sc->sc_ar26); /* ar.ssd */
err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
err |= __copy_to_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
err |= __copy_to_user(&scr->pt.b6, &sc->sc_br[6], 2*8); /* b6-b7 */
err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8); /* r2-r3 */
err |= __copy_to_user(&sc->sc_gr[14], &scr->pt.r14, 8); /* r14 */
err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */
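
The merged __copy_to_user()/__clear_user() calls above rely on ar.csd/ar.ssd, b6/b7, and r2/r3 sitting back to back in both pt_regs and sigcontext, so one 2*8-byte copy covers what used to be two 8-byte accesses. A standalone sketch of that idea (struct layouts here are simplified assumptions; memcpy stands in for the user-copy helpers):

#include <stdio.h>
#include <string.h>

struct pt_sketch {				/* stand-in for the relevant pt_regs fields */
	unsigned long ar_csd, ar_ssd;		/* adjacent */
	unsigned long b6, b7;			/* adjacent */
};

struct sc_sketch {				/* stand-in for the relevant sigcontext fields */
	unsigned long sc_ar25, sc_ar26;		/* ar.csd, ar.ssd */
	unsigned long sc_br6, sc_br7;		/* b6, b7 */
};

int main(void)
{
	struct pt_sketch pt = { 1, 2, 3, 4 };
	struct sc_sketch sc;

	/* one 16-byte copy per adjacent pair instead of two 8-byte stores */
	memcpy(&sc.sc_ar25, &pt.ar_csd, 2 * 8);
	memcpy(&sc.sc_br6, &pt.b6, 2 * 8);

	printf("csd=%lu ssd=%lu b6=%lu b7=%lu\n",
	       sc.sc_ar25, sc.sc_ar26, sc.sc_br6, sc.sc_br7);
	return 0;
}
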
@@ -437,7 +425,7 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
* in the kernel, register stack is switched in the signal trampoline).
*/
if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
}
frame = (void *) frame - ((sizeof(*frame) + STACK_ALIGN - 1) & ~(STACK_ALIGN - 1));
@@ -86,8 +86,6 @@
typedef unsigned long unw_word;
typedef unsigned char unw_hash_index_t;
#define struct_offset(str,fld) ((char *)&((str *)NULL)->fld - (char *) 0)
static struct {
spinlock_t lock; /* spinlock for unwind data */
@@ -106,6 +104,8 @@ static struct {
/* index into unw_frame_info for preserved register i */
unsigned short preg_index[UNW_NUM_REGS];
short pt_regs_offsets[32];
/* unwind table for the kernel: */
struct unw_table kernel_table;
@@ -155,47 +155,78 @@ static struct {
UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
},
.preg_index = {
struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
struct_offset(struct unw_frame_info, bsp_loc)/8,
struct_offset(struct unw_frame_info, bspstore_loc)/8,
struct_offset(struct unw_frame_info, pfs_loc)/8,
struct_offset(struct unw_frame_info, rnat_loc)/8,
struct_offset(struct unw_frame_info, psp)/8,
struct_offset(struct unw_frame_info, rp_loc)/8,
struct_offset(struct unw_frame_info, r4)/8,
struct_offset(struct unw_frame_info, r5)/8,
struct_offset(struct unw_frame_info, r6)/8,
struct_offset(struct unw_frame_info, r7)/8,
struct_offset(struct unw_frame_info, unat_loc)/8,
struct_offset(struct unw_frame_info, pr_loc)/8,
struct_offset(struct unw_frame_info, lc_loc)/8,
struct_offset(struct unw_frame_info, fpsr_loc)/8,
struct_offset(struct unw_frame_info, b1_loc)/8,
struct_offset(struct unw_frame_info, b2_loc)/8,
struct_offset(struct unw_frame_info, b3_loc)/8,
struct_offset(struct unw_frame_info, b4_loc)/8,
struct_offset(struct unw_frame_info, b5_loc)/8,
struct_offset(struct unw_frame_info, f2_loc)/8,
struct_offset(struct unw_frame_info, f3_loc)/8,
struct_offset(struct unw_frame_info, f4_loc)/8,
struct_offset(struct unw_frame_info, f5_loc)/8,
struct_offset(struct unw_frame_info, fr_loc[16 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[17 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[18 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[19 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[20 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[21 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[22 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[23 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[24 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[25 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[26 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[27 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[28 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[29 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[30 - 16])/8,
struct_offset(struct unw_frame_info, fr_loc[31 - 16])/8,
offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
offsetof(struct unw_frame_info, bsp_loc)/8,
offsetof(struct unw_frame_info, bspstore_loc)/8,
offsetof(struct unw_frame_info, pfs_loc)/8,
offsetof(struct unw_frame_info, rnat_loc)/8,
offsetof(struct unw_frame_info, psp)/8,
offsetof(struct unw_frame_info, rp_loc)/8,
offsetof(struct unw_frame_info, r4)/8,
offsetof(struct unw_frame_info, r5)/8,
offsetof(struct unw_frame_info, r6)/8,
offsetof(struct unw_frame_info, r7)/8,
offsetof(struct unw_frame_info, unat_loc)/8,
offsetof(struct unw_frame_info, pr_loc)/8,
offsetof(struct unw_frame_info, lc_loc)/8,
offsetof(struct unw_frame_info, fpsr_loc)/8,
offsetof(struct unw_frame_info, b1_loc)/8,
offsetof(struct unw_frame_info, b2_loc)/8,
offsetof(struct unw_frame_info, b3_loc)/8,
offsetof(struct unw_frame_info, b4_loc)/8,
offsetof(struct unw_frame_info, b5_loc)/8,
offsetof(struct unw_frame_info, f2_loc)/8,
offsetof(struct unw_frame_info, f3_loc)/8,
offsetof(struct unw_frame_info, f4_loc)/8,
offsetof(struct unw_frame_info, f5_loc)/8,
offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
},
.pt_regs_offsets = {
[0] = -1,
offsetof(struct pt_regs, r1),
offsetof(struct pt_regs, r2),
offsetof(struct pt_regs, r3),
[4] = -1, [5] = -1, [6] = -1, [7] = -1,
offsetof(struct pt_regs, r8),
offsetof(struct pt_regs, r9),
offsetof(struct pt_regs, r10),
offsetof(struct pt_regs, r11),
offsetof(struct pt_regs, r12),
offsetof(struct pt_regs, r13),
offsetof(struct pt_regs, r14),
offsetof(struct pt_regs, r15),
offsetof(struct pt_regs, r16),
offsetof(struct pt_regs, r17),
offsetof(struct pt_regs, r18),
offsetof(struct pt_regs, r19),
offsetof(struct pt_regs, r20),
offsetof(struct pt_regs, r21),
offsetof(struct pt_regs, r22),
offsetof(struct pt_regs, r23),
offsetof(struct pt_regs, r24),
offsetof(struct pt_regs, r25),
offsetof(struct pt_regs, r26),
offsetof(struct pt_regs, r27),
offsetof(struct pt_regs, r28),
offsetof(struct pt_regs, r29),
offsetof(struct pt_regs, r30),
offsetof(struct pt_regs, r31),
},
.hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
#ifdef UNW_DEBUG
@@ -211,10 +242,6 @@ static struct {
#endif
};
#define OFF_CASE(reg, reg_num) \
case reg: \
off = struct_offset(struct pt_regs, reg_num); \
break;
/* Unwind accessors. */
/*
@@ -223,42 +250,16 @@ static struct {
static inline unsigned long
pt_regs_off (unsigned long reg)
{
unsigned long off =0;
short off = -1;
switch (reg)
{
OFF_CASE(1,r1)
OFF_CASE(2,r2)
OFF_CASE(3,r3)
OFF_CASE(8,r8)
OFF_CASE(9,r9)
OFF_CASE(10,r10)
OFF_CASE(11,r11)
OFF_CASE(12,r12)
OFF_CASE(13,r13)
OFF_CASE(14,r14)
OFF_CASE(15,r15)
OFF_CASE(16,r16)
OFF_CASE(17,r17)
OFF_CASE(18,r18)
OFF_CASE(19,r19)
OFF_CASE(20,r20)
OFF_CASE(21,r21)
OFF_CASE(22,r22)
OFF_CASE(23,r23)
OFF_CASE(24,r24)
OFF_CASE(25,r25)
OFF_CASE(26,r26)
OFF_CASE(27,r27)
OFF_CASE(28,r28)
OFF_CASE(29,r29)
OFF_CASE(30,r30)
OFF_CASE(31,r31)
default:
UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
break;
if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
off = unw.pt_regs_offsets[reg];
if (off < 0) {
UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
off = 0;
}
return off;
return (unsigned long) off;
}
static inline struct pt_regs *
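
The new pt_regs_off() above replaces the OFF_CASE switch with a lookup into the pt_regs_offsets[] table, using -1 as the "not a saved scratch register" sentinel. The same shape can be exercised stand-alone roughly like this (offsets below are invented for illustration; only the lookup pattern matches the kernel code):

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const short regs_offsets[8] = {
	[0] = -1,				/* r0 has no pt_regs slot */
	[1] = 0, [2] = 8, [3] = 16,		/* made-up offsets */
	[4] = -1, [5] = -1, [6] = -1, [7] = -1,	/* preserved registers */
};

static unsigned long regs_off(unsigned long reg)
{
	short off = -1;

	if (reg < ARRAY_SIZE(regs_offsets))
		off = regs_offsets[reg];
	if (off < 0) {
		fprintf(stderr, "bad scratch reg r%lu\n", reg);
		off = 0;
	}
	return (unsigned long) off;
}

int main(void)
{
	printf("r2 -> %lu, r5 -> %lu\n", regs_off(2), regs_off(5));
	return 0;
}
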
@@ -1416,7 +1417,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
else {
opc = UNW_INSN_MOVE_SCRATCH;
if (rval <= 11)
val = struct_offset(struct pt_regs, f6) + 16*(rval - 6);
val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
else
UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
__FUNCTION__, rval);
@@ -1429,11 +1430,11 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
else {
opc = UNW_INSN_MOVE_SCRATCH;
if (rval == 0)
val = struct_offset(struct pt_regs, b0);
val = offsetof(struct pt_regs, b0);
else if (rval == 6)
val = struct_offset(struct pt_regs, b6);
val = offsetof(struct pt_regs, b6);
else
val = struct_offset(struct pt_regs, b7);
val = offsetof(struct pt_regs, b7);
}
break;
@@ -1633,7 +1634,7 @@ build_script (struct unw_frame_info *info)
&& sr.curr.reg[UNW_REG_PSP].val != 0) {
/* new psp is sp plus frame size */
insn.opc = UNW_INSN_ADD;
insn.dst = struct_offset(struct unw_frame_info, psp)/8;
insn.dst = offsetof(struct unw_frame_info, psp)/8;
insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
script_emit(script, insn);
}
@@ -1767,14 +1768,13 @@ run_script (struct unw_script *script, struct unw_frame_info *state)
lazy_init:
off = unw.sw_off[val];
s[val] = (unsigned long) state->sw + off;
if (off >= struct_offset(struct switch_stack, r4)
&& off <= struct_offset(struct switch_stack, r7))
if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
/*
* We're initializing a general register: init NaT info, too. Note that
* the offset is a multiple of 8 which gives us the 3 bits needed for
* the type field.
*/
s[val+1] = (struct_offset(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
goto redo;
}
@@ -1864,7 +1864,7 @@ unw_unwind (struct unw_frame_info *info)
if ((pr & (1UL << pNonSys)) != 0)
num_regs = *info->cfm_loc & 0x7f; /* size of frame */
info->pfs_loc =
(unsigned long *) (info->pt + struct_offset(struct pt_regs, ar_pfs));
(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
} else
num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
@@ -130,11 +130,13 @@ struct pt_regs {
unsigned long ar_fpsr; /* floating point status (preserved) */
unsigned long r15; /* scratch */
/* The remaining registers are NOT saved for system calls. */
unsigned long r14; /* scratch */
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */
/* The following registers are saved by SAVE_REST: */
/* The following registers are saved by SAVE_REST: */
unsigned long r16; /* scratch */
unsigned long r17; /* scratch */
unsigned long r18; /* scratch */
@@ -155,8 +157,7 @@ struct pt_regs {
unsigned long ar_ccv; /* compare/exchange value (scratch) */
/*
* Floating point registers that the kernel considers
* scratch:
* Floating point registers that the kernel considers scratch:
*/
struct ia64_fpreg f6; /* scratch */
struct ia64_fpreg f7; /* scratch */
@@ -2,8 +2,8 @@
#define _ASM_IA64_PTRACE_OFFSETS_H
/*
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
* The "uarea" that can be accessed via PEEKUSER and POKEUSER is a
@@ -14,20 +14,16 @@
* unsigned long nat_bits;
* unsigned long empty1;
* struct ia64_fpreg f2; // f2-f5
* .
* .
* :
* struct ia64_fpreg f5;
* struct ia64_fpreg f10; // f10-f31
* .
* .
* :
* struct ia64_fpreg f31;
* unsigned long r4; // r4-r7
* .
* .
* :
* unsigned long r7;
* unsigned long b1; // b1-b5
* .
* .
* :
* unsigned long b5;
* unsigned long ar_ec;
* unsigned long ar_lc;
@@ -55,8 +51,7 @@
* unsigned long r10;
* unsigned long r11;
* unsigned long r16;
* .
* .
* :
* unsigned long r31;
* unsigned long ar_ccv;
* unsigned long ar_fpsr;