Commit 4af03459 authored by David Mosberger

ia64: Restructure pt_regs and optimize syscall path.

Patch by Rohit Seth, Fenghua Yu, and Arun Sharma:

Please find attached a patch for kernel entry/exit optimization. It is
based on the 2.5.69 kernel.

The main items covered by this patch are:
1) Support for 16-byte instructions per SDM 2.1 (CSD/SSD in pt_regs).
2) f10-f11 are added as additional scratch registers for the kernel's use.
3) Re-arrange pt_regs so that the system call path touches fewer cache lines,
and reduce scratch-register saving/restoring on that path (a simplified
sketch follows this list).
4) A few instruction reorderings in low-level code.
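
As a rough illustration of item 3: grouping the fields that the syscall path
must save and restore into one contiguous run lets entry/exit touch only a few
cache lines. The sketch below is hypothetical and simplified; the real layout
lives in include/asm-ia64/ptrace.h and differs in detail.

	struct pt_regs_sketch {			/* hypothetical, simplified */
		/* hot: saved & restored on every system call, kept contiguous */
		unsigned long cr_ipsr, cr_iip, cr_ifs;
		unsigned long ar_unat, ar_pfs, ar_rsc, ar_rnat, ar_bspstore, ar_fpsr;
		unsigned long pr, b0, loadrs;
		unsigned long r1, r12, r13, r15;
		/* cold: not saved on the syscall path (r8-r11 are restored only) */
		unsigned long b6, b7, ar_csd, ar_ssd;
		unsigned long r8, r9, r10, r11;
		/* ... remaining scratch state, r16-r31, ar_ccv, f6-f11 ... */
	};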
parent cb7b2a3b
......@@ -18,7 +18,7 @@ LDFLAGS_MODULE += -T arch/ia64/module.lds
AFLAGS_KERNEL := -mconstant-gp
EXTRA :=
cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \
cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f12-f15,f32-f127 \
-falign-functions=32 -frename-registers
CFLAGS_KERNEL := -mconstant-gp
......
......@@ -179,8 +179,10 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
* datasel ar.fdr(32:47)
*
* _st[(0+TOS)%8] f8
* _st[(1+TOS)%8] f9 (f8, f9 from ptregs)
* : : : (f10..f15 from live reg)
* _st[(1+TOS)%8] f9
* _st[(2+TOS)%8] f10
* _st[(3+TOS)%8] f11 (f8..f11 from ptregs)
* : : : (f12..f15 from live reg)
* : : :
* _st[(7+TOS)%8] f15 TOS=sw.top(bits11:13)
*
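
In C terms, the circular mapping in the comment above is an index rotation
modulo the eight-entry ia32 FP stack. A sketch, assuming fr8_st_map is derived
from TOS = sw.top (bits 11:13), as in the surrounding code:

	/* Map IA-64 register f8+n (n = 0..7) to its ia32 _st[] slot. */
	static inline int ia32_st_slot(int n, int fr8_st_map)
	{
		return (n + fr8_st_map) & 0x7;	/* (n + TOS) % 8 */
	}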
......@@ -262,8 +264,8 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
__put_user( 0, &save->magic); //#define X86_FXSR_MAGIC 0x0000
/*
* save f8 and f9 from pt_regs
* save f10..f15 from live register set
* save f8..f11 from pt_regs
* save f12..f15 from live register set
*/
/*
* Find the location where f8 has to go in fp reg stack. This depends on
......@@ -278,11 +280,11 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
ia64f2ia32f(fpregp, &ptp->f9);
copy_to_user(&save->_st[(1+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 10);
ia64f2ia32f(fpregp, &ptp->f10);
copy_to_user(&save->_st[(2+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 11);
ia64f2ia32f(fpregp, &ptp->f11);
copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 12);
copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 13);
......@@ -394,8 +396,8 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
asm volatile ( "mov ar.fdr=%0;" :: "r"(fdr));
/*
* restore f8, f9 onto pt_regs
* restore f10..f15 onto live registers
* restore f8..f11 onto pt_regs
* restore f12..f15 onto live registers
*/
/*
* Find the location where f8 has to go in fp reg stack. This depends on
......@@ -411,11 +413,11 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
ia32f2ia64f(&ptp->f8, fpregp);
copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia32f2ia64f(&ptp->f9, fpregp);
copy_from_user(fpregp, &save->_st[(2+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(10, fpregp);
ia32f2ia64f(&ptp->f10, fpregp);
copy_from_user(fpregp, &save->_st[(3+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(11, fpregp);
ia32f2ia64f(&ptp->f11, fpregp);
copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(12, fpregp);
copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
......@@ -738,11 +740,11 @@ restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 *sc, int *
#define COPY(ia64x, ia32x) err |= __get_user(regs->ia64x, &sc->ia32x)
#define copyseg_gs(tmp) (regs->r16 |= (unsigned long) tmp << 48)
#define copyseg_fs(tmp) (regs->r16 |= (unsigned long) tmp << 32)
#define copyseg_gs(tmp) (regs->r16 |= (unsigned long) (tmp) << 48)
#define copyseg_fs(tmp) (regs->r16 |= (unsigned long) (tmp) << 32)
#define copyseg_cs(tmp) (regs->r17 |= tmp)
#define copyseg_ss(tmp) (regs->r17 |= (unsigned long) tmp << 16)
#define copyseg_es(tmp) (regs->r16 |= (unsigned long) tmp << 16)
#define copyseg_ss(tmp) (regs->r17 |= (unsigned long) (tmp) << 16)
#define copyseg_es(tmp) (regs->r16 |= (unsigned long) (tmp) << 16)
#define copyseg_ds(tmp) (regs->r16 |= tmp)
#define COPY_SEG(seg) \
......
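
The parentheses added around tmp above matter whenever the macro argument is an
expression: the cast binds only to the first operand, and << outranks |. A
minimal stand-alone demonstration with hypothetical SHIFT_* macros (not from
this patch):

	#include <stdio.h>

	#define SHIFT_BAD(tmp)	((unsigned long) tmp << 16)
	#define SHIFT_GOOD(tmp)	((unsigned long) (tmp) << 16)

	int main(void)
	{
		unsigned a = 1, b = 2;
		/* expands to ((unsigned long) a) | (b << 16): prints 20001 */
		printf("%lx\n", SHIFT_BAD(a | b));
		/* expands to ((unsigned long) (a | b)) << 16: prints 30000 */
		printf("%lx\n", SHIFT_GOOD(a | b));
		return 0;
	}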
......@@ -60,30 +60,26 @@ ia32_load_segment_descriptors (struct task_struct *task)
regs->r27 = load_desc(regs->r16 >> 0); /* DSD */
regs->r28 = load_desc(regs->r16 >> 32); /* FSD */
regs->r29 = load_desc(regs->r16 >> 48); /* GSD */
task->thread.csd = load_desc(regs->r17 >> 0); /* CSD */
task->thread.ssd = load_desc(regs->r17 >> 16); /* SSD */
regs->ar_csd = load_desc(regs->r17 >> 0); /* CSD */
regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */
}
void
ia32_save_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd;
unsigned long eflag, fsr, fcr, fir, fdr;
asm ("mov %0=ar.eflag;"
"mov %1=ar.fsr;"
"mov %2=ar.fcr;"
"mov %3=ar.fir;"
"mov %4=ar.fdr;"
"mov %5=ar.csd;"
"mov %6=ar.ssd;"
: "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr), "=r"(csd), "=r"(ssd));
: "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr));
t->thread.eflag = eflag;
t->thread.fsr = fsr;
t->thread.fcr = fcr;
t->thread.fir = fir;
t->thread.fdr = fdr;
t->thread.csd = csd;
t->thread.ssd = ssd;
ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}
......@@ -91,7 +87,7 @@ ia32_save_state (struct task_struct *t)
void
ia32_load_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd, tssd;
unsigned long eflag, fsr, fcr, fir, fdr, tssd;
struct pt_regs *regs = ia64_task_regs(t);
int nr = get_cpu(); /* LDT and TSS depend on CPU number: */
......@@ -100,8 +96,6 @@ ia32_load_state (struct task_struct *t)
fcr = t->thread.fcr;
fir = t->thread.fir;
fdr = t->thread.fdr;
csd = t->thread.csd;
ssd = t->thread.ssd;
tssd = load_desc(_TSS(nr)); /* TSSD */
asm volatile ("mov ar.eflag=%0;"
......@@ -109,9 +103,7 @@ ia32_load_state (struct task_struct *t)
"mov ar.fcr=%2;"
"mov ar.fir=%3;"
"mov ar.fdr=%4;"
"mov ar.csd=%5;"
"mov ar.ssd=%6;"
:: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr), "r"(csd), "r"(ssd));
:: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr));
current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
......
......@@ -1798,12 +1798,16 @@ put_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switc
ia64f2ia32f(f, &ptp->f9);
break;
case 2:
ia64f2ia32f(f, &ptp->f10);
break;
case 3:
ia64f2ia32f(f, &ptp->f11);
break;
case 4:
case 5:
case 6:
case 7:
ia64f2ia32f(f, &swp->f10 + (regno - 2));
ia64f2ia32f(f, &swp->f12 + (regno - 4));
break;
}
copy_to_user(reg, f, sizeof(*reg));
......@@ -1824,12 +1828,16 @@ get_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switc
copy_from_user(&ptp->f9, reg, sizeof(*reg));
break;
case 2:
copy_from_user(&ptp->f10, reg, sizeof(*reg));
break;
case 3:
copy_from_user(&ptp->f11, reg, sizeof(*reg));
break;
case 4:
case 5:
case 6:
case 7:
copy_from_user(&swp->f10 + (regno - 2), reg, sizeof(*reg));
copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
break;
}
return;
......
......@@ -5,10 +5,13 @@
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999, 2002-2003
* Asit Mallick <Asit.K.Mallick@intel.com>
* Don Dugger <Don.Dugger@intel.com>
* Suresh Siddha <suresh.b.siddha@intel.com>
* Fenghua Yu <fenghua.yu@intel.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@intel.com>
* Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
*/
/*
* ia64_switch_to now places correct virtual mapping in TR2 for
......@@ -50,7 +53,10 @@
ENTRY(ia64_execve)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3)
alloc loc1=ar.pfs,3,2,4,0
mov loc0=rp
/* Return with pt_regs saved. This is special because an ia32 application
* needs its scratch registers after returning from execve.
*/
movl loc0=ia64_ret_from_execve_syscall
.body
mov out0=in0 // filename
;; // stop bit between alloc and call
......@@ -74,19 +80,18 @@ ENTRY(ia64_execve)
* this executes in less than 20 cycles even on Itanium, so it's not worth
* optimizing for...).
*/
mov ar.unat=0; mov ar.lc=0;
mov r4=0; mov f2=f0; mov b1=r0
mov r5=0; mov f3=f0; mov b2=r0
mov r6=0; mov f4=f0; mov b3=r0
mov r7=0; mov f5=f0; mov b4=r0
mov ar.unat=0; mov f10=f0; mov b5=r0
ldf.fill f11=[sp]; ldf.fill f12=[sp]; mov f13=f0
ldf.fill f12=[sp]; mov f13=f0; mov b5=r0
ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0
ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0
ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0
ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
mov ar.lc=0
br.ret.sptk.many rp
END(ia64_execve)
......@@ -302,8 +307,6 @@ GLOBAL_ENTRY(save_switch_stack)
st8 [r14]=r21,SW(B1)-SW(B0) // save b0
st8 [r15]=r23,SW(B3)-SW(B2) // save b2
mov r25=b4
stf.spill [r2]=f10,32
stf.spill [r3]=f11,32
mov r26=b5
;;
st8 [r14]=r22,SW(B4)-SW(B1) // save b1
......@@ -402,9 +405,6 @@ ENTRY(load_switch_stack)
ldf.fill f4=[r14],32
ldf.fill f5=[r15],32
;;
ldf.fill f10=[r14],32
ldf.fill f11=[r15],32
;;
ldf.fill f12=[r14],32
ldf.fill f13=[r15],32
;;
......@@ -526,7 +526,7 @@ strace_save_retval:
.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
ia64_strace_leave_kernel:
br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch return value
.rety: br.cond.sptk ia64_leave_kernel
.rety: br.cond.sptk ia64_leave_from_syscall
strace_error:
ld8 r3=[r2] // load pt_regs.r8
......@@ -579,8 +579,9 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
END(ia64_ret_from_syscall)
// fall through
GLOBAL_ENTRY(ia64_leave_kernel)
GLOBAL_ENTRY(ia64_leave_from_syscall)
PT_REGS_UNWIND_INFO(0)
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
// work.need_resched etc. mustn't get changed by this CPU before it returns to
// user- or fsys-mode:
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
......@@ -599,102 +600,243 @@ GLOBAL_ENTRY(ia64_leave_kernel)
(pUStk) adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
;;
#endif /* CONFIG_PREEMPT */
.work_processed:
(p6) ld4 r18=[r17] // load current_thread_info()->flags
adds r2=PT(R8)+16,r12
adds r3=PT(R9)+16,r12
.work_processed_syscall:
(p6) ld4 r31=[r17] // load current_thread_info()->flags
adds r16=PT(LOADRS)+16,r12
adds r18=PT(AR_BSPSTORE)+16, r12
;;
ld8 r19=[r16] // load ar.rsc value for "loadrs"
ld8 rARBSPSTORE=[r18],16// load ar.bspstore (may be garbage)
(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
;;
mov r3=r12
mov f6=f0 // clear f6
(p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending?
;;
mov ar.ccv=r0 // clear ar.ccv
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET+16+IA64_PT_REGS_SIZE-IA64_STK_OFFSET,r3
(p6) br.cond.spnt .work_pending
;;
adds r16=PT(R8)+16,r12
adds r17=PT(R9)+16,r12
;;
// start restoring the state saved on the kernel stack (struct pt_regs):
ld8.fill r8=[r2],16
ld8.fill r9=[r3],16
(p6) and r19=TIF_WORK_MASK,r18 // any work other than TIF_SYSCALL_TRACE?
ld8.fill r8=[r16],16
ld8.fill r9=[r17],16
(pUStk) mov r3=1
;;
ld8.fill r10=[r2],16
ld8.fill r11=[r3],16
ld8.fill r10=[r16],16
ld8.fill r11=[r17],16
mov f7=f0 // clear f7
;;
ld8 rCRIPSR=[r16],16 // load cr.ipsr
ld8 rCRIIP=[r17],16 // load cr.iip
;;
ld8 rCRIFS=[r16],16 // load cr.ifs
ld8 rARUNAT=[r17],16 // load ar.unat
cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
;;
rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
invala // invalidate ALAT
mov f9=f0 // clear f9
(pUStk) st1 [r14]=r3
mov ar.csd=r0
mov f8=f0 // clear f8
;;
ld8 rARPFS=[r16],16 // load ar.pfs
ld8 rARRSC=[r17],PT(PR)-PT(AR_RSC) // load ar.rsc
mov f10=f0 // clear f10
;;
ld8 rARRNAT=[r16],PT(B0)-PT(AR_RNAT) // load ar.rnat (may be garbage)
ld8 rARPR=[r17],PT(R1)-PT(PR) // load predicates
mov f11=f0 // clear f11
;;
ld8 rB0=[r16],PT(R12)-PT(B0) // load b0
ld8.fill r1=[r17],16 // load r1
mov b6=r0 // clear b6
;;
ld8.fill r12=[r16],16
ld8.fill r13=[r17],16
mov r2=r0 // clear r2
;;
ld8 rR1=[r16] // ar.fpsr
ld8.fill r15=[r17] // load r15
mov b7=r0 // clear b7
;;
mov r16=ar.bsp // get existing backing store pointer
srlz.i // ensure interruption collection is off
;;
mov ar.ssd=r0
movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
;;
ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
mov r14=r0 // clear r14
;;
(pKStk) br.cond.dpnt skip_rbs_switch
/*
* Restore user backing store.
*
* NOTE: alloc, loadrs, and cover can't be predicated.
*/
cover // add current frame into dirty partition
;;
shr.u r18=r19,16 // get byte size of existing "dirty" partition
;;
mov r19=ar.bsp // get new backing store pointer
sub r16=r16,r18 // krbs = old bsp - size of dirty partition
cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs
;;
sub r19=r19,r16 // calculate total byte size of dirty partition
add r18=64,r18 // don't force in0-in7 into memory...
;;
shl r19=r19,16 // shift size of dirty partition into loadrs position
br.few dont_preserve_current_frame
END(ia64_leave_from_syscall)
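/*
 * The shr.u/shl by 16 above reflects how the size of the dirty RSE partition
 * travels through pt_regs: the byte count sits in the "loadrs" field, bits
 * 16..29 of the ar.rsc image. A C sketch of that packing, with the field
 * width assumed per the IA-64 RSE definition:
 *
 *	#define LOADRS_SHIFT	16
 *	#define LOADRS_MASK	0x3fffUL	// 14-bit field, bits 16..29
 *
 *	static inline unsigned long loadrs_to_bytes(unsigned long rsc_image)
 *	{
 *		return (rsc_image >> LOADRS_SHIFT) & LOADRS_MASK;
 *	}
 *
 *	static inline unsigned long bytes_to_loadrs(unsigned long dirty_bytes)
 *	{
 *		return dirty_bytes << LOADRS_SHIFT;
 *	}
 */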
GLOBAL_ENTRY(ia64_ret_from_execve_syscall)
PT_REGS_UNWIND_INFO(0)
cmp.ge p6,p7=r8,r0 // syscall executed successfully?
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
;;
.mem.offset 0,0
(p6) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
.mem.offset 8,0
(p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
END(ia64_ret_from_execve_syscall)
// fall through
GLOBAL_ENTRY(ia64_leave_kernel)
PT_REGS_UNWIND_INFO(0)
cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
// work.need_resched etc. mustn't get changed by this CPU before it returns to
// user- or fsys-mode:
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
#ifdef CONFIG_PREEMPT
rsm psr.i // disable interrupts
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
(pKStk) ld4 r21=[r20] // preempt_count ->r21
;;
(pKStk) cmp4.eq p6,p0=r21,r0 // p6 <- preempt_count == 0
;;
#else /* CONFIG_PREEMPT */
(pUStk) rsm psr.i
;;
(pUStk) adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
;;
#endif /* CONFIG_PREEMPT */
.work_processed_kernel:
(p6) ld4 r31=[r17] // load current_thread_info()->flags
adds r20=PT(CR_IPSR)+16,r12
adds r21=PT(PR)+16,r12
adds r2=PT(B6)+16,r12
adds r3=PT(R16)+16,r12
;;
ld8 r28=[r2],8 // b6
ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
adds r29=PT(R24)+16,r12
adds r30=PT(AR_CCV)+16,r12
;;
lfetch [r20]
lfetch [r21]
(p6) and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
;;
ld8.fill r24=[r29]
ld8 r15=[r30] //ar.ccv
(p6) cmp4.ne.unc p6,p0=r19, r0 // any special work pending?
;;
ld8.fill r16=[r2],16
ld8.fill r17=[r3],16
ld8 r29=[r2],16 // b7
ld8 r30=[r3],16 // ar.csd
(p6) br.cond.spnt .work_pending
;;
ld8 r31=[r2],16 // ar.ssd
ld8.fill r8=[r3],16
;;
ld8.fill r9=[r2],16
ld8.fill r10=[r3],PT(R17)-PT(R10)
;;
ld8.fill r11=[r2],PT(R18)-PT(R11)
ld8.fill r17=[r3],16
;;
ld8.fill r18=[r2],16
ld8.fill r19=[r3],16
;;
ld8.fill r20=[r2],16
ld8.fill r21=[r3],16
mov ar.csd=r30
mov ar.ssd=r31
;;
ld8.fill r22=[r2],16
ld8.fill r23=[r3],16
;;
ld8.fill r24=[r2],16
ld8.fill r25=[r3],16
;;
ld8.fill r26=[r2],16
ld8.fill r27=[r3],16
rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
invala // invalidate ALAT
;;
ld8.fill r28=[r2],16
ld8.fill r29=[r3],16
ld8.fill r22=[r2],24
ld8.fill r23=[r3],24
mov b6=r28
;;
ld8.fill r30=[r2],16
ld8.fill r31=[r3],16
ld8.fill r25=[r2],16
ld8.fill r26=[r3],16
mov b7=r29
;;
rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
invala // invalidate ALAT
ld8.fill r27=[r2],16
ld8.fill r28=[r3],16
;;
ld8 r1=[r2],16 // ar.ccv
ld8 r13=[r3],16 // ar.fpsr
ld8.fill r29=[r2],16
ld8.fill r30=[r3],24
;;
ld8 r14=[r2],16 // b0
ld8 r15=[r3],16+8 // b7
ld8.fill r31=[r2],PT(F9)-PT(R31)
adds r3=PT(F10)-PT(F6),r3
;;
ldf.fill f6=[r2],32
ldf.fill f7=[r3],32
ldf.fill f9=[r2],PT(F6)-PT(F9)
ldf.fill f10=[r3],PT(F8)-PT(F10)
;;
ldf.fill f8=[r2],32
ldf.fill f9=[r3],32
ldf.fill f6=[r2],PT(F7)-PT(F6)
;;
mov ar.ccv=r1
mov ar.fpsr=r13
mov b0=r14
ldf.fill f7=[r2],PT(F11)-PT(F7)
ldf.fill f8=[r3],32
;;
srlz.i // ensure interruption collection is off
mov b7=r15
mov ar.ccv=r15
;;
bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
;;
ldf.fill f11=[r2]
(pUStk) mov r18=IA64_KR(CURRENT) // Itanium 2: 12 cycle read latency
adds r16=16,r12
adds r17=24,r12
adds r16=PT(CR_IPSR)+16,r12
adds r17=PT(CR_IIP)+16,r12
;;
ld8 rCRIPSR=[r16],16 // load cr.ipsr
ld8 rCRIIP=[r17],16 // load cr.iip
;;
ld8 rCRIFS=[r16],16 // load cr.ifs
ld8 rARUNAT=[r17],16 // load ar.unat
cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
;;
ld8 rARPFS=[r16],16 // load ar.pfs
ld8 rARRSC=[r17],16 // load ar.rsc
cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
;;
ld8 rARRNAT=[r16],16 // load ar.rnat (may be garbage)
ld8 rARBSPSTORE=[r17],16 // load ar.bspstore (may be garbage)
ld8 rARBSPSTORE=[r17],16// load ar.bspstore (may be garbage)
;;
ld8 rARPR=[r16],16 // load predicates
ld8 rB6=[r17],16 // load b6
ld8 rB0=[r17],16 // load b0
;;
ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
ld8.fill r1=[r17],16 // load r1
;;
ld8.fill r2=[r16],16
ld8.fill r3=[r17],16
;;
ld8.fill r12=[r16],16
ld8.fill r13=[r17],16
(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
;;
ld8.fill r14=[r16]
ld8.fill r15=[r17]
ld8 rR1=[r16],16 // ar.fpsr
ld8.fill r15=[r17],16
;;
ld8.fill r14=[r16],16
ld8.fill r2=[r17]
(pUStk) mov r17=1
;;
ld8.fill r3=[r16]
(pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
shr.u r18=r19,16 // get byte size of existing "dirty" partition
;;
......@@ -762,7 +904,7 @@ rse_clear_invalid:
}{ .mib
mov loc3=0
mov loc4=0
(pRecurse) br.call.sptk.many b6=rse_clear_invalid
(pRecurse) br.call.sptk.many b0=rse_clear_invalid
}{ .mfi // cycle 2
mov loc5=0
......@@ -771,7 +913,7 @@ rse_clear_invalid:
}{ .mib
mov loc6=0
mov loc7=0
(pReturn) br.ret.sptk.many b6
(pReturn) br.ret.sptk.many b0
}
#else /* !CONFIG_ITANIUM */
alloc loc0=ar.pfs,2,Nregs-2,2,0
......@@ -786,14 +928,14 @@ rse_clear_invalid:
mov loc5=0
mov loc6=0
mov loc7=0
(pRecurse) br.call.sptk.many b6=rse_clear_invalid
(pRecurse) br.call.sptk.few b0=rse_clear_invalid
;;
mov loc8=0
mov loc9=0
cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
mov loc10=0
mov loc11=0
(pReturn) br.ret.sptk.many b6
(pReturn) br.ret.sptk.many b0
#endif /* !CONFIG_ITANIUM */
# undef pRecurse
# undef pReturn
......@@ -803,21 +945,26 @@ rse_clear_invalid:
loadrs
;;
skip_rbs_switch:
mov b6=rB6
(pSys) mov r19=r0 // clear r19
mov b0=rB0
mov ar.pfs=rARPFS
(pUStk) mov ar.bspstore=rARBSPSTORE
(p9) mov cr.ifs=rCRIFS
(pSys) mov r16=r0 // clear r16
mov cr.ipsr=rCRIPSR
mov ar.fpsr=rR1
(pSys) mov r17=r0 // clear r17
mov cr.iip=rCRIIP
;;
(pUStk) mov ar.rnat=rARRNAT // must happen with RSE in lazy mode
(pSys) mov r18=r0 // clear r18
mov ar.rsc=rARRSC
mov ar.unat=rARUNAT
mov pr=rARPR,-1
rfi
.work_pending:
tbit.z p6,p0=r18,TIF_NEED_RESCHED // current_thread_info()->need_resched==0?
tbit.z p6,p0=r31,TIF_NEED_RESCHED // current_thread_info()->need_resched==0?
(p6) br.cond.sptk.few .notify
#ifdef CONFIG_PREEMPT
(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
......@@ -836,12 +983,14 @@ skip_rbs_switch:
;;
(pKStk) st4 [r20]=r0 // preempt_count() <- 0
#endif
br.cond.sptk.many .work_processed // re-check
(pLvSys)br.cond.sptk.many .work_processed_syscall // re-check
br.cond.sptk.many .work_processed_kernel // re-check
.notify:
br.call.spnt.many rp=notify_resume_user
.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0
br.cond.sptk.many .work_processed // don't re-check
(pLvSys)br.cond.sptk.many .work_processed_syscall // don't re-check
br.cond.sptk.many .work_processed_kernel // don't re-check
END(ia64_leave_kernel)
ENTRY(handle_syscall_error)
......@@ -866,7 +1015,7 @@ ENTRY(handle_syscall_error)
;;
.mem.offset 0,0; st8.spill [r2]=r9 // store errno in pt_regs.r8 and set unat bit
.mem.offset 8,0; st8.spill [r3]=r10 // store error indication in pt_regs.r10 and set unat bit
br.cond.sptk ia64_leave_kernel
br.cond.sptk ia64_leave_from_syscall
END(handle_syscall_error)
/*
......@@ -952,6 +1101,22 @@ ENTRY(sys_rt_sigreturn)
.body
cmp.eq pNonSys,pSys=r0,r0 // sigreturn isn't a normal syscall...
;;
/* After the signal handler, the live registers f6-f11 are restored to the
* values of the previously executing context for synchronous signals (from
* exceptions), or cleared to 0 for asynchronous signals (from syscalls).
* These live registers are then put into pt_regs on the way back to user space.
*/
adds r16=PT(F6)+32,sp
adds r17=PT(F7)+32,sp
;;
stf.spill [r16]=f6,32
stf.spill [r17]=f7,32
;;
stf.spill [r16]=f8,32
stf.spill [r17]=f9,32
;;
stf.spill [r16]=f10
stf.spill [r17]=f11
adds out0=16,sp // out0 = &sigscratch
br.call.sptk.many rp=ia64_rt_sigreturn
.ret19: .restore sp 0
......
......@@ -4,6 +4,7 @@
* Preserved registers that are shared between code in ivt.S and entry.S. Be
* careful not to step on these!
*/
#define pLvSys p1 /* set to 1 if leaving via a syscall; otherwise 0 */
#define pKStk p2 /* will leave_kernel return to kernel-stacks? */
#define pUStk p3 /* will leave_kernel return to user-stacks? */
#define pSys p4 /* are we processing a (synchronous) system call? */
......
......@@ -4,6 +4,11 @@
* Copyright (C) 1998-2001 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger <davidm@hpl.hp.com>
* Copyright (C) 2000, 2002-2003 Intel Co
* Asit Mallick <asit.k.mallick@intel.com>
* Suresh Siddha <suresh.b.siddha@intel.com>
* Kenneth Chen <kenneth.w.chen@intel.com>
* Fenghua Yu <fenghua.yu@intel.com>
*
* 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
* 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
......@@ -632,6 +637,23 @@ END(daccess_bit)
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
/* System call entry/exit saves/restores only part of pt_regs, i.e. no scratch
* registers are saved/restored except r15, which contains the syscall number
* and must be saved on entry. This optimization rests on the assumption that
* applications invoke system calls only through the glibc interface, which
* does not use scratch registers after the break into the kernel.
* The registers saved/restored during system call entry/exit are as follows:
*
* Registers to be saved & restored:
* CR registers: cr_ipsr, cr_iip, cr_ifs
* AR registers: ar_unat, ar_pfs, ar_rsc, ar_rnat, ar_bspstore, ar_fpsr
* others: pr, b0, loadrs, r1, r12, r13, r15
* Registers to be restored only:
* r8-r11: output values from the system call.
*
* During system call exit, scratch registers (including r15) are
* modified/cleared to prevent leaking bits from kernel to user level.
*/
DBG_FAULT(11)
mov r16=cr.iim
mov r17=__IA64_BREAK_SYSCALL
......@@ -639,23 +661,29 @@ ENTRY(break_fault)
;;
cmp.eq p0,p7=r16,r17 // is this a system call? (p7 <- false, if so)
(p7) br.cond.spnt non_syscall
SAVE_MIN // uses r31; defines r2:
ssm psr.ic | PSR_DEFAULT_BITS
;;
srlz.i // guarantee that interruption collection is on
cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
mov r21=ar.fpsr;
mov rCRIPSR=cr.ipsr;
mov rR1=r1;
mov rARUNAT=ar.unat;
mov rARRSC=ar.rsc;
mov rARPFS=ar.pfs;
mov rCRIIP=cr.iip;
mov r1=IA64_KR(CURRENT); /* r1 = current (physical) */
;;
(p15) ssm psr.i // restore psr.i
adds r8=(IA64_PT_REGS_R8_OFFSET-IA64_PT_REGS_R16_OFFSET),r2
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r1;
;;
stf8 [r8]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
adds r3=8,r2 // set up second base pointer for SAVE_REST
ld1 r17=[r16]; /* load current->thread.on_ustack flag */
st1 [r16]=r0; /* clear current->thread.on_ustack flag */
/* switch from user to kernel RBS: */
;;
invala;
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? (psr.cpl==0) */
;;
mov rCRIFS=r0
MINSTATE_START_SAVE_MIN_VIRT
br.call.sptk.many b7=break_fault_setup
;;
SAVE_REST
br.call.sptk.many rp=demine_args // clear NaT bits in (potential) syscall args
mov r3=255
adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
;;
......@@ -673,8 +701,8 @@ ENTRY(break_fault)
// arrange things so we skip over break instruction when returning:
adds r16=16,sp // get pointer to cr_ipsr
adds r17=24,sp // get pointer to cr_iip
adds r16=PT(CR_IPSR)+16,sp // get pointer to cr_ipsr
adds r17=PT(CR_IIP)+16,sp // get pointer to cr_iip
add r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld8 r18=[r16] // fetch cr_ipsr
......@@ -696,39 +724,11 @@ ENTRY(break_fault)
st8 [r16]=r18 // store new value for cr.isr
(p8) br.call.sptk.many b6=b6 // ignore this return addr
br.cond.sptk ia64_trace_syscall
// NOT REACHED
END(break_fault)
ENTRY_MIN_ALIGN(demine_args)
alloc r2=ar.pfs,8,0,0,0
tnat.nz p8,p0=in0
tnat.nz p9,p0=in1
;;
(p8) mov in0=-1
tnat.nz p10,p0=in2
tnat.nz p11,p0=in3
(p9) mov in1=-1
tnat.nz p12,p0=in4
tnat.nz p13,p0=in5
;;
(p10) mov in2=-1
tnat.nz p14,p0=in6
tnat.nz p15,p0=in7
(p11) mov in3=-1
tnat.nz p8,p0=r15 // demining r15 is not strictly required, but it is safer
(p12) mov in4=-1
(p13) mov in5=-1
;;
(p14) mov in6=-1
(p15) mov in7=-1
(p8) mov r15=-1
br.ret.sptk.many rp
END(demine_args)
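/*
 * The tnat.nz/mov pairs (in demine_args above, and inlined into
 * break_fault_setup below) implement the following, expressed as a C sketch;
 * arg_is_nat is a hypothetical stand-in for the tnat.nz test:
 *
 *	// Replace any syscall argument whose NaT bit is set with -1, so
 *	// that a NaT can never be consumed by kernel C code.
 *	static void demine_syscall_args(long arg[8], const int arg_is_nat[8])
 *	{
 *		int i;
 *
 *		for (i = 0; i < 8; i++)
 *			if (arg_is_nat[i])
 *				arg[i] = -1;
 *	}
 */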
.org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
......@@ -736,7 +736,6 @@ ENTRY(interrupt)
DBG_FAULT(12)
mov r31=pr // prepare to save predicates
;;
SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
ssm psr.ic | PSR_DEFAULT_BITS
;;
......@@ -768,6 +767,90 @@ END(interrupt)
DBG_FAULT(14)
FAULT(14)
/*
* There is no particular reason for this code to be here, other than that
* there happens to be space here that would go unused otherwise. If this
* fault ever gets "unreserved", simply move the following code to a more
* suitable spot...
*/
ENTRY(break_fault_setup)
alloc r19=ar.pfs,8,0,0,0
tnat.nz p8,p0=in0
add r16=PT(CR_IPSR),r1 /* initialize first base pointer */
;;
st8 [r16]=rCRIPSR,16; /* save cr.ipsr */
adds r17=PT(CR_IIP),r1; /* initialize second base pointer */
;;
(p8) mov in0=-1
tnat.nz p9,p0=in1
st8 [r17]=rCRIIP,16; /* save cr.iip */
mov rCRIIP=b0;
(pKStk) mov r18=r0; /* make sure r18 isn't NaT */
;;
(p9) mov in1=-1
tnat.nz p10,p0=in2
st8 [r16]=rCRIFS,16; /* save cr.ifs */
st8 [r17]=rARUNAT,16; /* save ar.unat */
(pUStk) sub r18=r18,rKRBS; /* r18=RSE.ndirty*8 */
;;
st8 [r16]=rARPFS,16; /* save ar.pfs */
st8 [r17]=rARRSC,16; /* save ar.rsc */
tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT
;; /* avoid RAW on r16 & r17 */
(p10) mov in2=-1
nop.f 0
tnat.nz p11,p0=in3
(pKStk) adds r16=16,r16; /* skip over ar_rnat field */
(pKStk) adds r17=16,r17; /* skip over ar_bspstore field */
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */
;;
(p11) mov in3=-1
tnat.nz p12,p0=in4
(pUStk) st8 [r16]=rARRNAT,16; /* save ar.rnat */
(pUStk) st8 [r17]=rARBSPSTORE,16; /* save ar.bspstore */
;;
(p12) mov in4=-1
tnat.nz p13,p0=in5
st8 [r16]=rARPR,16; /* save predicates */
st8 [r17]=rCRIIP,16; /* save b0 */
dep r14=-1,r0,61,3;
;;
st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */
st8.spill [r17]=rR1,16; /* save original r1 */
adds r2=IA64_PT_REGS_R16_OFFSET,r1;
;;
(p13) mov in5=-1
tnat.nz p14,p0=in6
.mem.offset 0,0; st8.spill [r16]=r12,16;
.mem.offset 8,0; st8.spill [r17]=r13,16;
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */
;;
(p14) mov in6=-1
tnat.nz p8,p0=in7
.mem.offset 0,0; st8 [r16]=r21,16; /* ar.fpsr */
.mem.offset 8,0; st8.spill [r17]=r15,16;
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */
;;
mov r13=IA64_KR(CURRENT); /* establish `current' */
movl r1=__gp; /* establish kernel global pointer */
;;
MINSTATE_END_SAVE_MIN_VIRT
tnat.nz p9,p0=r15
(p8) mov in7=-1
ssm psr.ic | PSR_DEFAULT_BITS
movl r17=FPSR_DEFAULT
adds r8=(IA64_PT_REGS_R8_OFFSET-IA64_PT_REGS_R16_OFFSET),r2
;;
srlz.i // guarantee that interruption collection is on
cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
(p9) mov r15=-1
(p15) ssm psr.i // restore psr.i
mov.m ar.fpsr=r17
stf8 [r8]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
br.ret.sptk.many b7
END(break_fault_setup)
.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
......@@ -819,90 +902,6 @@ END(dispatch_illegal_op_fault)
DBG_FAULT(16)
FAULT(16)
#ifdef CONFIG_IA32_SUPPORT
/*
* There is no particular reason for this code to be here, other than that
* there happens to be space here that would go unused otherwise. If this
* fault ever gets "unreserved", simply move the following code to a more
* suitable spot...
*/
// IA32 interrupt entry point
ENTRY(dispatch_to_ia32_handler)
SAVE_MIN
;;
mov r14=cr.isr
ssm psr.ic | PSR_DEFAULT_BITS
;;
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i
adds r3=8,r2 // Base pointer for SAVE_REST
;;
SAVE_REST
;;
mov r15=0x80
shr r14=r14,16 // Get interrupt number
;;
cmp.ne p6,p0=r14,r15
(p6) br.call.dpnt.many b6=non_ia32_syscall
adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
;;
cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
;;
alloc r15=ar.pfs,0,0,6,0 // must be first in an insn group
;;
ld4 r8=[r14],8 // r8 == eax (syscall number)
mov r15=250 // number of entries in ia32 system call table
;;
cmp.ltu.unc p6,p7=r8,r15
ld4 out1=[r14],8 // r9 == ecx
;;
ld4 out2=[r14],8 // r10 == edx
;;
ld4 out0=[r14] // r11 == ebx
adds r14=(IA64_PT_REGS_R8_OFFSET-(8*3)) + 16,sp
;;
ld4 out5=[r14],8 // r13 == ebp
;;
ld4 out3=[r14],8 // r14 == esi
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld4 out4=[r14] // r15 == edi
movl r16=ia32_syscall_table
;;
(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
ld4 r2=[r2] // r2 = current_thread_info()->flags
;;
ld8 r16=[r16]
tbit.z p8,p0=r2,TIF_SYSCALL_TRACE
;;
mov b6=r16
movl r15=ia32_ret_from_syscall
;;
mov rp=r15
(p8) br.call.sptk.many b6=b6
br.cond.sptk ia32_trace_syscall
non_ia32_syscall:
alloc r15=ar.pfs,0,0,2,0
mov out0=r14 // interrupt #
add out1=16,sp // pointer to pt_regs
;; // avoid WAW on CFM
br.call.sptk.many rp=ia32_bad_interrupt
.ret1: movl r15=ia64_leave_kernel
;;
mov rp=r15
br.ret.sptk.many rp
END(dispatch_to_ia32_handler)
#endif /* CONFIG_IA32_SUPPORT */
.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
......@@ -1438,3 +1437,89 @@ END(ia32_interrupt)
// 0x7f00 Entry 67 (size 16 bundles) Reserved
DBG_FAULT(67)
FAULT(67)
#ifdef CONFIG_IA32_SUPPORT
/*
* There is no particular reason for this code to be here, other than that
* there happens to be space here that would go unused otherwise. If this
* fault ever gets "unreserved", simply move the following code to a more
* suitable spot...
*/
// IA32 interrupt entry point
ENTRY(dispatch_to_ia32_handler)
SAVE_MIN
;;
mov r14=cr.isr
ssm psr.ic | PSR_DEFAULT_BITS
;;
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i
adds r3=8,r2 // Base pointer for SAVE_REST
;;
SAVE_REST
;;
mov r15=0x80
shr r14=r14,16 // Get interrupt number
;;
cmp.ne p6,p0=r14,r15
(p6) br.call.dpnt.many b6=non_ia32_syscall
adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
;;
cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
ld8 r8=[r14] // get r8
;;
st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
;;
alloc r15=ar.pfs,0,0,6,0 // must be first in an insn group
;;
ld4 r8=[r14],8 // r8 == eax (syscall number)
mov r15=250 // number of entries in ia32 system call table
;;
cmp.ltu.unc p6,p7=r8,r15
ld4 out1=[r14],8 // r9 == ecx
;;
ld4 out2=[r14],8 // r10 == edx
;;
ld4 out0=[r14] // r11 == ebx
adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
;;
ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp
;;
ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld4 out4=[r14] // r15 == edi
movl r16=ia32_syscall_table
;;
(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
ld4 r2=[r2] // r2 = current_thread_info()->flags
;;
ld8 r16=[r16]
tbit.z p8,p0=r2,TIF_SYSCALL_TRACE
;;
mov b6=r16
movl r15=ia32_ret_from_syscall
;;
mov rp=r15
(p8) br.call.sptk.many b6=b6
br.cond.sptk ia32_trace_syscall
non_ia32_syscall:
alloc r15=ar.pfs,0,0,2,0
mov out0=r14 // interrupt #
add out1=16,sp // pointer to pt_regs
;; // avoid WAW on CFM
br.call.sptk.many rp=ia32_bad_interrupt
.ret1: movl r15=ia64_leave_kernel
;;
mov rp=r15
br.ret.sptk.many rp
END(dispatch_to_ia32_handler)
#endif /* CONFIG_IA32_SUPPORT */
......@@ -18,7 +18,7 @@
#define rARRNAT r24
#define rARBSPSTORE r23
#define rKRBS r22
#define rB6 r21
#define rB0 r21
#define rR1 r20
/*
......@@ -110,20 +110,21 @@
* we can pass interruption state as arguments to a handler.
*/
#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
mov rARRSC=ar.rsc; /* M */ \
mov rARUNAT=ar.unat; /* M */ \
mov rR1=r1; /* A */ \
MINSTATE_GET_CURRENT(r1); /* M (or M;;I) */ \
mov rARUNAT=ar.unat; /* M */ \
mov rCRIPSR=cr.ipsr; /* M */ \
mov rARPFS=ar.pfs; /* I */ \
mov rCRIIP=cr.iip; /* M */ \
mov rB6=b6; /* I */ /* rB6 = branch reg 6 */ \
mov r21=ar.fpsr; /* M */ \
COVER; /* B;; (or nothing) */ \
;; \
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r1; \
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
;; \
ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
/* switch from user to kernel RBS: */ \
;; \
invala; /* M */ \
......@@ -131,69 +132,73 @@
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? (psr.cpl==0) */ \
;; \
MINSTATE_START_SAVE_MIN \
add r17=L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ \
;; \
st8 [r1]=rCRIPSR; /* save cr.ipsr */ \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
add r16=16,r1; /* initialize first base pointer */ \
adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
adds r16=PT(CR_IPSR),r1; \
;; \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
st8 [r16]=rCRIPSR; /* save cr.ipsr */ \
;; \
lfetch.fault.excl.nt1 [r17]; \
adds r17=8,r1; /* initialize second base pointer */ \
tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT; \
mov rCRIPSR=b0 \
;; \
adds r16=PT(R8),r1; /* initialize first base pointer */ \
adds r17=PT(R9),r1; /* initialize second base pointer */ \
(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
;; \
st8 [r17]=rCRIIP,16; /* save cr.iip */ \
st8 [r16]=rCRIFS,16; /* save cr.ifs */ \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
;; \
.mem.offset 0,0; st8.spill [r16]=r10,24; \
.mem.offset 8,0; st8.spill [r17]=r11,24; \
;; \
st8 [r16]=rCRIIP,16; /* save cr.iip */ \
st8 [r17]=rCRIFS,16; /* save cr.ifs */ \
(pUStk) sub r18=r18,rKRBS; /* r18=RSE.ndirty*8 */ \
mov r8=ar.ccv; \
mov r9=ar.csd; \
mov r10=ar.ssd; \
movl r11=FPSR_DEFAULT; /* L-unit */ \
;; \
st8 [r17]=rARUNAT,16; /* save ar.unat */ \
st8 [r16]=rARPFS,16; /* save ar.pfs */ \
st8 [r16]=rARUNAT,16; /* save ar.unat */ \
st8 [r17]=rARPFS,16; /* save ar.pfs */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
;; \
st8 [r17]=rARRSC,16; /* save ar.rsc */ \
(pUStk) st8 [r16]=rARRNAT,16; /* save ar.rnat */ \
(pKStk) adds r16=16,r16; /* skip over ar_rnat field */ \
st8 [r16]=rARRSC,16; /* save ar.rsc */ \
(pUStk) st8 [r17]=rARRNAT,16; /* save ar.rnat */ \
(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
;; /* avoid RAW on r16 & r17 */ \
(pUStk) st8 [r17]=rARBSPSTORE,16; /* save ar.bspstore */ \
st8 [r16]=rARPR,16; /* save predicates */ \
(pKStk) adds r17=16,r17; /* skip over ar_bspstore field */ \
;; \
st8 [r17]=rB6,16; /* save b6 */ \
st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */ \
tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT \
;; \
.mem.offset 8,0; st8.spill [r17]=rR1,16; /* save original r1 */ \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
(pUStk) st8 [r16]=rARBSPSTORE,16; /* save ar.bspstore */ \
st8 [r17]=rARPR,16; /* save predicates */ \
(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
;; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
.mem.offset 0,0; st8.spill [r16]=r12,16; \
adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
;; \
.mem.offset 8,0; st8.spill [r17]=r13,16; \
.mem.offset 0,0; st8.spill [r16]=r14,16; \
st8 [r16]=rCRIPSR,16; /* save b0 */ \
st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
;; \
.mem.offset 8,0; st8.spill [r17]=r15,16; \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
dep r14=-1,r0,61,3; \
;; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
.mem.offset 0,0; st8.spill [r16]=r10,16; \
.mem.offset 0,0; st8.spill [r16]=rR1,16; /* save original r1 */ \
.mem.offset 8,0; st8.spill [r17]=r12,16; \
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
;; \
.mem.offset 8,0; st8.spill [r17]=r11,16; \
.mem.offset 0,0; st8.spill [r16]=r13,16; \
.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
mov r13=IA64_KR(CURRENT); /* establish `current' */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r15,16; \
.mem.offset 8,0; st8.spill [r17]=r14,16; \
dep r14=-1,r0,61,3; \
;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
;; \
EXTRA; \
movl r1=__gp; /* establish kernel global pointer */ \
;; \
MINSTATE_END_SAVE_MIN
/*
* SAVE_REST saves the remainder of pt_regs (with psr.ic on). This
* macro guarantees to preserve all predicate registers, r8, r9, r10,
* r11, r14, and r15.
* SAVE_REST saves the remainder of pt_regs (with psr.ic on).
*
* Assumed state upon entry:
* psr.ic: on
......@@ -202,49 +207,52 @@
*/
#define SAVE_REST \
.mem.offset 0,0; st8.spill [r2]=r16,16; \
;; \
.mem.offset 8,0; st8.spill [r3]=r17,16; \
.mem.offset 0,0; st8.spill [r2]=r18,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r18,16; \
.mem.offset 8,0; st8.spill [r3]=r19,16; \
.mem.offset 0,0; st8.spill [r2]=r20,16; \
;; \
mov r16=ar.ccv; /* M-unit */ \
movl r18=FPSR_DEFAULT /* L-unit */ \
;; \
mov r17=ar.fpsr; /* M-unit */ \
mov ar.fpsr=r18; /* M-unit */ \
;; \
.mem.offset 0,0; st8.spill [r2]=r20,16; \
.mem.offset 8,0; st8.spill [r3]=r21,16; \
.mem.offset 0,0; st8.spill [r2]=r22,16; \
mov r18=b0; \
mov r18=b6; \
;; \
.mem.offset 0,0; st8.spill [r2]=r22,16; \
.mem.offset 8,0; st8.spill [r3]=r23,16; \
.mem.offset 0,0; st8.spill [r2]=r24,16; \
mov r19=b7; \
;; \
.mem.offset 0,0; st8.spill [r2]=r24,16; \
.mem.offset 8,0; st8.spill [r3]=r25,16; \
.mem.offset 0,0; st8.spill [r2]=r26,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r26,16; \
.mem.offset 8,0; st8.spill [r3]=r27,16; \
.mem.offset 0,0; st8.spill [r2]=r28,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r28,16; \
.mem.offset 8,0; st8.spill [r3]=r29,16; \
.mem.offset 0,0; st8.spill [r2]=r30,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r30,16; \
.mem.offset 8,0; st8.spill [r3]=r31,16; \
st8 [r2]=r16,16; /* ar.ccv */ \
;; \
st8 [r3]=r17,16; /* ar.fpsr */ \
st8 [r2]=r18,16; /* b0 */ \
;; \
st8 [r3]=r19,16+8; /* b7 */ \
mov ar.fpsr=r11; /* M-unit */ \
st8 [r2]=r8,8; /* ar_ccv */ \
adds r3=16,r3; \
;; \
stf.spill [r2]=f6,32; \
stf.spill [r3]=f7,32; \
;; \
stf.spill [r2]=f8,32; \
stf.spill [r3]=f9,32
stf.spill [r3]=f9,32; \
adds r24=PT(B6)+16,r12 \
;; \
stf.spill [r2]=f10,32; \
stf.spill [r3]=f11,32; \
adds r25=PT(B7)+16,r12 \
;; \
st8 [r24]=r18,16; /* b6 */ \
st8 [r25]=r19,16; /* b7 */ \
;; \
st8 [r24]=r9; /* ar.csd */ \
st8 [r25]=r10; /* ar.ssd */ \
;;
#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs, mov r15=r19)
......
......@@ -102,6 +102,7 @@ show_regs (struct pt_regs *regs)
regs->ar_rnat, regs->ar_bspstore, regs->pr);
printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7);
printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
regs->f6.u.bits[1], regs->f6.u.bits[0],
......@@ -109,6 +110,9 @@ show_regs (struct pt_regs *regs)
printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
regs->f8.u.bits[1], regs->f8.u.bits[0],
regs->f9.u.bits[1], regs->f9.u.bits[0]);
printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
regs->f10.u.bits[1], regs->f10.u.bits[0],
regs->f11.u.bits[1], regs->f11.u.bits[0]);
printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3);
printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
......@@ -471,6 +475,8 @@ do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *
dst[52] = pt->ar_pfs; /* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
unw_get_ar(info, UNW_AR_LC, &dst[53]);
unw_get_ar(info, UNW_AR_EC, &dst[54]);
unw_get_ar(info, UNW_AR_CSD, &dst[55]);
unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}
void
......
......@@ -29,6 +29,8 @@
#include <asm/perfmon.h>
#endif
#define offsetof(type,field) ((unsigned long) &((type *) 0)->field)
/*
* Bits in the PSR that we allow ptrace() to change:
* be, up, ac, mfl, mfh (the user mask; five bits total)
......@@ -669,9 +671,12 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
else
ia64_flush_fph(child);
ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr);
} else if (addr >= PT_F10 && addr < PT_F15 + 16) {
} else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) {
/* scratch registers untouched by kernel (saved in pt_regs) */
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, f10) + addr - PT_F10);
} else if (addr >= PT_F12 && addr < PT_F15 + 16) {
/* scratch registers untouched by kernel (saved in switch_stack) */
ptr = (unsigned long *) ((long) sw + addr - PT_NAT_BITS);
ptr = (unsigned long *) ((long) sw + (addr - PT_NAT_BITS - 32));
} else if (addr < PT_AR_LC + 8) {
/* preserved state: */
unsigned long nat_bits, scratch_unat, dummy = 0;
......@@ -807,22 +812,69 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
else
return ia64_peek(child, sw, urbs_end, rnat_addr, data);
case PT_R1: case PT_R2: case PT_R3:
case PT_R1:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r1));
break;
case PT_R2: case PT_R3:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r2) + addr - PT_R2);
break;
case PT_R8: case PT_R9: case PT_R10: case PT_R11:
case PT_R12: case PT_R13: case PT_R14: case PT_R15:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r8)+ addr - PT_R8);
break;
case PT_R12: case PT_R13:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r12)+ addr - PT_R12);
break;
case PT_R14:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r14));
break;
case PT_R15:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r15));
break;
case PT_R16: case PT_R17: case PT_R18: case PT_R19:
case PT_R20: case PT_R21: case PT_R22: case PT_R23:
case PT_R24: case PT_R25: case PT_R26: case PT_R27:
case PT_R28: case PT_R29: case PT_R30: case PT_R31:
case PT_B0: case PT_B6: case PT_B7:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r16) + addr - PT_R16);
break;
case PT_B0:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b0));
break;
case PT_B6:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b6));
break;
case PT_B7:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b7));
break;
case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8:
case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, f6) + addr - PT_F6);
break;
case PT_AR_BSPSTORE:
case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS:
case PT_AR_CCV: case PT_AR_FPSR: case PT_CR_IIP: case PT_PR:
/* scratch register */
ptr = (unsigned long *) ((long) pt + addr - PT_CR_IPSR);
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_bspstore));
break;
case PT_AR_RSC:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_rsc));
break;
case PT_AR_UNAT:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_unat));
break;
case PT_AR_PFS:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_pfs));
break;
case PT_AR_CCV:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_ccv));
break;
case PT_AR_FPSR:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_fpsr));
break;
case PT_CR_IIP:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, cr_iip));
break;
case PT_PR:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, pr));
break;
/* scratch register */
default:
/* disallow accessing anything else... */
......@@ -830,6 +882,8 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
addr);
return -1;
}
} else if (addr <= PT_AR_SSD) {
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_csd) + addr - PT_AR_CSD);
} else {
/* access debug registers */
......@@ -936,7 +990,8 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs *ppr)
/* gr1-gr3 */
retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long) * 3);
retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
/* gr4-gr7 */
......@@ -950,7 +1005,9 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs *ppr)
/* gr12-gr15 */
retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 4);
retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
/* gr16-gr31 */
......@@ -978,13 +1035,13 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs *ppr)
retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 0);
}
/* fr6-fr9 */
/* fr6-fr11 */
retval |= __copy_to_user(&ppr->fr[6], &pt->f6, sizeof(struct ia64_fpreg) * 4);
retval |= __copy_to_user(&ppr->fr[6], &pt->f6, sizeof(struct ia64_fpreg) * 6);
/* fp scratch regs(10-15) */
/* fp scratch regs(12-15) */
retval |= __copy_to_user(&ppr->fr[10], &sw->f10, sizeof(struct ia64_fpreg) * 6);
retval |= __copy_to_user(&ppr->fr[12], &sw->f12, sizeof(struct ia64_fpreg) * 4);
/* fr16-fr31 */
......@@ -1061,7 +1118,8 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs *ppr)
/* gr1-gr3 */
retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long) * 3);
retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
/* gr4-gr7 */
......@@ -1079,7 +1137,9 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs *ppr)
/* gr12-gr15 */
retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 4);
retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
/* gr16-gr31 */
......@@ -1107,13 +1167,13 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs *ppr)
retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 1);
}
/* fr6-fr9 */
/* fr6-fr11 */
retval |= __copy_from_user(&pt->f6, &ppr->fr[6], sizeof(ppr->fr[6]) * 4);
retval |= __copy_from_user(&pt->f6, &ppr->fr[6], sizeof(ppr->fr[6]) * 6);
/* fp scratch regs(10-15) */
/* fp scratch regs(12-15) */
retval |= __copy_from_user(&sw->f10, &ppr->fr[10], sizeof(ppr->fr[10]) * 6);
retval |= __copy_from_user(&sw->f12, &ppr->fr[12], sizeof(ppr->fr[12]) * 4);
/* fr16-fr31 */
......
......@@ -115,18 +115,28 @@ restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr)
err |= __get_user(cfm, &sc->sc_cfm);
err |= __get_user(um, &sc->sc_um); /* user mask */
err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
err |= __get_user(scr->pt.pr, &sc->sc_pr); /* predicates */
err |= __get_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */
err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8); /* r1 */
err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8); /* r8-r11 */
err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8); /* r12-r13 */
err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */
if ((flags & IA64_SC_FLAG_IN_SYSCALL)==0)
{
/* Restore most scratch-state only when not in syscall. */
err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __get_user(scr->pt.ar_csd, &sc->sc_ar25); /* ar.csd */
err |= __get_user(scr->pt.ar_ssd, &sc->sc_ar26); /* ar.ssd */
err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 3*8); /* r1-r3 */
err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8); /* r8-r11 */
err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 4*8); /* r12-r15 */
err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8); /* r2-r3 */
err |= __copy_from_user(&scr->pt.r14, &sc->sc_gr[14], 8); /* r14 */
err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
}
scr->pt.cr_ifs = cfm | (1UL << 63);
......@@ -358,21 +368,42 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr)
err |= __put_user(cfm, &sc->sc_cfm);
err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um);
err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat); /* ar.unat */
err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */
err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
err |= __put_user(scr->pt.pr, &sc->sc_pr); /* predicates */
err |= __put_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */
err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8); /* r1 */
err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8); /* r8-r11 */
err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8); /* r12-r13 */
err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8); /* r15 */
err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);
if (flags & IA64_SC_FLAG_IN_SYSCALL)
{
/* Clear scratch registers if the signal interrupted a system call. */
err |= __clear_user(&sc->sc_ar_ccv, 8);
err |= __clear_user(&sc->sc_ar25,8); /* ar.csd */
err |= __clear_user(&sc->sc_ar26,8); /* ar.ssd */
err |= __clear_user(&sc->sc_br[6],8); /* b6 */
err |= __clear_user(&sc->sc_br[7],8); /* b7 */
err |= __clear_user(&sc->sc_gr[2], 2*8); /* r2-r3 */
err |= __clear_user(&sc->sc_gr[14],8); /* r14 */
err |= __clear_user(&sc->sc_gr[16],16*8); /* r16-r31 */
} else
{
/* Copy scratch registers to sigcontext if the signal did not interrupt a syscall. */
err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __put_user(scr->pt.ar_csd, &sc->sc_ar25); /* ar.csd */
err |= __put_user(scr->pt.ar_ssd, &sc->sc_ar26); /* ar.ssd */
err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 3*8); /* r1-r3 */
err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8); /* r8-r11 */
err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 4*8); /* r12-r15 */
err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8); /* r2-r3 */
err |= __copy_to_user(&sc->sc_gr[14], &scr->pt.r14, 8); /* r14 */
err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */
err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);
}
return err;
}
......
......@@ -275,7 +275,6 @@ static inline int
fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
struct pt_regs *regs)
{
struct ia64_fpreg f6_11[6];
fp_state_t fp_state;
fpswa_ret_t ret;
......@@ -290,11 +289,8 @@ fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long
* pointer to point to these registers.
*/
fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
f6_11[0] = regs->f6; f6_11[1] = regs->f7;
f6_11[2] = regs->f8; f6_11[3] = regs->f9;
__asm__ ("stf.spill %0=f10%P0" : "=m"(f6_11[4]));
__asm__ ("stf.spill %0=f11%P0" : "=m"(f6_11[5]));
fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) f6_11;
fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
/*
* unsigned long (*EFI_FPSWA) (
* unsigned long trap_type,
......@@ -310,10 +306,7 @@ fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long
(unsigned long *) ipsr, (unsigned long *) fpsr,
(unsigned long *) isr, (unsigned long *) pr,
(unsigned long *) ifs, &fp_state);
regs->f6 = f6_11[0]; regs->f7 = f6_11[1];
regs->f8 = f6_11[2]; regs->f9 = f6_11[3];
__asm__ ("ldf.fill f10=%0%P0" :: "m"(f6_11[4]));
__asm__ ("ldf.fill f11=%0%P0" :: "m"(f6_11[5]));
return ret.status;
}
......
......@@ -218,8 +218,9 @@ static u16 fr_info[32]={
RSW(f2), RSW(f3), RSW(f4), RSW(f5),
RPT(f6), RPT(f7), RPT(f8), RPT(f9),
RPT(f10), RPT(f11),
RSW(f10), RSW(f11), RSW(f12), RSW(f13), RSW(f14),
RSW(f12), RSW(f13), RSW(f14),
RSW(f15), RSW(f16), RSW(f17), RSW(f18), RSW(f19),
RSW(f20), RSW(f21), RSW(f22), RSW(f23), RSW(f24),
RSW(f25), RSW(f26), RSW(f27), RSW(f28), RSW(f29),
......
/*
* Copyright (C) 1999-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
* - Change pt_regs_off() to make it less dependent on the pt_regs structure.
*/
/*
* This file implements call frame unwind support for the Linux
......@@ -209,7 +211,10 @@ static struct {
#endif
};
#define OFF_CASE(reg, reg_num) \
case reg: \
off = struct_offset(struct pt_regs, reg_num); \
break;
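/* For example, OFF_CASE(9, r9) expands to:
 *
 *	case 9:
 *		off = struct_offset(struct pt_regs, r9);
 *		break;
 *
 * The switch replaces the old arithmetic over contiguous ranges because the
 * restructured pt_regs no longer keeps r1-r31 at evenly spaced offsets. */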
/* Unwind accessors. */
/*
......@@ -220,16 +225,39 @@ pt_regs_off (unsigned long reg)
{
unsigned long off =0;
if (reg >= 1 && reg <= 3)
off = struct_offset(struct pt_regs, r1) + 8*(reg - 1);
else if (reg <= 11)
off = struct_offset(struct pt_regs, r8) + 8*(reg - 8);
else if (reg <= 15)
off = struct_offset(struct pt_regs, r12) + 8*(reg - 12);
else if (reg <= 31)
off = struct_offset(struct pt_regs, r16) + 8*(reg - 16);
else
switch (reg)
{
OFF_CASE(1,r1)
OFF_CASE(2,r2)
OFF_CASE(3,r3)
OFF_CASE(8,r8)
OFF_CASE(9,r9)
OFF_CASE(10,r10)
OFF_CASE(11,r11)
OFF_CASE(12,r12)
OFF_CASE(13,r13)
OFF_CASE(14,r14)
OFF_CASE(15,r15)
OFF_CASE(16,r16)
OFF_CASE(17,r17)
OFF_CASE(18,r18)
OFF_CASE(19,r19)
OFF_CASE(20,r20)
OFF_CASE(21,r21)
OFF_CASE(22,r22)
OFF_CASE(23,r23)
OFF_CASE(24,r24)
OFF_CASE(25,r25)
OFF_CASE(26,r26)
OFF_CASE(27,r27)
OFF_CASE(28,r28)
OFF_CASE(29,r29)
OFF_CASE(30,r30)
OFF_CASE(31,r31)
default:
UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
break;
}
return off;
}
......@@ -419,10 +447,10 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val,
if (!addr)
addr = &info->sw->f2 + (regnum - 2);
} else if (regnum <= 15) {
if (regnum <= 9)
if (regnum <= 11)
addr = &pt->f6 + (regnum - 6);
else
addr = &info->sw->f10 + (regnum - 10);
addr = &info->sw->f12 + (regnum - 12);
} else if (regnum <= 31) {
addr = info->fr_loc[regnum - 16];
if (!addr)
......@@ -512,6 +540,14 @@ unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int
addr = &pt->ar_ccv;
break;
case UNW_AR_CSD:
addr = &pt->ar_csd;
break;
case UNW_AR_SSD:
addr = &pt->ar_ssd;
break;
default:
UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
__FUNCTION__, regnum);
......@@ -1379,7 +1415,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
else {
opc = UNW_INSN_MOVE_SCRATCH;
if (rval <= 9)
if (rval <= 11)
val = struct_offset(struct pt_regs, f6) + 16*(rval - 6);
else
UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
......
......@@ -37,57 +37,44 @@
#define NAME PASTE(PASTE(__,SGN),PASTE(OP,di3))
GLOBAL_ENTRY(NAME)
.prologue
.regstk 2,0,0,0
// Transfer inputs to FP registers.
setf.sig f8 = in0
setf.sig f9 = in1
;;
.fframe 16
.save.f 0x20
stf.spill [sp] = f17,-16
// Convert the inputs to FP, to avoid FP software-assist faults.
INT_TO_FP(f8, f8)
;;
.save.f 0x10
stf.spill [sp] = f16
.body
INT_TO_FP(f9, f9)
;;
frcpa.s1 f17, p6 = f8, f9 // y0 = frcpa(b)
frcpa.s1 f11, p6 = f8, f9 // y0 = frcpa(b)
;;
(p6) fmpy.s1 f7 = f8, f17 // q0 = a*y0
(p6) fnma.s1 f6 = f9, f17, f1 // e0 = -b*y0 + 1
(p6) fmpy.s1 f7 = f8, f11 // q0 = a*y0
(p6) fnma.s1 f6 = f9, f11, f1 // e0 = -b*y0 + 1
;;
(p6) fma.s1 f16 = f7, f6, f7 // q1 = q0*e0 + q0
(p6) fma.s1 f10 = f7, f6, f7 // q1 = q0*e0 + q0
(p6) fmpy.s1 f7 = f6, f6 // e1 = e0*e0
;;
#ifdef MODULO
sub in1 = r0, in1 // in1 = -b
#endif
(p6) fma.s1 f16 = f16, f7, f16 // q2 = q1*e1 + q1
(p6) fma.s1 f6 = f17, f6, f17 // y1 = y0*e0 + y0
(p6) fma.s1 f10 = f10, f7, f10 // q2 = q1*e1 + q1
(p6) fma.s1 f6 = f11, f6, f11 // y1 = y0*e0 + y0
;;
(p6) fma.s1 f6 = f6, f7, f6 // y2 = y1*e1 + y1
(p6) fnma.s1 f7 = f9, f16, f8 // r = -b*q2 + a
(p6) fnma.s1 f7 = f9, f10, f8 // r = -b*q2 + a
;;
#ifdef MODULO
setf.sig f8 = in0 // f8 = a
setf.sig f9 = in1 // f9 = -b
#endif
(p6) fma.s1 f17 = f7, f6, f16 // q3 = r*y2 + q2
(p6) fma.s1 f11 = f7, f6, f10 // q3 = r*y2 + q2
;;
.restore sp
ldf.fill f16 = [sp], 16
FP_TO_INT(f17, f17) // q = trunc(q3)
FP_TO_INT(f11, f11) // q = trunc(q3)
;;
#ifdef MODULO
xma.l f17 = f17, f9, f8 // r = q*(-b) + a
xma.l f11 = f11, f9, f8 // r = q*(-b) + a
;;
#endif
getf.sig r8 = f17 // transfer result to result register
ldf.fill f17 = [sp]
getf.sig r8 = f11 // transfer result to result register
br.ret.sptk.many rp
END(NAME)
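
The f16/f17 -> f10/f11 renaming lets the divide helpers use the two new kernel FP scratch registers, so the stf.spill/ldf.fill of preserved registers around the body disappears. The algorithm itself is unchanged: frcpa supplies roughly 8 valid bits of 1/b, and three fma-based Newton-Raphson steps widen that to full quotient precision. Here is a sketch of the same iteration in C doubles (illustrative only: doubles carry 53 significand bits, so this is the shape of the algorithm, not a bit-exact stand-in for the 82-bit ia64 FP path):

/* Newton-Raphson division, mirroring the comments in the assembly above. */
#include <stdio.h>

static long approx_div(long a, long b)
{
	double fa = (double)a, fb = (double)b;
	double y0 = 1.0 / fb;		/* frcpa: initial reciprocal guess */
	double q0 = fa * y0;		/* q0 = a*y0                       */
	double e0 = 1.0 - fb * y0;	/* e0 = -b*y0 + 1                  */
	double q1 = q0 * e0 + q0;	/* q1 = q0*e0 + q0                 */
	double e1 = e0 * e0;		/* e1 = e0*e0                      */
	double q2 = q1 * e1 + q1;	/* q2 = q1*e1 + q1                 */
	double y1 = y0 * e0 + y0;	/* y1 = y0*e0 + y0                 */
	double y2 = y1 * e1 + y1;	/* y2 = y1*e1 + y1                 */
	double r  = fa - fb * q2;	/* r  = -b*q2 + a                  */
	double q3 = r * y2 + q2;	/* q3 = r*y2 + q2                  */
	return (long)q3;		/* FP_TO_INT: q = trunc(q3)        */
}

int main(void)
{
	printf("%ld\n", approx_div(1000000007L, 13L));	/* expect 76923077 */
	return 0;
}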
......@@ -3,6 +3,8 @@
*
* Copyright (C) 1999-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2002-2003 Intel Co
* Fenghua Yu <fenghua.yu@intel.com>
*
* Note that this file has dual use: when building the kernel
* natively, the file is translated into a binary and executed. When
......@@ -59,6 +61,14 @@ tab[] =
{ "IA64_TASK_TGID_OFFSET", offsetof (struct task_struct, tgid) },
{ "IA64_TASK_THREAD_KSP_OFFSET", offsetof (struct task_struct, thread.ksp) },
{ "IA64_TASK_THREAD_ON_USTACK_OFFSET", offsetof (struct task_struct, thread.on_ustack) },
{ "IA64_PT_REGS_B6_OFFSET", offsetof (struct pt_regs, b6) },
{ "IA64_PT_REGS_B7_OFFSET", offsetof (struct pt_regs, b7) },
{ "IA64_PT_REGS_AR_CSD_OFFSET", offsetof (struct pt_regs, ar_csd) },
{ "IA64_PT_REGS_AR_SSD_OFFSET", offsetof (struct pt_regs, ar_ssd) },
{ "IA64_PT_REGS_R8_OFFSET", offsetof (struct pt_regs, r8) },
{ "IA64_PT_REGS_R9_OFFSET", offsetof (struct pt_regs, r9) },
{ "IA64_PT_REGS_R10_OFFSET", offsetof (struct pt_regs, r10) },
{ "IA64_PT_REGS_R11_OFFSET", offsetof (struct pt_regs, r11) },
{ "IA64_PT_REGS_CR_IPSR_OFFSET", offsetof (struct pt_regs, cr_ipsr) },
{ "IA64_PT_REGS_CR_IIP_OFFSET", offsetof (struct pt_regs, cr_iip) },
{ "IA64_PT_REGS_CR_IFS_OFFSET", offsetof (struct pt_regs, cr_ifs) },
......@@ -68,19 +78,16 @@ tab[] =
{ "IA64_PT_REGS_AR_RNAT_OFFSET", offsetof (struct pt_regs, ar_rnat) },
{ "IA64_PT_REGS_AR_BSPSTORE_OFFSET",offsetof (struct pt_regs, ar_bspstore) },
{ "IA64_PT_REGS_PR_OFFSET", offsetof (struct pt_regs, pr) },
{ "IA64_PT_REGS_B6_OFFSET", offsetof (struct pt_regs, b6) },
{ "IA64_PT_REGS_B0_OFFSET", offsetof (struct pt_regs, b0) },
{ "IA64_PT_REGS_LOADRS_OFFSET", offsetof (struct pt_regs, loadrs) },
{ "IA64_PT_REGS_R1_OFFSET", offsetof (struct pt_regs, r1) },
{ "IA64_PT_REGS_R2_OFFSET", offsetof (struct pt_regs, r2) },
{ "IA64_PT_REGS_R3_OFFSET", offsetof (struct pt_regs, r3) },
{ "IA64_PT_REGS_R12_OFFSET", offsetof (struct pt_regs, r12) },
{ "IA64_PT_REGS_R13_OFFSET", offsetof (struct pt_regs, r13) },
{ "IA64_PT_REGS_R14_OFFSET", offsetof (struct pt_regs, r14) },
{ "IA64_PT_REGS_AR_FPSR_OFFSET", offsetof (struct pt_regs, ar_fpsr) },
{ "IA64_PT_REGS_R15_OFFSET", offsetof (struct pt_regs, r15) },
{ "IA64_PT_REGS_R8_OFFSET", offsetof (struct pt_regs, r8) },
{ "IA64_PT_REGS_R9_OFFSET", offsetof (struct pt_regs, r9) },
{ "IA64_PT_REGS_R10_OFFSET", offsetof (struct pt_regs, r10) },
{ "IA64_PT_REGS_R11_OFFSET", offsetof (struct pt_regs, r11) },
{ "IA64_PT_REGS_R14_OFFSET", offsetof (struct pt_regs, r14) },
{ "IA64_PT_REGS_R2_OFFSET", offsetof (struct pt_regs, r2) },
{ "IA64_PT_REGS_R3_OFFSET", offsetof (struct pt_regs, r3) },
{ "IA64_PT_REGS_R16_OFFSET", offsetof (struct pt_regs, r16) },
{ "IA64_PT_REGS_R17_OFFSET", offsetof (struct pt_regs, r17) },
{ "IA64_PT_REGS_R18_OFFSET", offsetof (struct pt_regs, r18) },
......@@ -98,21 +105,18 @@ tab[] =
{ "IA64_PT_REGS_R30_OFFSET", offsetof (struct pt_regs, r30) },
{ "IA64_PT_REGS_R31_OFFSET", offsetof (struct pt_regs, r31) },
{ "IA64_PT_REGS_AR_CCV_OFFSET", offsetof (struct pt_regs, ar_ccv) },
{ "IA64_PT_REGS_AR_FPSR_OFFSET", offsetof (struct pt_regs, ar_fpsr) },
{ "IA64_PT_REGS_B0_OFFSET", offsetof (struct pt_regs, b0) },
{ "IA64_PT_REGS_B7_OFFSET", offsetof (struct pt_regs, b7) },
{ "IA64_PT_REGS_F6_OFFSET", offsetof (struct pt_regs, f6) },
{ "IA64_PT_REGS_F7_OFFSET", offsetof (struct pt_regs, f7) },
{ "IA64_PT_REGS_F8_OFFSET", offsetof (struct pt_regs, f8) },
{ "IA64_PT_REGS_F9_OFFSET", offsetof (struct pt_regs, f9) },
{ "IA64_PT_REGS_F10_OFFSET", offsetof (struct pt_regs, f10) },
{ "IA64_PT_REGS_F11_OFFSET", offsetof (struct pt_regs, f11) },
{ "IA64_SWITCH_STACK_CALLER_UNAT_OFFSET", offsetof (struct switch_stack, caller_unat) },
{ "IA64_SWITCH_STACK_AR_FPSR_OFFSET", offsetof (struct switch_stack, ar_fpsr) },
{ "IA64_SWITCH_STACK_F2_OFFSET", offsetof (struct switch_stack, f2) },
{ "IA64_SWITCH_STACK_F3_OFFSET", offsetof (struct switch_stack, f3) },
{ "IA64_SWITCH_STACK_F4_OFFSET", offsetof (struct switch_stack, f4) },
{ "IA64_SWITCH_STACK_F5_OFFSET", offsetof (struct switch_stack, f5) },
{ "IA64_SWITCH_STACK_F10_OFFSET", offsetof (struct switch_stack, f10) },
{ "IA64_SWITCH_STACK_F11_OFFSET", offsetof (struct switch_stack, f11) },
{ "IA64_SWITCH_STACK_F12_OFFSET", offsetof (struct switch_stack, f12) },
{ "IA64_SWITCH_STACK_F13_OFFSET", offsetof (struct switch_stack, f13) },
{ "IA64_SWITCH_STACK_F14_OFFSET", offsetof (struct switch_stack, f14) },
......
......@@ -147,7 +147,7 @@ extern void ia64_init_addr_space (void);
* b0-b7
* ip cfm psr
* ar.rsc ar.bsp ar.bspstore ar.rnat
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
*/
#define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
#define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */
......
......@@ -243,8 +243,6 @@ struct thread_struct {
__u64 fcr; /* IA32 floating pt control reg */
__u64 fir; /* IA32 fp except. instr. reg */
__u64 fdr; /* IA32 fp except. data reg */
__u64 csd; /* IA32 code selector descriptor */
__u64 ssd; /* IA32 stack selector descriptor */
__u64 old_k1; /* old value of ar.k1 */
__u64 old_iob; /* old IOBase value */
# define INIT_THREAD_IA32 .eflag = 0, \
......@@ -252,8 +250,6 @@ struct thread_struct {
.fcr = 0x17800000037fULL, \
.fir = 0, \
.fdr = 0, \
.csd = 0, \
.ssd = 0, \
.old_k1 = 0, \
.old_iob = 0,
#else
......@@ -328,11 +324,15 @@ struct thread_struct {
regs->r24 = 0; regs->r25 = 0; regs->r26 = 0; regs->r27 = 0; \
regs->r28 = 0; regs->r29 = 0; regs->r30 = 0; regs->r31 = 0; \
regs->ar_ccv = 0; \
regs->ar_csd = 0; \
regs->ar_ssd = 0; \
regs->b0 = 0; regs->b7 = 0; \
regs->f6.u.bits[0] = 0; regs->f6.u.bits[1] = 0; \
regs->f7.u.bits[0] = 0; regs->f7.u.bits[1] = 0; \
regs->f8.u.bits[0] = 0; regs->f8.u.bits[1] = 0; \
regs->f9.u.bits[0] = 0; regs->f9.u.bits[1] = 0; \
regs->f10.u.bits[0] = 0; regs->f10.u.bits[1] = 0; \
regs->f11.u.bits[0] = 0; regs->f11.u.bits[1] = 0; \
} \
} while (0)
......
......@@ -5,6 +5,10 @@
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 2003 Intel Co
* Suresh Siddha <suresh.b.siddha@intel.com>
* Fenghua Yu <fenghua.yu@intel.com>
* Arun Sharma <arun.sharma@intel.com>
*
* 12/07/98 S. Eranian added pt_regs & switch_stack
* 12/21/98 D. Mosberger updated to match latest code
......@@ -93,6 +97,16 @@
*/
struct pt_regs {
/* The following registers are saved by SAVE_MIN: */
unsigned long b6; /* scratch */
unsigned long b7; /* scratch */
unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
unsigned long ar_ssd; /* reserved for future use (scratch) */
unsigned long r8; /* scratch (return value register 0) */
unsigned long r9; /* scratch (return value register 1) */
unsigned long r10; /* scratch (return value register 2) */
unsigned long r11; /* scratch (return value register 3) */
unsigned long cr_ipsr; /* interrupted task's psr */
unsigned long cr_iip; /* interrupted task's instruction pointer */
......@@ -106,22 +120,19 @@ struct pt_regs {
unsigned long ar_bspstore; /* RSE bspstore */
unsigned long pr; /* 64 predicate registers (1 bit each) */
unsigned long b6; /* scratch */
unsigned long b0; /* return pointer (bp) */
unsigned long loadrs; /* size of dirty partition << 16 */
unsigned long r1; /* the gp pointer */
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */
unsigned long r12; /* interrupted task's memory stack pointer */
unsigned long r13; /* thread pointer */
unsigned long r14; /* scratch */
unsigned long r15; /* scratch */
unsigned long r8; /* scratch (return value register 0) */
unsigned long r9; /* scratch (return value register 1) */
unsigned long r10; /* scratch (return value register 2) */
unsigned long r11; /* scratch (return value register 3) */
unsigned long ar_fpsr; /* floating point status (preserved) */
unsigned long r15; /* scratch */
unsigned long r14; /* scratch */
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */
/* The following registers are saved by SAVE_REST: */
unsigned long r16; /* scratch */
......@@ -142,10 +153,7 @@ struct pt_regs {
unsigned long r31; /* scratch */
unsigned long ar_ccv; /* compare/exchange value (scratch) */
unsigned long ar_fpsr; /* floating point status (preserved) */
unsigned long b0; /* return pointer (bp) */
unsigned long b7; /* scratch */
/*
* Floating point registers that the kernel considers
* scratch:
......@@ -154,6 +162,8 @@ struct pt_regs {
struct ia64_fpreg f7; /* scratch */
struct ia64_fpreg f8; /* scratch */
struct ia64_fpreg f9; /* scratch */
struct ia64_fpreg f10; /* scratch */
struct ia64_fpreg f11; /* scratch */
};
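
The point of the reordering is that it clusters the registers the syscall path actually touches at the head of the struct, so SAVE_MIN reads and writes fewer cache lines instead of reaching past r16-r31 for the return-value registers. A quick offsetof() check on a mock of the new leading fields (field order as shown above; the struct below is illustrative):

/* Hypothetical check of the new field ordering -- mock struct only. */
#include <stddef.h>
#include <stdio.h>

struct pt_regs_new {
	/* saved by SAVE_MIN: */
	unsigned long b6, b7, ar_csd, ar_ssd;
	unsigned long r8, r9, r10, r11;
	unsigned long cr_ipsr, cr_iip;
	/* ...rest of the struct elided... */
};

int main(void)
{
	/* The four return-value registers share one 64-byte line with
	 * b6/b7/ar_csd/ar_ssd: bytes 0..63 of the struct. */
	printf("r8 at %lu, r11 ends at %lu (one line if <= 64)\n",
	       (unsigned long)offsetof(struct pt_regs_new, r8),
	       (unsigned long)(offsetof(struct pt_regs_new, r11)
			       + sizeof(unsigned long)));
	return 0;
}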
/*
......@@ -170,8 +180,6 @@ struct switch_stack {
struct ia64_fpreg f4; /* preserved */
struct ia64_fpreg f5; /* preserved */
struct ia64_fpreg f10; /* scratch, but untouched by kernel */
struct ia64_fpreg f11; /* scratch, but untouched by kernel */
struct ia64_fpreg f12; /* scratch, but untouched by kernel */
struct ia64_fpreg f13; /* scratch, but untouched by kernel */
struct ia64_fpreg f14; /* scratch, but untouched by kernel */
......
......@@ -11,9 +11,64 @@
*
* struct uarea {
* struct ia64_fpreg fph[96]; // f32-f127
* struct switch_stack sw;
* struct pt_regs pt;
* unsigned long rsvd1[712];
* unsigned long nat_bits;
* unsigned long empty1;
* struct ia64_fpreg f2; // f2-f5
* .
* .
* struct ia64_fpreg f5;
* struct ia64_fpreg f10; // f10-f31
* .
* .
* struct ia64_fpreg f31;
* unsigned long r4; // r4-r7
* .
* .
* unsigned long r7;
* unsigned long b1; // b1-b5
* .
* .
* unsigned long b5;
* unsigned long ar_ec;
* unsigned long ar_lc;
* unsigned long empty2[5];
* unsigned long cr_ipsr;
* unsigned long cr_iip;
* unsigned long cfm;
* unsigned long ar_unat;
* unsigned long ar_pfs;
* unsigned long ar_rsc;
* unsigned long ar_rnat;
* unsigned long ar_bspstore;
* unsigned long pr;
* unsigned long b6;
* unsigned long ar_bsp;
* unsigned long r1;
* unsigned long r2;
* unsigned long r3;
* unsigned long r12;
* unsigned long r13;
* unsigned long r14;
* unsigned long r15;
* unsigned long r8;
* unsigned long r9;
* unsigned long r10;
* unsigned long r11;
* unsigned long r16;
* .
* .
* unsigned long r31;
* unsigned long ar_ccv;
* unsigned long ar_fpsr;
* unsigned long b0;
* unsigned long b7;
* unsigned long f6;
* unsigned long f7;
* unsigned long f8;
* unsigned long f9;
* unsigned long ar_csd;
* unsigned long ar_ssd;
* unsigned long rsvd1[710];
* unsigned long dbr[8];
* unsigned long rsvd2[504];
* unsigned long ibr[8];
......@@ -119,7 +174,7 @@
#define PT_F125 0x05d0
#define PT_F126 0x05e0
#define PT_F127 0x05f0
/* switch stack: */
#define PT_NAT_BITS 0x0600
#define PT_F2 0x0610
......@@ -162,7 +217,6 @@
#define PT_AR_EC 0x0800
#define PT_AR_LC 0x0808
/* pt_regs */
#define PT_CR_IPSR 0x0830
#define PT_CR_IIP 0x0838
#define PT_CFM 0x0840
......@@ -209,6 +263,8 @@
#define PT_F7 0x0990
#define PT_F8 0x09a0
#define PT_F9 0x09b0
#define PT_AR_CSD 0x09c0
#define PT_AR_SSD 0x09c8
#define PT_DBR 0x2000 /* data breakpoint registers */
#define PT_IBR 0x3000 /* instruction breakpoint registers */
......
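
The two new ptrace offsets follow from the layout above: each struct ia64_fpreg occupies 16 bytes, so PT_AR_CSD lands one fpreg past PT_F9, with PT_AR_SSD 8 bytes after it. A compile-time sanity check of that assumed relationship:

/* Assumed relationship between the offsets above -- illustrative only. */
#define PT_F9_CHK      0x09b0
#define PT_AR_CSD_CHK  0x09c0
#define PT_AR_SSD_CHK  0x09c8

/* Each struct ia64_fpreg is 16 bytes; ar_csd/ar_ssd are 8-byte words.
 * A negative array size forces a compile error if the math is off. */
typedef char check_csd[(PT_AR_CSD_CHK == PT_F9_CHK + 16)    ? 1 : -1];
typedef char check_ssd[(PT_AR_SSD_CHK == PT_AR_CSD_CHK + 8) ? 1 : -1];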
......@@ -56,7 +56,7 @@ struct sigcontext {
unsigned long sc_rbs_base; /* NULL or new base of sighandler's rbs */
unsigned long sc_loadrs; /* see description above */
unsigned long sc_ar25; /* rsvd for scratch use */
unsigned long sc_ar25; /* cmp8xchg16 uses this */
unsigned long sc_ar26; /* rsvd for scratch use */
unsigned long sc_rsvd[12]; /* reserved for future use */
/*
......
......@@ -26,7 +26,9 @@ enum unw_application_register {
UNW_AR_EC,
UNW_AR_FPSR,
UNW_AR_RSC,
UNW_AR_CCV
UNW_AR_CCV,
UNW_AR_CSD,
UNW_AR_SSD
};
/*
......
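
With UNW_AR_CSD and UNW_AR_SSD defined, unwind clients can reach the new scratch ARs through the existing accessor. A hedged usage sketch, assuming the unw_access_ar() signature from the unwind.c hunk above (write flag 0 means read; 0 is assumed to mean success):

/* Illustrative kernel-context fragment -- not part of the patch. */
#include <linux/kernel.h>	/* printk */
#include <asm/unwind.h>		/* struct unw_frame_info, unw_access_ar() */

/* Dump ar.csd for the frame `info` currently points at. */
static void dump_csd(struct unw_frame_info *info)
{
	unsigned long csd;

	if (unw_access_ar(info, UNW_AR_CSD, &csd, 0) == 0)
		printk("ar.csd = 0x%lx\n", csd);
}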