Commit d998bfd9 authored by Tony Luck

Merge intel.com:/data/home/aegl/BK/Linus

into intel.com:/data/home/aegl/BK/linux-ia64-release-2.6.11
parents b389f382 00299a67
@@ -158,14 +158,6 @@ config IA64_BRL_EMU
 	depends on ITANIUM
 	default y

-config ITANIUM_BSTEP_SPECIFIC
-	bool "Itanium B-step specific code"
-	depends on ITANIUM
-	help
-	  Select this option to build a kernel for an Itanium prototype system
-	  with a B-step CPU.  You have a B-step CPU if the "revision" field in
-	  /proc/cpuinfo has a value in the range from 1 to 4.
-
 # align cache-sensitive data to 128 bytes
 config IA64_L1_CACHE_SHIFT
 	int
......
@@ -46,8 +46,6 @@ ifeq ($(GCC_VERSION),0304)
 	cflags-$(CONFIG_MCKINLEY)	+= -mtune=mckinley
 endif

-cflags-$(CONFIG_ITANIUM_BSTEP_SPECIFIC)	+= -mb-step
-
 CFLAGS += $(cflags-y)

 head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
......
@@ -73,7 +73,6 @@ CONFIG_ITANIUM=y
 CONFIG_IA64_PAGE_SIZE_16KB=y
 # CONFIG_IA64_PAGE_SIZE_64KB is not set
 CONFIG_IA64_BRL_EMU=y
-# CONFIG_ITANIUM_BSTEP_SPECIFIC is not set
 CONFIG_IA64_L1_CACHE_SHIFT=6
 # CONFIG_NUMA is not set
 # CONFIG_VIRTUAL_MEM_MAP is not set
......
 /*
  * IA32 Architecture-specific signal handling support.
  *
- * Copyright (C) 1999, 2001-2002 Hewlett-Packard Co
+ * Copyright (C) 1999, 2001-2002, 2005 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
  * Copyright (C) 2000 VA Linux Co
@@ -970,11 +970,10 @@ ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
 }

 asmlinkage long
-sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, int arg6, int arg7,
-		 unsigned long stack)
+sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5,
+		 int arg6, int arg7, struct pt_regs regs)
 {
-	struct pt_regs *regs = (struct pt_regs *) &stack;
-	unsigned long esp = (unsigned int) regs->r12;
+	unsigned long esp = (unsigned int) regs.r12;
 	struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(esp - 8);
 	sigset_t set;
 	int eax;
@@ -993,7 +992,7 @@ sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5,
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);

-	if (restore_sigcontext_ia32(regs, &frame->sc, &eax))
+	if (restore_sigcontext_ia32(&regs, &frame->sc, &eax))
 		goto badframe;
 	return eax;
@@ -1003,11 +1002,10 @@ sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5,
 }

 asmlinkage long
-sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, int arg6, int arg7,
-		    unsigned long stack)
+sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4,
+		    int arg5, int arg6, int arg7, struct pt_regs regs)
 {
-	struct pt_regs *regs = (struct pt_regs *) &stack;
-	unsigned long esp = (unsigned int) regs->r12;
+	unsigned long esp = (unsigned int) regs.r12;
 	struct rt_sigframe_ia32 __user *frame = (struct rt_sigframe_ia32 __user *)(esp - 4);
 	sigset_t set;
 	int eax;
@@ -1023,7 +1021,7 @@ sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5,
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);

-	if (restore_sigcontext_ia32(regs, &frame->uc.uc_mcontext, &eax))
+	if (restore_sigcontext_ia32(&regs, &frame->uc.uc_mcontext, &eax))
 		goto badframe;

 	/* It is more difficult to avoid calling this function than to
......
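Editor's note: the recurring change in this merge replaces the old ia64 trick of declaring a trailing "unsigned long stack" argument and casting its address to a struct pt_regs pointer, with passing struct pt_regs by value as the final argument. A minimal sketch of the two idioms (the do_work() helper is hypothetical, not the kernel's code):

	/* Old idiom: the trailing argument happens to sit exactly where
	 * pt_regs was saved on the kernel stack, so its address doubles
	 * as a struct pt_regs pointer. */
	asmlinkage long
	old_stub (int arg0, unsigned long stack)
	{
		struct pt_regs *regs = (struct pt_regs *) &stack;
		return do_work(regs);
	}

	/* New idiom: declare the saved registers as a by-value argument;
	 * &regs yields the same address without the fragile cast. */
	asmlinkage long
	new_stub (int arg0, struct pt_regs regs)
	{
		return do_work(&regs);
	}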
@@ -6,7 +6,7 @@
  * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 2000-2003 Hewlett-Packard Co
+ * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 2004 Gordon Jin <gordon.jin@intel.com>
  *
@@ -1436,7 +1436,7 @@ sys32_waitpid (int pid, unsigned int *stat_addr, int options)
 }

 static unsigned int
-ia32_peek (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int *val)
+ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
 {
 	size_t copied;
 	unsigned int ret;
@@ -1446,7 +1446,7 @@ ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
 }

 static unsigned int
-ia32_poke (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int val)
+ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
 {
 	if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
@@ -1751,25 +1751,16 @@ restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __u
 	return 0;
 }

-/*
- * Note that the IA32 version of `ptrace' calls the IA64 routine for
- * many of the requests.  This will only work for requests that do
- * not need access to the calling processes `pt_regs' which is located
- * at the address of `stack'.  Once we call the IA64 `sys_ptrace' then
- * the address of `stack' will not be the address of the `pt_regs'.
- */
 asmlinkage long
-sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
-	      long arg4, long arg5, long arg6, long arg7, long stack)
+sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
 {
-	struct pt_regs *regs = (struct pt_regs *) &stack;
 	struct task_struct *child;
 	unsigned int value, tmp;
 	long i, ret;

 	lock_kernel();
 	if (request == PTRACE_TRACEME) {
-		ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
+		ret = sys_ptrace(request, pid, addr, data);
 		goto out;
 	}
@@ -1786,7 +1777,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
 		goto out_tsk;

 	if (request == PTRACE_ATTACH) {
-		ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
+		ret = sys_ptrace(request, pid, addr, data);
 		goto out_tsk;
 	}
@@ -1797,7 +1788,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
 	switch (request) {
 	      case PTRACE_PEEKTEXT:
 	      case PTRACE_PEEKDATA:	/* read word at location addr */
-		ret = ia32_peek(regs, child, addr, &value);
+		ret = ia32_peek(child, addr, &value);
 		if (ret == 0)
 			ret = put_user(value, (unsigned int __user *) compat_ptr(data));
 		else
@@ -1806,7 +1797,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
 	      case PTRACE_POKETEXT:
 	      case PTRACE_POKEDATA:	/* write the word at location addr */
-		ret = ia32_poke(regs, child, addr, data);
+		ret = ia32_poke(child, addr, data);
 		goto out_tsk;

 	      case PTRACE_PEEKUSR:	/* read word at addr in USER area */
@@ -1882,7 +1873,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
 	      case PTRACE_KILL:
 	      case PTRACE_SINGLESTEP:	/* execute child for one instruction */
 	      case PTRACE_DETACH:	/* detach a process */
-		ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
+		ret = sys_ptrace(request, pid, addr, data);
 		break;

 	      default:
@@ -1905,9 +1896,9 @@ typedef struct {

 asmlinkage long
 sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
-		   long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, long stack)
+		   long arg2, long arg3, long arg4, long arg5, long arg6,
+		   long arg7, struct pt_regs pt)
 {
-	struct pt_regs *pt = (struct pt_regs *) &stack;
 	stack_t uss, uoss;
 	ia32_stack_t buf32;
 	int ret;
@@ -1928,7 +1919,7 @@ sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
 	}
 	set_fs(KERNEL_DS);
 	ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL,
-			     (stack_t __user *) &uoss, pt->r12);
+			     (stack_t __user *) &uoss, pt.r12);
 	current->sas_ss_size = buf32.ss_size;
 	set_fs(old_fs);
 out:
......
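Editor's note: with the regs parameter dropped, ia32_peek()/ia32_poke() work purely on the traced child's address space via access_process_vm(). A simplified sketch of the poke half, assuming the 2.6-era signature whose final argument selects write access (the helper name is illustrative):

	static int
	poke_word (struct task_struct *child, unsigned long addr,
		   unsigned int val)
	{
		/* write sizeof(val) bytes into the traced child's memory */
		if (access_process_vm(child, addr, &val, sizeof(val), 1)
		    != sizeof(val))
			return -EIO;
		return 0;
	}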
@@ -193,9 +193,17 @@ void foo(void)
 	DEFINE(IA64_CLONE_VM, CLONE_VM);
 	BLANK();

-	DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
-	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
+	DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET,
+	       offsetof (struct cpuinfo_ia64, nsec_per_cyc));
+	DEFINE(IA64_CPUINFO_PTCE_BASE_OFFSET,
+	       offsetof (struct cpuinfo_ia64, ptce_base));
+	DEFINE(IA64_CPUINFO_PTCE_COUNT_OFFSET,
+	       offsetof (struct cpuinfo_ia64, ptce_count));
+	DEFINE(IA64_CPUINFO_PTCE_STRIDE_OFFSET,
+	       offsetof (struct cpuinfo_ia64, ptce_stride));
+	BLANK();
+	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET,
+	       offsetof (struct timespec, tv_nsec));

 	DEFINE(CLONE_SETTLS_BIT, 19);
 #if CLONE_SETTLS != (1<<19)
@@ -203,19 +211,16 @@ void foo(void)
 #endif
 	BLANK();

-	/* used by arch/ia64/kernel/mca_asm.S */
-	DEFINE(IA64_CPUINFO_PERCPU_PADDR, offsetof (struct cpuinfo_ia64, percpu_paddr));
-	DEFINE(IA64_CPUINFO_PAL_PADDR, offsetof (struct cpuinfo_ia64, pal_paddr));
-	DEFINE(IA64_CPUINFO_PA_MCA_INFO, offsetof (struct cpuinfo_ia64, ia64_pa_mca_data));
-	DEFINE(IA64_MCA_PROC_STATE_DUMP, offsetof (struct ia64_mca_cpu_s, ia64_mca_proc_state_dump));
-	DEFINE(IA64_MCA_STACK, offsetof (struct ia64_mca_cpu_s, ia64_mca_stack));
-	DEFINE(IA64_MCA_STACKFRAME, offsetof (struct ia64_mca_cpu_s, ia64_mca_stackframe));
-	DEFINE(IA64_MCA_BSPSTORE, offsetof (struct ia64_mca_cpu_s, ia64_mca_bspstore));
-	DEFINE(IA64_INIT_STACK, offsetof (struct ia64_mca_cpu_s, ia64_init_stack));
-
-	/* used by head.S */
-	DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
+	DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET,
+	       offsetof (struct ia64_mca_cpu, proc_state_dump));
+	DEFINE(IA64_MCA_CPU_STACK_OFFSET,
+	       offsetof (struct ia64_mca_cpu, stack));
+	DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET,
+	       offsetof (struct ia64_mca_cpu, stackframe));
+	DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET,
+	       offsetof (struct ia64_mca_cpu, rbstore));
+	DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
+	       offsetof (struct ia64_mca_cpu, init_stack));
 	BLANK();

 	/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
 	DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
......
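Editor's note: these DEFINE()/offsetof() entries feed the asm-offsets machinery, which lets assembly such as mca_asm.S use structure offsets computed by the C compiler. A generic sketch of how that machinery works (the standard Kbuild pattern, not this file verbatim):

	#include <linux/stddef.h>

	/* Emits a marker into the generated assembly; the build system
	 * greps these out and writes "#define sym value" lines into
	 * asm-offsets.h for assembly code to include. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))
	#define BLANK() asm volatile("\n->" : :)

	struct example { long a, b; };

	void foo(void)
	{
		DEFINE(EXAMPLE_B_OFFSET, offsetof(struct example, b));
		BLANK();
	}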
@@ -415,8 +415,8 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
  * Abstraction Layer chapter 11 in ADAG
  */
-static efi_memory_desc_t *
-pal_code_memdesc (void)
+void *
+efi_get_pal_addr (void)
 {
 	void *efi_map_start, *efi_map_end, *p;
 	efi_memory_desc_t *md;
@@ -474,51 +474,31 @@ pal_code_memdesc (void)
 			 md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
 			 vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
 #endif
-		return md;
+		return __va(md->phys_addr);
 	}
+	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found",
+	       __FUNCTION__);
 	return NULL;
 }

-void
-efi_get_pal_addr (void)
-{
-	efi_memory_desc_t *md = pal_code_memdesc();
-	u64 vaddr, mask;
-	struct cpuinfo_ia64 *cpuinfo;
-
-	if (md != NULL) {
-		vaddr = PAGE_OFFSET + md->phys_addr;
-		mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
-		cpuinfo = (struct cpuinfo_ia64 *)__va(ia64_get_kr(IA64_KR_PA_CPU_INFO));
-		cpuinfo->pal_base = vaddr & mask;
-		cpuinfo->pal_paddr = pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL));
-	}
-}
-
 void
 efi_map_pal_code (void)
 {
-	efi_memory_desc_t *md = pal_code_memdesc();
-	u64 vaddr, mask, psr;
-
-	if (md != NULL) {
-		vaddr = PAGE_OFFSET + md->phys_addr;
-		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
+	void *pal_vaddr = efi_get_pal_addr ();
+	u64 psr;
+
+	if (!pal_vaddr)
+		return;

 	/*
 	 * Cannot write to CRx with PSR.ic=1
 	 */
 	psr = ia64_clear_ic();
-		ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
-			 pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)),
+	ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
+		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
 		 IA64_GRANULE_SHIFT);
 	ia64_set_psr(psr);		/* restore psr */
 	ia64_srlz_i();
-	}
 }

 void __init
......
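Editor's note: efi_get_pal_addr() now hands back the kernel virtual address of the PAL descriptor, and efi_map_pal_code() rounds it down to a granule boundary before installing the translation register. The rounding itself is plain mask arithmetic; a toy illustration, assuming 16 MB granules (a granule shift of 24):

	#include <stdio.h>

	#define GRANULE_SHIFT		24	/* assumption: 16 MB granules */
	#define GRANULEROUNDDOWN(a)	((a) & ~((1UL << GRANULE_SHIFT) - 1))

	int main(void)
	{
		unsigned long pal_vaddr = 0xe000000001234567UL;	/* made up */

		/* the ITR must map one whole, granule-aligned region */
		printf("granule base: %#lx\n", GRANULEROUNDDOWN(pal_vaddr));
		return 0;
	}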
@@ -558,7 +558,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
 .mem.offset 0,0; st8.spill [r2]=r8		// store return value in slot for r8
 .mem.offset 8,0; st8.spill [r3]=r10		// clear error indication in slot for r10
 	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-.ret3:	br.cond.sptk	ia64_leave_syscall
+.ret3:	br.cond.sptk	.work_pending_syscall_end

 strace_error:
 	ld8 r3=[r2]				// load pt_regs.r8
@@ -621,10 +621,7 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
 	PT_REGS_UNWIND_INFO(0)
 	cmp.ge p6,p7=r8,r0			// syscall executed successfully?
 	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
-	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
-	;;
-.mem.offset 0,0; (p6) st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
-.mem.offset 8,0; (p6) st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
+	mov r10=r0				// clear error indication in r10
 (p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
 END(ia64_ret_from_syscall)
 	// fall through
@@ -709,27 +706,23 @@ ENTRY(ia64_leave_syscall)
 	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
 	mov b7=r0				// clear b7
 	;;
-	ld8 r23=[r3],PT(R9)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
-	ld8 r18=[r2],PT(R8)-PT(B6)		// load b6
+	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
+	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
 (p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
 	;;
 	mov r16=ar.bsp				// M2  get existing backing store pointer
 (p6)	cmp4.ne.unc p6,p0=r15, r0		// any special work pending?
-(p6)	br.cond.spnt .work_pending
+(p6)	br.cond.spnt .work_pending_syscall
 	;;
 	// start restoring the state saved on the kernel stack (struct pt_regs):
-	ld8.fill r8=[r2],16
-	ld8.fill r9=[r3],16
+	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
+	ld8 r11=[r3],PT(CR_IIP)-PT(R11)
 	mov f6=f0				// clear f6
 	;;
 	invala					// M0|1 invalidate ALAT
 	rsm psr.i | psr.ic			// M2 initiate turning off of interrupt and interruption collection
 	mov f9=f0				// clear f9
-	ld8.fill r10=[r2],16
-	ld8.fill r11=[r3],16
-	mov f7=f0				// clear f7
-	;;
 	ld8 r29=[r2],16				// load cr.ipsr
 	ld8 r28=[r3],16				// load cr.iip
 	mov f8=f0				// clear f8
@@ -760,7 +753,7 @@ ENTRY(ia64_leave_syscall)
 	;;
 	srlz.d					// M0  ensure interruption collection is off
 	ld8.fill r13=[r3],16
-	nop.i 0
+	mov f7=f0				// clear f7
 	;;
 	ld8.fill r12=[r2]			// restore r12 (sp)
 	ld8.fill r15=[r3]			// restore r15
@@ -770,8 +763,8 @@ ENTRY(ia64_leave_syscall)
 (pUStk)	st1 [r14]=r17
 	mov b6=r18				// I0  restore b6
 	;;
-	shr.u r18=r19,16			// I0|1 get byte size of existing "dirty" partition
 	mov r14=r0				// clear r14
+	shr.u r18=r19,16			// I0|1 get byte size of existing "dirty" partition
 (pKStk) br.cond.dpnt.many skip_rbs_switch
 	mov.m ar.ccv=r0				// clear ar.ccv
@@ -987,7 +980,7 @@ dont_preserve_current_frame:
 	shladd in0=loc1,3,r17
 	mov in1=0
 	;;
-	.align 32
+	TEXT_ALIGN(32)
 rse_clear_invalid:
 #ifdef CONFIG_ITANIUM
 	// cycle 0
@@ -1083,6 +1076,12 @@ skip_rbs_switch:
  *	On exit:
  *		p6 = TRUE if work-pending-check needs to be redone
  */
+.work_pending_syscall:
+	add r2=-8,r2
+	add r3=-8,r3
+	;;
+	st8 [r2]=r8
+	st8 [r3]=r10
 .work_pending:
 	tbit.nz p6,p0=r31,TIF_SIGDELAYED	// signal delayed from MCA/INIT/NMI/PMI context?
 (p6)	br.cond.sptk.few .sigdelayed
@@ -1104,13 +1103,13 @@ skip_rbs_switch:
 	;;
 (pKStk)	st4 [r20]=r0				// preempt_count() <- 0
 #endif
-(pLvSys)br.cond.sptk.many .work_processed_syscall	// re-check
+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel	// re-check

 .notify:
 (pUStk)	br.call.spnt.many rp=notify_resume_user
 .ret10:	cmp.ne p6,p0=r0,r0			// p6 <- 0
-(pLvSys)br.cond.sptk.many .work_processed_syscall	// don't re-check
+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel	// don't re-check

 // There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
@@ -1121,9 +1120,17 @@ skip_rbs_switch:
 .sigdelayed:
 	br.call.sptk.many rp=do_sigdelayed
 	cmp.eq p6,p0=r0,r0			// p6 <- 1, always re-check
-(pLvSys)br.cond.sptk.many .work_processed_syscall	// re-check
+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel	// re-check
+
+.work_pending_syscall_end:
+	adds r2=PT(R8)+16,r12
+	adds r3=PT(R10)+16,r12
+	;;
+	ld8 r8=[r2]
+	ld8 r10=[r3]
+	br.cond.sptk.many .work_processed_syscall	// re-check
 END(ia64_leave_kernel)

 ENTRY(handle_syscall_error)
@@ -1135,17 +1142,11 @@ ENTRY(handle_syscall_error)
 	 */
 	PT_REGS_UNWIND_INFO(0)
 	ld8 r3=[r2]		// load pt_regs.r8
-	sub r9=0,r8		// negate return value to get errno
 	;;
-	mov r10=-1		// return -1 in pt_regs.r10 to indicate error
 	cmp.eq p6,p7=r3,r0	// is pt_regs.r8==0?
-	adds r3=16,r2		// r3=&pt_regs.r10
-	;;
-(p6)	mov r9=r8
-(p6)	mov r10=0
 	;;
-.mem.offset 0,0; st8.spill [r2]=r9	// store errno in pt_regs.r8 and set unat bit
-.mem.offset 8,0; st8.spill [r3]=r10	// store error indication in pt_regs.r10 and set unat bit
+(p7)	mov r10=-1
+(p7)	sub r8=0,r8		// negate return value to get errno
 	br.cond.sptk ia64_leave_syscall
 END(handle_syscall_error)
......
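Editor's note: the reworked exit path keeps the syscall result in r8 and the error flag in r10 as live registers instead of spilling them back into pt_regs on every exit. The user-visible ia64 convention is unchanged; roughly, in C terms (an illustrative model, not kernel code):

	/* r8 carries the return value (positive errno on failure);
	 * r10 is 0 on success and -1 on failure. */
	struct sysret { long r8; long r10; };

	static struct sysret syscall_exit(long retval, int failed)
	{
		struct sysret s = { .r8 = retval, .r10 = 0 };

		if (failed) {
			s.r8  = -retval;  /* negate return value to get errno */
			s.r10 = -1;       /* error indication */
		}
		return s;
	}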
@@ -5,7 +5,7 @@
  * to set up the kernel's global pointer and jump to the kernel
  * entry point.
  *
- * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  *	Stephane Eranian <eranian@hpl.hp.com>
  * Copyright (C) 1999 VA Linux Systems
@@ -232,21 +232,6 @@ start_ap:
 	;;
 (isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
-
-#ifdef CONFIG_IA64_EARLY_PRINTK
-	.rodata
-alive_msg:
-	stringz "I'm alive and well\n"
-alive_msg_end:
-	.previous
-
-	alloc r2=ar.pfs,0,0,2,0
-	movl out0=alive_msg
-	movl out1=alive_msg_end-alive_msg-1
-	;;
-	br.call.sptk.many rp=early_printk
-1:	// force new bundle
-#endif /* CONFIG_IA64_EARLY_PRINTK */

 #ifdef CONFIG_SMP
 (isAP)	br.call.sptk.many rp=start_secondary
 .ret0:
@@ -267,7 +252,9 @@ alive_msg_end:
 	;;
 	ld8 out0=[r3]
 	br.call.sptk.many b0=console_print
-self:	br.sptk.many self		// endless loop
+
+self:	hint @pause
+	br.sptk.many self		// endless loop
 END(_start)

 GLOBAL_ENTRY(ia64_save_debug_regs)
......
@@ -67,6 +67,7 @@
 #include <asm/delay.h>
 #include <asm/machvec.h>
+#include <asm/meminit.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -86,6 +87,12 @@
 ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state;
 ia64_mca_os_to_sal_state_t	ia64_os_to_sal_handoff_state;
 u64				ia64_mca_serialize;
+
+DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
+DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
+DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	   /* PTE to map PAL code */
+DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
+
+unsigned long __per_cpu_mca[NR_CPUS];

 /* In mca_asm.S */
 extern void			ia64_monarch_init_handler (void);
@@ -1195,6 +1202,41 @@ static struct irqaction mca_cpep_irqaction = {
 };
 #endif /* CONFIG_ACPI */

+/* Do per-CPU MCA-related initialization.  */
+void __devinit
+ia64_mca_cpu_init(void *cpu_data)
+{
+	void *pal_vaddr;
+
+	/*
+	 * The MCA info structure was allocated earlier and its
+	 * physical address saved in __per_cpu_mca[cpu].  Copy that
+	 * address to ia64_mca_data so we can access it as a per-CPU
+	 * variable.
+	 */
+	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
+
+	/*
+	 * Stash away a copy of the PTE needed to map the per-CPU page.
+	 * We may need it during MCA recovery.
+	 */
+	__get_cpu_var(ia64_mca_per_cpu_pte) =
+		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
+
+	/*
+	 * Also, stash away a copy of the PAL address and the PTE
+	 * needed to map it.
+	 */
+	pal_vaddr = efi_get_pal_addr();
+	if (!pal_vaddr)
+		return;
+	__get_cpu_var(ia64_mca_pal_base) =
+		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
+	__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
+							      PAGE_KERNEL));
+}
+
 /*
  * ia64_mca_init
  *
......
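Editor's note: the new MCA bookkeeping leans on the kernel's per-CPU variable machinery. A stripped-down sketch of the pattern used above (2.6-era API; the variable name is illustrative):

	#include <linux/percpu.h>

	DEFINE_PER_CPU(u64, example_data);	/* one instance per CPU */

	static void init_this_cpu(u64 value)
	{
		/* __get_cpu_var() resolves to the running CPU's copy;
		 * the caller must not migrate while it is in use. */
		__get_cpu_var(example_data) = value;
	}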
@@ -144,24 +144,26 @@ ia64_os_mca_done_dump:
 // The following code purges TC and TR entries. Then reload all TC entries.
 // Purge percpu data TC entries.
 begin_tlb_purge_and_reload:
-	GET_PERCPU_PADDR(r2)	// paddr of percpu_paddr in cpuinfo struct
-	;;
-	mov r17=r2
-	;;
-	adds r17=8,r17
-	;;
-	ld8 r18=[r17],8		// r18=ptce_base
-	;;
-	ld4 r19=[r17],4		// r19=ptce_count[0]
-	;;
-	ld4 r20=[r17],4		// r20=ptce_count[1]
-	;;
-	ld4 r21=[r17],4		// r21=ptce_stride[0]
+
+#define O(member)	IA64_CPUINFO_##member##_OFFSET
+
+	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
+	;;
+	addl r17=O(PTCE_STRIDE),r2
+	addl r2=O(PTCE_BASE),r2
+	;;
+	ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;	// r18=ptce_base
+	ld4 r19=[r2],4			// r19=ptce_count[0]
+	ld4 r21=[r17],4			// r21=ptce_stride[0]
 	;;
+	ld4 r20=[r2]			// r20=ptce_count[1]
+	ld4 r22=[r17]			// r22=ptce_stride[1]
 	mov r24=0
 	;;
-	ld4 r22=[r17],4		// r22=ptce_stride[1]
 	adds r20=-1,r20
 	;;
+#undef O
+
 2:
 	cmp.ltu p6,p7=r24,r19
 (p7)	br.cond.dpnt.few 4f
@@ -201,9 +203,9 @@ begin_tlb_purge_and_reload:
 	srlz.d
 	;;
 	// 3. Purge ITR for PAL code.
-	adds r17=40,r23
+	GET_THIS_PADDR(r2, ia64_mca_pal_base)
 	;;
-	ld8 r16=[r17]
+	ld8 r16=[r2]
 	mov r18=IA64_GRANULE_SHIFT<<2
 	;;
 	ptr.i r16,r18
@@ -246,16 +248,15 @@ begin_tlb_purge_and_reload:
 	srlz.d
 	;;
 	// 2. Reload DTR register for PERCPU data.
-	GET_PERCPU_PADDR(r2)	// paddr of percpu_paddr in cpuinfo struct
+	GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
 	;;
-	mov r17=r2
 	movl r16=PERCPU_ADDR		// vaddr
 	movl r18=PERCPU_PAGE_SHIFT<<2
 	;;
 	mov cr.itir=r18
 	mov cr.ifa=r16
 	;;
-	ld8 r18=[r17]			// pte
+	ld8 r18=[r2]			// load per-CPU PTE
 	mov r16=IA64_TR_PERCPU_DATA;
 	;;
 	itr.d dtr[r16]=r18
@@ -263,13 +264,13 @@ begin_tlb_purge_and_reload:
 	srlz.d
 	;;
 	// 3. Reload ITR for PAL code.
-	GET_CPUINFO_PAL_PADDR(r2)	// paddr of pal_paddr in cpuinfo struct
+	GET_THIS_PADDR(r2, ia64_mca_pal_pte)
 	;;
-	mov r17=r2
+	ld8 r18=[r2]			// load PAL PTE
 	;;
-	ld8 r18=[r17],8			// pte
+	GET_THIS_PADDR(r2, ia64_mca_pal_base)
 	;;
-	ld8 r16=[r17]			// vaddr
+	ld8 r16=[r2]			// load PAL vaddr
 	mov r19=IA64_GRANULE_SHIFT<<2
 	;;
 	mov cr.itir=r19
@@ -308,14 +309,18 @@ err:
 done_tlb_purge_and_reload:

 	// Setup new stack frame for OS_MCA handling
-	GET_MCA_BSPSTORE(r2)		// paddr of bspstore save area
-	GET_MCA_STACKFRAME(r3);;	// paddr of stack frame save area
+	GET_THIS_PADDR(r2, ia64_mca_data)
+	;;
+	add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
+	add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
+	;;
 	rse_switch_context(r6,r3,r2);;	// RSC management in this new context
-	GET_MCA_STACK(r2);;		// paddr of stack save area
-	// stack size must be same as C array
-	addl r2=8*1024-16,r2;;		// stack base @ bottom of array
-	mov r12=r2			// allow 16 bytes of scratch
-					// (C calling convention)
+
+	GET_THIS_PADDR(r2, ia64_mca_data)
+	;;
+	add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
+	;;
+	mov r12=r2			// establish new stack-pointer

 	// Enter virtual mode from physical mode
 	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
@@ -331,7 +336,10 @@ ia64_os_mca_virtual_begin:
 ia64_os_mca_virtual_end:

 	// restore the original stack frame here
-	GET_MCA_STACKFRAME(r2);;	// phys addr of MCA save area
+	GET_THIS_PADDR(r2, ia64_mca_data)
+	;;
+	add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
+	;;
 	movl r4=IA64_PSR_MC
 	;;
 	rse_return_context(r4,r3,r2)	// switch from interrupt context for RSE
@@ -372,8 +380,10 @@ ia64_os_mca_dispatch_end:
 ia64_os_mca_proc_state_dump:
 	// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
 	// to virtual addressing mode.
-	GET_MCA_DUMP_PADDR(r2);;	// phys addr of MCA save area
+	GET_THIS_PADDR(r2, ia64_mca_data)
+	;;
+	add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
+	;;
 	// save ar.NaT
 	mov r5=ar.unat			// ar.unat
@@ -603,7 +613,9 @@ end_os_mca_dump:
 ia64_os_mca_proc_state_restore:

 	// Restore bank1 GR16-31
-	GET_MCA_DUMP_PADDR(r2);;	// phys addr of proc state dump area
+	GET_THIS_PADDR(r2, ia64_mca_data)
+	;;
+	add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
restore_GRs:	// restore bank-1 GRs 16-31
 	bsw.1;;
......
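Editor's note: GET_THIS_PADDR(reg, var), used throughout the rewritten handler, loads the physical address of the running CPU's instance of a per-CPU variable, which is what MCA code needs while translation is disabled. In C terms the computation is roughly (a sketch, not the kernel macro):

	/* physical address of this CPU's copy of ia64_mca_data */
	u64 *vaddr = &per_cpu(ia64_mca_data, smp_processor_id());
	unsigned long paddr = __pa(vaddr);	/* usable with PSR.dt off */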
@@ -37,10 +37,10 @@
  * go virtual and don't want to destroy the iip or ipsr.
  */
 #define MINSTATE_START_SAVE_MIN_PHYS \
-(pKStk) mov r3=ar.k3;; \
-(pKStk) addl r3=IA64_CPUINFO_PA_MCA_INFO,r3;; \
+(pKStk) mov r3=IA64_KR(PER_CPU_DATA);; \
+(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \
 (pKStk) ld8 r3 = [r3];; \
-(pKStk) addl r3=IA64_INIT_STACK,r3;; \
+(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
 (pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
 (pUStk)	mov ar.rsc=0;	/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
 (pUStk)	addl r22=IA64_RBS_OFFSET,r1;	/* compute base of register backing store */ \
......
@@ -5,13 +5,13 @@
  * The initial version of perfmon.c was written by
  * Ganesh Venkitachalam, IBM Corp.
  *
  * Then it was modified for perfmon-1.x by Stephane Eranian and
  * David Mosberger, Hewlett Packard Co.
  *
  * Version Perfmon-2.x is a rewrite of perfmon-1.x
  * by Stephane Eranian, Hewlett Packard Co.
  *
- * Copyright (C) 1999-2003 Hewlett Packard Co
+ * Copyright (C) 1999-2003, 2005 Hewlett Packard Co
  *               Stephane Eranian <eranian@hpl.hp.com>
  *               David Mosberger-Tang <davidm@hpl.hp.com>
  *
@@ -4778,10 +4778,8 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
  * system-call entry point (must return long)
  */
 asmlinkage long
-sys_perfmonctl (int fd, int cmd, void __user *arg, int count, long arg5, long arg6, long arg7,
-		long arg8, long stack)
+sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
 {
-	struct pt_regs *regs = (struct pt_regs *)&stack;
 	struct file *file = NULL;
 	pfm_context_t *ctx = NULL;
 	unsigned long flags = 0UL;
@@ -4905,7 +4903,7 @@ sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
 	if (unlikely(ret)) goto abort_locked;
 skip_fd:
-	ret = (*func)(ctx, args_k, count, regs);
+	ret = (*func)(ctx, args_k, count, ia64_task_regs(current));

 	call_made = 1;
@@ -6671,8 +6669,7 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
 }
 #else /* !CONFIG_PERFMON */
 asmlinkage long
-sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, long arg7,
-		long arg8, long stack)
+sys_perfmonctl (int fd, int cmd, void *arg, int count)
 {
 	return -ENOSYS;
 }
......
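Editor's note: with the dummy stack argument gone, sys_perfmonctl() obtains the caller's saved registers via ia64_task_regs(current). A hedged sketch of that accessor's semantics (a conceptual model; the real definition lives in asm/processor.h):

	/* Conceptual model: the pt_regs saved at kernel entry sit at
	 * the very top of the task's kernel-stack area. */
	struct pt_regs *
	task_regs (struct task_struct *task)
	{
		unsigned long stk_top = (unsigned long) task + IA64_STK_OFFSET;

		return (struct pt_regs *) stk_top - 1;
	}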
 /*
  * Kernel support for the ptrace() and syscall tracing interfaces.
  *
- * Copyright (C) 1999-2004 Hewlett-Packard Co
+ * Copyright (C) 1999-2005 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  *
- * Derived from the x86 and Alpha versions.  Most of the code in here
- * could actually be factored into a common set of routines.
+ * Derived from the x86 and Alpha versions.
  */
 #include <linux/config.h>
 #include <linux/kernel.h>
@@ -40,9 +39,11 @@
  * ri (restart instruction; two bits)
  * is (instruction set; one bit)
  */
-#define IPSR_WRITE_MASK \
-	(IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
-#define IPSR_READ_MASK	IPSR_WRITE_MASK
+#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
+		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
+
+#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
+#define PFM_MASK	MASK(38)

 #define PTRACE_DEBUG 0
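Editor's note: the new MASK() helper centralizes the "(1UL << n) - 1" pattern used throughout this file. A quick worked example of what it produces:

	#define MASK(nbits)	((1UL << (nbits)) - 1)

	/* MASK(3)  == 0x7             (three low bits set)
	 * MASK(38) == 0x3fffffffffUL  (PFM_MASK: 38 low bits set) */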
@@ -68,23 +69,24 @@ in_syscall (struct pt_regs *pt)
 unsigned long
 ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
 {
-#	define GET_BITS(first, last, unat)						\
-	({										\
-		unsigned long bit = ia64_unat_pos(&pt->r##first);			\
-		unsigned long mask = ((1UL << (last - first + 1)) - 1) << first;	\
-		unsigned long dist;							\
-		if (bit < first)							\
-			dist = 64 + bit - first;					\
-		else									\
-			dist = bit - first;						\
-		ia64_rotr(unat, dist) & mask;						\
-	})
+#	define GET_BITS(first, last, unat)				\
+	({								\
+		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
+		unsigned long nbits = (last - first + 1);		\
+		unsigned long mask = MASK(nbits) << first;		\
+		unsigned long dist;					\
+		if (bit < first)					\
+			dist = 64 + bit - first;			\
+		else							\
+			dist = bit - first;				\
+		ia64_rotr(unat, dist) & mask;				\
+	})
 	unsigned long val;

 	/*
-	 * Registers that are stored consecutively in struct pt_regs can be handled in
-	 * parallel.  If the register order in struct_pt_regs changes, this code MUST be
-	 * updated.
+	 * Registers that are stored consecutively in struct pt_regs
+	 * can be handled in parallel.  If the register order in
+	 * struct_pt_regs changes, this code MUST be updated.
 	 */
 	val  = GET_BITS( 1,  1, scratch_unat);
 	val |= GET_BITS( 2,  3, scratch_unat);
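Editor's note: GET_BITS() rotates the saved UNaT word so that the NaT bit for a given scratch register lands at its register number, then masks the run of interest. A small standalone illustration of the rotate-and-mask step (plain C stand-ins for ia64_rotr() and the UNaT bit position; the values are made up):

	#include <stdio.h>

	/* rotate right by n bits, as ia64_rotr() does */
	static unsigned long rotr(unsigned long w, unsigned int n)
	{
		return n ? (w >> n) | (w << (64 - n)) : w;
	}

	int main(void)
	{
		unsigned long unat = 1UL << 10;	/* pretend bit 10 holds r2's NaT */
		unsigned int first = 2, last = 3, bit = 10;
		unsigned long mask = ((1UL << (last - first + 1)) - 1) << first;
		unsigned long dist = bit - first;

		/* after rotation, r2's NaT bit sits at bit position 2 */
		printf("%#lx\n", rotr(unat, dist) & mask);	/* prints 0x4 */
		return 0;
	}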
@@ -106,23 +108,24 @@ ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
 unsigned long
 ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
 {
-#	define PUT_BITS(first, last, nat)						\
-	({										\
-		unsigned long bit = ia64_unat_pos(&pt->r##first);			\
-		unsigned long mask = ((1UL << (last - first + 1)) - 1) << first;	\
-		long dist;								\
-		if (bit < first)							\
-			dist = 64 + bit - first;					\
-		else									\
-			dist = bit - first;						\
-		ia64_rotl(nat & mask, dist);						\
-	})
+#	define PUT_BITS(first, last, nat)				\
+	({								\
+		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
+		unsigned long nbits = (last - first + 1);		\
+		unsigned long mask = MASK(nbits) << first;		\
+		long dist;						\
+		if (bit < first)					\
+			dist = 64 + bit - first;			\
+		else							\
+			dist = bit - first;				\
+		ia64_rotl(nat & mask, dist);				\
+	})
 	unsigned long scratch_unat;

 	/*
-	 * Registers that are stored consecutively in struct pt_regs can be handled in
-	 * parallel.  If the register order in struct_pt_regs changes, this code MUST be
-	 * updated.
+	 * Registers that are stored consecutively in struct pt_regs
+	 * can be handled in parallel.  If the register order in
+	 * struct_pt_regs changes, this code MUST be updated.
 	 */
 	scratch_unat  = PUT_BITS( 1,  1, nat);
 	scratch_unat |= PUT_BITS( 2,  3, nat);
@@ -185,10 +188,12 @@ ia64_decrement_ip (struct pt_regs *regs)
 }

 /*
- * This routine is used to read an rnat bits that are stored on the kernel backing store.
- * Since, in general, the alignment of the user and kernel are different, this is not
- * completely trivial.  In essence, we need to construct the user RNAT based on up to two
- * kernel RNAT values and/or the RNAT value saved in the child's pt_regs.
+ * This routine is used to read the rnat bits that are stored on the
+ * kernel backing store.  Since, in general, the alignment of the user
+ * and kernel are different, this is not completely trivial.  In
+ * essence, we need to construct the user RNAT based on up to two
+ * kernel RNAT values and/or the RNAT value saved in the child's
+ * pt_regs.
  *
  *	user rbs
  *
@@ -221,24 +226,28 @@ ia64_decrement_ip (struct pt_regs *regs)
  *	+--------+
  *		  <--- child_stack->ar_bspstore
  *
- * The way to think of this code is as follows: bit 0 in the user rnat corresponds to some
- * bit N (0 <= N <= 62) in one of the kernel rnat value.  The kernel rnat value holding
- * this bit is stored in variable rnat0.  rnat1 is loaded with the kernel rnat value that
+ * The way to think of this code is as follows: bit 0 in the user rnat
+ * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
+ * values.  The kernel rnat value holding this bit is stored in
+ * variable rnat0.  rnat1 is loaded with the kernel rnat value that
  * form the upper bits of the user rnat value.
  *
  * Boundary cases:
  *
- * o when reading the rnat "below" the first rnat slot on the kernel backing store,
- *   rnat0/rnat1 are set to 0 and the low order bits are merged in from pt->ar_rnat.
+ * o when reading the rnat "below" the first rnat slot on the kernel
+ *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
+ *   merged in from pt->ar_rnat.
  *
- * o when reading the rnat "above" the last rnat slot on the kernel backing store,
- *   rnat0/rnat1 gets its value from sw->ar_rnat.
+ * o when reading the rnat "above" the last rnat slot on the kernel
+ *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
  */
 static unsigned long
 get_rnat (struct task_struct *task, struct switch_stack *sw,
-	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long *urbs_end)
+	  unsigned long *krbs, unsigned long *urnat_addr,
+	  unsigned long *urbs_end)
 {
-	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, umask = 0, mask, m;
+	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
+	unsigned long umask = 0, mask, m;
 	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
 	long num_regs, nbits;
 	struct pt_regs *pt;
@@ -251,11 +260,12 @@ get_rnat (struct task_struct *task, struct switch_stack *sw,
 		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
 	else
 		nbits = 63;
-	mask = (1UL << nbits) - 1;
+	mask = MASK(nbits);
 	/*
-	 * First, figure out which bit number slot 0 in user-land maps to in the kernel
-	 * rnat.  Do this by figuring out how many register slots we're beyond the user's
-	 * backingstore and then computing the equivalent address in kernel space.
+	 * First, figure out which bit number slot 0 in user-land maps
+	 * to in the kernel rnat.  Do this by figuring out how many
+	 * register slots we're beyond the user's backingstore and
+	 * then computing the equivalent address in kernel space.
 	 */
 	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
 	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
@@ -265,7 +275,7 @@ get_rnat (struct task_struct *task, struct switch_stack *sw,
 	if (ubspstore + 63 > urnat_addr) {
 		/* some bits need to be merged in from pt->ar_rnat */
-		umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1) & mask;
+		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
 		urnat = (pt->ar_rnat & umask);
 		mask &= ~umask;
 		if (!mask)
@@ -323,12 +333,13 @@ put_rnat (struct task_struct *task, struct switch_stack *sw,
 			return;
 		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
 	}
-	mask = (1UL << nbits) - 1;
+	mask = MASK(nbits);
 	/*
-	 * First, figure out which bit number slot 0 in user-land maps to in the kernel
-	 * rnat.  Do this by figuring out how many register slots we're beyond the user's
-	 * backingstore and then computing the equivalent address in kernel space.
+	 * First, figure out which bit number slot 0 in user-land maps
+	 * to in the kernel rnat.  Do this by figuring out how many
+	 * register slots we're beyond the user's backingstore and
+	 * then computing the equivalent address in kernel space.
 	 */
 	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
 	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
@@ -338,7 +349,7 @@ put_rnat (struct task_struct *task, struct switch_stack *sw,
 	if (ubspstore + 63 > urnat_addr) {
 		/* some bits need to be placed in pt->ar_rnat: */
-		umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1) & mask;
+		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
 		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
 		mask &= ~umask;
 		if (!mask)
@@ -364,25 +375,28 @@ put_rnat (struct task_struct *task, struct switch_stack *sw,
 }

 static inline int
-on_kernel_rbs (unsigned long addr, unsigned long bspstore, unsigned long urbs_end)
+on_kernel_rbs (unsigned long addr, unsigned long bspstore,
+	       unsigned long urbs_end)
 {
-	return (addr >= bspstore
-		&& addr <= (unsigned long) ia64_rse_rnat_addr((unsigned long *) urbs_end));
+	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
+						      urbs_end);
+	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
 }

 /*
- * Read a word from the user-level backing store of task CHILD.  ADDR is the user-level
- * address to read the word from, VAL a pointer to the return value, and USER_BSP gives
- * the end of the user-level backing store (i.e., it's the address that would be in ar.bsp
- * after the user executed a "cover" instruction).
+ * Read a word from the user-level backing store of task CHILD.  ADDR
+ * is the user-level address to read the word from, VAL a pointer to
+ * the return value, and USER_BSP gives the end of the user-level
+ * backing store (i.e., it's the address that would be in ar.bsp after
+ * the user executed a "cover" instruction).
  *
- * This routine takes care of accessing the kernel register backing store for those
- * registers that got spilled there.  It also takes care of calculating the appropriate
- * RNaT collection words.
+ * This routine takes care of accessing the kernel register backing
+ * store for those registers that got spilled there.  It also takes
+ * care of calculating the appropriate RNaT collection words.
  */
 long
-ia64_peek (struct task_struct *child, struct switch_stack *child_stack, unsigned long user_rbs_end,
-	   unsigned long addr, long *val)
+ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
+	   unsigned long user_rbs_end, unsigned long addr, long *val)
 {
 	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
 	struct pt_regs *child_regs;
@@ -394,10 +408,13 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 	child_regs = ia64_task_regs(child);
 	bspstore = (unsigned long *) child_regs->ar_bspstore;
 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
-	if (on_kernel_rbs(addr, (unsigned long) bspstore, (unsigned long) urbs_end)) {
+	if (on_kernel_rbs(addr, (unsigned long) bspstore,
+			  (unsigned long) urbs_end))
+	{
 		/*
-		 * Attempt to read the RBS in an area that's actually on the kernel RBS =>
-		 * read the corresponding bits in the kernel RBS.
+		 * Attempt to read the RBS in an area that's actually
+		 * on the kernel RBS => read the corresponding bits in
+		 * the kernel RBS.
 		 */
 		rnat_addr = ia64_rse_rnat_addr(laddr);
 		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
@@ -410,18 +427,23 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
 			/*
-			 * It is implementation dependent whether the data portion of a
-			 * NaT value gets saved on a st8.spill or RSE spill (e.g., see
-			 * EAS 2.6, 4.4.4.6 Register Spill and Fill).  To get consistent
-			 * behavior across all possible IA-64 implementations, we return
-			 * zero in this case.
+			 * It is implementation dependent whether the
+			 * data portion of a NaT value gets saved on a
+			 * st8.spill or RSE spill (e.g., see EAS 2.6,
+			 * 4.4.4.6 Register Spill and Fill).  To get
+			 * consistent behavior across all possible
+			 * IA-64 implementations, we return zero in
+			 * this case.
			 */
 			*val = 0;
 			return 0;
 		}

 		if (laddr < urbs_end) {
-			/* the desired word is on the kernel RBS and is not a NaT */
+			/*
+			 * The desired word is on the kernel RBS and
+			 * is not a NaT.
+			 */
 			regnum = ia64_rse_num_regs(bspstore, laddr);
 			*val = *ia64_rse_skip_regs(krbs, regnum);
 			return 0;
@@ -435,43 +457,51 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 }

 long
-ia64_poke (struct task_struct *child, struct switch_stack *child_stack, unsigned long user_rbs_end,
-	   unsigned long addr, long val)
+ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
+	   unsigned long user_rbs_end, unsigned long addr, long val)
 {
-	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end = (long *) user_rbs_end;
+	unsigned long *bspstore, *krbs, regnum, *laddr;
+	unsigned long *urbs_end = (long *) user_rbs_end;
 	struct pt_regs *child_regs;

 	laddr = (unsigned long *) addr;
 	child_regs = ia64_task_regs(child);
 	bspstore = (unsigned long *) child_regs->ar_bspstore;
 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
-	if (on_kernel_rbs(addr, (unsigned long) bspstore, (unsigned long) urbs_end)) {
+	if (on_kernel_rbs(addr, (unsigned long) bspstore,
+			  (unsigned long) urbs_end))
+	{
 		/*
-		 * Attempt to write the RBS in an area that's actually on the kernel RBS
-		 * => write the corresponding bits in the kernel RBS.
+		 * Attempt to write the RBS in an area that's actually
+		 * on the kernel RBS => write the corresponding bits
+		 * in the kernel RBS.
 		 */
 		if (ia64_rse_is_rnat_slot(laddr))
-			put_rnat(child, child_stack, krbs, laddr, val, urbs_end);
+			put_rnat(child, child_stack, krbs, laddr, val,
+				 urbs_end);
 		else {
 			if (laddr < urbs_end) {
 				regnum = ia64_rse_num_regs(bspstore, laddr);
 				*ia64_rse_skip_regs(krbs, regnum) = val;
 			}
 		}
-	} else if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val)) {
+	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
+		   != sizeof(val))
 		return -EIO;
-	}
 	return 0;
 }
 /*
- * Calculate the address of the end of the user-level register backing store.  This is the
- * address that would have been stored in ar.bsp if the user had executed a "cover"
- * instruction right before entering the kernel.  If CFMP is not NULL, it is used to
- * return the "current frame mask" that was active at the time the kernel was entered.
+ * Calculate the address of the end of the user-level register backing
+ * store.  This is the address that would have been stored in ar.bsp
+ * if the user had executed a "cover" instruction right before
+ * entering the kernel.  If CFMP is not NULL, it is used to return the
+ * "current frame mask" that was active at the time the kernel was
+ * entered.
  */
 unsigned long
-ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt, unsigned long *cfmp)
+ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
+		       unsigned long *cfmp)
 {
 	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
 	long ndirty;
@@ -491,9 +521,11 @@ ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
 }

 /*
- * Synchronize (i.e, write) the RSE backing store living in kernel space to the VM of the
- * CHILD task.  SW and PT are the pointers to the switch_stack and pt_regs structures,
- * respectively.  USER_RBS_END is the user-level address at which the backing store ends.
+ * Synchronize (i.e, write) the RSE backing store living in kernel
+ * space to the VM of the CHILD task.  SW and PT are the pointers to
+ * the switch_stack and pt_regs structures, respectively.
+ * USER_RBS_END is the user-level address at which the backing store
+ * ends.
  */
 long
 ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
@@ -507,7 +539,8 @@ ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
 		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
 		if (ret < 0)
 			return ret;
-		if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
+		if (access_process_vm(child, addr, &val, sizeof(val), 1)
+		    != sizeof(val))
 			return -EIO;
 	}
 	return 0;
@@ -521,13 +554,14 @@ thread_matches (struct task_struct *thread, unsigned long addr)
	if (ptrace_check_attach(thread, 0) < 0)
		/*
		 * If the thread is not in an attachable state, we'll
		 * ignore it.  The net effect is that if ADDR happens
		 * to overlap with the portion of the thread's
		 * register backing store that is currently residing
		 * on the thread's kernel stack, then ptrace() may end
		 * up accessing a stale value.  But if the thread
		 * isn't stopped, that's a problem anyhow, so we're
		 * doing as well as we can...
		 */
		return 0;
/*
 * GDB apparently wants to be able to read the register-backing store
 * of any thread when attached to a given process.  If we are peeking
 * or poking an address that happens to reside in the kernel-backing
 * store of another thread, we need to attach to that thread, because
 * otherwise we end up accessing stale data.
 *
 * task_list_lock must be read-locked before calling this routine!
 */
@@ -557,7 +592,8 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
	if (!(mm = get_task_mm(child)))
		return child;

	/* -1 because of our get_task_mm(): */
	mm_users = atomic_read(&mm->mm_users) - 1;
	if (mm_users <= 1)
		goto out;		/* not multi-threaded */
@@ -627,7 +663,8 @@ ia64_sync_fph (struct task_struct *task)
}

static int
access_fr (struct unw_frame_info *info, int regnum, int hi,
	   unsigned long *data, int write_access)
{
	struct ia64_fpreg fpval;
	int ret;
@@ -649,7 +686,8 @@ access_fr (...)
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, pr;
@@ -674,11 +712,51 @@ convert_to_non_syscall (...)
}
static int
-access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access)
+access_nat_bits (struct task_struct *child, struct pt_regs *pt,
+		 struct unw_frame_info *info,
+		 unsigned long *data, int write_access)
+{
+	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
+	char nat = 0;
+
+	if (write_access) {
+		nat_bits = *data;
+		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
+		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
+			dprintk("ptrace: failed to set ar.unat\n");
+			return -1;
+		}
+		for (regnum = 4; regnum <= 7; ++regnum) {
+			unw_get_gr(info, regnum, &dummy, &nat);
+			unw_set_gr(info, regnum, dummy,
+				   (nat_bits >> regnum) & 1);
+		}
+	} else {
+		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
+			dprintk("ptrace: failed to read ar.unat\n");
+			return -1;
+		}
+		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
+		for (regnum = 4; regnum <= 7; ++regnum) {
+			unw_get_gr(info, regnum, &dummy, &nat);
+			nat_bits |= (nat != 0) << regnum;
+		}
+		*data = nat_bits;
+	}
+	return 0;
+}
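The new access_nat_bits() above packs one valid/NaT flag per preserved register (r4-r7) into bits 4..7 of a single PT_NAT_BITS word. A toy user-space model of that packing and unpacking, with made-up register states:

#include <stdio.h>

int main (void)
{
	unsigned long nat_bits = 0;
	char nat_r5 = 1, nat_r6 = 0;

	/* pack: bit N of nat_bits mirrors the NaT flag of register rN */
	nat_bits |= (unsigned long) (nat_r5 != 0) << 5;	/* r5 is NaT */
	nat_bits |= (unsigned long) (nat_r6 != 0) << 6;	/* r6 is valid */

	/* unpack, as the read path does before handing *data back: */
	printf("r5 NaT? %lu\n", (nat_bits >> 5) & 1);	/* prints 1 */
	printf("r6 NaT? %lu\n", (nat_bits >> 6) & 1);	/* prints 0 */
	return 0;
}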
+
+static int
+access_uarea (struct task_struct *child, unsigned long addr,
+	      unsigned long *data, int write_access)
{
	unsigned long *ptr, regnum, urbs_end, rnat_addr, cfm;
	struct switch_stack *sw;
	struct pt_regs *pt;
+#	define pt_reg_addr(pt, reg)	((void *)			    \
+					 ((unsigned long) (pt)		    \
+					  + offsetof(struct pt_regs, reg)))

	pt = ia64_task_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
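The pt_reg_addr() macro introduced here is the standard offsetof() pattern: compute a field's byte offset within the saved-register frame, then add it to the frame's base pointer. A self-contained sketch of the same pattern with a made-up three-field structure (assuming an LP64 host; note the kernel version returns void * and relies on GCC's void-pointer-arithmetic extension when an extra byte offset is added):

#include <stddef.h>
#include <stdio.h>

/* toy stand-in for struct pt_regs; field names are illustrative */
struct regs { unsigned long r1, r2, r3; };

/* same shape as the pt_reg_addr() macro above */
#define reg_addr(p, field) ((void *) ((unsigned long) (p) + \
				      offsetof(struct regs, field)))

int main (void)
{
	struct regs r = { 1, 2, 3 };

	/* reach r3 as "r2 plus one 8-byte slot", the way the
	 * PT_R2/PT_R3 case adds (addr - PT_R2) to pt_reg_addr(pt, r2): */
	unsigned long *p = (unsigned long *)
		((char *) reg_addr(&r, r2) + 8);
	printf("%lu\n", *p);	/* prints 3 */
	return 0;
}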
@@ -694,17 +772,20 @@ access_uarea (...)
			ia64_sync_fph(child);
		else
			ia64_flush_fph(child);
		ptr = (unsigned long *)
			((unsigned long) &child->thread.fph + addr);
	} else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) {
		/* scratch registers untouched by kernel (saved in pt_regs) */
-		ptr = (unsigned long *)
-			((long) pt + offsetof(struct pt_regs, f10) + addr - PT_F10);
+		ptr = pt_reg_addr(pt, f10) + (addr - PT_F10);
	} else if (addr >= PT_F12 && addr < PT_F15 + 16) {
		/*
		 * Scratch registers untouched by kernel (saved in
		 * switch_stack).
		 */
		ptr = (unsigned long *) ((long) sw
					 + (addr - PT_NAT_BITS - 32));
	} else if (addr < PT_AR_LC + 8) {
		/* preserved state: */
-		unsigned long nat_bits, scratch_unat, dummy = 0;
		struct unw_frame_info info;
		char nat = 0;
		int ret;
switch (addr) { switch (addr) {
case PT_NAT_BITS: case PT_NAT_BITS:
if (write_access) { return access_nat_bits(child, pt, &info,
nat_bits = *data; data, write_access);
scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
if (unw_set_ar(&info, UNW_AR_UNAT, scratch_unat) < 0) {
dprintk("ptrace: failed to set ar.unat\n");
return -1;
}
for (regnum = 4; regnum <= 7; ++regnum) {
unw_get_gr(&info, regnum, &dummy, &nat);
unw_set_gr(&info, regnum, dummy, (nat_bits >> regnum) & 1);
}
} else {
if (unw_get_ar(&info, UNW_AR_UNAT, &scratch_unat) < 0) {
dprintk("ptrace: failed to read ar.unat\n");
return -1;
}
nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
for (regnum = 4; regnum <= 7; ++regnum) {
unw_get_gr(&info, regnum, &dummy, &nat);
nat_bits |= (nat != 0) << regnum;
}
*data = nat_bits;
}
return 0;
case PT_R4: case PT_R5: case PT_R6: case PT_R7: case PT_R4: case PT_R5: case PT_R6: case PT_R7:
if (write_access) { if (write_access) {
/* read NaT bit first: */ /* read NaT bit first: */
unsigned long dummy; unsigned long dummy;
ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, &dummy, &nat); ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4,
&dummy, &nat);
if (ret < 0) if (ret < 0)
return ret; return ret;
} }
return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, &nat, return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data,
write_access); &nat, write_access);
case PT_B1: case PT_B2: case PT_B3: case PT_B4: case PT_B5: case PT_B1: case PT_B2: case PT_B3:
return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, write_access); case PT_B4: case PT_B5:
return unw_access_br(&info, (addr - PT_B1)/8 + 1, data,
write_access);
case PT_AR_EC: case PT_AR_EC:
return unw_access_ar(&info, UNW_AR_EC, data, write_access); return unw_access_ar(&info, UNW_AR_EC, data,
write_access);
case PT_AR_LC: case PT_AR_LC:
return unw_access_ar(&info, UNW_AR_LC, data, write_access); return unw_access_ar(&info, UNW_AR_LC, data,
write_access);
default: default:
if (addr >= PT_F2 && addr < PT_F5 + 16) if (addr >= PT_F2 && addr < PT_F5 + 16)
return access_fr(&info, (addr - PT_F2)/16 + 2, (addr & 8) != 0, return access_fr(&info, (addr - PT_F2)/16 + 2,
data, write_access); (addr & 8) != 0, data,
write_access);
else if (addr >= PT_F16 && addr < PT_F31 + 16) else if (addr >= PT_F16 && addr < PT_F31 + 16)
return access_fr(&info, (addr - PT_F16)/16 + 16, (addr & 8) != 0, return access_fr(&info,
(addr - PT_F16)/16 + 16,
(addr & 8) != 0,
data, write_access); data, write_access);
else { else {
dprintk("ptrace: rejecting access to register address 0x%lx\n", dprintk("ptrace: rejecting access to register "
addr); "address 0x%lx\n", addr);
return -1; return -1;
} }
} }
@@ -779,34 +846,49 @@ access_uarea (...)
		switch (addr) {
		      case PT_AR_BSP:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) we MUST copy any
			 * user-level stacked registers that are
			 * stored on the kernel stack back to
			 * user-space because otherwise, we might end
			 * up clobbering kernel stacked registers.
			 * Also, if this happens while the task is
			 * blocked in a system call, we convert the
			 * state such that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (ia64_sync_user_rbs(child, sw,
							       pt->ar_bspstore,
							       urbs_end) < 0)
						return -1;
					if (in_syscall(pt))
						convert_to_non_syscall(child,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
@@ -817,14 +899,17 @@ access_uarea (...)
		      case PT_CFM:
			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
			if (write_access) {
-				if (((cfm ^ *data) & 0x3fffffffffUL) != 0) {
+				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (ia64_sync_user_rbs(child, sw,
							       pt->ar_bspstore,
							       urbs_end) < 0)
						return -1;
					if (in_syscall(pt))
						convert_to_non_syscall(child,
								       pt,
								       cfm);
-					pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL)
-						      | (*data & 0x3fffffffffUL));
+					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
+						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
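The CFM word that PT_CFM exposes encodes the frame geometry in its low bits; sof (size of frame, bits 0..6) is what the PT_AR_BSP comment above subtracts to recover the real ar.bsp. A hedged user-space sketch of the field extraction, using the IA-64 architectural layout (sof, then sol, then sor) and a made-up CFM value; the 38-bit mask matches the PFM_MASK lines above:

#include <stdio.h>

int main (void)
{
	unsigned long cfm = 0x2a5;		/* made-up value */
	unsigned long sof = cfm & 0x7f;		/* size of frame */
	unsigned long sol = (cfm >> 7) & 0x7f;	/* size of locals */

	printf("sof=%lu sol=%lu outputs=%lu\n", sof, sol, sof - sol);
	printf("pfm bits: 0x%lx\n", cfm & 0x3fffffffffUL);
	return 0;
}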
@@ -832,99 +917,94 @@ access_uarea (...)
		      case PT_CR_IPSR:
			if (write_access)
-				pt->cr_ipsr = ((*data & IPSR_WRITE_MASK)
-					       | (pt->cr_ipsr & ~IPSR_WRITE_MASK));
+				pt->cr_ipsr = ((*data & IPSR_MASK)
+					       | (pt->cr_ipsr & ~IPSR_MASK));
			else
-				*data = (pt->cr_ipsr & IPSR_READ_MASK);
+				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;

		      case PT_AR_RNAT:
			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
			rnat_addr = (long) ia64_rse_rnat_addr((long *)
							      urbs_end);
			if (write_access)
				return ia64_poke(child, sw, urbs_end,
						 rnat_addr, *data);
			else
				return ia64_peek(child, sw, urbs_end,
						 rnat_addr, data);

		      case PT_R1:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r1));
+			ptr = pt_reg_addr(pt, r1);
			break;
		      case PT_R2:  case PT_R3:
-			ptr = (unsigned long *)
-				((long) pt + offsetof(struct pt_regs, r2) + addr - PT_R2);
+			ptr = pt_reg_addr(pt, r2) + (addr - PT_R2);
			break;
		      case PT_R8:  case PT_R9:  case PT_R10: case PT_R11:
-			ptr = (unsigned long *)
-				((long) pt + offsetof(struct pt_regs, r8) + addr - PT_R8);
+			ptr = pt_reg_addr(pt, r8) + (addr - PT_R8);
			break;
		      case PT_R12: case PT_R13:
-			ptr = (unsigned long *)
-				((long) pt + offsetof(struct pt_regs, r12) + addr - PT_R12);
+			ptr = pt_reg_addr(pt, r12) + (addr - PT_R12);
			break;
		      case PT_R14:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r14));
+			ptr = pt_reg_addr(pt, r14);
			break;
		      case PT_R15:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r15));
+			ptr = pt_reg_addr(pt, r15);
			break;
		      case PT_R16: case PT_R17: case PT_R18: case PT_R19:
		      case PT_R20: case PT_R21: case PT_R22: case PT_R23:
		      case PT_R24: case PT_R25: case PT_R26: case PT_R27:
		      case PT_R28: case PT_R29: case PT_R30: case PT_R31:
-			ptr = (unsigned long *)
-				((long) pt + offsetof(struct pt_regs, r16) + addr - PT_R16);
+			ptr = pt_reg_addr(pt, r16) + (addr - PT_R16);
			break;
		      case PT_B0:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b0));
+			ptr = pt_reg_addr(pt, b0);
			break;
		      case PT_B6:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b6));
+			ptr = pt_reg_addr(pt, b6);
			break;
		      case PT_B7:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b7));
+			ptr = pt_reg_addr(pt, b7);
			break;
		      case PT_F6:  case PT_F6+8: case PT_F7: case PT_F7+8:
		      case PT_F8:  case PT_F8+8: case PT_F9: case PT_F9+8:
-			ptr = (unsigned long *)
-				((long) pt + offsetof(struct pt_regs, f6) + addr - PT_F6);
+			ptr = pt_reg_addr(pt, f6) + (addr - PT_F6);
			break;
		      case PT_AR_BSPSTORE:
-			ptr = (unsigned long *)
-				((long) pt + offsetof(struct pt_regs, ar_bspstore));
+			ptr = pt_reg_addr(pt, ar_bspstore);
			break;
		      case PT_AR_RSC:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_rsc));
+			ptr = pt_reg_addr(pt, ar_rsc);
			break;
		      case PT_AR_UNAT:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_unat));
+			ptr = pt_reg_addr(pt, ar_unat);
			break;
		      case PT_AR_PFS:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_pfs));
+			ptr = pt_reg_addr(pt, ar_pfs);
			break;
		      case PT_AR_CCV:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_ccv));
+			ptr = pt_reg_addr(pt, ar_ccv);
			break;
		      case PT_AR_FPSR:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_fpsr));
+			ptr = pt_reg_addr(pt, ar_fpsr);
			break;
		      case PT_CR_IIP:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, cr_iip));
+			ptr = pt_reg_addr(pt, cr_iip);
			break;
		      case PT_PR:
-			ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, pr));
+			ptr = pt_reg_addr(pt, pr);
			break;
			/* scratch register */

		      default:
			/* disallow accessing anything else... */
			dprintk("ptrace: rejecting access to register "
				"address 0x%lx\n", addr);
			return -1;
		}
	} else if (addr <= PT_AR_SSD) {
-		ptr = (unsigned long *)
-			((long) pt + offsetof(struct pt_regs, ar_csd) + addr - PT_AR_CSD);
+		ptr = pt_reg_addr(pt, ar_csd) + (addr - PT_AR_CSD);
	} else {
		/* access debug registers */
@@ -937,41 +1017,46 @@ access_uarea (...)
		}

		if (regnum >= 8) {
			dprintk("ptrace: rejecting access to register "
				"address 0x%lx\n", addr);
			return -1;
		}
#ifdef CONFIG_PERFMON
		/*
		 * Check if debug registers are used by perfmon.  This
		 * test must be done once we know that we can do the
		 * operation, i.e. the arguments are all valid, but
		 * before we start modifying the state.
		 *
		 * Perfmon needs to keep a count of how many processes
		 * are trying to modify the debug registers for system
		 * wide monitoring sessions.
		 *
		 * We also include read accesses here, because they may
		 * cause the PMU-installed debug register state
		 * (dbr[], ibr[]) to be reset.  The two arrays are also
		 * used by perfmon, but we do not use
		 * IA64_THREAD_DBG_VALID.  The registers are restored
		 * by the PMU context switch code.
		 */
		if (pfm_use_debug_registers(child)) return -1;
#endif

		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
			child->thread.flags |= IA64_THREAD_DBG_VALID;
			memset(child->thread.dbr, 0,
			       sizeof(child->thread.dbr));
			memset(child->thread.ibr, 0,
			       sizeof(child->thread.ibr));
		}

		ptr += regnum;

-		if (write_access)
-			/* don't let the user set kernel-level breakpoints... */
-			*ptr = *data & ~(7UL << 56);
-		else
-			*data = *ptr;
-		return 0;
+		if ((regnum & 1) && write_access) {
+			/* don't let the user set kernel-level breakpoints: */
+			*ptr = *data & ~(7UL << 56);
+			return 0;
+		}
	}
	if (write_access)
		*ptr = *data;
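Why writes to the odd (control) half of a dbr/ibr pair are masked with ~(7UL << 56): per the IA-64 architecture manuals, the privilege-level-mask field sits in the top bits of the control register, and bits 56-58 arm the breakpoint for privilege levels 0-2. Clearing them leaves only the user-level controls, so a debugger cannot plant a breakpoint that fires inside the kernel. A small sketch of the masking (treat the exact bit positions as stated by the code above, not by this note):

#include <stdio.h>

int main (void)
{
	unsigned long ctrl = ~0UL;			/* everything set */
	unsigned long sanitized = ctrl & ~(7UL << 56);	/* same mask as above */

	printf("pl0-pl2 bits after masking: %d\n",
	       (int) ((sanitized >> 56) & 7));		/* prints 0 */
	return 0;
}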
@@ -992,7 +1077,8 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
	char nat = 0;
	int i;

	retval = verify_area(VERIFY_WRITE, ppr,
			     sizeof(struct pt_all_user_regs));
	if (retval != 0) {
		return -EIO;
	}
@@ -1094,11 +1180,13 @@ ptrace_getregs (...)
	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */
@@ -1111,7 +1199,8 @@ ptrace_getregs (...)
	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */
@@ -1138,7 +1227,8 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
	memset(&fpval, 0, sizeof(fpval));

	retval = verify_area(VERIFY_READ, ppr,
			     sizeof(struct pt_all_user_regs));
	if (retval != 0) {
		return -EIO;
	}
@@ -1186,7 +1276,8 @@ ptrace_setregs (...)
	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}
@@ -1230,16 +1321,19 @@ ptrace_setregs (...)
	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}
@@ -1247,7 +1341,8 @@ ptrace_setregs (...)
	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */
@@ -1279,16 +1374,15 @@ ptrace_disable (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	child_psr->ss = 0;
	child_psr->tb = 0;
}

asmlinkage long
-sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
-	    long arg4, long arg5, long arg6, long arg7, long stack)
+sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
{
-	struct pt_regs *pt, *regs = (struct pt_regs *) &stack;
+	struct pt_regs *pt;
	unsigned long urbs_end, peek_or_poke;
	struct task_struct *child;
	struct switch_stack *sw;
@@ -1308,8 +1402,10 @@ sys_ptrace (...)
		goto out;
	}

	peek_or_poke = (request == PTRACE_PEEKTEXT
			|| request == PTRACE_PEEKDATA
			|| request == PTRACE_POKETEXT
			|| request == PTRACE_POKEDATA);
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	{
@@ -1341,31 +1437,37 @@ sys_ptrace (...)
	switch (request) {
	      case PTRACE_PEEKTEXT:
	      case PTRACE_PEEKDATA:
		/* read word at location addr */
		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
		ret = ia64_peek(child, sw, urbs_end, addr, &data);
		if (ret == 0) {
			ret = data;
-			regs->r8 = 0;	/* ensure "ret" is not mistaken as an error code */
+			/* ensure "ret" is not mistaken as an error code: */
+			force_successful_syscall_return();
		}
		goto out_tsk;
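The force_successful_syscall_return() above exists because a PTRACE_PEEK* result travels back in the same register as error codes, so a peeked word of -1 is indistinguishable from a failure unless the tracer plays the classic errno game. A minimal user-space illustration of that tracer-side pattern:

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

long peek_word (pid_t pid, void *addr)
{
	long word;

	errno = 0;	/* a legitimately peeked word may be -1... */
	word = ptrace(PTRACE_PEEKDATA, pid, addr, 0);
	if (word == -1 && errno != 0)	/* ...so errno disambiguates */
		perror("PTRACE_PEEKDATA");
	return word;
}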
	      case PTRACE_POKETEXT:
	      case PTRACE_POKEDATA:
		/* write the word at location addr */
		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
		ret = ia64_poke(child, sw, urbs_end, addr, data);
		goto out_tsk;

	      case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0) {
			ret = -EIO;
			goto out_tsk;
		}
		ret = data;
-		regs->r8 = 0;	/* ensure "ret" is not mistaken as an error code */
+		/* ensure "ret" is not mistaken as an error code */
+		force_successful_syscall_return();
		goto out_tsk;

	      case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0) {
			ret = -EIO;
			goto out_tsk;
@@ -1373,16 +1475,20 @@ sys_ptrace (...)
		ret = 0;
		goto out_tsk;

	      case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		ret = ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
		goto out_tsk;

	      case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		ret = ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
		goto out_tsk;

	      case PTRACE_SYSCALL:
		/* continue and stop at next (return from) syscall */
	      case PTRACE_CONT:
		/* restart after signal. */
		ret = -EIO;
		if (data > _NSIG)
			goto out_tsk;
@@ -1392,7 +1498,10 @@ sys_ptrace (...)
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;

		/*
		 * Make sure the single step/taken-branch trap bits
		 * are not set:
		 */
		ia64_psr(pt)->ss = 0;
		ia64_psr(pt)->tb = 0;
@@ -1406,19 +1515,18 @@ sys_ptrace (...)
		 * sigkill.  Perhaps it should be put in the status
		 * that it wants to exit.
		 */
		if (child->exit_state == EXIT_ZOMBIE)
			/* already dead */
			goto out_tsk;
		child->exit_code = SIGKILL;

-		/* make sure the single step/take-branch tra bits are not set: */
-		ia64_psr(pt)->ss = 0;
-		ia64_psr(pt)->tb = 0;
+		ptrace_disable(child);
		wake_up_process(child);
		ret = 0;
		goto out_tsk;

	      case PTRACE_SINGLESTEP:
		/* let child execute for one instruction */
	      case PTRACE_SINGLEBLOCK:
		ret = -EIO;
		if (data > _NSIG)
@@ -1437,16 +1545,19 @@ sys_ptrace (...)
		ret = 0;
		goto out_tsk;

	      case PTRACE_DETACH:
		/* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		goto out_tsk;

	      case PTRACE_GETREGS:
		ret = ptrace_getregs(child,
				     (struct pt_all_user_regs __user *) data);
		goto out_tsk;

	      case PTRACE_SETREGS:
		ret = ptrace_setregs(child,
				     (struct pt_all_user_regs __user *) data);
		goto out_tsk;

	      default:
@@ -1469,15 +1580,16 @@ syscall_trace (void)
	if (!(current->ptrace & PT_PTRACED))
		return;

	/*
	 * The 0x80 provides a way for the tracing parent to
	 * distinguish between a syscall stop and SIGTRAP delivery.
	 */
	ptrace_notify(SIGTRAP
		      | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * This isn't the same as continuing with a signal, but it
	 * will do for normal use.  strace only continues with a
	 * signal if the stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
@@ -1489,21 +1601,22 @@ syscall_trace (void)
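What the 0x80 in syscall_trace() buys the tracer: once the tracer enables PTRACE_O_TRACESYSGOOD (which sets PT_TRACESYSGOOD), a syscall stop reports as SIGTRAP | 0x80 and can no longer be confused with delivery of a genuine SIGTRAP. A short tracer-side sketch of the waitpid() classification:

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

void classify_stop (pid_t pid)
{
	int status;

	if (waitpid(pid, &status, 0) < 0 || !WIFSTOPPED(status))
		return;
	if (WSTOPSIG(status) == (SIGTRAP | 0x80))
		printf("syscall entry/exit stop\n");
	else
		printf("signal-delivery stop: sig=%d\n", WSTOPSIG(status));
}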
asmlinkage void
-syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
-		     long arg4, long arg5, long arg6, long arg7, long stack)
+syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
+		     long arg4, long arg5, long arg6, long arg7,
+		     struct pt_regs regs)
{
-	struct pt_regs *regs = (struct pt_regs *) &stack;
	long syscall;

	if (unlikely(current->audit_context)) {
-		if (IS_IA32_PROCESS(regs))
-			syscall = regs->r1;
+		if (IS_IA32_PROCESS(&regs))
+			syscall = regs.r1;
		else
-			syscall = regs->r15;
+			syscall = regs.r15;

		audit_syscall_entry(current, syscall, arg0, arg1, arg2, arg3);
	}

	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		syscall_trace();
}
@@ -1511,11 +1624,13 @@ syscall_trace_enter (...)
asmlinkage void
-syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
-		     long arg4, long arg5, long arg6, long arg7, long stack)
+syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
+		     long arg4, long arg5, long arg6, long arg7,
+		     struct pt_regs regs)
{
	if (unlikely(current->audit_context))
-		audit_syscall_exit(current, ((struct pt_regs *) &stack)->r8);
+		audit_syscall_exit(current, regs.r8);

	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		syscall_trace();
}
@@ -60,7 +60,6 @@
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
-unsigned long __per_cpu_mca[NR_CPUS];

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
@@ -388,7 +387,7 @@ setup_arch (char **cmdline_p)
	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!strstr(saved_command_line, "nomca"))
		ia64_mca_init();

	platform_setup(cmdline_p);
	paging_init();
}
@@ -602,7 +601,6 @@ void
cpu_init (void)
{
	extern void __devinit ia64_mmu_init (void *);
-	extern void set_mca_pointer (struct cpuinfo_ia64 *, void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
@@ -611,6 +609,8 @@ cpu_init (void)
	cpu_data = per_cpu_init();

+	ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data - (void *) __per_cpu_start));
+
	get_max_cacheline_size();

	/*
@@ -657,7 +657,7 @@ cpu_init (void)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
-	set_mca_pointer(cpu_info, cpu_data);
+	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
......
@@ -84,12 +84,11 @@ ia64_rt_sigsuspend (sigset_t __user *uset, size_t sigsetsize, struct sigscratch
}

asmlinkage long
-sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2, long arg3, long arg4,
-		 long arg5, long arg6, long arg7, long stack)
+sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2,
+		 long arg3, long arg4, long arg5, long arg6, long arg7,
+		 struct pt_regs regs)
{
-	struct pt_regs *pt = (struct pt_regs *) &stack;
-
-	return do_sigaltstack(uss, uoss, pt->r12);
+	return do_sigaltstack(uss, uoss, regs.r12);
}

static long
......
@@ -2,7 +2,7 @@
 * This file contains various system calls that have different calling
 * conventions on different platforms.
 *
- * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co
+ * Copyright (C) 1999-2000, 2002-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
@@ -163,10 +163,9 @@ ia64_brk (unsigned long brk)
 * and r9) as this is faster than doing a copy_to_user().
 */
asmlinkage long
-sys_pipe (long arg0, long arg1, long arg2, long arg3,
-	  long arg4, long arg5, long arg6, long arg7, long stack)
+sys_pipe (void)
{
-	struct pt_regs *regs = (struct pt_regs *) &stack;
+	struct pt_regs *regs = ia64_task_regs(current);
	int fd[2];
	int retval;
......
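The sys_pipe() comment above alludes to ia64's two-register return convention: rather than copy_to_user()ing a two-element array, the first descriptor rides back as the ordinary return value (r8) and the second is written straight into the saved r9. A sketch of just that convention (not the elided kernel text); struct fake_regs is a made-up stand-in for struct pt_regs:

struct fake_regs { unsigned long r8, r9; };

long pipe_return (struct fake_regs *regs, int fd0, int fd1)
{
	regs->r9 = fd1;		/* second fd: out-of-band register */
	return fd0;		/* first fd: ordinary return value */
}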
@@ -358,11 +358,10 @@ struct illegal_op_return {
};

struct illegal_op_return
-ia64_illegal_op_fault (unsigned long ec, unsigned long arg1, unsigned long arg2,
-		       unsigned long arg3, unsigned long arg4, unsigned long arg5,
-		       unsigned long arg6, unsigned long arg7, unsigned long stack)
+ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
+		       long arg4, long arg5, long arg6, long arg7,
+		       struct pt_regs regs)
{
-	struct pt_regs *regs = (struct pt_regs *) &stack;
	struct illegal_op_return rv;
	struct siginfo si;
	char buf[128];
@@ -371,19 +370,19 @@ ia64_illegal_op_fault (...)
	{
		extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long);

-		rv = ia64_emulate_brl(regs, ec);
+		rv = ia64_emulate_brl(&regs, ec);
		if (rv.fkt != (unsigned long) -1)
			return rv;
	}
#endif

	sprintf(buf, "IA-64 Illegal operation fault");
-	die_if_kernel(buf, regs, 0);
+	die_if_kernel(buf, &regs, 0);

	memset(&si, 0, sizeof(si));
	si.si_signo = SIGILL;
	si.si_code = ILL_ILLOPC;
-	si.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
+	si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri);
	force_sig_info(SIGILL, &si, current);
	rv.fkt = 0;
	return rv;
@@ -391,11 +390,10 @@ ia64_illegal_op_fault (...)
void
-ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
-	    unsigned long iim, unsigned long itir, unsigned long arg5,
-	    unsigned long arg6, unsigned long arg7, unsigned long stack)
+ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
+	    unsigned long iim, unsigned long itir, long arg5, long arg6,
+	    long arg7, struct pt_regs regs)
{
-	struct pt_regs *regs = (struct pt_regs *) &stack;
-	unsigned long code, error = isr;
+	unsigned long code, error = isr, iip;
	struct siginfo siginfo;
	char buf[128];
	int result, sig;
@@ -415,10 +413,12 @@ ia64_fault (...)
		 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
		 * the lfetch.
		 */
-		ia64_psr(regs)->ed = 1;
+		ia64_psr(&regs)->ed = 1;
		return;
	}

+	iip = regs.cr_iip + ia64_psr(&regs)->ri;
+
	switch (vector) {
	      case 24: /* General Exception */
		code = (isr >> 4) & 0xf;
@@ -428,8 +428,8 @@ ia64_fault (...)
		if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
-			       current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri,
-			       regs->pr);
+			       current->comm, current->pid,
+			       regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
# endif
			return;
		}
@@ -437,14 +437,14 @@ ia64_fault (...)

	      case 25: /* Disabled FP-Register */
		if (isr & 2) {
-			disabled_fph_fault(regs);
+			disabled_fph_fault(&regs);
			return;
		}
		sprintf(buf, "Disabled FPL fault---not supposed to happen!");
		break;

	      case 26: /* NaT Consumption */
-		if (user_mode(regs)) {
+		if (user_mode(&regs)) {
			void __user *addr;

			if (((isr >> 4) & 0xf) == 2) {
@@ -456,7 +456,8 @@ ia64_fault (...)
				/* register NaT consumption */
				sig = SIGILL;
				code = ILL_ILLOPN;
-				addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
+				addr = (void __user *) (regs.cr_iip
+							+ ia64_psr(&regs)->ri);
			}
			siginfo.si_signo = sig;
			siginfo.si_code = code;
@@ -467,17 +468,17 @@ ia64_fault (...)
			siginfo.si_isr = isr;
			force_sig_info(sig, &siginfo, current);
			return;
-		} else if (ia64_done_with_exception(regs))
+		} else if (ia64_done_with_exception(&regs))
			return;
		sprintf(buf, "NaT consumption");
		break;

	      case 31: /* Unsupported Data Reference */
-		if (user_mode(regs)) {
+		if (user_mode(&regs)) {
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLOPN;
			siginfo.si_errno = 0;
-			siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
+			siginfo.si_addr = (void __user *) iip;
			siginfo.si_imm = vector;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_isr = isr;
@@ -490,7 +491,7 @@ ia64_fault (...)
	      case 29: /* Debug */
	      case 35: /* Taken Branch Trap */
	      case 36: /* Single Step Trap */
-		if (fsys_mode(current, regs)) {
+		if (fsys_mode(current, &regs)) {
			extern char __kernel_syscall_via_break[];
			/*
			 * Got a trap in fsys-mode: Taken Branch Trap and Single Step trap
@@ -498,13 +499,13 @@ ia64_fault (...)
			 */
			if (unlikely(vector == 29)) {
				die("Got debug trap in fsys-mode---not supposed to happen!",
-				    regs, 0);
+				    &regs, 0);
				return;
			}
			/* re-do the system call via break 0x100000: */
-			regs->cr_iip = (unsigned long) __kernel_syscall_via_break;
-			ia64_psr(regs)->ri = 0;
-			ia64_psr(regs)->cpl = 3;
+			regs.cr_iip = (unsigned long) __kernel_syscall_via_break;
+			ia64_psr(&regs)->ri = 0;
+			ia64_psr(&regs)->cpl = 3;
			return;
		}
		switch (vector) {
@@ -515,8 +516,8 @@ ia64_fault (...)
			 * Erratum 10 (IFA may contain incorrect address) now has
			 * "NoFix" status.  There are no plans for fixing this.
			 */
-			if (ia64_psr(regs)->is == 0)
-				ifa = regs->cr_iip;
+			if (ia64_psr(&regs)->is == 0)
+				ifa = regs.cr_iip;
#endif
			break;
		      case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
@@ -533,12 +534,12 @@ ia64_fault (...)

	      case 32: /* fp fault */
	      case 33: /* fp trap */
-		result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
+		result = handle_fpu_swa((vector == 32) ? 1 : 0, &regs, isr);
		if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
			siginfo.si_signo = SIGFPE;
			siginfo.si_errno = 0;
			siginfo.si_code = FPE_FLTINV;
-			siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
+			siginfo.si_addr = (void __user *) iip;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_isr = isr;
			siginfo.si_imm = 0;
@@ -554,19 +555,18 @@ ia64_fault (...)
			 * interesting work (e.g., signal delivery is done in the kernel
			 * exit path).
			 */
-			ia64_psr(regs)->lp = 0;
+			ia64_psr(&regs)->lp = 0;
			return;
		} else {
			/* Unimplemented Instr. Address Trap */
-			if (user_mode(regs)) {
+			if (user_mode(&regs)) {
				siginfo.si_signo = SIGILL;
				siginfo.si_code = ILL_BADIADDR;
				siginfo.si_errno = 0;
				siginfo.si_flags = 0;
				siginfo.si_isr = 0;
				siginfo.si_imm = 0;
-				siginfo.si_addr = (void __user *)
-					(regs->cr_iip + ia64_psr(regs)->ri);
+				siginfo.si_addr = (void __user *) iip;
				force_sig_info(SIGILL, &siginfo, current);
				return;
			}
@@ -576,23 +576,23 @@ ia64_fault (...)

	      case 45:
#ifdef CONFIG_IA32_SUPPORT
-		if (ia32_exception(regs, isr) == 0)
+		if (ia32_exception(&regs, isr) == 0)
			return;
#endif
		printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
-		       regs->cr_iip, ifa, isr);
+		       iip, ifa, isr);
		force_sig(SIGSEGV, current);
		break;

	      case 46:
#ifdef CONFIG_IA32_SUPPORT
-		if (ia32_intercept(regs, isr) == 0)
+		if (ia32_intercept(&regs, isr) == 0)
			return;
#endif
		printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
-		       regs->cr_iip, ifa, isr, iim);
+		       iip, ifa, isr, iim);
		force_sig(SIGSEGV, current);
		return;
@@ -604,6 +604,6 @@ ia64_fault (...)
		sprintf(buf, "Fault %lu", vector);
		break;
	}
-	die_if_kernel(buf, regs, error);
+	die_if_kernel(buf, &regs, error);
	force_sig(SIGILL, current);
}
@@ -169,7 +169,6 @@ find_memory (void)
	find_initrd();
}

-#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
@@ -178,30 +177,41 @@ find_memory (void)
void *
per_cpu_init (void)
{
-	void *cpu_data, *mca_data;
+	void *mca_data, *my_data;
	int cpu;

+#ifdef CONFIG_SMP
	/*
	 * get_free_pages() cannot be used before cpu_init() done.  BSP
	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
	 * get_zeroed_page().
	 */
	if (smp_processor_id() == 0) {
+		void *cpu_data;
+
		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-		mca_data = alloc_bootmem(PERCPU_MCA_SIZE * NR_CPUS);
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
			cpu_data += PERCPU_PAGE_SIZE;
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-			__per_cpu_mca[cpu] = (unsigned long)__pa(mca_data);
-			mca_data += PERCPU_MCA_SIZE;
		}
	}
-	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+	my_data = __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+#else
+	my_data = (void *) __phys_per_cpu_start;
+#endif
+
+	if (smp_processor_id() == 0) {
+		mca_data = alloc_bootmem(sizeof (struct ia64_mca_cpu) * NR_CPUS);
+		for (cpu = 0; cpu < NR_CPUS; cpu++) {
+			__per_cpu_mca[cpu] = __pa(mca_data);
+			mca_data += sizeof (struct ia64_mca_cpu);
+		}
+	}
+
+	return my_data;
}
-#endif /* CONFIG_SMP */
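The scheme per_cpu_init() implements in miniature: each CPU gets a page-sized copy of the linked .percpu template, and __per_cpu_offset[cpu] records the delta by which a per-CPU variable's link-time address must be shifted to land in that CPU's copy. A user-space model with made-up sizes and names (the cross-object pointer subtraction is tolerated here purely for illustration):

#include <stdio.h>
#include <string.h>

#define NCPUS 4
#define AREA  64

static char template_area[AREA];	/* stands in for the .percpu template */
static char copies[NCPUS][AREA];	/* stands in for the bootmem pages */
static long offsets[NCPUS];		/* stands in for __per_cpu_offset[] */

int main (void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		memcpy(copies[cpu], template_area, AREA);
		offsets[cpu] = (long) copies[cpu] - (long) template_area;
	}
	/* a "per-CPU variable" at template offset 16, seen from CPU 2: */
	char *var = template_area + 16 + offsets[2];
	printf("cpu2 copy at %p\n", (void *) var);
	return 0;
}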
static int
count_pages (u64 start, u64 end, void *arg)
......
@@ -339,7 +339,7 @@ find_pernode_space (unsigned long start, unsigned long len, int node)
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
-	pernodesize += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
+	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_mca_cpu)) * phys_cpus;
	pernodesize = PAGE_ALIGN(pernodesize);
	pernode = NODEDATA_ALIGN(start, node);
@@ -363,7 +363,7 @@ find_pernode_space (...)
		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

		mca_data_phys = (void *)pernode;
-		pernode += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
+		pernode += L1_CACHE_ALIGN(sizeof(struct ia64_mca_cpu)) * phys_cpus;

		/*
		 * Copy the static per-cpu data into the region we
@@ -384,7 +384,7 @@ find_pernode_space (...)
			 * will be put in the cpuinfo structure.
			 */
			__per_cpu_mca[cpu] = __pa(mca_data_phys);
-			mca_data_phys += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t));
+			mca_data_phys += L1_CACHE_ALIGN(sizeof(struct ia64_mca_cpu));
		}

		__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
			__per_cpu_start;
......
@@ -40,7 +40,6 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void ia64_tlb_init (void);
-extern void efi_get_pal_addr (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
@@ -292,27 +291,6 @@ setup_gate (void)
	ia64_patch_gate();
}

-void
-set_mca_pointer(struct cpuinfo_ia64 *cpuinfo, void *cpu_data)
-{
-	void *my_cpu_data = ia64_imva(cpu_data);
-
-	/*
-	 * The MCA info structure was allocated earlier and a physical address pointer
-	 * saved in __per_cpu_mca[cpu].  Move that pointer into the cpuinfo structure.
-	 */
-	cpuinfo->ia64_pa_mca_data = (__u64 *)__per_cpu_mca[smp_processor_id()];
-
-	cpuinfo->percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
-	ia64_set_kr(IA64_KR_PA_CPU_INFO, __pa(cpuinfo));
-
-	/*
-	 * Set pal_base and pal_paddr in cpuinfo structure.
-	 */
-	efi_get_pal_addr();
-}
-
void __devinit
ia64_mmu_init (void *my_cpu_data)
{
......
@@ -71,7 +71,7 @@ pci_sal_read (int seg, int bus, int devfn, int reg, int len, u32 *value)
	u64 addr, mode, data = 0;
	int result = 0;

-	if ((seg > 255) || (bus > 255) || (devfn > 255) || (reg > 4095))
+	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
......
@@ -13,7 +13,7 @@
#include <asm/sn/arch.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pda.h>
-#include "shubio.h"
+#include <asm/sn/shubio.h>
#include <asm/nodedata.h>
#include <asm/delay.h>
......
@@ -10,7 +10,7 @@
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
#include <asm/sn/addrs.h>
-#include "shubio.h"
+#include <asm/sn/shubio.h>
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
......
@@ -13,7 +13,7 @@
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
#include <asm/sn/addrs.h>
-#include "shubio.h"
+#include <asm/sn/shubio.h>
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
......
@@ -36,9 +36,7 @@
#include <asm/sn/intr.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
-
-/* This is ugly and jbarnes has promised me to fix this later */
-#include "../../arch/ia64/sn/include/shubio.h"
+#include <asm/sn/shubio.h>

MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>");
MODULE_DESCRIPTION("SGI Altix RTC Timer");
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
*/ */
#define IA64_KR_IO_BASE 0 /* ar.k0: legacy I/O base address */ #define IA64_KR_IO_BASE 0 /* ar.k0: legacy I/O base address */
#define IA64_KR_TSSD 1 /* ar.k1: IVE uses this as the TSSD */ #define IA64_KR_TSSD 1 /* ar.k1: IVE uses this as the TSSD */
#define IA64_KR_PA_CPU_INFO 3 /* ar.k3: phys addr of this cpu's cpu_info struct */ #define IA64_KR_PER_CPU_DATA 3 /* ar.k3: physical per-CPU base */
#define IA64_KR_CURRENT_STACK 4 /* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */ #define IA64_KR_CURRENT_STACK 4 /* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
#define IA64_KR_FPU_OWNER 5 /* ar.k5: fpu-owner (UP only, at the moment) */ #define IA64_KR_FPU_OWNER 5 /* ar.k5: fpu-owner (UP only, at the moment) */
#define IA64_KR_CURRENT 6 /* ar.k6: "current" task pointer */ #define IA64_KR_CURRENT 6 /* ar.k6: "current" task pointer */
......
...@@ -11,6 +11,8 @@ ...@@ -11,6 +11,8 @@
#ifndef _ASM_IA64_MCA_H #ifndef _ASM_IA64_MCA_H
#define _ASM_IA64_MCA_H #define _ASM_IA64_MCA_H
#define IA64_MCA_STACK_SIZE 8192
#if !defined(__ASSEMBLY__) #if !defined(__ASSEMBLY__)
#include <linux/interrupt.h> #include <linux/interrupt.h>
...@@ -102,21 +104,21 @@ typedef struct ia64_mca_os_to_sal_state_s { ...@@ -102,21 +104,21 @@ typedef struct ia64_mca_os_to_sal_state_s {
*/ */
} ia64_mca_os_to_sal_state_t; } ia64_mca_os_to_sal_state_t;
#define IA64_MCA_STACK_SIZE 1024 /* Per-CPU MCA state that is too big for normal per-CPU variables. */
#define IA64_MCA_STACK_SIZE_BYTES (1024 * 8)
#define IA64_MCA_BSPSTORE_SIZE 1024
typedef struct ia64_mca_cpu_s { struct ia64_mca_cpu {
u64 ia64_mca_stack[IA64_MCA_STACK_SIZE] __attribute__((aligned(16))); u64 stack[IA64_MCA_STACK_SIZE/8]; /* MCA memory-stack */
u64 ia64_mca_proc_state_dump[512] __attribute__((aligned(16))); u64 proc_state_dump[512];
u64 ia64_mca_stackframe[32] __attribute__((aligned(16))); u64 stackframe[32];
u64 ia64_mca_bspstore[IA64_MCA_BSPSTORE_SIZE] __attribute__((aligned(16))); u64 rbstore[IA64_MCA_STACK_SIZE/8]; /* MCA reg.-backing store */
u64 ia64_init_stack[KERNEL_STACK_SIZE/8] __attribute__((aligned(16))); u64 init_stack[KERNEL_STACK_SIZE/8];
} ia64_mca_cpu_t; } __attribute__ ((aligned(16)));
#define PERCPU_MCA_SIZE sizeof(ia64_mca_cpu_t) /* Array of physical addresses of each CPU's MCA area. */
extern unsigned long __per_cpu_mca[NR_CPUS];
extern void ia64_mca_init(void); extern void ia64_mca_init(void);
extern void ia64_mca_cpu_init(void *);
extern void ia64_os_mca_dispatch(void); extern void ia64_os_mca_dispatch(void);
extern void ia64_os_mca_dispatch_end(void); extern void ia64_os_mca_dispatch_end(void);
extern void ia64_mca_ucmc_handler(void); extern void ia64_mca_ucmc_handler(void);
......
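Editor's note: with IA64_MCA_STACK_SIZE now 8192 bytes, both the MCA memory stack and the register backing store come out to 1024 u64 entries each. A quick self-contained check of the new layout — KERNEL_STACK_SIZE is configuration-dependent and assumed here to be 32 KB for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define IA64_MCA_STACK_SIZE 8192
    #define KERNEL_STACK_SIZE   (4 * 8192)  /* assumption: 32KB stacks */

    struct ia64_mca_cpu {
            uint64_t stack[IA64_MCA_STACK_SIZE / 8];   /* memory stack */
            uint64_t proc_state_dump[512];
            uint64_t stackframe[32];
            uint64_t rbstore[IA64_MCA_STACK_SIZE / 8]; /* backing store */
            uint64_t init_stack[KERNEL_STACK_SIZE / 8];
    } __attribute__ ((aligned(16)));

    int main(void)
    {
            /* Two 8KB stacks + state dump + frame + INIT stack per CPU. */
            printf("sizeof(struct ia64_mca_cpu) = %zu\n",
                   sizeof(struct ia64_mca_cpu));
            return 0;
    }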
...@@ -46,40 +46,9 @@ ...@@ -46,40 +46,9 @@
mov temp = 0x7 ;; \ mov temp = 0x7 ;; \
dep addr = temp, addr, 61, 3 dep addr = temp, addr, 61, 3
/* #define GET_THIS_PADDR(reg, var) \
* This macro gets the physical address of this cpu's cpuinfo structure. mov reg = IA64_KR(PER_CPU_DATA);; \
*/ addl reg = THIS_CPU(var), reg
#define GET_PERCPU_PADDR(reg) \
mov reg = ar.k3;; \
addl reg = IA64_CPUINFO_PERCPU_PADDR,reg
#define GET_CPUINFO_PAL_PADDR(reg) \
mov reg = ar.k3;; \
addl reg = IA64_CPUINFO_PAL_PADDR,reg
/*
* This macro gets the physical address of this cpu's MCA save structure.
*/
#define GET_CPUINFO_MCA_PADDR(reg) \
mov reg = ar.k3;; \
addl reg = IA64_CPUINFO_PA_MCA_INFO,reg;; \
ld8 reg = [reg]
#define GET_MCA_BSPSTORE(reg) \
GET_CPUINFO_MCA_PADDR(reg);; \
addl reg = IA64_MCA_BSPSTORE,reg
#define GET_MCA_STACKFRAME(reg) \
GET_CPUINFO_MCA_PADDR(reg);; \
addl reg = IA64_MCA_STACKFRAME,reg
#define GET_MCA_STACK(reg) \
GET_CPUINFO_MCA_PADDR(reg);; \
addl reg = IA64_MCA_STACK,reg
#define GET_MCA_DUMP_PADDR(reg) \
GET_CPUINFO_MCA_PADDR(reg);; \
addl reg = IA64_MCA_PROC_STATE_DUMP,reg
/* /*
* This macro jumps to the instruction at the given virtual address * This macro jumps to the instruction at the given virtual address
......
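Editor's note: the new GET_THIS_PADDR macro collapses the old chain of cpuinfo lookups into a single add — ar.k3 now holds the physical base of this CPU's per-CPU area, so the physical address of any per-CPU variable is just base plus offset. A C rendering of the same arithmetic; read_kr3() and the offset parameter are stand-ins for IA64_KR(PER_CPU_DATA) and THIS_CPU(var):

    #include <stdint.h>

    /* Stand-in for reading kernel register ar.k3. */
    extern uint64_t read_kr3(void);

    uint64_t this_paddr(uint64_t percpu_offset_of_var)
    {
            /* GET_THIS_PADDR(reg, var):
             *   mov  reg = IA64_KR(PER_CPU_DATA) ;;
             *   addl reg = THIS_CPU(var), reg     */
            return read_kr3() + percpu_offset_of_var;
    }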
...@@ -46,18 +46,14 @@ DECLARE_PER_CPU(unsigned long, local_per_cpu_offset); ...@@ -46,18 +46,14 @@ DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size); extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
extern void setup_per_cpu_areas (void); extern void setup_per_cpu_areas (void);
extern void *per_cpu_init(void);
#else /* ! SMP */ #else /* ! SMP */
#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) #define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var #define __get_cpu_var(var) per_cpu__##var
#define per_cpu_init() (__phys_per_cpu_start)
#endif /* SMP */ #endif /* SMP */
extern unsigned long __per_cpu_mca[NR_CPUS];
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
...@@ -69,6 +65,8 @@ extern unsigned long __per_cpu_mca[NR_CPUS]; ...@@ -69,6 +65,8 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
*/ */
#define __ia64_per_cpu_var(var) (per_cpu__##var) #define __ia64_per_cpu_var(var) (per_cpu__##var)
extern void *per_cpu_init(void);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PERCPU_H */ #endif /* _ASM_IA64_PERCPU_H */
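Editor's note: per_cpu_init() moves out of the SMP-only block so UP and SMP builds share one prototype; on SMP the per-CPU accessors then resolve through a per-CPU offset table. A minimal standalone model of that indirection — the names mirror the kernel's, but the definitions here are illustrative:

    #include <stddef.h>

    #define NR_CPUS 4

    /* One offset per CPU, added to the address of the canonical
     * (link-time) copy of a per-CPU variable. */
    static ptrdiff_t __per_cpu_offset[NR_CPUS];

    static long per_cpu__counter;   /* canonical copy */

    static long *per_cpu_counter(int cpu)
    {
            return (long *)((char *)&per_cpu__counter +
                            __per_cpu_offset[cpu]);
    }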
...@@ -151,12 +151,9 @@ struct cpuinfo_ia64 { ...@@ -151,12 +151,9 @@ struct cpuinfo_ia64 {
__u64 itc_freq; /* frequency of ITC counter */ __u64 itc_freq; /* frequency of ITC counter */
__u64 proc_freq; /* frequency of processor */ __u64 proc_freq; /* frequency of processor */
__u64 cyc_per_usec; /* itc_freq/1000000 */ __u64 cyc_per_usec; /* itc_freq/1000000 */
__u64 percpu_paddr;
__u64 ptce_base; __u64 ptce_base;
__u32 ptce_count[2]; __u32 ptce_count[2];
__u32 ptce_stride[2]; __u32 ptce_stride[2];
__u64 pal_paddr;
__u64 pal_base;
struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */ struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -177,7 +174,6 @@ struct cpuinfo_ia64 { ...@@ -177,7 +174,6 @@ struct cpuinfo_ia64 {
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
struct ia64_node_data *node_data; struct ia64_node_data *node_data;
#endif #endif
__u64 *ia64_pa_mca_data; /* prt to MCA/INIT processor state */
}; };
DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info); DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
/* /*
* IA-64 Linux syscall numbers and inline-functions. * IA-64 Linux syscall numbers and inline-functions.
* *
* Copyright (C) 1998-2004 Hewlett-Packard Co * Copyright (C) 1998-2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
...@@ -376,15 +376,13 @@ struct pt_regs; ...@@ -376,15 +376,13 @@ struct pt_regs;
struct sigaction; struct sigaction;
long sys_execve(char __user *filename, char __user * __user *argv, long sys_execve(char __user *filename, char __user * __user *argv,
char __user * __user *envp, struct pt_regs *regs); char __user * __user *envp, struct pt_regs *regs);
asmlinkage long sys_pipe(long arg0, long arg1, long arg2, long arg3, asmlinkage long sys_pipe(void);
long arg4, long arg5, long arg6, long arg7, long stack);
asmlinkage long sys_ptrace(long request, pid_t pid, asmlinkage long sys_ptrace(long request, pid_t pid,
unsigned long addr, unsigned long data, unsigned long addr, unsigned long data);
long arg4, long arg5, long arg6, long arg7, long stack);
asmlinkage long sys_rt_sigaction(int sig, asmlinkage long sys_rt_sigaction(int sig,
const struct sigaction __user *act, const struct sigaction __user *act,
struct sigaction __user *oact, struct sigaction __user *oact,
size_t sigsetsize); size_t sigsetsize);
/* /*
* "Conditional" syscalls * "Conditional" syscalls
......
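Editor's note: sys_pipe() and sys_ptrace() shed the eight dummy arguments plus the trailing on-stack pt_regs trick. A kernel-context sketch of how a handler that still needs the user register frame can obtain it, assuming an ia64_task_regs() helper that returns the task's saved pt_regs — a plausible reading of this cleanup, not necessarily the exact patch body:

    asmlinkage long sys_pipe(void)
    {
            struct pt_regs *regs = ia64_task_regs(current);
            int fd[2];
            int retval;

            retval = do_pipe(fd);
            if (retval)
                    goto out;
            /* ia64 convention: the second descriptor returns in r9. */
            regs->r9 = fd[1];
            retval = fd[0];
    out:
            return retval;
    }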
...@@ -289,6 +289,7 @@ efi_guid_unparse(efi_guid_t *guid, char *out) ...@@ -289,6 +289,7 @@ efi_guid_unparse(efi_guid_t *guid, char *out)
} }
extern void efi_init (void); extern void efi_init (void);
extern void *efi_get_pal_addr (void);
extern void efi_map_pal_code (void); extern void efi_map_pal_code (void);
extern void efi_map_memmap(void); extern void efi_map_memmap(void);
extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
......