Commit 8c2000be authored by David Mosberger

ia64: Merge with 2.5.59.

parents 6a3354a9 887b478a
@@ -4,40 +4,40 @@
	platform.  This document provides information specific to IA-64
	ONLY, to get additional information about the Linux kernel also
	read the original Linux README provided with the kernel.

INSTALLING the kernel:

	- IA-64 kernel installation is the same as the other platforms, see
	  original README for details.

SOFTWARE REQUIREMENTS

	Compiling and running this kernel requires an IA-64 compliant GCC
	compiler.  And various software packages also compiled with an
	IA-64 compliant GCC compiler.

CONFIGURING the kernel:

	Configuration is the same, see original README for details.

COMPILING the kernel:

	- Compiling this kernel doesn't differ from other platform so read
	  the original README for details BUT make sure you have an IA-64
	  compliant GCC compiler.

IA-64 SPECIFICS

	- General issues:

		o Hardly any performance tuning has been done.  Obvious targets
		  include the library routines (IP checksum, etc.).  Less
		  obvious targets include making sure we don't flush the TLB
		  needlessly, etc.

		o SMP locks cleanup/optimization

		o IA32 support.  Currently experimental.  It mostly works.
@@ -768,6 +768,9 @@ source "arch/ia64/hp/sim/Kconfig"

menu "Kernel hacking"

+config FSYS
+	bool "Light-weight system-call support (via epc)"
+
choice
	prompt "Physical memory granularity"
	default IA64_GRANULE_64MB
......
@@ -58,9 +58,13 @@ all compressed: vmlinux.gz

vmlinux.gz: vmlinux
	$(call makeboot,vmlinux.gz)

+check: vmlinux
+	arch/ia64/scripts/unwcheck.sh vmlinux
+
archmrproper:

archclean:
	$(Q)$(MAKE) -f scripts/Makefile.clean obj=arch/ia64/boot
+	$(Q)$(MAKE) -f scripts/Makefile.clean obj=arch/ia64/tools

CLEAN_FILES += include/asm-ia64/offsets.h vmlinux.gz bootloader
......
@@ -95,12 +95,19 @@ END(sys32_sigsuspend)
GLOBAL_ENTRY(ia32_ret_from_clone)
	PT_REGS_UNWIND_INFO(0)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+{	/*
+	 * Some versions of gas generate bad unwind info if the first instruction of a
+	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
+	 */
+	nop.m 0
+	nop.i 0
	/*
	 * We need to call schedule_tail() to complete the scheduling process.
	 * Called by ia64_switch_to after do_fork()->copy_thread().  r8 contains the
	 * address of the previously executing task.
	 */
	br.call.sptk.many rp=ia64_invoke_schedule_tail
+}
.ret1:
#endif
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
......
@@ -95,8 +95,6 @@ ia32_load_state (struct task_struct *t)
	struct pt_regs *regs = ia64_task_regs(t);
	int nr = smp_processor_id();	/* LDT and TSS depend on CPU number: */

-	nr = smp_processor_id();
-
	eflag = t->thread.eflag;
	fsr = t->thread.fsr;
	fcr = t->thread.fcr;
......
@@ -2011,6 +2011,10 @@ semctl32 (int first, int second, int third, void *uptr)
	else
		fourth.__pad = (void *)A(pad);
	switch (third) {
+	      default:
+		err = -EINVAL;
+		break;
+
	      case IPC_INFO:
	      case IPC_RMID:
	      case IPC_SET:
......
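The new `default:` arm placed ahead of the cases guarantees that an unrecognized semctl command yields -EINVAL instead of falling through with `err` unset. A stand-alone C sketch of the pattern; the command values and the literal -22 errno are illustrative, not the kernel's:

#include <stdio.h>

#define IPC_INFO 3
#define IPC_RMID 0

static int fake_semctl(int cmd)
{
	int err;

	switch (cmd) {
	default:
		err = -22;	/* -EINVAL: reject unknown commands up front */
		break;
	case IPC_INFO:
	case IPC_RMID:
		err = 0;	/* pretend the real handler succeeded */
		break;
	}
	return err;
}

int main(void)
{
	printf("known cmd -> %d, unknown cmd -> %d\n",
	       fake_semctl(IPC_INFO), fake_semctl(42));
	return 0;
}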
@@ -12,6 +12,7 @@ obj-y := acpi.o entry.o gate.o efi.o efi_stub.o ia64_ksyms.o \
	 semaphore.o setup.o \
	 signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o

+obj-$(CONFIG_FSYS)		+= fsys.o
obj-$(CONFIG_IOSAPIC)		+= iosapic.o
obj-$(CONFIG_IA64_PALINFO)	+= palinfo.o
obj-$(CONFIG_EFI_VARS)		+= efivars.o
......
@@ -888,4 +888,26 @@ acpi_irq_to_vector (u32 irq)
	return gsi_to_vector(irq);
}

+int __init
+acpi_register_irq (u32 gsi, u32 polarity, u32 trigger)
+{
+	int vector = 0;
+	u32 irq_base;
+	char *iosapic_address;
+
+	if (acpi_madt->flags.pcat_compat && (gsi < 16))
+		return isa_irq_to_vector(gsi);
+
+	if (!iosapic_register_intr)
+		return 0;
+
+	/* Find the IOSAPIC */
+	if (!acpi_find_iosapic(gsi, &irq_base, &iosapic_address)) {
+		/* Turn it on */
+		vector = iosapic_register_intr (gsi, polarity, trigger,
+						irq_base, iosapic_address);
+	}
+	return vector;
+}
+
#endif /* CONFIG_ACPI_BOOT */
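A rough user-space model of the routing decision acpi_register_irq() makes: legacy GSIs below 16 on PC-AT compatible systems resolve through the fixed ISA mapping, everything else is programmed into an IOSAPIC entry. All names and vector bases here are invented for illustration:

#include <stdio.h>
#include <stdbool.h>

static int isa_vector(unsigned gsi)     { return 0x30 + gsi; }	/* made-up base */
static int iosapic_vector(unsigned gsi) { return 0x40 + gsi; }	/* made-up base */

static int register_irq(unsigned gsi, bool pcat_compat)
{
	if (pcat_compat && gsi < 16)
		return isa_vector(gsi);		/* fixed legacy mapping */
	return iosapic_vector(gsi);		/* program an IOSAPIC RTE */
}

int main(void)
{
	printf("GSI 4  -> vector 0x%x\n", register_irq(4, true));
	printf("GSI 20 -> vector 0x%x\n", register_irq(20, true));
	return 0;
}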
@@ -33,15 +33,6 @@

#define EFI_DEBUG	0

-#ifdef CONFIG_HUGETLB_PAGE
-
-/* By default at total of 512MB is reserved huge pages. */
-#define HTLBZONE_SIZE_DEFAULT	0x20000000
-
-unsigned long htlbzone_pages = (HTLBZONE_SIZE_DEFAULT >> HPAGE_SHIFT);
-
-#endif
-
extern efi_status_t efi_call_phys (void *, ...);

struct efi efi;
@@ -497,25 +488,6 @@ efi_init (void)
				++cp;
		}
	}
-#ifdef CONFIG_HUGETLB_PAGE
-	/* Just duplicating the above algo for lpzone start */
-	for (cp = saved_command_line; *cp; ) {
-		if (memcmp(cp, "lpmem=", 6) == 0) {
-			cp += 6;
-			htlbzone_pages = memparse(cp, &end);
-			htlbzone_pages = (htlbzone_pages >> HPAGE_SHIFT);
-			if (end != cp)
-				break;
-			cp = end;
-		} else {
-			while (*cp != ' ' && *cp)
-				++cp;
-			while (*cp == ' ')
-				++cp;
-		}
-	}
-	printk("Total HugeTLB_Page memory pages requested 0x%lx \n", htlbzone_pages);
-#endif
	if (mem_limit != ~0UL)
		printk("Ignoring memory above %luMB\n", mem_limit >> 20);
......
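The deleted loop scanned saved_command_line for a "lpmem=" option and converted its argument with memparse(). A user-space model of that scan, assuming an invented 256MB huge-page size and a simplified memparse that only understands upper-case K/M/G suffixes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long my_memparse(const char *s, char **end)
{
	unsigned long val = strtoul(s, end, 0);
	switch (**end) {
	case 'G': val <<= 10;	/* fall through */
	case 'M': val <<= 10;	/* fall through */
	case 'K': val <<= 10; (*end)++;
	}
	return val;
}

int main(void)
{
	const char *cmdline = "console=ttyS0 lpmem=512M root=/dev/sda1";
	const char *cp = cmdline;
	unsigned long pages = 0, hpage_shift = 28;	/* assume 256MB huge pages */

	while (*cp) {
		if (strncmp(cp, "lpmem=", 6) == 0) {
			char *end;
			pages = my_memparse(cp + 6, &end) >> hpage_shift;
			cp = end;
		} else {
			while (*cp != ' ' && *cp) ++cp;	/* skip unrelated option */
			while (*cp == ' ') ++cp;
		}
	}
	printf("hugetlb pages requested: %lu\n", pages);	/* prints 2 */
	return 0;
}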
@@ -3,7 +3,7 @@
 *
 * Kernel entry points.
 *
- * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
@@ -22,8 +22,8 @@
/*
 * Global (preserved) predicate usage on syscall entry/exit path:
 *
- *	pKern:		See entry.h.
- *	pUser:		See entry.h.
+ *	pKStk:		See entry.h.
+ *	pUStk:		See entry.h.
 *	pSys:		See entry.h.
 *	pNonSys:	!pSys
 */
@@ -63,7 +63,7 @@ ENTRY(ia64_execve)
	sxt4 r8=r8			// return 64-bit result
	;;
	stf.spill [sp]=f0
-(p6)	cmp.ne pKern,pUser=r0,r0	// a successful execve() lands us in user-mode...
+(p6)	cmp.ne pKStk,pUStk=r0,r0	// a successful execve() lands us in user-mode...
	mov rp=loc0
(p6)	mov ar.pfs=r0			// clear ar.pfs on success
(p7)	br.ret.sptk.many rp
@@ -193,7 +193,7 @@ GLOBAL_ENTRY(ia64_switch_to)
	;;
(p6)	srlz.d
	ld8 sp=[r21]			// load kernel stack pointer of new task
-	mov IA64_KR(CURRENT)=r20	// update "current" application register
+	mov IA64_KR(CURRENT)=in0	// update "current" application register
	mov r8=r13			// return pointer to previously running task
	mov r13=in0			// set "current" pointer
	;;
@@ -507,7 +507,14 @@ END(invoke_syscall_trace)

GLOBAL_ENTRY(ia64_trace_syscall)
	PT_REGS_UNWIND_INFO(0)
+{	/*
+	 * Some versions of gas generate bad unwind info if the first instruction of a
+	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
+	 */
+	nop.m 0
+	nop.i 0
	br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch syscall args
+}
.ret6:	br.call.sptk.many rp=b6			// do the syscall
strace_check_retval:
	cmp.lt p6,p0=r8,r0			// syscall failed?
@@ -537,12 +544,19 @@ END(ia64_trace_syscall)

GLOBAL_ENTRY(ia64_ret_from_clone)
	PT_REGS_UNWIND_INFO(0)
+{	/*
+	 * Some versions of gas generate bad unwind info if the first instruction of a
+	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
+	 */
+	nop.m 0
+	nop.i 0
	/*
	 * We need to call schedule_tail() to complete the scheduling process.
	 * Called by ia64_switch_to() after do_fork()->copy_thread().  r8 contains the
	 * address of the previously executing task.
	 */
	br.call.sptk.many rp=ia64_invoke_schedule_tail
+}
.ret8:
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
@@ -569,11 +583,12 @@ END(ia64_ret_from_syscall)
	// fall through
GLOBAL_ENTRY(ia64_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
-	// work.need_resched etc. mustn't get changed by this CPU before it returns to userspace:
-(pUser)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUser
-(pUser)	rsm psr.i
+	// work.need_resched etc. mustn't get changed by this CPU before it returns to
+	// user- or fsys-mode:
+(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
+(pUStk)	rsm psr.i
	;;
-(pUser)	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
+(pUStk)	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
.work_processed:
(p6)	ld4 r18=[r17]				// load current_thread_info()->flags
@@ -635,9 +650,9 @@ GLOBAL_ENTRY(ia64_leave_kernel)
	;;
	srlz.i			// ensure interruption collection is off
	mov b7=r15
+	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
	;;
-	bsw.0			// switch back to bank 0
-	;;
+(pUStk)	mov r18=IA64_KR(CURRENT)	// Itanium 2: 12 cycle read latency
	adds r16=16,r12
	adds r17=24,r12
	;;
@@ -665,16 +680,21 @@ GLOBAL_ENTRY(ia64_leave_kernel)
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
+(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
	;;
	ld8.fill r14=[r16]
	ld8.fill r15=[r17]
+(pUStk)	mov r17=1
+	;;
+(pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
	shr.u r18=r19,16	// get byte size of existing "dirty" partition
	;;
	mov r16=ar.bsp		// get existing backing store pointer
	movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
	;;
	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
-(pKern)	br.cond.dpnt skip_rbs_switch
+(pKStk)	br.cond.dpnt skip_rbs_switch
	/*
	 * Restore user backing store.
	 *
@@ -710,21 +730,9 @@ dont_preserve_current_frame:
	shr.u loc1=r18,9		// RNaTslots <= dirtySize / (64*8) + 1
	sub r17=r17,r18			// r17 = (physStackedSize + 8) - dirtySize
	;;
-#if 1
-	.align 32		// see comment below about gas bug...
-#endif
	mov ar.rsc=r19			// load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r17
	mov in1=0
-#if 0
-	// gas-2.12.90 is unable to generate a stop bit after .align, which is bad,
-	// because alloc must be at the beginning of an insn-group.
-	.align 32
-#else
-	nop 0
-	nop 0
-	nop 0
-#endif
	;;
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
@@ -788,12 +796,12 @@ rse_clear_invalid:
skip_rbs_switch:
	mov b6=rB6
	mov ar.pfs=rARPFS
-(pUser)	mov ar.bspstore=rARBSPSTORE
+(pUStk)	mov ar.bspstore=rARBSPSTORE
(p9)	mov cr.ifs=rCRIFS
	mov cr.ipsr=rCRIPSR
	mov cr.iip=rCRIIP
	;;
-(pUser)	mov ar.rnat=rARRNAT	// must happen with RSE in lazy mode
+(pUStk)	mov ar.rnat=rARRNAT	// must happen with RSE in lazy mode
	mov ar.rsc=rARRSC
	mov ar.unat=rARUNAT
	mov pr=rARPR,-1
@@ -963,17 +971,16 @@ ENTRY(sys_rt_sigreturn)
END(sys_rt_sigreturn)

GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
-	//
-	// r16 = fake ar.pfs, we simply need to make sure
-	// privilege is still 0
-	//
-	mov r16=r0
	.prologue
+	/*
+	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
+	 */
+	mov r16=r0
	DO_SAVE_SWITCH_STACK
	br.call.sptk.many rp=ia64_handle_unaligned	// stack frame setup in ivt
.ret21:	.body
	DO_LOAD_SWITCH_STACK
	br.cond.sptk.many rp				// goes to ia64_leave_kernel
END(ia64_prepare_handle_unaligned)

	//
@@ -1235,8 +1242,8 @@ sys_call_table:
	data8 sys_sched_setaffinity
	data8 sys_sched_getaffinity
	data8 sys_set_tid_address
-	data8 ia64_ni_syscall			// available. (was sys_alloc_hugepages)
-	data8 ia64_ni_syscall			// available (was sys_free_hugepages)
+	data8 ia64_ni_syscall
+	data8 ia64_ni_syscall			// 1235
	data8 sys_exit_group
	data8 sys_lookup_dcookie
	data8 sys_io_setup
......
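The sys_call_table hunk above keeps the retired hugepage slots pointing at the not-implemented stub so that slot numbering stays stable. A toy C model of that convention; the slot contents and the -38 (-ENOSYS) value are illustrative:

#include <stdio.h>

static long sys_exit_group(void)  { return 0; }
static long ia64_ni_syscall(void) { return -38; }	/* -ENOSYS */

typedef long (*syscall_t)(void);

static syscall_t table[] = {
	ia64_ni_syscall,	/* retired slot (was sys_alloc_hugepages) */
	ia64_ni_syscall,	/* retired slot (was sys_free_hugepages) */
	sys_exit_group,
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("slot %u -> %ld\n", i, table[i]());
	return 0;
}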
@@ -4,8 +4,8 @@
/*
 * Preserved registers that are shared between code in ivt.S and entry.S.  Be
 * careful not to step on these!
 */
-#define pKern		p2	/* will leave_kernel return to kernel-mode? */
-#define pUser		p3	/* will leave_kernel return to user-mode? */
+#define pKStk		p2	/* will leave_kernel return to kernel-stacks? */
+#define pUStk		p3	/* will leave_kernel return to user-stacks? */
#define pSys		p4	/* are we processing a (synchronous) system call? */
#define pNonSys		p5	/* complement of pSys */
......
@@ -2,7 +2,7 @@
 * This file contains the code that gets mapped at the upper end of each task's text
 * region.  For now, it contains the signal trampoline code only.
 *
- * Copyright (C) 1999-2002 Hewlett-Packard Co
+ * Copyright (C) 1999-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
@@ -14,6 +14,87 @@
#include <asm/page.h>

	.section .text.gate, "ax"
+
+.start_gate:
+
+#if CONFIG_FSYS
+
+#include <asm/errno.h>
+
+/*
+ * On entry:
+ *	r11 = saved ar.pfs
+ *	r15 = system call #
+ *	b0 = saved return address
+ *	b6 = return address
+ * On exit:
+ *	r11 = saved ar.pfs
+ *	r15 = system call #
+ *	b0 = saved return address
+ *	all other "scratch" registers:	undefined
+ *	all "preserved" registers:	same as on entry
+ */
+
+GLOBAL_ENTRY(syscall_via_epc)
+	.prologue
+	.altrp b6
+	.body
+{
+	/*
+	 * Note: the kernel cannot assume that the first two instructions in this
+	 * bundle get executed.  The remaining code must be safe even if
+	 * they do not get executed.
+	 */
+	adds r17=-1024,r15
+	mov r10=0			// default to successful syscall execution
+	epc
+}
+	;;
+	rsm psr.be
+	movl r18=fsyscall_table
+
+	mov r16=IA64_KR(CURRENT)
+	mov r19=255
+	;;
+	shladd r18=r17,3,r18
+	cmp.geu p6,p0=r19,r17		// (syscall > 0 && syscall <= 1024+255)?
+	;;
+	srlz.d				// ensure little-endian byteorder is in effect
+(p6)	ld8 r18=[r18]
+	;;
+(p6)	mov b7=r18
+(p6)	br.sptk.many b7
+
+	mov r10=-1
+	mov r8=ENOSYS
+	MCKINLEY_E9_WORKAROUND
+	br.ret.sptk.many b6
+END(syscall_via_epc)
+
+GLOBAL_ENTRY(syscall_via_break)
+	.prologue
+	.altrp b6
+	.body
+	break 0x100000
+	br.ret.sptk.many b6
+END(syscall_via_break)
+
+GLOBAL_ENTRY(fsys_fallback_syscall)
+	/*
+	 * It would be better/fsyser to do the SAVE_MIN magic directly here, but for now
+	 * we simply fall back on doing a system-call via break.  Good enough
+	 * to get started.  (Note: we have to do this through the gate page again, since
+	 * the br.ret will switch us back to user-level privilege.)
+	 *
+	 * XXX Move this back to fsys.S after changing it over to avoid break 0x100000.
+	 */
+	movl r2=(syscall_via_break - .start_gate) + GATE_ADDR
+	;;
+	MCKINLEY_E9_WORKAROUND
+	mov b7=r2
+	br.ret.sptk.many b7
+END(fsys_fallback_syscall)
+
+#endif /* CONFIG_FSYS */
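The cmp.geu in syscall_via_epc folds a two-sided range test into a single unsigned compare: after subtracting the table base (1024), any syscall number below the base wraps around to a huge unsigned value and fails the same test as a number past the end of the table. A C model of that trick (constants mirror the comment above):

#include <stdio.h>

#define FSYS_BASE  1024u
#define FSYS_SLOTS 256u		/* entries 1024..1024+255 */

static int in_fsys_range(unsigned long nr)
{
	return (nr - FSYS_BASE) <= (FSYS_SLOTS - 1);	/* one unsigned compare */
}

int main(void)
{
	printf("%d %d %d\n", in_fsys_range(13), in_fsys_range(1024),
	       in_fsys_range(1280));	/* prints: 0 1 0 */
	return 0;
}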
# define ARG0_OFF	(16 + IA64_SIGFRAME_ARG0_OFFSET)
# define ARG1_OFF	(16 + IA64_SIGFRAME_ARG1_OFFSET)
@@ -63,15 +144,18 @@
 * call stack.
 */

+#define SIGTRAMP_SAVES								\
+	.unwabi @svr4, 's'	// mark this as a sigtramp handler (saves scratch regs) \
+	.savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF				\
+	.savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF				\
+	.savesp pr, PR_OFF+SIGCONTEXT_OFF					\
+	.savesp rp, RP_OFF+SIGCONTEXT_OFF					\
+	.vframesp SP_OFF+SIGCONTEXT_OFF
+
GLOBAL_ENTRY(ia64_sigtramp)
	// describe the state that is active when we get here:
	.prologue
-	.unwabi @svr4, 's'	// mark this as a sigtramp handler (saves scratch regs)
-	.savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF
-	.savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF
-	.savesp pr, PR_OFF+SIGCONTEXT_OFF
-	.savesp rp, RP_OFF+SIGCONTEXT_OFF
-	.vframesp SP_OFF+SIGCONTEXT_OFF
+	SIGTRAMP_SAVES
	.body

	.label_state 1
@@ -156,10 +240,11 @@ back_from_restore_rbs:
	ldf.fill f14=[base0],32
	ldf.fill f15=[base1],32
	mov r15=__NR_rt_sigreturn
+	.restore sp				// pop .prologue
	break __BREAK_SYSCALL

-	.body
-	.copy_state 1
+	.prologue
+	SIGTRAMP_SAVES
setup_rbs:
	mov ar.rsc=0				// put RSE into enforced lazy mode
	;;
@@ -171,6 +256,7 @@ setup_rbs:
	;;
	.spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
	st8 [r14]=r16				// save sc_ar_rnat
+	.body
	adds r14=(LOADRS_OFF+SIGCONTEXT_OFF),sp

	mov.m r16=ar.bsp			// sc_loadrs <- (new bsp - new bspstore) << 16
@@ -182,10 +268,11 @@ setup_rbs:
	;;
	st8 [r14]=r15				// save sc_loadrs
	mov ar.rsc=0xf				// set RSE into eager mode, pl 3
+	.restore sp				// pop .prologue
	br.cond.sptk back_from_setup_rbs

	.prologue
-	.copy_state 1
+	SIGTRAMP_SAVES
	.spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
	.body
restore_rbs:
......
@@ -5,7 +5,7 @@
 * to set up the kernel's global pointer and jump to the kernel
 * entry point.
 *
- * Copyright (C) 1998-2001 Hewlett-Packard Co
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 VA Linux Systems
@@ -143,17 +143,14 @@ start_ap:
	movl r2=init_thread_union
	cmp.eq isBP,isAP=r0,r0
#endif
-	;;
-	extr r3=r2,0,61		// r3 == phys addr of task struct
	mov r16=KERNEL_TR_PAGE_NUM
	;;

	// load the "current" pointer (r13) and ar.k6 with the current task
-	mov r13=r2
-	mov IA64_KR(CURRENT)=r3		// Physical address
+	mov IA64_KR(CURRENT)=r2		// virtual address

	// initialize k4 to a safe value (64-128MB is mapped by TR_KERNEL)
	mov IA64_KR(CURRENT_STACK)=r16
+	mov r13=r2
	/*
	 * Reserve space at the top of the stack for "struct pt_regs".  Kernel threads
	 * don't store interesting values in that structure, but the space still needs
......
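The hunk above drops the extr that converted the task pointer to a physical address before storing it in ar.k6, since the kernel register now holds the virtual address. A sketch of the region-bit arithmetic involved: an ia64 kernel virtual address keeps its region number in bits 61-63, so masking them off (what "extr r3=r2,0,61" did) yields the identity-mapped physical address, and depositing region 7 back rebuilds the virtual one. The offset below is invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t virt = (7ULL << 61) | 0x4000000ULL;	/* region 7, toy offset */
	uint64_t phys = virt & ((1ULL << 61) - 1);	/* extr reg,0,61 */
	uint64_t back = phys | (7ULL << 61);		/* dep reg=-1,reg,61,3 */

	printf("virt=%#llx phys=%#llx back=%#llx\n",
	       (unsigned long long) virt, (unsigned long long) phys,
	       (unsigned long long) back);
	return 0;
}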
@@ -142,4 +142,8 @@ EXPORT_SYMBOL(efi_dir);
EXPORT_SYMBOL(ia64_mv);
#endif
EXPORT_SYMBOL(machvec_noop);
+
+#ifdef CONFIG_PERFMON
+#include <asm/perfmon.h>
+EXPORT_SYMBOL(pfm_install_alternate_syswide_subsystem);
+EXPORT_SYMBOL(pfm_remove_alternate_syswide_subsystem);
+#endif
@@ -752,7 +752,7 @@ iosapic_parse_prt (void)
		if (index < 0) {
			printk(KERN_WARNING"IOSAPIC: GSI 0x%x has no IOSAPIC!\n", gsi);
-			return;
+			continue;
		}
		addr = iosapic_lists[index].addr;
		gsi_base = iosapic_lists[index].gsi_base;
......
@@ -178,7 +178,7 @@ init_IRQ (void)
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
#endif
#ifdef CONFIG_PERFMON
-	perfmon_init_percpu();
+	pfm_init_percpu();
#endif
	platform_irq_init();
}
......
@@ -30,25 +30,23 @@
 * on interrupts.
 */
#define MINSTATE_START_SAVE_MIN_VIRT							\
-(pUser)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
-	dep r1=-1,r1,61,3;	/* r1 = current (virtual) */				\
+(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
	;;										\
-(pUser)	mov.m rARRNAT=ar.rnat;								\
-(pUser)	addl rKRBS=IA64_RBS_OFFSET,r1;			/* compute base of RBS */	\
-(pKern) mov r1=sp;					/* get sp  */			\
+(pUStk)	mov.m rARRNAT=ar.rnat;								\
+(pUStk)	addl rKRBS=IA64_RBS_OFFSET,r1;			/* compute base of RBS */	\
+(pKStk) mov r1=sp;					/* get sp  */			\
	;;										\
-(pUser) lfetch.fault.excl.nt1 [rKRBS];							\
-(pUser)	mov rARBSPSTORE=ar.bspstore;			/* save ar.bspstore */		\
-(pUser)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */ \
+(pUStk) lfetch.fault.excl.nt1 [rKRBS];							\
+(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */ \
+(pUStk)	mov rARBSPSTORE=ar.bspstore;			/* save ar.bspstore */		\
	;;										\
-(pUser)	mov ar.bspstore=rKRBS;				/* switch to kernel RBS */	\
-(pKern)	addl r1=-IA64_PT_REGS_SIZE,r1;		/* if in kernel mode, use sp (r12) */	\
+(pUStk)	mov ar.bspstore=rKRBS;				/* switch to kernel RBS */	\
+(pKStk)	addl r1=-IA64_PT_REGS_SIZE,r1;		/* if in kernel mode, use sp (r12) */	\
	;;										\
-(pUser)	mov r18=ar.bsp;									\
-(pUser)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */	\
+(pUStk)	mov r18=ar.bsp;									\
+(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */	\

#define MINSTATE_END_SAVE_MIN_VIRT							\
-	or r13=r13,r14;		/* make `current' a kernel virtual address */		\
	bsw.1;			/* switch back to bank 1 (must be last in insn group) */ \
	;;
@@ -57,21 +55,21 @@
 * go virtual and dont want to destroy the iip or ipsr.
 */
#define MINSTATE_START_SAVE_MIN_PHYS							\
-(pKern)	movl sp=ia64_init_stack+IA64_STK_OFFSET-IA64_PT_REGS_SIZE;			\
-(pUser)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
-(pUser)	addl rKRBS=IA64_RBS_OFFSET,r1;		/* compute base of register backing store */ \
+(pKStk)	movl sp=ia64_init_stack+IA64_STK_OFFSET-IA64_PT_REGS_SIZE;			\
+(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
+(pUStk)	addl rKRBS=IA64_RBS_OFFSET,r1;		/* compute base of register backing store */ \
	;;										\
-(pUser)	mov rARRNAT=ar.rnat;								\
-(pKern)	dep r1=0,sp,61,3;			/* compute physical addr of sp */	\
-(pUser)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */ \
-(pUser)	mov rARBSPSTORE=ar.bspstore;		/* save ar.bspstore */			\
-(pUser)	dep rKRBS=-1,rKRBS,61,3;		/* compute kernel virtual addr of RBS */ \
+(pUStk)	mov rARRNAT=ar.rnat;								\
+(pKStk)	dep r1=0,sp,61,3;			/* compute physical addr of sp */	\
+(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */ \
+(pUStk)	mov rARBSPSTORE=ar.bspstore;		/* save ar.bspstore */			\
+(pUStk)	dep rKRBS=-1,rKRBS,61,3;		/* compute kernel virtual addr of RBS */ \
	;;										\
-(pKern)	addl r1=-IA64_PT_REGS_SIZE,r1;		/* if in kernel mode, use sp (r12) */	\
-(pUser)	mov ar.bspstore=rKRBS;			/* switch to kernel RBS */		\
+(pKStk)	addl r1=-IA64_PT_REGS_SIZE,r1;		/* if in kernel mode, use sp (r12) */	\
+(pUStk)	mov ar.bspstore=rKRBS;			/* switch to kernel RBS */		\
	;;										\
-(pUser)	mov r18=ar.bsp;									\
-(pUser)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */	\
+(pUStk)	mov r18=ar.bsp;									\
+(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */	\

#define MINSTATE_END_SAVE_MIN_PHYS							\
	or r12=r12,r14;		/* make sp a kernel virtual address */			\
@@ -79,11 +77,13 @@
	;;

#ifdef MINSTATE_VIRT
+# define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT)
# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_VIRT
# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_VIRT
#endif

#ifdef MINSTATE_PHYS
+# define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT);; dep reg=0,reg,61,3
# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_PHYS
# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_PHYS
#endif
@@ -110,23 +110,26 @@
 * we can pass interruption state as arguments to a handler.
 */
#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)						\
-	mov rARRSC=ar.rsc;								\
-	mov rARPFS=ar.pfs;								\
-	mov rR1=r1;									\
-	mov rARUNAT=ar.unat;								\
-	mov rCRIPSR=cr.ipsr;								\
-	mov rB6=b6;		/* rB6 = branch reg 6 */				\
-	mov rCRIIP=cr.iip;								\
-	mov r1=IA64_KR(CURRENT);	/* r1 = current (physical) */			\
-	COVER;										\
-	;;										\
-	invala;										\
-	extr.u r16=rCRIPSR,32,2;	/* extract psr.cpl */				\
-	;;										\
-	cmp.eq pKern,pUser=r0,r16;	/* are we in kernel mode already? (psr.cpl==0) */ \
-	/* switch from user to kernel RBS: */						\
-	;;										\
-	SAVE_IFS;									\
+	mov rARRSC=ar.rsc;		/* M */						\
+	mov rARUNAT=ar.unat;		/* M */						\
+	mov rR1=r1;			/* A */						\
+	MINSTATE_GET_CURRENT(r1);	/* M (or M;;I) */				\
+	mov rCRIPSR=cr.ipsr;		/* M */						\
+	mov rARPFS=ar.pfs;		/* I */						\
+	mov rCRIIP=cr.iip;		/* M */						\
+	mov rB6=b6;			/* I */	/* rB6 = branch reg 6 */		\
+	COVER;				/* B;; (or nothing) */				\
+	;;										\
+	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r1;					\
+	;;										\
+	ld1 r17=[r16];			/* load current->thread.on_ustack flag */	\
+	st1 [r16]=r0;			/* clear current->thread.on_ustack flag */	\
+	/* switch from user to kernel RBS: */						\
+	;;										\
+	invala;				/* M */						\
+	SAVE_IFS;									\
+	cmp.eq pKStk,pUStk=r0,r17;	/* are we in kernel mode already? (psr.cpl==0) */ \
+	;;										\
	MINSTATE_START_SAVE_MIN								\
	add r17=L1_CACHE_BYTES,r1	/* really: biggest cache-line size */		\
	;;										\
@@ -138,23 +141,23 @@
	;;										\
	lfetch.fault.excl.nt1 [r17];							\
	adds r17=8,r1;			/* initialize second base pointer */		\
-(pKern)	mov r18=r0;			/* make sure r18 isn't NaT */			\
+(pKStk)	mov r18=r0;			/* make sure r18 isn't NaT */			\
	;;										\
	st8 [r17]=rCRIIP,16;		/* save cr.iip */				\
	st8 [r16]=rCRIFS,16;		/* save cr.ifs */				\
-(pUser)	sub r18=r18,rKRBS;		/* r18=RSE.ndirty*8 */				\
+(pUStk)	sub r18=r18,rKRBS;		/* r18=RSE.ndirty*8 */				\
	;;										\
	st8 [r17]=rARUNAT,16;		/* save ar.unat */				\
	st8 [r16]=rARPFS,16;		/* save ar.pfs */				\
	shl r18=r18,16;			/* compute ar.rsc to be used for "loadrs" */	\
	;;										\
	st8 [r17]=rARRSC,16;		/* save ar.rsc */				\
-(pUser)	st8 [r16]=rARRNAT,16;		/* save ar.rnat */				\
-(pKern)	adds r16=16,r16;		/* skip over ar_rnat field */			\
+(pUStk)	st8 [r16]=rARRNAT,16;		/* save ar.rnat */				\
+(pKStk)	adds r16=16,r16;		/* skip over ar_rnat field */			\
	;;				/* avoid RAW on r16 & r17 */			\
-(pUser)	st8 [r17]=rARBSPSTORE,16;	/* save ar.bspstore */				\
+(pUStk)	st8 [r17]=rARBSPSTORE,16;	/* save ar.bspstore */				\
	st8 [r16]=rARPR,16;		/* save predicates */				\
-(pKern)	adds r17=16,r17;		/* skip over ar_bspstore field */		\
+(pKStk)	adds r17=16,r17;		/* skip over ar_bspstore field */		\
	;;										\
	st8 [r17]=rB6,16;		/* save b6 */					\
	st8 [r16]=r18,16;		/* save ar.rsc value for "loadrs" */		\
......
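The reason DO_SAVE_MIN now tests current->thread.on_ustack instead of psr.cpl: an fsyscall entered via epc runs at privilege level 0 while still on the user's stacks, so the old cpl==0 test would misclassify it as a kernel interruption. A hypothetical C model of the two classifications:

#include <stdio.h>
#include <stdbool.h>

struct state { unsigned cpl; bool on_ustack; };

static bool old_is_kernel(struct state s) { return s.cpl == 0; }	/* extr.u psr.cpl */
static bool new_is_kernel(struct state s) { return !s.on_ustack; }	/* ld1 on_ustack */

int main(void)
{
	struct state user = { .cpl = 3, .on_ustack = true  };
	struct state kern = { .cpl = 0, .on_ustack = false };
	struct state fsys = { .cpl = 0, .on_ustack = true  };	/* after epc */

	printf("user: old=%d new=%d\n", old_is_kernel(user), new_is_kernel(user));
	printf("kern: old=%d new=%d\n", old_is_kernel(kern), new_is_kernel(kern));
	printf("fsys: old=%d new=%d\n", old_is_kernel(fsys), new_is_kernel(fsys));	/* disagree */
	return 0;
}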
@@ -4,7 +4,7 @@
 *
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999-2001 Hewlett-Packard Co
+ * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co
 *	David Mosberger <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 *
@@ -114,7 +114,7 @@ GLOBAL_ENTRY(ia64_pal_call_stacked)
	;;
	rsm psr.i
	mov b7 = loc2
	;;
	br.call.sptk.many rp=b7		// now make the call
.ret0:	mov psr.l = loc3
	mov ar.pfs = loc1
@@ -131,15 +131,15 @@ END(ia64_pal_call_stacked)
 *	in0		Index of PAL service
 *	in2 - in3	Remaning PAL arguments
 *
- *	PSR_DB, PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
+ *	PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
 *	So we don't need to clear them.
 */
#define PAL_PSR_BITS_TO_CLEAR							\
-	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT |		\
+	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT |	\
	 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |		\
	 IA64_PSR_DFL | IA64_PSR_DFH)

#define PAL_PSR_BITS_TO_SET							\
	(IA64_PSR_BN)
@@ -161,7 +161,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
	;;
	mov loc3 = psr		// save psr
	adds r8 = 1f-1b,r8	// calculate return address for call
	;;
	mov loc4=ar.rsc		// save RSE configuration
	dep.z loc2=loc2,0,61	// convert pal entry point to physical
	dep.z r8=r8,0,61	// convert rp to physical
@@ -275,7 +275,6 @@ END(ia64_save_scratch_fpregs)
 *	Inputs:
 *		in0	Address of stack storage for fp regs
 */
-
GLOBAL_ENTRY(ia64_load_scratch_fpregs)
	alloc r3=ar.pfs,1,0,0,0
	add r2=16,in0
......
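The PAL_PSR_BITS_TO_CLEAR change moves IA64_PSR_DB out of the "never set" list and into the bits explicitly masked off before a PAL call, so hardware debug breakpoints cannot fire inside firmware. A toy C sketch of clearing such a mask; the bit positions below are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define PSR_I	(1ULL << 14)	/* illustrative positions */
#define PSR_DB	(1ULL << 24)

int main(void)
{
	uint64_t psr = PSR_I | PSR_DB;		/* interrupts + debug enabled */
	uint64_t to_clear = PSR_I | PSR_DB;	/* built like PAL_PSR_BITS_TO_CLEAR */

	printf("before=%#llx after=%#llx\n",
	       (unsigned long long) psr,
	       (unsigned long long) (psr & ~to_clear));
	return 0;
}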
+/*
+ * This file contains the architected PMU register description tables
+ * and pmc checker used by perfmon.c.
+ *
+ * Copyright (C) 2002 Hewlett Packard Co
+ *               Stephane Eranian <eranian@hpl.hp.com>
+ */
+
#define RDEP(x)	(1UL<<(x))

-#if defined(CONFIG_ITANIUM) || defined(CONFIG_MCKINLEY)
-#error "This file should only be used when CONFIG_ITANIUM and CONFIG_MCKINLEY are not defined"
+#if defined(CONFIG_ITANIUM) || defined (CONFIG_MCKINLEY)
+#error "This file should not be used when CONFIG_ITANIUM or CONFIG_MCKINLEY is defined"
#endif

-static pfm_reg_desc_t pmc_desc[PMU_MAX_PMCS]={
+static pfm_reg_desc_t pmc_gen_desc[PMU_MAX_PMCS]={
/* pmc0  */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1  */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2  */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
@@ -13,10 +20,10 @@ static pfm_reg_desc_t pmc_desc[PMU_MAX_PMCS]={
/* pmc5  */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6  */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7  */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
	    { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};

-static pfm_reg_desc_t pmd_desc[PMU_MAX_PMDS]={
+static pfm_reg_desc_t pmd_gen_desc[PMU_MAX_PMDS]={
/* pmd0  */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd1  */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd2  */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
@@ -25,5 +32,17 @@ static pfm_reg_desc_t pmd_desc[PMU_MAX_PMDS]={
/* pmd5  */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6  */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7  */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
	    { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
+
+/*
+ * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
+ */
+static pmu_config_t pmu_conf={
+	disabled:	1,
+	ovfl_val:	(1UL << 32) - 1,
+	num_ibrs:	8,
+	num_dbrs:	8,
+	pmd_desc:	pfm_gen_pmd_desc,
+	pmc_desc:	pfm_gen_pmc_desc
+};
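The pmu_config_t initializers in these perfmon tables use the old GNU C "field: value" syntax; standard C99 spells the same thing with a leading dot. A toy equivalent, with the struct and values invented for illustration:

#include <stdio.h>

typedef struct {
	int disabled;
	unsigned long long ovfl_val;
	int num_ibrs, num_dbrs;
} toy_pmu_config_t;

static toy_pmu_config_t toy_conf = {
	.disabled = 1,				/* C99 designated initializers */
	.ovfl_val = (1ULL << 32) - 1,		/* 32-bit counters, like the generic PMU */
	.num_ibrs = 8,
	.num_dbrs = 8,
};

int main(void)
{
	printf("ovfl_val=%#llx\n", toy_conf.ovfl_val);
	return 0;
}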
@@ -15,7 +15,7 @@
static int pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);

-static pfm_reg_desc_t pfm_pmc_desc[PMU_MAX_PMCS]={
+static pfm_reg_desc_t pfm_ita_pmc_desc[PMU_MAX_PMCS]={
/* pmc0  */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1  */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2  */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
@@ -33,7 +33,7 @@ static pfm_reg_desc_t pfm_pmc_desc[PMU_MAX_PMCS]={
	    { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};

-static pfm_reg_desc_t pfm_pmd_desc[PMU_MAX_PMDS]={
+static pfm_reg_desc_t pfm_ita_pmd_desc[PMU_MAX_PMDS]={
/* pmd0  */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd1  */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd2  */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
@@ -55,6 +55,19 @@ static pfm_reg_desc_t pfm_pmd_desc[PMU_MAX_PMDS]={
	    { PFM_REG_END , 0, 0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};

+/*
+ * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
+ */
+static pmu_config_t pmu_conf={
+	disabled:	1,
+	ovfl_val:	(1UL << 32) - 1,
+	num_ibrs:	8,
+	num_dbrs:	8,
+	pmd_desc:	pfm_ita_pmd_desc,
+	pmc_desc:	pfm_ita_pmc_desc
+};
+
static int
pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
......
@@ -16,7 +16,7 @@ static int pfm_mck_reserved(struct task_struct *task, unsigned int cnum, unsigne
static int pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);

-static pfm_reg_desc_t pfm_pmc_desc[PMU_MAX_PMCS]={
+static pfm_reg_desc_t pfm_mck_pmc_desc[PMU_MAX_PMCS]={
/* pmc0  */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1  */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2  */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
@@ -36,7 +36,7 @@ static pfm_reg_desc_t pfm_pmc_desc[PMU_MAX_PMCS]={
	    { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};

-static pfm_reg_desc_t pfm_pmd_desc[PMU_MAX_PMDS]={
+static pfm_reg_desc_t pfm_mck_pmd_desc[PMU_MAX_PMDS]={
/* pmd0  */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd1  */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd2  */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
@@ -58,6 +58,19 @@ static pfm_reg_desc_t pfm_pmd_desc[PMU_MAX_PMDS]={
	    { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};

+/*
+ * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
+ */
+static pmu_config_t pmu_conf={
+	disabled:	1,
+	ovfl_val:	(1UL << 47) - 1,
+	num_ibrs:	8,
+	num_dbrs:	8,
+	pmd_desc:	pfm_mck_pmd_desc,
+	pmc_desc:	pfm_mck_pmc_desc
+};
+
/*
 * PMC reserved fields must have their power-up values preserved
 */
......
/*
 * Architecture-specific setup.
 *
- * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#define __KERNEL_SYSCALLS__	/* see <asm/unistd.h> */
@@ -96,7 +96,7 @@ show_regs (struct pt_regs *regs)
{
	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;

-	printk("\nPid: %d, comm: %20s\n", current->pid, current->comm);
+	printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm);
	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]    %s\n",
	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
	print_symbol("ip is at %s\n", ip);
@@ -144,6 +144,13 @@ show_regs (struct pt_regs *regs)
void
do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
{
+	if (fsys_mode(current, &scr->pt)) {
+		/* defer signal-handling etc. until we return to privilege-level 0.  */
+		if (!ia64_psr(&scr->pt)->lp)
+			ia64_psr(&scr->pt)->lp = 1;
+		return;
+	}
+
#ifdef CONFIG_PERFMON
	if (current->thread.pfm_ovfl_block_reset)
		pfm_ovfl_block_reset();
@@ -198,6 +205,10 @@ cpu_idle (void *unused)
void
ia64_save_extra (struct task_struct *task)
{
+#ifdef CONFIG_PERFMON
+	unsigned long info;
+#endif
+
	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
		ia64_save_debug_regs(&task->thread.dbr[0]);
@@ -205,8 +216,9 @@ ia64_save_extra (struct task_struct *task)
	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
		pfm_save_regs(task);

-	if (__get_cpu_var(pfm_syst_wide))
-		pfm_syst_wide_update_task(task, 0);
+	info = __get_cpu_var(pfm_syst_info);
+	if (info & PFM_CPUINFO_SYST_WIDE)
+		pfm_syst_wide_update_task(task, info, 0);
#endif

#ifdef CONFIG_IA32_SUPPORT
@@ -218,6 +230,10 @@ ia64_save_extra (struct task_struct *task)
void
ia64_load_extra (struct task_struct *task)
{
+#ifdef CONFIG_PERFMON
+	unsigned long info;
+#endif
+
	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
		ia64_load_debug_regs(&task->thread.dbr[0]);
@@ -225,8 +241,9 @@ ia64_load_extra (struct task_struct *task)
	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
		pfm_load_regs(task);

-	if (__get_cpu_var(pfm_syst_wide))
-		pfm_syst_wide_update_task(task, 1);
+	info = __get_cpu_var(pfm_syst_info);
+	if (info & PFM_CPUINFO_SYST_WIDE)
+		pfm_syst_wide_update_task(task, info, 1);
#endif

#ifdef CONFIG_IA32_SUPPORT
......
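The per-CPU boolean pfm_syst_wide becomes a flag word pfm_syst_info here, so one variable can carry the system-wide bit plus future per-CPU state, and the whole word is handed to pfm_syst_wide_update_task(). A hedged sketch of the flag-word style; the bit assignments and second flag are invented:

#include <stdio.h>

#define PFM_CPUINFO_SYST_WIDE	0x1	/* illustrative bit assignments */
#define PFM_CPUINFO_DCR_PP	0x2

static void update_task(unsigned long info, int is_ctxswin)
{
	printf("syst_wide=%lu extra=%lu ctxswin=%d\n",
	       info & PFM_CPUINFO_SYST_WIDE,
	       (info & PFM_CPUINFO_DCR_PP) >> 1, is_ctxswin);
}

int main(void)
{
	unsigned long pfm_syst_info = PFM_CPUINFO_SYST_WIDE | PFM_CPUINFO_DCR_PP;

	if (pfm_syst_info & PFM_CPUINFO_SYST_WIDE)	/* mirrors ia64_load_extra() */
		update_task(pfm_syst_info, 1);
	return 0;
}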
@@ -833,21 +833,19 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
			return -1;
		}
#ifdef CONFIG_PERFMON
		/*
-		 * Check if debug registers are used
-		 * by perfmon. This test must be done once we know that we can
-		 * do the operation, i.e. the arguments are all valid, but before
-		 * we start modifying the state.
+		 * Check if debug registers are used by perfmon. This test must be done
+		 * once we know that we can do the operation, i.e. the arguments are all
+		 * valid, but before we start modifying the state.
		 *
-		 * Perfmon needs to keep a count of how many processes are
-		 * trying to modify the debug registers for system wide monitoring
-		 * sessions.
+		 * Perfmon needs to keep a count of how many processes are trying to
+		 * modify the debug registers for system wide monitoring sessions.
		 *
-		 * We also include read access here, because they may cause
-		 * the PMU-installed debug register state (dbr[], ibr[]) to
-		 * be reset. The two arrays are also used by perfmon, but
-		 * we do not use IA64_THREAD_DBG_VALID. The registers are restored
-		 * by the PMU context switch code.
+		 * We also include read access here, because they may cause the
+		 * PMU-installed debug register state (dbr[], ibr[]) to be reset. The two
+		 * arrays are also used by perfmon, but we do not use
+		 * IA64_THREAD_DBG_VALID. The registers are restored by the PMU context
+		 * switch code.
		 */
		if (pfm_use_debug_registers(child)) return -1;
#endif
......
@@ -265,7 +265,7 @@ smp_callin (void)
	extern void ia64_init_itm(void);
#ifdef CONFIG_PERFMON
-	extern void perfmon_init_percpu(void);
+	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
@@ -300,7 +300,7 @@ smp_callin (void)
#endif

#ifdef CONFIG_PERFMON
-	perfmon_init_percpu();
+	pfm_init_percpu();
#endif

	local_irq_enable();
......
@@ -20,7 +20,6 @@
#include <asm/shmparam.h>
#include <asm/uaccess.h>

-
unsigned long
arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len,
			unsigned long pgoff, unsigned long flags)
@@ -31,6 +30,20 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;

+#ifdef CONFIG_HUGETLB_PAGE
+#define COLOR_HALIGN(addr)	((addr + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1))
+#define TASK_HPAGE_BASE		((REGION_HPAGE << REGION_SHIFT) | HPAGE_SIZE)
+
+	if (filp && is_file_hugepages(filp)) {
+		if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE -1)))
+			addr = TASK_HPAGE_BASE;
+		addr = COLOR_HALIGN(addr);
+	}
+	else {
+		if (REGION_NUMBER(addr) == REGION_HPAGE)
+			addr = 0;
+	}
+#endif
+
	if (!addr)
		addr = TASK_UNMAPPED_BASE;
......
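COLOR_HALIGN in the hunk above is the classic round-up-to-a-power-of-two idiom: add (size - 1), then mask the low bits. A toy demonstration, assuming an invented 256MB huge-page size:

#include <stdio.h>

#define HPAGE_SIZE	(1UL << 28)	/* assumption: 256MB huge pages */
#define COLOR_HALIGN(a)	(((a) + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = (1UL << 28) + 5;	/* just past a boundary */
	printf("%#lx -> %#lx\n", addr, COLOR_HALIGN(addr));	/* rounds up to 0x20000000 */
	return 0;
}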
/*
 * Architecture-specific trap handling.
 *
- * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
@@ -524,6 +524,23 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
	      case 29: /* Debug */
	      case 35: /* Taken Branch Trap */
	      case 36: /* Single Step Trap */
+		if (fsys_mode(current, regs)) {
+			extern char syscall_via_break[], __start_gate_section[];
+			/*
+			 * Got a trap in fsys-mode: Taken Branch Trap and Single Step trap
+			 * need special handling; Debug trap is not supposed to happen.
+			 */
+			if (unlikely(vector == 29)) {
+				die("Got debug trap in fsys-mode---not supposed to happen!",
+				    regs, 0);
+				return;
+			}
+			/* re-do the system call via break 0x100000: */
+			regs->cr_iip = GATE_ADDR + (syscall_via_break - __start_gate_section);
+			ia64_psr(regs)->ri = 0;
+			ia64_psr(regs)->cpl = 3;
+			return;
+		}
		switch (vector) {
		      case 29:
			siginfo.si_code = TRAP_HWBKPT;
@@ -563,19 +580,31 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
		}
		return;

-	      case 34:		/* Unimplemented Instruction Address Trap */
-		if (user_mode(regs)) {
-			siginfo.si_signo = SIGILL;
-			siginfo.si_code = ILL_BADIADDR;
-			siginfo.si_errno = 0;
-			siginfo.si_flags = 0;
-			siginfo.si_isr = 0;
-			siginfo.si_imm = 0;
-			siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
-			force_sig_info(SIGILL, &siginfo, current);
-			return;
+	      case 34:
+		if (isr & 0x2) {
+			/* Lower-Privilege Transfer Trap */
+			/*
+			 * Just clear PSR.lp and then return immediately: all the
+			 * interesting work (e.g., signal delivery is done in the kernel
+			 * exit path).
+			 */
+			ia64_psr(regs)->lp = 0;
+			return;
+		} else {
+			/* Unimplemented Instr. Address Trap */
+			if (user_mode(regs)) {
+				siginfo.si_signo = SIGILL;
+				siginfo.si_code = ILL_BADIADDR;
+				siginfo.si_errno = 0;
+				siginfo.si_flags = 0;
+				siginfo.si_isr = 0;
+				siginfo.si_imm = 0;
+				siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
+				force_sig_info(SIGILL, &siginfo, current);
+				return;
+			}
+			sprintf(buf, "Unimplemented Instruction Address fault");
		}
-		sprintf(buf, "Unimplemented Instruction Address fault");
		break;

	      case 45:
......
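The trap-34 change pairs with the do_notify_resume_user() hunk earlier: since a signal cannot be delivered while a task is in fsys-mode, the kernel arms the lower-privilege transfer trap (psr.lp) and handles the pending work when that trap fires on the way back to user level. A hedged C model of the deferral state machine; struct and names are invented:

#include <stdio.h>
#include <stdbool.h>

struct task { bool in_fsys_mode; bool lp_armed; bool signal_pending; };

static void notify_resume(struct task *t)
{
	if (t->in_fsys_mode) {		/* cannot touch scratch state yet */
		t->lp_armed = true;	/* defer: trap on leaving fsys-mode */
		return;
	}
	if (t->signal_pending)
		printf("delivering signal now\n");
}

static void lower_privilege_transfer_trap(struct task *t)
{
	t->lp_armed = false;		/* vector 34, isr bit 1: just disarm */
	t->in_fsys_mode = false;
	notify_resume(t);		/* normal kernel-exit path runs next */
}

int main(void)
{
	struct task t = { .in_fsys_mode = true, .signal_pending = true };

	notify_resume(&t);				/* deferred */
	if (t.lp_armed)
		lower_privilege_transfer_trap(&t);	/* fires on exit */
	return 0;
}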
@@ -331,12 +331,8 @@ set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
		return;
	}

-	/*
-	 * Avoid using user_mode() here: with "epc", we cannot use the privilege level to
-	 * infer whether the interrupt task was running on the kernel backing store.
-	 */
-	if (regs->r12 >= TASK_SIZE) {
-		DPRINT("ignoring kernel write to r%lu; register isn't on the RBS!", r1);
+	if (!user_stack(current, regs)) {
+		DPRINT("ignoring kernel write to r%lu; register isn't on the kernel RBS!", r1);
		return;
	}
@@ -406,11 +402,7 @@ get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *na
		return;
	}

-	/*
-	 * Avoid using user_mode() here: with "epc", we cannot use the privilege level to
-	 * infer whether the interrupt task was running on the kernel backing store.
-	 */
-	if (regs->r12 >= TASK_SIZE) {
+	if (!user_stack(current, regs)) {
		DPRINT("ignoring kernel read of r%lu; register isn't on the RBS!", r1);
		goto fail;
	}
......
@@ -1997,16 +1997,18 @@ unw_create_gate_table (void)
{
	extern char __start_gate_section[], __stop_gate_section[];
	unsigned long *lp, start, end, segbase = unw.kernel_table.segment_base;
-	const struct unw_table_entry *entry, *first;
+	const struct unw_table_entry *entry, *first, *unw_table_end;
+	extern int ia64_unw_end;
	size_t info_size, size;
	char *info;

	start = (unsigned long) __start_gate_section - segbase;
	end   = (unsigned long) __stop_gate_section - segbase;
+	unw_table_end = (struct unw_table_entry *) &ia64_unw_end;
	size  = 0;
	first = lookup(&unw.kernel_table, start);

-	for (entry = first; entry->start_offset < end; ++entry)
+	for (entry = first; entry < unw_table_end && entry->start_offset < end; ++entry)
		size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
	size += 8;	/* reserve space for "end of table" marker */
@@ -2021,7 +2023,7 @@ unw_create_gate_table (void)
	lp = unw.gate_table;
	info = (char *) unw.gate_table + size;

-	for (entry = first; entry->start_offset < end; ++entry, lp += 3) {
+	for (entry = first; entry < unw_table_end && entry->start_offset < end; ++entry, lp += 3) {
		info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
		info -= info_size;
		memcpy(info, (char *) segbase + entry->info_offset, info_size);
......
@@ -159,7 +159,7 @@ GLOBAL_ENTRY(__copy_user)
	mov	ar.ec=2
(p10)	br.dpnt.few .aligned_src_tail
	;;
-	.align 32
+//	.align 32
1:
EX(.ex_handler, (p16)	ld8	r34=[src0],16)
EK(.ex_handler, (p16)	ld8	r38=[src1],16)
@@ -316,7 +316,7 @@ EK(.ex_handler, (p[D]) st8 [dst1] = t15, 4*8)
(p7)	mov	ar.lc = r21
(p8)	mov	ar.lc = r0
	;;
-	.align 32
+//	.align 32
1:	lfetch.fault	[src_pre_mem], 128
	lfetch.fault.excl	[dst_pre_mem], 128
	br.cloop.dptk.few 1b
@@ -522,7 +522,7 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
	shrp	r21=r22,r38,shift;	/* speculative work */ \
	br.sptk.few .unaligned_src_tail /* branch out of jump table */ \
	;;
-	.align 32
+//	.align 32
.jump_table:
	COPYU(8)	// unaligned cases
.jmp1:
......
@@ -125,7 +125,7 @@ GLOBAL_ENTRY(memset)
(p_zr)	br.cond.dptk.many .l1b			// Jump to use stf.spill
;; }

-	.align 32 // -------------------------- //  L1A: store ahead into cache lines; fill later
+//	.align 32 // -------------------------- //  L1A: store ahead into cache lines; fill later
{ .mmi
	and	tmp = -(LINE_SIZE), cnt		// compute end of range
	mov	ptr9 = ptr1			// used for prefetching
@@ -194,7 +194,7 @@ GLOBAL_ENTRY(memset)
	br.cond.dpnt.many .move_bytes_from_alignment	// Branch no. 3
;; }

-	.align 32
+//	.align 32
.l1b:	// ------------------------------------ //  L1B: store ahead into cache lines; fill later
{ .mmi
	and	tmp = -(LINE_SIZE), cnt		// compute end of range
@@ -261,7 +261,7 @@ GLOBAL_ENTRY(memset)
	and	cnt = 0x1f, cnt			// compute the remaining cnt
	mov.i	ar.lc = loopcnt
;; }

-	.align 32
+//	.align 32
.l2:	// ------------------------------------ //  L2A: store 32B in 2 cycles
{ .mmb
	stf8	[ptr1] = fvalue, 8
......
@@ -342,13 +342,6 @@ ia64_mmu_init (void *my_cpu_data)
	 * Set up the page tables.
	 */

-#ifdef CONFIG_HUGETLB_PAGE
-long	htlbpagemem;
-int	htlbpage_max;
-extern long htlbzone_pages;
-extern struct list_head htlbpage_freelist;
-#endif

#ifdef CONFIG_DISCONTIGMEM
void
paging_init (void)
@@ -462,29 +455,4 @@ mem_init (void)
#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif
-#ifdef CONFIG_HUGETLB_PAGE
-	{
-		long i;
-		int j;
-		struct page *page, *map;
-
-		if ((htlbzone_pages << (HPAGE_SHIFT - PAGE_SHIFT)) >= max_low_pfn)
-			htlbzone_pages = (max_low_pfn >> ((HPAGE_SHIFT - PAGE_SHIFT) + 1));
-		INIT_LIST_HEAD(&htlbpage_freelist);
-		for (i = 0; i < htlbzone_pages; i++) {
-			page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
-			if (!page)
-				break;
-			map = page;
-			for (j = 0; j < (HPAGE_SIZE/PAGE_SIZE); j++) {
-				SetPageReserved(map);
-				map++;
-			}
-			list_add(&page->list, &htlbpage_freelist);
-		}
-		printk("Total Huge_TLB_Page memory pages allocated %ld\n", i);
-		htlbzone_pages = htlbpagemem = i;
-		htlbpage_max = (int)i;
-	}
-#endif
}
#!/bin/sh
# Usage: unwcheck.sh <executable_file_name>
# Pre-requisite: readelf [from GNU binutils package]
# Purpose: Check the following invariant
# For each code range in the input binary:
# Sum[ lengths of unwind regions] = Number of slots in code range.
# Author : Harish Patil
# First version: January 2002
# Modified : 2/13/2002
# Modified : 3/15/2002: duplicate detection
readelf -u "$1" | gawk '\
function todec(hexstr){
dec = 0;
l = length(hexstr);
for (i = 1; i <= l; i++)
{
c = substr(hexstr, i, 1);
if (c == "A")
dec = dec*16 + 10;
else if (c == "B")
dec = dec*16 + 11;
else if (c == "C")
dec = dec*16 + 12;
else if (c == "D")
dec = dec*16 + 13;
else if (c == "E")
dec = dec*16 + 14;
else if (c == "F")
dec = dec*16 + 15;
else
dec = dec*16 + c;
}
return dec;
}
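# Note: gawk also provides strtonum(), so todec(hexstr) above is equivalent
# to strtonum("0x" hexstr).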
BEGIN { first = 1; sum_rlen = 0; no_slots = 0; errors=0; no_code_ranges=0; }
{
if (NF==5 && $3=="info")
{
no_code_ranges += 1;
if (first == 0)
{
if (sum_rlen != no_slots)
{
print full_code_range;
print " ", "lo = ", lo, " hi =", hi;
print " ", "sum_rlen = ", sum_rlen, "no_slots = " no_slots;
print " "," ", "*******ERROR ***********";
print " "," ", "sum_rlen:", sum_rlen, " != no_slots:" no_slots;
errors += 1;
}
sum_rlen = 0;
}
full_code_range = $0;
code_range = $2;
gsub("..$", "", code_range);
gsub("^.", "", code_range);
split(code_range, addr, "-");
lo = toupper(addr[1]);
code_range_lo[no_code_ranges] = addr[1];
occurs[addr[1]] += 1;
full_range[addr[1]] = $0;
gsub("0X.[0]*", "", lo);
hi = toupper(addr[2]);
gsub("0X.[0]*", "", hi);
no_slots = (todec(hi) - todec(lo))/ 16*3
first = 0;
}
if (index($0,"rlen") > 0 )
{
rlen_str = substr($0, index($0,"rlen"));
rlen = rlen_str;
gsub("rlen=", "", rlen);
gsub(")", "", rlen);
sum_rlen = sum_rlen + rlen;
}
}
END {
if (first == 0)
{
if (sum_rlen != no_slots)
{
print "code_range=", code_range;
print " ", "lo = ", lo, " hi =", hi;
print " ", "sum_rlen = ", sum_rlen, "no_slots = " no_slots;
print " "," ", "*******ERROR ***********";
print " "," ", "sum_rlen:", sum_rlen, " != no_slots:" no_slots;
errors += 1;
}
}
no_duplicates = 0;
for (i=1; i<=no_code_ranges; i++)
{
cr = code_range_lo[i];
if (reported_cr[cr]==1) continue;
if ( occurs[cr] > 1)
{
reported_cr[cr] = 1;
print "Code range low ", code_range_lo[i], ":", full_range[cr], " occurs: ", occurs[cr], " times.";
print " ";
no_duplicates++;
}
}
print "======================================"
print "Total errors:", errors, "/", no_code_ranges, " duplicates:", no_duplicates;
print "======================================"
}
'
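For example, running the checker over a freshly built kernel image (file name
and counts illustrative; the summary format comes from the END block above):

    $ sh arch/ia64/scripts/unwcheck.sh vmlinux
    ======================================
    Total errors: 0 / 913  duplicates: 0
    ======================================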
@@ -4,14 +4,7 @@ TARGET = include/asm-ia64/offsets.h
src = $(obj)

-all:
-fastdep:
-mrproper: clean
-clean:
-	rm -f $(obj)/print_offsets.s $(obj)/print_offsets $(obj)/offsets.h
+clean-files := print_offsets.s print_offsets offsets.h

$(TARGET): $(obj)/offsets.h
	@if ! cmp -s $(obj)/offsets.h ${TARGET}; then \
......
/*
 * Utility to generate asm-ia64/offsets.h.
 *
- * Copyright (C) 1999-2002 Hewlett-Packard Co
+ * Copyright (C) 1999-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Note that this file has dual use: when building the kernel
@@ -53,7 +53,9 @@ tab[] =
	{ "UNW_FRAME_INFO_SIZE", sizeof (struct unw_frame_info) },
	{ "", 0 },			/* spacer */
	{ "IA64_TASK_THREAD_KSP_OFFSET", offsetof (struct task_struct, thread.ksp) },
+	{ "IA64_TASK_THREAD_ON_USTACK_OFFSET", offsetof (struct task_struct, thread.on_ustack) },
	{ "IA64_TASK_PID_OFFSET", offsetof (struct task_struct, pid) },
+	{ "IA64_TASK_TGID_OFFSET", offsetof (struct task_struct, tgid) },
	{ "IA64_PT_REGS_CR_IPSR_OFFSET", offsetof (struct pt_regs, cr_ipsr) },
	{ "IA64_PT_REGS_CR_IIP_OFFSET", offsetof (struct pt_regs, cr_iip) },
	{ "IA64_PT_REGS_CR_IFS_OFFSET", offsetof (struct pt_regs, cr_ifs) },
......
@@ -131,10 +131,6 @@ SECTIONS
  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - PAGE_OFFSET)
	{ *(.data.cacheline_aligned) }

-  /* Kernel symbol names for modules: */
-  .kstrtab : AT(ADDR(.kstrtab) - PAGE_OFFSET)
-	{ *(.kstrtab) }

  /* Per-cpu data: */
  . = ALIGN(PERCPU_PAGE_SIZE);
  __phys_per_cpu_start = .;
......
@@ -2,15 +2,22 @@
#define _ASM_IA64_ASMMACRO_H

/*
- * Copyright (C) 2000-2001 Hewlett-Packard Co
+ * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

+#include <linux/config.h>
#define ENTRY(name)				\
	.align 32;				\
	.proc name;				\
name:
#define ENTRY_MIN_ALIGN(name) \
.align 16; \
.proc name; \
name:
#define GLOBAL_ENTRY(name)			\
	.global name;				\
	ENTRY(name)
@@ -52,4 +59,13 @@
99:	x
#endif
#ifdef CONFIG_MCKINLEY
/* workaround for Itanium 2 Errata 9: */
# define MCKINLEY_E9_WORKAROUND \
br.call.sptk.many b7=1f;; \
1:
#else
# define MCKINLEY_E9_WORKAROUND
#endif
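/*
 * The workaround expands to a br.call targeting the immediately following
 * bundle: a branch that does nothing except write the return link to b7
 * and update the branch/return-prediction state.  Where it must be placed
 * relative to the erratum is up to the call sites; this header only
 * supplies the macro (and an empty definition for non-McKinley builds).
 */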
#endif /* _ASM_IA64_ASMMACRO_H */
@@ -2,7 +2,7 @@
#define _ASM_IA64_BITOPS_H

/*
- * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1)
@@ -320,7 +320,7 @@ __ffs (unsigned long x)
static inline unsigned long
ia64_fls (unsigned long x)
{
-	double d = x;
+	long double d = x;
	long exp;

	__asm__ ("getf.exp %0=%1" : "=r"(exp) : "f"(d));
......
@@ -4,10 +4,12 @@
/*
 * ELF-specific definitions.
 *
- * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
+ * Copyright (C) 1998-1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

+#include <linux/config.h>
#include <asm/fpu.h>
#include <asm/page.h>
@@ -88,6 +90,11 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
   relevant until we have real hardware to play with... */
#define ELF_PLATFORM	0
/*
* This should go into linux/elf.h...
*/
#define AT_SYSINFO 32
#ifdef __KERNEL__
struct elf64_hdr;
extern void ia64_set_personality (struct elf64_hdr *elf_ex, int ibcs2_interpreter);
@@ -99,7 +106,14 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs)
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs)	dump_task_fpu(tsk, elf_fpregs)
#ifdef CONFIG_FSYS
#define ARCH_DLINFO \
do { \
extern int syscall_via_epc; \
NEW_AUX_ENT(AT_SYSINFO, syscall_via_epc); \
} while (0)
#endif
+#endif /* __KERNEL__ */

#endif /* _ASM_IA64_ELF_H */
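With CONFIG_FSYS enabled, user level can locate the light-weight system-call
entry point by scanning the ELF auxiliary vector for AT_SYSINFO.  A minimal
sketch, assuming the usual glibc-style layout where the auxiliary vector
follows the terminating NULL of envp[] (illustrative, not part of this patch):

	#include <elf.h>
	#include <stdio.h>

	extern char **environ;

	int
	main (void)
	{
		char **p = environ;
		Elf64_auxv_t *av;

		while (*p)
			p++;			/* skip to the end of envp[] */
		for (av = (Elf64_auxv_t *) (p + 1); av->a_type != AT_NULL; av++)
			if (av->a_type == 32)	/* AT_SYSINFO */
				printf("epc entry point at 0x%lx\n",
				       (unsigned long) av->a_un.a_val);
		return 0;
	}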
@@ -4,10 +4,12 @@
/*
 * Compiler-dependent intrinsics.
 *
- * Copyright (C) 2002 Hewlett-Packard Co
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

+#include <linux/config.h>
/*
 * Force an unresolved reference if someone tries to use
 * ia64_fetch_and_add() with a bad value.
......
@@ -28,6 +28,36 @@
#include <asm/processor.h>
#define MMU_CONTEXT_DEBUG 0
#if MMU_CONTEXT_DEBUG
#include <ia64intrin.h>
extern struct mmu_trace_entry {
char op;
u8 cpu;
u32 context;
void *mm;
} mmu_tbuf[1024];
extern volatile int mmu_tbuf_index;
# define MMU_TRACE(_op,_cpu,_mm,_ctx) \
do { \
int i = __sync_fetch_and_add(&mmu_tbuf_index, 1) % ARRAY_SIZE(mmu_tbuf); \
struct mmu_trace_entry e; \
e.op = (_op); \
e.cpu = (_cpu); \
e.mm = (_mm); \
e.context = (_ctx); \
mmu_tbuf[i] = e; \
} while (0)
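/*
 * Each MMU_TRACE hit claims the next slot of the 1024-entry mmu_tbuf ring
 * (the index wraps via the modulo above), so the most recent context
 * operations can be recovered from a debugger after a hang.
 */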
#else
# define MMU_TRACE(op,cpu,mm,ctx) do { ; } while (0)
#endif
struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
@@ -91,6 +121,7 @@ get_mmu_context (struct mm_struct *mm)
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
+	MMU_TRACE('N', smp_processor_id(), mm, 0);
	mm->context = 0;
	return 0;
}
@@ -99,6 +130,7 @@ static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do. */
+	MMU_TRACE('D', smp_processor_id(), mm, mm->context);
}

static inline void
@@ -138,7 +170,9 @@ activate_context (struct mm_struct *mm)
	do {
		context = get_mmu_context(mm);
+		MMU_TRACE('A', smp_processor_id(), mm, context);
		reload_context(context);
+		MMU_TRACE('a', smp_processor_id(), mm, context);
		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
	} while (unlikely(context != mm->context));
}
......
@@ -40,6 +40,7 @@
#define PFM_FL_INHERIT_ALL	 0x02	/* always clone pfm_context across fork() */
#define PFM_FL_NOTIFY_BLOCK	 0x04	/* block task on user level notifications */
#define PFM_FL_SYSTEM_WIDE	 0x08	/* create a system wide context */
+#define PFM_FL_EXCL_IDLE	 0x20	/* exclude idle task from system wide session */

/*
 * PMC flags
@@ -86,11 +87,12 @@ typedef struct {
	unsigned long	reg_long_reset;	     /* reset after sampling buffer overflow (large) */
	unsigned long	reg_short_reset;     /* reset after counter overflow (small) */
	unsigned long	reg_reset_pmds[4];   /* which other counters to reset on overflow */
	unsigned long	reg_random_seed;     /* seed value when randomization is used */
	unsigned long	reg_random_mask;     /* bitmask used to limit random value */
+	unsigned long	reg_last_reset_value;/* last value used to reset the PMD (PFM_READ_PMDS) */

-	unsigned long	reserved[14];	     /* for future use */
+	unsigned long	reserved[13];	     /* for future use */
} pfarg_reg_t;

typedef struct {
* Define the version numbers for both perfmon as a whole and the sampling buffer format. * Define the version numbers for both perfmon as a whole and the sampling buffer format.
*/ */
#define PFM_VERSION_MAJ 1U #define PFM_VERSION_MAJ 1U
#define PFM_VERSION_MIN 1U #define PFM_VERSION_MIN 3U
#define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff)) #define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
#define PFM_SMPL_VERSION_MAJ 1U #define PFM_SMPL_VERSION_MAJ 1U
...@@ -156,13 +158,17 @@ typedef struct { ...@@ -156,13 +158,17 @@ typedef struct {
unsigned long stamp; /* timestamp */ unsigned long stamp; /* timestamp */
unsigned long ip; /* where did the overflow interrupt happened */ unsigned long ip; /* where did the overflow interrupt happened */
unsigned long regs; /* bitmask of which registers overflowed */ unsigned long regs; /* bitmask of which registers overflowed */
unsigned long period; /* unused */ unsigned long reserved; /* unused */
} perfmon_smpl_entry_t; } perfmon_smpl_entry_t;
extern int perfmonctl(pid_t pid, int cmd, void *arg, int narg); extern int perfmonctl(pid_t pid, int cmd, void *arg, int narg);
#ifdef __KERNEL__ #ifdef __KERNEL__
typedef struct {
void (*handler)(int irq, void *arg, struct pt_regs *regs);
} pfm_intr_handler_desc_t;
extern void pfm_save_regs (struct task_struct *);
extern void pfm_load_regs (struct task_struct *);
@@ -174,9 +180,24 @@ extern void pfm_cleanup_owners (struct task_struct *);
extern int  pfm_use_debug_registers(struct task_struct *);
extern int  pfm_release_debug_registers(struct task_struct *);
extern int  pfm_cleanup_smpl_buf(struct task_struct *);
-extern void pfm_syst_wide_update_task(struct task_struct *, int);
+extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, int is_ctxswin);
extern void pfm_ovfl_block_reset(void);
-extern void perfmon_init_percpu(void);
+extern void pfm_init_percpu(void);
/*
 * hooks to allow VTune/Prospect to cooperate with perfmon.
 * (reserved for system wide monitoring modules only)
 */
extern int pfm_install_alternate_syswide_subsystem(pfm_intr_handler_desc_t *h);
extern int pfm_remove_alternate_syswide_subsystem(pfm_intr_handler_desc_t *h);

/*
 * describe the content of the local_cpu_data->pfm_syst_info field
 */
#define PFM_CPUINFO_SYST_WIDE	0x1	/* if set, a system wide session exists */
#define PFM_CPUINFO_DCR_PP	0x2	/* if set, the system wide session has started */
#define PFM_CPUINFO_EXCL_IDLE	0x4	/* the system wide session excludes the idle task */
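/*
 * For example, a context-switch path could combine these bits as follows
 * (sketch only, not part of this header):
 *
 *	info = get_cpu_var(pfm_syst_info);
 *	if ((info & PFM_CPUINFO_SYST_WIDE) && (info & PFM_CPUINFO_EXCL_IDLE)
 *	    && task->pid == 0)
 *		;	// leave the PMU off while the idle task runs
 */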
#endif /* __KERNEL__ */
......
@@ -2,7 +2,7 @@
#define _ASM_IA64_PROCESSOR_H

/*
- * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
@@ -223,7 +223,10 @@ typedef struct {
struct siginfo;

struct thread_struct {
-	__u64 flags;			/* various thread flags (see IA64_THREAD_*) */
+	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
+	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
+	__u8 on_ustack;			/* executing on user-stacks? */
+	__u8 pad[3];
	__u64 ksp;			/* kernel stack pointer */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 task_size;		/* limit for task size */
@@ -277,6 +280,7 @@ struct thread_struct {
#define INIT_THREAD {					\
	.flags =	0,				\
+	.on_ustack =	0,				\
	.ksp =		0,				\
	.map_base =	DEFAULT_MAP_BASE,		\
	.task_size =	DEFAULT_TASK_SIZE,		\
......
@@ -2,7 +2,7 @@
#define _ASM_IA64_PTRACE_H

/*
- * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 *
@@ -218,6 +218,13 @@ struct switch_stack {
# define ia64_task_regs(t)		(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs)			((struct ia64_psr *) &(regs)->cr_ipsr)
# define user_mode(regs)		(((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
# define user_stack(task,regs)	((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
# define fsys_mode(task,regs)				\
  ({							\
	  struct task_struct *_task = (task);		\
	  struct pt_regs *_regs = (regs);		\
	  !user_mode(_regs) && user_stack(_task, _regs);\
  })
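/*
 * fsys_mode() is true when executing at privilege level 0 but still on the
 * user-level stacks and register backing store, which is exactly the state
 * of the light-weight (epc-based) system-call path; the privilege level
 * alone cannot identify that state, hence the user_stack() check.
 */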
struct task_struct;			/* forward decl */
......
@@ -74,6 +74,27 @@ typedef struct {
#define SPIN_LOCK_UNLOCKED			(spinlock_t) { 0 }
#define spin_lock_init(x)			((x)->lock = 0)
#define DEBUG_SPIN_LOCK 0
#if DEBUG_SPIN_LOCK
#include <ia64intrin.h>
#define _raw_spin_lock(x) \
do { \
unsigned long _timeout = 1000000000; \
volatile unsigned int _old = 0, _new = 1, *_ptr = &((x)->lock); \
do { \
if (_timeout-- == 0) { \
extern void dump_stack (void); \
printk("kernel DEADLOCK at %s:%d?\n", __FILE__, __LINE__); \
dump_stack(); \
} \
} while (__sync_val_compare_and_swap(_ptr, _old, _new) != _old); \
} while (0)
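/*
 * With DEBUG_SPIN_LOCK set to 1, a CPU that fails the compare-and-swap
 * roughly 10^9 times in a row reports a suspected deadlock together with
 * a stack dump, then keeps spinning rather than panicking.
 */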
#else
/*
 * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
 * rather than a simple xchg to avoid writing the cache-line when
@@ -95,6 +116,8 @@ typedef struct {
";;\n" \ ";;\n" \
:: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory") :: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory")
#endif /* !DEBUG_SPIN_LOCK */
#define spin_is_locked(x) ((x)->lock != 0) #define spin_is_locked(x) ((x)->lock != 0)
#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0) #define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) #define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
......
@@ -117,62 +117,51 @@ ia64_insn_group_barrier (void)
 */

/* For spinlocks etc */
/* clearing psr.i is implicitly serialized (visible by next insn) */
/* setting psr.i requires data serialization */
#define __local_irq_save(x) __asm__ __volatile__ ("mov %0=psr;;" \
"rsm psr.i;;" \
: "=r" (x) :: "memory")
#define __local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
#define __local_irq_restore(x) __asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;" \
"(p6) ssm psr.i;" \
"(p7) rsm psr.i;;" \
"(p6) srlz.d" \
:: "r" ((x) & IA64_PSR_I) \
: "p6", "p7", "memory")
#ifdef CONFIG_IA64_DEBUG_IRQ

  extern unsigned long last_cli_ip;

+# define __save_ip()		__asm__ ("mov %0=ip" : "=r" (last_cli_ip))

-# define local_irq_save(x)							\
-do {										\
-	unsigned long ip, psr;							\
-										\
-	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) :: "memory"); \
-	if (psr & (1UL << 14)) {						\
-		__asm__ ("mov %0=ip" : "=r"(ip));				\
-		last_cli_ip = ip;						\
-	}									\
-	(x) = psr;								\
-} while (0)
+# define local_irq_save(x)					\
+do {								\
+	unsigned long psr;					\
+								\
+	__local_irq_save(psr);					\
+	if (psr & IA64_PSR_I)					\
+		__save_ip();					\
+	(x) = psr;						\
+} while (0)

-# define local_irq_disable()							\
-do {										\
-	unsigned long ip, psr;							\
-										\
-	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) :: "memory"); \
-	if (psr & (1UL << 14)) {						\
-		__asm__ ("mov %0=ip" : "=r"(ip));				\
-		last_cli_ip = ip;						\
-	}									\
-} while (0)
+# define local_irq_disable()	do { unsigned long x; local_irq_save(x); } while (0)

-# define local_irq_restore(x)						\
-do {									\
-	unsigned long ip, old_psr, psr = (x);				\
-									\
-	__asm__ __volatile__ ("mov %0=psr;"				\
-			      "cmp.ne p6,p7=%1,r0;;"			\
-			      "(p6) ssm psr.i;"				\
-			      "(p7) rsm psr.i;;"			\
-			      "(p6) srlz.d"				\
-			      : "=r" (old_psr) : "r"((psr) & IA64_PSR_I) \
-			      : "p6", "p7", "memory");			\
-	if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) {		\
-		__asm__ ("mov %0=ip" : "=r"(ip));			\
-		last_cli_ip = ip;					\
-	}								\
-} while (0)
+# define local_irq_restore(x)					\
+do {								\
+	unsigned long old_psr, psr = (x);			\
+								\
+	local_save_flags(old_psr);				\
+	__local_irq_restore(psr);				\
+	if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I))	\
+		__save_ip();					\
+} while (0)

#else /* !CONFIG_IA64_DEBUG_IRQ */

-/* clearing of psr.i is implicitly serialized (visible by next insn) */
-# define local_irq_save(x)	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;"	\
-						      : "=r" (x) :: "memory")
-# define local_irq_disable()	__asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
-/* (potentially) setting psr.i requires data serialization: */
-# define local_irq_restore(x)	__asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;"	\
-						      "(p6) ssm psr.i;"		\
-						      "(p7) rsm psr.i;;"	\
-						      "srlz.d"			\
-						      :: "r"((x) & IA64_PSR_I)	\
-						      : "p6", "p7", "memory")
+# define local_irq_save(x)	__local_irq_save(x)
+# define local_irq_disable()	__local_irq_disable()
+# define local_irq_restore(x)	__local_irq_restore(x)

#endif /* !CONFIG_IA64_DEBUG_IRQ */
#define local_irq_enable()	__asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
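A typical critical section under these macros looks as follows (standard
kernel idiom, shown only to illustrate the save/restore pairing):

	unsigned long flags;

	local_irq_save(flags);		/* psr.i cleared; previous psr in flags */
	/* ... critical section ... */
	local_irq_restore(flags);	/* re-enables only if it was enabled before */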
@@ -216,8 +205,8 @@ extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);

#ifdef CONFIG_PERFMON
-  DECLARE_PER_CPU(int, pfm_syst_wide);
-# define PERFMON_IS_SYSWIDE() (get_cpu_var(pfm_syst_wide) != 0)
+  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
+# define PERFMON_IS_SYSWIDE() (get_cpu_var(pfm_syst_info) & 0x1)
#else
# define PERFMON_IS_SYSWIDE() (0)
#endif
......
@@ -47,19 +47,22 @@ local_finish_flush_tlb_mm (struct mm_struct *mm)
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
+	MMU_TRACE('F', smp_processor_id(), mm, mm->context);
	if (!mm)
-		return;
+		goto out;

	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
-		return;		/* happens as a result of exit_mmap() */
+		goto out;	/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
+  out:
+	MMU_TRACE('f', smp_processor_id(), mm, mm->context);
}
extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
......
@@ -4,7 +4,7 @@
/*
 * IA-64 Linux syscall numbers and inline-functions.
 *
- * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
...@@ -223,8 +223,8 @@ ...@@ -223,8 +223,8 @@
#define __NR_sched_setaffinity 1231 #define __NR_sched_setaffinity 1231
#define __NR_sched_getaffinity 1232 #define __NR_sched_getaffinity 1232
#define __NR_set_tid_address 1233 #define __NR_set_tid_address 1233
/* #define __NR_alloc_hugepages 1234 reusable */ /* 1234 available for reuse */
/* #define __NR_free_hugepages 1235 reusable */ /* 1235 available for reuse */
#define __NR_exit_group 1236 #define __NR_exit_group 1236
#define __NR_lookup_dcookie 1237 #define __NR_lookup_dcookie 1237
#define __NR_io_setup 1238 #define __NR_io_setup 1238
......