Commit 75b08514 authored by Linus Torvalds

Merge http://lia64.bkbits.net/to-linus-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents b1d7a6ca 358e377e
On some platforms, so-called memory-mapped I/O is weakly ordered. On such
platforms, driver writers are responsible for ensuring that I/O writes to
memory-mapped addresses on their device arrive in the order intended. This is
typically done by reading a 'safe' device or bridge register, causing the I/O
chipset to flush pending writes to the device before any reads are posted. A
driver would usually use this technique immediately prior to the exit of a
critical section of code protected by spinlocks. This ensures that
subsequent writes to I/O space arrive only after all prior writes (much like a
memory barrier op, mb(), only with respect to I/O).
A more concrete example from a hypothetical device driver:
...
CPU A: spin_lock_irqsave(&dev_lock, flags)
CPU A: val = readl(my_status);
CPU A: ...
CPU A: writel(newval, ring_ptr);
CPU A: spin_unlock_irqrestore(&dev_lock, flags)
...
CPU B: spin_lock_irqsave(&dev_lock, flags)
CPU B: val = readl(my_status);
CPU B: ...
CPU B: writel(newval2, ring_ptr);
CPU B: spin_unlock_irqrestore(&dev_lock, flags)
...
In the case above, the device may receive newval2 before it receives newval,
which could cause problems. Fixing it is easy enough though:
...
CPU A: spin_lock_irqsave(&dev_lock, flags)
CPU A: val = readl(my_status);
CPU A: ...
CPU A: writel(newval, ring_ptr);
CPU A: (void)readl(safe_register); /* maybe a config register? */
CPU A: spin_unlock_irqrestore(&dev_lock, flags)
...
CPU B: spin_lock_irqsave(&dev_lock, flags)
CPU B: val = readl(my_status);
CPU B: ...
CPU B: writel(newval2, ring_ptr);
CPU B: (void)readl(safe_register); /* maybe a config register? */
CPU B: spin_unlock_irqrestore(&dev_lock, flags)
Here, the reads from safe_register will cause the I/O chipset to flush any
pending writes before actually posting the read to the chipset, preventing
possible data corruption.
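
Wrapped up as a helper, the corrected sequence might look like the sketch
below (illustrative only: the structure, the register offsets, and all the
names are hypothetical, not taken from any real driver):

#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/io.h>

#define RING_PTR	0x10			/* hypothetical register offsets */
#define SAFE_REG	0x14

struct my_dev {
	spinlock_t lock;
	void *mmio;				/* ioremap()ed register window */
};

static void
my_dev_post (struct my_dev *dev, u32 newval)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	writel(newval, dev->mmio + RING_PTR);	/* may be posted by the chipset */
	(void) readl(dev->mmio + SAFE_REG);	/* flush posted writes before unlocking */
	spin_unlock_irqrestore(&dev->lock, flags);
}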
@@ -401,6 +401,15 @@ config HUGETLB_PAGE_SIZE_256KB

 endchoice

+config IA64_PAL_IDLE
+	bool "Use PAL_HALT_LIGHT in idle loop"
+	---help---
+	  Say Y here to enable use of PAL_HALT_LIGHT in the cpu_idle loop.
+	  This allows the CPU to enter a low power state when idle. You
+	  can enable CONFIG_IA64_PALINFO and check /proc/pal/cpu0/power_info
+	  to see the power consumption and latency for this state. If you're
+	  unsure your firmware supports it, answer N.
+
 config SMP
 	bool "SMP support"
 	---help---
...
@@ -14,6 +14,7 @@ export AWK

 OBJCOPYFLAGS	:= --strip-all
 LDFLAGS_vmlinux	:= -static
+LDFLAGS_MODULE	+= -T arch/ia64/module.lds
 AFLAGS_KERNEL	:= -mconstant-gp
 EXTRA		:=

@@ -23,7 +24,7 @@ CFLAGS_KERNEL := -mconstant-gp

 GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.')

-GAS_STATUS=$(shell arch/ia64/scripts/check-gas $(CC))
+GAS_STATUS=$(shell arch/ia64/scripts/check-gas $(CC) $(OBJDUMP))

 ifeq ($(GAS_STATUS),buggy)
 $(error Sorry, you need a newer version of the assembler, one that is built from \

@@ -50,11 +51,8 @@ core-$(CONFIG_IA64_DIG) += arch/ia64/dig/

 core-$(CONFIG_IA64_GENERIC)	+= arch/ia64/dig/ arch/ia64/hp/common/ arch/ia64/hp/zx1/ \
				   arch/ia64/hp/sim/
 core-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/dig/
-core-$(CONFIG_IA64_SGI_SN)	+= arch/ia64/sn/kernel/ \
-				   arch/ia64/sn/io/ \
-				   arch/ia64/sn/io/sn2/ \
-				   arch/ia64/sn/io/sn2/pcibr/ \
-				   arch/ia64/sn/kernel/sn2/
+core-$(CONFIG_IA64_SGI_SN)	+= arch/ia64/sn/

 drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/
 drivers-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/
...
@@ -9,7 +9,6 @@
 #

 targets-$(CONFIG_IA64_HP_SIM)  += bootloader
-targets-$(CONFIG_IA64_GENERIC) += bootloader
 targets := vmlinux.bin vmlinux.gz $(targets-y)

 quiet_cmd_cptotop = LN      $@
...
@@ -1497,7 +1497,7 @@ static int sba_proc_info(char *buf, char **start, off_t offset, int len)
 	ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
 	total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */

-	sprintf(buf, "%s rev %d.%d\n", "Hewlett Packard zx1 SBA",
+	sprintf(buf, "%s rev %d.%d\n", "Hewlett-Packard zx1 SBA",
 		((sba_dev->hw_rev >> 4) & 0xF), (sba_dev->hw_rev & 0xF));
 	sprintf(buf, "%sIO PDIR size    : %d bytes (%d entries)\n", buf,
 		(int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ total_pages);
...
@@ -253,7 +253,7 @@ ia32_syscall_table:
 	data8 sys_umount	  /* recycled never used phys( */
 	data8 sys32_ni_syscall	  /* old lock syscall holder */
 	data8 sys32_ioctl
-	data8 sys32_fcntl	  /* 55 */
+	data8 compat_sys_fcntl	  /* 55 */
 	data8 sys32_ni_syscall	  /* old mpx syscall holder */
 	data8 sys_setpgid
 	data8 sys32_ni_syscall	  /* old ulimit syscall holder */

@@ -419,7 +419,7 @@ ia32_syscall_table:
 	data8 sys_mincore
 	data8 sys_madvise
 	data8 sys_getdents64	  /* 220 */
-	data8 sys32_fcntl64
+	data8 compat_sys_fcntl64
 	data8 sys_ni_syscall	  /* reserved for TUX */
 	data8 sys_ni_syscall	  /* reserved for Security */
 	data8 sys_gettid
...
@@ -114,6 +114,7 @@ copy_siginfo_from_user32 (siginfo_t *to, siginfo_t32 *from)
 int
 copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
 {
+	unsigned int addr;
 	int err;

 	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t32)))

@@ -148,6 +149,12 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
 			err |= __put_user(from->si_band, &to->si_band);
 			err |= __put_user(from->si_fd, &to->si_fd);
 			break;
+		case __SI_TIMER >> 16:
+			err |= __put_user(from->si_tid, &to->si_tid);
+			err |= __put_user(from->si_overrun, &to->si_overrun);
+			addr = (unsigned long) from->si_ptr;
+			err |= __put_user(addr, &to->si_ptr);
+			break;
 		/* case __SI_RT: This is not generated by the kernel as of now. */
 		}
 	}
...
@@ -119,10 +119,8 @@ nargs (unsigned int arg, char **ap)

 asmlinkage long
-sys32_execve (char *filename, unsigned int argv, unsigned int envp,
-	      int dummy3, int dummy4, int dummy5, int dummy6, int dummy7,
-	      int stack)
+sys32_execve (char *filename, unsigned int argv, unsigned int envp, struct pt_regs *regs)
 {
-	struct pt_regs *regs = (struct pt_regs *)&stack;
 	unsigned long old_map_base, old_task_size, tssd;
 	char **av, **ae;
 	int na, ne, len;

@@ -1701,7 +1699,7 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 		return shmctl32(first, second, (void *)AA(ptr));

 	default:
-		return -EINVAL;
+		return -ENOSYS;
 	}
 	return -EINVAL;
 }
...
@@ -2156,26 +2154,23 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
 	ret = -ESRCH;
 	read_lock(&tasklist_lock);
 	child = find_task_by_pid(pid);
+	if (child)
+		get_task_struct(child);
 	read_unlock(&tasklist_lock);
 	if (!child)
 		goto out;
 	ret = -EPERM;
 	if (pid == 1)		/* no messing around with init! */
-		goto out;
+		goto out_tsk;

 	if (request == PTRACE_ATTACH) {
 		ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
-		goto out;
+		goto out_tsk;
 	}
-	ret = -ESRCH;
-	if (!(child->ptrace & PT_PTRACED))
-		goto out;
-	if (child->state != TASK_STOPPED) {
-		if (request != PTRACE_KILL)
-			goto out;
-	}
-	if (child->parent != current)
-		goto out;
+
+	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+	if (ret < 0)
+		goto out_tsk;

 	switch (request) {
 	      case PTRACE_PEEKTEXT:

@@ -2185,12 +2180,12 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
 			ret = put_user(value, (unsigned int *) A(data));
 		else
 			ret = -EIO;
-		goto out;
+		goto out_tsk;

 	      case PTRACE_POKETEXT:
 	      case PTRACE_POKEDATA:	/* write the word at location addr */
 		ret = ia32_poke(regs, child, addr, data);
-		goto out;
+		goto out_tsk;

 	      case PTRACE_PEEKUSR:	/* read word at addr in USER area */
 		ret = -EIO;

@@ -2265,43 +2260,13 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
 		break;
 	}
+  out_tsk:
+	put_task_struct(child);
   out:
 	unlock_kernel();
 	return ret;
 }

-extern asmlinkage long sys_fcntl (unsigned int fd, unsigned int cmd, unsigned long arg);
-
-asmlinkage long
-sys32_fcntl (unsigned int fd, unsigned int cmd, unsigned int arg)
-{
-	mm_segment_t old_fs;
-	struct flock f;
-	long ret;
-
-	switch (cmd) {
-	      case F_GETLK:
-	      case F_SETLK:
-	      case F_SETLKW:
-		if (get_compat_flock(&f, (struct compat_flock *) A(arg)))
-			return -EFAULT;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		ret = sys_fcntl(fd, cmd, (unsigned long) &f);
-		set_fs(old_fs);
-		if (cmd == F_GETLK && put_compat_flock(&f, (struct compat_flock *) A(arg)))
-			return -EFAULT;
-		return ret;
-
-	      default:
-		/*
-		 *  `sys_fcntl' lies about arg, for the F_SETOWN
-		 *  sub-function arg can have a negative value.
-		 */
-		return sys_fcntl(fd, cmd, arg);
-	}
-}
-
 asmlinkage long sys_ni_syscall(void);

 asmlinkage long

@@ -2596,66 +2561,6 @@ sys32_setgroups16 (int gidsetsize, short *grouplist)
 	return ret;
 }

-/*
- * Unfortunately, the x86 compiler aligns variables of type "long long" to a 4 byte boundary
- * only, which means that the x86 version of "struct flock64" doesn't match the ia64 version
- * of struct flock.
- */
-static inline long
-ia32_put_flock (struct flock *l, unsigned long addr)
-{
-	return (put_user(l->l_type, (short *) addr)
-		| put_user(l->l_whence, (short *) (addr + 2))
-		| put_user(l->l_start, (long *) (addr + 4))
-		| put_user(l->l_len, (long *) (addr + 12))
-		| put_user(l->l_pid, (int *) (addr + 20)));
-}
-
-static inline long
-ia32_get_flock (struct flock *l, unsigned long addr)
-{
-	unsigned int start_lo, start_hi, len_lo, len_hi;
-	int err = (get_user(l->l_type, (short *) addr)
-		   | get_user(l->l_whence, (short *) (addr + 2))
-		   | get_user(start_lo, (int *) (addr + 4))
-		   | get_user(start_hi, (int *) (addr + 8))
-		   | get_user(len_lo, (int *) (addr + 12))
-		   | get_user(len_hi, (int *) (addr + 16))
-		   | get_user(l->l_pid, (int *) (addr + 20)));
-
-	l->l_start = ((unsigned long) start_hi << 32) | start_lo;
-	l->l_len = ((unsigned long) len_hi << 32) | len_lo;
-	return err;
-}
-
-asmlinkage long
-sys32_fcntl64 (unsigned int fd, unsigned int cmd, unsigned int arg)
-{
-	mm_segment_t old_fs;
-	struct flock f;
-	long ret;
-
-	switch (cmd) {
-	      case F_GETLK64:
-	      case F_SETLK64:
-	      case F_SETLKW64:
-		if (ia32_get_flock(&f, arg))
-			return -EFAULT;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		ret = sys_fcntl(fd, cmd, (unsigned long) &f);
-		set_fs(old_fs);
-		if (cmd == F_GETLK && ia32_put_flock(&f, arg))
-			return -EFAULT;
-		break;
-
-	      default:
-		ret = sys32_fcntl(fd, cmd, arg);
-		break;
-	}
-	return ret;
-}
-
 asmlinkage long
 sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
 {
...
@@ -4,16 +4,15 @@

 extra-y := head.o init_task.o

-obj-y := acpi.o entry.o gate.o efi.o efi_stub.o ia64_ksyms.o \
-	 irq.o irq_ia64.o irq_lsapic.o ivt.o \
-	 machvec.o pal.o process.o perfmon.o ptrace.o sal.o \
-	 semaphore.o setup.o \
-	 signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o
+obj-y := acpi.o entry.o efi.o efi_stub.o gate.o ia64_ksyms.o irq.o irq_ia64.o irq_lsapic.o \
+	 ivt.o machvec.o pal.o perfmon.o process.o ptrace.o sal.o semaphore.o setup.o signal.o \
+	 sys_ia64.o time.o traps.o unaligned.o unwind.o

+obj-$(CONFIG_EFI_VARS)		+= efivars.o
 obj-$(CONFIG_FSYS)		+= fsys.o
-obj-$(CONFIG_IOSAPIC)		+= iosapic.o
+obj-$(CONFIG_IA64_BRL_EMU)	+= brl_emu.o
+obj-$(CONFIG_IA64_MCA)		+= mca.o mca_asm.o
 obj-$(CONFIG_IA64_PALINFO)	+= palinfo.o
-obj-$(CONFIG_EFI_VARS)		+= efivars.o
+obj-$(CONFIG_IOSAPIC)		+= iosapic.o
+obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_SMP)		+= smp.o smpboot.o
-obj-$(CONFIG_IA64_MCA)		+= mca.o mca_asm.o
-obj-$(CONFIG_IA64_BRL_EMU)	+= brl_emu.o
/*
* arch/ia64/kernel/acpi-ext.c
*
* Copyright (C) 2003 Hewlett-Packard
* Copyright (C) Alex Williamson
*
* Vendor specific extensions to ACPI. These are used by both
* HP and NEC.
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <asm/acpi-ext.h>
/*
* Note: Strictly speaking, this is only needed for HP and NEC machines.
* However, NEC machines identify themselves as DIG-compliant, so there is
* no easy way to #ifdef this out.
*/
acpi_status
hp_acpi_csr_space (acpi_handle obj, u64 *csr_base, u64 *csr_length)
{
int i, offset = 0;
acpi_status status;
struct acpi_buffer buf;
struct acpi_resource_vendor *res;
struct acpi_hp_vendor_long *hp_res;
efi_guid_t vendor_guid;
*csr_base = 0;
*csr_length = 0;
status = acpi_get_crs(obj, &buf);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to get _CRS data on object\n");
return status;
}
res = (struct acpi_resource_vendor *)acpi_get_crs_type(&buf, &offset, ACPI_RSTYPE_VENDOR);
if (!res) {
printk(KERN_ERR PREFIX "Failed to find config space for device\n");
acpi_dispose_crs(&buf);
return AE_NOT_FOUND;
}
hp_res = (struct acpi_hp_vendor_long *)(res->reserved);
if (res->length != HP_CCSR_LENGTH || hp_res->guid_id != HP_CCSR_TYPE) {
printk(KERN_ERR PREFIX "Unknown Vendor data\n");
acpi_dispose_crs(&buf);
return AE_TYPE; /* Revisit error? */
}
memcpy(&vendor_guid, hp_res->guid, sizeof(efi_guid_t));
if (efi_guidcmp(vendor_guid, HP_CCSR_GUID) != 0) {
printk(KERN_ERR PREFIX "Vendor GUID does not match\n");
acpi_dispose_crs(&buf);
return AE_TYPE; /* Revisit error? */
}
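	/* (Editorial note: the vendor resource carries the CSR base and length
	 * as 8-byte little-endian arrays; the loop below assembles each into
	 * a u64, least-significant byte first.) */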
for (i = 0 ; i < 8 ; i++) {
*csr_base |= ((u64)(hp_res->csr_base[i]) << (i * 8));
*csr_length |= ((u64)(hp_res->csr_length[i]) << (i * 8));
}
acpi_dispose_crs(&buf);
return AE_OK;
}
@@ -9,7 +9,7 @@
  * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
  * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
- * Copyright (C) 2001 Takayoshi Kochi <t-kouchi@cq.jp.nec.com>
+ * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
  * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -109,8 +109,6 @@ acpi_get_sysname (void)
 		return "sn2";
 # elif defined (CONFIG_IA64_DIG)
 		return "dig";
-# elif defined (CONFIG_IA64_HP_ZX1)
-		return "hpzx1";
 # else
 #	error Unknown platform.  Fix acpi.c.
 # endif

@@ -176,6 +174,73 @@ acpi_dispose_crs (struct acpi_buffer *buf)
 	kfree(buf->pointer);
 }

+void
+acpi_get_crs_addr (struct acpi_buffer *buf, int type, u64 *base, u64 *size, u64 *tra)
+{
+	int offset = 0;
+	struct acpi_resource_address16 *addr16;
+	struct acpi_resource_address32 *addr32;
+	struct acpi_resource_address64 *addr64;
+
+	for (;;) {
+		struct acpi_resource *res = acpi_get_crs_next(buf, &offset);
+		if (!res)
+			return;
+		switch (res->id) {
+		case ACPI_RSTYPE_ADDRESS16:
+			addr16 = (struct acpi_resource_address16 *) &res->data;
+			if (type == addr16->resource_type) {
+				*base = addr16->min_address_range;
+				*size = addr16->address_length;
+				*tra = addr16->address_translation_offset;
+				return;
+			}
+			break;
+		case ACPI_RSTYPE_ADDRESS32:
+			addr32 = (struct acpi_resource_address32 *) &res->data;
+			if (type == addr32->resource_type) {
+				*base = addr32->min_address_range;
+				*size = addr32->address_length;
+				*tra = addr32->address_translation_offset;
+				return;
+			}
+			break;
+		case ACPI_RSTYPE_ADDRESS64:
+			addr64 = (struct acpi_resource_address64 *) &res->data;
+			if (type == addr64->resource_type) {
+				*base = addr64->min_address_range;
+				*size = addr64->address_length;
+				*tra = addr64->address_translation_offset;
+				return;
+			}
+			break;
+		}
+	}
+}
+
+int
+acpi_get_addr_space(void *obj, u8 type, u64 *base, u64 *length, u64 *tra)
+{
+	acpi_status status;
+	struct acpi_buffer buf;
+
+	*base = 0;
+	*length = 0;
+	*tra = 0;
+
+	status = acpi_get_crs((acpi_handle)obj, &buf);
+	if (ACPI_FAILURE(status)) {
+		printk(KERN_ERR PREFIX "Unable to get _CRS data on object\n");
+		return status;
+	}
+
+	acpi_get_crs_addr(&buf, type, base, length, tra);
+
+	acpi_dispose_crs(&buf);
+
+	return AE_OK;
+}
+
 #endif /* CONFIG_ACPI */

 #ifdef CONFIG_ACPI_BOOT

@@ -808,6 +873,7 @@ acpi_get_prt (struct pci_vector_struct **vectors, int *count)
 	list_for_each(node, &acpi_prt.entries) {
 		entry = (struct acpi_prt_entry *)node;
+		vector[i].segment = entry->id.segment;
 		vector[i].bus	 = entry->id.bus;
 		vector[i].pci_id = ((u32) entry->id.device << 16) | 0xffff;
 		vector[i].pin	 = entry->pin;
...
@@ -91,7 +91,7 @@ ENTRY(ia64_execve)
 END(ia64_execve)

 /*
- * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 child_tidptr, u64 parent_tidptr,
+ * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
  *	      u64 tls)
  */
 GLOBAL_ENTRY(sys_clone2)

@@ -105,10 +105,10 @@ GLOBAL_ENTRY(sys_clone2)
 	mov out1=in1
 	mov out3=in2
 	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
-	mov out4=in3	// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
+	mov out4=in3	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
 	;;
 (p6)	st8 [r2]=in5	// store TLS in r16 for copy_thread()
-	mov out5=in4	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
+	mov out5=in4	// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
 	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
 	dep out0=0,in0,CLONE_IDLETASK_BIT,1	// out0 = clone_flags & ~CLONE_IDLETASK
 	br.call.sptk.many rp=do_fork

@@ -126,12 +126,12 @@ GLOBAL_ENTRY(sys_clone2)
 END(sys_clone2)

 /*
- * sys_clone(u64 flags, u64 ustack_base, u64 user_tid, u64 tls)
+ * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
  * Deprecated.  Use sys_clone2() instead.
  */
 GLOBAL_ENTRY(sys_clone)
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
-	alloc r16=ar.pfs,4,2,5,0
+	alloc r16=ar.pfs,5,2,6,0
 	DO_SAVE_SWITCH_STACK
 	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
 	mov loc0=rp

@@ -140,9 +140,10 @@ GLOBAL_ENTRY(sys_clone)
 	mov out1=in1
 	mov out3=16	// stacksize (compensates for 16-byte scratch area)
 	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
-	mov out4=in2	// out4 = user_tid (optional)
+	mov out4=in2	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
 	;;
-(p6)	st8 [r2]=in3	// store TLS in r13 (tp)
+(p6)	st8 [r2]=in4	// store TLS in r13 (tp)
+	mov out5=in3	// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
 	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
 	dep out0=0,in0,CLONE_IDLETASK_BIT,1	// out0 = clone_flags & ~CLONE_IDLETASK
 	br.call.sptk.many rp=do_fork
...
@@ -533,15 +533,15 @@ fsyscall_table:
 	data8 fsys_fallback_syscall	// epoll_wait		// 1245
 	data8 fsys_fallback_syscall	// restart_syscall
 	data8 fsys_fallback_syscall	// semtimedop
-	data8 fsys_fallback_syscall
-	data8 fsys_fallback_syscall
-	data8 fsys_fallback_syscall				// 1250
-	data8 fsys_fallback_syscall
-	data8 fsys_fallback_syscall
-	data8 fsys_fallback_syscall
-	data8 fsys_fallback_syscall
-	data8 fsys_fallback_syscall				// 1255
-	data8 fsys_fallback_syscall
+	data8 fsys_fallback_syscall	// timer_create
+	data8 fsys_fallback_syscall	// timer_settime
+	data8 fsys_fallback_syscall	// timer_gettime	// 1250
+	data8 fsys_fallback_syscall	// timer_getoverrun
+	data8 fsys_fallback_syscall	// timer_delete
+	data8 fsys_fallback_syscall	// clock_settime
+	data8 fsys_fallback_syscall	// clock_gettime
+	data8 fsys_fallback_syscall	// clock_getres		// 1255
+	data8 fsys_fallback_syscall	// clock_nanosleep
 	data8 fsys_fallback_syscall
 	data8 fsys_fallback_syscall
 	data8 fsys_fallback_syscall
...
@@ -733,73 +733,3 @@ SET_REG(b4);
 SET_REG(b5);

 #endif /* CONFIG_IA64_BRL_EMU */
-
-#ifdef CONFIG_SMP
-
-/*
- * This routine handles spinlock contention.  It uses a simple exponential backoff
- * algorithm to reduce unnecessary bus traffic.  The initial delay is selected from
- * the low-order bits of the cycle counter (a cheap "randomizer").  I'm sure this
- * could use additional tuning, especially on systems with a large number of CPUs.
- * Also, I think the maximum delay should be made a function of the number of CPUs in
- * the system. --davidm 00/08/05
- *
- * WARNING: This is not a normal procedure.  It gets called from C code without
- * the compiler knowing about it.  Thus, we must not use any scratch registers
- * beyond those that were declared "clobbered" at the call-site (see spin_lock()
- * macro).  We may not even use the stacked registers, because that could overwrite
- * output registers.  Similarly, we can't use the scratch stack area as it may be
- * in use, too.
- *
- * Inputs:
- *	ar.ccv = 0 (and available for use)
- *	r28 = available for use
- *	r29 = available for use
- *	r30 = non-zero (and available for use)
- *	r31 = address of lock we're trying to acquire
- *	p15 = available for use
- */
-
-#	define delay	r28
-#	define timeout	r29
-#	define tmp	r30
-
-GLOBAL_ENTRY(ia64_spinlock_contention)
-	mov tmp=ar.itc
-	;;
-	and delay=0x3f,tmp
-	;;
-
-.retry:	add timeout=tmp,delay
-	shl delay=delay,1
-	;;
-	dep delay=delay,r0,0,13	// limit delay to 8192 cycles
-	;;
-	// delay a little...
-.wait:	sub tmp=tmp,timeout
-	or delay=0xf,delay	// make sure delay is non-zero (otherwise we get stuck with 0)
-	;;
-	cmp.lt p15,p0=tmp,r0
-	mov tmp=ar.itc
-(p15)	br.cond.sptk .wait
-	;;
-	ld4 tmp=[r31]
-	;;
-	cmp.ne p15,p0=tmp,r0
-	mov tmp=ar.itc
-(p15)	br.cond.sptk .retry	// lock is still busy
-	;;
-	// try acquiring lock (we know ar.ccv is still zero!):
-	mov tmp=1
-	;;
-	cmpxchg4.acq tmp=[r31],tmp,ar.ccv
-	;;
-	cmp.eq p15,p0=tmp,r0
-	mov tmp=ar.itc
-(p15)	br.ret.sptk.many b7	// got lock -> return
-	br .retry		// still no luck, retry
-
-END(ia64_spinlock_contention)
-
-#endif
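
(For reference, the backoff scheme implemented by the assembly removed above
corresponds roughly to this C sketch; read_cycles() and cmpxchg_acq() are
editorial stand-ins for reading ar.itc and for cmpxchg4.acq, not real kernel
interfaces.)

extern unsigned long read_cycles(void);		/* stand-in for ar.itc */
extern unsigned int cmpxchg_acq(volatile unsigned int *addr,
				unsigned int oldval, unsigned int newval);

static void
spinlock_contention (volatile unsigned int *lock)
{
	unsigned long delay = read_cycles() & 0x3f;	/* cheap "randomizer" */

	for (;;) {
		unsigned long timeout = read_cycles() + delay;

		/* double the delay, keep it non-zero, cap it at 8192 cycles */
		delay = ((delay << 1) | 0xf) & 0x1fff;
		while ((long) (read_cycles() - timeout) < 0)
			;				/* burn the backoff interval */
		if (*lock != 0)
			continue;			/* still held: back off again */
		if (cmpxchg_acq(lock, 0, 1) == 0)
			return;				/* acquired */
	}
}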
@@ -57,9 +57,7 @@ EXPORT_SYMBOL_NOVERS(__up);
 EXPORT_SYMBOL(clear_page);

 #include <asm/processor.h>
-# ifndef CONFIG_NUMA
 EXPORT_SYMBOL(cpu_info__per_cpu);
-# endif
 EXPORT_SYMBOL(kernel_thread);

 #include <asm/system.h>

@@ -147,3 +145,19 @@ EXPORT_SYMBOL(machvec_noop);
 EXPORT_SYMBOL(pfm_install_alternate_syswide_subsystem);
 EXPORT_SYMBOL(pfm_remove_alternate_syswide_subsystem);
 #endif
+
+#ifdef CONFIG_NUMA
+#include <asm/numa.h>
+EXPORT_SYMBOL(cpu_to_node_map);
+#endif
+
+#include <asm/unwind.h>
+EXPORT_SYMBOL(unw_init_from_blocked_task);
+EXPORT_SYMBOL(unw_init_running);
+EXPORT_SYMBOL(unw_unwind);
+EXPORT_SYMBOL(unw_unwind_to_user);
+EXPORT_SYMBOL(unw_access_gr);
+EXPORT_SYMBOL(unw_access_br);
+EXPORT_SYMBOL(unw_access_fr);
+EXPORT_SYMBOL(unw_access_ar);
+EXPORT_SYMBOL(unw_access_pr);
@@ -42,6 +42,10 @@
 #include <linux/smp_lock.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>

 #include <asm/machvec.h>
 #include <asm/page.h>

@@ -67,7 +71,7 @@ u64 ia64_mca_proc_state_dump[512];
 u64 ia64_mca_stack[1024] __attribute__((aligned(16)));
 u64 ia64_mca_stackframe[32];
 u64 ia64_mca_bspstore[1024];
-u64 ia64_init_stack[KERNEL_STACK_SIZE] __attribute__((aligned(16)));
+u64 ia64_init_stack[KERNEL_STACK_SIZE/8] __attribute__((aligned(16)));
 u64 ia64_mca_sal_data_area[1356];
 u64 ia64_tlb_functional;
 u64 ia64_os_mca_recovery_successful;

@@ -105,6 +109,19 @@ static struct irqaction mca_cpe_irqaction = {
 	.name =		"cpe_hndlr"
 };

+#define MAX_CPE_POLL_INTERVAL	(15*60*HZ)	/* 15 minutes */
+#define MIN_CPE_POLL_INTERVAL	(2*60*HZ)	/* 2 minutes */
+#define CMC_POLL_INTERVAL	(1*60*HZ)	/* 1 minute */
+#define CMC_HISTORY_LENGTH	5
+
+static struct timer_list cpe_poll_timer;
+static struct timer_list cmc_poll_timer;
+/*
+ * Start with this in the wrong state so we won't play w/ timers
+ * before the system is ready.
+ */
+static int cmc_polling_enabled = 1;
+
 /*
  * ia64_mca_log_sal_error_record
  *

@@ -152,7 +169,8 @@ mca_handler_platform (void)
 void
 ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 {
-	IA64_MCA_DEBUG("ia64_mca_cpe_int_handler: received interrupt. vector = %#x\n", cpe_irq);
+	IA64_MCA_DEBUG("ia64_mca_cpe_int_handler: received interrupt. CPU:%d vector = %#x\n",
+		       smp_processor_id(), cpe_irq);

 	/* Get the CMC error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE, 0);

@@ -295,6 +313,60 @@ ia64_mca_cmc_vector_setup (void)
 		       smp_processor_id(), ia64_get_cmcv());
 }
+
+/*
+ * ia64_mca_cmc_vector_disable
+ *
+ *	Mask the corrected machine check vector register in the processor.
+ *	This function is invoked on a per-processor basis.
+ *
+ * Inputs
+ *	dummy(unused)
+ *
+ * Outputs
+ *	None
+ */
+void
+ia64_mca_cmc_vector_disable (void *dummy)
+{
+	cmcv_reg_t cmcv;
+
+	cmcv = (cmcv_reg_t)ia64_get_cmcv();
+
+	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
+	ia64_set_cmcv(cmcv.cmcv_regval);
+
+	IA64_MCA_DEBUG("ia64_mca_cmc_vector_disable: CPU %d corrected "
+		       "machine check vector %#x disabled.\n",
+		       smp_processor_id(), cmcv.cmcv_vector);
+}
+
+/*
+ * ia64_mca_cmc_vector_enable
+ *
+ *	Unmask the corrected machine check vector register in the processor.
+ *	This function is invoked on a per-processor basis.
+ *
+ * Inputs
+ *	dummy(unused)
+ *
+ * Outputs
+ *	None
+ */
+void
+ia64_mca_cmc_vector_enable (void *dummy)
+{
+	cmcv_reg_t cmcv;
+
+	cmcv = (cmcv_reg_t)ia64_get_cmcv();
+
+	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
+	ia64_set_cmcv(cmcv.cmcv_regval);
+
+	IA64_MCA_DEBUG("ia64_mca_cmc_vector_enable: CPU %d corrected "
+		       "machine check vector %#x enabled.\n",
+		       smp_processor_id(), cmcv.cmcv_vector);
+}
+
 #if defined(MCA_TEST)

@@ -396,7 +468,7 @@ ia64_mca_init(void)
 					     SAL_MC_PARAM_MECHANISM_INT,
 					     IA64_MCA_RENDEZ_VECTOR,
 					     IA64_MCA_RENDEZ_TIMEOUT,
-					     0)))
+					     SAL_MC_PARAM_RZ_ALWAYS)))
 	{
 		printk(KERN_ERR "ia64_mca_init: Failed to register rendezvous interrupt "
 		       "with SAL.  rc = %ld\n", rc);

@@ -494,9 +566,7 @@ ia64_mca_init(void)
 			setup_irq(irq, &mca_cpe_irqaction);
 		}
 		ia64_mca_register_cpev(cpev);
-	} else
-		printk(KERN_ERR
-		       "ia64_mca_init: Failed to get routed CPEI vector from ACPI.\n");
+	}
 }
 /* Initialize the areas set aside by the OS to buffer the

@@ -610,14 +680,11 @@ void
 ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
 {
 	unsigned long flags;
-	int cpu = 0;
+	int cpu = smp_processor_id();

 	/* Mask all interrupts */
 	local_irq_save(flags);

-#ifdef CONFIG_SMP
-	cpu = cpu_logical_id(hard_smp_processor_id());
-#endif
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;

 	/* Register with the SAL monarch that the slave has
 	 * reached SAL

@@ -751,11 +818,68 @@ ia64_mca_ucmc_handler(void)
 void
 ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 {
+	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
+	static int		index;
+	static spinlock_t	cmc_history_lock = SPIN_LOCK_UNLOCKED;
+
 	IA64_MCA_DEBUG("ia64_mca_cmc_int_handler: received interrupt vector = %#x on CPU %d\n",
 		       cmc_irq, smp_processor_id());

 	/* Get the CMC error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC, 0);
+
+	spin_lock(&cmc_history_lock);
+	if (!cmc_polling_enabled) {
+		int i, count = 1; /* we know 1 happened now */
+		unsigned long now = jiffies;
+
+		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
+			if (now - cmc_history[i] <= HZ)
+				count++;
+		}
+
+		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
+		if (count >= CMC_HISTORY_LENGTH) {
+			/*
+			 * CMC threshold exceeded, clear the history
+			 * so we have a fresh start when we return
+			 */
+			for (index = 0; index < CMC_HISTORY_LENGTH; index++)
+				cmc_history[index] = 0;
+			index = 0;
+
+			/* Switch to polling mode */
+			cmc_polling_enabled = 1;
+
+			/*
+			 * Unlock & enable interrupts before
+			 * smp_call_function or risk deadlock
+			 */
+			spin_unlock(&cmc_history_lock);
+			ia64_mca_cmc_vector_disable(NULL);
+			local_irq_enable();
+			smp_call_function(ia64_mca_cmc_vector_disable, NULL, 1, 1);
+
+			/*
+			 * Corrected errors will still be corrected, but
+			 * make sure there's a log somewhere that indicates
+			 * something is generating more than we can handle.
+			 */
+			printk(KERN_WARNING "ia64_mca_cmc_int_handler: WARNING: Switching to polling CMC handler, error records may be lost\n");
+
+			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
+
+			/* lock already released, get out now */
+			return;
+		} else {
+			cmc_history[index++] = now;
+			if (index == CMC_HISTORY_LENGTH)
+				index = 0;
+		}
+	}
+	spin_unlock(&cmc_history_lock);
 }

 /*

@@ -768,6 +892,7 @@ typedef struct ia64_state_log_s
 {
 	spinlock_t	isl_lock;
 	int		isl_index;
+	unsigned long	isl_count;
 	ia64_err_rec_t	*isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
 } ia64_state_log_t;

@@ -784,11 +909,145 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 #define IA64_LOG_NEXT_INDEX(it)	ia64_state_log[it].isl_index
 #define IA64_LOG_CURR_INDEX(it)	1 - ia64_state_log[it].isl_index
 #define IA64_LOG_INDEX_INC(it) \
-	ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
+	{ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
+	ia64_state_log[it].isl_count++;}
 #define IA64_LOG_INDEX_DEC(it) \
 	ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
 #define IA64_LOG_NEXT_BUFFER(it)	(void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
 #define IA64_LOG_CURR_BUFFER(it)	(void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
+#define IA64_LOG_COUNT(it)		ia64_state_log[it].isl_count
+
+/*
+ * ia64_mca_cmc_int_caller
+ *
+ *	Call CMC interrupt handler, only purpose is to have a
+ *	smp_call_function callable entry.
+ *
+ * Inputs   :	dummy(unused)
+ * Outputs  :	None
+ */
+static void
+ia64_mca_cmc_int_caller(void *dummy)
+{
+	ia64_mca_cmc_int_handler(0, NULL, NULL);
+}
+
+/*
+ * ia64_mca_cmc_poll
+ *
+ *	Poll for Corrected Machine Checks (CMCs)
+ *
+ * Inputs   :	dummy(unused)
+ * Outputs  :	None
+ */
+static void
+ia64_mca_cmc_poll (unsigned long dummy)
+{
+	int start_count;
+
+	start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
+
+	/* Call the interrupt handler */
+	smp_call_function(ia64_mca_cmc_int_caller, NULL, 1, 1);
+	local_irq_disable();
+	ia64_mca_cmc_int_caller(NULL);
+	local_irq_enable();
+
+	/*
+	 * If no log recorded, switch out of polling mode.
+	 */
+	if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
+		printk(KERN_WARNING "ia64_mca_cmc_poll: Returning to interrupt driven CMC handler\n");
+		cmc_polling_enabled = 0;
+		smp_call_function(ia64_mca_cmc_vector_enable, NULL, 1, 1);
+		ia64_mca_cmc_vector_enable(NULL);
+	} else {
+		mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
+	}
+}
+
+/*
+ * ia64_mca_cpe_int_caller
+ *
+ *	Call CPE interrupt handler, only purpose is to have a
+ *	smp_call_function callable entry.
+ *
+ * Inputs   :	dummy(unused)
+ * Outputs  :	None
+ */
+static void
+ia64_mca_cpe_int_caller(void *dummy)
+{
+	ia64_mca_cpe_int_handler(0, NULL, NULL);
+}
+
+/*
+ * ia64_mca_cpe_poll
+ *
+ *	Poll for Corrected Platform Errors (CPEs), dynamically adjust
+ *	polling interval based on occurrence of an event.
+ *
+ * Inputs   :	dummy(unused)
+ * Outputs  :	None
+ */
+static void
+ia64_mca_cpe_poll (unsigned long dummy)
+{
+	int start_count;
+	static int poll_time = MAX_CPE_POLL_INTERVAL;
+
+	start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
+
+	/* Call the interrupt handler */
+	smp_call_function(ia64_mca_cpe_int_caller, NULL, 1, 1);
+	local_irq_disable();
+	ia64_mca_cpe_int_caller(NULL);
+	local_irq_enable();
+
+	/*
+	 * If a log was recorded, increase our polling frequency,
+	 * otherwise, backoff.
+	 */
+	if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
+		poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time/2);
+	} else {
+		poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
+	}
+	mod_timer(&cpe_poll_timer, jiffies + poll_time);
+}
+
+/*
+ * ia64_mca_late_init
+ *
+ *	Opportunity to set up things that require initialization later
+ *	than ia64_mca_init.  Set up a timer to poll for CPEs if the
+ *	platform doesn't support an interrupt driven mechanism.
+ *
+ * Inputs   :	None
+ * Outputs  :	Status
+ */
+static int __init
+ia64_mca_late_init(void)
+{
+	init_timer(&cmc_poll_timer);
+	cmc_poll_timer.function = ia64_mca_cmc_poll;
+
+	/* Reset to the correct state */
+	cmc_polling_enabled = 0;
+
+	init_timer(&cpe_poll_timer);
+	cpe_poll_timer.function = ia64_mca_cpe_poll;
+
+	/* If platform doesn't support CPEI, get the timer going. */
+	if (acpi_request_vector(ACPI_INTERRUPT_CPEI) < 0)
+		ia64_mca_cpe_poll(0UL);
+
+	return 0;
+}
+
+device_initcall(ia64_mca_late_init);

 /*
  * C portion of the OS INIT handler

@@ -949,7 +1208,6 @@ ia64_log_get(int sal_info_type, prfunc_t prfunc)
 		return total_len;
 	} else {
 		IA64_LOG_UNLOCK(sal_info_type);
-		prfunc("ia64_log_get: No SAL error record available for type %d\n", sal_info_type);
 		return 0;
 	}
 }
...
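
(Editorial aside: the interrupt-to-polling switch added to
ia64_mca_cmc_int_handler() above reduces to a simple rate check, sketched
here outside the kernel; history[] and index mirror the handler's static
state.)

#define CMC_HISTORY_LENGTH	5

/* Returns 1 when CMC_HISTORY_LENGTH corrected errors (counting the one that
   just fired) have arrived within one second of `now', i.e. when the handler
   should mask its vector and fall back to the polling timer. */
static int
cmc_storm (unsigned long history[], int *index, unsigned long now, unsigned long hz)
{
	int i, count = 1;			/* the interrupt that just fired */

	for (i = 0; i < CMC_HISTORY_LENGTH; i++)
		if (now - history[i] <= hz)
			count++;
	if (count >= CMC_HISTORY_LENGTH)
		return 1;			/* storm: switch to polling */
	history[*index] = now;			/* otherwise just remember the event */
	*index = (*index + 1) % CMC_HISTORY_LENGTH;
	return 0;
}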
/*
* IA-64-specific support for kernel module loader.
*
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* Loosely based on patch by Rusty Russell.
*/
/* relocs tested so far:
DIR64LSB
FPTR64LSB
GPREL22
LDXMOV
LDXMOV
LTOFF22
LTOFF22X
LTOFF22X
LTOFF_FPTR22
PCREL21B
PCREL64LSB
SECREL32LSB
SEGREL64LSB
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#define ARCH_MODULE_DEBUG 0
#if ARCH_MODULE_DEBUG
# define DEBUGP printk
# define inline
#else
# define DEBUGP(fmt , a...)
#endif
#ifdef CONFIG_ITANIUM
# define USE_BRL 0
#else
# define USE_BRL 1
#endif
#define MAX_LTOFF ((uint64_t) (1 << 22)) /* max. allowable linkage-table offset */
/* Define some relocation helper macros/types: */
#define FORMAT_SHIFT 0
#define FORMAT_BITS 3
#define FORMAT_MASK ((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT 3
#define VALUE_BITS 5
#define VALUE_MASK ((1 << VALUE_BITS) - 1)
enum reloc_target_format {
/* direct encoded formats: */
RF_NONE = 0,
RF_INSN14 = 1,
RF_INSN22 = 2,
RF_INSN64 = 3,
RF_32MSB = 4,
RF_32LSB = 5,
RF_64MSB = 6,
RF_64LSB = 7,
/* formats that cannot be directly decoded: */
RF_INSN60,
RF_INSN21B, /* imm21 form 1 */
RF_INSN21M, /* imm21 form 2 */
RF_INSN21F /* imm21 form 3 */
};
enum reloc_value_formula {
RV_DIRECT = 4, /* S + A */
RV_GPREL = 5, /* @gprel(S + A) */
RV_LTREL = 6, /* @ltoff(S + A) */
RV_PLTREL = 7, /* @pltoff(S + A) */
RV_FPTR = 8, /* @fptr(S + A) */
RV_PCREL = 9, /* S + A - P */
RV_LTREL_FPTR = 10, /* @ltoff(@fptr(S + A)) */
RV_SEGREL = 11, /* @segrel(S + A) */
RV_SECREL = 12, /* @secrel(S + A) */
RV_BDREL = 13, /* BD + A */
RV_LTV = 14, /* S + A (like RV_DIRECT, except frozen at static link-time) */
RV_PCREL2 = 15, /* S + A - P */
RV_SPECIAL = 16, /* various (see below) */
RV_RSVD17 = 17,
RV_TPREL = 18, /* @tprel(S + A) */
RV_LTREL_TPREL = 19, /* @ltoff(@tprel(S + A)) */
RV_DTPMOD = 20, /* @dtpmod(S + A) */
RV_LTREL_DTPMOD = 21, /* @ltoff(@dtpmod(S + A)) */
RV_DTPREL = 22, /* @dtprel(S + A) */
RV_LTREL_DTPREL = 23, /* @ltoff(@dtprel(S + A)) */
RV_RSVD24 = 24,
RV_RSVD25 = 25,
RV_RSVD26 = 26,
RV_RSVD27 = 27
/* 28-31 reserved for implementation-specific purposes. */
};
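/*
 * (Editorial worked example, not in the original source: R_IA64_DIR64LSB is
 * 0x27 = 0b00100111 in the IA-64 ELF ABI.  Bits 0-2 give the target format,
 * 0b111 = 7 = RF_64LSB; bits 3-7 give the value formula, 0b00100 = 4 =
 * RV_DIRECT.  do_reloc() below relies on exactly this decomposition.)
 */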
#define N(reloc) [R_IA64_##reloc] = #reloc
static const char *reloc_name[256] = {
N(NONE), N(IMM14), N(IMM22), N(IMM64),
N(DIR32MSB), N(DIR32LSB), N(DIR64MSB), N(DIR64LSB),
N(GPREL22), N(GPREL64I), N(GPREL32MSB), N(GPREL32LSB),
N(GPREL64MSB), N(GPREL64LSB), N(LTOFF22), N(LTOFF64I),
N(PLTOFF22), N(PLTOFF64I), N(PLTOFF64MSB), N(PLTOFF64LSB),
N(FPTR64I), N(FPTR32MSB), N(FPTR32LSB), N(FPTR64MSB),
N(FPTR64LSB), N(PCREL60B), N(PCREL21B), N(PCREL21M),
N(PCREL21F), N(PCREL32MSB), N(PCREL32LSB), N(PCREL64MSB),
N(PCREL64LSB), N(LTOFF_FPTR22), N(LTOFF_FPTR64I), N(LTOFF_FPTR32MSB),
N(LTOFF_FPTR32LSB), N(LTOFF_FPTR64MSB), N(LTOFF_FPTR64LSB), N(SEGREL32MSB),
N(SEGREL32LSB), N(SEGREL64MSB), N(SEGREL64LSB), N(SECREL32MSB),
N(SECREL32LSB), N(SECREL64MSB), N(SECREL64LSB), N(REL32MSB),
N(REL32LSB), N(REL64MSB), N(REL64LSB), N(LTV32MSB),
N(LTV32LSB), N(LTV64MSB), N(LTV64LSB), N(PCREL21BI),
N(PCREL22), N(PCREL64I), N(IPLTMSB), N(IPLTLSB),
N(COPY), N(LTOFF22X), N(LDXMOV), N(TPREL14),
N(TPREL22), N(TPREL64I), N(TPREL64MSB), N(TPREL64LSB),
N(LTOFF_TPREL22), N(DTPMOD64MSB), N(DTPMOD64LSB), N(LTOFF_DTPMOD22),
N(DTPREL14), N(DTPREL22), N(DTPREL64I), N(DTPREL32MSB),
N(DTPREL32LSB), N(DTPREL64MSB), N(DTPREL64LSB), N(LTOFF_DTPREL22)
};
#undef N
struct got_entry {
uint64_t val;
};
struct fdesc {
uint64_t ip;
uint64_t gp;
};
/* Opaque struct for insns, to protect against derefs. */
struct insn;
static inline uint64_t
bundle (const struct insn *insn)
{
return (uint64_t) insn & ~0xfUL;
}
static inline int
slot (const struct insn *insn)
{
return (uint64_t) insn & 0x3;
}
/* Patch instruction with "val" where "mask" has 1 bits. */
static void
apply (struct insn *insn, uint64_t mask, uint64_t val)
{
uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) bundle(insn);
# define insn_mask ((1UL << 41) - 1)
unsigned long shift;
b0 = b[0]; b1 = b[1];
shift = 5 + 41 * slot(insn); /* 5 bits of template, then 3 x 41-bit instructions */
if (shift >= 64) {
m1 = mask << (shift - 64);
v1 = val << (shift - 64);
} else {
m0 = mask << shift; m1 = mask >> (64 - shift);
v0 = val << shift; v1 = val >> (64 - shift);
b[0] = (b0 & ~m0) | (v0 & m0);
}
b[1] = (b1 & ~m1) | (v1 & m1);
}
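/*
 * (Editorial note: slot 0 yields shift 5 and slot 1 yields 46, so the 41-bit
 * instruction straddles b[0] and b[1]; slot 2 yields 87 >= 64, which is why
 * the first branch above patches b[1] alone.)
 */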
static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
if (slot(insn) != 2) {
printk(KERN_ERR "%s: illegal slot number %d for IMM64\n",
mod->name, slot(insn));
return 0;
}
apply(insn, 0x01fffefe000, ( ((val & 0x8000000000000000) >> 27) /* bit 63 -> 36 */
| ((val & 0x0000000000200000) << 0) /* bit 21 -> 21 */
| ((val & 0x00000000001f0000) << 6) /* bit 16 -> 22 */
| ((val & 0x000000000000ff80) << 20) /* bit 7 -> 27 */
| ((val & 0x000000000000007f) << 13) /* bit 0 -> 13 */));
apply((void *) insn - 1, 0x1ffffffffff, val >> 22);
return 1;
}
static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
if (slot(insn) != 2) {
printk(KERN_ERR "%s: illegal slot number %d for IMM60\n",
mod->name, slot(insn));
return 0;
}
if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
printk(KERN_ERR "%s: value %ld out of IMM60 range\n", mod->name, (int64_t) val);
return 0;
}
apply(insn, 0x011ffffe000, ( ((val & 0x1000000000000000) >> 24) /* bit 60 -> 36 */
| ((val & 0x00000000000fffff) << 13) /* bit 0 -> 13 */));
apply((void *) insn - 1, 0x1fffffffffc, val >> 18);
return 1;
}
static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
if (val + (1 << 21) >= (1 << 22)) {
printk(KERN_ERR "%s: value %li out of IMM22 range\n", mod->name, (int64_t)val);
return 0;
}
apply(insn, 0x01fffcfe000, ( ((val & 0x200000) << 15) /* bit 21 -> 36 */
| ((val & 0x1f0000) << 6) /* bit 16 -> 22 */
| ((val & 0x00ff80) << 20) /* bit 7 -> 27 */
| ((val & 0x00007f) << 13) /* bit 0 -> 13 */));
return 1;
}
static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
if (val + (1 << 20) >= (1 << 21)) {
printk(KERN_ERR "%s: value %li out of IMM21b range\n", mod->name, (int64_t)val);
return 0;
}
apply(insn, 0x11ffffe000, ( ((val & 0x100000) << 16) /* bit 20 -> 36 */
| ((val & 0x0fffff) << 13) /* bit 0 -> 13 */));
return 1;
}
#if USE_BRL
struct plt_entry {
/* Three instruction bundles in PLT. */
unsigned char bundle[2][16];
};
static const struct plt_entry ia64_plt_template = {
{
{
0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /* movl gp=TARGET_GP */
0x00, 0x00, 0x00, 0x60
},
{
0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* brl.many gp=TARGET_GP */
0x08, 0x00, 0x00, 0xc0
}
}
};
static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
&& apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
(target_ip - (int64_t) plt->bundle[1]) / 16))
return 1;
return 0;
}
unsigned long
plt_target (struct plt_entry *plt)
{
uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
long off;
b0 = b[0]; b1 = b[1];
off = ( ((b1 & 0x00fffff000000000) >> 36) /* imm20b -> bit 0 */
| ((b0 >> 48) << 20) | ((b1 & 0x7fffff) << 36) /* imm39 -> bit 20 */
| ((b1 & 0x0800000000000000) << 1)); /* i -> bit 60 */
return (long) plt->bundle[1] + 16*off;
}
#else /* !USE_BRL */
struct plt_entry {
/* Three instruction bundles in PLT. */
unsigned char bundle[3][16];
};
static const struct plt_entry ia64_plt_template = {
{
{
0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* movl r16=TARGET_IP */
0x02, 0x00, 0x00, 0x60
},
{
0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /* movl gp=TARGET_GP */
0x00, 0x00, 0x00, 0x60
},
{
0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /* mov b6=r16 */
0x60, 0x00, 0x80, 0x00 /* br.few b6 */
}
}
};
static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
&& apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
return 1;
return 0;
}
unsigned long
plt_target (struct plt_entry *plt)
{
uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];
b0 = b[0]; b1 = b[1];
return ( ((b1 & 0x000007f000000000) >> 36) /* imm7b -> bit 0 */
| ((b1 & 0x07fc000000000000) >> 43) /* imm9d -> bit 7 */
| ((b1 & 0x0003e00000000000) >> 29) /* imm5c -> bit 16 */
| ((b1 & 0x0000100000000000) >> 23) /* ic -> bit 21 */
| ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40) /* imm41 -> bit 22 */
| ((b1 & 0x0800000000000000) << 4)); /* i -> bit 63 */
}
#endif /* !USE_BRL */
void *
module_alloc (unsigned long size)
{
if (!size)
return NULL;
return vmalloc(size);
}
void
module_free (struct module *mod, void *module_region)
{
vfree(module_region);
}
/* Have we already seen one of these relocations? */
/* FIXME: we could look in other sections, too --RR */
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
unsigned int i;
for (i = 0; i < num; i++) {
if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
return 1;
}
return 0;
}
/* Count how many GOT entries we may need */
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
unsigned int i, ret = 0;
/* Sure, this is order(n^2), but it's usually short, and not
time critical */
for (i = 0; i < num; i++) {
switch (ELF64_R_TYPE(rela[i].r_info)) {
case R_IA64_LTOFF22:
case R_IA64_LTOFF22X:
case R_IA64_LTOFF64I:
case R_IA64_LTOFF_FPTR22:
case R_IA64_LTOFF_FPTR64I:
case R_IA64_LTOFF_FPTR32MSB:
case R_IA64_LTOFF_FPTR32LSB:
case R_IA64_LTOFF_FPTR64MSB:
case R_IA64_LTOFF_FPTR64LSB:
if (!duplicate_reloc(rela, i))
ret++;
break;
}
}
return ret;
}
/* Count how many PLT entries we may need */
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
unsigned int i, ret = 0;
/* Sure, this is order(n^2), but it's usually short, and not
time critical */
for (i = 0; i < num; i++) {
switch (ELF64_R_TYPE(rela[i].r_info)) {
case R_IA64_PCREL21B:
case R_IA64_PLTOFF22:
case R_IA64_PLTOFF64I:
case R_IA64_PLTOFF64MSB:
case R_IA64_PLTOFF64LSB:
case R_IA64_IPLTMSB:
case R_IA64_IPLTLSB:
if (!duplicate_reloc(rela, i))
ret++;
break;
}
}
return ret;
}
/* We need to create a function descriptor for any internal function
which is referenced. */
static unsigned int
count_fdescs (const Elf64_Rela *rela, unsigned int num)
{
unsigned int i, ret = 0;
/* Sure, this is order(n^2), but it's usually short, and not time critical. */
for (i = 0; i < num; i++) {
switch (ELF64_R_TYPE(rela[i].r_info)) {
case R_IA64_FPTR64I:
case R_IA64_FPTR32LSB:
case R_IA64_FPTR32MSB:
case R_IA64_FPTR64LSB:
case R_IA64_FPTR64MSB:
case R_IA64_LTOFF_FPTR22:
case R_IA64_LTOFF_FPTR32LSB:
case R_IA64_LTOFF_FPTR32MSB:
case R_IA64_LTOFF_FPTR64I:
case R_IA64_LTOFF_FPTR64LSB:
case R_IA64_LTOFF_FPTR64MSB:
case R_IA64_IPLTMSB:
case R_IA64_IPLTLSB:
/*
* Jumps to static functions sometimes go straight to their
* offset. Of course, that may not be possible if the jump is
* from init -> core or vice versa, so we need to generate an
* FDESC (and PLT etc) for that.
*/
case R_IA64_PCREL21B:
if (!duplicate_reloc(rela, i))
ret++;
break;
}
}
return ret;
}
int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
struct module *mod)
{
unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
/*
* To store the PLTs and function-descriptors, we expand the .text section for
* core module-code and the .init.text section for initialization code.
*/
for (s = sechdrs; s < sechdrs_end; ++s)
if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
mod->arch.core_plt = s;
else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
mod->arch.init_plt = s;
else if (strcmp(".got", secstrings + s->sh_name) == 0)
mod->arch.got = s;
else if (strcmp(".opd", secstrings + s->sh_name) == 0)
mod->arch.opd = s;
else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
mod->arch.unwind = s;
if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
printk(KERN_ERR "%s: sections missing\n", mod->name);
return -ENOEXEC;
}
/* GOT and PLTs can occur in any relocated section... */
for (s = sechdrs + 1; s < sechdrs_end; ++s) {
const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);
if (s->sh_type != SHT_RELA)
continue;
gots += count_gots(rels, numrels);
fdescs += count_fdescs(rels, numrels);
if (strstr(secstrings + s->sh_name, ".init"))
init_plts += count_plts(rels, numrels);
else
core_plts += count_plts(rels, numrels);
}
mod->arch.core_plt->sh_type = SHT_NOBITS;
mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
mod->arch.core_plt->sh_addralign = 16;
mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
mod->arch.init_plt->sh_type = SHT_NOBITS;
mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
mod->arch.init_plt->sh_addralign = 16;
mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
mod->arch.got->sh_type = SHT_NOBITS;
mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
mod->arch.got->sh_addralign = 8;
mod->arch.got->sh_size = gots * sizeof(struct got_entry);
mod->arch.opd->sh_type = SHT_NOBITS;
mod->arch.opd->sh_flags = SHF_ALLOC;
mod->arch.opd->sh_addralign = 8;
mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
__FUNCTION__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
mod->arch.got->sh_size, mod->arch.opd->sh_size);
return 0;
}
static inline int
in_init (const struct module *mod, uint64_t addr)
{
return addr - (uint64_t) mod->module_init < mod->init_size;
}
static inline int
in_core (const struct module *mod, uint64_t addr)
{
return addr - (uint64_t) mod->module_core < mod->core_size;
}
static inline int
is_internal (const struct module *mod, uint64_t value)
{
return in_init(mod, value) || in_core(mod, value);
}
/*
* Get gp-relative offset for the linkage-table entry of VALUE.
*/
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
struct got_entry *got, *e;
if (!*okp)
return 0;
got = (void *) mod->arch.got->sh_addr;
for (e = got; e < got + mod->arch.next_got_entry; ++e)
if (e->val == value)
goto found;
/* Not enough GOT entries? */
if (e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size))
BUG();
e->val = value;
++mod->arch.next_got_entry;
found:
return (uint64_t) e - mod->arch.gp;
}
static inline int
gp_addressable (struct module *mod, uint64_t value)
{
return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}
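/*
 * (Editorial note: with MAX_LTOFF = 4MB, the unsigned comparison above is a
 * branch-free test for gp - 2MB <= value < gp + 2MB, i.e. whether VALUE is
 * addressable with a 22-bit gp-relative offset.)
 */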
/* Get PC-relative PLT entry for this value. Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
struct plt_entry *plt, *plt_end;
uint64_t target_ip, target_gp;
if (!*okp)
return 0;
if (in_init(mod, (uint64_t) insn)) {
plt = (void *) mod->arch.init_plt->sh_addr;
plt_end = (void *) plt + mod->arch.init_plt->sh_size;
} else {
plt = (void *) mod->arch.core_plt->sh_addr;
plt_end = (void *) plt + mod->arch.core_plt->sh_size;
}
/* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
target_ip = ((uint64_t *) value)[0];
target_gp = ((uint64_t *) value)[1];
/* Look for existing PLT entry. */
while (plt->bundle[0][0]) {
if (plt_target(plt) == target_ip)
goto found;
if (++plt >= plt_end)
BUG();
}
*plt = ia64_plt_template;
if (!patch_plt(mod, plt, target_ip, target_gp)) {
*okp = 0;
return 0;
}
#if ARCH_MODULE_DEBUG
if (plt_target(plt) != target_ip) {
printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
__FUNCTION__, target_ip, plt_target(plt));
*okp = 0;
return 0;
}
#endif
found:
return (uint64_t) plt;
}
/* Get function descriptor for VALUE. */
static uint64_t
get_fdesc (struct module *mod, uint64_t value, int *okp)
{
struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;
if (!*okp)
return 0;
if (!value) {
printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
return 0;
}
if (!is_internal(mod, value))
/*
* If it's not a module-local entry-point, "value" already points to a
* function-descriptor.
*/
return value;
/* Look for existing function descriptor. */
while (fdesc->ip) {
if (fdesc->ip == value)
return (uint64_t)fdesc;
if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
BUG();
}
/* Create new one */
fdesc->ip = value;
fdesc->gp = mod->arch.gp;
return (uint64_t) fdesc;
}
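/*
 * The .opd section scanned above is just an array of IA-64 function
 * descriptors; judging from the assignments above, an entry looks like
 * (sketch -- the real definition is elsewhere):
 *
 *	struct fdesc {
 *		uint64_t ip;	// entry point of the function
 *		uint64_t gp;	// global pointer the callee expects
 *	};
 *
 * Indirect calls go through such a descriptor so the callee's gp can be
 * installed along with the branch target.
 */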
static inline int
do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
Elf64_Shdr *sec, void *location)
{
enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
uint64_t val;
int ok = 1;
val = sym->st_value + addend;
switch (formula) {
case RV_SEGREL: /* segment base is arbitrarily chosen to be 0 for kernel modules */
case RV_DIRECT:
break;
case RV_GPREL: val -= mod->arch.gp; break;
case RV_LTREL: val = get_ltoff(mod, val, &ok); break;
case RV_PLTREL: val = get_plt(mod, location, val, &ok); break;
case RV_FPTR: val = get_fdesc(mod, val, &ok); break;
case RV_SECREL: val -= sec->sh_addr; break;
case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;
case RV_PCREL:
switch (r_type) {
case R_IA64_PCREL21B:
/* special because it can cross into other module/kernel-core. */
if (!is_internal(mod, val))
val = get_plt(mod, location, val, &ok);
/* FALL THROUGH */
default:
val -= bundle(location);
break;
case R_IA64_PCREL32MSB:
case R_IA64_PCREL32LSB:
case R_IA64_PCREL64MSB:
case R_IA64_PCREL64LSB:
val -= (uint64_t) location;
break;
}
switch (r_type) {
case R_IA64_PCREL60B: format = RF_INSN60; break;
case R_IA64_PCREL21B: format = RF_INSN21B; break;
case R_IA64_PCREL21M: format = RF_INSN21M; break;
case R_IA64_PCREL21F: format = RF_INSN21F; break;
default: break;
}
break;
case RV_BDREL:
val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
break;
case RV_LTV:
/* can link-time value relocs happen here? */
BUG();
break;
case RV_PCREL2:
if (r_type == R_IA64_PCREL21BI) {
if (!is_internal(mod, val)) {
printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
__FUNCTION__, reloc_name[r_type], val);
return -ENOEXEC;
}
format = RF_INSN21B;
}
val -= bundle(location);
break;
case RV_SPECIAL:
switch (r_type) {
case R_IA64_IPLTMSB:
case R_IA64_IPLTLSB:
val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
format = RF_64LSB;
if (r_type == R_IA64_IPLTMSB)
format = RF_64MSB;
break;
case R_IA64_SUB:
val = addend - sym->st_value;
format = RF_INSN64;
break;
case R_IA64_LTOFF22X:
if (gp_addressable(mod, val))
val -= mod->arch.gp;
else
val = get_ltoff(mod, val, &ok);
format = RF_INSN22;
break;
case R_IA64_LDXMOV:
if (gp_addressable(mod, val)) {
/* turn "ld8" into "mov": */
DEBUGP("%s: patching ld8 at %p to mov\n", __FUNCTION__, location);
apply(location, 0x1fff80fe000, 0x10000000000);
}
return 0;
default:
if (reloc_name[r_type])
printk(KERN_ERR "%s: special reloc %s not supported",
mod->name, reloc_name[r_type]);
else
printk(KERN_ERR "%s: unknown special reloc %x\n",
mod->name, r_type);
return -ENOEXEC;
}
break;
case RV_TPREL:
case RV_LTREL_TPREL:
case RV_DTPMOD:
case RV_LTREL_DTPMOD:
case RV_DTPREL:
case RV_LTREL_DTPREL:
printk(KERN_ERR "%s: %s reloc not supported\n",
mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
return -ENOEXEC;
default:
printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
return -ENOEXEC;
}
if (!ok)
return -ENOEXEC;
DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __FUNCTION__, location, val,
reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);
switch (format) {
case RF_INSN21B: ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
case RF_INSN22: ok = apply_imm22(mod, location, val); break;
case RF_INSN64: ok = apply_imm64(mod, location, val); break;
case RF_INSN60: ok = apply_imm60(mod, location, (int64_t) val / 16); break;
case RF_32LSB: put_unaligned(val, (uint32_t *) location); break;
case RF_64LSB: put_unaligned(val, (uint64_t *) location); break;
case RF_32MSB: /* ia64 Linux is little-endian... */
case RF_64MSB: /* ia64 Linux is little-endian... */
case RF_INSN14: /* must be within-module, i.e., resolved by "ld -r" */
case RF_INSN21M: /* must be within-module, i.e., resolved by "ld -r" */
case RF_INSN21F: /* must be within-module, i.e., resolved by "ld -r" */
printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
return -ENOEXEC;
default:
printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
return -ENOEXEC;
}
return ok ? 0 : -ENOEXEC;
}
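/*
 * Worked example of the formula/format split above, with made-up numbers:
 * an R_IA64_PCREL21B branch to an external symbol takes the PLT path, then
 * becomes bundle-relative:
 *
 *	sym->st_value + addend	= 0xa000000000100000	(external target)
 *	get_plt()		= 0xa0000000002000c0	(module's PLT entry)
 *	bundle(location)	= 0xa000000000200040	(branch's own bundle)
 *
 *	val = 0xa0000000002000c0 - 0xa000000000200040 = 0x80
 *
 * and since branch displacements are bundle-granular, apply_imm21b() is
 * handed val/16 = 8.
 */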
int
apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
unsigned int relsec, struct module *mod)
{
unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
Elf64_Shdr *target_sec;
int ret;
DEBUGP("%s: applying section %u (%u relocs) to %u\n", __FUNCTION__,
relsec, n, sechdrs[relsec].sh_info);
target_sec = sechdrs + sechdrs[relsec].sh_info;
if (target_sec->sh_entsize == ~0UL)
/*
* If target section wasn't allocated, we don't need to relocate it.
* Happens, e.g., for debug sections.
*/
return 0;
if (!mod->arch.gp) {
/*
* XXX Should have an arch-hook for running this after final section
* addresses have been selected...
*/
/* See if gp can cover the entire core module: */
uint64_t gp = (uint64_t) mod->module_core + MAX_LTOFF / 2;
if (mod->core_size >= MAX_LTOFF)
/*
* This takes advantage of fact that SHF_ARCH_SMALL gets allocated
* at the end of the module.
*/
gp = (uint64_t) mod->module_core + mod->core_size - MAX_LTOFF / 2;
mod->arch.gp = gp;
DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
}
for (i = 0; i < n; i++) {
ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
((Elf64_Sym *) sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rela[i].r_info)),
rela[i].r_addend, target_sec,
(void *) target_sec->sh_addr + rela[i].r_offset);
if (ret < 0)
return ret;
}
return 0;
}
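/*
 * Worked example of the gp placement above (assuming MAX_LTOFF is the 4MB
 * window a signed 22-bit offset spans):
 *
 *	core_size = 1MB: gp = module_core + 2MB, so the whole module lies in
 *	    [gp - 2MB, gp - 1MB), comfortably inside the window.
 *	core_size = 5MB: gp = module_core + 3MB, covering
 *	    [module_core + 1MB, module_core + 5MB); only the tail of the
 *	    module is guaranteed reachable, which is why the SHF_ARCH_SMALL
 *	    sections (GOT and small data) are allocated at the end.
 */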
int
apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
unsigned int relsec, struct module *mod)
{
printk(KERN_ERR "module %s: REL relocs in section %u unsupported\n", mod->name, relsec);
return -ENOEXEC;
}
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
DEBUGP("%s: init: entry=%p\n", __FUNCTION__, mod->init);
if (mod->arch.unwind)
mod->arch.unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
(void *) mod->arch.unwind->sh_addr,
((void *) mod->arch.unwind->sh_addr
+ mod->arch.unwind->sh_size));
return 0;
}
void
module_arch_cleanup (struct module *mod)
{
if (mod->arch.unwind)
unw_remove_unwind_table(mod->arch.unw_table);
}
...@@ -27,9 +27,7 @@ ...@@ -27,9 +27,7 @@
#include <asm/sal.h> #include <asm/sal.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/processor.h> #include <asm/processor.h>
#ifdef CONFIG_SMP
#include <linux/smp.h> #include <linux/smp.h>
#endif
MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>"); MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
MODULE_DESCRIPTION("/proc interface to IA-64 PAL"); MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
...@@ -37,12 +35,6 @@ MODULE_LICENSE("GPL"); ...@@ -37,12 +35,6 @@ MODULE_LICENSE("GPL");
#define PALINFO_VERSION "0.5" #define PALINFO_VERSION "0.5"
#ifdef CONFIG_SMP
#define cpu_is_online(i) (cpu_online_map & (1UL << i))
#else
#define cpu_is_online(i) 1
#endif
typedef int (*palinfo_func_t)(char*); typedef int (*palinfo_func_t)(char*);
typedef struct { typedef struct {
...@@ -933,7 +925,7 @@ palinfo_init(void) ...@@ -933,7 +925,7 @@ palinfo_init(void)
*/ */
for (i=0; i < NR_CPUS; i++) { for (i=0; i < NR_CPUS; i++) {
if (!cpu_is_online(i)) continue; if (!cpu_online(i)) continue;
sprintf(cpustr,CPUSTR, i); sprintf(cpustr,CPUSTR, i);
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/wrapper.h> #include <linux/wrapper.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/sysctl.h> #include <linux/sysctl.h>
#include <linux/smp.h>
#include <asm/bitops.h> #include <asm/bitops.h>
#include <asm/errno.h> #include <asm/errno.h>
...@@ -134,12 +135,6 @@ ...@@ -134,12 +135,6 @@
#define PFM_CPUINFO_CLEAR(v) __get_cpu_var(pfm_syst_info) &= ~(v) #define PFM_CPUINFO_CLEAR(v) __get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v) __get_cpu_var(pfm_syst_info) |= (v) #define PFM_CPUINFO_SET(v) __get_cpu_var(pfm_syst_info) |= (v)
#ifdef CONFIG_SMP
#define cpu_is_online(i) (cpu_online_map & (1UL << i))
#else
#define cpu_is_online(i) (i==0)
#endif
/* /*
* debugging * debugging
*/ */
...@@ -1082,7 +1077,7 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx) ...@@ -1082,7 +1077,7 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
* and it must be a valid CPU * and it must be a valid CPU
*/ */
cpu = ffz(~pfx->ctx_cpu_mask); cpu = ffz(~pfx->ctx_cpu_mask);
if (cpu_is_online(cpu) == 0) { if (cpu_online(cpu) == 0) {
DBprintk(("CPU%d is not online\n", cpu)); DBprintk(("CPU%d is not online\n", cpu));
return -EINVAL; return -EINVAL;
} }
...@@ -3153,7 +3148,7 @@ pfm_proc_info(char *page) ...@@ -3153,7 +3148,7 @@ pfm_proc_info(char *page)
p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf.ovfl_val); p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf.ovfl_val);
for(i=0; i < NR_CPUS; i++) { for(i=0; i < NR_CPUS; i++) {
if (cpu_is_online(i) == 0) continue; if (cpu_online(i) == 0) continue;
p += sprintf(p, "CPU%-2d overflow intrs : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_count); p += sprintf(p, "CPU%-2d overflow intrs : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_count);
p += sprintf(p, "CPU%-2d spurious intrs : %lu\n", i, pfm_stats[i].pfm_spurious_ovfl_intr_count); p += sprintf(p, "CPU%-2d spurious intrs : %lu\n", i, pfm_stats[i].pfm_spurious_ovfl_intr_count);
p += sprintf(p, "CPU%-2d recorded samples : %lu\n", i, pfm_stats[i].pfm_recorded_samples_count); p += sprintf(p, "CPU%-2d recorded samples : %lu\n", i, pfm_stats[i].pfm_recorded_samples_count);
......
...@@ -66,10 +66,7 @@ do_show_stack (struct unw_frame_info *info, void *arg) ...@@ -66,10 +66,7 @@ do_show_stack (struct unw_frame_info *info, void *arg)
void void
show_trace_task (struct task_struct *task) show_trace_task (struct task_struct *task)
{ {
struct unw_frame_info info; show_stack(task);
unw_init_from_blocked_task(&info, task);
do_show_stack(&info, 0);
} }
void void
...@@ -169,7 +166,10 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall ...@@ -169,7 +166,10 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
void void
default_idle (void) default_idle (void)
{ {
/* may want to do PAL_LIGHT_HALT here... */ #ifdef CONFIG_IA64_PAL_IDLE
if (!need_resched())
safe_halt();
#endif
} }
void __attribute__((noreturn)) void __attribute__((noreturn))
...@@ -177,6 +177,10 @@ cpu_idle (void *unused) ...@@ -177,6 +177,10 @@ cpu_idle (void *unused)
{ {
/* endless idle loop with no priority at all */ /* endless idle loop with no priority at all */
while (1) { while (1) {
void (*idle)(void) = pm_idle;
if (!idle)
idle = default_idle;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (!need_resched()) if (!need_resched())
min_xtp(); min_xtp();
...@@ -186,10 +190,7 @@ cpu_idle (void *unused) ...@@ -186,10 +190,7 @@ cpu_idle (void *unused)
#ifdef CONFIG_IA64_SGI_SN #ifdef CONFIG_IA64_SGI_SN
snidle(); snidle();
#endif #endif
if (pm_idle) (*idle)();
(*pm_idle)();
else
default_idle();
} }
#ifdef CONFIG_IA64_SGI_SN #ifdef CONFIG_IA64_SGI_SN
...@@ -581,6 +582,15 @@ kernel_thread (int (*fn)(void *), void *arg, unsigned long flags) ...@@ -581,6 +582,15 @@ kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
tid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0); tid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
if (parent != current) { if (parent != current) {
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(ia64_task_regs(current))) {
/* A kernel thread is always a 64-bit process. */
current->thread.map_base = DEFAULT_MAP_BASE;
current->thread.task_size = DEFAULT_TASK_SIZE;
ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
}
#endif
result = (*fn)(arg); result = (*fn)(arg);
_exit(result); _exit(result);
} }
...@@ -751,7 +761,7 @@ dup_task_struct(struct task_struct *orig) ...@@ -751,7 +761,7 @@ dup_task_struct(struct task_struct *orig)
} }
void void
__put_task_struct (struct task_struct *tsk) free_task_struct (struct task_struct *tsk)
{ {
free_pages((unsigned long) tsk, KERNEL_STACK_SIZE_ORDER); free_pages((unsigned long) tsk, KERNEL_STACK_SIZE_ORDER);
} }
...@@ -191,6 +191,10 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from) ...@@ -191,6 +191,10 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
err |= __put_user(from->si_pfm_ovfl[2], &to->si_pfm_ovfl[2]); err |= __put_user(from->si_pfm_ovfl[2], &to->si_pfm_ovfl[2]);
err |= __put_user(from->si_pfm_ovfl[3], &to->si_pfm_ovfl[3]); err |= __put_user(from->si_pfm_ovfl[3], &to->si_pfm_ovfl[3]);
} }
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(from->si_value, &to->si_value);
break; break;
default: default:
err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_uid, &to->si_uid);
......
...@@ -279,12 +279,15 @@ smp_callin (void) ...@@ -279,12 +279,15 @@ smp_callin (void)
smp_setup_percpu_timer(); smp_setup_percpu_timer();
if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
/* /*
* Synchronize the ITC with the BP * Synchronize the ITC with the BP
*/ */
Dprintk("Going to syncup ITC with BP.\n"); Dprintk("Going to syncup ITC with BP.\n");
ia64_sync_itc(0); ia64_sync_itc(0);
}
/* /*
* Get our bogomips. * Get our bogomips.
*/ */
......
...@@ -60,7 +60,7 @@ do_profile (unsigned long ip) ...@@ -60,7 +60,7 @@ do_profile (unsigned long ip)
} }
/* /*
* Return the number of micro-seconds that elapsed since the last update to jiffy. The * Return the number of nano-seconds that elapsed since the last update to jiffy. The
* xtime_lock must be at least read-locked when calling this routine. * xtime_lock must be at least read-locked when calling this routine.
*/ */
static inline unsigned long static inline unsigned long
...@@ -86,6 +86,9 @@ gettimeoffset (void) ...@@ -86,6 +86,9 @@ gettimeoffset (void)
void void
do_settimeofday (struct timeval *tv) do_settimeofday (struct timeval *tv)
{ {
time_t sec = tv->tv_sec;
long nsec = tv->tv_usec * 1000;
write_seqlock_irq(&xtime_lock); write_seqlock_irq(&xtime_lock);
{ {
/* /*
...@@ -94,22 +97,22 @@ do_settimeofday (struct timeval *tv) ...@@ -94,22 +97,22 @@ do_settimeofday (struct timeval *tv)
* Discover what correction gettimeofday would have done, and then undo * Discover what correction gettimeofday would have done, and then undo
* it! * it!
*/ */
tv->tv_usec -= gettimeoffset(); nsec -= gettimeoffset();
tv->tv_usec -= (jiffies - wall_jiffies) * (1000000 / HZ);
while (tv->tv_usec < 0) { while (nsec < 0) {
tv->tv_usec += 1000000; nsec += 1000000000;
tv->tv_sec--; sec--;
} }
xtime.tv_sec = tv->tv_sec; xtime.tv_sec = sec;
xtime.tv_nsec = 1000 * tv->tv_usec; xtime.tv_nsec = nsec;
time_adjust = 0; /* stop active adjtime() */ time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC; time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT; time_maxerror = NTP_PHASE_LIMIT;
time_esterror = NTP_PHASE_LIMIT; time_esterror = NTP_PHASE_LIMIT;
} }
write_sequnlock_irq(&xtime_lock); write_sequnlock_irq(&xtime_lock);
clock_was_set();
} }
void void
......
...@@ -338,8 +338,8 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) ...@@ -338,8 +338,8 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
fpu_swa_count = 0; fpu_swa_count = 0;
if ((++fpu_swa_count < 5) && !(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) { if ((++fpu_swa_count < 5) && !(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
last_time = jiffies; last_time = jiffies;
printk(KERN_WARNING "%s(%d): floating-point assist fault at ip %016lx\n", printk(KERN_WARNING "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri); current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
} }
exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr, exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
......
...@@ -253,6 +253,11 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char ...@@ -253,6 +253,11 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
struct pt_regs *pt; struct pt_regs *pt;
if ((unsigned) regnum - 1 >= 127) { if ((unsigned) regnum - 1 >= 127) {
if (regnum == 0 && !write) {
*val = 0; /* read r0 always returns 0 */
*nat = 0;
return 0;
}
UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n", UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
__FUNCTION__, regnum); __FUNCTION__, regnum);
return -1; return -1;
...@@ -318,13 +323,8 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char ...@@ -318,13 +323,8 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
} }
} else { } else {
/* access a scratch register */ /* access a scratch register */
if (!info->pt) {
UNW_DPRINT(0, "unwind.%s: no pt-regs; cannot access r%d\n",
__FUNCTION__, regnum);
return -1;
}
pt = get_scratch_regs(info); pt = get_scratch_regs(info);
addr = (unsigned long *) (pt + pt_regs_off(regnum)); addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
if (info->pri_unat_loc) if (info->pri_unat_loc)
nat_addr = info->pri_unat_loc; nat_addr = info->pri_unat_loc;
else else
......
...@@ -87,12 +87,31 @@ ia64_outl (unsigned int val, unsigned long port) ...@@ -87,12 +87,31 @@ ia64_outl (unsigned int val, unsigned long port)
__ia64_outl(val, port); __ia64_outl(val, port);
} }
void unsigned char
ia64_mmiob (void) ia64_readb (void *addr)
{
return __ia64_readb (addr);
}
unsigned short
ia64_readw (void *addr)
{ {
__ia64_mmiob(); return __ia64_readw (addr);
} }
unsigned int
ia64_readl (void *addr)
{
return __ia64_readl (addr);
}
unsigned long
ia64_readq (void *addr)
{
return __ia64_readq (addr);
}
/* define aliases: */ /* define aliases: */
asm (".global __ia64_inb, __ia64_inw, __ia64_inl"); asm (".global __ia64_inb, __ia64_inw, __ia64_inl");
...@@ -105,7 +124,11 @@ asm ("__ia64_outb = ia64_outb"); ...@@ -105,7 +124,11 @@ asm ("__ia64_outb = ia64_outb");
asm ("__ia64_outw = ia64_outw"); asm ("__ia64_outw = ia64_outw");
asm ("__ia64_outl = ia64_outl"); asm ("__ia64_outl = ia64_outl");
asm (".global __ia64_mmiob"); asm (".global __ia64_readb, __ia64_readw, __ia64_readl, __ia64_readq");
asm ("__ia64_mmiob = ia64_mmiob"); asm ("__ia64_readb = ia64_readb");
asm ("__ia64_readw = ia64_readw");
asm ("__ia64_readl = ia64_readl");
asm ("__ia64_readq = ia64_readq");
#endif /* CONFIG_IA64_GENERIC */ #endif /* CONFIG_IA64_GENERIC */
...@@ -473,12 +473,6 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int ...@@ -473,12 +473,6 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction); sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction);
} }
unsigned long
swiotlb_dma_address (struct scatterlist *sg)
{
return sg->dma_address;
}
/* /*
* Return whether the given PCI device DMA address mask can be supported properly. For * Return whether the given PCI device DMA address mask can be supported properly. For
* example, if your device can only drive the low 24-bits during PCI bus mastering, then * example, if your device can only drive the low 24-bits during PCI bus mastering, then
...@@ -497,7 +491,6 @@ EXPORT_SYMBOL(swiotlb_map_sg); ...@@ -497,7 +491,6 @@ EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg); EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single); EXPORT_SYMBOL(swiotlb_sync_single);
EXPORT_SYMBOL(swiotlb_sync_sg); EXPORT_SYMBOL(swiotlb_sync_sg);
EXPORT_SYMBOL(swiotlb_dma_address);
EXPORT_SYMBOL(swiotlb_alloc_consistent); EXPORT_SYMBOL(swiotlb_alloc_consistent);
EXPORT_SYMBOL(swiotlb_free_consistent); EXPORT_SYMBOL(swiotlb_free_consistent);
EXPORT_SYMBOL(swiotlb_pci_dma_supported); EXPORT_SYMBOL(swiotlb_pci_dma_supported);
...@@ -194,6 +194,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re ...@@ -194,6 +194,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
if (current->pid == 1) { if (current->pid == 1) {
yield(); yield();
down_read(&mm->mmap_sem);
goto survive; goto survive;
} }
printk(KERN_CRIT "VM: killing process %s\n", current->comm); printk(KERN_CRIT "VM: killing process %s\n", current->comm);
......
SECTIONS {
/* Group unwind sections into a single section: */
.IA_64.unwind_info : { *(.IA_64.unwind_info*) }
.IA_64.unwind : { *(.IA_64.unwind*) }
/*
* Create place-holder sections to hold the PLTs, GOT, and
* official procedure-descriptors (.opd).
*/
.core.plt : { BYTE(0) }
.init.plt : { BYTE(0) }
.got : { BYTE(0) }
.opd : { BYTE(0) }
}
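/*
 * Flow sketch: linking each module with "ld -r" against this script keeps
 * one-byte .core.plt/.init.plt/.got/.opd placeholder sections in every
 * module object; module_frob_arch_sections() then marks them SHT_NOBITS
 * and sets their real sizes, so the generic loader reserves the space
 * without the object file having to carry it.
 */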
...@@ -49,11 +49,13 @@ struct pci_fixup pcibios_fixups[1]; ...@@ -49,11 +49,13 @@ struct pci_fixup pcibios_fixups[1];
/* /*
* Low-level SAL-based PCI configuration access functions. Note that SAL * Low-level SAL-based PCI configuration access functions. Note that SAL
* calls are already serialized (via sal_lock), so we don't need another * calls are already serialized (via sal_lock), so we don't need another
* synchronization mechanism here. Not using segment number (yet). * synchronization mechanism here.
*/ */
#define PCI_SAL_ADDRESS(bus, dev, fn, reg) \ #define PCI_SAL_ADDRESS(seg, bus, dev, fn, reg) \
((u64)(bus << 16) | (u64)(dev << 11) | (u64)(fn << 8) | (u64)(reg)) ((u64)(seg << 24) | (u64)(bus << 16) | \
(u64)(dev << 11) | (u64)(fn << 8) | (u64)(reg))
static int static int
__pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value) __pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
...@@ -61,10 +63,10 @@ __pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value) ...@@ -61,10 +63,10 @@ __pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
int result = 0; int result = 0;
u64 data = 0; u64 data = 0;
if (!value || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255)) if (!value || (seg > 255) || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
return -EINVAL; return -EINVAL;
result = ia64_sal_pci_config_read(PCI_SAL_ADDRESS(bus, dev, fn, reg), len, &data); result = ia64_sal_pci_config_read(PCI_SAL_ADDRESS(seg, bus, dev, fn, reg), len, &data);
*value = (u32) data; *value = (u32) data;
...@@ -74,24 +76,24 @@ __pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value) ...@@ -74,24 +76,24 @@ __pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
static int static int
__pci_sal_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value) __pci_sal_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value)
{ {
if ((bus > 255) || (dev > 31) || (fn > 7) || (reg > 255)) if ((seg > 255) || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
return -EINVAL; return -EINVAL;
return ia64_sal_pci_config_write(PCI_SAL_ADDRESS(bus, dev, fn, reg), len, value); return ia64_sal_pci_config_write(PCI_SAL_ADDRESS(seg, bus, dev, fn, reg), len, value);
} }
static int static int
pci_sal_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) pci_sal_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{ {
return __pci_sal_read(0, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), return __pci_sal_read(PCI_SEGMENT(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
where, size, value); where, size, value);
} }
static int static int
pci_sal_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) pci_sal_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{ {
return __pci_sal_write(0, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), return __pci_sal_write(PCI_SEGMENT(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
where, size, value); where, size, value);
} }
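/*
 * PCI_SAL_ADDRESS() above just packs the config-space coordinates into a
 * u64.  For example (hypothetical device at seg 0, bus 2, dev 3, fn 1,
 * reg 0x10):
 *
 *	(0 << 24) | (2 << 16) | (3 << 11) | (1 << 8) | 0x10 = 0x21910
 */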
...@@ -114,24 +116,91 @@ pci_acpi_init (void) ...@@ -114,24 +116,91 @@ pci_acpi_init (void)
subsys_initcall(pci_acpi_init); subsys_initcall(pci_acpi_init);
static void __init
pcibios_fixup_resource(struct resource *res, u64 offset)
{
res->start += offset;
res->end += offset;
}
void __init
pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus)
{
int i;
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
if (!dev->resource[i].start)
continue;
if (dev->resource[i].flags & IORESOURCE_MEM)
pcibios_fixup_resource(&dev->resource[i],
PCI_CONTROLLER(dev)->mem_offset);
}
}
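/*
 * Worked example with made-up numbers: if ACPI reports a CPU-to-PCI memory
 * translation offset of 0x80000000 for this controller, a device BAR of
 * 0x40000000 becomes a CPU-visible resource starting at 0xc0000000, which
 * is what the generic resource code then sees.
 */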
/* Called by ACPI when it finds a new root bus. */ /* Called by ACPI when it finds a new root bus. */
static struct pci_controller *
alloc_pci_controller(int seg)
{
struct pci_controller *controller;
controller = kmalloc(sizeof(*controller), GFP_KERNEL);
if (!controller)
return NULL;
memset(controller, 0, sizeof(*controller));
controller->segment = seg;
return controller;
}
struct pci_bus * struct pci_bus *
pcibios_scan_root (int bus) scan_root_bus(int bus, struct pci_ops *ops, void *sysdata)
{ {
struct list_head *list; struct pci_bus *b;
struct pci_bus *pci_bus;
list_for_each(list, &pci_root_buses) {
pci_bus = pci_bus_b(list);
if (pci_bus->number == bus) {
/* Already scanned */
printk("PCI: Bus (%02x) already probed\n", bus);
return pci_bus;
}
}
printk("PCI: Probing PCI hardware on bus (%02x)\n", bus); /*
return pci_scan_bus(bus, pci_root_ops, NULL); * We know this is a new root bus we haven't seen before, so
* scan it, even if we've seen the same bus number in a different
* segment.
*/
b = kmalloc(sizeof(*b), GFP_KERNEL);
if (!b)
return NULL;
memset(b, 0, sizeof(*b));
INIT_LIST_HEAD(&b->children);
INIT_LIST_HEAD(&b->devices);
list_add_tail(&b->node, &pci_root_buses);
b->number = b->secondary = bus;
b->resource[0] = &ioport_resource;
b->resource[1] = &iomem_resource;
b->sysdata = sysdata;
b->ops = ops;
b->subordinate = pci_do_scan_bus(b);
return b;
}
struct pci_bus *
pcibios_scan_root(void *handle, int seg, int bus)
{
struct pci_controller *controller;
u64 base, size, offset;
printk("PCI: Probing PCI hardware on bus (%02x:%02x)\n", seg, bus);
controller = alloc_pci_controller(seg);
if (!controller)
return NULL;
controller->acpi_handle = handle;
acpi_get_addr_space(handle, ACPI_MEMORY_RANGE, &base, &size, &offset);
controller->mem_offset = offset;
return scan_root_bus(bus, pci_root_ops, controller);
} }
/* /*
...@@ -140,6 +209,11 @@ pcibios_scan_root (int bus) ...@@ -140,6 +209,11 @@ pcibios_scan_root (int bus)
void __devinit void __devinit
pcibios_fixup_bus (struct pci_bus *b) pcibios_fixup_bus (struct pci_bus *b)
{ {
struct list_head *ln;
for (ln = b->devices.next; ln != &b->devices; ln = ln->next)
pcibios_fixup_device_resources(pci_dev_b(ln), b);
return; return;
} }
......
#!/bin/sh #!/bin/sh
dir=$(dirname $0) dir=$(dirname $0)
CC=$1 CC=$1
OBJDUMP=$2
$CC -c $dir/check-gas-asm.S $CC -c $dir/check-gas-asm.S
res=$(objdump -r --section .data check-gas-asm.o | fgrep 00004 | tr -s ' ' |cut -f3 -d' ') res=$($OBJDUMP -r --section .data check-gas-asm.o | fgrep 00004 | tr -s ' ' |cut -f3 -d' ')
if [ $res != ".text" ]; then if [ $res != ".text" ]; then
echo buggy echo buggy
else else
......
# arch/ia64/sn/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2003 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn ia64 subplatform
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += kernel/ # io/
# arch/ia64/sn/fakeprom/Makefile
# #
# This file is subject to the terms and conditions of the GNU General Public # This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive # License. See the file "COPYING" in the main directory of this archive
# for more details. # for more details.
# #
# Copyright (c) 2000-2001 Silicon Graphics, Inc. All rights reserved. # Copyright (c) 2000-2003 Silicon Graphics, Inc. All rights reserved.
# #
# Medusa fake PROM support
#
EXTRA_TARGETS := fpromasm.o main.o fw-emu.o fpmem.o klgraph_init.o \
fprom vmlinux.sym
OBJS := $(obj)/fpromasm.o $(obj)/main.o $(obj)/fw-emu.o $(obj)/fpmem.o \
$(obj)/klgraph_init.o
LDFLAGS_fprom = -static -T
obj-y=fpromasm.o main.o fw-emu.o fpmem.o klgraph_init.o .PHONY: fprom
fprom: $(OBJ) fprom: $(obj)/fprom
$(LD) -static -Tfprom.lds -o fprom $(OBJ) $(LIB)
.S.o: $(obj)/fprom: $(src)/fprom.lds $(OBJS) arch/ia64/lib/lib.a FORCE
$(CC) -D__ASSEMBLY__ $(AFLAGS) $(AFLAGS_KERNEL) -c -o $*.o $< $(call if_changed,ld)
.c.o:
$(CC) $(CFLAGS) $(CFLAGS_KERNEL) -c -o $*.o $<
clean: $(obj)/vmlinux.sym: $(src)/make_textsym System.map
rm -f *.o fprom $(src)/make_textsym vmlinux > vmlinux.sym
$(call cmd,cptotop)
/*
* Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
This directory contains the files required to build This directory contains the files required to build
the fake PROM image that is currently being used to the fake PROM image that is currently being used to
boot IA64 kernels running under the SGI Medusa kernel. boot IA64 kernels running under the SGI Medusa kernel.
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/ */
...@@ -168,13 +168,13 @@ GetBankSize(int index, node_memmap_t nmemmap) ...@@ -168,13 +168,13 @@ GetBankSize(int index, node_memmap_t nmemmap)
#endif #endif
void void
build_mem_desc(efi_memory_desc_t *md, int type, long paddr, long numbytes) build_mem_desc(efi_memory_desc_t *md, int type, long paddr, long numbytes, long attr)
{ {
md->type = type; md->type = type;
md->phys_addr = paddr; md->phys_addr = paddr;
md->virt_addr = 0; md->virt_addr = 0;
md->num_pages = numbytes >> 12; md->num_pages = numbytes >> 12;
md->attribute = EFI_MEMORY_WB; md->attribute = attr;
} }
int int
...@@ -236,28 +236,40 @@ build_efi_memmap(void *md, int mdsize) ...@@ -236,28 +236,40 @@ build_efi_memmap(void *md, int mdsize)
*/ */
if (bank == 0) { if (bank == 0) {
if (cnode == 0) { if (cnode == 0) {
hole = 2*1024*1024;
build_mem_desc(md, EFI_PAL_CODE, paddr, hole, EFI_MEMORY_WB|EFI_MEMORY_WB);
numbytes -= hole;
paddr += hole;
count++ ;
md += mdsize;
hole = 1*1024*1024; hole = 1*1024*1024;
build_mem_desc(md, EFI_PAL_CODE, paddr, hole); build_mem_desc(md, EFI_CONVENTIONAL_MEMORY, paddr, hole, EFI_MEMORY_UC);
numbytes -= hole; numbytes -= hole;
paddr += hole; paddr += hole;
count++ ; count++ ;
md += mdsize; md += mdsize;
hole = 3*1024*1024; hole = 1*1024*1024;
build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole); build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole, EFI_MEMORY_WB|EFI_MEMORY_WB);
numbytes -= hole; numbytes -= hole;
paddr += hole; paddr += hole;
count++ ; count++ ;
md += mdsize; md += mdsize;
} else { } else {
hole = PROMRESERVED_SIZE; hole = 2*1024*1024;
build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole); build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole, EFI_MEMORY_WB|EFI_MEMORY_WB);
numbytes -= hole;
paddr += hole;
count++ ;
md += mdsize;
hole = 2*1024*1024;
build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole, EFI_MEMORY_UC);
numbytes -= hole; numbytes -= hole;
paddr += hole; paddr += hole;
count++ ; count++ ;
md += mdsize; md += mdsize;
} }
} }
build_mem_desc(md, EFI_CONVENTIONAL_MEMORY, paddr, numbytes); build_mem_desc(md, EFI_CONVENTIONAL_MEMORY, paddr, numbytes, EFI_MEMORY_WB|EFI_MEMORY_WB);
md += mdsize ; md += mdsize ;
count++ ; count++ ;
......
/*
* Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
OUTPUT_FORMAT("elf64-ia64-little") OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64) OUTPUT_ARCH(ia64)
......
# arch/ia64/sn/io/Makefile
# #
# This file is subject to the terms and conditions of the GNU General Public # This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive # License. See the file "COPYING" in the main directory of this archive
...@@ -5,7 +6,8 @@ ...@@ -5,7 +6,8 @@
# #
# Copyright (C) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. # Copyright (C) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
# #
# Makefile for the sn kernel routines. # Makefile for the sn io routines.
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN EXTRA_CFLAGS := -DLITTLE_ENDIAN
...@@ -19,4 +21,6 @@ obj-$(CONFIG_IA64_SGI_SN) += stubs.o sgi_if.o xswitch.o klgraph_hack.o \ ...@@ -19,4 +21,6 @@ obj-$(CONFIG_IA64_SGI_SN) += stubs.o sgi_if.o xswitch.o klgraph_hack.o \
alenlist.o pci.o pci_dma.o ate_utils.o \ alenlist.o pci.o pci_dma.o ate_utils.o \
ifconfig_net.o io.o ioconfig_bus.o ifconfig_net.o io.o ioconfig_bus.o
obj-$(CONFIG_IA64_SGI_SN2) += sn2/
obj-$(CONFIG_PCIBA) += pciba.o obj-$(CONFIG_PCIBA) += pciba.o
# arch/ia64/sn/io/sn2/Makefile
# #
# This file is subject to the terms and conditions of the GNU General Public # This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive # License. See the file "COPYING" in the main directory of this archive
...@@ -6,14 +7,14 @@ ...@@ -6,14 +7,14 @@
# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved. # Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
# #
# Makefile for the sn2 specific io routines. # Makefile for the sn2 specific io routines.
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += bte_error.o geo_op.o klconflib.o klgraph.o l1.o \ obj-y += pcibr/ bte_error.o geo_op.o klconflib.o klgraph.o l1.o \
l1_command.o ml_iograph.o ml_SN_init.o ml_SN_intr.o module.o \ l1_command.o ml_iograph.o ml_SN_init.o ml_SN_intr.o module.o \
pci_bus_cvlink.o pciio.o pic.o sgi_io_init.o shub.o shuberror.o \ pci_bus_cvlink.o pciio.o pic.o sgi_io_init.o shub.o shuberror.o \
shub_intr.o shubio.o xbow.o xtalk.o shub_intr.o shubio.o xbow.o xtalk.o
obj-$(CONFIG_KDB) += kdba_io.o obj-$(CONFIG_KDB) += kdba_io.o
obj-$(CONFIG_SHUB_1_0_SPECIFIC) += efi-rtc.o obj-$(CONFIG_SHUB_1_0_SPECIFIC) += efi-rtc.o
...@@ -154,7 +154,7 @@ int iobrick_module_get(nasid_t nasid) ...@@ -154,7 +154,7 @@ int iobrick_module_get(nasid_t nasid)
return ret; return ret;
} }
#ifdef CONFIG_PCI
/* /*
* iobrick_module_get_nasid() returns a module_id which has the brick * iobrick_module_get_nasid() returns a module_id which has the brick
* type encoded in bits 15-12, but this is not the true brick type... * type encoded in bits 15-12, but this is not the true brick type...
...@@ -185,7 +185,7 @@ iobrick_type_get_nasid(nasid_t nasid) ...@@ -185,7 +185,7 @@ iobrick_type_get_nasid(nasid_t nasid)
return -1; /* unknown brick */ return -1; /* unknown brick */
} }
#endif
int iobrick_module_get_nasid(nasid_t nasid) int iobrick_module_get_nasid(nasid_t nasid)
{ {
int io_moduleid; int io_moduleid;
......
# arch/ia64/sn/io/sn2/pcibr/Makefile
# #
# This file is subject to the terms and conditions of the GNU General Public # This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive # License. See the file "COPYING" in the main directory of this archive
...@@ -6,6 +7,7 @@ ...@@ -6,6 +7,7 @@
# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved. # Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
# #
# Makefile for the sn2 specific pci bridge routines. # Makefile for the sn2 specific pci bridge routines.
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN EXTRA_CFLAGS := -DLITTLE_ENDIAN
......
# arch/ia64/sn/kernel/Makefile # arch/ia64/sn/kernel/Makefile
# #
# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All Rights Reserved. # This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
# #
# This program is free software; you can redistribute it and/or modify it # Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
# under the terms of version 2 of the GNU General Public License
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
# Mountain View, CA 94043, or:
#
# http://www.sgi.com
#
# For further information regarding this notice, see:
#
# http://oss.sgi.com/projects/GenInfo/NoticeExplan
# #
EXTRA_CFLAGS := -DLITTLE_ENDIAN EXTRA_CFLAGS := -DLITTLE_ENDIAN
.S.s: obj-y := probe.o setup.o sn_asm.o sv.o bte.o iomv.o \
$(CPP) $(AFLAGS) $(AFLAGS_KERNEL) -o $*.s $< irq.o mca.o
.S.o:
$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $*.o $<
obj-y = probe.o setup.o sn_asm.o sv.o bte.o iomv.o obj-$(CONFIG_IA64_SGI_SN2) += sn2/
obj-$(CONFIG_IA64_SGI_SN1) += irq.o mca.o
obj-$(CONFIG_IA64_SGI_SN2) += irq.o mca.o
obj-$(CONFIG_IA64_SGI_AUTOTEST) += llsc4.o misctest.o obj-$(CONFIG_IA64_SGI_AUTOTEST) += llsc4.o misctest.o
obj-$(CONFIG_IA64_GENERIC) += machvec.o obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_MODULES) += sn_ksyms.o obj-$(CONFIG_MODULES) += sn_ksyms.o
......
...@@ -237,7 +237,7 @@ sn_setup(char **cmdline_p) ...@@ -237,7 +237,7 @@ sn_setup(char **cmdline_p)
"%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR); "%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR);
panic("PROM version too old\n"); panic("PROM version too old\n");
} }
#ifdef CONFIG_PCI
#ifdef CONFIG_IA64_SGI_SN2 #ifdef CONFIG_IA64_SGI_SN2
{ {
extern void io_sh_swapper(int, int); extern void io_sh_swapper(int, int);
...@@ -253,7 +253,7 @@ sn_setup(char **cmdline_p) ...@@ -253,7 +253,7 @@ sn_setup(char **cmdline_p)
(void)get_master_baseio_nasid(); (void)get_master_baseio_nasid();
} }
#endif #endif
#endif /* CONFIG_PCI */
status = ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec, &drift); status = ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec, &drift);
if (status != 0 || ticks_per_sec < 100000) { if (status != 0 || ticks_per_sec < 100000) {
printk(KERN_WARNING "unable to determine platform RTC clock frequency, guessing.\n"); printk(KERN_WARNING "unable to determine platform RTC clock frequency, guessing.\n");
...@@ -349,7 +349,7 @@ sn_init_pdas(char **cmdline_p) ...@@ -349,7 +349,7 @@ sn_init_pdas(char **cmdline_p)
for (cnode=0; cnode < numnodes; cnode++) for (cnode=0; cnode < numnodes; cnode++)
memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr, sizeof(nodepdaindr)); memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr, sizeof(nodepdaindr));
#ifdef CONFIG_PCI
/* /*
* Set up IO related platform-dependent nodepda fields. * Set up IO related platform-dependent nodepda fields.
* The following routine actually sets up the hubinfo struct * The following routine actually sets up the hubinfo struct
...@@ -359,6 +359,7 @@ sn_init_pdas(char **cmdline_p) ...@@ -359,6 +359,7 @@ sn_init_pdas(char **cmdline_p)
init_platform_nodepda(nodepdaindr[cnode], cnode); init_platform_nodepda(nodepdaindr[cnode], cnode);
bte_init_node (nodepdaindr[cnode], cnode); bte_init_node (nodepdaindr[cnode], cnode);
} }
#endif
} }
/** /**
......
#
# arch/ia64/sn/kernel/sn2/Makefile # arch/ia64/sn/kernel/sn2/Makefile
# #
# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved. # This file is subject to the terms and conditions of the GNU General Public
# # License. See the file "COPYING" in the main directory of this archive
# This program is free software; you can redistribute it and/or modify it # for more details.
# under the terms of version 2 of the GNU General Public License
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
# #
# You should have received a copy of the GNU General Public # Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
# License along with this program; if not, write the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
# Mountain View, CA 94043, or:
#
# http://www.sgi.com
#
# For further information regarding this notice, see:
# #
# http://oss.sgi.com/projects/GenInfo/NoticeExplan # sn2 specific kernel files
# #
EXTRA_CFLAGS := -DLITTLE_ENDIAN EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += cache.o iomv.o ptc_deadlock.o sn2_smp.o \ obj-y += cache.o iomv.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o
sn_proc_fs.o
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
*
* The generic kernel requires function pointers to these routines, so
* we wrap the inlines from asm/ia64/sn/sn2/io.h here.
*/
#include <linux/config.h>
#include <linux/types.h>
#include <asm/sn/sn2/io.h>
#ifdef CONFIG_IA64_GENERIC
unsigned int
sn_inb (unsigned long port)
{
return __sn_inb(port);
}
unsigned int
sn_inw (unsigned long port)
{
return __sn_inw(port);
}
unsigned int
sn_inl (unsigned long port)
{
return __sn_inl(port);
}
void
sn_outb (unsigned char val, unsigned long port)
{
__sn_outb(val, port);
}
void
sn_outw (unsigned short val, unsigned long port)
{
__sn_outw(val, port);
}
void
sn_outl (unsigned int val, unsigned long port)
{
__sn_outl(val, port);
}
unsigned char
sn_readb (void *addr)
{
return __sn_readb (addr);
}
unsigned short
sn_readw (void *addr)
{
return __sn_readw (addr);
}
unsigned int
sn_readl (void *addr)
{
return __sn_readl (addr);
}
unsigned long
sn_readq (void *addr)
{
return __sn_readq (addr);
}
/* define aliases: */
asm (".global __sn_inb, __sn_inw, __sn_inl");
asm ("__sn_inb = sn_inb");
asm ("__sn_inw = sn_inw");
asm ("__sn_inl = sn_inl");
asm (".global __sn_outb, __sn_outw, __sn_outl");
asm ("__sn_outb = sn_outb");
asm ("__sn_outw = sn_outw");
asm ("__sn_outl = sn_outl");
asm (".global __sn_readb, __sn_readw, __sn_readl, __sn_readq");
asm ("__sn_readb = sn_readb");
asm ("__sn_readw = sn_readw");
asm ("__sn_readl = sn_readl");
asm ("__sn_readq = sn_readq");
#endif /* CONFIG_IA64_GENERIC */
...@@ -3,10 +3,11 @@ ...@@ -3,10 +3,11 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/ */
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/sn/simulator.h> #include <asm/sn/simulator.h>
...@@ -46,8 +47,10 @@ sn_io_addr(unsigned long port) ...@@ -46,8 +47,10 @@ sn_io_addr(unsigned long port)
} }
} }
EXPORT_SYMBOL(sn_io_addr);
/** /**
* sn2_mmiob - I/O space memory barrier * sn_mmiob - I/O space memory barrier
* *
* Acts as a memory mapped I/O barrier for platforms that queue writes to * Acts as a memory mapped I/O barrier for platforms that queue writes to
* I/O space. This ensures that subsequent writes to I/O space arrive after * I/O space. This ensures that subsequent writes to I/O space arrive after
...@@ -60,9 +63,9 @@ sn_io_addr(unsigned long port) ...@@ -60,9 +63,9 @@ sn_io_addr(unsigned long port)
* *
*/ */
void void
sn2_mmiob (void) sn_mmiob (void)
{ {
while ((((volatile unsigned long) (*pda->pio_write_status_addr)) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) != while ((((volatile unsigned long) (*pda.pio_write_status_addr)) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK)
udelay(1); udelay(1);
} }
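/*
 * Hypothetical driver fragment ('dev', 'doorbell' and 'cmd' are made-up
 * names): guarantee a posted write has left the shub before releasing the
 * lock that orders device commands.
 *
 *	writel(cmd, dev->doorbell);
 *	sn_mmiob();	// spin until the pending PIO writes have drained
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */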
/*
* ia64/platform/hp/common/hp_acpi.h
*
* Copyright (C) 2003 Hewlett-Packard
* Copyright (C) Alex Williamson
*
* Vendor-specific extensions to ACPI. The HP-specific extensions are also used by NEC.
*/
#ifndef _ASM_IA64_ACPI_EXT_H
#define _ASM_IA64_ACPI_EXT_H
#include <linux/types.h>
#define HP_CCSR_LENGTH 0x21
#define HP_CCSR_TYPE 0x2
#define HP_CCSR_GUID EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, \
0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
struct acpi_hp_vendor_long {
u8 guid_id;
u8 guid[16];
u8 csr_base[8];
u8 csr_length[8];
};
extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
extern acpi_status acpi_get_crs (acpi_handle, struct acpi_buffer *);
extern struct acpi_resource *acpi_get_crs_next (struct acpi_buffer *, int *);
extern union acpi_resource_data *acpi_get_crs_type (struct acpi_buffer *, int *, int);
extern void acpi_dispose_crs (struct acpi_buffer *);
#endif /* _ASM_IA64_ACPI_EXT_H */
...@@ -100,7 +100,9 @@ const char *acpi_get_sysname (void); ...@@ -100,7 +100,9 @@ const char *acpi_get_sysname (void);
int acpi_request_vector (u32 int_type); int acpi_request_vector (u32 int_type);
int acpi_get_prt (struct pci_vector_struct **vectors, int *count); int acpi_get_prt (struct pci_vector_struct **vectors, int *count);
int acpi_get_interrupt_model (int *type); int acpi_get_interrupt_model (int *type);
int acpi_register_irq (u32 gsi, u32 polarity, u32 trigger);
int acpi_irq_to_vector (u32 irq); int acpi_irq_to_vector (u32 irq);
int acpi_get_addr_space (void *obj, u8 type, u64 *base, u64 *length,u64 *tra);
#ifdef CONFIG_ACPI_NUMA #ifdef CONFIG_ACPI_NUMA
#include <asm/numa.h> #include <asm/numa.h>
......
...@@ -55,6 +55,13 @@ ia64_atomic_sub (int i, atomic_t *v) ...@@ -55,6 +55,13 @@ ia64_atomic_sub (int i, atomic_t *v)
return new; return new;
} }
#define atomic_add_return(i,v) \
((__builtin_constant_p(i) && \
( (i == 1) || (i == 4) || (i == 8) || (i == 16) \
|| (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
? ia64_fetch_and_add(i, &(v)->counter) \
: ia64_atomic_add(i, v))
/* /*
* Atomically add I to V and return TRUE if the resulting value is * Atomically add I to V and return TRUE if the resulting value is
* negative. * negative.
...@@ -62,15 +69,9 @@ ia64_atomic_sub (int i, atomic_t *v) ...@@ -62,15 +69,9 @@ ia64_atomic_sub (int i, atomic_t *v)
static __inline__ int static __inline__ int
atomic_add_negative (int i, atomic_t *v) atomic_add_negative (int i, atomic_t *v)
{ {
return ia64_atomic_add(i, v) < 0; return atomic_add_return(i, v) < 0;
} }
#define atomic_add_return(i,v) \
((__builtin_constant_p(i) && \
( (i == 1) || (i == 4) || (i == 8) || (i == 16) \
|| (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
? ia64_fetch_and_add(i, &(v)->counter) \
: ia64_atomic_add(i, v))
#define atomic_sub_return(i,v) \ #define atomic_sub_return(i,v) \
((__builtin_constant_p(i) && \ ((__builtin_constant_p(i) && \
......
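The hunk above moves atomic_add_return() ahead of atomic_add_negative() so the
latter can expand it. The macro emits ia64's fetchadd instruction only for the
immediates the instruction can encode, falling back to the cmpxchg-based
ia64_atomic_add() otherwise; for instance (a sketch):

/*
 *	atomic_add_return(4, &v)  -> a single fetchadd (4 is encodable)
 *	atomic_add_return(3, &v)  -> ia64_atomic_add(): cmpxchg retry loop,
 *				     since 3 is not a legal fetchadd immediate
 */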
...@@ -275,7 +275,7 @@ __test_and_change_bit (int nr, void *addr) ...@@ -275,7 +275,7 @@ __test_and_change_bit (int nr, void *addr)
} }
static __inline__ int static __inline__ int
test_bit (int nr, volatile void *addr) test_bit (int nr, const volatile void *addr)
{ {
return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31)); return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
} }
......
...@@ -27,6 +27,11 @@ typedef s32 compat_daddr_t; ...@@ -27,6 +27,11 @@ typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t; typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t; typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
struct compat_timespec { struct compat_timespec {
compat_time_t tv_sec; compat_time_t tv_sec;
s32 tv_nsec; s32 tv_nsec;
...@@ -68,6 +73,22 @@ struct compat_flock { ...@@ -68,6 +73,22 @@ struct compat_flock {
compat_pid_t l_pid; compat_pid_t l_pid;
}; };
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
/*
* IA32 uses 4 byte alignment for 64 bit quantities,
* so we need to pack this structure.
*/
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
} __attribute__((packed));
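/*
 * Layout sketch showing why the packed attribute matters: ia32 aligns
 * 64-bit quantities to 4 bytes, giving
 *	l_type @0, l_whence @2, l_start @4, l_len @12, l_pid @20  (24 bytes)
 * whereas natural ia64 alignment would pad l_start out to offset 8 and
 * grow the struct to 32 bytes, misreading every field a 32-bit task
 * passes in.
 */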
struct compat_statfs { struct compat_statfs {
int f_type; int f_type;
int f_bsize; int f_bsize;
...@@ -88,4 +109,20 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */ ...@@ -88,4 +109,20 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */
typedef u32 compat_sigset_word; typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not be used for syscall parameters;
* just declare them as pointers, because the syscall entry code will have appropriately
* converted them already.
*/
typedef u32 compat_uptr_t;
static inline void *
compat_ptr (compat_uptr_t uptr)
{
return (void *) (unsigned long) uptr;
}
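/*
 * Hypothetical user of the helper above -- a compat handler that receives
 * a 32-bit user pointer (the function and parameter names are made up):
 */
static inline long
example_compat_copy_in (compat_uptr_t uptr32, void *dst, unsigned long len)
{
	void *uptr = compat_ptr(uptr32);	/* widen 32 -> 64 bits */

	return copy_from_user(dst, uptr, len) ? -EFAULT : 0;
}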
#endif /* _ASM_IA64_COMPAT_H */ #endif /* _ASM_IA64_COMPAT_H */
...@@ -78,9 +78,6 @@ struct flock { ...@@ -78,9 +78,6 @@ struct flock {
pid_t l_pid; pid_t l_pid;
}; };
#ifdef __KERNEL__
# define flock64 flock
#endif
#define F_LINUX_SPECIFIC_BASE 1024 #define F_LINUX_SPECIFIC_BASE 1024
#endif /* _ASM_IA64_FCNTL_H */ #endif /* _ASM_IA64_FCNTL_H */
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define _ASM_IA64_FPU_H #define _ASM_IA64_FPU_H
/* /*
* Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co * Copyright (C) 1998, 1999, 2002, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
...@@ -57,8 +57,9 @@ ...@@ -57,8 +57,9 @@
struct ia64_fpreg { struct ia64_fpreg {
union { union {
unsigned long bits[2]; unsigned long bits[2];
long double __dummy; /* force 16-byte alignment */
} u; } u;
} __attribute__ ((aligned (16))); };
# endif /* __ASSEMBLY__ */ # endif /* __ASSEMBLY__ */
......
...@@ -18,10 +18,6 @@ ...@@ -18,10 +18,6 @@
#define IA32_PAGE_ALIGN(addr) (((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK) #define IA32_PAGE_ALIGN(addr) (((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK)
#define IA32_CLOCKS_PER_SEC 100 /* Cast in stone for IA32 Linux */ #define IA32_CLOCKS_PER_SEC 100 /* Cast in stone for IA32 Linux */
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
/* sigcontext.h */ /* sigcontext.h */
/* /*
* As documented in the iBCS2 standard.. * As documented in the iBCS2 standard..
...@@ -214,8 +210,11 @@ typedef struct siginfo32 { ...@@ -214,8 +210,11 @@ typedef struct siginfo32 {
/* POSIX.1b timers */ /* POSIX.1b timers */
struct { struct {
unsigned int _timer1; timer_t _tid; /* timer id */
unsigned int _timer2; int _overrun; /* overrun count */
char _pad[sizeof(unsigned int) - sizeof(int)];
sigval_t32 _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer; } _timer;
/* POSIX.1b signals */ /* POSIX.1b signals */
......
...@@ -46,14 +46,10 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); ...@@ -46,14 +46,10 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v))); \ IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v))); \
else if ((i) == -4) \ else if ((i) == -4) \
IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v))); \ IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v))); \
else if ((i) == -2) \
IA64_FETCHADD(_tmp, _v, -2, sizeof(*(v))); \
else if ((i) == -1) \ else if ((i) == -1) \
IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v))); \ IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v))); \
else if ((i) == 1) \ else if ((i) == 1) \
IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v))); \ IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v))); \
else if ((i) == 2) \
IA64_FETCHADD(_tmp, _v, 2, sizeof(*(v))); \
else if ((i) == 4) \ else if ((i) == 4) \
IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v))); \ IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v))); \
else if ((i) == 8) \ else if ((i) == 8) \
......
...@@ -69,22 +69,6 @@ phys_to_virt (unsigned long address) ...@@ -69,22 +69,6 @@ phys_to_virt (unsigned long address)
*/ */
#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory") #define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
/**
* __ia64_mmiob - I/O space memory barrier
*
* Acts as a memory mapped I/O barrier for platforms that queue writes to
* I/O space. This ensures that subsequent writes to I/O space arrive after
* all previous writes. For most ia64 platforms, this is a simple
* 'mf.a' instruction, so the address is ignored. For other platforms,
* the address may be required to ensure proper ordering of writes to I/O space
* since a 'dummy' read might be necessary to barrier the write operation.
*/
static inline void
__ia64_mmiob (void)
{
__ia64_mf_a();
}
static inline const unsigned long static inline const unsigned long
__ia64_get_io_port_base (void) __ia64_get_io_port_base (void)
{ {
...@@ -287,7 +271,6 @@ __outsl (unsigned long port, void *src, unsigned long count) ...@@ -287,7 +271,6 @@ __outsl (unsigned long port, void *src, unsigned long count)
#define __outb platform_outb #define __outb platform_outb
#define __outw platform_outw #define __outw platform_outw
#define __outl platform_outl #define __outl platform_outl
#define __mmiob platform_mmiob
#define inb(p) __inb(p) #define inb(p) __inb(p)
#define inw(p) __inw(p) #define inw(p) __inw(p)
...@@ -301,31 +284,35 @@ __outsl (unsigned long port, void *src, unsigned long count) ...@@ -301,31 +284,35 @@ __outsl (unsigned long port, void *src, unsigned long count)
#define outsb(p,s,c) __outsb(p,s,c) #define outsb(p,s,c) __outsb(p,s,c)
#define outsw(p,s,c) __outsw(p,s,c) #define outsw(p,s,c) __outsw(p,s,c)
#define outsl(p,s,c) __outsl(p,s,c) #define outsl(p,s,c) __outsl(p,s,c)
#define mmiob() __mmiob()
/* /*
* The addresses passed to these functions are ioremap()ped already. * The addresses passed to these functions are ioremap()ped already.
*
* We need these to be machine vectors since some platforms don't provide
* DMA coherence via PIO reads (PCI drivers and the spec imply that this is
* a good idea). Writes are ok though for all existing ia64 platforms (and
* hopefully it'll stay that way).
*/ */
static inline unsigned char static inline unsigned char
__readb (void *addr) __ia64_readb (void *addr)
{ {
return *(volatile unsigned char *)addr; return *(volatile unsigned char *)addr;
} }
static inline unsigned short static inline unsigned short
__readw (void *addr) __ia64_readw (void *addr)
{ {
return *(volatile unsigned short *)addr; return *(volatile unsigned short *)addr;
} }
static inline unsigned int static inline unsigned int
__readl (void *addr) __ia64_readl (void *addr)
{ {
return *(volatile unsigned int *) addr; return *(volatile unsigned int *) addr;
} }
static inline unsigned long static inline unsigned long
__readq (void *addr) __ia64_readq (void *addr)
{ {
return *(volatile unsigned long *) addr; return *(volatile unsigned long *) addr;
} }
...@@ -354,6 +341,11 @@ __writeq (unsigned long val, void *addr) ...@@ -354,6 +341,11 @@ __writeq (unsigned long val, void *addr)
*(volatile unsigned long *) addr = val; *(volatile unsigned long *) addr = val;
} }
#define __readb platform_readb
#define __readw platform_readw
#define __readl platform_readl
#define __readq platform_readq
#define readb(a) __readb((void *)(a)) #define readb(a) __readb((void *)(a))
#define readw(a) __readw((void *)(a)) #define readw(a) __readw((void *)(a))
#define readl(a) __readl((void *)(a)) #define readl(a) __readl((void *)(a))
......
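The comment in this hunk is the key to the readX machine-vector change: in the PCI model, a PIO read from a device flushes that device's posted DMA writes to host memory, and on platforms such as SN the readX path has to do extra work (sn_dma_flush, further down) to provide that guarantee. A hedged driver-style fragment, with all names invented, showing the pattern the comment has in mind:

	/* The device DMAs completion records into dev->ring, then raises a
	 * bit in its CSR.  The machine-vector readl() is what makes the
	 * DMA'd data visible before we look at it. */
	status = readl(dev->csr);		/* flushes posted DMA writes */
	if (status & RING_DONE)
		handle_completion(dev->ring);	/* safe: DMA data has landed */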
...@@ -43,7 +43,6 @@ typedef int ia64_mv_pci_map_sg (struct pci_dev *, struct scatterlist *, int, int ...@@ -43,7 +43,6 @@ typedef int ia64_mv_pci_map_sg (struct pci_dev *, struct scatterlist *, int, int
typedef void ia64_mv_pci_unmap_sg (struct pci_dev *, struct scatterlist *, int, int); typedef void ia64_mv_pci_unmap_sg (struct pci_dev *, struct scatterlist *, int, int);
typedef void ia64_mv_pci_dma_sync_single (struct pci_dev *, dma_addr_t, size_t, int); typedef void ia64_mv_pci_dma_sync_single (struct pci_dev *, dma_addr_t, size_t, int);
typedef void ia64_mv_pci_dma_sync_sg (struct pci_dev *, struct scatterlist *, int, int); typedef void ia64_mv_pci_dma_sync_sg (struct pci_dev *, struct scatterlist *, int, int);
typedef unsigned long ia64_mv_pci_dma_address (struct scatterlist *);
typedef int ia64_mv_pci_dma_supported (struct pci_dev *, u64); typedef int ia64_mv_pci_dma_supported (struct pci_dev *, u64);
/* /*
...@@ -61,7 +60,10 @@ typedef unsigned int ia64_mv_inl_t (unsigned long); ...@@ -61,7 +60,10 @@ typedef unsigned int ia64_mv_inl_t (unsigned long);
typedef void ia64_mv_outb_t (unsigned char, unsigned long); typedef void ia64_mv_outb_t (unsigned char, unsigned long);
typedef void ia64_mv_outw_t (unsigned short, unsigned long); typedef void ia64_mv_outw_t (unsigned short, unsigned long);
typedef void ia64_mv_outl_t (unsigned int, unsigned long); typedef void ia64_mv_outl_t (unsigned int, unsigned long);
typedef void ia64_mv_mmiob_t (void); typedef unsigned char ia64_mv_readb_t (void *);
typedef unsigned short ia64_mv_readw_t (void *);
typedef unsigned int ia64_mv_readl_t (void *);
typedef unsigned long ia64_mv_readq_t (void *);
extern void machvec_noop (void); extern void machvec_noop (void);
...@@ -99,7 +101,6 @@ extern void machvec_noop (void); ...@@ -99,7 +101,6 @@ extern void machvec_noop (void);
# define platform_pci_unmap_sg ia64_mv.unmap_sg # define platform_pci_unmap_sg ia64_mv.unmap_sg
# define platform_pci_dma_sync_single ia64_mv.sync_single # define platform_pci_dma_sync_single ia64_mv.sync_single
# define platform_pci_dma_sync_sg ia64_mv.sync_sg # define platform_pci_dma_sync_sg ia64_mv.sync_sg
# define platform_pci_dma_address ia64_mv.dma_address
# define platform_pci_dma_supported ia64_mv.dma_supported # define platform_pci_dma_supported ia64_mv.dma_supported
# define platform_irq_desc ia64_mv.irq_desc # define platform_irq_desc ia64_mv.irq_desc
# define platform_irq_to_vector ia64_mv.irq_to_vector # define platform_irq_to_vector ia64_mv.irq_to_vector
...@@ -110,7 +111,10 @@ extern void machvec_noop (void); ...@@ -110,7 +111,10 @@ extern void machvec_noop (void);
# define platform_outb ia64_mv.outb # define platform_outb ia64_mv.outb
# define platform_outw ia64_mv.outw # define platform_outw ia64_mv.outw
# define platform_outl ia64_mv.outl # define platform_outl ia64_mv.outl
# define platform_mmiob ia64_mv.mmiob # define platform_readb ia64_mv.readb
# define platform_readw ia64_mv.readw
# define platform_readl ia64_mv.readl
# define platform_readq ia64_mv.readq
# endif # endif
/* __attribute__((__aligned__(16))) is required to make size of the /* __attribute__((__aligned__(16))) is required to make size of the
...@@ -138,7 +142,6 @@ struct ia64_machine_vector { ...@@ -138,7 +142,6 @@ struct ia64_machine_vector {
ia64_mv_pci_unmap_sg *unmap_sg; ia64_mv_pci_unmap_sg *unmap_sg;
ia64_mv_pci_dma_sync_single *sync_single; ia64_mv_pci_dma_sync_single *sync_single;
ia64_mv_pci_dma_sync_sg *sync_sg; ia64_mv_pci_dma_sync_sg *sync_sg;
ia64_mv_pci_dma_address *dma_address;
ia64_mv_pci_dma_supported *dma_supported; ia64_mv_pci_dma_supported *dma_supported;
ia64_mv_irq_desc *irq_desc; ia64_mv_irq_desc *irq_desc;
ia64_mv_irq_to_vector *irq_to_vector; ia64_mv_irq_to_vector *irq_to_vector;
...@@ -149,8 +152,11 @@ struct ia64_machine_vector { ...@@ -149,8 +152,11 @@ struct ia64_machine_vector {
ia64_mv_outb_t *outb; ia64_mv_outb_t *outb;
ia64_mv_outw_t *outw; ia64_mv_outw_t *outw;
ia64_mv_outl_t *outl; ia64_mv_outl_t *outl;
ia64_mv_mmiob_t *mmiob; ia64_mv_readb_t *readb;
} __attribute__((__aligned__(16))); ia64_mv_readw_t *readw;
ia64_mv_readl_t *readl;
ia64_mv_readq_t *readq;
};
#define MACHVEC_INIT(name) \ #define MACHVEC_INIT(name) \
{ \ { \
...@@ -173,7 +179,6 @@ struct ia64_machine_vector { ...@@ -173,7 +179,6 @@ struct ia64_machine_vector {
platform_pci_unmap_sg, \ platform_pci_unmap_sg, \
platform_pci_dma_sync_single, \ platform_pci_dma_sync_single, \
platform_pci_dma_sync_sg, \ platform_pci_dma_sync_sg, \
platform_pci_dma_address, \
platform_pci_dma_supported, \ platform_pci_dma_supported, \
platform_irq_desc, \ platform_irq_desc, \
platform_irq_to_vector, \ platform_irq_to_vector, \
...@@ -184,7 +189,10 @@ struct ia64_machine_vector { ...@@ -184,7 +189,10 @@ struct ia64_machine_vector {
platform_outb, \ platform_outb, \
platform_outw, \ platform_outw, \
platform_outl, \ platform_outl, \
platform_mmiob \ platform_readb, \
platform_readw, \
platform_readl, \
platform_readq, \
} }
extern struct ia64_machine_vector ia64_mv; extern struct ia64_machine_vector ia64_mv;
...@@ -206,7 +214,6 @@ extern ia64_mv_pci_map_sg swiotlb_map_sg; ...@@ -206,7 +214,6 @@ extern ia64_mv_pci_map_sg swiotlb_map_sg;
extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg; extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg;
extern ia64_mv_pci_dma_sync_single swiotlb_sync_single; extern ia64_mv_pci_dma_sync_single swiotlb_sync_single;
extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg; extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg;
extern ia64_mv_pci_dma_address swiotlb_dma_address;
extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported; extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
/* /*
...@@ -267,9 +274,6 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported; ...@@ -267,9 +274,6 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
#ifndef platform_pci_dma_sync_sg #ifndef platform_pci_dma_sync_sg
# define platform_pci_dma_sync_sg swiotlb_sync_sg # define platform_pci_dma_sync_sg swiotlb_sync_sg
#endif #endif
#ifndef platform_pci_dma_address
# define platform_pci_dma_address swiotlb_dma_address
#endif
#ifndef platform_pci_dma_supported #ifndef platform_pci_dma_supported
# define platform_pci_dma_supported swiotlb_pci_dma_supported # define platform_pci_dma_supported swiotlb_pci_dma_supported
#endif #endif
...@@ -300,8 +304,17 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported; ...@@ -300,8 +304,17 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
#ifndef platform_outl #ifndef platform_outl
# define platform_outl __ia64_outl # define platform_outl __ia64_outl
#endif #endif
#ifndef platform_mmiob #ifndef platform_readb
# define platform_mmiob __ia64_mmiob # define platform_readb __ia64_readb
#endif
#ifndef platform_readw
# define platform_readw __ia64_readw
#endif
#ifndef platform_readl
# define platform_readl __ia64_readl
#endif
#ifndef platform_readq
# define platform_readq __ia64_readq
#endif #endif
#endif /* _ASM_IA64_MACHVEC_H */ #endif /* _ASM_IA64_MACHVEC_H */
...@@ -8,7 +8,6 @@ extern ia64_mv_pci_map_single sba_map_single; ...@@ -8,7 +8,6 @@ extern ia64_mv_pci_map_single sba_map_single;
extern ia64_mv_pci_unmap_single sba_unmap_single; extern ia64_mv_pci_unmap_single sba_unmap_single;
extern ia64_mv_pci_map_sg sba_map_sg; extern ia64_mv_pci_map_sg sba_map_sg;
extern ia64_mv_pci_unmap_sg sba_unmap_sg; extern ia64_mv_pci_unmap_sg sba_unmap_sg;
extern ia64_mv_pci_dma_address sba_dma_address;
extern ia64_mv_pci_dma_supported sba_dma_supported; extern ia64_mv_pci_dma_supported sba_dma_supported;
/* /*
...@@ -29,7 +28,6 @@ extern ia64_mv_pci_dma_supported sba_dma_supported; ...@@ -29,7 +28,6 @@ extern ia64_mv_pci_dma_supported sba_dma_supported;
#define platform_pci_unmap_sg sba_unmap_sg #define platform_pci_unmap_sg sba_unmap_sg
#define platform_pci_dma_sync_single ((ia64_mv_pci_dma_sync_single *) machvec_noop) #define platform_pci_dma_sync_single ((ia64_mv_pci_dma_sync_single *) machvec_noop)
#define platform_pci_dma_sync_sg ((ia64_mv_pci_dma_sync_sg *) machvec_noop) #define platform_pci_dma_sync_sg ((ia64_mv_pci_dma_sync_sg *) machvec_noop)
#define platform_pci_dma_address sba_dma_address
#define platform_pci_dma_supported sba_dma_supported #define platform_pci_dma_supported sba_dma_supported
#endif /* _ASM_IA64_MACHVEC_HPZX1_h */ #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
...@@ -16,7 +16,6 @@ extern ia64_mv_inl_t __ia64_inl; ...@@ -16,7 +16,6 @@ extern ia64_mv_inl_t __ia64_inl;
extern ia64_mv_outb_t __ia64_outb; extern ia64_mv_outb_t __ia64_outb;
extern ia64_mv_outw_t __ia64_outw; extern ia64_mv_outw_t __ia64_outw;
extern ia64_mv_outl_t __ia64_outl; extern ia64_mv_outl_t __ia64_outl;
extern ia64_mv_mmiob_t __ia64_mmiob;
#define MACHVEC_HELPER(name) \ #define MACHVEC_HELPER(name) \
struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \ struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
......
...@@ -44,7 +44,6 @@ extern ia64_mv_inl_t sn1_inl; ...@@ -44,7 +44,6 @@ extern ia64_mv_inl_t sn1_inl;
extern ia64_mv_outb_t sn1_outb; extern ia64_mv_outb_t sn1_outb;
extern ia64_mv_outw_t sn1_outw; extern ia64_mv_outw_t sn1_outw;
extern ia64_mv_outl_t sn1_outl; extern ia64_mv_outl_t sn1_outl;
extern ia64_mv_mmiob_t sn_mmiob;
extern ia64_mv_pci_alloc_consistent sn1_pci_alloc_consistent; extern ia64_mv_pci_alloc_consistent sn1_pci_alloc_consistent;
extern ia64_mv_pci_free_consistent sn1_pci_free_consistent; extern ia64_mv_pci_free_consistent sn1_pci_free_consistent;
extern ia64_mv_pci_map_single sn1_pci_map_single; extern ia64_mv_pci_map_single sn1_pci_map_single;
...@@ -53,7 +52,6 @@ extern ia64_mv_pci_map_sg sn1_pci_map_sg; ...@@ -53,7 +52,6 @@ extern ia64_mv_pci_map_sg sn1_pci_map_sg;
extern ia64_mv_pci_unmap_sg sn1_pci_unmap_sg; extern ia64_mv_pci_unmap_sg sn1_pci_unmap_sg;
extern ia64_mv_pci_dma_sync_single sn1_pci_dma_sync_single; extern ia64_mv_pci_dma_sync_single sn1_pci_dma_sync_single;
extern ia64_mv_pci_dma_sync_sg sn1_pci_dma_sync_sg; extern ia64_mv_pci_dma_sync_sg sn1_pci_dma_sync_sg;
extern ia64_mv_pci_dma_address sn1_dma_address;
/* /*
* This stuff has dual use! * This stuff has dual use!
...@@ -74,7 +72,6 @@ extern ia64_mv_pci_dma_address sn1_dma_address; ...@@ -74,7 +72,6 @@ extern ia64_mv_pci_dma_address sn1_dma_address;
#define platform_outb sn1_outb #define platform_outb sn1_outb
#define platform_outw sn1_outw #define platform_outw sn1_outw
#define platform_outl sn1_outl #define platform_outl sn1_outl
#define platform_mmiob sn_mmiob
#define platform_pci_dma_init machvec_noop #define platform_pci_dma_init machvec_noop
#define platform_pci_alloc_consistent sn1_pci_alloc_consistent #define platform_pci_alloc_consistent sn1_pci_alloc_consistent
#define platform_pci_free_consistent sn1_pci_free_consistent #define platform_pci_free_consistent sn1_pci_free_consistent
...@@ -84,6 +81,5 @@ extern ia64_mv_pci_dma_address sn1_dma_address; ...@@ -84,6 +81,5 @@ extern ia64_mv_pci_dma_address sn1_dma_address;
#define platform_pci_unmap_sg sn1_pci_unmap_sg #define platform_pci_unmap_sg sn1_pci_unmap_sg
#define platform_pci_dma_sync_single sn1_pci_dma_sync_single #define platform_pci_dma_sync_single sn1_pci_dma_sync_single
#define platform_pci_dma_sync_sg sn1_pci_dma_sync_sg #define platform_pci_dma_sync_sg sn1_pci_dma_sync_sg
#define platform_pci_dma_address sn1_dma_address
#endif /* _ASM_IA64_MACHVEC_SN1_h */ #endif /* _ASM_IA64_MACHVEC_SN1_h */
/* /*
* Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved. * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License * under the terms of version 2 of the GNU General Public License
...@@ -41,13 +41,16 @@ extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge; ...@@ -41,13 +41,16 @@ extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
extern ia64_mv_irq_desc sn_irq_desc; extern ia64_mv_irq_desc sn_irq_desc;
extern ia64_mv_irq_to_vector sn_irq_to_vector; extern ia64_mv_irq_to_vector sn_irq_to_vector;
extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq; extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
extern ia64_mv_inb_t sn_inb; extern ia64_mv_inb_t __sn_inb;
extern ia64_mv_inw_t sn_inw; extern ia64_mv_inw_t __sn_inw;
extern ia64_mv_inl_t sn_inl; extern ia64_mv_inl_t __sn_inl;
extern ia64_mv_outb_t sn_outb; extern ia64_mv_outb_t __sn_outb;
extern ia64_mv_outw_t sn_outw; extern ia64_mv_outw_t __sn_outw;
extern ia64_mv_outl_t sn_outl; extern ia64_mv_outl_t __sn_outl;
extern ia64_mv_mmiob_t sn2_mmiob; extern ia64_mv_readb_t __sn_readb;
extern ia64_mv_readw_t __sn_readw;
extern ia64_mv_readl_t __sn_readl;
extern ia64_mv_readq_t __sn_readq;
extern ia64_mv_pci_alloc_consistent sn_pci_alloc_consistent; extern ia64_mv_pci_alloc_consistent sn_pci_alloc_consistent;
extern ia64_mv_pci_free_consistent sn_pci_free_consistent; extern ia64_mv_pci_free_consistent sn_pci_free_consistent;
extern ia64_mv_pci_map_single sn_pci_map_single; extern ia64_mv_pci_map_single sn_pci_map_single;
...@@ -56,7 +59,6 @@ extern ia64_mv_pci_map_sg sn_pci_map_sg; ...@@ -56,7 +59,6 @@ extern ia64_mv_pci_map_sg sn_pci_map_sg;
extern ia64_mv_pci_unmap_sg sn_pci_unmap_sg; extern ia64_mv_pci_unmap_sg sn_pci_unmap_sg;
extern ia64_mv_pci_dma_sync_single sn_pci_dma_sync_single; extern ia64_mv_pci_dma_sync_single sn_pci_dma_sync_single;
extern ia64_mv_pci_dma_sync_sg sn_pci_dma_sync_sg; extern ia64_mv_pci_dma_sync_sg sn_pci_dma_sync_sg;
extern ia64_mv_pci_dma_address sn_dma_address;
extern ia64_mv_pci_dma_supported sn_pci_dma_supported; extern ia64_mv_pci_dma_supported sn_pci_dma_supported;
/* /*
...@@ -72,13 +74,17 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported; ...@@ -72,13 +74,17 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported;
#define platform_irq_init sn_irq_init #define platform_irq_init sn_irq_init
#define platform_send_ipi sn2_send_IPI #define platform_send_ipi sn2_send_IPI
#define platform_global_tlb_purge sn2_global_tlb_purge #define platform_global_tlb_purge sn2_global_tlb_purge
#define platform_inb sn_inb #define platform_pci_fixup sn_pci_fixup
#define platform_inw sn_inw #define platform_inb __sn_inb
#define platform_inl sn_inl #define platform_inw __sn_inw
#define platform_outb sn_outb #define platform_inl __sn_inl
#define platform_outw sn_outw #define platform_outb __sn_outb
#define platform_outl sn_outl #define platform_outw __sn_outw
#define platform_mmiob sn2_mmiob #define platform_outl __sn_outl
#define platform_readb __sn_readb
#define platform_readw __sn_readw
#define platform_readl __sn_readl
#define platform_readq __sn_readq
#define platform_irq_desc sn_irq_desc #define platform_irq_desc sn_irq_desc
#define platform_irq_to_vector sn_irq_to_vector #define platform_irq_to_vector sn_irq_to_vector
#define platform_local_vector_to_irq sn_local_vector_to_irq #define platform_local_vector_to_irq sn_local_vector_to_irq
...@@ -91,7 +97,6 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported; ...@@ -91,7 +97,6 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported;
#define platform_pci_unmap_sg sn_pci_unmap_sg #define platform_pci_unmap_sg sn_pci_unmap_sg
#define platform_pci_dma_sync_single sn_pci_dma_sync_single #define platform_pci_dma_sync_single sn_pci_dma_sync_single
#define platform_pci_dma_sync_sg sn_pci_dma_sync_sg #define platform_pci_dma_sync_sg sn_pci_dma_sync_sg
#define platform_pci_dma_address sn_dma_address
#define platform_pci_dma_supported sn_pci_dma_supported #define platform_pci_dma_supported sn_pci_dma_supported
#endif /* _ASM_IA64_MACHVEC_SN2_H */ #endif /* _ASM_IA64_MACHVEC_SN2_H */
...@@ -24,7 +24,7 @@ enum { ...@@ -24,7 +24,7 @@ enum {
IA64_MCA_FAILURE = 1 IA64_MCA_FAILURE = 1
}; };
#define IA64_MCA_RENDEZ_TIMEOUT (100 * HZ) /* 1000 milliseconds */ #define IA64_MCA_RENDEZ_TIMEOUT (20 * 1000) /* value in milliseconds - 20 seconds */
#define IA64_CMC_INT_DISABLE 0 #define IA64_CMC_INT_DISABLE 0
#define IA64_CMC_INT_ENABLE 1 #define IA64_CMC_INT_ENABLE 1
......
#ifndef _ASM_IA64_MODULE_H #ifndef _ASM_IA64_MODULE_H
#define _ASM_IA64_MODULE_H #define _ASM_IA64_MODULE_H
/* Module support currently broken (due to in-kernel module loader). */ /*
* IA-64-specific support for kernel module loader.
*
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
struct elf64_shdr; /* forward declaration */
struct mod_arch_specific {
struct elf64_shdr *core_plt; /* core PLT section */
struct elf64_shdr *init_plt; /* init PLT section */
struct elf64_shdr *got; /* global offset table */
struct elf64_shdr *opd; /* official procedure descriptors */
struct elf64_shdr *unwind; /* unwind-table section */
unsigned long gp; /* global-pointer for module */
void *unw_table; /* unwind-table cookie returned by unwinder */
unsigned int next_got_entry; /* index of next available got entry */
};
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Ehdr Elf64_Ehdr
#define MODULE_PROC_FAMILY "ia64"
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
#define ARCH_SHF_SMALL SHF_IA_64_SHORT
#endif /* _ASM_IA64_MODULE_H */ #endif /* _ASM_IA64_MODULE_H */
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
#define PCIBIOS_MIN_MEM 0x10000000 #define PCIBIOS_MIN_MEM 0x10000000
void pcibios_config_init(void); void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int bus); struct pci_bus * pcibios_scan_root(void *acpi_handle, int segment, int bus);
struct pci_dev; struct pci_dev;
...@@ -58,7 +58,6 @@ extern int pcibios_prep_mwi (struct pci_dev *); ...@@ -58,7 +58,6 @@ extern int pcibios_prep_mwi (struct pci_dev *);
#define pci_unmap_sg platform_pci_unmap_sg #define pci_unmap_sg platform_pci_unmap_sg
#define pci_dma_sync_single platform_pci_dma_sync_single #define pci_dma_sync_single platform_pci_dma_sync_single
#define pci_dma_sync_sg platform_pci_dma_sync_sg #define pci_dma_sync_sg platform_pci_dma_sync_sg
#define sg_dma_address platform_pci_dma_address
#define pci_dma_supported platform_pci_dma_supported #define pci_dma_supported platform_pci_dma_supported
/* pci_unmap_{single,page} is not a nop, thus... */ /* pci_unmap_{single,page} is not a nop, thus... */
...@@ -92,11 +91,23 @@ extern int pcibios_prep_mwi (struct pci_dev *); ...@@ -92,11 +91,23 @@ extern int pcibios_prep_mwi (struct pci_dev *);
#define pci_controller_num(PDEV) (0) #define pci_controller_num(PDEV) (0)
#define sg_dma_len(sg) ((sg)->dma_length) #define sg_dma_len(sg) ((sg)->dma_length)
#define sg_dma_address(sg) ((sg)->dma_address)
#define HAVE_PCI_MMAP #define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine); enum pci_mmap_state mmap_state, int write_combine);
struct pci_controller {
void *acpi_handle;
void *iommu;
int segment;
u64 mem_offset;
};
#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
#define PCI_SEGMENT(busdev) (PCI_CONTROLLER(busdev)->segment)
/* generic pci stuff */ /* generic pci stuff */
#include <asm-generic/pci.h> #include <asm-generic/pci.h>
......
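With the new struct pci_controller hanging off bus->sysdata, per-segment information becomes a pointer chase, and pcibios_scan_root() now takes the ACPI handle and segment so it can fill one of these in per root bus. A small hypothetical fragment using the new accessors (assuming a struct pci_bus *bus in scope):

	struct pci_controller *ctrl = PCI_CONTROLLER(bus);

	printk("bus %02x is on segment %d (acpi handle %p)\n",
	       bus->number, PCI_SEGMENT(bus), ctrl->acpi_handle);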
...@@ -59,6 +59,9 @@ ...@@ -59,6 +59,9 @@
#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */ #define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
#define _PAGE_PROTNONE (__IA64_UL(1) << 63) #define _PAGE_PROTNONE (__IA64_UL(1) << 63)
/* Valid only for a PTE with the present bit cleared: */
#define _PAGE_FILE (1 << 1) /* see swap & file pte remarks below */
#define _PFN_MASK _PAGE_PPN_MASK #define _PFN_MASK _PAGE_PPN_MASK
#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_A | _PAGE_D) #define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_A | _PAGE_D)
...@@ -253,6 +256,7 @@ ia64_phys_addr_valid (unsigned long addr) ...@@ -253,6 +256,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0) #define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0) #define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0) #define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
/* /*
* Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
* access rights: * access rights:
...@@ -402,12 +406,35 @@ pte_same (pte_t a, pte_t b) ...@@ -402,12 +406,35 @@ pte_same (pte_t a, pte_t b)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void); extern void paging_init (void);
#define __swp_type(entry) (((entry).val >> 1) & 0xff) /*
* Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
* bits in the swap-type field of the swap pte. It would be nice to
* enforce that, but we can't easily include <linux/swap.h> here.
* (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
*
* Format of swap pte:
* bit 0 : present bit (must be zero)
* bit 1 : _PAGE_FILE (must be zero)
* bits 2- 8: swap-type
* bits 9-62: swap offset
* bit 63 : _PAGE_PROTNONE bit
*
* Format of file pte:
* bit 0 : present bit (must be zero)
* bit 1 : _PAGE_FILE (must be one)
* bits 2-62: file_offset/PAGE_SIZE
* bit 63 : _PAGE_PROTNONE bit
*/
#define __swp_type(entry) (((entry).val >> 2) & 0x7f)
#define __swp_offset(entry) (((entry).val << 1) >> 10) #define __swp_offset(entry) (((entry).val << 1) >> 10)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 1) | ((long) (offset) << 9) }) #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define PTE_FILE_MAX_BITS 61
#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
#define io_remap_page_range remap_page_range /* XXX is this right? */ #define io_remap_page_range remap_page_range /* XXX is this right? */
/* /*
......
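The new comment pins down the bit layout, and the shifts in the macros follow from it: the swap-type field moves from bit 1 to bit 2 (freeing bit 1 for _PAGE_FILE) and shrinks from 8 to 7 bits, and the left-then-right shift in __swp_offset strips the _PAGE_PROTNONE bit at position 63. A stand-alone user-space re-creation of the packing, as a sanity check (not kernel code):

#include <assert.h>

#define swp_entry(type, offset)	(((unsigned long)(type) << 2) | \
				 ((unsigned long)(offset) << 9))
#define swp_type(val)		(((val) >> 2) & 0x7f)
#define swp_offset(val)		(((val) << 1) >> 10)	/* <<1 drops bit 63 */

int main (void)
{
	unsigned long e = swp_entry(5, 0x1234);

	assert(swp_type(e) == 5);		/* bits 2-8 */
	assert(swp_offset(e) == 0x1234);	/* bits 9-62 */
	assert((e & 3) == 0);	/* present bit and _PAGE_FILE both clear */
	return 0;
}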
...@@ -379,7 +379,7 @@ extern unsigned long get_wchan (struct task_struct *p); ...@@ -379,7 +379,7 @@ extern unsigned long get_wchan (struct task_struct *p);
static inline unsigned long static inline unsigned long
ia64_get_kr (unsigned long regnum) ia64_get_kr (unsigned long regnum)
{ {
unsigned long r; unsigned long r = 0;
switch (regnum) { switch (regnum) {
case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break; case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break;
...@@ -915,13 +915,13 @@ ia64_tpa (__u64 addr) ...@@ -915,13 +915,13 @@ ia64_tpa (__u64 addr)
#define ARCH_HAS_SPINLOCK_PREFETCH #define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE 256 #define PREFETCH_STRIDE 256
extern inline void static inline void
prefetch (const void *x) prefetch (const void *x)
{ {
__asm__ __volatile__ ("lfetch [%0]" : : "r"(x)); __asm__ __volatile__ ("lfetch [%0]" : : "r"(x));
} }
extern inline void static inline void
prefetchw (const void *x) prefetchw (const void *x)
{ {
__asm__ __volatile__ ("lfetch.excl [%0]" : : "r"(x)); __asm__ __volatile__ ("lfetch.excl [%0]" : : "r"(x));
......
...@@ -37,13 +37,23 @@ extern spinlock_t sal_lock; ...@@ -37,13 +37,23 @@ extern spinlock_t sal_lock;
result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7) result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7)
# define SAL_CALL(result,args...) do { \ # define SAL_CALL(result,args...) do { \
unsigned long flags; \ unsigned long __ia64_sc_flags; \
struct ia64_fpreg fr[6]; \ struct ia64_fpreg __ia64_sc_fr[6]; \
ia64_save_scratch_fpregs(fr); \ ia64_save_scratch_fpregs(__ia64_sc_fr); \
spin_lock_irqsave(&sal_lock, flags); \ spin_lock_irqsave(&sal_lock, __ia64_sc_flags); \
__SAL_CALL(result,args); \ __SAL_CALL(result, args); \
spin_unlock_irqrestore(&sal_lock, flags); \ spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags); \
ia64_load_scratch_fpregs(fr); \ ia64_load_scratch_fpregs(__ia64_sc_fr); \
} while (0)
# define SAL_CALL_NOLOCK(result,args...) do { \
unsigned long __ia64_scn_flags; \
struct ia64_fpreg __ia64_scn_fr[6]; \
ia64_save_scratch_fpregs(__ia64_scn_fr); \
local_irq_save(__ia64_scn_flags); \
__SAL_CALL(result, args); \
local_irq_restore(__ia64_scn_flags); \
ia64_load_scratch_fpregs(__ia64_scn_fr); \
} while (0) } while (0)
#define SAL_SET_VECTORS 0x01000000 #define SAL_SET_VECTORS 0x01000000
...@@ -686,13 +696,14 @@ ia64_sal_get_state_info_size (u64 sal_info_type) ...@@ -686,13 +696,14 @@ ia64_sal_get_state_info_size (u64 sal_info_type)
/* /*
* Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from * Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from
* the monarch processor. * the monarch processor. Must not lock, because it will not return on any cpu until the
* monarch processor sends a wake-up.
*/ */
static inline s64 static inline s64
ia64_sal_mc_rendez (void) ia64_sal_mc_rendez (void)
{ {
struct ia64_sal_retval isrv; struct ia64_sal_retval isrv;
SAL_CALL(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0); SAL_CALL_NOLOCK(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
return isrv.status; return isrv.status;
} }
......
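Two things happen in the sal.h hunk: the macro locals gain __ia64_sc_/__ia64_scn_ prefixes, and a lock-free SAL_CALL_NOLOCK appears for the MCA rendezvous (which does not return until the monarch wakes the CPU, so it must not sit on sal_lock). The rename matters because the old locals could capture a caller's identifiers. An illustrative user-space reduction of that capture bug (names invented):

#include <stdio.h>

/* If the caller's second argument is itself named "flags", the
   macro's local shadows it inside the do-block. */
#define OLD_STYLE_CALL(out, arg)  do {		\
	unsigned long flags = 0;		\
	(out) = (arg);				\
} while (0)

int main (void)
{
	unsigned long flags = 42, out;

	OLD_STYLE_CALL(out, flags);	/* "flags" binds to the macro's local */
	printf("%lu\n", out);		/* prints 0, not the caller's 42 */
	return 0;
}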
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
* *
* Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 2001-2002 Hewlett-Packard Co * Copyright (C) 2001-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#ifndef _ASM_IA64_SMP_H #ifndef _ASM_IA64_SMP_H
...@@ -74,7 +74,7 @@ cpu_logical_id (int cpuid) ...@@ -74,7 +74,7 @@ cpu_logical_id (int cpuid)
int i; int i;
for (i = 0; i < NR_CPUS; ++i) for (i = 0; i < NR_CPUS; ++i)
if (cpu_physical_id(i) == (__u32) cpuid) if (cpu_physical_id(i) == cpuid)
break; break;
return i; return i;
} }
......
...@@ -3,8 +3,8 @@ ...@@ -3,8 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 2000 Ralf Baechle * Copyright (C) 2000 Ralf Baechle
* Copyright (C) 2000-2001 Silicon Graphics, Inc.
*/ */
#ifndef _ASM_IA64_SN_IO_H #ifndef _ASM_IA64_SN_IO_H
#define _ASM_IA64_SN_IO_H #define _ASM_IA64_SN_IO_H
...@@ -78,4 +78,9 @@ ...@@ -78,4 +78,9 @@
#include <asm/sn/sn2/shubio.h> #include <asm/sn/sn2/shubio.h>
#endif #endif
/*
* Used to ensure write ordering (like mb(), but for I/O space)
*/
extern void sn_mmiob(void);
#endif /* _ASM_IA64_SN_IO_H */ #endif /* _ASM_IA64_SN_IO_H */
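sn_mmiob() is the SN-specific primitive behind the ordering guarantee described in the comment above: it stalls until prior MMIO writes have been accepted by the fabric. A hypothetical fragment (register names invented) of the intended use:

	writel(new_head, ring_head_reg);
	sn_mmiob();			/* prior MMIO write is pushed out... */
	writel(1, doorbell_reg);	/* ...before the doorbell write */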
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_SN2_IO_H
#define _ASM_SN_SN2_IO_H
extern void * sn_io_addr(unsigned long port); /* forward declaration */
extern void sn_mmiob(void); /* forward declaration */
#define __sn_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
extern void sn_dma_flush(unsigned long);
/*
* The following routines are SN platform specific, called when
* a reference is made to the inX/outX set of macros. The SN
* inX macros ensure that posted DMA writes on the
* Bridge are flushed.
*
* The routines should be self-explanatory.
*/
static inline unsigned int
__sn_inb (unsigned long port)
{
volatile unsigned char *addr = sn_io_addr(port);
unsigned char ret;
ret = *addr;
sn_dma_flush((unsigned long)addr);
__sn_mf_a();
return ret;
}
static inline unsigned int
__sn_inw (unsigned long port)
{
volatile unsigned short *addr = sn_io_addr(port);
unsigned short ret;
ret = *addr;
sn_dma_flush((unsigned long)addr);
__sn_mf_a();
return ret;
}
static inline unsigned int
__sn_inl (unsigned long port)
{
volatile unsigned int *addr = sn_io_addr(port);
unsigned int ret;
ret = *addr;
sn_dma_flush((unsigned long)addr);
__sn_mf_a();
return ret;
}
static inline void
__sn_outb (unsigned char val, unsigned long port)
{
volatile unsigned char *addr = sn_io_addr(port);
*addr = val;
sn_mmiob();
}
static inline void
__sn_outw (unsigned short val, unsigned long port)
{
volatile unsigned short *addr = sn_io_addr(port);
*addr = val;
sn_mmiob();
}
static inline void
__sn_outl (unsigned int val, unsigned long port)
{
volatile unsigned int *addr = sn_io_addr(port);
*addr = val;
sn_mmiob();
}
/*
* The following routines are SN platform specific, called when
* a reference is made to the readX/writeX set of macros. The SN
* readX macros ensure that posted DMA writes on the
* Bridge are flushed.
*
* The routines should be self-explanatory.
*/
static inline unsigned char
__sn_readb (void *addr)
{
unsigned char val;
val = *(volatile unsigned char *)addr;
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned short
__sn_readw (void *addr)
{
unsigned short val;
val = *(volatile unsigned short *)addr;
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned int
__sn_readl (void *addr)
{
unsigned int val;
val = *(volatile unsigned int *) addr;
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned long
__sn_readq (void *addr)
{
unsigned long val;
val = *(volatile unsigned long *) addr;
sn_dma_flush((unsigned long)addr);
return val;
}
/*
* For generic and SN2 kernels, we have a set of fast-access
* PIO macros. These are provided on the SN platform because
* the normal inX and readX macros perform the additional task
* of flushing posted DMA requests on the Bridge.
*
* These routines should be self-explanatory.
*/
static inline unsigned int
sn_inb_fast (unsigned long port)
{
volatile unsigned char *addr = (unsigned char *)port;
unsigned char ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned int
sn_inw_fast (unsigned long port)
{
volatile unsigned short *addr = (unsigned short *)port;
unsigned short ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned int
sn_inl_fast (unsigned long port)
{
volatile unsigned int *addr = (unsigned int *)port;
unsigned int ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned char
sn_readb_fast (void *addr)
{
return *(volatile unsigned char *)addr;
}
static inline unsigned short
sn_readw_fast (void *addr)
{
return *(volatile unsigned short *)addr;
}
static inline unsigned int
sn_readl_fast (void *addr)
{
return *(volatile unsigned int *) addr;
}
static inline unsigned long
sn_readq_fast (void *addr)
{
return *(volatile unsigned long *) addr;
}
#endif
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define _ASM_IA64_SPINLOCK_H #define _ASM_IA64_SPINLOCK_H
/* /*
* Copyright (C) 1998-2002 Hewlett-Packard Co * Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* *
...@@ -15,58 +15,6 @@ ...@@ -15,58 +15,6 @@
#include <asm/bitops.h> #include <asm/bitops.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#undef NEW_LOCK
#ifdef NEW_LOCK
typedef struct {
volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(x) ((x)->lock = 0)
/*
* Streamlined test_and_set_bit(0, (x)). We use test-and-test-and-set
* rather than a simple xchg to avoid writing the cache-line when
* there is contention.
*/
#define _raw_spin_lock(x) \
{ \
register char *addr __asm__ ("r31") = (char *) &(x)->lock; \
\
__asm__ __volatile__ ( \
"mov r30=1\n" \
"mov ar.ccv=r0\n" \
";;\n" \
"cmpxchg4.acq r30=[%0],r30,ar.ccv\n" \
";;\n" \
"cmp.ne p15,p0=r30,r0\n" \
"(p15) br.call.spnt.few b7=ia64_spinlock_contention\n" \
";;\n" \
"1:\n" /* force a new bundle */ \
:: "r"(addr) \
: "ar.ccv", "ar.pfs", "b7", "p15", "r28", "r29", "r30", "memory"); \
}
#define _raw_spin_trylock(x) \
({ \
register long result; \
\
__asm__ __volatile__ ( \
"mov ar.ccv=r0\n" \
";;\n" \
"cmpxchg4.acq %0=[%2],%1,ar.ccv\n" \
: "=r"(result) : "r"(1), "r"(&(x)->lock) : "ar.ccv", "memory"); \
(result == 0); \
})
#define spin_is_locked(x) ((x)->lock != 0)
#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0;} while (0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
#else /* !NEW_LOCK */
typedef struct { typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
} spinlock_t; } spinlock_t;
...@@ -123,8 +71,6 @@ do { \ ...@@ -123,8 +71,6 @@ do { \
#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) #define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
#endif /* !NEW_LOCK */
typedef struct { typedef struct {
volatile int read_counter:31; volatile int read_counter:31;
volatile int write_lock:1; volatile int write_lock:1;
...@@ -136,7 +82,7 @@ typedef struct { ...@@ -136,7 +82,7 @@ typedef struct {
#define _raw_read_lock(rw) \ #define _raw_read_lock(rw) \
do { \ do { \
int tmp = 0; \ int __read_lock_tmp = 0; \
__asm__ __volatile__ ("1:\tfetchadd4.acq %0 = [%1], 1\n" \ __asm__ __volatile__ ("1:\tfetchadd4.acq %0 = [%1], 1\n" \
";;\n" \ ";;\n" \
"tbit.nz p6,p0 = %0, 31\n" \ "tbit.nz p6,p0 = %0, 31\n" \
...@@ -151,15 +97,15 @@ do { \ ...@@ -151,15 +97,15 @@ do { \
"br.cond.sptk.few 1b\n" \ "br.cond.sptk.few 1b\n" \
";;\n" \ ";;\n" \
".previous\n" \ ".previous\n" \
: "=&r" (tmp) \ : "=&r" (__read_lock_tmp) \
: "r" (rw) : "p6", "memory"); \ : "r" (rw) : "p6", "memory"); \
} while(0) } while(0)
#define _raw_read_unlock(rw) \ #define _raw_read_unlock(rw) \
do { \ do { \
int tmp = 0; \ int __read_unlock_tmp = 0; \
__asm__ __volatile__ ("fetchadd4.rel %0 = [%1], -1\n" \ __asm__ __volatile__ ("fetchadd4.rel %0 = [%1], -1\n" \
: "=r" (tmp) \ : "=r" (__read_unlock_tmp) \
: "r" (rw) \ : "r" (rw) \
: "memory"); \ : "memory"); \
} while(0) } while(0)
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/types.h> #include <linux/types.h>
struct pci_vector_struct { struct pci_vector_struct {
__u16 segment; /* PCI Segment number */
__u16 bus; /* PCI Bus number */ __u16 bus; /* PCI Bus number */
__u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */ __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
__u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */ __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
...@@ -108,7 +109,7 @@ ia64_insn_group_barrier (void) ...@@ -108,7 +109,7 @@ ia64_insn_group_barrier (void)
#define set_mb(var, value) do { (var) = (value); mb(); } while (0) #define set_mb(var, value) do { (var) = (value); mb(); } while (0)
#define set_wmb(var, value) do { (var) = (value); mb(); } while (0) #define set_wmb(var, value) do { (var) = (value); mb(); } while (0)
#define safe_halt() ia64_pal_halt(1) /* PAL_HALT */ #define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */
/* /*
* The group barrier in front of the rsm & ssm are necessary to ensure * The group barrier in front of the rsm & ssm are necessary to ensure
......
...@@ -7,8 +7,8 @@ ...@@ -7,8 +7,8 @@
* The main single-value unaligned transfer routines. Derived from * The main single-value unaligned transfer routines. Derived from
* the Linux/Alpha version. * the Linux/Alpha version.
* *
* Copyright (C) 1998, 1999 Hewlett-Packard Co * Copyright (C) 1998, 1999, 2003 Hewlett-Packard Co
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#define get_unaligned(ptr) \ #define get_unaligned(ptr) \
((__typeof__(*(ptr)))ia64_get_unaligned((ptr), sizeof(*(ptr)))) ((__typeof__(*(ptr)))ia64_get_unaligned((ptr), sizeof(*(ptr))))
...@@ -16,106 +16,101 @@ ...@@ -16,106 +16,101 @@
#define put_unaligned(x,ptr) \ #define put_unaligned(x,ptr) \
ia64_put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr))) ia64_put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr)))
/*
* EGCS 1.1 knows about arbitrary unaligned loads. Define some
* packed structures to talk about such things with.
*/
struct __una_u64 { __u64 x __attribute__((packed)); }; struct __una_u64 { __u64 x __attribute__((packed)); };
struct __una_u32 { __u32 x __attribute__((packed)); }; struct __una_u32 { __u32 x __attribute__((packed)); };
struct __una_u16 { __u16 x __attribute__((packed)); }; struct __una_u16 { __u16 x __attribute__((packed)); };
static inline unsigned long static inline unsigned long
__uldq (const unsigned long * r11) __uld8 (const unsigned long * addr)
{ {
const struct __una_u64 *ptr = (const struct __una_u64 *) r11; const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
return ptr->x; return ptr->x;
} }
static inline unsigned long static inline unsigned long
__uldl (const unsigned int * r11) __uld4 (const unsigned int * addr)
{ {
const struct __una_u32 *ptr = (const struct __una_u32 *) r11; const struct __una_u32 *ptr = (const struct __una_u32 *) addr;
return ptr->x; return ptr->x;
} }
static inline unsigned long static inline unsigned long
__uldw (const unsigned short * r11) __uld2 (const unsigned short * addr)
{ {
const struct __una_u16 *ptr = (const struct __una_u16 *) r11; const struct __una_u16 *ptr = (const struct __una_u16 *) addr;
return ptr->x; return ptr->x;
} }
static inline void static inline void
__ustq (unsigned long r5, unsigned long * r11) __ust8 (unsigned long val, unsigned long * addr)
{ {
struct __una_u64 *ptr = (struct __una_u64 *) r11; struct __una_u64 *ptr = (struct __una_u64 *) addr;
ptr->x = r5; ptr->x = val;
} }
static inline void static inline void
__ustl (unsigned long r5, unsigned int * r11) __ust4 (unsigned long val, unsigned int * addr)
{ {
struct __una_u32 *ptr = (struct __una_u32 *) r11; struct __una_u32 *ptr = (struct __una_u32 *) addr;
ptr->x = r5; ptr->x = val;
} }
static inline void static inline void
__ustw (unsigned long r5, unsigned short * r11) __ust2 (unsigned long val, unsigned short * addr)
{ {
struct __una_u16 *ptr = (struct __una_u16 *) r11; struct __una_u16 *ptr = (struct __una_u16 *) addr;
ptr->x = r5; ptr->x = val;
} }
/* /*
* This function doesn't actually exist. The idea is that when * This function doesn't actually exist. The idea is that when someone uses the macros
* someone uses the macros below with an unsupported size (datatype), * below with an unsupported size (datatype), the linker will alert us to the problem via
* the linker will alert us to the problem via an unresolved reference * an unresolved reference error.
* error.
*/ */
extern unsigned long ia64_bad_unaligned_access_length (void); extern unsigned long ia64_bad_unaligned_access_length (void);
#define ia64_get_unaligned(_ptr,size) \ #define ia64_get_unaligned(_ptr,size) \
({ \ ({ \
const void *ptr = (_ptr); \ const void *__ia64_ptr = (_ptr); \
unsigned long val; \ unsigned long __ia64_val; \
\ \
switch (size) { \ switch (size) { \
case 1: \ case 1: \
val = *(const unsigned char *) ptr; \ __ia64_val = *(const unsigned char *) __ia64_ptr; \
break; \ break; \
case 2: \ case 2: \
val = __uldw((const unsigned short *)ptr); \ __ia64_val = __uld2((const unsigned short *)__ia64_ptr); \
break; \ break; \
case 4: \ case 4: \
val = __uldl((const unsigned int *)ptr); \ __ia64_val = __uld4((const unsigned int *)__ia64_ptr); \
break; \ break; \
case 8: \ case 8: \
val = __uldq((const unsigned long *)ptr); \ __ia64_val = __uld8((const unsigned long *)__ia64_ptr); \
break; \ break; \
default: \ default: \
val = ia64_bad_unaligned_access_length(); \ __ia64_val = ia64_bad_unaligned_access_length(); \
} \ } \
val; \ __ia64_val; \
}) })
#define ia64_put_unaligned(_val,_ptr,size) \ #define ia64_put_unaligned(_val,_ptr,size) \
do { \ do { \
const void *ptr = (_ptr); \ const void *__ia64_ptr = (_ptr); \
unsigned long val = (_val); \ unsigned long __ia64_val = (_val); \
\ \
switch (size) { \ switch (size) { \
case 1: \ case 1: \
*(unsigned char *)ptr = (val); \ *(unsigned char *)__ia64_ptr = (__ia64_val); \
break; \ break; \
case 2: \ case 2: \
__ustw(val, (unsigned short *)ptr); \ __ust2(__ia64_val, (unsigned short *)__ia64_ptr); \
break; \ break; \
case 4: \ case 4: \
__ustl(val, (unsigned int *)ptr); \ __ust4(__ia64_val, (unsigned int *)__ia64_ptr); \
break; \ break; \
case 8: \ case 8: \
__ustq(val, (unsigned long *)ptr); \ __ust8(__ia64_val, (unsigned long *)__ia64_ptr); \
break; \ break; \
default: \ default: \
ia64_bad_unaligned_access_length(); \ ia64_bad_unaligned_access_length(); \
......
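The renamed helpers (__uld2/4/8, __ust2/4/8) keep the packed-struct trick: dereferencing through a struct whose lone member is __attribute__((packed)) tells the compiler the pointer may be misaligned, so it emits byte-safe accesses instead of a faulting ld8. A user-space sketch of the same trick (hypothetical names):

#include <stdio.h>
#include <string.h>

struct una_u32 { unsigned int x __attribute__((packed)); };

static unsigned int
get_u32_unaligned (const void *p)
{
	return ((const struct una_u32 *) p)->x;	/* safe at any alignment */
}

int main (void)
{
	unsigned char buf[8];
	unsigned int v = 0xdeadbeef;

	memcpy(buf + 1, &v, sizeof v);	/* deliberately misaligned */
	printf("0x%x\n", get_u32_unaligned(buf + 1));
	return 0;
}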