Commit 04032d58 authored by David Mosberger

Merge

parents f01d7733 9973adf4
On some platforms, so-called memory-mapped I/O is weakly ordered. On such
platforms, driver writers are responsible for ensuring that I/O writes to
memory-mapped addresses on their device arrive in the order intended. This is
typically done by reading a 'safe' device or bridge register, causing the I/O
chipset to flush pending writes to the device before any reads are posted. A
driver would usually use this technique immediately prior to the exit of a
critical section of code protected by spinlocks. This would ensure that
subsequent writes to I/O space arrived only after all prior writes (much like a
memory barrier op, mb(), only with respect to I/O).
A more concrete example from a hypothetical device driver:
...
CPU A: spin_lock_irqsave(&dev_lock, flags)
CPU A: val = readl(my_status);
CPU A: ...
CPU A: writel(newval, ring_ptr);
CPU A: spin_unlock_irqrestore(&dev_lock, flags)
...
CPU B: spin_lock_irqsave(&dev_lock, flags)
CPU B: val = readl(my_status);
CPU B: ...
CPU B: writel(newval2, ring_ptr);
CPU B: spin_unlock_irqrestore(&dev_lock, flags)
...
In the case above, the device may receive newval2 before it receives newval,
which could cause problems. Fixing it is easy enough though:
...
CPU A: spin_lock_irqsave(&dev_lock, flags)
CPU A: val = readl(my_status);
CPU A: ...
CPU A: writel(newval, ring_ptr);
CPU A: (void)readl(safe_register); /* maybe a config register? */
CPU A: spin_unlock_irqrestore(&dev_lock, flags)
...
CPU B: spin_lock_irqsave(&dev_lock, flags)
CPU B: val = readl(my_status);
CPU B: ...
CPU B: writel(newval2, ring_ptr);
CPU B: (void)readl(safe_register); /* maybe a config register? */
CPU B: spin_unlock_irqrestore(&dev_lock, flags)
Here, the reads from safe_register will cause the I/O chipset to flush any
pending writes before actually posting the read to the chipset, preventing
possible data corruption.
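
The same idea can be wrapped in a small helper. The sketch below is illustrative only: dev_lock, ring_ptr and safe_register are hypothetical names standing in for a real driver's spinlock and mapped device registers; readl()/writel() and spin_lock_irqsave() are the usual kernel interfaces.

#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/io.h>

/* Hypothetical names, for illustration only. */
static spinlock_t dev_lock = SPIN_LOCK_UNLOCKED;
static void *ring_ptr;          /* mapped doorbell register, set up elsewhere (e.g. ioremap()) */
static void *safe_register;     /* any harmless, readable device register */

static void ring_doorbell (u32 newval)
{
        unsigned long flags;

        spin_lock_irqsave(&dev_lock, flags);
        writel(newval, ring_ptr);
        (void) readl(safe_register);    /* flush posted writes before dropping the lock */
        spin_unlock_irqrestore(&dev_lock, flags);
}

With the flushing read inside the critical section, a writel() issued by whichever CPU takes dev_lock next cannot reach the device ahead of the write made here.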
...@@ -401,6 +401,15 @@ config HUGETLB_PAGE_SIZE_256KB ...@@ -401,6 +401,15 @@ config HUGETLB_PAGE_SIZE_256KB
endchoice endchoice
config IA64_PAL_IDLE
bool "Use PAL_HALT_LIGHT in idle loop"
---help---
Say Y here to enable use of PAL_HALT_LIGHT in the cpu_idle loop.
This allows the CPU to enter a low power state when idle. You
can enable CONFIG_IA64_PALINFO and check /proc/pal/cpu0/power_info
to see the power consumption and latency for this state. If you're
unsure your firmware supports it, answer N.
config SMP config SMP
bool "SMP support" bool "SMP support"
---help--- ---help---
......
...@@ -14,6 +14,7 @@ export AWK ...@@ -14,6 +14,7 @@ export AWK
OBJCOPYFLAGS := --strip-all OBJCOPYFLAGS := --strip-all
LDFLAGS_vmlinux := -static LDFLAGS_vmlinux := -static
LDFLAGS_MODULE += -T arch/ia64/module.lds
AFLAGS_KERNEL := -mconstant-gp AFLAGS_KERNEL := -mconstant-gp
EXTRA := EXTRA :=
...@@ -23,7 +24,7 @@ CFLAGS_KERNEL := -mconstant-gp ...@@ -23,7 +24,7 @@ CFLAGS_KERNEL := -mconstant-gp
GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.') GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.')
GAS_STATUS=$(shell arch/ia64/scripts/check-gas $(CC)) GAS_STATUS=$(shell arch/ia64/scripts/check-gas $(CC) $(OBJDUMP))
ifeq ($(GAS_STATUS),buggy) ifeq ($(GAS_STATUS),buggy)
$(error Sorry, you need a newer version of the assembler, one that is built from \ $(error Sorry, you need a newer version of the assembler, one that is built from \
...@@ -50,11 +51,8 @@ core-$(CONFIG_IA64_DIG) += arch/ia64/dig/ ...@@ -50,11 +51,8 @@ core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ arch/ia64/hp/common/ arch/ia64/hp/zx1/ \ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ arch/ia64/hp/common/ arch/ia64/hp/zx1/ \
arch/ia64/hp/sim/ arch/ia64/hp/sim/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN) += arch/ia64/sn/kernel/ \ core-$(CONFIG_IA64_SGI_SN) += arch/ia64/sn/
arch/ia64/sn/io/ \
arch/ia64/sn/io/sn2/ \
arch/ia64/sn/io/sn2/pcibr/ \
arch/ia64/sn/kernel/sn2/
drivers-$(CONFIG_PCI) += arch/ia64/pci/ drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
......
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
# #
targets-$(CONFIG_IA64_HP_SIM) += bootloader targets-$(CONFIG_IA64_HP_SIM) += bootloader
targets-$(CONFIG_IA64_GENERIC) += bootloader
targets := vmlinux.bin vmlinux.gz $(targets-y) targets := vmlinux.bin vmlinux.gz $(targets-y)
quiet_cmd_cptotop = LN $@ quiet_cmd_cptotop = LN $@
......
...@@ -1497,7 +1497,7 @@ static int sba_proc_info(char *buf, char **start, off_t offset, int len) ...@@ -1497,7 +1497,7 @@ static int sba_proc_info(char *buf, char **start, off_t offset, int len)
ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */ ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */ total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
sprintf(buf, "%s rev %d.%d\n", "Hewlett Packard zx1 SBA", sprintf(buf, "%s rev %d.%d\n", "Hewlett-Packard zx1 SBA",
((sba_dev->hw_rev >> 4) & 0xF), (sba_dev->hw_rev & 0xF)); ((sba_dev->hw_rev >> 4) & 0xF), (sba_dev->hw_rev & 0xF));
sprintf(buf, "%sIO PDIR size : %d bytes (%d entries)\n", buf, sprintf(buf, "%sIO PDIR size : %d bytes (%d entries)\n", buf,
(int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ total_pages); (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ total_pages);
......
...@@ -148,6 +148,11 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from) ...@@ -148,6 +148,11 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
err |= __put_user(from->si_band, &to->si_band); err |= __put_user(from->si_band, &to->si_band);
err |= __put_user(from->si_fd, &to->si_fd); err |= __put_user(from->si_fd, &to->si_fd);
break; break;
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(from->si_ptr, &to->si_ptr); /*XXX eric: not sure the size is correct because it contains pointer*/
break;
/* case __SI_RT: This is not generated by the kernel as of now. */ /* case __SI_RT: This is not generated by the kernel as of now. */
} }
} }
......
...@@ -119,10 +119,8 @@ nargs (unsigned int arg, char **ap) ...@@ -119,10 +119,8 @@ nargs (unsigned int arg, char **ap)
asmlinkage long asmlinkage long
sys32_execve (char *filename, unsigned int argv, unsigned int envp, sys32_execve (char *filename, unsigned int argv, unsigned int envp,
int dummy3, int dummy4, int dummy5, int dummy6, int dummy7, struct pt_regs *regs)
int stack)
{ {
struct pt_regs *regs = (struct pt_regs *)&stack;
unsigned long old_map_base, old_task_size, tssd; unsigned long old_map_base, old_task_size, tssd;
char **av, **ae; char **av, **ae;
int na, ne, len; int na, ne, len;
...@@ -1701,7 +1699,7 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) ...@@ -1701,7 +1699,7 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
return shmctl32(first, second, (void *)AA(ptr)); return shmctl32(first, second, (void *)AA(ptr));
default: default:
return -EINVAL; return -ENOSYS;
} }
return -EINVAL; return -EINVAL;
} }
...@@ -2156,26 +2154,23 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data, ...@@ -2156,26 +2154,23 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
ret = -ESRCH; ret = -ESRCH;
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
child = find_task_by_pid(pid); child = find_task_by_pid(pid);
if (child)
get_task_struct(child);
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
if (!child) if (!child)
goto out; goto out;
ret = -EPERM; ret = -EPERM;
if (pid == 1) /* no messing around with init! */ if (pid == 1) /* no messing around with init! */
goto out; goto out_tsk;
if (request == PTRACE_ATTACH) { if (request == PTRACE_ATTACH) {
ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack); ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
goto out; goto out_tsk;
} }
ret = -ESRCH;
if (!(child->ptrace & PT_PTRACED)) ret = ptrace_check_attach(child, request == PTRACE_KILL);
goto out; if (ret < 0)
if (child->state != TASK_STOPPED) { goto out_tsk;
if (request != PTRACE_KILL)
goto out;
}
if (child->parent != current)
goto out;
switch (request) { switch (request) {
case PTRACE_PEEKTEXT: case PTRACE_PEEKTEXT:
...@@ -2185,12 +2180,12 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data, ...@@ -2185,12 +2180,12 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
ret = put_user(value, (unsigned int *) A(data)); ret = put_user(value, (unsigned int *) A(data));
else else
ret = -EIO; ret = -EIO;
goto out; goto out_tsk;
case PTRACE_POKETEXT: case PTRACE_POKETEXT:
case PTRACE_POKEDATA: /* write the word at location addr */ case PTRACE_POKEDATA: /* write the word at location addr */
ret = ia32_poke(regs, child, addr, data); ret = ia32_poke(regs, child, addr, data);
goto out; goto out_tsk;
case PTRACE_PEEKUSR: /* read word at addr in USER area */ case PTRACE_PEEKUSR: /* read word at addr in USER area */
ret = -EIO; ret = -EIO;
...@@ -2265,6 +2260,8 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data, ...@@ -2265,6 +2260,8 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
break; break;
} }
out_tsk:
free_task_struct(child);
out: out:
unlock_kernel(); unlock_kernel();
return ret; return ret;
......
...@@ -4,16 +4,15 @@ ...@@ -4,16 +4,15 @@
extra-y := head.o init_task.o extra-y := head.o init_task.o
obj-y := acpi.o entry.o gate.o efi.o efi_stub.o ia64_ksyms.o \ obj-y := acpi.o entry.o efi.o efi_stub.o gate.o ia64_ksyms.o irq.o irq_ia64.o irq_lsapic.o \
irq.o irq_ia64.o irq_lsapic.o ivt.o \ ivt.o machvec.o pal.o perfmon.o process.o ptrace.o sal.o semaphore.o setup.o signal.o \
machvec.o pal.o process.o perfmon.o ptrace.o sal.o \ sys_ia64.o time.o traps.o unaligned.o unwind.o
semaphore.o setup.o \
signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o
obj-$(CONFIG_FSYS) += fsys.o obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_IOSAPIC) += iosapic.o obj-$(CONFIG_FSYS) += fsys.o
obj-$(CONFIG_IA64_PALINFO) += palinfo.o obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_EFI_VARS) += efivars.o obj-$(CONFIG_IA64_MCA) += mca.o mca_asm.o
obj-$(CONFIG_SMP) += smp.o smpboot.o obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IA64_MCA) += mca.o mca_asm.o obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
/*
* arch/ia64/kernel/acpi-ext.c
*
* Copyright (C) 2003 Hewlett-Packard
* Copyright (C) Alex Williamson
*
* Vendor specific extensions to ACPI. These are used by both
* HP and NEC.
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <asm/acpi-ext.h>
/*
* Note: Strictly speaking, this is only needed for HP and NEC machines.
* However, NEC machines identify themselves as DIG-compliant, so there is
* no easy way to #ifdef this out.
*/
acpi_status
hp_acpi_csr_space (acpi_handle obj, u64 *csr_base, u64 *csr_length)
{
int i, offset = 0;
acpi_status status;
struct acpi_buffer buf;
struct acpi_resource_vendor *res;
struct acpi_hp_vendor_long *hp_res;
efi_guid_t vendor_guid;
*csr_base = 0;
*csr_length = 0;
status = acpi_get_crs(obj, &buf);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to get _CRS data on object\n");
return status;
}
res = (struct acpi_resource_vendor *)acpi_get_crs_type(&buf, &offset, ACPI_RSTYPE_VENDOR);
if (!res) {
printk(KERN_ERR PREFIX "Failed to find config space for device\n");
acpi_dispose_crs(&buf);
return AE_NOT_FOUND;
}
hp_res = (struct acpi_hp_vendor_long *)(res->reserved);
if (res->length != HP_CCSR_LENGTH || hp_res->guid_id != HP_CCSR_TYPE) {
printk(KERN_ERR PREFIX "Unknown Vendor data\n");
acpi_dispose_crs(&buf);
return AE_TYPE; /* Revisit error? */
}
memcpy(&vendor_guid, hp_res->guid, sizeof(efi_guid_t));
if (efi_guidcmp(vendor_guid, HP_CCSR_GUID) != 0) {
printk(KERN_ERR PREFIX "Vendor GUID does not match\n");
acpi_dispose_crs(&buf);
return AE_TYPE; /* Revisit error? */
}
for (i = 0 ; i < 8 ; i++) {
*csr_base |= ((u64)(hp_res->csr_base[i]) << (i * 8));
*csr_length |= ((u64)(hp_res->csr_length[i]) << (i * 8));
}
acpi_dispose_crs(&buf);
return AE_OK;
}
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
* Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com> * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com> * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
* Copyright (C) 2001 Takayoshi Kochi <t-kouchi@cq.jp.nec.com> * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
* Copyright (C) 2002 Erich Focht <efocht@ess.nec.de> * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
* *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
...@@ -109,8 +109,6 @@ acpi_get_sysname (void) ...@@ -109,8 +109,6 @@ acpi_get_sysname (void)
return "sn2"; return "sn2";
# elif defined (CONFIG_IA64_DIG) # elif defined (CONFIG_IA64_DIG)
return "dig"; return "dig";
# elif defined (CONFIG_IA64_HP_ZX1)
return "hpzx1";
# else # else
# error Unknown platform. Fix acpi.c. # error Unknown platform. Fix acpi.c.
# endif # endif
...@@ -176,6 +174,73 @@ acpi_dispose_crs (struct acpi_buffer *buf) ...@@ -176,6 +174,73 @@ acpi_dispose_crs (struct acpi_buffer *buf)
kfree(buf->pointer); kfree(buf->pointer);
} }
void
acpi_get_crs_addr (struct acpi_buffer *buf, int type, u64 *base, u64 *size, u64 *tra)
{
int offset = 0;
struct acpi_resource_address16 *addr16;
struct acpi_resource_address32 *addr32;
struct acpi_resource_address64 *addr64;
for (;;) {
struct acpi_resource *res = acpi_get_crs_next(buf, &offset);
if (!res)
return;
switch (res->id) {
case ACPI_RSTYPE_ADDRESS16:
addr16 = (struct acpi_resource_address16 *) &res->data;
if (type == addr16->resource_type) {
*base = addr16->min_address_range;
*size = addr16->address_length;
*tra = addr16->address_translation_offset;
return;
}
break;
case ACPI_RSTYPE_ADDRESS32:
addr32 = (struct acpi_resource_address32 *) &res->data;
if (type == addr32->resource_type) {
*base = addr32->min_address_range;
*size = addr32->address_length;
*tra = addr32->address_translation_offset;
return;
}
break;
case ACPI_RSTYPE_ADDRESS64:
addr64 = (struct acpi_resource_address64 *) &res->data;
if (type == addr64->resource_type) {
*base = addr64->min_address_range;
*size = addr64->address_length;
*tra = addr64->address_translation_offset;
return;
}
break;
}
}
}
int
acpi_get_addr_space(void *obj, u8 type, u64 *base, u64 *length, u64 *tra)
{
acpi_status status;
struct acpi_buffer buf;
*base = 0;
*length = 0;
*tra = 0;
status = acpi_get_crs((acpi_handle)obj, &buf);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to get _CRS data on object\n");
return status;
}
acpi_get_crs_addr(&buf, type, base, length, tra);
acpi_dispose_crs(&buf);
return AE_OK;
}
#endif /* CONFIG_ACPI */ #endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI_BOOT #ifdef CONFIG_ACPI_BOOT
...@@ -808,6 +873,7 @@ acpi_get_prt (struct pci_vector_struct **vectors, int *count) ...@@ -808,6 +873,7 @@ acpi_get_prt (struct pci_vector_struct **vectors, int *count)
list_for_each(node, &acpi_prt.entries) { list_for_each(node, &acpi_prt.entries) {
entry = (struct acpi_prt_entry *)node; entry = (struct acpi_prt_entry *)node;
vector[i].segment = entry->id.segment;
vector[i].bus = entry->id.bus; vector[i].bus = entry->id.bus;
vector[i].pci_id = ((u32) entry->id.device << 16) | 0xffff; vector[i].pci_id = ((u32) entry->id.device << 16) | 0xffff;
vector[i].pin = entry->pin; vector[i].pin = entry->pin;
......
...@@ -91,7 +91,7 @@ ENTRY(ia64_execve) ...@@ -91,7 +91,7 @@ ENTRY(ia64_execve)
END(ia64_execve) END(ia64_execve)
/* /*
* sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 child_tidptr, u64 parent_tidptr, * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
* u64 tls) * u64 tls)
*/ */
GLOBAL_ENTRY(sys_clone2) GLOBAL_ENTRY(sys_clone2)
...@@ -105,10 +105,10 @@ GLOBAL_ENTRY(sys_clone2) ...@@ -105,10 +105,10 @@ GLOBAL_ENTRY(sys_clone2)
mov out1=in1 mov out1=in1
mov out3=in2 mov out3=in2
tbit.nz p6,p0=in0,CLONE_SETTLS_BIT tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
mov out4=in3 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID mov out4=in3 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
;; ;;
(p6) st8 [r2]=in5 // store TLS in r16 for copy_thread() (p6) st8 [r2]=in5 // store TLS in r16 for copy_thread()
mov out5=in4 // parent_tidptr: valid only w/CLONE_PARENT_SETTID mov out5=in4 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK
br.call.sptk.many rp=do_fork br.call.sptk.many rp=do_fork
...@@ -126,12 +126,12 @@ GLOBAL_ENTRY(sys_clone2) ...@@ -126,12 +126,12 @@ GLOBAL_ENTRY(sys_clone2)
END(sys_clone2) END(sys_clone2)
/* /*
* sys_clone(u64 flags, u64 ustack_base, u64 user_tid, u64 tls) * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
* Deprecated. Use sys_clone2() instead. * Deprecated. Use sys_clone2() instead.
*/ */
GLOBAL_ENTRY(sys_clone) GLOBAL_ENTRY(sys_clone)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
alloc r16=ar.pfs,4,2,5,0 alloc r16=ar.pfs,5,2,6,0
DO_SAVE_SWITCH_STACK DO_SAVE_SWITCH_STACK
adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
mov loc0=rp mov loc0=rp
...@@ -140,9 +140,10 @@ GLOBAL_ENTRY(sys_clone) ...@@ -140,9 +140,10 @@ GLOBAL_ENTRY(sys_clone)
mov out1=in1 mov out1=in1
mov out3=16 // stacksize (compensates for 16-byte scratch area) mov out3=16 // stacksize (compensates for 16-byte scratch area)
tbit.nz p6,p0=in0,CLONE_SETTLS_BIT tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
mov out4=in2 // out4 = user_tid (optional) mov out4=in2 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
;; ;;
(p6) st8 [r2]=in3 // store TLS in r13 (tp) (p6) st8 [r2]=in4 // store TLS in r13 (tp)
mov out5=in3 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK
br.call.sptk.many rp=do_fork br.call.sptk.many rp=do_fork
......
...@@ -533,15 +533,15 @@ fsyscall_table: ...@@ -533,15 +533,15 @@ fsyscall_table:
data8 fsys_fallback_syscall // epoll_wait // 1245 data8 fsys_fallback_syscall // epoll_wait // 1245
data8 fsys_fallback_syscall // restart_syscall data8 fsys_fallback_syscall // restart_syscall
data8 fsys_fallback_syscall // semtimedop data8 fsys_fallback_syscall // semtimedop
data8 fsys_fallback_syscall data8 fsys_fallback_syscall // timer_create
data8 fsys_fallback_syscall data8 fsys_fallback_syscall // timer_settime
data8 fsys_fallback_syscall // 1250 data8 fsys_fallback_syscall // timer_gettime // 1250
data8 fsys_fallback_syscall data8 fsys_fallback_syscall // timer_getoverrun
data8 fsys_fallback_syscall data8 fsys_fallback_syscall // timer_delete
data8 fsys_fallback_syscall data8 fsys_fallback_syscall // clock_settime
data8 fsys_fallback_syscall data8 fsys_fallback_syscall // clock_gettime
data8 fsys_fallback_syscall // 1255 data8 fsys_fallback_syscall // clock_getres // 1255
data8 fsys_fallback_syscall data8 fsys_fallback_syscall // clock_nanosleep
data8 fsys_fallback_syscall data8 fsys_fallback_syscall
data8 fsys_fallback_syscall data8 fsys_fallback_syscall
data8 fsys_fallback_syscall data8 fsys_fallback_syscall
......
...@@ -733,73 +733,3 @@ SET_REG(b4); ...@@ -733,73 +733,3 @@ SET_REG(b4);
SET_REG(b5); SET_REG(b5);
#endif /* CONFIG_IA64_BRL_EMU */ #endif /* CONFIG_IA64_BRL_EMU */
#ifdef CONFIG_SMP
/*
* This routine handles spinlock contention. It uses a simple exponential backoff
* algorithm to reduce unnecessary bus traffic. The initial delay is selected from
* the low-order bits of the cycle counter (a cheap "randomizer"). I'm sure this
* could use additional tuning, especially on systems with a large number of CPUs.
* Also, I think the maximum delay should be made a function of the number of CPUs in
* the system. --davidm 00/08/05
*
* WARNING: This is not a normal procedure. It gets called from C code without
* the compiler knowing about it. Thus, we must not use any scratch registers
* beyond those that were declared "clobbered" at the call-site (see spin_lock()
* macro). We may not even use the stacked registers, because that could overwrite
* output registers. Similarly, we can't use the scratch stack area as it may be
* in use, too.
*
* Inputs:
* ar.ccv = 0 (and available for use)
* r28 = available for use
* r29 = available for use
* r30 = non-zero (and available for use)
* r31 = address of lock we're trying to acquire
* p15 = available for use
*/
# define delay r28
# define timeout r29
# define tmp r30
GLOBAL_ENTRY(ia64_spinlock_contention)
mov tmp=ar.itc
;;
and delay=0x3f,tmp
;;
.retry: add timeout=tmp,delay
shl delay=delay,1
;;
dep delay=delay,r0,0,13 // limit delay to 8192 cycles
;;
// delay a little...
.wait: sub tmp=tmp,timeout
or delay=0xf,delay // make sure delay is non-zero (otherwise we get stuck with 0)
;;
cmp.lt p15,p0=tmp,r0
mov tmp=ar.itc
(p15) br.cond.sptk .wait
;;
ld4 tmp=[r31]
;;
cmp.ne p15,p0=tmp,r0
mov tmp=ar.itc
(p15) br.cond.sptk .retry // lock is still busy
;;
// try acquiring lock (we know ar.ccv is still zero!):
mov tmp=1
;;
cmpxchg4.acq tmp=[r31],tmp,ar.ccv
;;
cmp.eq p15,p0=tmp,r0
mov tmp=ar.itc
(p15) br.ret.sptk.many b7 // got lock -> return
br .retry // still no luck, retry
END(ia64_spinlock_contention)
#endif
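
For readers following the comment above, here is a rough C rendering of the backoff loop it describes. This is a sketch only: the real routine has to stay in hand-written assembly because it is entered from the spin_lock() macro with almost no usable registers, and get_cycles() and cmpxchg_acq() merely stand in for the ar.itc reads and the cmpxchg4.acq instruction used there.

#include <linux/timex.h>        /* get_cycles() */
#include <asm/system.h>         /* cmpxchg_acq() on ia64 (assumed location) */

static void spinlock_backoff_sketch (volatile unsigned int *lock)
{
        unsigned long delay = get_cycles() & 0x3f;      /* cheap "randomizer" */

        for (;;) {
                unsigned long start = get_cycles();
                unsigned long wait = delay | 0xf;       /* never wait zero cycles */

                while (get_cycles() - start < wait)
                        ;                               /* back off for a while */

                delay = (delay << 1) & 0x1fff;          /* double, capped below 8192 cycles */

                if (*lock)
                        continue;                       /* lock still busy: back off again */
                if (cmpxchg_acq(lock, 0, 1) == 0)
                        return;                         /* got the lock */
        }
}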
...@@ -57,9 +57,7 @@ EXPORT_SYMBOL_NOVERS(__up); ...@@ -57,9 +57,7 @@ EXPORT_SYMBOL_NOVERS(__up);
EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(clear_page);
#include <asm/processor.h> #include <asm/processor.h>
# ifndef CONFIG_NUMA
EXPORT_SYMBOL(cpu_info__per_cpu); EXPORT_SYMBOL(cpu_info__per_cpu);
# endif
EXPORT_SYMBOL(kernel_thread); EXPORT_SYMBOL(kernel_thread);
#include <asm/system.h> #include <asm/system.h>
...@@ -147,3 +145,19 @@ EXPORT_SYMBOL(machvec_noop); ...@@ -147,3 +145,19 @@ EXPORT_SYMBOL(machvec_noop);
EXPORT_SYMBOL(pfm_install_alternate_syswide_subsystem); EXPORT_SYMBOL(pfm_install_alternate_syswide_subsystem);
EXPORT_SYMBOL(pfm_remove_alternate_syswide_subsystem); EXPORT_SYMBOL(pfm_remove_alternate_syswide_subsystem);
#endif #endif
#ifdef CONFIG_NUMA
#include <asm/numa.h>
EXPORT_SYMBOL(cpu_to_node_map);
#endif
#include <asm/unwind.h>
EXPORT_SYMBOL(unw_init_from_blocked_task);
EXPORT_SYMBOL(unw_init_running);
EXPORT_SYMBOL(unw_unwind);
EXPORT_SYMBOL(unw_unwind_to_user);
EXPORT_SYMBOL(unw_access_gr);
EXPORT_SYMBOL(unw_access_br);
EXPORT_SYMBOL(unw_access_fr);
EXPORT_SYMBOL(unw_access_ar);
EXPORT_SYMBOL(unw_access_pr);
...@@ -27,9 +27,7 @@ ...@@ -27,9 +27,7 @@
#include <asm/sal.h> #include <asm/sal.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/processor.h> #include <asm/processor.h>
#ifdef CONFIG_SMP
#include <linux/smp.h> #include <linux/smp.h>
#endif
MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>"); MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
MODULE_DESCRIPTION("/proc interface to IA-64 PAL"); MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
...@@ -37,12 +35,6 @@ MODULE_LICENSE("GPL"); ...@@ -37,12 +35,6 @@ MODULE_LICENSE("GPL");
#define PALINFO_VERSION "0.5" #define PALINFO_VERSION "0.5"
#ifdef CONFIG_SMP
#define cpu_is_online(i) (cpu_online_map & (1UL << i))
#else
#define cpu_is_online(i) 1
#endif
typedef int (*palinfo_func_t)(char*); typedef int (*palinfo_func_t)(char*);
typedef struct { typedef struct {
...@@ -933,7 +925,7 @@ palinfo_init(void) ...@@ -933,7 +925,7 @@ palinfo_init(void)
*/ */
for (i=0; i < NR_CPUS; i++) { for (i=0; i < NR_CPUS; i++) {
if (!cpu_is_online(i)) continue; if (!cpu_online(i)) continue;
sprintf(cpustr,CPUSTR, i); sprintf(cpustr,CPUSTR, i);
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/wrapper.h> #include <linux/wrapper.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/sysctl.h> #include <linux/sysctl.h>
#include <linux/smp.h>
#include <asm/bitops.h> #include <asm/bitops.h>
#include <asm/errno.h> #include <asm/errno.h>
...@@ -134,12 +135,6 @@ ...@@ -134,12 +135,6 @@
#define PFM_CPUINFO_CLEAR(v) __get_cpu_var(pfm_syst_info) &= ~(v) #define PFM_CPUINFO_CLEAR(v) __get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v) __get_cpu_var(pfm_syst_info) |= (v) #define PFM_CPUINFO_SET(v) __get_cpu_var(pfm_syst_info) |= (v)
#ifdef CONFIG_SMP
#define cpu_is_online(i) (cpu_online_map & (1UL << i))
#else
#define cpu_is_online(i) (i==0)
#endif
/* /*
* debugging * debugging
*/ */
...@@ -1082,7 +1077,7 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx) ...@@ -1082,7 +1077,7 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
* and it must be a valid CPU * and it must be a valid CPU
*/ */
cpu = ffz(~pfx->ctx_cpu_mask); cpu = ffz(~pfx->ctx_cpu_mask);
if (cpu_is_online(cpu) == 0) { if (cpu_online(cpu) == 0) {
DBprintk(("CPU%d is not online\n", cpu)); DBprintk(("CPU%d is not online\n", cpu));
return -EINVAL; return -EINVAL;
} }
...@@ -3153,7 +3148,7 @@ pfm_proc_info(char *page) ...@@ -3153,7 +3148,7 @@ pfm_proc_info(char *page)
p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf.ovfl_val); p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf.ovfl_val);
for(i=0; i < NR_CPUS; i++) { for(i=0; i < NR_CPUS; i++) {
if (cpu_is_online(i) == 0) continue; if (cpu_online(i) == 0) continue;
p += sprintf(p, "CPU%-2d overflow intrs : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_count); p += sprintf(p, "CPU%-2d overflow intrs : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_count);
p += sprintf(p, "CPU%-2d spurious intrs : %lu\n", i, pfm_stats[i].pfm_spurious_ovfl_intr_count); p += sprintf(p, "CPU%-2d spurious intrs : %lu\n", i, pfm_stats[i].pfm_spurious_ovfl_intr_count);
p += sprintf(p, "CPU%-2d recorded samples : %lu\n", i, pfm_stats[i].pfm_recorded_samples_count); p += sprintf(p, "CPU%-2d recorded samples : %lu\n", i, pfm_stats[i].pfm_recorded_samples_count);
......
...@@ -66,10 +66,7 @@ do_show_stack (struct unw_frame_info *info, void *arg) ...@@ -66,10 +66,7 @@ do_show_stack (struct unw_frame_info *info, void *arg)
void void
show_trace_task (struct task_struct *task) show_trace_task (struct task_struct *task)
{ {
struct unw_frame_info info; show_stack(task);
unw_init_from_blocked_task(&info, task);
do_show_stack(&info, 0);
} }
void void
...@@ -169,7 +166,10 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall ...@@ -169,7 +166,10 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
void void
default_idle (void) default_idle (void)
{ {
/* may want to do PAL_LIGHT_HALT here... */ #ifdef CONFIG_IA64_PAL_IDLE
if (!need_resched())
safe_halt();
#endif
} }
void __attribute__((noreturn)) void __attribute__((noreturn))
...@@ -177,6 +177,10 @@ cpu_idle (void *unused) ...@@ -177,6 +177,10 @@ cpu_idle (void *unused)
{ {
/* endless idle loop with no priority at all */ /* endless idle loop with no priority at all */
while (1) { while (1) {
void (*idle)(void) = pm_idle;
if (!idle)
idle = default_idle;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (!need_resched()) if (!need_resched())
min_xtp(); min_xtp();
...@@ -186,10 +190,7 @@ cpu_idle (void *unused) ...@@ -186,10 +190,7 @@ cpu_idle (void *unused)
#ifdef CONFIG_IA64_SGI_SN #ifdef CONFIG_IA64_SGI_SN
snidle(); snidle();
#endif #endif
if (pm_idle) (*idle)();
(*pm_idle)();
else
default_idle();
} }
#ifdef CONFIG_IA64_SGI_SN #ifdef CONFIG_IA64_SGI_SN
...@@ -581,6 +582,15 @@ kernel_thread (int (*fn)(void *), void *arg, unsigned long flags) ...@@ -581,6 +582,15 @@ kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
tid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0); tid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
if (parent != current) { if (parent != current) {
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(ia64_task_regs(current))) {
/* A kernel thread is always a 64-bit process. */
current->thread.map_base = DEFAULT_MAP_BASE;
current->thread.task_size = DEFAULT_TASK_SIZE;
ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
}
#endif
result = (*fn)(arg); result = (*fn)(arg);
_exit(result); _exit(result);
} }
...@@ -751,7 +761,7 @@ dup_task_struct(struct task_struct *orig) ...@@ -751,7 +761,7 @@ dup_task_struct(struct task_struct *orig)
} }
void void
__put_task_struct (struct task_struct *tsk) free_task_struct (struct task_struct *tsk)
{ {
free_pages((unsigned long) tsk, KERNEL_STACK_SIZE_ORDER); free_pages((unsigned long) tsk, KERNEL_STACK_SIZE_ORDER);
} }
...@@ -191,6 +191,10 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from) ...@@ -191,6 +191,10 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
err |= __put_user(from->si_pfm_ovfl[2], &to->si_pfm_ovfl[2]); err |= __put_user(from->si_pfm_ovfl[2], &to->si_pfm_ovfl[2]);
err |= __put_user(from->si_pfm_ovfl[3], &to->si_pfm_ovfl[3]); err |= __put_user(from->si_pfm_ovfl[3], &to->si_pfm_ovfl[3]);
} }
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(from->si_value, &to->si_value);
break; break;
default: default:
err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_uid, &to->si_uid);
......
...@@ -279,12 +279,15 @@ smp_callin (void) ...@@ -279,12 +279,15 @@ smp_callin (void)
smp_setup_percpu_timer(); smp_setup_percpu_timer();
/* if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
* Synchronize the ITC with the BP /*
*/ * Synchronize the ITC with the BP
Dprintk("Going to syncup ITC with BP.\n"); */
Dprintk("Going to syncup ITC with BP.\n");
ia64_sync_itc(0);
}
ia64_sync_itc(0);
/* /*
* Get our bogomips. * Get our bogomips.
*/ */
......
...@@ -60,7 +60,7 @@ do_profile (unsigned long ip) ...@@ -60,7 +60,7 @@ do_profile (unsigned long ip)
} }
/* /*
* Return the number of micro-seconds that elapsed since the last update to jiffy. The * Return the number of nano-seconds that elapsed since the last update to jiffy. The
* xtime_lock must be at least read-locked when calling this routine. * xtime_lock must be at least read-locked when calling this routine.
*/ */
static inline unsigned long static inline unsigned long
...@@ -86,6 +86,9 @@ gettimeoffset (void) ...@@ -86,6 +86,9 @@ gettimeoffset (void)
void void
do_settimeofday (struct timeval *tv) do_settimeofday (struct timeval *tv)
{ {
time_t sec = tv->tv_sec;
long nsec = tv->tv_usec * 1000;
write_seqlock_irq(&xtime_lock); write_seqlock_irq(&xtime_lock);
{ {
/* /*
...@@ -94,22 +97,22 @@ do_settimeofday (struct timeval *tv) ...@@ -94,22 +97,22 @@ do_settimeofday (struct timeval *tv)
* Discover what correction gettimeofday would have done, and then undo * Discover what correction gettimeofday would have done, and then undo
* it! * it!
*/ */
tv->tv_usec -= gettimeoffset(); nsec -= gettimeoffset();
tv->tv_usec -= (jiffies - wall_jiffies) * (1000000 / HZ);
while (tv->tv_usec < 0) { while (nsec < 0) {
tv->tv_usec += 1000000; nsec += 1000000000;
tv->tv_sec--; sec--;
} }
xtime.tv_sec = tv->tv_sec; xtime.tv_sec = sec;
xtime.tv_nsec = 1000 * tv->tv_usec; xtime.tv_nsec = nsec;
time_adjust = 0; /* stop active adjtime() */ time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC; time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT; time_maxerror = NTP_PHASE_LIMIT;
time_esterror = NTP_PHASE_LIMIT; time_esterror = NTP_PHASE_LIMIT;
} }
write_sequnlock_irq(&xtime_lock); write_sequnlock_irq(&xtime_lock);
clock_was_set();
} }
void void
......
...@@ -338,8 +338,8 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) ...@@ -338,8 +338,8 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
fpu_swa_count = 0; fpu_swa_count = 0;
if ((++fpu_swa_count < 5) && !(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) { if ((++fpu_swa_count < 5) && !(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
last_time = jiffies; last_time = jiffies;
printk(KERN_WARNING "%s(%d): floating-point assist fault at ip %016lx\n", printk(KERN_WARNING "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri); current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
} }
exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr, exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
......
...@@ -253,6 +253,11 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char ...@@ -253,6 +253,11 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
struct pt_regs *pt; struct pt_regs *pt;
if ((unsigned) regnum - 1 >= 127) { if ((unsigned) regnum - 1 >= 127) {
if (regnum == 0 && !write) {
*val = 0; /* read r0 always returns 0 */
*nat = 0;
return 0;
}
UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n", UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
__FUNCTION__, regnum); __FUNCTION__, regnum);
return -1; return -1;
...@@ -318,13 +323,8 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char ...@@ -318,13 +323,8 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
} }
} else { } else {
/* access a scratch register */ /* access a scratch register */
if (!info->pt) {
UNW_DPRINT(0, "unwind.%s: no pt-regs; cannot access r%d\n",
__FUNCTION__, regnum);
return -1;
}
pt = get_scratch_regs(info); pt = get_scratch_regs(info);
addr = (unsigned long *) (pt + pt_regs_off(regnum)); addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
if (info->pri_unat_loc) if (info->pri_unat_loc)
nat_addr = info->pri_unat_loc; nat_addr = info->pri_unat_loc;
else else
......
...@@ -87,12 +87,31 @@ ia64_outl (unsigned int val, unsigned long port) ...@@ -87,12 +87,31 @@ ia64_outl (unsigned int val, unsigned long port)
__ia64_outl(val, port); __ia64_outl(val, port);
} }
void unsigned char
ia64_mmiob (void) ia64_readb (void *addr)
{
return __ia64_readb (addr);
}
unsigned short
ia64_readw (void *addr)
{ {
__ia64_mmiob(); return __ia64_readw (addr);
} }
unsigned int
ia64_readl (void *addr)
{
return __ia64_readl (addr);
}
unsigned long
ia64_readq (void *addr)
{
return __ia64_readq (addr);
}
/* define aliases: */ /* define aliases: */
asm (".global __ia64_inb, __ia64_inw, __ia64_inl"); asm (".global __ia64_inb, __ia64_inw, __ia64_inl");
...@@ -105,7 +124,11 @@ asm ("__ia64_outb = ia64_outb"); ...@@ -105,7 +124,11 @@ asm ("__ia64_outb = ia64_outb");
asm ("__ia64_outw = ia64_outw"); asm ("__ia64_outw = ia64_outw");
asm ("__ia64_outl = ia64_outl"); asm ("__ia64_outl = ia64_outl");
asm (".global __ia64_mmiob"); asm (".global __ia64_readb, __ia64_readw, __ia64_readl, __ia64_readq");
asm ("__ia64_mmiob = ia64_mmiob"); asm ("__ia64_readb = ia64_readb");
asm ("__ia64_readw = ia64_readw");
asm ("__ia64_readl = ia64_readl");
asm ("__ia64_readq = ia64_readq");
#endif /* CONFIG_IA64_GENERIC */ #endif /* CONFIG_IA64_GENERIC */
...@@ -473,12 +473,6 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int ...@@ -473,12 +473,6 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction); sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction);
} }
unsigned long
swiotlb_dma_address (struct scatterlist *sg)
{
return sg->dma_address;
}
/* /*
* Return whether the given PCI device DMA address mask can be supported properly. For * Return whether the given PCI device DMA address mask can be supported properly. For
* example, if your device can only drive the low 24-bits during PCI bus mastering, then * example, if your device can only drive the low 24-bits during PCI bus mastering, then
...@@ -497,7 +491,6 @@ EXPORT_SYMBOL(swiotlb_map_sg); ...@@ -497,7 +491,6 @@ EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg); EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single); EXPORT_SYMBOL(swiotlb_sync_single);
EXPORT_SYMBOL(swiotlb_sync_sg); EXPORT_SYMBOL(swiotlb_sync_sg);
EXPORT_SYMBOL(swiotlb_dma_address);
EXPORT_SYMBOL(swiotlb_alloc_consistent); EXPORT_SYMBOL(swiotlb_alloc_consistent);
EXPORT_SYMBOL(swiotlb_free_consistent); EXPORT_SYMBOL(swiotlb_free_consistent);
EXPORT_SYMBOL(swiotlb_pci_dma_supported); EXPORT_SYMBOL(swiotlb_pci_dma_supported);
SECTIONS {
/* Group unwind sections into a single section: */
.IA_64.unwind_info : { *(.IA_64.unwind_info*) }
.IA_64.unwind : { *(.IA_64.unwind*) }
/*
* Create place-holder sections to hold the PLTs, GOT, and
* official procedure-descriptors (.opd).
*/
.core.plt : { BYTE(0) }
.init.plt : { BYTE(0) }
.got : { BYTE(0) }
.opd : { BYTE(0) }
}
...@@ -49,11 +49,13 @@ struct pci_fixup pcibios_fixups[1]; ...@@ -49,11 +49,13 @@ struct pci_fixup pcibios_fixups[1];
/* /*
* Low-level SAL-based PCI configuration access functions. Note that SAL * Low-level SAL-based PCI configuration access functions. Note that SAL
* calls are already serialized (via sal_lock), so we don't need another * calls are already serialized (via sal_lock), so we don't need another
* synchronization mechanism here. Not using segment number (yet). * synchronization mechanism here.
*/ */
#define PCI_SAL_ADDRESS(bus, dev, fn, reg) \ #define PCI_SAL_ADDRESS(seg, bus, dev, fn, reg) \
((u64)(bus << 16) | (u64)(dev << 11) | (u64)(fn << 8) | (u64)(reg)) ((u64)(seg << 24) | (u64)(bus << 16) | \
(u64)(dev << 11) | (u64)(fn << 8) | (u64)(reg))
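
As a quick sanity check of the widened encoding, here is a worked example with made-up values (not part of the commit): seg occupies bits 31:24, bus 23:16, dev 15:11, fn 10:8 and reg 7:0 of the SAL config address.

#include <stdio.h>

int main (void)
{
        unsigned long long seg = 1, bus = 2, dev = 3, fn = 4, reg = 0x40;
        unsigned long long addr = (seg << 24) | (bus << 16) | (dev << 11) | (fn << 8) | reg;

        printf("0x%016llx\n", addr);    /* prints 0x0000000001021c40 */
        return 0;
}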
static int static int
__pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value) __pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
...@@ -61,10 +63,10 @@ __pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value) ...@@ -61,10 +63,10 @@ __pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
int result = 0; int result = 0;
u64 data = 0; u64 data = 0;
if (!value || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255)) if (!value || (seg > 255) || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
return -EINVAL; return -EINVAL;
result = ia64_sal_pci_config_read(PCI_SAL_ADDRESS(bus, dev, fn, reg), len, &data); result = ia64_sal_pci_config_read(PCI_SAL_ADDRESS(seg, bus, dev, fn, reg), len, &data);
*value = (u32) data; *value = (u32) data;
...@@ -74,24 +76,24 @@ __pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value) ...@@ -74,24 +76,24 @@ __pci_sal_read (int seg, int bus, int dev, int fn, int reg, int len, u32 *value)
static int static int
__pci_sal_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value) __pci_sal_write (int seg, int bus, int dev, int fn, int reg, int len, u32 value)
{ {
if ((bus > 255) || (dev > 31) || (fn > 7) || (reg > 255)) if ((seg > 255) || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255))
return -EINVAL; return -EINVAL;
return ia64_sal_pci_config_write(PCI_SAL_ADDRESS(bus, dev, fn, reg), len, value); return ia64_sal_pci_config_write(PCI_SAL_ADDRESS(seg, bus, dev, fn, reg), len, value);
} }
static int static int
pci_sal_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) pci_sal_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{ {
return __pci_sal_read(0, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), return __pci_sal_read(PCI_SEGMENT(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
where, size, value); where, size, value);
} }
static int static int
pci_sal_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) pci_sal_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{ {
return __pci_sal_write(0, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), return __pci_sal_write(PCI_SEGMENT(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn),
where, size, value); where, size, value);
} }
...@@ -114,24 +116,91 @@ pci_acpi_init (void) ...@@ -114,24 +116,91 @@ pci_acpi_init (void)
subsys_initcall(pci_acpi_init); subsys_initcall(pci_acpi_init);
static void __init
pcibios_fixup_resource(struct resource *res, u64 offset)
{
res->start += offset;
res->end += offset;
}
void __init
pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus)
{
int i;
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
if (!dev->resource[i].start)
continue;
if (dev->resource[i].flags & IORESOURCE_MEM)
pcibios_fixup_resource(&dev->resource[i],
PCI_CONTROLLER(dev)->mem_offset);
}
}
/* Called by ACPI when it finds a new root bus. */ /* Called by ACPI when it finds a new root bus. */
static struct pci_controller *
alloc_pci_controller(int seg)
{
struct pci_controller *controller;
controller = kmalloc(sizeof(*controller), GFP_KERNEL);
if (!controller)
return NULL;
memset(controller, 0, sizeof(*controller));
controller->segment = seg;
return controller;
}
struct pci_bus * struct pci_bus *
pcibios_scan_root (int bus) scan_root_bus(int bus, struct pci_ops *ops, void *sysdata)
{ {
struct list_head *list; struct pci_bus *b;
struct pci_bus *pci_bus;
list_for_each(list, &pci_root_buses) {
pci_bus = pci_bus_b(list);
if (pci_bus->number == bus) {
/* Already scanned */
printk("PCI: Bus (%02x) already probed\n", bus);
return pci_bus;
}
}
printk("PCI: Probing PCI hardware on bus (%02x)\n", bus); /*
return pci_scan_bus(bus, pci_root_ops, NULL); * We know this is a new root bus we haven't seen before, so
* scan it, even if we've seen the same bus number in a different
* segment.
*/
b = kmalloc(sizeof(*b), GFP_KERNEL);
if (!b)
return NULL;
memset(b, 0, sizeof(*b));
INIT_LIST_HEAD(&b->children);
INIT_LIST_HEAD(&b->devices);
list_add_tail(&b->node, &pci_root_buses);
b->number = b->secondary = bus;
b->resource[0] = &ioport_resource;
b->resource[1] = &iomem_resource;
b->sysdata = sysdata;
b->ops = ops;
b->subordinate = pci_do_scan_bus(b);
return b;
}
struct pci_bus *
pcibios_scan_root(void *handle, int seg, int bus)
{
struct pci_controller *controller;
u64 base, size, offset;
printk("PCI: Probing PCI hardware on bus (%02x:%02x)\n", seg, bus);
controller = alloc_pci_controller(seg);
if (!controller)
return NULL;
controller->acpi_handle = handle;
acpi_get_addr_space(handle, ACPI_MEMORY_RANGE, &base, &size, &offset);
controller->mem_offset = offset;
return scan_root_bus(bus, pci_root_ops, controller);
} }
/* /*
...@@ -140,6 +209,11 @@ pcibios_scan_root (int bus) ...@@ -140,6 +209,11 @@ pcibios_scan_root (int bus)
void __devinit void __devinit
pcibios_fixup_bus (struct pci_bus *b) pcibios_fixup_bus (struct pci_bus *b)
{ {
struct list_head *ln;
for (ln = b->devices.next; ln != &b->devices; ln = ln->next)
pcibios_fixup_device_resources(pci_dev_b(ln), b);
return; return;
} }
......
#!/bin/sh #!/bin/sh
dir=$(dirname $0) dir=$(dirname $0)
CC=$1 CC=$1
OBJDUMP=$2
$CC -c $dir/check-gas-asm.S $CC -c $dir/check-gas-asm.S
res=$(objdump -r --section .data check-gas-asm.o | fgrep 00004 | tr -s ' ' |cut -f3 -d' ') res=$($OBJDUMP -r --section .data check-gas-asm.o | fgrep 00004 | tr -s ' ' |cut -f3 -d' ')
if [ $res != ".text" ]; then if [ $res != ".text" ]; then
echo buggy echo buggy
else else
......
# arch/ia64/sn/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2003 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn ia64 subplatform
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += kernel/ # io/
# arch/ia64/sn/fakeprom/Makefile
# #
# This file is subject to the terms and conditions of the GNU General Public # This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive # License. See the file "COPYING" in the main directory of this archive
# for more details. # for more details.
# #
# Copyright (c) 2000-2001 Silicon Graphics, Inc. All rights reserved. # Copyright (c) 2000-2003 Silicon Graphics, Inc. All rights reserved.
# #
# Medusa fake PROM support
#
EXTRA_TARGETS := fpromasm.o main.o fw-emu.o fpmem.o klgraph_init.o \
fprom vmlinux.sym
OBJS := $(obj)/fpromasm.o $(obj)/main.o $(obj)/fw-emu.o $(obj)/fpmem.o \
$(obj)/klgraph_init.o
LDFLAGS_fprom = -static -T
obj-y=fpromasm.o main.o fw-emu.o fpmem.o klgraph_init.o .PHONY: fprom
fprom: $(OBJ) fprom: $(obj)/fprom
$(LD) -static -Tfprom.lds -o fprom $(OBJ) $(LIB)
.S.o: $(obj)/fprom: $(src)/fprom.lds $(OBJS) arch/ia64/lib/lib.a FORCE
$(CC) -D__ASSEMBLY__ $(AFLAGS) $(AFLAGS_KERNEL) -c -o $*.o $< $(call if_changed,ld)
.c.o:
$(CC) $(CFLAGS) $(CFLAGS_KERNEL) -c -o $*.o $<
clean: $(obj)/vmlinux.sym: $(src)/make_textsym System.map
rm -f *.o fprom $(src)/make_textsym vmlinux > vmlinux.sym
$(call cmd,cptotop)
/*
* Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
This directory contains the files required to build This directory contains the files required to build
the fake PROM image that is currently being used to the fake PROM image that is currently being used to
boot IA64 kernels running under the SGI Medusa kernel. boot IA64 kernels running under the SGI Medusa kernel.
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/ */
...@@ -168,13 +168,13 @@ GetBankSize(int index, node_memmap_t nmemmap) ...@@ -168,13 +168,13 @@ GetBankSize(int index, node_memmap_t nmemmap)
#endif #endif
void void
build_mem_desc(efi_memory_desc_t *md, int type, long paddr, long numbytes) build_mem_desc(efi_memory_desc_t *md, int type, long paddr, long numbytes, long attr)
{ {
md->type = type; md->type = type;
md->phys_addr = paddr; md->phys_addr = paddr;
md->virt_addr = 0; md->virt_addr = 0;
md->num_pages = numbytes >> 12; md->num_pages = numbytes >> 12;
md->attribute = EFI_MEMORY_WB; md->attribute = attr;
} }
int int
...@@ -236,28 +236,40 @@ build_efi_memmap(void *md, int mdsize) ...@@ -236,28 +236,40 @@ build_efi_memmap(void *md, int mdsize)
*/ */
if (bank == 0) { if (bank == 0) {
if (cnode == 0) { if (cnode == 0) {
hole = 2*1024*1024;
build_mem_desc(md, EFI_PAL_CODE, paddr, hole, EFI_MEMORY_WB|EFI_MEMORY_WB);
numbytes -= hole;
paddr += hole;
count++ ;
md += mdsize;
hole = 1*1024*1024; hole = 1*1024*1024;
build_mem_desc(md, EFI_PAL_CODE, paddr, hole); build_mem_desc(md, EFI_CONVENTIONAL_MEMORY, paddr, hole, EFI_MEMORY_UC);
numbytes -= hole; numbytes -= hole;
paddr += hole; paddr += hole;
count++ ; count++ ;
md += mdsize; md += mdsize;
hole = 3*1024*1024; hole = 1*1024*1024;
build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole); build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole, EFI_MEMORY_WB|EFI_MEMORY_WB);
numbytes -= hole; numbytes -= hole;
paddr += hole; paddr += hole;
count++ ; count++ ;
md += mdsize; md += mdsize;
} else { } else {
hole = PROMRESERVED_SIZE; hole = 2*1024*1024;
build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole); build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole, EFI_MEMORY_WB|EFI_MEMORY_WB);
numbytes -= hole;
paddr += hole;
count++ ;
md += mdsize;
hole = 2*1024*1024;
build_mem_desc(md, EFI_RUNTIME_SERVICES_DATA, paddr, hole, EFI_MEMORY_UC);
numbytes -= hole; numbytes -= hole;
paddr += hole; paddr += hole;
count++ ; count++ ;
md += mdsize; md += mdsize;
} }
} }
build_mem_desc(md, EFI_CONVENTIONAL_MEMORY, paddr, numbytes); build_mem_desc(md, EFI_CONVENTIONAL_MEMORY, paddr, numbytes, EFI_MEMORY_WB|EFI_MEMORY_WB);
md += mdsize ; md += mdsize ;
count++ ; count++ ;
......
/*
* Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
OUTPUT_FORMAT("elf64-ia64-little") OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64) OUTPUT_ARCH(ia64)
......
# arch/ia64/sn/io/Makefile
# #
# This file is subject to the terms and conditions of the GNU General Public # This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive # License. See the file "COPYING" in the main directory of this archive
...@@ -5,7 +6,8 @@ ...@@ -5,7 +6,8 @@
# #
# Copyright (C) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. # Copyright (C) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
# #
# Makefile for the sn kernel routines. # Makefile for the sn io routines.
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN EXTRA_CFLAGS := -DLITTLE_ENDIAN
...@@ -19,4 +21,6 @@ obj-$(CONFIG_IA64_SGI_SN) += stubs.o sgi_if.o xswitch.o klgraph_hack.o \ ...@@ -19,4 +21,6 @@ obj-$(CONFIG_IA64_SGI_SN) += stubs.o sgi_if.o xswitch.o klgraph_hack.o \
alenlist.o pci.o pci_dma.o ate_utils.o \ alenlist.o pci.o pci_dma.o ate_utils.o \
ifconfig_net.o io.o ioconfig_bus.o ifconfig_net.o io.o ioconfig_bus.o
obj-$(CONFIG_PCIBA) += pciba.o obj-$(CONFIG_IA64_SGI_SN2) += sn2/
obj-$(CONFIG_PCIBA) += pciba.o
# arch/ia64/sn/io/sn2/Makefile
# #
# This file is subject to the terms and conditions of the GNU General Public # This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive # License. See the file "COPYING" in the main directory of this archive
...@@ -6,14 +7,14 @@ ...@@ -6,14 +7,14 @@
# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved. # Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
# #
# Makefile for the sn2 specific io routines. # Makefile for the sn2 specific io routines.
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += bte_error.o geo_op.o klconflib.o klgraph.o l1.o \ obj-y += pcibr/ bte_error.o geo_op.o klconflib.o klgraph.o l1.o \
l1_command.o ml_iograph.o ml_SN_init.o ml_SN_intr.o module.o \ l1_command.o ml_iograph.o ml_SN_init.o ml_SN_intr.o module.o \
pci_bus_cvlink.o pciio.o pic.o sgi_io_init.o shub.o shuberror.o \ pci_bus_cvlink.o pciio.o pic.o sgi_io_init.o shub.o shuberror.o \
shub_intr.o shubio.o xbow.o xtalk.o shub_intr.o shubio.o xbow.o xtalk.o
obj-$(CONFIG_KDB) += kdba_io.o obj-$(CONFIG_KDB) += kdba_io.o
obj-$(CONFIG_SHUB_1_0_SPECIFIC) += efi-rtc.o obj-$(CONFIG_SHUB_1_0_SPECIFIC) += efi-rtc.o
...@@ -154,7 +154,7 @@ int iobrick_module_get(nasid_t nasid) ...@@ -154,7 +154,7 @@ int iobrick_module_get(nasid_t nasid)
return ret; return ret;
} }
#ifdef CONFIG_PCI
/* /*
* iobrick_module_get_nasid() returns a module_id which has the brick * iobrick_module_get_nasid() returns a module_id which has the brick
* type encoded in bits 15-12, but this is not the true brick type... * type encoded in bits 15-12, but this is not the true brick type...
...@@ -185,7 +185,7 @@ iobrick_type_get_nasid(nasid_t nasid) ...@@ -185,7 +185,7 @@ iobrick_type_get_nasid(nasid_t nasid)
return -1; /* unknown brick */ return -1; /* unknown brick */
} }
#endif
int iobrick_module_get_nasid(nasid_t nasid) int iobrick_module_get_nasid(nasid_t nasid)
{ {
int io_moduleid; int io_moduleid;
......
# arch/ia64/sn/io/sn2/pcibr/Makefile
# #
# This file is subject to the terms and conditions of the GNU General Public # This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive # License. See the file "COPYING" in the main directory of this archive
...@@ -6,6 +7,7 @@ ...@@ -6,6 +7,7 @@
# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved. # Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
# #
# Makefile for the sn2 specific pci bridge routines. # Makefile for the sn2 specific pci bridge routines.
#
EXTRA_CFLAGS := -DLITTLE_ENDIAN EXTRA_CFLAGS := -DLITTLE_ENDIAN
......
# arch/ia64/sn/kernel/Makefile # arch/ia64/sn/kernel/Makefile
# #
# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All Rights Reserved. # This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
# #
# This program is free software; you can redistribute it and/or modify it # Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
# under the terms of version 2 of the GNU General Public License
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
# Mountain View, CA 94043, or:
#
# http://www.sgi.com
#
# For further information regarding this notice, see:
#
# http://oss.sgi.com/projects/GenInfo/NoticeExplan
# #
EXTRA_CFLAGS := -DLITTLE_ENDIAN EXTRA_CFLAGS := -DLITTLE_ENDIAN
.S.s: obj-y := probe.o setup.o sn_asm.o sv.o bte.o iomv.o \
$(CPP) $(AFLAGS) $(AFLAGS_KERNEL) -o $*.s $< irq.o mca.o
.S.o:
$(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $*.o $<
obj-y = probe.o setup.o sn_asm.o sv.o bte.o iomv.o obj-$(CONFIG_IA64_SGI_SN2) += sn2/
obj-$(CONFIG_IA64_SGI_SN1) += irq.o mca.o
obj-$(CONFIG_IA64_SGI_SN2) += irq.o mca.o
obj-$(CONFIG_IA64_SGI_AUTOTEST) += llsc4.o misctest.o obj-$(CONFIG_IA64_SGI_AUTOTEST) += llsc4.o misctest.o
obj-$(CONFIG_IA64_GENERIC) += machvec.o obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_MODULES) += sn_ksyms.o obj-$(CONFIG_MODULES) += sn_ksyms.o
......
...@@ -237,7 +237,7 @@ sn_setup(char **cmdline_p) ...@@ -237,7 +237,7 @@ sn_setup(char **cmdline_p)
"%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR); "%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR);
panic("PROM version too old\n"); panic("PROM version too old\n");
} }
#ifdef CONFIG_PCI
#ifdef CONFIG_IA64_SGI_SN2 #ifdef CONFIG_IA64_SGI_SN2
{ {
extern void io_sh_swapper(int, int); extern void io_sh_swapper(int, int);
...@@ -253,7 +253,7 @@ sn_setup(char **cmdline_p) ...@@ -253,7 +253,7 @@ sn_setup(char **cmdline_p)
(void)get_master_baseio_nasid(); (void)get_master_baseio_nasid();
} }
#endif #endif
#endif /* CONFIG_PCI */
status = ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec, &drift); status = ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec, &drift);
if (status != 0 || ticks_per_sec < 100000) { if (status != 0 || ticks_per_sec < 100000) {
printk(KERN_WARNING "unable to determine platform RTC clock frequency, guessing.\n"); printk(KERN_WARNING "unable to determine platform RTC clock frequency, guessing.\n");
...@@ -349,7 +349,7 @@ sn_init_pdas(char **cmdline_p) ...@@ -349,7 +349,7 @@ sn_init_pdas(char **cmdline_p)
for (cnode=0; cnode < numnodes; cnode++) for (cnode=0; cnode < numnodes; cnode++)
memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr, sizeof(nodepdaindr)); memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr, sizeof(nodepdaindr));
#ifdef CONFIG_PCI
/* /*
* Set up IO related platform-dependent nodepda fields. * Set up IO related platform-dependent nodepda fields.
* The following routine actually sets up the hubinfo struct * The following routine actually sets up the hubinfo struct
...@@ -359,6 +359,7 @@ sn_init_pdas(char **cmdline_p) ...@@ -359,6 +359,7 @@ sn_init_pdas(char **cmdline_p)
init_platform_nodepda(nodepdaindr[cnode], cnode); init_platform_nodepda(nodepdaindr[cnode], cnode);
bte_init_node (nodepdaindr[cnode], cnode); bte_init_node (nodepdaindr[cnode], cnode);
} }
#endif
} }
/** /**
......
#
# arch/ia64/sn/kernel/sn2/Makefile # arch/ia64/sn/kernel/sn2/Makefile
# #
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved. # Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
# #
# This program is free software; you can redistribute it and/or modify it # sn2 specific kernel files
# under the terms of version 2 of the GNU General Public License
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
# Mountain View, CA 94043, or:
#
# http://www.sgi.com
#
# For further information regarding this notice, see:
#
# http://oss.sgi.com/projects/GenInfo/NoticeExplan
# #
EXTRA_CFLAGS := -DLITTLE_ENDIAN EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += cache.o iomv.o ptc_deadlock.o sn2_smp.o \ obj-y += cache.o iomv.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o
sn_proc_fs.o
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
*
* The generic kernel requires function pointers to these routines, so
* we wrap the inlines from asm/ia64/sn/sn2/io.h here.
*/
#include <linux/config.h>
#include <linux/types.h>
#include <asm/sn/sn2/io.h>
#ifdef CONFIG_IA64_GENERIC
unsigned int
sn_inb (unsigned long port)
{
return __sn_inb(port);
}
unsigned int
sn_inw (unsigned long port)
{
return __sn_inw(port);
}
unsigned int
sn_inl (unsigned long port)
{
return __sn_inl(port);
}
void
sn_outb (unsigned char val, unsigned long port)
{
__sn_outb(val, port);
}
void
sn_outw (unsigned short val, unsigned long port)
{
__sn_outw(val, port);
}
void
sn_outl (unsigned int val, unsigned long port)
{
__sn_outl(val, port);
}
unsigned char
sn_readb (void *addr)
{
return __sn_readb (addr);
}
unsigned short
sn_readw (void *addr)
{
return __sn_readw (addr);
}
unsigned int
sn_readl (void *addr)
{
return __sn_readl (addr);
}
unsigned long
sn_readq (void *addr)
{
	return __sn_readq (addr);
}
/* define aliases: */
asm (".global __sn_inb, __sn_inw, __sn_inl");
asm ("__sn_inb = sn_inb");
asm ("__sn_inw = sn_inw");
asm ("__sn_inl = sn_inl");
asm (".global __sn_outb, __sn_outw, __sn_outl");
asm ("__sn_outb = sn_outb");
asm ("__sn_outw = sn_outw");
asm ("__sn_outl = sn_outl");
asm (".global __sn_readb, __sn_readw, __sn_readl, __sn_readq");
asm ("__sn_readb = sn_readb");
asm ("__sn_readw = sn_readw");
asm ("__sn_readl = sn_readl");
asm ("__sn_readq = sn_readq");
#endif /* CONFIG_IA64_GENERIC */
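For illustration only (a hedged sketch, not part of the patch): on a generic kernel booted on SN2
hardware, the machine-vector entries resolve to the __sn_* symbols, and the asm aliases above
redirect those symbols to the out-of-line wrappers in this file rather than the header inlines.

	/*
	 * Illustrative call path, assuming CONFIG_IA64_GENERIC on SN2
	 * (not literal output of this patch):
	 *
	 *   readb(a) -> platform_readb -> ia64_mv.readb == __sn_readb (alias)
	 *            -> sn_readb()     -> __sn_readb() header inline
	 */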
...@@ -3,10 +3,11 @@ ...@@ -3,10 +3,11 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/ */
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/sn/simulator.h> #include <asm/sn/simulator.h>
...@@ -46,8 +47,10 @@ sn_io_addr(unsigned long port) ...@@ -46,8 +47,10 @@ sn_io_addr(unsigned long port)
} }
} }
EXPORT_SYMBOL(sn_io_addr);
/** /**
* sn2_mmiob - I/O space memory barrier * sn_mmiob - I/O space memory barrier
* *
* Acts as a memory mapped I/O barrier for platforms that queue writes to * Acts as a memory mapped I/O barrier for platforms that queue writes to
* I/O space. This ensures that subsequent writes to I/O space arrive after * I/O space. This ensures that subsequent writes to I/O space arrive after
...@@ -60,9 +63,9 @@ sn_io_addr(unsigned long port) ...@@ -60,9 +63,9 @@ sn_io_addr(unsigned long port)
* *
*/ */
void void
sn2_mmiob (void) sn_mmiob (void)
{ {
while ((((volatile unsigned long) (*pda->pio_write_status_addr)) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) != while ((((volatile unsigned long) (*pda.pio_write_status_addr)) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK)
udelay(1); udelay(1);
} }
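For illustration only (the register names below are hypothetical, not from this patch), a driver on
SN2 would use sn_mmiob() to force ordering between two MMIO writes:

	writel(descr, my_ring_ptr);	/* my_ring_ptr, my_doorbell: hypothetical device registers */
	sn_mmiob();			/* spins until the PIO write-status register shows no pending writes */
	writel(1, my_doorbell);		/* now guaranteed to reach the device after the ring update */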
/*
* ia64/platform/hp/common/hp_acpi.h
*
* Copyright (C) 2003 Hewlett-Packard
* Copyright (C) Alex Williamson
*
 * Vendor-specific extensions to ACPI. The HP-specific extensions are also used by NEC.
*/
#ifndef _ASM_IA64_ACPI_EXT_H
#define _ASM_IA64_ACPI_EXT_H
#include <linux/types.h>
#define HP_CCSR_LENGTH 0x21
#define HP_CCSR_TYPE 0x2
#define HP_CCSR_GUID EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, \
0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
struct acpi_hp_vendor_long {
u8 guid_id;
u8 guid[16];
u8 csr_base[8];
u8 csr_length[8];
};
extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
extern acpi_status acpi_get_crs (acpi_handle, struct acpi_buffer *);
extern struct acpi_resource *acpi_get_crs_next (struct acpi_buffer *, int *);
extern union acpi_resource_data *acpi_get_crs_type (struct acpi_buffer *, int *, int);
extern void acpi_dispose_crs (struct acpi_buffer *);
#endif /* _ASM_IA64_ACPI_EXT_H */
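A hedged usage sketch of the CSR helper; the handle and message text are illustrative, not from
this patch:

	u64 csr_base, csr_length;

	/* "handle" is an acpi_handle obtained elsewhere */
	if (hp_acpi_csr_space(handle, &csr_base, &csr_length) == AE_OK)
		printk(KERN_INFO "vendor CSR space at 0x%lx, length 0x%lx\n",
		       csr_base, csr_length);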
...@@ -100,7 +100,9 @@ const char *acpi_get_sysname (void); ...@@ -100,7 +100,9 @@ const char *acpi_get_sysname (void);
int acpi_request_vector (u32 int_type); int acpi_request_vector (u32 int_type);
int acpi_get_prt (struct pci_vector_struct **vectors, int *count); int acpi_get_prt (struct pci_vector_struct **vectors, int *count);
int acpi_get_interrupt_model (int *type); int acpi_get_interrupt_model (int *type);
int acpi_register_irq (u32 gsi, u32 polarity, u32 trigger);
int acpi_irq_to_vector (u32 irq); int acpi_irq_to_vector (u32 irq);
int acpi_get_addr_space (void *obj, u8 type, u64 *base, u64 *length,u64 *tra);
#ifdef CONFIG_ACPI_NUMA #ifdef CONFIG_ACPI_NUMA
#include <asm/numa.h> #include <asm/numa.h>
......
...@@ -55,6 +55,13 @@ ia64_atomic_sub (int i, atomic_t *v) ...@@ -55,6 +55,13 @@ ia64_atomic_sub (int i, atomic_t *v)
return new; return new;
} }
#define atomic_add_return(i,v) \
((__builtin_constant_p(i) && \
( (i == 1) || (i == 4) || (i == 8) || (i == 16) \
|| (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
? ia64_fetch_and_add(i, &(v)->counter) \
: ia64_atomic_add(i, v))
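A hedged illustration of the two paths (the counter name is made up): with a supported
compile-time constant the macro resolves to ia64_fetch_and_add(), anything else falls back to
ia64_atomic_add():

	static atomic_t nreq = ATOMIC_INIT(0);

	atomic_add_return(1, &nreq);	/* constant 1: a single fetchadd4.acq */
	atomic_add_return(n, &nreq);	/* non-constant n: ia64_atomic_add() cmpxchg loop */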
/* /*
* Atomically add I to V and return TRUE if the resulting value is * Atomically add I to V and return TRUE if the resulting value is
* negative. * negative.
...@@ -62,15 +69,9 @@ ia64_atomic_sub (int i, atomic_t *v) ...@@ -62,15 +69,9 @@ ia64_atomic_sub (int i, atomic_t *v)
static __inline__ int static __inline__ int
atomic_add_negative (int i, atomic_t *v) atomic_add_negative (int i, atomic_t *v)
{ {
return ia64_atomic_add(i, v) < 0; return atomic_add_return(i, v) < 0;
} }
#define atomic_add_return(i,v) \
((__builtin_constant_p(i) && \
( (i == 1) || (i == 4) || (i == 8) || (i == 16) \
|| (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
? ia64_fetch_and_add(i, &(v)->counter) \
: ia64_atomic_add(i, v))
#define atomic_sub_return(i,v) \ #define atomic_sub_return(i,v) \
((__builtin_constant_p(i) && \ ((__builtin_constant_p(i) && \
......
...@@ -275,7 +275,7 @@ __test_and_change_bit (int nr, void *addr) ...@@ -275,7 +275,7 @@ __test_and_change_bit (int nr, void *addr)
} }
static __inline__ int static __inline__ int
test_bit (int nr, volatile void *addr) test_bit (int nr, const volatile void *addr)
{ {
return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31)); return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
} }
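The added const qualifier lets callers hand test_bit() a read-only bitmap; a minimal sketch (the
array is illustrative):

	static const unsigned long bitmap[1] = { 0x5 };	/* bits 0 and 2 set */

	test_bit(0, bitmap);	/* returns 1 */
	test_bit(1, bitmap);	/* returns 0 */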
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define _ASM_IA64_FPU_H #define _ASM_IA64_FPU_H
/* /*
* Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co * Copyright (C) 1998, 1999, 2002, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
...@@ -57,8 +57,9 @@ ...@@ -57,8 +57,9 @@
struct ia64_fpreg { struct ia64_fpreg {
union { union {
unsigned long bits[2]; unsigned long bits[2];
long double __dummy; /* force 16-byte alignment */
} u; } u;
} __attribute__ ((aligned (16))); };
# endif /* __ASSEMBLY__ */ # endif /* __ASSEMBLY__ */
......
...@@ -214,8 +214,11 @@ typedef struct siginfo32 { ...@@ -214,8 +214,11 @@ typedef struct siginfo32 {
/* POSIX.1b timers */ /* POSIX.1b timers */
struct { struct {
unsigned int _timer1; timer_t _tid; /* timer id */
unsigned int _timer2; int _overrun; /* overrun count */
char _pad[sizeof(unsigned int) - sizeof(int)];
sigval_t32 _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer; } _timer;
/* POSIX.1b signals */ /* POSIX.1b signals */
......
...@@ -46,14 +46,10 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); ...@@ -46,14 +46,10 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v))); \ IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v))); \
else if ((i) == -4) \ else if ((i) == -4) \
IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v))); \ IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v))); \
else if ((i) == -2) \
IA64_FETCHADD(_tmp, _v, -2, sizeof(*(v))); \
else if ((i) == -1) \ else if ((i) == -1) \
IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v))); \ IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v))); \
else if ((i) == 1) \ else if ((i) == 1) \
IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v))); \ IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v))); \
else if ((i) == 2) \
IA64_FETCHADD(_tmp, _v, 2, sizeof(*(v))); \
else if ((i) == 4) \ else if ((i) == 4) \
IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v))); \ IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v))); \
else if ((i) == 8) \ else if ((i) == 8) \
......
...@@ -69,22 +69,6 @@ phys_to_virt (unsigned long address) ...@@ -69,22 +69,6 @@ phys_to_virt (unsigned long address)
*/ */
#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory") #define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
/**
* __ia64_mmiob - I/O space memory barrier
*
* Acts as a memory mapped I/O barrier for platforms that queue writes to
* I/O space. This ensures that subsequent writes to I/O space arrive after
* all previous writes. For most ia64 platforms, this is a simple
* 'mf.a' instruction, so the address is ignored. For other platforms,
* the address may be required to ensure proper ordering of writes to I/O space
* since a 'dummy' read might be necessary to barrier the write operation.
*/
static inline void
__ia64_mmiob (void)
{
__ia64_mf_a();
}
static inline const unsigned long static inline const unsigned long
__ia64_get_io_port_base (void) __ia64_get_io_port_base (void)
{ {
...@@ -287,7 +271,6 @@ __outsl (unsigned long port, void *src, unsigned long count) ...@@ -287,7 +271,6 @@ __outsl (unsigned long port, void *src, unsigned long count)
#define __outb platform_outb #define __outb platform_outb
#define __outw platform_outw #define __outw platform_outw
#define __outl platform_outl #define __outl platform_outl
#define __mmiob platform_mmiob
#define inb(p) __inb(p) #define inb(p) __inb(p)
#define inw(p) __inw(p) #define inw(p) __inw(p)
...@@ -301,31 +284,35 @@ __outsl (unsigned long port, void *src, unsigned long count) ...@@ -301,31 +284,35 @@ __outsl (unsigned long port, void *src, unsigned long count)
#define outsb(p,s,c) __outsb(p,s,c) #define outsb(p,s,c) __outsb(p,s,c)
#define outsw(p,s,c) __outsw(p,s,c) #define outsw(p,s,c) __outsw(p,s,c)
#define outsl(p,s,c) __outsl(p,s,c) #define outsl(p,s,c) __outsl(p,s,c)
#define mmiob() __mmiob()
/* /*
 * The addresses passed to these functions are already ioremap()ped. * The addresses passed to these functions are already ioremap()ped.
*
* We need these to be machine vectors since some platforms don't provide
* DMA coherence via PIO reads (PCI drivers and the spec imply that this is
* a good idea). Writes are ok though for all existing ia64 platforms (and
* hopefully it'll stay that way).
*/ */
static inline unsigned char static inline unsigned char
__readb (void *addr) __ia64_readb (void *addr)
{ {
return *(volatile unsigned char *)addr; return *(volatile unsigned char *)addr;
} }
static inline unsigned short static inline unsigned short
__readw (void *addr) __ia64_readw (void *addr)
{ {
return *(volatile unsigned short *)addr; return *(volatile unsigned short *)addr;
} }
static inline unsigned int static inline unsigned int
__readl (void *addr) __ia64_readl (void *addr)
{ {
return *(volatile unsigned int *) addr; return *(volatile unsigned int *) addr;
} }
static inline unsigned long static inline unsigned long
__readq (void *addr) __ia64_readq (void *addr)
{ {
return *(volatile unsigned long *) addr; return *(volatile unsigned long *) addr;
} }
...@@ -354,6 +341,11 @@ __writeq (unsigned long val, void *addr) ...@@ -354,6 +341,11 @@ __writeq (unsigned long val, void *addr)
*(volatile unsigned long *) addr = val; *(volatile unsigned long *) addr = val;
} }
#define __readb platform_readb
#define __readw platform_readw
#define __readl platform_readl
#define __readq platform_readq
#define readb(a) __readb((void *)(a)) #define readb(a) __readb((void *)(a))
#define readw(a) __readw((void *)(a)) #define readw(a) __readw((void *)(a))
#define readl(a) __readl((void *)(a)) #define readl(a) __readl((void *)(a))
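How a readl() now resolves, sketched for a generic kernel (illustrative, not literal preprocessor
output):

	/*
	 *   readl(a) -> __readl((void *)(a)) -> platform_readl -> ia64_mv.readl,
	 *   which is __sn_readl() on SN2 and the __ia64_readl() default from
	 *   machvec.h on platforms that do not override it.
	 */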
......
...@@ -43,7 +43,6 @@ typedef int ia64_mv_pci_map_sg (struct pci_dev *, struct scatterlist *, int, int ...@@ -43,7 +43,6 @@ typedef int ia64_mv_pci_map_sg (struct pci_dev *, struct scatterlist *, int, int
typedef void ia64_mv_pci_unmap_sg (struct pci_dev *, struct scatterlist *, int, int); typedef void ia64_mv_pci_unmap_sg (struct pci_dev *, struct scatterlist *, int, int);
typedef void ia64_mv_pci_dma_sync_single (struct pci_dev *, dma_addr_t, size_t, int); typedef void ia64_mv_pci_dma_sync_single (struct pci_dev *, dma_addr_t, size_t, int);
typedef void ia64_mv_pci_dma_sync_sg (struct pci_dev *, struct scatterlist *, int, int); typedef void ia64_mv_pci_dma_sync_sg (struct pci_dev *, struct scatterlist *, int, int);
typedef unsigned long ia64_mv_pci_dma_address (struct scatterlist *);
typedef int ia64_mv_pci_dma_supported (struct pci_dev *, u64); typedef int ia64_mv_pci_dma_supported (struct pci_dev *, u64);
/* /*
...@@ -61,7 +60,10 @@ typedef unsigned int ia64_mv_inl_t (unsigned long); ...@@ -61,7 +60,10 @@ typedef unsigned int ia64_mv_inl_t (unsigned long);
typedef void ia64_mv_outb_t (unsigned char, unsigned long); typedef void ia64_mv_outb_t (unsigned char, unsigned long);
typedef void ia64_mv_outw_t (unsigned short, unsigned long); typedef void ia64_mv_outw_t (unsigned short, unsigned long);
typedef void ia64_mv_outl_t (unsigned int, unsigned long); typedef void ia64_mv_outl_t (unsigned int, unsigned long);
typedef void ia64_mv_mmiob_t (void); typedef unsigned char ia64_mv_readb_t (void *);
typedef unsigned short ia64_mv_readw_t (void *);
typedef unsigned int ia64_mv_readl_t (void *);
typedef unsigned long ia64_mv_readq_t (void *);
extern void machvec_noop (void); extern void machvec_noop (void);
...@@ -99,7 +101,6 @@ extern void machvec_noop (void); ...@@ -99,7 +101,6 @@ extern void machvec_noop (void);
# define platform_pci_unmap_sg ia64_mv.unmap_sg # define platform_pci_unmap_sg ia64_mv.unmap_sg
# define platform_pci_dma_sync_single ia64_mv.sync_single # define platform_pci_dma_sync_single ia64_mv.sync_single
# define platform_pci_dma_sync_sg ia64_mv.sync_sg # define platform_pci_dma_sync_sg ia64_mv.sync_sg
# define platform_pci_dma_address ia64_mv.dma_address
# define platform_pci_dma_supported ia64_mv.dma_supported # define platform_pci_dma_supported ia64_mv.dma_supported
# define platform_irq_desc ia64_mv.irq_desc # define platform_irq_desc ia64_mv.irq_desc
# define platform_irq_to_vector ia64_mv.irq_to_vector # define platform_irq_to_vector ia64_mv.irq_to_vector
...@@ -110,7 +111,10 @@ extern void machvec_noop (void); ...@@ -110,7 +111,10 @@ extern void machvec_noop (void);
# define platform_outb ia64_mv.outb # define platform_outb ia64_mv.outb
# define platform_outw ia64_mv.outw # define platform_outw ia64_mv.outw
# define platform_outl ia64_mv.outl # define platform_outl ia64_mv.outl
# define platofrm_mmiob ia64_mv.mmiob # define platform_readb ia64_mv.readb
# define platform_readw ia64_mv.readw
# define platform_readl ia64_mv.readl
# define platform_readq ia64_mv.readq
# endif # endif
/* __attribute__((__aligned__(16))) is required to make size of the /* __attribute__((__aligned__(16))) is required to make size of the
...@@ -138,7 +142,6 @@ struct ia64_machine_vector { ...@@ -138,7 +142,6 @@ struct ia64_machine_vector {
ia64_mv_pci_unmap_sg *unmap_sg; ia64_mv_pci_unmap_sg *unmap_sg;
ia64_mv_pci_dma_sync_single *sync_single; ia64_mv_pci_dma_sync_single *sync_single;
ia64_mv_pci_dma_sync_sg *sync_sg; ia64_mv_pci_dma_sync_sg *sync_sg;
ia64_mv_pci_dma_address *dma_address;
ia64_mv_pci_dma_supported *dma_supported; ia64_mv_pci_dma_supported *dma_supported;
ia64_mv_irq_desc *irq_desc; ia64_mv_irq_desc *irq_desc;
ia64_mv_irq_to_vector *irq_to_vector; ia64_mv_irq_to_vector *irq_to_vector;
...@@ -149,8 +152,11 @@ struct ia64_machine_vector { ...@@ -149,8 +152,11 @@ struct ia64_machine_vector {
ia64_mv_outb_t *outb; ia64_mv_outb_t *outb;
ia64_mv_outw_t *outw; ia64_mv_outw_t *outw;
ia64_mv_outl_t *outl; ia64_mv_outl_t *outl;
ia64_mv_mmiob_t *mmiob; ia64_mv_readb_t *readb;
} __attribute__((__aligned__(16))); ia64_mv_readw_t *readw;
ia64_mv_readl_t *readl;
ia64_mv_readq_t *readq;
};
#define MACHVEC_INIT(name) \ #define MACHVEC_INIT(name) \
{ \ { \
...@@ -173,7 +179,6 @@ struct ia64_machine_vector { ...@@ -173,7 +179,6 @@ struct ia64_machine_vector {
platform_pci_unmap_sg, \ platform_pci_unmap_sg, \
platform_pci_dma_sync_single, \ platform_pci_dma_sync_single, \
platform_pci_dma_sync_sg, \ platform_pci_dma_sync_sg, \
platform_pci_dma_address, \
platform_pci_dma_supported, \ platform_pci_dma_supported, \
platform_irq_desc, \ platform_irq_desc, \
platform_irq_to_vector, \ platform_irq_to_vector, \
...@@ -184,7 +189,10 @@ struct ia64_machine_vector { ...@@ -184,7 +189,10 @@ struct ia64_machine_vector {
platform_outb, \ platform_outb, \
platform_outw, \ platform_outw, \
platform_outl, \ platform_outl, \
platform_mmiob \ platform_readb, \
platform_readw, \
platform_readl, \
platform_readq, \
} }
extern struct ia64_machine_vector ia64_mv; extern struct ia64_machine_vector ia64_mv;
...@@ -206,7 +214,6 @@ extern ia64_mv_pci_map_sg swiotlb_map_sg; ...@@ -206,7 +214,6 @@ extern ia64_mv_pci_map_sg swiotlb_map_sg;
extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg; extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg;
extern ia64_mv_pci_dma_sync_single swiotlb_sync_single; extern ia64_mv_pci_dma_sync_single swiotlb_sync_single;
extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg; extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg;
extern ia64_mv_pci_dma_address swiotlb_dma_address;
extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported; extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
/* /*
...@@ -267,9 +274,6 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported; ...@@ -267,9 +274,6 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
#ifndef platform_pci_dma_sync_sg #ifndef platform_pci_dma_sync_sg
# define platform_pci_dma_sync_sg swiotlb_sync_sg # define platform_pci_dma_sync_sg swiotlb_sync_sg
#endif #endif
#ifndef platform_pci_dma_address
# define platform_pci_dma_address swiotlb_dma_address
#endif
#ifndef platform_pci_dma_supported #ifndef platform_pci_dma_supported
# define platform_pci_dma_supported swiotlb_pci_dma_supported # define platform_pci_dma_supported swiotlb_pci_dma_supported
#endif #endif
...@@ -300,8 +304,17 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported; ...@@ -300,8 +304,17 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
#ifndef platform_outl #ifndef platform_outl
# define platform_outl __ia64_outl # define platform_outl __ia64_outl
#endif #endif
#ifndef platform_mmiob #ifndef platform_readb
# define platform_mmiob __ia64_mmiob # define platform_readb __ia64_readb
#endif
#ifndef platform_readw
# define platform_readw __ia64_readw
#endif
#ifndef platform_readl
# define platform_readl __ia64_readl
#endif
#ifndef platform_readq
# define platform_readq __ia64_readq
#endif #endif
#endif /* _ASM_IA64_MACHVEC_H */ #endif /* _ASM_IA64_MACHVEC_H */
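A platform that needs its own accessor defines the platform_* macro in its machvec header before
these defaults are seen; a hedged sketch for a hypothetical platform "foo":

	extern ia64_mv_readb_t foo_readb;	/* hypothetical platform routine */
	#define platform_readb	foo_readb	/* the other platform_read* still fall back to __ia64_read* */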
...@@ -8,7 +8,6 @@ extern ia64_mv_pci_map_single sba_map_single; ...@@ -8,7 +8,6 @@ extern ia64_mv_pci_map_single sba_map_single;
extern ia64_mv_pci_unmap_single sba_unmap_single; extern ia64_mv_pci_unmap_single sba_unmap_single;
extern ia64_mv_pci_map_sg sba_map_sg; extern ia64_mv_pci_map_sg sba_map_sg;
extern ia64_mv_pci_unmap_sg sba_unmap_sg; extern ia64_mv_pci_unmap_sg sba_unmap_sg;
extern ia64_mv_pci_dma_address sba_dma_address;
extern ia64_mv_pci_dma_supported sba_dma_supported; extern ia64_mv_pci_dma_supported sba_dma_supported;
/* /*
...@@ -29,7 +28,6 @@ extern ia64_mv_pci_dma_supported sba_dma_supported; ...@@ -29,7 +28,6 @@ extern ia64_mv_pci_dma_supported sba_dma_supported;
#define platform_pci_unmap_sg sba_unmap_sg #define platform_pci_unmap_sg sba_unmap_sg
#define platform_pci_dma_sync_single ((ia64_mv_pci_dma_sync_single *) machvec_noop) #define platform_pci_dma_sync_single ((ia64_mv_pci_dma_sync_single *) machvec_noop)
#define platform_pci_dma_sync_sg ((ia64_mv_pci_dma_sync_sg *) machvec_noop) #define platform_pci_dma_sync_sg ((ia64_mv_pci_dma_sync_sg *) machvec_noop)
#define platform_pci_dma_address sba_dma_address
#define platform_pci_dma_supported sba_dma_supported #define platform_pci_dma_supported sba_dma_supported
#endif /* _ASM_IA64_MACHVEC_HPZX1_h */ #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
...@@ -16,7 +16,6 @@ extern ia64_mv_inl_t __ia64_inl; ...@@ -16,7 +16,6 @@ extern ia64_mv_inl_t __ia64_inl;
extern ia64_mv_outb_t __ia64_outb; extern ia64_mv_outb_t __ia64_outb;
extern ia64_mv_outw_t __ia64_outw; extern ia64_mv_outw_t __ia64_outw;
extern ia64_mv_outl_t __ia64_outl; extern ia64_mv_outl_t __ia64_outl;
extern ia64_mv_mmiob_t __ia64_mmiob;
#define MACHVEC_HELPER(name) \ #define MACHVEC_HELPER(name) \
struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \ struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
......
...@@ -44,7 +44,6 @@ extern ia64_mv_inl_t sn1_inl; ...@@ -44,7 +44,6 @@ extern ia64_mv_inl_t sn1_inl;
extern ia64_mv_outb_t sn1_outb; extern ia64_mv_outb_t sn1_outb;
extern ia64_mv_outw_t sn1_outw; extern ia64_mv_outw_t sn1_outw;
extern ia64_mv_outl_t sn1_outl; extern ia64_mv_outl_t sn1_outl;
extern ia64_mv_mmiob_t sn_mmiob;
extern ia64_mv_pci_alloc_consistent sn1_pci_alloc_consistent; extern ia64_mv_pci_alloc_consistent sn1_pci_alloc_consistent;
extern ia64_mv_pci_free_consistent sn1_pci_free_consistent; extern ia64_mv_pci_free_consistent sn1_pci_free_consistent;
extern ia64_mv_pci_map_single sn1_pci_map_single; extern ia64_mv_pci_map_single sn1_pci_map_single;
...@@ -53,7 +52,6 @@ extern ia64_mv_pci_map_sg sn1_pci_map_sg; ...@@ -53,7 +52,6 @@ extern ia64_mv_pci_map_sg sn1_pci_map_sg;
extern ia64_mv_pci_unmap_sg sn1_pci_unmap_sg; extern ia64_mv_pci_unmap_sg sn1_pci_unmap_sg;
extern ia64_mv_pci_dma_sync_single sn1_pci_dma_sync_single; extern ia64_mv_pci_dma_sync_single sn1_pci_dma_sync_single;
extern ia64_mv_pci_dma_sync_sg sn1_pci_dma_sync_sg; extern ia64_mv_pci_dma_sync_sg sn1_pci_dma_sync_sg;
extern ia64_mv_pci_dma_address sn1_dma_address;
/* /*
* This stuff has dual use! * This stuff has dual use!
...@@ -74,7 +72,6 @@ extern ia64_mv_pci_dma_address sn1_dma_address; ...@@ -74,7 +72,6 @@ extern ia64_mv_pci_dma_address sn1_dma_address;
#define platform_outb sn1_outb #define platform_outb sn1_outb
#define platform_outw sn1_outw #define platform_outw sn1_outw
#define platform_outl sn1_outl #define platform_outl sn1_outl
#define platform_mmiob sn_mmiob
#define platform_pci_dma_init machvec_noop #define platform_pci_dma_init machvec_noop
#define platform_pci_alloc_consistent sn1_pci_alloc_consistent #define platform_pci_alloc_consistent sn1_pci_alloc_consistent
#define platform_pci_free_consistent sn1_pci_free_consistent #define platform_pci_free_consistent sn1_pci_free_consistent
...@@ -84,6 +81,5 @@ extern ia64_mv_pci_dma_address sn1_dma_address; ...@@ -84,6 +81,5 @@ extern ia64_mv_pci_dma_address sn1_dma_address;
#define platform_pci_unmap_sg sn1_pci_unmap_sg #define platform_pci_unmap_sg sn1_pci_unmap_sg
#define platform_pci_dma_sync_single sn1_pci_dma_sync_single #define platform_pci_dma_sync_single sn1_pci_dma_sync_single
#define platform_pci_dma_sync_sg sn1_pci_dma_sync_sg #define platform_pci_dma_sync_sg sn1_pci_dma_sync_sg
#define platform_pci_dma_address sn1_dma_address
#endif /* _ASM_IA64_MACHVEC_SN1_h */ #endif /* _ASM_IA64_MACHVEC_SN1_h */
/* /*
* Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved. * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License * under the terms of version 2 of the GNU General Public License
...@@ -41,13 +41,16 @@ extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge; ...@@ -41,13 +41,16 @@ extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
extern ia64_mv_irq_desc sn_irq_desc; extern ia64_mv_irq_desc sn_irq_desc;
extern ia64_mv_irq_to_vector sn_irq_to_vector; extern ia64_mv_irq_to_vector sn_irq_to_vector;
extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq; extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
extern ia64_mv_inb_t sn_inb; extern ia64_mv_inb_t __sn_inb;
extern ia64_mv_inw_t sn_inw; extern ia64_mv_inw_t __sn_inw;
extern ia64_mv_inl_t sn_inl; extern ia64_mv_inl_t __sn_inl;
extern ia64_mv_outb_t sn_outb; extern ia64_mv_outb_t __sn_outb;
extern ia64_mv_outw_t sn_outw; extern ia64_mv_outw_t __sn_outw;
extern ia64_mv_outl_t sn_outl; extern ia64_mv_outl_t __sn_outl;
extern ia64_mv_mmiob_t sn2_mmiob; extern ia64_mv_readb_t __sn_readb;
extern ia64_mv_readw_t __sn_readw;
extern ia64_mv_readl_t __sn_readl;
extern ia64_mv_readq_t __sn_readq;
extern ia64_mv_pci_alloc_consistent sn_pci_alloc_consistent; extern ia64_mv_pci_alloc_consistent sn_pci_alloc_consistent;
extern ia64_mv_pci_free_consistent sn_pci_free_consistent; extern ia64_mv_pci_free_consistent sn_pci_free_consistent;
extern ia64_mv_pci_map_single sn_pci_map_single; extern ia64_mv_pci_map_single sn_pci_map_single;
...@@ -56,7 +59,6 @@ extern ia64_mv_pci_map_sg sn_pci_map_sg; ...@@ -56,7 +59,6 @@ extern ia64_mv_pci_map_sg sn_pci_map_sg;
extern ia64_mv_pci_unmap_sg sn_pci_unmap_sg; extern ia64_mv_pci_unmap_sg sn_pci_unmap_sg;
extern ia64_mv_pci_dma_sync_single sn_pci_dma_sync_single; extern ia64_mv_pci_dma_sync_single sn_pci_dma_sync_single;
extern ia64_mv_pci_dma_sync_sg sn_pci_dma_sync_sg; extern ia64_mv_pci_dma_sync_sg sn_pci_dma_sync_sg;
extern ia64_mv_pci_dma_address sn_dma_address;
extern ia64_mv_pci_dma_supported sn_pci_dma_supported; extern ia64_mv_pci_dma_supported sn_pci_dma_supported;
/* /*
...@@ -72,13 +74,17 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported; ...@@ -72,13 +74,17 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported;
#define platform_irq_init sn_irq_init #define platform_irq_init sn_irq_init
#define platform_send_ipi sn2_send_IPI #define platform_send_ipi sn2_send_IPI
#define platform_global_tlb_purge sn2_global_tlb_purge #define platform_global_tlb_purge sn2_global_tlb_purge
#define platform_inb sn_inb #define platform_pci_fixup sn_pci_fixup
#define platform_inw sn_inw #define platform_inb __sn_inb
#define platform_inl sn_inl #define platform_inw __sn_inw
#define platform_outb sn_outb #define platform_inl __sn_inl
#define platform_outw sn_outw #define platform_outb __sn_outb
#define platform_outl sn_outl #define platform_outw __sn_outw
#define platform_mmiob sn2_mmiob #define platform_outl __sn_outl
#define platform_readb __sn_readb
#define platform_readw __sn_readw
#define platform_readl __sn_readl
#define platform_readq __sn_readq
#define platform_irq_desc sn_irq_desc #define platform_irq_desc sn_irq_desc
#define platform_irq_to_vector sn_irq_to_vector #define platform_irq_to_vector sn_irq_to_vector
#define platform_local_vector_to_irq sn_local_vector_to_irq #define platform_local_vector_to_irq sn_local_vector_to_irq
...@@ -91,7 +97,6 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported; ...@@ -91,7 +97,6 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported;
#define platform_pci_unmap_sg sn_pci_unmap_sg #define platform_pci_unmap_sg sn_pci_unmap_sg
#define platform_pci_dma_sync_single sn_pci_dma_sync_single #define platform_pci_dma_sync_single sn_pci_dma_sync_single
#define platform_pci_dma_sync_sg sn_pci_dma_sync_sg #define platform_pci_dma_sync_sg sn_pci_dma_sync_sg
#define platform_pci_dma_address sn_dma_address
#define platform_pci_dma_supported sn_pci_dma_supported #define platform_pci_dma_supported sn_pci_dma_supported
#endif /* _ASM_IA64_MACHVEC_SN2_H */ #endif /* _ASM_IA64_MACHVEC_SN2_H */
...@@ -24,7 +24,7 @@ enum { ...@@ -24,7 +24,7 @@ enum {
IA64_MCA_FAILURE = 1 IA64_MCA_FAILURE = 1
}; };
#define IA64_MCA_RENDEZ_TIMEOUT (100 * HZ) /* 1000 milliseconds */ #define IA64_MCA_RENDEZ_TIMEOUT (20 * 1000) /* value in milliseconds - 20 seconds */
#define IA64_CMC_INT_DISABLE 0 #define IA64_CMC_INT_DISABLE 0
#define IA64_CMC_INT_ENABLE 1 #define IA64_CMC_INT_ENABLE 1
......
#ifndef _ASM_IA64_MODULE_H #ifndef _ASM_IA64_MODULE_H
#define _ASM_IA64_MODULE_H #define _ASM_IA64_MODULE_H
/* Module support currently broken (due to in-kernel module loader). */ /*
* IA-64-specific support for kernel module loader.
*
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
struct elf64_shdr;			/* forward declaration */
struct mod_arch_specific {
struct elf64_shdr *core_plt; /* core PLT section */
struct elf64_shdr *init_plt; /* init PLT section */
struct elf64_shdr *got; /* global offset table */
struct elf64_shdr *opd; /* official procedure descriptors */
struct elf64_shdr *unwind; /* unwind-table section */
unsigned long gp; /* global-pointer for module */
void *unw_table; /* unwind-table cookie returned by unwinder */
unsigned int next_got_entry; /* index of next available got entry */
};
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Ehdr Elf64_Ehdr
#define MODULE_PROC_FAMILY "ia64"
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
#define ARCH_SHF_SMALL SHF_IA_64_SHORT
#endif /* _ASM_IA64_MODULE_H */ #endif /* _ASM_IA64_MODULE_H */
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
#define PCIBIOS_MIN_MEM 0x10000000 #define PCIBIOS_MIN_MEM 0x10000000
void pcibios_config_init(void); void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int bus); struct pci_bus * pcibios_scan_root(void *acpi_handle, int segment, int bus);
struct pci_dev; struct pci_dev;
...@@ -58,7 +58,6 @@ extern int pcibios_prep_mwi (struct pci_dev *); ...@@ -58,7 +58,6 @@ extern int pcibios_prep_mwi (struct pci_dev *);
#define pci_unmap_sg platform_pci_unmap_sg #define pci_unmap_sg platform_pci_unmap_sg
#define pci_dma_sync_single platform_pci_dma_sync_single #define pci_dma_sync_single platform_pci_dma_sync_single
#define pci_dma_sync_sg platform_pci_dma_sync_sg #define pci_dma_sync_sg platform_pci_dma_sync_sg
#define sg_dma_address platform_pci_dma_address
#define pci_dma_supported platform_pci_dma_supported #define pci_dma_supported platform_pci_dma_supported
/* pci_unmap_{single,page} is not a nop, thus... */ /* pci_unmap_{single,page} is not a nop, thus... */
...@@ -92,11 +91,23 @@ extern int pcibios_prep_mwi (struct pci_dev *); ...@@ -92,11 +91,23 @@ extern int pcibios_prep_mwi (struct pci_dev *);
#define pci_controller_num(PDEV) (0) #define pci_controller_num(PDEV) (0)
#define sg_dma_len(sg) ((sg)->dma_length) #define sg_dma_len(sg) ((sg)->dma_length)
#define sg_dma_address(sg) ((sg)->dma_address)
#define HAVE_PCI_MMAP #define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine); enum pci_mmap_state mmap_state, int write_combine);
struct pci_controller {
void *acpi_handle;
void *iommu;
int segment;
u64 mem_offset;
};
#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
#define PCI_SEGMENT(busdev) (PCI_CONTROLLER(busdev)->segment)
/* generic pci stuff */ /* generic pci stuff */
#include <asm-generic/pci.h> #include <asm-generic/pci.h>
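A hedged sketch of how the new per-bus sysdata would be consulted; "dev" is an illustrative
struct pci_dev pointer:

	struct pci_controller *ctrl = PCI_CONTROLLER(dev->bus);

	printk(KERN_DEBUG "segment %d, mem_offset 0x%lx\n",
	       PCI_SEGMENT(dev->bus), ctrl->mem_offset);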
......
...@@ -379,7 +379,7 @@ extern unsigned long get_wchan (struct task_struct *p); ...@@ -379,7 +379,7 @@ extern unsigned long get_wchan (struct task_struct *p);
static inline unsigned long static inline unsigned long
ia64_get_kr (unsigned long regnum) ia64_get_kr (unsigned long regnum)
{ {
unsigned long r; unsigned long r = 0;
switch (regnum) { switch (regnum) {
case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break; case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break;
......
...@@ -36,14 +36,24 @@ extern spinlock_t sal_lock; ...@@ -36,14 +36,24 @@ extern spinlock_t sal_lock;
#define __SAL_CALL(result,a0,a1,a2,a3,a4,a5,a6,a7) \ #define __SAL_CALL(result,a0,a1,a2,a3,a4,a5,a6,a7) \
result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7) result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7)
# define SAL_CALL(result,args...) do { \ # define SAL_CALL(result,args...) do { \
unsigned long flags; \ unsigned long __ia64_sc_flags; \
struct ia64_fpreg fr[6]; \ struct ia64_fpreg __ia64_sc_fr[6]; \
ia64_save_scratch_fpregs(fr); \ ia64_save_scratch_fpregs(__ia64_sc_fr); \
spin_lock_irqsave(&sal_lock, flags); \ spin_lock_irqsave(&sal_lock, __ia64_sc_flags); \
__SAL_CALL(result,args); \ __SAL_CALL(result, args); \
spin_unlock_irqrestore(&sal_lock, flags); \ spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags); \
ia64_load_scratch_fpregs(fr); \ ia64_load_scratch_fpregs(__ia64_sc_fr); \
} while (0)
# define SAL_CALL_NOLOCK(result,args...) do { \
unsigned long __ia64_scn_flags; \
struct ia64_fpreg __ia64_scn_fr[6]; \
ia64_save_scratch_fpregs(__ia64_scn_fr); \
local_irq_save(__ia64_scn_flags); \
__SAL_CALL(result, args); \
local_irq_restore(__ia64_scn_flags); \
ia64_load_scratch_fpregs(__ia64_scn_fr); \
} while (0) } while (0)
#define SAL_SET_VECTORS 0x01000000 #define SAL_SET_VECTORS 0x01000000
...@@ -686,13 +696,14 @@ ia64_sal_get_state_info_size (u64 sal_info_type) ...@@ -686,13 +696,14 @@ ia64_sal_get_state_info_size (u64 sal_info_type)
/* /*
* Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from * Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from
* the monarch processor. * the monarch processor. Must not lock, because it will not return on any cpu until the
* monarch processor sends a wake up.
*/ */
static inline s64 static inline s64
ia64_sal_mc_rendez (void) ia64_sal_mc_rendez (void)
{ {
struct ia64_sal_retval isrv; struct ia64_sal_retval isrv;
SAL_CALL(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0); SAL_CALL_NOLOCK(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
return isrv.status; return isrv.status;
} }
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
* *
* Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 2001-2002 Hewlett-Packard Co * Copyright (C) 2001-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#ifndef _ASM_IA64_SMP_H #ifndef _ASM_IA64_SMP_H
...@@ -74,7 +74,7 @@ cpu_logical_id (int cpuid) ...@@ -74,7 +74,7 @@ cpu_logical_id (int cpuid)
int i; int i;
for (i = 0; i < NR_CPUS; ++i) for (i = 0; i < NR_CPUS; ++i)
if (cpu_physical_id(i) == (__u32) cpuid) if (cpu_physical_id(i) == cpuid)
break; break;
return i; return i;
} }
......
...@@ -3,8 +3,8 @@ ...@@ -3,8 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 2000 Ralf Baechle * Copyright (C) 2000 Ralf Baechle
* Copyright (C) 2000-2001 Silicon Graphics, Inc.
*/ */
#ifndef _ASM_IA64_SN_IO_H #ifndef _ASM_IA64_SN_IO_H
#define _ASM_IA64_SN_IO_H #define _ASM_IA64_SN_IO_H
...@@ -78,4 +78,9 @@ ...@@ -78,4 +78,9 @@
#include <asm/sn/sn2/shubio.h> #include <asm/sn/sn2/shubio.h>
#endif #endif
/*
* Used to ensure write ordering (like mb(), but for I/O space)
*/
extern void sn_mmiob(void);
#endif /* _ASM_IA64_SN_IO_H */ #endif /* _ASM_IA64_SN_IO_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_SN2_IO_H
#define _ASM_SN_SN2_IO_H
extern void * sn_io_addr(unsigned long port); /* Forward declaration */
extern void sn_mmiob(void); /* Forward declaration */
#define __sn_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
extern void sn_dma_flush(unsigned long);
/*
* The following routines are SN Platform specific, called when
 * a reference is made to the inX/outX macros. The SN platform's
 * inX macros ensure that posted DMA writes on the Bridge
 * are flushed.
 *
 * The routines should be self-explanatory.
*/
static inline unsigned int
__sn_inb (unsigned long port)
{
volatile unsigned char *addr = sn_io_addr(port);
unsigned char ret;
ret = *addr;
sn_dma_flush((unsigned long)addr);
__sn_mf_a();
return ret;
}
static inline unsigned int
__sn_inw (unsigned long port)
{
volatile unsigned short *addr = sn_io_addr(port);
unsigned short ret;
ret = *addr;
sn_dma_flush((unsigned long)addr);
__sn_mf_a();
return ret;
}
static inline unsigned int
__sn_inl (unsigned long port)
{
volatile unsigned int *addr = sn_io_addr(port);
unsigned int ret;
ret = *addr;
sn_dma_flush((unsigned long)addr);
__sn_mf_a();
return ret;
}
static inline void
__sn_outb (unsigned char val, unsigned long port)
{
volatile unsigned char *addr = sn_io_addr(port);
*addr = val;
sn_mmiob();
}
static inline void
__sn_outw (unsigned short val, unsigned long port)
{
volatile unsigned short *addr = sn_io_addr(port);
*addr = val;
sn_mmiob();
}
static inline void
__sn_outl (unsigned int val, unsigned long port)
{
volatile unsigned int *addr = sn_io_addr(port);
*addr = val;
sn_mmiob();
}
/*
* The following routines are SN Platform specific, called when
 * a reference is made to the readX/writeX macros. The SN platform's
 * readX macros ensure that posted DMA writes on the Bridge
 * are flushed.
 *
 * The routines should be self-explanatory.
*/
static inline unsigned char
__sn_readb (void *addr)
{
unsigned char val;
val = *(volatile unsigned char *)addr;
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned short
__sn_readw (void *addr)
{
unsigned short val;
val = *(volatile unsigned short *)addr;
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned int
__sn_readl (void *addr)
{
unsigned int val;
val = *(volatile unsigned int *) addr;
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned long
__sn_readq (void *addr)
{
unsigned long val;
val = *(volatile unsigned long *) addr;
sn_dma_flush((unsigned long)addr);
return val;
}
/*
* For generic and SN2 kernels, we have a set of fast access
* PIO macros. These macros are provided on SN Platform
 * because the normal inX and readX macros perform the
 * additional task of flushing posted DMA requests on the Bridge.
 *
 * These routines should be self-explanatory.
*/
static inline unsigned int
sn_inb_fast (unsigned long port)
{
volatile unsigned char *addr = (unsigned char *)port;
unsigned char ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned int
sn_inw_fast (unsigned long port)
{
volatile unsigned short *addr = (unsigned short *)port;
unsigned short ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned int
sn_inl_fast (unsigned long port)
{
volatile unsigned int *addr = (unsigned int *)port;
unsigned int ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned char
sn_readb_fast (void *addr)
{
return *(volatile unsigned char *)addr;
}
static inline unsigned short
sn_readw_fast (void *addr)
{
return *(volatile unsigned short *)addr;
}
static inline unsigned int
sn_readl_fast (void *addr)
{
return *(volatile unsigned int *) addr;
}
static inline unsigned long
sn_readq_fast (void *addr)
{
return *(volatile unsigned long *) addr;
}
#endif
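A hedged contrast of the two access flavours (the register pointer is illustrative): readl() goes
through __sn_readl() and flushes posted DMA writes on the Bridge, while the *_fast variants skip
the flush and are only safe when ordering is otherwise guaranteed:

	unsigned int status  = readl(my_mmio_reg);		/* flushes posted writes via sn_dma_flush() */
	unsigned int counter = sn_readl_fast(my_mmio_reg);	/* no flush; plain uncached load */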
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define _ASM_IA64_SPINLOCK_H #define _ASM_IA64_SPINLOCK_H
/* /*
* Copyright (C) 1998-2002 Hewlett-Packard Co * Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* *
...@@ -15,58 +15,6 @@ ...@@ -15,58 +15,6 @@
#include <asm/bitops.h> #include <asm/bitops.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#undef NEW_LOCK
#ifdef NEW_LOCK
typedef struct {
volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(x) ((x)->lock = 0)
/*
* Streamlined test_and_set_bit(0, (x)). We use test-and-test-and-set
* rather than a simple xchg to avoid writing the cache-line when
* there is contention.
*/
#define _raw_spin_lock(x) \
{ \
register char *addr __asm__ ("r31") = (char *) &(x)->lock; \
\
__asm__ __volatile__ ( \
"mov r30=1\n" \
"mov ar.ccv=r0\n" \
";;\n" \
"cmpxchg4.acq r30=[%0],r30,ar.ccv\n" \
";;\n" \
"cmp.ne p15,p0=r30,r0\n" \
"(p15) br.call.spnt.few b7=ia64_spinlock_contention\n" \
";;\n" \
"1:\n" /* force a new bundle */ \
:: "r"(addr) \
: "ar.ccv", "ar.pfs", "b7", "p15", "r28", "r29", "r30", "memory"); \
}
#define _raw_spin_trylock(x) \
({ \
register long result; \
\
__asm__ __volatile__ ( \
"mov ar.ccv=r0\n" \
";;\n" \
"cmpxchg4.acq %0=[%2],%1,ar.ccv\n" \
: "=r"(result) : "r"(1), "r"(&(x)->lock) : "ar.ccv", "memory"); \
(result == 0); \
})
#define spin_is_locked(x) ((x)->lock != 0)
#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0;} while (0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
#else /* !NEW_LOCK */
typedef struct { typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
} spinlock_t; } spinlock_t;
...@@ -123,8 +71,6 @@ do { \ ...@@ -123,8 +71,6 @@ do { \
#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) #define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
#endif /* !NEW_LOCK */
typedef struct { typedef struct {
volatile int read_counter:31; volatile int read_counter:31;
volatile int write_lock:1; volatile int write_lock:1;
...@@ -136,7 +82,7 @@ typedef struct { ...@@ -136,7 +82,7 @@ typedef struct {
#define _raw_read_lock(rw) \ #define _raw_read_lock(rw) \
do { \ do { \
int tmp = 0; \ int __read_lock_tmp = 0; \
__asm__ __volatile__ ("1:\tfetchadd4.acq %0 = [%1], 1\n" \ __asm__ __volatile__ ("1:\tfetchadd4.acq %0 = [%1], 1\n" \
";;\n" \ ";;\n" \
"tbit.nz p6,p0 = %0, 31\n" \ "tbit.nz p6,p0 = %0, 31\n" \
...@@ -151,15 +97,15 @@ do { \ ...@@ -151,15 +97,15 @@ do { \
"br.cond.sptk.few 1b\n" \ "br.cond.sptk.few 1b\n" \
";;\n" \ ";;\n" \
".previous\n" \ ".previous\n" \
: "=&r" (tmp) \ : "=&r" (__read_lock_tmp) \
: "r" (rw) : "p6", "memory"); \ : "r" (rw) : "p6", "memory"); \
} while(0) } while(0)
#define _raw_read_unlock(rw) \ #define _raw_read_unlock(rw) \
do { \ do { \
int tmp = 0; \ int __read_unlock_tmp = 0; \
__asm__ __volatile__ ("fetchadd4.rel %0 = [%1], -1\n" \ __asm__ __volatile__ ("fetchadd4.rel %0 = [%1], -1\n" \
: "=r" (tmp) \ : "=r" (__read_unlock_tmp) \
: "r" (rw) \ : "r" (rw) \
: "memory"); \ : "memory"); \
} while(0) } while(0)
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/types.h> #include <linux/types.h>
struct pci_vector_struct { struct pci_vector_struct {
__u16 segment; /* PCI Segment number */
__u16 bus; /* PCI Bus number */ __u16 bus; /* PCI Bus number */
__u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */ __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
__u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */ __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
...@@ -108,7 +109,7 @@ ia64_insn_group_barrier (void) ...@@ -108,7 +109,7 @@ ia64_insn_group_barrier (void)
#define set_mb(var, value) do { (var) = (value); mb(); } while (0) #define set_mb(var, value) do { (var) = (value); mb(); } while (0)
#define set_wmb(var, value) do { (var) = (value); mb(); } while (0) #define set_wmb(var, value) do { (var) = (value); mb(); } while (0)
#define safe_halt() ia64_pal_halt(1) /* PAL_HALT */ #define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */
/* /*
* The group barrier in front of the rsm & ssm are necessary to ensure * The group barrier in front of the rsm & ssm are necessary to ensure
......
...@@ -7,8 +7,8 @@ ...@@ -7,8 +7,8 @@
* The main single-value unaligned transfer routines. Derived from * The main single-value unaligned transfer routines. Derived from
* the Linux/Alpha version. * the Linux/Alpha version.
* *
* Copyright (C) 1998, 1999 Hewlett-Packard Co * Copyright (C) 1998, 1999, 2003 Hewlett-Packard Co
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#define get_unaligned(ptr) \ #define get_unaligned(ptr) \
((__typeof__(*(ptr)))ia64_get_unaligned((ptr), sizeof(*(ptr)))) ((__typeof__(*(ptr)))ia64_get_unaligned((ptr), sizeof(*(ptr))))
...@@ -16,110 +16,105 @@ ...@@ -16,110 +16,105 @@
#define put_unaligned(x,ptr) \ #define put_unaligned(x,ptr) \
ia64_put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr))) ia64_put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr)))
/*
* EGCS 1.1 knows about arbitrary unaligned loads. Define some
* packed structures to talk about such things with.
*/
struct __una_u64 { __u64 x __attribute__((packed)); }; struct __una_u64 { __u64 x __attribute__((packed)); };
struct __una_u32 { __u32 x __attribute__((packed)); }; struct __una_u32 { __u32 x __attribute__((packed)); };
struct __una_u16 { __u16 x __attribute__((packed)); }; struct __una_u16 { __u16 x __attribute__((packed)); };
static inline unsigned long static inline unsigned long
__uldq (const unsigned long * r11) __uld8 (const unsigned long * addr)
{ {
const struct __una_u64 *ptr = (const struct __una_u64 *) r11; const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
return ptr->x; return ptr->x;
} }
static inline unsigned long static inline unsigned long
__uldl (const unsigned int * r11) __uld4 (const unsigned int * addr)
{ {
const struct __una_u32 *ptr = (const struct __una_u32 *) r11; const struct __una_u32 *ptr = (const struct __una_u32 *) addr;
return ptr->x; return ptr->x;
} }
static inline unsigned long static inline unsigned long
__uldw (const unsigned short * r11) __uld2 (const unsigned short * addr)
{ {
const struct __una_u16 *ptr = (const struct __una_u16 *) r11; const struct __una_u16 *ptr = (const struct __una_u16 *) addr;
return ptr->x; return ptr->x;
} }
static inline void static inline void
__ustq (unsigned long r5, unsigned long * r11) __ust8 (unsigned long val, unsigned long * addr)
{ {
struct __una_u64 *ptr = (struct __una_u64 *) r11; struct __una_u64 *ptr = (struct __una_u64 *) addr;
ptr->x = r5; ptr->x = val;
} }
static inline void static inline void
__ustl (unsigned long r5, unsigned int * r11) __ust4 (unsigned long val, unsigned int * addr)
{ {
struct __una_u32 *ptr = (struct __una_u32 *) r11; struct __una_u32 *ptr = (struct __una_u32 *) addr;
ptr->x = r5; ptr->x = val;
} }
static inline void static inline void
__ustw (unsigned long r5, unsigned short * r11) __ust2 (unsigned long val, unsigned short * addr)
{ {
struct __una_u16 *ptr = (struct __una_u16 *) r11; struct __una_u16 *ptr = (struct __una_u16 *) addr;
ptr->x = r5; ptr->x = val;
} }
/* /*
* This function doesn't actually exist. The idea is that when * This function doesn't actually exist. The idea is that when someone uses the macros
* someone uses the macros below with an unsupported size (datatype), * below with an unsupported size (datatype), the linker will alert us to the problem via
* the linker will alert us to the problem via an unresolved reference * an unresolved reference error.
* error.
*/ */
extern unsigned long ia64_bad_unaligned_access_length (void); extern unsigned long ia64_bad_unaligned_access_length (void);
#define ia64_get_unaligned(_ptr,size) \ #define ia64_get_unaligned(_ptr,size) \
({ \ ({ \
const void *ptr = (_ptr); \ const void *__ia64_ptr = (_ptr); \
unsigned long val; \ unsigned long __ia64_val; \
\ \
switch (size) { \ switch (size) { \
case 1: \ case 1: \
val = *(const unsigned char *) ptr; \ __ia64_val = *(const unsigned char *) __ia64_ptr; \
break; \ break; \
case 2: \ case 2: \
val = __uldw((const unsigned short *)ptr); \ __ia64_val = __uld2((const unsigned short *)__ia64_ptr); \
break; \ break; \
case 4: \ case 4: \
val = __uldl((const unsigned int *)ptr); \ __ia64_val = __uld4((const unsigned int *)__ia64_ptr); \
break; \ break; \
case 8: \ case 8: \
val = __uldq((const unsigned long *)ptr); \ __ia64_val = __uld8((const unsigned long *)__ia64_ptr); \
break; \ break; \
default: \ default: \
val = ia64_bad_unaligned_access_length(); \ __ia64_val = ia64_bad_unaligned_access_length(); \
} \ } \
val; \ __ia64_val; \
}) })
#define ia64_put_unaligned(_val,_ptr,size) \ #define ia64_put_unaligned(_val,_ptr,size) \
do { \ do { \
const void *ptr = (_ptr); \ const void *__ia64_ptr = (_ptr); \
unsigned long val = (_val); \ unsigned long __ia64_val = (_val); \
\ \
switch (size) { \ switch (size) { \
case 1: \ case 1: \
*(unsigned char *)ptr = (val); \ *(unsigned char *)__ia64_ptr = (__ia64_val); \
break; \ break; \
case 2: \ case 2: \
__ustw(val, (unsigned short *)ptr); \ __ust2(__ia64_val, (unsigned short *)__ia64_ptr); \
break; \ break; \
case 4: \ case 4: \
__ustl(val, (unsigned int *)ptr); \ __ust4(__ia64_val, (unsigned int *)__ia64_ptr); \
break; \ break; \
case 8: \ case 8: \
__ustq(val, (unsigned long *)ptr); \ __ust8(__ia64_val, (unsigned long *)__ia64_ptr); \
break; \ break; \
default: \ default: \
ia64_bad_unaligned_access_length(); \ ia64_bad_unaligned_access_length(); \
} \ } \
} while (0) } while (0)
#endif /* _ASM_IA64_UNALIGNED_H */ #endif /* _ASM_IA64_UNALIGNED_H */
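A hedged usage sketch ("buf" and the offset are illustrative): get_unaligned()/put_unaligned()
pick the right __uldN/__ustN helper from the pointer's type:

	unsigned char buf[16];		/* assume it holds packed, possibly misaligned data */
	__u32 v;

	v = get_unaligned((__u32 *) (buf + 1));		/* sizeof is 4, so this expands to __uld4() */
	put_unaligned(v + 1, (__u32 *) (buf + 1));	/* likewise expands to __ust4() */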