Commit 897f5ab2 authored by Linus Torvalds
parents 1d42a0ec b48fc7bb
...@@ -217,6 +217,16 @@ config IA64_SGI_SN_SIM ...@@ -217,6 +217,16 @@ config IA64_SGI_SN_SIM
If you are compiling a kernel that will run under SGI's IA-64 If you are compiling a kernel that will run under SGI's IA-64
simulator (Medusa) then say Y, otherwise say N. simulator (Medusa) then say Y, otherwise say N.
config IA64_SGI_SN_XP
tristate "Support communication between SGI SSIs"
depends on MSPEC
help
An SGI machine can be divided into multiple Single System
Images which act independently of each other and have
hardware based memory protection from the others. Enabling
this feature will allow for direct communication between SSIs
based on a network adapter and DMA messaging.
config FORCE_MAX_ZONEORDER config FORCE_MAX_ZONEORDER
int int
default "18" default "18"
...@@ -261,6 +271,15 @@ config HOTPLUG_CPU ...@@ -261,6 +271,15 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#. can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug. Say N if you want to disable CPU hotplug.
config SCHED_SMT
bool "SMT scheduler support"
depends on SMP
default n
help
Improves the CPU scheduler's decision making when dealing with
Intel IA64 chips with MultiThreading at a cost of slightly increased
overhead in some places. If unsure say N here.
config PREEMPT config PREEMPT
bool "Preemptible Kernel" bool "Preemptible Kernel"
help help
......
# #
# Automatically generated make config: don't edit # Automatically generated make config: don't edit
# Linux kernel version: 2.6.11-rc2 # Linux kernel version: 2.6.12-rc3
# Sat Jan 22 11:17:02 2005 # Tue May 3 15:55:04 2005
# #
# #
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
CONFIG_EXPERIMENTAL=y CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y CONFIG_CLEAN_COMPILE=y
CONFIG_LOCK_KERNEL=y CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
# #
# General setup # General setup
...@@ -21,24 +22,27 @@ CONFIG_POSIX_MQUEUE=y ...@@ -21,24 +22,27 @@ CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set # CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=20
CONFIG_HOTPLUG=y CONFIG_HOTPLUG=y
CONFIG_KOBJECT_UEVENT=y CONFIG_KOBJECT_UEVENT=y
CONFIG_IKCONFIG=y CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y CONFIG_IKCONFIG_PROC=y
# CONFIG_CPUSETS is not set
# CONFIG_EMBEDDED is not set # CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set # CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y CONFIG_FUTEX=y
CONFIG_EPOLL=y CONFIG_EPOLL=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SHMEM=y CONFIG_SHMEM=y
CONFIG_CC_ALIGN_FUNCTIONS=0 CONFIG_CC_ALIGN_FUNCTIONS=0
CONFIG_CC_ALIGN_LABELS=0 CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0 CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0 CONFIG_CC_ALIGN_JUMPS=0
# CONFIG_TINY_SHMEM is not set # CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
# #
# Loadable module support # Loadable module support
...@@ -85,6 +89,7 @@ CONFIG_FORCE_MAX_ZONEORDER=18 ...@@ -85,6 +89,7 @@ CONFIG_FORCE_MAX_ZONEORDER=18
CONFIG_SMP=y CONFIG_SMP=y
CONFIG_NR_CPUS=4 CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y CONFIG_HOTPLUG_CPU=y
# CONFIG_SCHED_SMT is not set
# CONFIG_PREEMPT is not set # CONFIG_PREEMPT is not set
CONFIG_HAVE_DEC_LOCK=y CONFIG_HAVE_DEC_LOCK=y
CONFIG_IA32_SUPPORT=y CONFIG_IA32_SUPPORT=y
...@@ -135,6 +140,7 @@ CONFIG_PCI_DOMAINS=y ...@@ -135,6 +140,7 @@ CONFIG_PCI_DOMAINS=y
# CONFIG_PCI_MSI is not set # CONFIG_PCI_MSI is not set
CONFIG_PCI_LEGACY_PROC=y CONFIG_PCI_LEGACY_PROC=y
CONFIG_PCI_NAMES=y CONFIG_PCI_NAMES=y
# CONFIG_PCI_DEBUG is not set
# #
# PCI Hotplug Support # PCI Hotplug Support
...@@ -151,10 +157,6 @@ CONFIG_HOTPLUG_PCI_ACPI=m ...@@ -151,10 +157,6 @@ CONFIG_HOTPLUG_PCI_ACPI=m
# #
# CONFIG_PCCARD is not set # CONFIG_PCCARD is not set
#
# PC-card bridges
#
# #
# Device Drivers # Device Drivers
# #
...@@ -195,9 +197,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m ...@@ -195,9 +197,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_NBD=m
# CONFIG_BLK_DEV_SX8 is not set # CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_UB is not set # CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=m CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096 CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="" CONFIG_INITRAMFS_SOURCE=""
# CONFIG_CDROM_PKTCDVD is not set # CONFIG_CDROM_PKTCDVD is not set
...@@ -313,7 +316,6 @@ CONFIG_SCSI_FC_ATTRS=y ...@@ -313,7 +316,6 @@ CONFIG_SCSI_FC_ATTRS=y
# CONFIG_SCSI_BUSLOGIC is not set # CONFIG_SCSI_BUSLOGIC is not set
# CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_EATA is not set # CONFIG_SCSI_EATA is not set
# CONFIG_SCSI_EATA_PIO is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set # CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_GDTH is not set # CONFIG_SCSI_GDTH is not set
# CONFIG_SCSI_IPS is not set # CONFIG_SCSI_IPS is not set
...@@ -325,7 +327,6 @@ CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 ...@@ -325,7 +327,6 @@ CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set # CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
# CONFIG_SCSI_IPR is not set # CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
CONFIG_SCSI_QLOGIC_FC=y CONFIG_SCSI_QLOGIC_FC=y
# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set # CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
CONFIG_SCSI_QLOGIC_1280=y CONFIG_SCSI_QLOGIC_1280=y
...@@ -336,6 +337,7 @@ CONFIG_SCSI_QLA22XX=m ...@@ -336,6 +337,7 @@ CONFIG_SCSI_QLA22XX=m
CONFIG_SCSI_QLA2300=m CONFIG_SCSI_QLA2300=m
CONFIG_SCSI_QLA2322=m CONFIG_SCSI_QLA2322=m
# CONFIG_SCSI_QLA6312 is not set # CONFIG_SCSI_QLA6312 is not set
# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_DC395x is not set # CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set # CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_DEBUG is not set # CONFIG_SCSI_DEBUG is not set
...@@ -358,6 +360,7 @@ CONFIG_DM_CRYPT=m ...@@ -358,6 +360,7 @@ CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m CONFIG_DM_ZERO=m
# CONFIG_DM_MULTIPATH is not set
# #
# Fusion MPT device support # Fusion MPT device support
...@@ -386,7 +389,6 @@ CONFIG_NET=y ...@@ -386,7 +389,6 @@ CONFIG_NET=y
# #
CONFIG_PACKET=y CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set # CONFIG_PACKET_MMAP is not set
CONFIG_NETLINK_DEV=y
CONFIG_UNIX=y CONFIG_UNIX=y
# CONFIG_NET_KEY is not set # CONFIG_NET_KEY is not set
CONFIG_INET=y CONFIG_INET=y
...@@ -446,7 +448,6 @@ CONFIG_DUMMY=m ...@@ -446,7 +448,6 @@ CONFIG_DUMMY=m
# CONFIG_BONDING is not set # CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set # CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set # CONFIG_TUN is not set
# CONFIG_ETHERTAP is not set
# #
# ARCnet devices # ARCnet devices
...@@ -484,7 +485,6 @@ CONFIG_NET_PCI=y ...@@ -484,7 +485,6 @@ CONFIG_NET_PCI=y
# CONFIG_DGRS is not set # CONFIG_DGRS is not set
CONFIG_EEPRO100=m CONFIG_EEPRO100=m
CONFIG_E100=m CONFIG_E100=m
# CONFIG_E100_NAPI is not set
# CONFIG_FEALNX is not set # CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set # CONFIG_NATSEMI is not set
# CONFIG_NE2K_PCI is not set # CONFIG_NE2K_PCI is not set
...@@ -565,25 +565,6 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 ...@@ -565,25 +565,6 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_EVDEV is not set # CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set # CONFIG_INPUT_EVBUG is not set
#
# Input I/O drivers
#
CONFIG_GAMEPORT=m
CONFIG_SOUND_GAMEPORT=m
# CONFIG_GAMEPORT_NS558 is not set
# CONFIG_GAMEPORT_L4 is not set
# CONFIG_GAMEPORT_EMU10K1 is not set
# CONFIG_GAMEPORT_VORTEX is not set
# CONFIG_GAMEPORT_FM801 is not set
# CONFIG_GAMEPORT_CS461X is not set
CONFIG_SERIO=y
CONFIG_SERIO_I8042=y
# CONFIG_SERIO_SERPORT is not set
# CONFIG_SERIO_CT82C710 is not set
# CONFIG_SERIO_PCIPS2 is not set
CONFIG_SERIO_LIBPS2=y
# CONFIG_SERIO_RAW is not set
# #
# Input Device Drivers # Input Device Drivers
# #
...@@ -601,6 +582,24 @@ CONFIG_MOUSE_PS2=y ...@@ -601,6 +582,24 @@ CONFIG_MOUSE_PS2=y
# CONFIG_INPUT_TOUCHSCREEN is not set # CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set # CONFIG_INPUT_MISC is not set
#
# Hardware I/O ports
#
CONFIG_SERIO=y
CONFIG_SERIO_I8042=y
# CONFIG_SERIO_SERPORT is not set
# CONFIG_SERIO_PCIPS2 is not set
CONFIG_SERIO_LIBPS2=y
# CONFIG_SERIO_RAW is not set
CONFIG_GAMEPORT=m
# CONFIG_GAMEPORT_NS558 is not set
# CONFIG_GAMEPORT_L4 is not set
# CONFIG_GAMEPORT_EMU10K1 is not set
# CONFIG_GAMEPORT_VORTEX is not set
# CONFIG_GAMEPORT_FM801 is not set
# CONFIG_GAMEPORT_CS461X is not set
CONFIG_SOUND_GAMEPORT=m
# #
# Character devices # Character devices
# #
...@@ -615,6 +614,8 @@ CONFIG_SERIAL_NONSTANDARD=y ...@@ -615,6 +614,8 @@ CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_SYNCLINK is not set # CONFIG_SYNCLINK is not set
# CONFIG_SYNCLINKMP is not set # CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set # CONFIG_N_HDLC is not set
# CONFIG_SPECIALIX is not set
# CONFIG_SX is not set
# CONFIG_STALDRV is not set # CONFIG_STALDRV is not set
# #
...@@ -635,6 +636,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y ...@@ -635,6 +636,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
# #
CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256 CONFIG_LEGACY_PTY_COUNT=256
...@@ -670,6 +672,12 @@ CONFIG_HPET=y ...@@ -670,6 +672,12 @@ CONFIG_HPET=y
# CONFIG_HPET_RTC_IRQ is not set # CONFIG_HPET_RTC_IRQ is not set
CONFIG_HPET_MMAP=y CONFIG_HPET_MMAP=y
CONFIG_MAX_RAW_DEVS=256 CONFIG_MAX_RAW_DEVS=256
# CONFIG_HANGCHECK_TIMER is not set
#
# TPM devices
#
# CONFIG_TCG_TPM is not set
# #
# I2C support # I2C support
...@@ -705,7 +713,6 @@ CONFIG_MAX_RAW_DEVS=256 ...@@ -705,7 +713,6 @@ CONFIG_MAX_RAW_DEVS=256
# #
CONFIG_VGA_CONSOLE=y CONFIG_VGA_CONSOLE=y
CONFIG_DUMMY_CONSOLE=y CONFIG_DUMMY_CONSOLE=y
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
# #
# Sound # Sound
...@@ -715,6 +722,8 @@ CONFIG_DUMMY_CONSOLE=y ...@@ -715,6 +722,8 @@ CONFIG_DUMMY_CONSOLE=y
# #
# USB support # USB support
# #
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB=y CONFIG_USB=y
# CONFIG_USB_DEBUG is not set # CONFIG_USB_DEBUG is not set
...@@ -726,8 +735,6 @@ CONFIG_USB_DEVICEFS=y ...@@ -726,8 +735,6 @@ CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DYNAMIC_MINORS is not set # CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_SUSPEND is not set # CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set # CONFIG_USB_OTG is not set
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
# #
# USB Host Controller Drivers # USB Host Controller Drivers
...@@ -736,6 +743,8 @@ CONFIG_USB_EHCI_HCD=m ...@@ -736,6 +743,8 @@ CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_SPLIT_ISO is not set # CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set # CONFIG_USB_EHCI_ROOT_HUB_TT is not set
CONFIG_USB_OHCI_HCD=m CONFIG_USB_OHCI_HCD=m
# CONFIG_USB_OHCI_BIG_ENDIAN is not set
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_UHCI_HCD=y CONFIG_USB_UHCI_HCD=y
# CONFIG_USB_SL811_HCD is not set # CONFIG_USB_SL811_HCD is not set
...@@ -751,12 +760,11 @@ CONFIG_USB_UHCI_HCD=y ...@@ -751,12 +760,11 @@ CONFIG_USB_UHCI_HCD=y
# #
CONFIG_USB_STORAGE=m CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set # CONFIG_USB_STORAGE_DEBUG is not set
# CONFIG_USB_STORAGE_RW_DETECT is not set
# CONFIG_USB_STORAGE_DATAFAB is not set # CONFIG_USB_STORAGE_DATAFAB is not set
# CONFIG_USB_STORAGE_FREECOM is not set # CONFIG_USB_STORAGE_FREECOM is not set
# CONFIG_USB_STORAGE_ISD200 is not set # CONFIG_USB_STORAGE_ISD200 is not set
# CONFIG_USB_STORAGE_DPCM is not set # CONFIG_USB_STORAGE_DPCM is not set
# CONFIG_USB_STORAGE_HP8200e is not set # CONFIG_USB_STORAGE_USBAT is not set
# CONFIG_USB_STORAGE_SDDR09 is not set # CONFIG_USB_STORAGE_SDDR09 is not set
# CONFIG_USB_STORAGE_SDDR55 is not set # CONFIG_USB_STORAGE_SDDR55 is not set
# CONFIG_USB_STORAGE_JUMPSHOT is not set # CONFIG_USB_STORAGE_JUMPSHOT is not set
...@@ -800,6 +808,7 @@ CONFIG_USB_HIDINPUT=y ...@@ -800,6 +808,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_PEGASUS is not set # CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set # CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set # CONFIG_USB_USBNET is not set
# CONFIG_USB_MON is not set
# #
# USB port drivers # USB port drivers
...@@ -824,6 +833,7 @@ CONFIG_USB_HIDINPUT=y ...@@ -824,6 +833,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_PHIDGETKIT is not set # CONFIG_USB_PHIDGETKIT is not set
# CONFIG_USB_PHIDGETSERVO is not set # CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_IDMOUSE is not set # CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_SISUSBVGA is not set
# CONFIG_USB_TEST is not set # CONFIG_USB_TEST is not set
# #
...@@ -867,7 +877,12 @@ CONFIG_REISERFS_FS_POSIX_ACL=y ...@@ -867,7 +877,12 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y CONFIG_REISERFS_FS_SECURITY=y
# CONFIG_JFS_FS is not set # CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y CONFIG_FS_POSIX_ACL=y
#
# XFS support
#
CONFIG_XFS_FS=y CONFIG_XFS_FS=y
CONFIG_XFS_EXPORT=y
# CONFIG_XFS_RT is not set # CONFIG_XFS_RT is not set
# CONFIG_XFS_QUOTA is not set # CONFIG_XFS_QUOTA is not set
# CONFIG_XFS_SECURITY is not set # CONFIG_XFS_SECURITY is not set
...@@ -945,7 +960,7 @@ CONFIG_NFSD_V4=y ...@@ -945,7 +960,7 @@ CONFIG_NFSD_V4=y
CONFIG_NFSD_TCP=y CONFIG_NFSD_TCP=y
CONFIG_LOCKD=m CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=m CONFIG_EXPORTFS=y
CONFIG_SUNRPC=m CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m CONFIG_SUNRPC_GSS=m
CONFIG_RPCSEC_GSS_KRB5=m CONFIG_RPCSEC_GSS_KRB5=m
...@@ -1042,8 +1057,10 @@ CONFIG_GENERIC_IRQ_PROBE=y ...@@ -1042,8 +1057,10 @@ CONFIG_GENERIC_IRQ_PROBE=y
# #
# Kernel hacking # Kernel hacking
# #
# CONFIG_PRINTK_TIME is not set
CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y CONFIG_MAGIC_SYSRQ=y
CONFIG_LOG_BUF_SHIFT=20
# CONFIG_SCHEDSTATS is not set # CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_SPINLOCK is not set
...@@ -1077,6 +1094,7 @@ CONFIG_CRYPTO_MD5=m ...@@ -1077,6 +1094,7 @@ CONFIG_CRYPTO_MD5=m
# CONFIG_CRYPTO_SHA256 is not set # CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set # CONFIG_CRYPTO_SHA512 is not set
# CONFIG_CRYPTO_WP512 is not set # CONFIG_CRYPTO_WP512 is not set
# CONFIG_CRYPTO_TGR192 is not set
CONFIG_CRYPTO_DES=m CONFIG_CRYPTO_DES=m
# CONFIG_CRYPTO_BLOWFISH is not set # CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_TWOFISH is not set # CONFIG_CRYPTO_TWOFISH is not set
......
...@@ -1944,43 +1944,17 @@ sba_connect_bus(struct pci_bus *bus) ...@@ -1944,43 +1944,17 @@ sba_connect_bus(struct pci_bus *bus)
static void __init static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{ {
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *obj;
acpi_handle phandle;
unsigned int node; unsigned int node;
int pxm;
ioc->node = MAX_NUMNODES; ioc->node = MAX_NUMNODES;
/* pxm = acpi_get_pxm(handle);
* Check for a _PXM on this node first. We don't typically see
* one here, so we'll end up getting it from the parent.
*/
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) {
if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
return;
/* Reset the acpi buffer */
buffer.length = ACPI_ALLOCATE_BUFFER;
buffer.pointer = NULL;
if (ACPI_FAILURE(acpi_evaluate_object(phandle, "_PXM", NULL,
&buffer)))
return;
}
if (!buffer.length || !buffer.pointer) if (pxm < 0)
return; return;
obj = buffer.pointer; node = pxm_to_nid_map[pxm];
if (obj->type != ACPI_TYPE_INTEGER ||
obj->integer.value >= MAX_PXM_DOMAINS) {
acpi_os_free(buffer.pointer);
return;
}
node = pxm_to_nid_map[obj->integer.value];
acpi_os_free(buffer.pointer);
if (node >= MAX_NUMNODES || !node_online(node)) if (node >= MAX_NUMNODES || !node_online(node))
return; return;
......
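Untangled from the right-hand column of the hunk above, the new node lookup in sba_map_ioc_to_node() reduces to the short sequence below. This is a readability sketch of code already shown in the diff, not an additional change:

	ioc->node = MAX_NUMNODES;

	pxm = acpi_get_pxm(handle);
	if (pxm < 0)
		return;

	node = pxm_to_nid_map[pxm];
	if (node >= MAX_NUMNODES || !node_online(node))
		return;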
...@@ -779,7 +779,7 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret) ...@@ -779,7 +779,7 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
union acpi_object *obj; union acpi_object *obj;
struct acpi_table_iosapic *iosapic; struct acpi_table_iosapic *iosapic;
unsigned int gsi_base; unsigned int gsi_base;
int node; int pxm, node;
/* Only care about objects w/ a method that returns the MADT */ /* Only care about objects w/ a method that returns the MADT */
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
...@@ -805,29 +805,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret) ...@@ -805,29 +805,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
gsi_base = iosapic->global_irq_base; gsi_base = iosapic->global_irq_base;
acpi_os_free(buffer.pointer); acpi_os_free(buffer.pointer);
buffer.length = ACPI_ALLOCATE_BUFFER;
buffer.pointer = NULL;
/* /*
* OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
* us which node to associate this with. * us which node to associate this with.
*/ */
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) pxm = acpi_get_pxm(handle);
return AE_OK; if (pxm < 0)
if (!buffer.length || !buffer.pointer)
return AE_OK;
obj = buffer.pointer;
if (obj->type != ACPI_TYPE_INTEGER ||
obj->integer.value >= MAX_PXM_DOMAINS) {
acpi_os_free(buffer.pointer);
return AE_OK; return AE_OK;
}
node = pxm_to_nid_map[obj->integer.value]; node = pxm_to_nid_map[pxm];
acpi_os_free(buffer.pointer);
if (node >= MAX_NUMNODES || !node_online(node) || if (node >= MAX_NUMNODES || !node_online(node) ||
cpus_empty(node_to_cpumask(node))) cpus_empty(node_to_cpumask(node)))
......
...@@ -782,7 +782,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve) ...@@ -782,7 +782,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
.mem.offset 8,0 .mem.offset 8,0
st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
END(ia64_ret_from_ia32_execve_syscall) END(ia64_ret_from_ia32_execve)
// fall through // fall through
#endif /* CONFIG_IA32_SUPPORT */ #endif /* CONFIG_IA32_SUPPORT */
GLOBAL_ENTRY(ia64_leave_kernel) GLOBAL_ENTRY(ia64_leave_kernel)
......
...@@ -611,8 +611,10 @@ GLOBAL_ENTRY(fsys_bubble_down) ...@@ -611,8 +611,10 @@ GLOBAL_ENTRY(fsys_bubble_down)
movl r2=ia64_ret_from_syscall movl r2=ia64_ret_from_syscall
;; ;;
mov rp=r2 // set the real return addr mov rp=r2 // set the real return addr
tbit.z p8,p0=r3,TIF_SYSCALL_TRACE and r3=_TIF_SYSCALL_TRACEAUDIT,r3
;; ;;
cmp.eq p8,p0=r3,r0
(p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8 (p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8
(p8) br.call.sptk.many b6=b6 // ignore this return addr (p8) br.call.sptk.many b6=b6 // ignore this return addr
br.cond.sptk ia64_trace_syscall br.cond.sptk ia64_trace_syscall
......
...@@ -132,8 +132,7 @@ mca_handler_bh(unsigned long paddr) ...@@ -132,8 +132,7 @@ mca_handler_bh(unsigned long paddr)
spin_unlock(&mca_bh_lock); spin_unlock(&mca_bh_lock);
/* This process is about to be killed itself */ /* This process is about to be killed itself */
force_sig(SIGKILL, current); do_exit(SIGKILL);
schedule();
} }
/** /**
...@@ -439,6 +438,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec ...@@ -439,6 +438,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr; psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
psr2->cpl = 0; psr2->cpl = 0;
psr2->ri = 0; psr2->ri = 0;
psr2->i = 0;
return 1; return 1;
} }
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <asm/asmmacro.h> #include <asm/asmmacro.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/ptrace.h>
GLOBAL_ENTRY(mca_handler_bhhook) GLOBAL_ENTRY(mca_handler_bhhook)
invala // clear RSE ? invala // clear RSE ?
...@@ -20,12 +21,21 @@ GLOBAL_ENTRY(mca_handler_bhhook) ...@@ -20,12 +21,21 @@ GLOBAL_ENTRY(mca_handler_bhhook)
;; ;;
alloc r16=ar.pfs,0,2,1,0 // make a new frame alloc r16=ar.pfs,0,2,1,0 // make a new frame
;; ;;
mov ar.rsc=0
;;
mov r13=IA64_KR(CURRENT) // current task pointer mov r13=IA64_KR(CURRENT) // current task pointer
;; ;;
adds r12=IA64_TASK_THREAD_KSP_OFFSET,r13 mov r2=r13
;;
addl r22=IA64_RBS_OFFSET,r2
;;
mov ar.bspstore=r22
;; ;;
ld8 r12=[r12] // stack pointer addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
;; ;;
adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
;;
st1 [r2]=r0 // clear current->thread.on_ustack flag
mov loc0=r16 mov loc0=r16
movl loc1=mca_handler_bh // recovery C function movl loc1=mca_handler_bh // recovery C function
;; ;;
...@@ -34,7 +44,9 @@ GLOBAL_ENTRY(mca_handler_bhhook) ...@@ -34,7 +44,9 @@ GLOBAL_ENTRY(mca_handler_bhhook)
;; ;;
mov loc1=rp mov loc1=rp
;; ;;
br.call.sptk.many rp=b6 // not return ... ssm psr.i
;;
br.call.sptk.many rp=b6 // does not return ...
;; ;;
mov ar.pfs=loc0 mov ar.pfs=loc0
mov rp=loc1 mov rp=loc1
......
...@@ -1265,6 +1265,8 @@ pfm_unregister_buffer_fmt(pfm_uuid_t uuid) ...@@ -1265,6 +1265,8 @@ pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
} }
EXPORT_SYMBOL(pfm_unregister_buffer_fmt); EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
extern void update_pal_halt_status(int);
static int static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{ {
...@@ -1311,6 +1313,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) ...@@ -1311,6 +1313,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
is_syswide, is_syswide,
cpu)); cpu));
/*
* disable default_idle() to go to PAL_HALT
*/
update_pal_halt_status(0);
UNLOCK_PFS(flags); UNLOCK_PFS(flags);
return 0; return 0;
...@@ -1366,6 +1373,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu) ...@@ -1366,6 +1373,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
is_syswide, is_syswide,
cpu)); cpu));
/*
* if possible, enable default_idle() to go into PAL_HALT
*/
if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
update_pal_halt_status(1);
UNLOCK_PFS(flags); UNLOCK_PFS(flags);
return 0; return 0;
...@@ -4202,7 +4215,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) ...@@ -4202,7 +4215,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
req->load_pid, req->load_pid,
ctx->ctx_state)); ctx->ctx_state));
return -EINVAL; return -EBUSY;
} }
DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
...@@ -4704,16 +4717,26 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags) ...@@ -4704,16 +4717,26 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
if (task == current || ctx->ctx_fl_system) return 0; if (task == current || ctx->ctx_fl_system) return 0;
/* /*
* if context is UNLOADED we are safe to go * we are monitoring another thread
*/
if (state == PFM_CTX_UNLOADED) return 0;
/*
* no command can operate on a zombie context
*/ */
if (state == PFM_CTX_ZOMBIE) { switch(state) {
DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); case PFM_CTX_UNLOADED:
return -EINVAL; /*
* if context is UNLOADED we are safe to go
*/
return 0;
case PFM_CTX_ZOMBIE:
/*
* no command can operate on a zombie context
*/
DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
return -EINVAL;
case PFM_CTX_MASKED:
/*
* PMU state has been saved to software even though
* the thread may still be running.
*/
if (cmd != PFM_UNLOAD_CONTEXT) return 0;
} }
/* /*
......
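For reference, the reworked check in pfm_check_task_state() on the right-hand side of the hunk above reads as a single switch over the context state. A readability sketch reconstructed from the diff, not an extra change:

	switch(state) {
	case PFM_CTX_UNLOADED:
		/* if context is UNLOADED we are safe to go */
		return 0;
	case PFM_CTX_ZOMBIE:
		/* no command can operate on a zombie context */
		DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
		return -EINVAL;
	case PFM_CTX_MASKED:
		/*
		 * PMU state has been saved to software even though
		 * the thread may still be running.
		 */
		if (cmd != PFM_UNLOAD_CONTEXT) return 0;
	}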
...@@ -50,7 +50,7 @@ ...@@ -50,7 +50,7 @@
#include "sigframe.h" #include "sigframe.h"
void (*ia64_mark_idle)(int); void (*ia64_mark_idle)(int);
static cpumask_t cpu_idle_map; static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
unsigned long boot_option_idle_override = 0; unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override); EXPORT_SYMBOL(boot_option_idle_override);
...@@ -173,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall ...@@ -173,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
ia64_do_signal(oldset, scr, in_syscall); ia64_do_signal(oldset, scr, in_syscall);
} }
static int pal_halt = 1; static int pal_halt = 1;
static int can_do_pal_halt = 1;
static int __init nohalt_setup(char * str) static int __init nohalt_setup(char * str)
{ {
pal_halt = 0; pal_halt = 0;
...@@ -181,16 +183,20 @@ static int __init nohalt_setup(char * str) ...@@ -181,16 +183,20 @@ static int __init nohalt_setup(char * str)
} }
__setup("nohalt", nohalt_setup); __setup("nohalt", nohalt_setup);
void
update_pal_halt_status(int status)
{
can_do_pal_halt = pal_halt && status;
}
/* /*
* We use this if we don't have any better idle routine.. * We use this if we don't have any better idle routine..
*/ */
void void
default_idle (void) default_idle (void)
{ {
unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);
while (!need_resched()) while (!need_resched())
if (pal_halt && !pmu_active) if (can_do_pal_halt)
safe_halt(); safe_halt();
else else
cpu_relax(); cpu_relax();
...@@ -223,20 +229,31 @@ static inline void play_dead(void) ...@@ -223,20 +229,31 @@ static inline void play_dead(void)
} }
#endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_HOTPLUG_CPU */
void cpu_idle_wait(void) void cpu_idle_wait(void)
{ {
int cpu; unsigned int cpu, this_cpu = get_cpu();
cpumask_t map; cpumask_t map;
for_each_online_cpu(cpu) set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
cpu_set(cpu, cpu_idle_map); put_cpu();
wmb(); cpus_clear(map);
do { for_each_online_cpu(cpu) {
ssleep(1); per_cpu(cpu_idle_state, cpu) = 1;
cpus_and(map, cpu_idle_map, cpu_online_map); cpu_set(cpu, map);
} while (!cpus_empty(map)); }
__get_cpu_var(cpu_idle_state) = 0;
wmb();
do {
ssleep(1);
for_each_online_cpu(cpu) {
if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
cpu_clear(cpu, map);
}
cpus_and(map, map, cpu_online_map);
} while (!cpus_empty(map));
} }
EXPORT_SYMBOL_GPL(cpu_idle_wait); EXPORT_SYMBOL_GPL(cpu_idle_wait);
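Pieced together from the right-hand column of the hunk above, the reworked cpu_idle_wait() now handshakes through a per-cpu cpu_idle_state flag instead of the old shared cpu_idle_map. A readability sketch only:

	void cpu_idle_wait(void)
	{
		unsigned int cpu, this_cpu = get_cpu();
		cpumask_t map;

		set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
		put_cpu();

		cpus_clear(map);
		for_each_online_cpu(cpu) {
			per_cpu(cpu_idle_state, cpu) = 1;	/* ask each cpu to acknowledge */
			cpu_set(cpu, map);
		}

		__get_cpu_var(cpu_idle_state) = 0;	/* this cpu is running, not idle */

		wmb();
		do {
			ssleep(1);
			for_each_online_cpu(cpu) {
				if (cpu_isset(cpu, map) &&
				    !per_cpu(cpu_idle_state, cpu))
					cpu_clear(cpu, map);	/* cpu passed through the idle loop */
			}
			cpus_and(map, map, cpu_online_map);
		} while (!cpus_empty(map));
	}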
...@@ -244,7 +261,6 @@ void __attribute__((noreturn)) ...@@ -244,7 +261,6 @@ void __attribute__((noreturn))
cpu_idle (void) cpu_idle (void)
{ {
void (*mark_idle)(int) = ia64_mark_idle; void (*mark_idle)(int) = ia64_mark_idle;
int cpu = smp_processor_id();
/* endless idle loop with no priority at all */ /* endless idle loop with no priority at all */
while (1) { while (1) {
...@@ -255,12 +271,13 @@ cpu_idle (void) ...@@ -255,12 +271,13 @@ cpu_idle (void)
while (!need_resched()) { while (!need_resched()) {
void (*idle)(void); void (*idle)(void);
if (__get_cpu_var(cpu_idle_state))
__get_cpu_var(cpu_idle_state) = 0;
rmb();
if (mark_idle) if (mark_idle)
(*mark_idle)(1); (*mark_idle)(1);
if (cpu_isset(cpu, cpu_idle_map))
cpu_clear(cpu, cpu_idle_map);
rmb();
idle = pm_idle; idle = pm_idle;
if (!idle) if (!idle)
idle = default_idle; idle = default_idle;
......
...@@ -224,7 +224,8 @@ ia64_rt_sigreturn (struct sigscratch *scr) ...@@ -224,7 +224,8 @@ ia64_rt_sigreturn (struct sigscratch *scr)
* could be corrupted. * could be corrupted.
*/ */
retval = (long) &ia64_leave_kernel; retval = (long) &ia64_leave_kernel;
if (test_thread_flag(TIF_SYSCALL_TRACE)) if (test_thread_flag(TIF_SYSCALL_TRACE)
|| test_thread_flag(TIF_SYSCALL_AUDIT))
/* /*
* strace expects to be notified after sigreturn returns even though the * strace expects to be notified after sigreturn returns even though the
* context to which we return may not be in the middle of a syscall. * context to which we return may not be in the middle of a syscall.
......
/* /*
* Cache flushing routines. * Cache flushing routines.
* *
* Copyright (C) 1999-2001 Hewlett-Packard Co * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
* Copyright (C) 1999-2001 David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#include <asm/asmmacro.h> #include <asm/asmmacro.h>
#include <asm/page.h> #include <asm/page.h>
...@@ -26,7 +26,7 @@ GLOBAL_ENTRY(flush_icache_range) ...@@ -26,7 +26,7 @@ GLOBAL_ENTRY(flush_icache_range)
mov ar.lc=r8 mov ar.lc=r8
;; ;;
.Loop: fc in0 // issuable on M0 only .Loop: fc.i in0 // issuable on M2 only
add in0=32,in0 add in0=32,in0
br.cloop.sptk.few .Loop br.cloop.sptk.few .Loop
;; ;;
......
...@@ -75,6 +75,7 @@ GLOBAL_ENTRY(memcpy) ...@@ -75,6 +75,7 @@ GLOBAL_ENTRY(memcpy)
mov f6=f0 mov f6=f0
br.cond.sptk .common_code br.cond.sptk .common_code
;; ;;
END(memcpy)
GLOBAL_ENTRY(__copy_user) GLOBAL_ENTRY(__copy_user)
.prologue .prologue
// check dest alignment // check dest alignment
...@@ -524,7 +525,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ ...@@ -524,7 +525,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
#undef B #undef B
#undef C #undef C
#undef D #undef D
END(memcpy)
/* /*
* Due to lack of local tag support in gcc 2.x assembler, it is not clear which * Due to lack of local tag support in gcc 2.x assembler, it is not clear which
......
...@@ -57,10 +57,10 @@ GLOBAL_ENTRY(memset) ...@@ -57,10 +57,10 @@ GLOBAL_ENTRY(memset)
{ .mmi { .mmi
.prologue .prologue
alloc tmp = ar.pfs, 3, 0, 0, 0 alloc tmp = ar.pfs, 3, 0, 0, 0
.body
lfetch.nt1 [dest] // lfetch.nt1 [dest] //
.save ar.lc, save_lc .save ar.lc, save_lc
mov.i save_lc = ar.lc mov.i save_lc = ar.lc
.body
} { .mmi } { .mmi
mov ret0 = dest // return value mov ret0 = dest // return value
cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero
......
...@@ -4,10 +4,15 @@ ...@@ -4,10 +4,15 @@
# License. See the file "COPYING" in the main directory of this archive # License. See the file "COPYING" in the main directory of this archive
# for more details. # for more details.
# #
# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved. # Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved.
# #
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \ obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
huberror.o io_init.o iomv.o klconflib.o sn2/ huberror.o io_init.o iomv.o klconflib.o sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_SGI_TIOCX) += tiocx.o obj-$(CONFIG_SGI_TIOCX) += tiocx.o
obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o
xp-y := xp_main.o xp_nofault.o
obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o
xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o
...@@ -174,6 +174,12 @@ static void sn_fixup_ionodes(void) ...@@ -174,6 +174,12 @@ static void sn_fixup_ionodes(void)
if (status) if (status)
continue; continue;
/* Attach the error interrupt handlers */
if (nasid & 1)
ice_error_init(hubdev);
else
hub_error_init(hubdev);
for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev; hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
...@@ -211,10 +217,6 @@ static void sn_fixup_ionodes(void) ...@@ -211,10 +217,6 @@ static void sn_fixup_ionodes(void)
sn_flush_device_list; sn_flush_device_list;
} }
if (!(i & 1))
hub_error_init(hubdev);
else
ice_error_init(hubdev);
} }
} }
......
...@@ -37,6 +37,11 @@ static u64 *sn_oemdata_size, sn_oemdata_bufsize; ...@@ -37,6 +37,11 @@ static u64 *sn_oemdata_size, sn_oemdata_bufsize;
* This function is the callback routine that SAL calls to log error * This function is the callback routine that SAL calls to log error
* info for platform errors. buf is appended to sn_oemdata, resizing as * info for platform errors. buf is appended to sn_oemdata, resizing as
* required. * required.
* Note: this is a SAL to OS callback, running under the same rules as the SAL
* code. SAL calls are run with preempt disabled so this routine must not
* sleep. vmalloc can sleep so print_hook cannot resize the output buffer
* itself, instead it must set the required size and return to let the caller
* resize the buffer then redrive the SAL call.
*/ */
static int print_hook(const char *fmt, ...) static int print_hook(const char *fmt, ...)
{ {
...@@ -47,18 +52,8 @@ static int print_hook(const char *fmt, ...) ...@@ -47,18 +52,8 @@ static int print_hook(const char *fmt, ...)
vsnprintf(buf, sizeof(buf), fmt, args); vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args); va_end(args);
len = strlen(buf); len = strlen(buf);
while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) { if (*sn_oemdata_size + len <= sn_oemdata_bufsize)
u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000); memcpy(*sn_oemdata + *sn_oemdata_size, buf, len);
if (!newbuf) {
printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
__FUNCTION__);
return 0;
}
memcpy(newbuf, *sn_oemdata, *sn_oemdata_size);
vfree(*sn_oemdata);
*sn_oemdata = newbuf;
}
memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1);
*sn_oemdata_size += len; *sn_oemdata_size += len;
return 0; return 0;
} }
...@@ -98,7 +93,20 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata, ...@@ -98,7 +93,20 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
sn_oemdata = oemdata; sn_oemdata = oemdata;
sn_oemdata_size = oemdata_size; sn_oemdata_size = oemdata_size;
sn_oemdata_bufsize = 0; sn_oemdata_bufsize = 0;
ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); *sn_oemdata_size = PAGE_SIZE; /* first guess at how much data will be generated */
while (*sn_oemdata_size > sn_oemdata_bufsize) {
u8 *newbuf = vmalloc(*sn_oemdata_size);
if (!newbuf) {
printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
__FUNCTION__);
return 1;
}
vfree(*sn_oemdata);
*sn_oemdata = newbuf;
sn_oemdata_bufsize = *sn_oemdata_size;
*sn_oemdata_size = 0;
ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
}
up(&sn_oemdata_mutex); up(&sn_oemdata_mutex);
return 0; return 0;
} }
......
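The note added to print_hook() above explains the shape of this fix: the SAL callback can no longer extend the buffer itself (vmalloc may sleep), so the caller sizes the buffer and redrives the SAL call until the output fits. Reconstructed from the right-hand column, the caller loop in sn_platform_plat_specific_err_print() now looks roughly like this (readability sketch):

	*sn_oemdata_size = PAGE_SIZE;	/* first guess at how much data will be generated */
	while (*sn_oemdata_size > sn_oemdata_bufsize) {
		u8 *newbuf = vmalloc(*sn_oemdata_size);
		if (!newbuf) {
			printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
			       __FUNCTION__);
			return 1;
		}
		vfree(*sn_oemdata);
		*sn_oemdata = newbuf;
		sn_oemdata_bufsize = *sn_oemdata_size;
		*sn_oemdata_size = 0;
		ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
	}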
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
*/ */
#include <linux/config.h> #include <linux/config.h>
...@@ -73,6 +73,12 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); ...@@ -73,6 +73,12 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
EXPORT_PER_CPU_SYMBOL(__sn_hub_info); EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]);
EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
EXPORT_PER_CPU_SYMBOL(__sn_nodepda);
partid_t sn_partid = -1; partid_t sn_partid = -1;
EXPORT_SYMBOL(sn_partid); EXPORT_SYMBOL(sn_partid);
char sn_system_serial_number_string[128]; char sn_system_serial_number_string[128];
...@@ -373,11 +379,11 @@ static void __init sn_init_pdas(char **cmdline_p) ...@@ -373,11 +379,11 @@ static void __init sn_init_pdas(char **cmdline_p)
{ {
cnodeid_t cnode; cnodeid_t cnode;
memset(pda->cnodeid_to_nasid_table, -1, memset(sn_cnodeid_to_nasid, -1,
sizeof(pda->cnodeid_to_nasid_table)); sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
for_each_online_node(cnode) for_each_online_node(cnode)
pda->cnodeid_to_nasid_table[cnode] = sn_cnodeid_to_nasid[cnode] =
pxm_to_nasid(nid_to_pxm_map[cnode]); pxm_to_nasid(nid_to_pxm_map[cnode]);
numionodes = num_online_nodes(); numionodes = num_online_nodes();
scan_for_ionodes(); scan_for_ionodes();
...@@ -477,7 +483,8 @@ void __init sn_cpu_init(void) ...@@ -477,7 +483,8 @@ void __init sn_cpu_init(void)
cnode = nasid_to_cnodeid(nasid); cnode = nasid_to_cnodeid(nasid);
pda->p_nodepda = nodepdaindr[cnode]; sn_nodepda = nodepdaindr[cnode];
pda->led_address = pda->led_address =
(typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
pda->led_state = LED_ALWAYS_SET; pda->led_state = LED_ALWAYS_SET;
...@@ -486,15 +493,18 @@ void __init sn_cpu_init(void) ...@@ -486,15 +493,18 @@ void __init sn_cpu_init(void)
pda->idle_flag = 0; pda->idle_flag = 0;
if (cpuid != 0) { if (cpuid != 0) {
memcpy(pda->cnodeid_to_nasid_table, /* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
pdacpu(0)->cnodeid_to_nasid_table, memcpy(sn_cnodeid_to_nasid,
sizeof(pda->cnodeid_to_nasid_table)); (&per_cpu(__sn_cnodeid_to_nasid, 0)),
sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
} }
/* /*
* Check for WARs. * Check for WARs.
* Only needs to be done once, on BSP. * Only needs to be done once, on BSP.
* Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i]. * Has to be done after loop above, because it uses this cpu's
* sn_cnodeid_to_nasid table which was just initialized if this
* isn't cpu 0.
* Has to be done before assignment below. * Has to be done before assignment below.
*/ */
if (!wars_have_been_checked) { if (!wars_have_been_checked) {
...@@ -580,8 +590,7 @@ static void __init scan_for_ionodes(void) ...@@ -580,8 +590,7 @@ static void __init scan_for_ionodes(void)
brd = find_lboard_any(brd, KLTYPE_SNIA); brd = find_lboard_any(brd, KLTYPE_SNIA);
while (brd) { while (brd) {
pda->cnodeid_to_nasid_table[numionodes] = sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid;
brd->brd_nasid;
physical_node_map[brd->brd_nasid] = numionodes; physical_node_map[brd->brd_nasid] = numionodes;
root_lboard[numionodes] = brd; root_lboard[numionodes] = brd;
numionodes++; numionodes++;
...@@ -602,8 +611,7 @@ static void __init scan_for_ionodes(void) ...@@ -602,8 +611,7 @@ static void __init scan_for_ionodes(void)
root_lboard[nasid_to_cnodeid(nasid)], root_lboard[nasid_to_cnodeid(nasid)],
KLTYPE_TIO); KLTYPE_TIO);
while (brd) { while (brd) {
pda->cnodeid_to_nasid_table[numionodes] = sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid;
brd->brd_nasid;
physical_node_map[brd->brd_nasid] = numionodes; physical_node_map[brd->brd_nasid] = numionodes;
root_lboard[numionodes] = brd; root_lboard[numionodes] = brd;
numionodes++; numionodes++;
...@@ -614,7 +622,6 @@ static void __init scan_for_ionodes(void) ...@@ -614,7 +622,6 @@ static void __init scan_for_ionodes(void)
brd = find_lboard_any(brd, KLTYPE_TIO); brd = find_lboard_any(brd, KLTYPE_TIO);
} }
} }
} }
int int
...@@ -623,7 +630,8 @@ nasid_slice_to_cpuid(int nasid, int slice) ...@@ -623,7 +630,8 @@ nasid_slice_to_cpuid(int nasid, int slice)
long cpu; long cpu;
for (cpu=0; cpu < NR_CPUS; cpu++) for (cpu=0; cpu < NR_CPUS; cpu++)
if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice) if (cpuid_to_nasid(cpu) == nasid &&
cpuid_to_slice(cpu) == slice)
return cpu; return cpu;
return -1; return -1;
......
...@@ -21,6 +21,8 @@ ...@@ -21,6 +21,8 @@
#include <asm/sn/types.h> #include <asm/sn/types.h>
#include <asm/sn/shubio.h> #include <asm/sn/shubio.h>
#include <asm/sn/tiocx.h> #include <asm/sn/tiocx.h>
#include <asm/sn/l1.h>
#include <asm/sn/module.h>
#include "tio.h" #include "tio.h"
#include "xtalk/xwidgetdev.h" #include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h" #include "xtalk/hubdev.h"
...@@ -308,14 +310,12 @@ void tiocx_irq_free(struct sn_irq_info *sn_irq_info) ...@@ -308,14 +310,12 @@ void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
} }
} }
uint64_t uint64_t tiocx_dma_addr(uint64_t addr)
tiocx_dma_addr(uint64_t addr)
{ {
return PHYS_TO_TIODMA(addr); return PHYS_TO_TIODMA(addr);
} }
uint64_t uint64_t tiocx_swin_base(int nasid)
tiocx_swin_base(int nasid)
{ {
return TIO_SWIN_BASE(nasid, TIOCX_CORELET); return TIO_SWIN_BASE(nasid, TIOCX_CORELET);
} }
...@@ -330,19 +330,6 @@ EXPORT_SYMBOL(tiocx_bus_type); ...@@ -330,19 +330,6 @@ EXPORT_SYMBOL(tiocx_bus_type);
EXPORT_SYMBOL(tiocx_dma_addr); EXPORT_SYMBOL(tiocx_dma_addr);
EXPORT_SYMBOL(tiocx_swin_base); EXPORT_SYMBOL(tiocx_swin_base);
static uint64_t tiocx_get_hubdev_info(u64 handle, u64 address)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
ia64_sal_oemcall_nolock(&ret_stuff,
SN_SAL_IOIF_GET_HUBDEV_INFO,
handle, address, 0, 0, 0, 0, 0);
return ret_stuff.v0;
}
static void tio_conveyor_set(nasid_t nasid, int enable_flag) static void tio_conveyor_set(nasid_t nasid, int enable_flag)
{ {
uint64_t ice_frz; uint64_t ice_frz;
...@@ -379,7 +366,29 @@ static void tio_corelet_reset(nasid_t nasid, int corelet) ...@@ -379,7 +366,29 @@ static void tio_corelet_reset(nasid_t nasid, int corelet)
udelay(2000); udelay(2000);
} }
static int fpga_attached(nasid_t nasid) static int tiocx_btchar_get(int nasid)
{
moduleid_t module_id;
geoid_t geoid;
int cnodeid;
cnodeid = nasid_to_cnodeid(nasid);
geoid = cnodeid_get_geoid(cnodeid);
module_id = geo_module(geoid);
return MODULE_GET_BTCHAR(module_id);
}
static int is_fpga_brick(int nasid)
{
switch (tiocx_btchar_get(nasid)) {
case L1_BRICKTYPE_SA:
case L1_BRICKTYPE_ATHENA:
return 1;
}
return 0;
}
static int bitstream_loaded(nasid_t nasid)
{ {
uint64_t cx_credits; uint64_t cx_credits;
...@@ -396,7 +405,7 @@ static int tiocx_reload(struct cx_dev *cx_dev) ...@@ -396,7 +405,7 @@ static int tiocx_reload(struct cx_dev *cx_dev)
int mfg_num = CX_DEV_NONE; int mfg_num = CX_DEV_NONE;
nasid_t nasid = cx_dev->cx_id.nasid; nasid_t nasid = cx_dev->cx_id.nasid;
if (fpga_attached(nasid)) { if (bitstream_loaded(nasid)) {
uint64_t cx_id; uint64_t cx_id;
cx_id = cx_id =
...@@ -427,9 +436,10 @@ static ssize_t show_cxdev_control(struct device *dev, char *buf) ...@@ -427,9 +436,10 @@ static ssize_t show_cxdev_control(struct device *dev, char *buf)
{ {
struct cx_dev *cx_dev = to_cx_dev(dev); struct cx_dev *cx_dev = to_cx_dev(dev);
return sprintf(buf, "0x%x 0x%x 0x%x\n", return sprintf(buf, "0x%x 0x%x 0x%x %d\n",
cx_dev->cx_id.nasid, cx_dev->cx_id.nasid,
cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num); cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num,
tiocx_btchar_get(cx_dev->cx_id.nasid));
} }
static ssize_t store_cxdev_control(struct device *dev, const char *buf, static ssize_t store_cxdev_control(struct device *dev, const char *buf,
...@@ -475,20 +485,14 @@ static int __init tiocx_init(void) ...@@ -475,20 +485,14 @@ static int __init tiocx_init(void)
if ((nasid = cnodeid_to_nasid(cnodeid)) < 0) if ((nasid = cnodeid_to_nasid(cnodeid)) < 0)
break; /* No more nasids .. bail out of loop */ break; /* No more nasids .. bail out of loop */
if (nasid & 0x1) { /* TIO's are always odd */ if ((nasid & 0x1) && is_fpga_brick(nasid)) {
struct hubdev_info *hubdev; struct hubdev_info *hubdev;
uint64_t status;
struct xwidget_info *widgetp; struct xwidget_info *widgetp;
DBG("Found TIO at nasid 0x%x\n", nasid); DBG("Found TIO at nasid 0x%x\n", nasid);
hubdev = hubdev =
(struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo); (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo);
status =
tiocx_get_hubdev_info(nasid,
(uint64_t) __pa(hubdev));
if (status)
continue;
widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET]; widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET];
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition (XP) base.
*
* XP provides a base from which its users can interact
* with XPC, yet not be dependent on XPC.
*
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/xp.h>
/*
* Target of nofault PIO read.
*/
u64 xp_nofault_PIOR_target;
/*
* xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
* users of XPC.
*/
struct xpc_registration xpc_registrations[XPC_NCHANNELS];
/*
* Initialize the XPC interface to indicate that XPC isn't loaded.
*/
static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
struct xpc_interface xpc_interface = {
(void (*)(int)) xpc_notloaded,
(void (*)(int)) xpc_notloaded,
(enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
(enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
(enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
xpc_notloaded,
(void (*)(partid_t, int, void *)) xpc_notloaded,
(enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
};
/*
* XPC calls this when it (the XPC module) has been loaded.
*/
void
xpc_set_interface(void (*connect)(int),
void (*disconnect)(int),
enum xpc_retval (*allocate)(partid_t, int, u32, void **),
enum xpc_retval (*send)(partid_t, int, void *),
enum xpc_retval (*send_notify)(partid_t, int, void *,
xpc_notify_func, void *),
void (*received)(partid_t, int, void *),
enum xpc_retval (*partid_to_nasids)(partid_t, void *))
{
xpc_interface.connect = connect;
xpc_interface.disconnect = disconnect;
xpc_interface.allocate = allocate;
xpc_interface.send = send;
xpc_interface.send_notify = send_notify;
xpc_interface.received = received;
xpc_interface.partid_to_nasids = partid_to_nasids;
}
/*
* XPC calls this when it (the XPC module) is being unloaded.
*/
void
xpc_clear_interface(void)
{
xpc_interface.connect = (void (*)(int)) xpc_notloaded;
xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
void **)) xpc_notloaded;
xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
xpc_notloaded;
xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
xpc_notify_func, void *)) xpc_notloaded;
xpc_interface.received = (void (*)(partid_t, int, void *))
xpc_notloaded;
xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
xpc_notloaded;
}
/*
* Register for automatic establishment of a channel connection whenever
* a partition comes up.
*
* Arguments:
*
* ch_number - channel # to register for connection.
* func - function to call for asynchronous notification of channel
* state changes (i.e., connection, disconnection, error) and
* the arrival of incoming messages.
* key - pointer to optional user-defined value that gets passed back
* to the user on any callouts made to func.
* payload_size - size in bytes of the XPC message's payload area which
* contains a user-defined message. The user should make
* this large enough to hold their largest message.
* nentries - max #of XPC message entries a message queue can contain.
* The actual number, which is determined when a connection
* is established and may be less than requested, will be
* passed to the user via the xpcConnected callout.
* assigned_limit - max number of kthreads allowed to be processing
* messages (per connection) at any given instant.
* idle_limit - max number of kthreads allowed to be idle at any given
* instant.
*/
enum xpc_retval
xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
u16 nentries, u32 assigned_limit, u32 idle_limit)
{
struct xpc_registration *registration;
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
DBUG_ON(payload_size == 0 || nentries == 0);
DBUG_ON(func == NULL);
DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
registration = &xpc_registrations[ch_number];
if (down_interruptible(&registration->sema) != 0) {
return xpcInterrupted;
}
/* if XPC_CHANNEL_REGISTERED(ch_number) */
if (registration->func != NULL) {
up(&registration->sema);
return xpcAlreadyRegistered;
}
/* register the channel for connection */
registration->msg_size = XPC_MSG_SIZE(payload_size);
registration->nentries = nentries;
registration->assigned_limit = assigned_limit;
registration->idle_limit = idle_limit;
registration->key = key;
registration->func = func;
up(&registration->sema);
xpc_interface.connect(ch_number);
return xpcSuccess;
}
/*
* Remove the registration for automatic connection of the specified channel
* when a partition comes up.
*
* Before returning, xpc_disconnect() will wait until all connections on the
* specified channel have been closed/torn down. So the caller can be assured
* that they will not be receiving any more callouts from XPC to their
* function registered via xpc_connect().
*
* Arguments:
*
* ch_number - channel # to unregister.
*/
void
xpc_disconnect(int ch_number)
{
struct xpc_registration *registration;
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
registration = &xpc_registrations[ch_number];
/*
* We've decided not to make this a down_interruptible(), since we
* figured XPC's users will just turn around and call xpc_disconnect()
* again anyways, so we might as well wait, if need be.
*/
down(&registration->sema);
/* if !XPC_CHANNEL_REGISTERED(ch_number) */
if (registration->func == NULL) {
up(&registration->sema);
return;
}
/* remove the connection registration for the specified channel */
registration->func = NULL;
registration->key = NULL;
registration->nentries = 0;
registration->msg_size = 0;
registration->assigned_limit = 0;
registration->idle_limit = 0;
xpc_interface.disconnect(ch_number);
up(&registration->sema);
return;
}
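A minimal sketch of how a kernel-level user of XP might drive the registration API above. The channel number, callback body, and sizing values are illustrative, and the xpc_channel_func signature is assumed from its use here rather than shown in this commit; none of this is code from the patch:

	#include <linux/init.h>
	#include <asm/sn/xp.h>

	#define MY_XPC_CHANNEL	0	/* hypothetical channel number */

	static void my_channel_func(enum xpc_retval reason, partid_t partid,
				    int ch_number, void *data, void *key)
	{
		/* react to connection state changes and incoming messages */
	}

	static int __init my_xp_user_init(void)
	{
		enum xpc_retval ret;

		ret = xpc_connect(MY_XPC_CHANNEL, my_channel_func, NULL,
				  128,		/* payload size in bytes */
				  16,		/* requested #of message entries */
				  1, 1);	/* assigned/idle kthread limits */
		return (ret == xpcSuccess) ? 0 : -EBUSY;
	}

	static void __exit my_xp_user_exit(void)
	{
		xpc_disconnect(MY_XPC_CHANNEL);
	}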
int __init
xp_init(void)
{
int ret, ch_number;
u64 func_addr = *(u64 *) xp_nofault_PIOR;
u64 err_func_addr = *(u64 *) xp_error_PIOR;
if (!ia64_platform_is("sn2")) {
return -ENODEV;
}
/*
* Register a nofault code region which performs a cross-partition
* PIO read. If the PIO read times out, the MCA handler will consume
* the error and return to a kernel-provided instruction to indicate
* an error. This PIO read exists because it is guaranteed to timeout
* if the destination is down (AMO operations do not timeout on at
* least some CPUs on Shubs <= v1.2, which unfortunately we have to
* work around).
*/
if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
err_func_addr, 1, 1)) != 0) {
printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
ret);
}
/*
* Setup the nofault PIO read target. (There is no special reason why
* SH_IPI_ACCESS was selected.)
*/
if (is_shub2()) {
xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
} else {
xp_nofault_PIOR_target = SH1_IPI_ACCESS;
}
/* initialize the connection registration semaphores */
for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */
}
return 0;
}
module_init(xp_init);
void __exit
xp_exit(void)
{
u64 func_addr = *(u64 *) xp_nofault_PIOR;
u64 err_func_addr = *(u64 *) xp_error_PIOR;
/* unregister the PIO read nofault code region */
(void) sn_register_nofault_code(func_addr, err_func_addr,
err_func_addr, 1, 0);
}
module_exit(xp_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition (XP) base");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(xp_nofault_PIOR);
EXPORT_SYMBOL(xp_nofault_PIOR_target);
EXPORT_SYMBOL(xpc_registrations);
EXPORT_SYMBOL(xpc_interface);
EXPORT_SYMBOL(xpc_clear_interface);
EXPORT_SYMBOL(xpc_set_interface);
EXPORT_SYMBOL(xpc_connect);
EXPORT_SYMBOL(xpc_disconnect);
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* The xp_nofault_PIOR function takes a pointer to a remote PIO register
* and attempts to load and consume a value from it. This function
* will be registered as a nofault code block. In the event that the
* PIO read fails, the MCA handler will force the error to look
* corrected and vector to the xp_error_PIOR which will return an error.
*
* extern int xp_nofault_PIOR(void *remote_register);
*/
.global xp_nofault_PIOR
xp_nofault_PIOR:
mov r8=r0 // Stage a success return value
ld8.acq r9=[r32];; // PIO Read the specified register
adds r9=1,r9 // Add to force a consume
br.ret.sptk.many b0;; // Return success
.global xp_error_PIOR
xp_error_PIOR:
mov r8=1 // Return value of 1
br.ret.sptk.many b0;; // Return failure
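As the header comment above notes, callers treat xp_nofault_PIOR() as an ordinary C function that returns nonzero when the PIO read faulted. A hedged sketch of the intended calling pattern; remote_reg stands in for a remote partition's register address and is not from this commit:

	extern int xp_nofault_PIOR(void *remote_register);

	if (xp_nofault_PIOR(remote_reg) != 0) {
		/* the PIO read timed out -- treat the remote partition as down */
	}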
...@@ -301,7 +301,7 @@ void sn_dma_flush(uint64_t addr) ...@@ -301,7 +301,7 @@ void sn_dma_flush(uint64_t addr)
spin_lock_irqsave(&((struct sn_flush_device_list *)p)-> spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
sfdl_flush_lock, flags); sfdl_flush_lock, flags);
p->sfdl_flush_value = 0; *p->sfdl_flush_addr = 0;
/* force an interrupt. */ /* force an interrupt. */
*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1; *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
......
...@@ -431,7 +431,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size) ...@@ -431,7 +431,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
ca_dmamap->cad_dma_addr = bus_addr; ca_dmamap->cad_dma_addr = bus_addr;
ca_dmamap->cad_gart_size = entries; ca_dmamap->cad_gart_size = entries;
ca_dmamap->cad_gart_entry = entry; ca_dmamap->cad_gart_entry = entry;
list_add(&ca_dmamap->cad_list, &tioca_kern->ca_list); list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);
if (xio_addr % ps) { if (xio_addr % ps) {
tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
......
...@@ -408,7 +408,7 @@ config SGI_TIOCX ...@@ -408,7 +408,7 @@ config SGI_TIOCX
config SGI_MBCS config SGI_MBCS
tristate "SGI FPGA Core Services driver support" tristate "SGI FPGA Core Services driver support"
depends on (IA64_SGI_SN2 || IA64_GENERIC) depends on SGI_TIOCX
help help
If you have an SGI Altix with an attached SABrick If you have an SGI Altix with an attached SABrick
say Y or M here, otherwise say N. say Y or M here, otherwise say N.
......
...@@ -136,6 +136,7 @@ ...@@ -136,6 +136,7 @@
*/ */
#define CAC_BASE (CACHED | AS_CAC_SPACE) #define CAC_BASE (CACHED | AS_CAC_SPACE)
#define AMO_BASE (UNCACHED | AS_AMO_SPACE) #define AMO_BASE (UNCACHED | AS_AMO_SPACE)
#define AMO_PHYS_BASE (UNCACHED_PHYS | AS_AMO_SPACE)
#define GET_BASE (CACHED | AS_GET_SPACE) #define GET_BASE (CACHED | AS_GET_SPACE)
/* /*
...@@ -160,6 +161,13 @@ ...@@ -160,6 +161,13 @@
#define PHYS_TO_DMA(x) ( (((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x)) #define PHYS_TO_DMA(x) ( (((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
/*
* Macros to test for address type.
*/
#define IS_AMO_ADDRESS(x) (((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_BASE)
#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_PHYS_BASE)
/* /*
* The following definitions pertain to the IO special address * The following definitions pertain to the IO special address
* space. They define the location of the big and little windows * space. They define the location of the big and little windows
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
* *
* SGI specific setup. * SGI specific setup.
* *
* Copyright (C) 1995-1997,1999,2001-2004 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
*/ */
#ifndef _ASM_IA64_SN_ARCH_H #ifndef _ASM_IA64_SN_ARCH_H
...@@ -47,6 +47,21 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); ...@@ -47,6 +47,21 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
#define MAX_COMPACT_NODES 2048 #define MAX_COMPACT_NODES 2048
#define CPUS_PER_NODE 4 #define CPUS_PER_NODE 4
/*
* Compact node ID to nasid mappings kept in the per-cpu data areas of each
* cpu.
*/
DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]);
#define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
extern u8 sn_partition_id;
extern u8 sn_system_size;
extern u8 sn_sharing_domain_size;
extern u8 sn_region_size;
extern void sn_flush_all_caches(long addr, long bytes); extern void sn_flush_all_caches(long addr, long bytes);
#endif /* _ASM_IA64_SN_ARCH_H */ #endif /* _ASM_IA64_SN_ARCH_H */
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_FETCHOP_H
#define _ASM_IA64_SN_FETCHOP_H
#include <linux/config.h>
#define FETCHOP_BASENAME "sgi_fetchop"
#define FETCHOP_FULLNAME "/dev/sgi_fetchop"
#define FETCHOP_VAR_SIZE 64 /* 64 byte per fetchop variable */
#define FETCHOP_LOAD 0
#define FETCHOP_INCREMENT 8
#define FETCHOP_DECREMENT 16
#define FETCHOP_CLEAR 24
#define FETCHOP_STORE 0
#define FETCHOP_AND 24
#define FETCHOP_OR 32
#define FETCHOP_CLEAR_CACHE 56
#define FETCHOP_LOAD_OP(addr, op) ( \
*(volatile long *)((char*) (addr) + (op)))
#define FETCHOP_STORE_OP(addr, op, x) ( \
*(volatile long *)((char*) (addr) + (op)) = (long) (x))
#ifdef __KERNEL__
/*
* Convert a region 6 (kaddr) address to the address of the fetchop variable
*/
#define FETCHOP_KADDR_TO_MSPEC_ADDR(kaddr) TO_MSPEC(kaddr)
/*
* Each Atomic Memory Operation (AMO, formerly known as fetchop)
* variable is 64 bytes long. Only the first 8 bytes hold the value; the
* remaining 56 bytes are unaddressable as data because those address
* offsets encode the operation to be performed.
*
* NOTE: The AMO_t _MUST_ be placed in either the first or second half
* of the cache line. The cache line _MUST NOT_ be used for anything
* other than additional AMO_t entries. This is because there are two
* addresses which reference the same physical cache line. One will
* be a cached entry with the memory type bits all set. This address
* may be loaded into processor cache. The AMO_t will be referenced
* uncached via the memory-special (MSPEC) memory type. If any portion of the
* cached copy of the line is modified, then when that line is flushed it will
* overwrite the uncached value in physical memory and lead to
* inconsistency.
*/
typedef struct {
u64 variable;
u64 unused[7];
} AMO_t;
/*
* The following APIs are externalized to the kernel to allocate/free pages of
* fetchop variables.
* fetchop_kalloc_page - Allocate/initialize 1 fetchop page on the
* specified cnode.
* fetchop_kfree_page - Free a previously allocated fetchop page
*/
unsigned long fetchop_kalloc_page(int nid);
void fetchop_kfree_page(unsigned long maddr);
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_SN_FETCHOP_H */
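Taken together, the allocator and the load/store macros in this header give a complete round trip: allocate a fetchop page, treat it as an array of AMO_t, operate on a variable uncached, then free the page. A hedged sketch under two assumptions of mine: that fetchop_kalloc_page() returns 0 on failure, and that the increment op returns the pre-increment value.

#include <linux/errno.h>
#include <asm/sn/fetchop.h>

/* Illustrative only: exercise the first fetchop variable of a page on cnode 0. */
static int fetchop_selftest(void)
{
	unsigned long maddr;
	AMO_t *amo;
	long old;

	maddr = fetchop_kalloc_page(0);		/* page of AMO_t on compact node 0 */
	if (!maddr)				/* assumed failure convention */
		return -ENOMEM;

	amo = (AMO_t *)maddr;			/* first 64-byte variable on the page */
	FETCHOP_STORE_OP(&amo->variable, FETCHOP_STORE, 5);
	old = FETCHOP_LOAD_OP(&amo->variable, FETCHOP_INCREMENT);	/* assumed to return 5, leave 6 */

	fetchop_kfree_page(maddr);
	return old == 5 ? 0 : -EIO;
}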
...@@ -29,8 +29,9 @@ ...@@ -29,8 +29,9 @@
#define L1_BRICKTYPE_CHI_CG 0x76 /* v */ #define L1_BRICKTYPE_CHI_CG 0x76 /* v */
#define L1_BRICKTYPE_X 0x78 /* x */ #define L1_BRICKTYPE_X 0x78 /* x */
#define L1_BRICKTYPE_X2 0x79 /* y */ #define L1_BRICKTYPE_X2 0x79 /* y */
#define L1_BRICKTYPE_SA 0x5e /* ^ */ /* TIO bringup brick */ #define L1_BRICKTYPE_SA 0x5e /* ^ */
#define L1_BRICKTYPE_PA 0x6a /* j */ #define L1_BRICKTYPE_PA 0x6a /* j */
#define L1_BRICKTYPE_IA 0x6b /* k */ #define L1_BRICKTYPE_IA 0x6b /* k */
#define L1_BRICKTYPE_ATHENA 0x2b /* + */
#endif /* _ASM_IA64_SN_L1_H */ #endif /* _ASM_IA64_SN_L1_H */
...@@ -13,7 +13,6 @@ ...@@ -13,7 +13,6 @@
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/sn/arch.h> #include <asm/sn/arch.h>
#include <asm/sn/intr.h> #include <asm/sn/intr.h>
#include <asm/sn/pda.h>
#include <asm/sn/bte.h> #include <asm/sn/bte.h>
/* /*
...@@ -67,20 +66,18 @@ typedef struct nodepda_s nodepda_t; ...@@ -67,20 +66,18 @@ typedef struct nodepda_s nodepda_t;
* The next set of definitions provides this. * The next set of definitions provides this.
* Routines are expected to use * Routines are expected to use
* *
* nodepda -> to access node PDA for the node on which code is running * sn_nodepda - to access node PDA for the node on which code is running
* subnodepda -> to access subnode PDA for the subnode on which code is running * NODEPDA(cnodeid) - to access node PDA for cnodeid
*
* NODEPDA(cnode) -> to access node PDA for cnodeid
* SUBNODEPDA(cnode,sn) -> to access subnode PDA for cnodeid/subnode
*/ */
#define nodepda pda->p_nodepda /* Ptr to this node's PDA */ DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
#define NODEPDA(cnode) (nodepda->pernode_pdaindr[cnode]) #define sn_nodepda (__get_cpu_var(__sn_nodepda))
#define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid])
/* /*
* Check if given a compact node id the corresponding node has all the * Check if given a compact node id the corresponding node has all the
* cpus disabled. * cpus disabled.
*/ */
#define is_headless_node(cnode) (nr_cpus_node(cnode) == 0) #define is_headless_node(cnodeid) (nr_cpus_node(cnodeid) == 0)
#endif /* _ASM_IA64_SN_NODEPDA_H */ #endif /* _ASM_IA64_SN_NODEPDA_H */
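Under the new scheme all per-node state is reached through the per-cpu sn_nodepda pointer, and NODEPDA(cnodeid) indexes its pernode_pdaindr table. A minimal sketch of a node walker built on these macros; the walker and its callback are hypothetical, not part of this change.

#include <linux/nodemask.h>
#include <linux/topology.h>
#include <asm/sn/nodepda.h>

/* Hypothetical helper: run fn on the nodepda of every node that has cpus. */
static void sn_for_each_cpu_node(void (*fn)(nodepda_t *))
{
	int cnodeid;

	for_each_online_node(cnodeid) {
		if (is_headless_node(cnodeid))
			continue;		/* node has no enabled cpus */
		fn(NODEPDA(cnodeid));		/* this node's PDA via sn_nodepda */
	}
}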
...@@ -24,14 +24,6 @@ ...@@ -24,14 +24,6 @@
typedef struct pda_s { typedef struct pda_s {
/* Having a pointer at the beginning of the PDA tends to increase
* the chance of that pointer being in cache (yes, something
* else gets pushed out). Doing this reduces the number of memory
* accesses needed to reach any nodepda variable to one.
*/
struct nodepda_s *p_nodepda; /* Pointer to Per node PDA */
struct subnodepda_s *p_subnodepda; /* Pointer to CPU subnode PDA */
/* /*
* Support for SN LEDs * Support for SN LEDs
*/ */
...@@ -49,7 +41,6 @@ typedef struct pda_s { ...@@ -49,7 +41,6 @@ typedef struct pda_s {
unsigned long sn_soft_irr[4]; unsigned long sn_soft_irr[4];
unsigned long sn_in_service_ivecs[4]; unsigned long sn_in_service_ivecs[4];
short cnodeid_to_nasid_table[MAX_NUMNODES];
int sn_lb_int_war_ticks; int sn_lb_int_war_ticks;
int sn_last_irq; int sn_last_irq;
int sn_first_irq; int sn_first_irq;
......
...@@ -384,6 +384,17 @@ ...@@ -384,6 +384,17 @@
#define SH_EVENT_OCCURRED_RTC3_INT_SHFT 26 #define SH_EVENT_OCCURRED_RTC3_INT_SHFT 26
#define SH_EVENT_OCCURRED_RTC3_INT_MASK 0x0000000004000000 #define SH_EVENT_OCCURRED_RTC3_INT_MASK 0x0000000004000000
/* ==================================================================== */
/* Register "SH_IPI_ACCESS" */
/* CPU interrupt Access Permission Bits */
/* ==================================================================== */
#define SH1_IPI_ACCESS 0x0000000110060480
#define SH2_IPI_ACCESS0 0x0000000010060c00
#define SH2_IPI_ACCESS1 0x0000000010060c80
#define SH2_IPI_ACCESS2 0x0000000010060d00
#define SH2_IPI_ACCESS3 0x0000000010060d80
/* ==================================================================== */ /* ==================================================================== */
/* Register "SH_INT_CMPB" */ /* Register "SH_INT_CMPB" */
/* RTC Compare Value for Processor B */ /* RTC Compare Value for Processor B */
...@@ -429,6 +440,19 @@ ...@@ -429,6 +440,19 @@
#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0 #define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
#define SH_INT_CMPD_REAL_TIME_CMPD_MASK 0x007fffffffffffff #define SH_INT_CMPD_REAL_TIME_CMPD_MASK 0x007fffffffffffff
/* ==================================================================== */
/* Register "SH_MD_DQLP_MMR_DIR_PRIVEC0" */
/* privilege vector for acc=0 */
/* ==================================================================== */
#define SH1_MD_DQLP_MMR_DIR_PRIVEC0 0x0000000100030300
/* ==================================================================== */
/* Register "SH_MD_DQRP_MMR_DIR_PRIVEC0" */
/* privilege vector for acc=0 */
/* ==================================================================== */
#define SH1_MD_DQRP_MMR_DIR_PRIVEC0 0x0000000100050300
/* ==================================================================== */ /* ==================================================================== */
/* Some MMRs are functionally identical (or close enough) on both SHUB1 */ /* Some MMRs are functionally identical (or close enough) on both SHUB1 */
......
This diff is collapsed.
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/ */
...@@ -92,24 +92,24 @@ ...@@ -92,24 +92,24 @@
* NOTE: on non-MP systems, only cpuid 0 exists * NOTE: on non-MP systems, only cpuid 0 exists
*/ */
extern short physical_node_map[]; /* indexed by nasid to get cnode */ extern short physical_node_map[]; /* indexed by nasid to get cnode */
/* /*
* Macros for retrieving info about current cpu * Macros for retrieving info about current cpu
*/ */
#define get_nasid() (nodepda->phys_cpuid[smp_processor_id()].nasid) #define get_nasid() (sn_nodepda->phys_cpuid[smp_processor_id()].nasid)
#define get_subnode() (nodepda->phys_cpuid[smp_processor_id()].subnode) #define get_subnode() (sn_nodepda->phys_cpuid[smp_processor_id()].subnode)
#define get_slice() (nodepda->phys_cpuid[smp_processor_id()].slice) #define get_slice() (sn_nodepda->phys_cpuid[smp_processor_id()].slice)
#define get_cnode() (nodepda->phys_cpuid[smp_processor_id()].cnode) #define get_cnode() (sn_nodepda->phys_cpuid[smp_processor_id()].cnode)
#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) #define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
/* /*
* Macros for retrieving info about an arbitrary cpu * Macros for retrieving info about an arbitrary cpu
* cpuid - logical cpu id * cpuid - logical cpu id
*/ */
#define cpuid_to_nasid(cpuid) (nodepda->phys_cpuid[cpuid].nasid) #define cpuid_to_nasid(cpuid) (sn_nodepda->phys_cpuid[cpuid].nasid)
#define cpuid_to_subnode(cpuid) (nodepda->phys_cpuid[cpuid].subnode) #define cpuid_to_subnode(cpuid) (sn_nodepda->phys_cpuid[cpuid].subnode)
#define cpuid_to_slice(cpuid) (nodepda->phys_cpuid[cpuid].slice) #define cpuid_to_slice(cpuid) (sn_nodepda->phys_cpuid[cpuid].slice)
#define cpuid_to_cnodeid(cpuid) (physical_node_map[cpuid_to_nasid(cpuid)]) #define cpuid_to_cnodeid(cpuid) (physical_node_map[cpuid_to_nasid(cpuid)])
...@@ -123,11 +123,8 @@ extern int nasid_slice_to_cpuid(int, int); ...@@ -123,11 +123,8 @@ extern int nasid_slice_to_cpuid(int, int);
/* /*
* cnodeid_to_nasid - convert a cnodeid to a NASID * cnodeid_to_nasid - convert a cnodeid to a NASID
* Macro relies on pg_data for a node being on the node itself.
* Just extract the NASID from the pointer.
*
*/ */
#define cnodeid_to_nasid(cnodeid) pda->cnodeid_to_nasid_table[cnodeid] #define cnodeid_to_nasid(cnodeid) (sn_cnodeid_to_nasid[cnodeid])
/* /*
* nasid_to_cnodeid - convert a NASID to a cnodeid * nasid_to_cnodeid - convert a NASID to a cnodeid
......
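Since both lookup paths now resolve through sn_nodepda and the per-cpu cnodeid table, the nasid obtained directly for a cpu should agree with the one obtained by going through its cnodeid. A purely illustrative consistency check, assuming these macros come from <asm/sn/sn_cpuid.h>; nothing like it is added by this change.

#include <linux/kernel.h>
#include <asm/sn/sn_cpuid.h>

/* Illustrative only: the direct and the cnodeid-based nasid lookups must agree. */
static void sn_check_cpu_nasid(int cpuid)
{
	int nasid = cpuid_to_nasid(cpuid);
	int cnodeid = cpuid_to_cnodeid(cpuid);

	if (nasid != cnodeid_to_nasid(cnodeid))
		printk(KERN_WARNING "cpu %d: nasid mismatch (%d vs %d)\n",
		       cpuid, nasid, cnodeid_to_nasid(cnodeid));
}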
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,1999-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN_FRU_H
#define _ASM_IA64_SN_SN_FRU_H
#define MAX_DIMMS 8 /* max # of dimm banks */
#define MAX_PCIDEV 8 /* max # of pci devices on a pci bus */
typedef unsigned char confidence_t;
typedef struct kf_mem_s {
confidence_t km_confidence; /* confidence level that the memory as a whole is bad */
confidence_t km_dimm[MAX_DIMMS]; /* confidence level that dimm[i] is bad */
} kf_mem_t;
typedef struct kf_cpu_s {
confidence_t kc_confidence; /* confidence level that cpu is bad */
confidence_t kc_icache; /* confidence level that instr. cache is bad */
confidence_t kc_dcache; /* confidence level that data cache is bad */
confidence_t kc_scache; /* confidence level that sec. cache is bad */
confidence_t kc_sysbus; /* confidence level that sysad/cmd/state bus is bad */
} kf_cpu_t;
typedef struct kf_pci_bus_s {
confidence_t kpb_belief; /* confidence level that the pci bus is bad */
confidence_t kpb_pcidev_belief[MAX_PCIDEV];
/* confidence level that the pci dev is bad */
} kf_pci_bus_t;
#endif /* _ASM_IA64_SN_SN_FRU_H */
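Each confidence_t records how strongly a component is suspected of being the failing part. A hedged sketch of how diagnostic code might fill in a kf_cpu_t after a data-cache error; the 0-100 scale and the helper function are assumptions for illustration, not defined by this header.

#include <asm/sn/sn_fru.h>

/* Illustrative only: mark a cpu's data cache as the likely failing part. */
static void record_dcache_suspicion(kf_cpu_t *kc)
{
	kc->kc_confidence = 60;		/* overall suspicion of the cpu (assumed 0-100 scale) */
	kc->kc_dcache = 90;		/* data cache most strongly suspected */
	kc->kc_icache = 0;
	kc->kc_scache = 0;
	kc->kc_sysbus = 0;
}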
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -846,6 +846,8 @@ fastcall NORET_TYPE void do_exit(long code) ...@@ -846,6 +846,8 @@ fastcall NORET_TYPE void do_exit(long code)
for (;;) ; for (;;) ;
} }
EXPORT_SYMBOL_GPL(do_exit);
NORET_TYPE void complete_and_exit(struct completion *comp, long code) NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{ {
if (comp) if (comp)
......
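EXPORT_SYMBOL_GPL(do_exit) makes do_exit() callable from GPL modules; complete_and_exit(), shown in context above, is the usual way for a module's kernel thread to signal that it has finished before the module text is freed. A minimal, hypothetical sketch of that pattern; the thread name, stop flag, and polling interval are invented for illustration.

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/sched.h>

static DECLARE_COMPLETION(worker_done);
static int worker_stop;			/* set by the module's exit path */

/* Hypothetical worker thread, started with kernel_thread() at module init. */
static int worker_thread(void *arg)
{
	daemonize("hypo-worker");

	while (!worker_stop) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);		/* poll roughly once a second */
	}

	/* wakes whoever waits on worker_done and never returns */
	complete_and_exit(&worker_done, 0);
}

The exit path would set worker_stop and then wait_for_completion(&worker_done) before returning, so the thread cannot still be executing module code when the module is unloaded.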
This diff is collapsed.