Commit f77165df authored by Dave Kleikamp

Merge with /home/shaggy/git/linus-clean/

parents ddf3e298 b2d84f07
......@@ -217,6 +217,16 @@ config IA64_SGI_SN_SIM
If you are compiling a kernel that will run under SGI's IA-64
simulator (Medusa) then say Y, otherwise say N.
config IA64_SGI_SN_XP
tristate "Support communication between SGI SSIs"
depends on MSPEC
help
An SGI machine can be divided into multiple Single System
Images which act independently of each other and have
hardware based memory protection from the others. Enabling
this feature will allow for direct communication between SSIs
based on a network adapter and DMA messaging.
config FORCE_MAX_ZONEORDER
int
default "18"
......@@ -261,6 +271,15 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
config SCHED_SMT
bool "SMT scheduler support"
depends on SMP
default n
help
Improves the CPU scheduler's decision making when dealing with
Intel IA64 chips with MultiThreading, at a cost of slightly increased
overhead in some places. If unsure say N here.
config PREEMPT
bool "Preemptible Kernel"
help
......
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.11-rc2
# Sat Jan 22 11:17:02 2005
# Linux kernel version: 2.6.12-rc3
# Tue May 3 15:55:04 2005
#
#
......@@ -10,6 +10,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
#
# General setup
......@@ -21,24 +22,27 @@ CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=20
CONFIG_HOTPLUG=y
CONFIG_KOBJECT_UEVENT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_CPUSETS is not set
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SHMEM=y
CONFIG_CC_ALIGN_FUNCTIONS=0
CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
#
# Loadable module support
......@@ -85,6 +89,7 @@ CONFIG_FORCE_MAX_ZONEORDER=18
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y
# CONFIG_SCHED_SMT is not set
# CONFIG_PREEMPT is not set
CONFIG_HAVE_DEC_LOCK=y
CONFIG_IA32_SUPPORT=y
......@@ -135,6 +140,7 @@ CONFIG_PCI_DOMAINS=y
# CONFIG_PCI_MSI is not set
CONFIG_PCI_LEGACY_PROC=y
CONFIG_PCI_NAMES=y
# CONFIG_PCI_DEBUG is not set
#
# PCI Hotplug Support
......@@ -151,10 +157,6 @@ CONFIG_HOTPLUG_PCI_ACPI=m
#
# CONFIG_PCCARD is not set
#
# PC-card bridges
#
#
# Device Drivers
#
......@@ -195,9 +197,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
# CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
# CONFIG_CDROM_PKTCDVD is not set
......@@ -313,7 +316,6 @@ CONFIG_SCSI_FC_ATTRS=y
# CONFIG_SCSI_BUSLOGIC is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_EATA is not set
# CONFIG_SCSI_EATA_PIO is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_GDTH is not set
# CONFIG_SCSI_IPS is not set
......@@ -325,7 +327,6 @@ CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
# CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
CONFIG_SCSI_QLOGIC_FC=y
# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
CONFIG_SCSI_QLOGIC_1280=y
......@@ -336,6 +337,7 @@ CONFIG_SCSI_QLA22XX=m
CONFIG_SCSI_QLA2300=m
CONFIG_SCSI_QLA2322=m
# CONFIG_SCSI_QLA6312 is not set
# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_DEBUG is not set
......@@ -358,6 +360,7 @@ CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
# CONFIG_DM_MULTIPATH is not set
#
# Fusion MPT device support
......@@ -386,7 +389,6 @@ CONFIG_NET=y
#
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_NETLINK_DEV=y
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
......@@ -446,7 +448,6 @@ CONFIG_DUMMY=m
# CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
# CONFIG_ETHERTAP is not set
#
# ARCnet devices
......@@ -484,7 +485,6 @@ CONFIG_NET_PCI=y
# CONFIG_DGRS is not set
CONFIG_EEPRO100=m
CONFIG_E100=m
# CONFIG_E100_NAPI is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
# CONFIG_NE2K_PCI is not set
......@@ -565,25 +565,6 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set
#
# Input I/O drivers
#
CONFIG_GAMEPORT=m
CONFIG_SOUND_GAMEPORT=m
# CONFIG_GAMEPORT_NS558 is not set
# CONFIG_GAMEPORT_L4 is not set
# CONFIG_GAMEPORT_EMU10K1 is not set
# CONFIG_GAMEPORT_VORTEX is not set
# CONFIG_GAMEPORT_FM801 is not set
# CONFIG_GAMEPORT_CS461X is not set
CONFIG_SERIO=y
CONFIG_SERIO_I8042=y
# CONFIG_SERIO_SERPORT is not set
# CONFIG_SERIO_CT82C710 is not set
# CONFIG_SERIO_PCIPS2 is not set
CONFIG_SERIO_LIBPS2=y
# CONFIG_SERIO_RAW is not set
#
# Input Device Drivers
#
......@@ -601,6 +582,24 @@ CONFIG_MOUSE_PS2=y
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
#
# Hardware I/O ports
#
CONFIG_SERIO=y
CONFIG_SERIO_I8042=y
# CONFIG_SERIO_SERPORT is not set
# CONFIG_SERIO_PCIPS2 is not set
CONFIG_SERIO_LIBPS2=y
# CONFIG_SERIO_RAW is not set
CONFIG_GAMEPORT=m
# CONFIG_GAMEPORT_NS558 is not set
# CONFIG_GAMEPORT_L4 is not set
# CONFIG_GAMEPORT_EMU10K1 is not set
# CONFIG_GAMEPORT_VORTEX is not set
# CONFIG_GAMEPORT_FM801 is not set
# CONFIG_GAMEPORT_CS461X is not set
CONFIG_SOUND_GAMEPORT=m
#
# Character devices
#
......@@ -615,6 +614,8 @@ CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_SYNCLINK is not set
# CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set
# CONFIG_SPECIALIX is not set
# CONFIG_SX is not set
# CONFIG_STALDRV is not set
#
......@@ -635,6 +636,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
......@@ -670,6 +672,12 @@ CONFIG_HPET=y
# CONFIG_HPET_RTC_IRQ is not set
CONFIG_HPET_MMAP=y
CONFIG_MAX_RAW_DEVS=256
# CONFIG_HANGCHECK_TIMER is not set
#
# TPM devices
#
# CONFIG_TCG_TPM is not set
#
# I2C support
......@@ -705,7 +713,6 @@ CONFIG_MAX_RAW_DEVS=256
#
CONFIG_VGA_CONSOLE=y
CONFIG_DUMMY_CONSOLE=y
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
# Sound
......@@ -715,6 +722,8 @@ CONFIG_DUMMY_CONSOLE=y
#
# USB support
#
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB=y
# CONFIG_USB_DEBUG is not set
......@@ -726,8 +735,6 @@ CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
#
# USB Host Controller Drivers
......@@ -736,6 +743,8 @@ CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
CONFIG_USB_OHCI_HCD=m
# CONFIG_USB_OHCI_BIG_ENDIAN is not set
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_UHCI_HCD=y
# CONFIG_USB_SL811_HCD is not set
......@@ -751,12 +760,11 @@ CONFIG_USB_UHCI_HCD=y
#
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
# CONFIG_USB_STORAGE_RW_DETECT is not set
# CONFIG_USB_STORAGE_DATAFAB is not set
# CONFIG_USB_STORAGE_FREECOM is not set
# CONFIG_USB_STORAGE_ISD200 is not set
# CONFIG_USB_STORAGE_DPCM is not set
# CONFIG_USB_STORAGE_HP8200e is not set
# CONFIG_USB_STORAGE_USBAT is not set
# CONFIG_USB_STORAGE_SDDR09 is not set
# CONFIG_USB_STORAGE_SDDR55 is not set
# CONFIG_USB_STORAGE_JUMPSHOT is not set
......@@ -800,6 +808,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
# CONFIG_USB_MON is not set
#
# USB port drivers
......@@ -824,6 +833,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_PHIDGETKIT is not set
# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_SISUSBVGA is not set
# CONFIG_USB_TEST is not set
#
......@@ -867,7 +877,12 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
#
# XFS support
#
CONFIG_XFS_FS=y
CONFIG_XFS_EXPORT=y
# CONFIG_XFS_RT is not set
# CONFIG_XFS_QUOTA is not set
# CONFIG_XFS_SECURITY is not set
......@@ -945,7 +960,7 @@ CONFIG_NFSD_V4=y
CONFIG_NFSD_TCP=y
CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=m
CONFIG_EXPORTFS=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
CONFIG_RPCSEC_GSS_KRB5=m
......@@ -1042,8 +1057,10 @@ CONFIG_GENERIC_IRQ_PROBE=y
#
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_LOG_BUF_SHIFT=20
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_SPINLOCK is not set
......@@ -1077,6 +1094,7 @@ CONFIG_CRYPTO_MD5=m
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
# CONFIG_CRYPTO_WP512 is not set
# CONFIG_CRYPTO_TGR192 is not set
CONFIG_CRYPTO_DES=m
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_TWOFISH is not set
......
......@@ -1944,43 +1944,17 @@ sba_connect_bus(struct pci_bus *bus)
static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *obj;
acpi_handle phandle;
unsigned int node;
int pxm;
ioc->node = MAX_NUMNODES;
/*
* Check for a _PXM on this node first. We don't typically see
* one here, so we'll end up getting it from the parent.
*/
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) {
if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
return;
pxm = acpi_get_pxm(handle);
/* Reset the acpi buffer */
buffer.length = ACPI_ALLOCATE_BUFFER;
buffer.pointer = NULL;
if (ACPI_FAILURE(acpi_evaluate_object(phandle, "_PXM", NULL,
&buffer)))
if (pxm < 0)
return;
}
if (!buffer.length || !buffer.pointer)
return;
obj = buffer.pointer;
if (obj->type != ACPI_TYPE_INTEGER ||
obj->integer.value >= MAX_PXM_DOMAINS) {
acpi_os_free(buffer.pointer);
return;
}
node = pxm_to_nid_map[obj->integer.value];
acpi_os_free(buffer.pointer);
node = pxm_to_nid_map[pxm];
if (node >= MAX_NUMNODES || !node_online(node))
return;
......
......@@ -779,7 +779,7 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
union acpi_object *obj;
struct acpi_table_iosapic *iosapic;
unsigned int gsi_base;
int node;
int pxm, node;
/* Only care about objects w/ a method that returns the MADT */
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
......@@ -805,29 +805,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
gsi_base = iosapic->global_irq_base;
acpi_os_free(buffer.pointer);
buffer.length = ACPI_ALLOCATE_BUFFER;
buffer.pointer = NULL;
/*
* OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell
* OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
* us which node to associate this with.
*/
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer)))
return AE_OK;
if (!buffer.length || !buffer.pointer)
return AE_OK;
obj = buffer.pointer;
if (obj->type != ACPI_TYPE_INTEGER ||
obj->integer.value >= MAX_PXM_DOMAINS) {
acpi_os_free(buffer.pointer);
pxm = acpi_get_pxm(handle);
if (pxm < 0)
return AE_OK;
}
node = pxm_to_nid_map[obj->integer.value];
acpi_os_free(buffer.pointer);
node = pxm_to_nid_map[pxm];
if (node >= MAX_NUMNODES || !node_online(node) ||
cpus_empty(node_to_cpumask(node)))
......
......@@ -782,7 +782,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
.mem.offset 8,0
st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
END(ia64_ret_from_ia32_execve_syscall)
END(ia64_ret_from_ia32_execve)
// fall through
#endif /* CONFIG_IA32_SUPPORT */
GLOBAL_ENTRY(ia64_leave_kernel)
......
......@@ -611,8 +611,10 @@ GLOBAL_ENTRY(fsys_bubble_down)
movl r2=ia64_ret_from_syscall
;;
mov rp=r2 // set the real return addr
tbit.z p8,p0=r3,TIF_SYSCALL_TRACE
and r3=_TIF_SYSCALL_TRACEAUDIT,r3
;;
cmp.eq p8,p0=r3,r0
(p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8
(p8) br.call.sptk.many b6=b6 // ignore this return addr
br.cond.sptk ia64_trace_syscall
......
......@@ -132,8 +132,7 @@ mca_handler_bh(unsigned long paddr)
spin_unlock(&mca_bh_lock);
/* This process is about to be killed itself */
force_sig(SIGKILL, current);
schedule();
do_exit(SIGKILL);
}
/**
......@@ -439,6 +438,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
psr2->cpl = 0;
psr2->ri = 0;
psr2->i = 0;
return 1;
}
......
......@@ -10,6 +10,7 @@
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
GLOBAL_ENTRY(mca_handler_bhhook)
invala // clear RSE ?
......@@ -20,12 +21,21 @@ GLOBAL_ENTRY(mca_handler_bhhook)
;;
alloc r16=ar.pfs,0,2,1,0 // make a new frame
;;
mov ar.rsc=0
;;
mov r13=IA64_KR(CURRENT) // current task pointer
;;
adds r12=IA64_TASK_THREAD_KSP_OFFSET,r13
mov r2=r13
;;
addl r22=IA64_RBS_OFFSET,r2
;;
mov ar.bspstore=r22
;;
ld8 r12=[r12] // stack pointer
addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
;;
adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
;;
st1 [r2]=r0 // clear current->thread.on_ustack flag
mov loc0=r16
movl loc1=mca_handler_bh // recovery C function
;;
......@@ -34,7 +44,9 @@ GLOBAL_ENTRY(mca_handler_bhhook)
;;
mov loc1=rp
;;
br.call.sptk.many rp=b6 // not return ...
ssm psr.i
;;
br.call.sptk.many rp=b6 // does not return ...
;;
mov ar.pfs=loc0
mov rp=loc1
......
......@@ -1265,6 +1265,8 @@ pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
extern void update_pal_halt_status(int);
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
......@@ -1311,6 +1313,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
is_syswide,
cpu));
/*
* disable default_idle() to go to PAL_HALT
*/
update_pal_halt_status(0);
UNLOCK_PFS(flags);
return 0;
......@@ -1366,6 +1373,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
is_syswide,
cpu));
/*
* if possible, enable default_idle() to go into PAL_HALT
*/
if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
update_pal_halt_status(1);
UNLOCK_PFS(flags);
return 0;
......@@ -4202,7 +4215,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
req->load_pid,
ctx->ctx_state));
return -EINVAL;
return -EBUSY;
}
DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
......@@ -4703,17 +4716,27 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
*/
if (task == current || ctx->ctx_fl_system) return 0;
/*
* we are monitoring another thread
*/
switch(state) {
case PFM_CTX_UNLOADED:
/*
* if context is UNLOADED we are safe to go
*/
if (state == PFM_CTX_UNLOADED) return 0;
return 0;
case PFM_CTX_ZOMBIE:
/*
* no command can operate on a zombie context
*/
if (state == PFM_CTX_ZOMBIE) {
DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
return -EINVAL;
case PFM_CTX_MASKED:
/*
* PMU state has been saved to software even though
* the thread may still be running.
*/
if (cmd != PFM_UNLOAD_CONTEXT) return 0;
}
/*
......
......@@ -50,7 +50,7 @@
#include "sigframe.h"
void (*ia64_mark_idle)(int);
static cpumask_t cpu_idle_map;
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
......@@ -174,6 +174,8 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
}
static int pal_halt = 1;
static int can_do_pal_halt = 1;
static int __init nohalt_setup(char * str)
{
pal_halt = 0;
......@@ -181,16 +183,20 @@ static int __init nohalt_setup(char * str)
}
__setup("nohalt", nohalt_setup);
void
update_pal_halt_status(int status)
{
can_do_pal_halt = pal_halt && status;
}
/*
* We use this if we don't have any better idle routine..
*/
void
default_idle (void)
{
unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);
while (!need_resched())
if (pal_halt && !pmu_active)
if (can_do_pal_halt)
safe_halt();
else
cpu_relax();
......@@ -223,19 +229,30 @@ static inline void play_dead(void)
}
#endif /* CONFIG_HOTPLUG_CPU */
void cpu_idle_wait(void)
{
int cpu;
unsigned int cpu, this_cpu = get_cpu();
cpumask_t map;
for_each_online_cpu(cpu)
cpu_set(cpu, cpu_idle_map);
set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
put_cpu();
cpus_clear(map);
for_each_online_cpu(cpu) {
per_cpu(cpu_idle_state, cpu) = 1;
cpu_set(cpu, map);
}
__get_cpu_var(cpu_idle_state) = 0;
wmb();
do {
ssleep(1);
cpus_and(map, cpu_idle_map, cpu_online_map);
for_each_online_cpu(cpu) {
if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
cpu_clear(cpu, map);
}
cpus_and(map, map, cpu_online_map);
} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
......@@ -244,7 +261,6 @@ void __attribute__((noreturn))
cpu_idle (void)
{
void (*mark_idle)(int) = ia64_mark_idle;
int cpu = smp_processor_id();
/* endless idle loop with no priority at all */
while (1) {
......@@ -255,12 +271,13 @@ cpu_idle (void)
while (!need_resched()) {
void (*idle)(void);
if (__get_cpu_var(cpu_idle_state))
__get_cpu_var(cpu_idle_state) = 0;
rmb();
if (mark_idle)
(*mark_idle)(1);
if (cpu_isset(cpu, cpu_idle_map))
cpu_clear(cpu, cpu_idle_map);
rmb();
idle = pm_idle;
if (!idle)
idle = default_idle;
......
......@@ -224,7 +224,8 @@ ia64_rt_sigreturn (struct sigscratch *scr)
* could be corrupted.
*/
retval = (long) &ia64_leave_kernel;
if (test_thread_flag(TIF_SYSCALL_TRACE))
if (test_thread_flag(TIF_SYSCALL_TRACE)
|| test_thread_flag(TIF_SYSCALL_AUDIT))
/*
* strace expects to be notified after sigreturn returns even though the
* context to which we return may not be in the middle of a syscall.
......
/*
* Cache flushing routines.
*
* Copyright (C) 1999-2001 Hewlett-Packard Co
* Copyright (C) 1999-2001 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/asmmacro.h>
#include <asm/page.h>
......@@ -26,7 +26,7 @@ GLOBAL_ENTRY(flush_icache_range)
mov ar.lc=r8
;;
.Loop: fc in0 // issuable on M0 only
.Loop: fc.i in0 // issuable on M2 only
add in0=32,in0
br.cloop.sptk.few .Loop
;;
......
......@@ -75,6 +75,7 @@ GLOBAL_ENTRY(memcpy)
mov f6=f0
br.cond.sptk .common_code
;;
END(memcpy)
GLOBAL_ENTRY(__copy_user)
.prologue
// check dest alignment
......@@ -524,7 +525,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
#undef B
#undef C
#undef D
END(memcpy)
/*
* Due to lack of local tag support in gcc 2.x assembler, it is not clear which
......
......@@ -57,10 +57,10 @@ GLOBAL_ENTRY(memset)
{ .mmi
.prologue
alloc tmp = ar.pfs, 3, 0, 0, 0
.body
lfetch.nt1 [dest] //
.save ar.lc, save_lc
mov.i save_lc = ar.lc
.body
} { .mmi
mov ret0 = dest // return value
cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero
......
......@@ -4,10 +4,15 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
# Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved.
#
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
huberror.o io_init.o iomv.o klconflib.o sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_SGI_TIOCX) += tiocx.o
obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o
xp-y := xp_main.o xp_nofault.o
obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o
xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o
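# Editorial sketch, not part of this commit: with CONFIG_IA64_SGI_SN_XP=m,
# the lines above build three modules, xp.ko, xpc.ko and xpnet.ko. xpc and
# xpnet use symbols exported by xp (see the EXPORT_SYMBOLs in xp_main.c),
# so "modprobe xpc" or "modprobe xpnet" pulls in xp automatically.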
......@@ -174,6 +174,12 @@ static void sn_fixup_ionodes(void)
if (status)
continue;
/* Attach the error interrupt handlers */
if (nasid & 1)
ice_error_init(hubdev);
else
hub_error_init(hubdev);
for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
......@@ -211,10 +217,6 @@ static void sn_fixup_ionodes(void)
sn_flush_device_list;
}
if (!(i & 1))
hub_error_init(hubdev);
else
ice_error_init(hubdev);
}
}
......
......@@ -37,6 +37,11 @@ static u64 *sn_oemdata_size, sn_oemdata_bufsize;
* This function is the callback routine that SAL calls to log error
* info for platform errors. buf is appended to sn_oemdata, resizing as
* required.
* Note: this is a SAL-to-OS callback, running under the same rules as the SAL
* code. SAL calls are run with preemption disabled, so this routine must not
* sleep. vmalloc can sleep, so print_hook cannot resize the output buffer
* itself; instead it must set the required size and return, letting the caller
* resize the buffer and then redrive the SAL call.
*/
static int print_hook(const char *fmt, ...)
{
......@@ -47,18 +52,8 @@ static int print_hook(const char *fmt, ...)
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
len = strlen(buf);
while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) {
u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000);
if (!newbuf) {
printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
__FUNCTION__);
return 0;
}
memcpy(newbuf, *sn_oemdata, *sn_oemdata_size);
vfree(*sn_oemdata);
*sn_oemdata = newbuf;
}
memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1);
if (*sn_oemdata_size + len <= sn_oemdata_bufsize)
memcpy(*sn_oemdata + *sn_oemdata_size, buf, len);
*sn_oemdata_size += len;
return 0;
}
......@@ -98,7 +93,20 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
sn_oemdata = oemdata;
sn_oemdata_size = oemdata_size;
sn_oemdata_bufsize = 0;
*sn_oemdata_size = PAGE_SIZE; /* first guess at how much data will be generated */
while (*sn_oemdata_size > sn_oemdata_bufsize) {
u8 *newbuf = vmalloc(*sn_oemdata_size);
if (!newbuf) {
printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
__FUNCTION__);
return 1;
}
vfree(*sn_oemdata);
*sn_oemdata = newbuf;
sn_oemdata_bufsize = *sn_oemdata_size;
*sn_oemdata_size = 0;
ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
}
up(&sn_oemdata_mutex);
return 0;
}
......
......@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/config.h>
......@@ -73,6 +73,12 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]);
EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
EXPORT_PER_CPU_SYMBOL(__sn_nodepda);
partid_t sn_partid = -1;
EXPORT_SYMBOL(sn_partid);
char sn_system_serial_number_string[128];
......@@ -373,10 +379,10 @@ static void __init sn_init_pdas(char **cmdline_p)
{
cnodeid_t cnode;
memset(pda->cnodeid_to_nasid_table, -1,
sizeof(pda->cnodeid_to_nasid_table));
memset(sn_cnodeid_to_nasid, -1,
sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
for_each_online_node(cnode)
pda->cnodeid_to_nasid_table[cnode] =
sn_cnodeid_to_nasid[cnode] =
pxm_to_nasid(nid_to_pxm_map[cnode]);
numionodes = num_online_nodes();
......@@ -477,7 +483,8 @@ void __init sn_cpu_init(void)
cnode = nasid_to_cnodeid(nasid);
pda->p_nodepda = nodepdaindr[cnode];
sn_nodepda = nodepdaindr[cnode];
pda->led_address =
(typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
pda->led_state = LED_ALWAYS_SET;
......@@ -486,15 +493,18 @@ void __init sn_cpu_init(void)
pda->idle_flag = 0;
if (cpuid != 0) {
memcpy(pda->cnodeid_to_nasid_table,
pdacpu(0)->cnodeid_to_nasid_table,
sizeof(pda->cnodeid_to_nasid_table));
/* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
memcpy(sn_cnodeid_to_nasid,
(&per_cpu(__sn_cnodeid_to_nasid, 0)),
sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
}
/*
* Check for WARs.
* Only needs to be done once, on BSP.
* Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i].
* Has to be done after loop above, because it uses this cpu's
* sn_cnodeid_to_nasid table which was just initialized if this
* isn't cpu 0.
* Has to be done before assignment below.
*/
if (!wars_have_been_checked) {
......@@ -580,8 +590,7 @@ static void __init scan_for_ionodes(void)
brd = find_lboard_any(brd, KLTYPE_SNIA);
while (brd) {
pda->cnodeid_to_nasid_table[numionodes] =
brd->brd_nasid;
sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid;
physical_node_map[brd->brd_nasid] = numionodes;
root_lboard[numionodes] = brd;
numionodes++;
......@@ -602,8 +611,7 @@ static void __init scan_for_ionodes(void)
root_lboard[nasid_to_cnodeid(nasid)],
KLTYPE_TIO);
while (brd) {
pda->cnodeid_to_nasid_table[numionodes] =
brd->brd_nasid;
sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid;
physical_node_map[brd->brd_nasid] = numionodes;
root_lboard[numionodes] = brd;
numionodes++;
......@@ -614,7 +622,6 @@ static void __init scan_for_ionodes(void)
brd = find_lboard_any(brd, KLTYPE_TIO);
}
}
}
int
......@@ -623,7 +630,8 @@ nasid_slice_to_cpuid(int nasid, int slice)
long cpu;
for (cpu=0; cpu < NR_CPUS; cpu++)
if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice)
if (cpuid_to_nasid(cpu) == nasid &&
cpuid_to_slice(cpu) == slice)
return cpu;
return -1;
......
......@@ -21,6 +21,8 @@
#include <asm/sn/types.h>
#include <asm/sn/shubio.h>
#include <asm/sn/tiocx.h>
#include <asm/sn/l1.h>
#include <asm/sn/module.h>
#include "tio.h"
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
......@@ -308,14 +310,12 @@ void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
}
}
uint64_t
tiocx_dma_addr(uint64_t addr)
uint64_t tiocx_dma_addr(uint64_t addr)
{
return PHYS_TO_TIODMA(addr);
}
uint64_t
tiocx_swin_base(int nasid)
uint64_t tiocx_swin_base(int nasid)
{
return TIO_SWIN_BASE(nasid, TIOCX_CORELET);
}
......@@ -330,19 +330,6 @@ EXPORT_SYMBOL(tiocx_bus_type);
EXPORT_SYMBOL(tiocx_dma_addr);
EXPORT_SYMBOL(tiocx_swin_base);
static uint64_t tiocx_get_hubdev_info(u64 handle, u64 address)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
ia64_sal_oemcall_nolock(&ret_stuff,
SN_SAL_IOIF_GET_HUBDEV_INFO,
handle, address, 0, 0, 0, 0, 0);
return ret_stuff.v0;
}
static void tio_conveyor_set(nasid_t nasid, int enable_flag)
{
uint64_t ice_frz;
......@@ -379,7 +366,29 @@ static void tio_corelet_reset(nasid_t nasid, int corelet)
udelay(2000);
}
static int fpga_attached(nasid_t nasid)
static int tiocx_btchar_get(int nasid)
{
moduleid_t module_id;
geoid_t geoid;
int cnodeid;
cnodeid = nasid_to_cnodeid(nasid);
geoid = cnodeid_get_geoid(cnodeid);
module_id = geo_module(geoid);
return MODULE_GET_BTCHAR(module_id);
}
static int is_fpga_brick(int nasid)
{
switch (tiocx_btchar_get(nasid)) {
case L1_BRICKTYPE_SA:
case L1_BRICKTYPE_ATHENA:
return 1;
}
return 0;
}
static int bitstream_loaded(nasid_t nasid)
{
uint64_t cx_credits;
......@@ -396,7 +405,7 @@ static int tiocx_reload(struct cx_dev *cx_dev)
int mfg_num = CX_DEV_NONE;
nasid_t nasid = cx_dev->cx_id.nasid;
if (fpga_attached(nasid)) {
if (bitstream_loaded(nasid)) {
uint64_t cx_id;
cx_id =
......@@ -427,9 +436,10 @@ static ssize_t show_cxdev_control(struct device *dev, char *buf)
{
struct cx_dev *cx_dev = to_cx_dev(dev);
return sprintf(buf, "0x%x 0x%x 0x%x\n",
return sprintf(buf, "0x%x 0x%x 0x%x %d\n",
cx_dev->cx_id.nasid,
cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num);
cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num,
tiocx_btchar_get(cx_dev->cx_id.nasid));
}
static ssize_t store_cxdev_control(struct device *dev, const char *buf,
......@@ -475,20 +485,14 @@ static int __init tiocx_init(void)
if ((nasid = cnodeid_to_nasid(cnodeid)) < 0)
break; /* No more nasids .. bail out of loop */
if (nasid & 0x1) { /* TIO's are always odd */
if ((nasid & 0x1) && is_fpga_brick(nasid)) {
struct hubdev_info *hubdev;
uint64_t status;
struct xwidget_info *widgetp;
DBG("Found TIO at nasid 0x%x\n", nasid);
hubdev =
(struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo);
status =
tiocx_get_hubdev_info(nasid,
(uint64_t) __pa(hubdev));
if (status)
continue;
widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET];
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition (XP) base.
*
* XP provides a base from which its users can interact
* with XPC, yet not be dependent on XPC.
*
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/xp.h>
/*
* Target of nofault PIO read.
*/
u64 xp_nofault_PIOR_target;
/*
* xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
* users of XPC.
*/
struct xpc_registration xpc_registrations[XPC_NCHANNELS];
/*
* Initialize the XPC interface to indicate that XPC isn't loaded.
*/
static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
struct xpc_interface xpc_interface = {
(void (*)(int)) xpc_notloaded,
(void (*)(int)) xpc_notloaded,
(enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
(enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
(enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
xpc_notloaded,
(void (*)(partid_t, int, void *)) xpc_notloaded,
(enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
};
/*
* XPC calls this when it (the XPC module) has been loaded.
*/
void
xpc_set_interface(void (*connect)(int),
void (*disconnect)(int),
enum xpc_retval (*allocate)(partid_t, int, u32, void **),
enum xpc_retval (*send)(partid_t, int, void *),
enum xpc_retval (*send_notify)(partid_t, int, void *,
xpc_notify_func, void *),
void (*received)(partid_t, int, void *),
enum xpc_retval (*partid_to_nasids)(partid_t, void *))
{
xpc_interface.connect = connect;
xpc_interface.disconnect = disconnect;
xpc_interface.allocate = allocate;
xpc_interface.send = send;
xpc_interface.send_notify = send_notify;
xpc_interface.received = received;
xpc_interface.partid_to_nasids = partid_to_nasids;
}
/*
* XPC calls this when it (the XPC module) is being unloaded.
*/
void
xpc_clear_interface(void)
{
xpc_interface.connect = (void (*)(int)) xpc_notloaded;
xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
void **)) xpc_notloaded;
xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
xpc_notloaded;
xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
xpc_notify_func, void *)) xpc_notloaded;
xpc_interface.received = (void (*)(partid_t, int, void *))
xpc_notloaded;
xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
xpc_notloaded;
}
/*
* Register for automatic establishment of a channel connection whenever
* a partition comes up.
*
* Arguments:
*
* ch_number - channel # to register for connection.
* func - function to call for asynchronous notification of channel
* state changes (i.e., connection, disconnection, error) and
* the arrival of incoming messages.
* key - pointer to optional user-defined value that gets passed back
* to the user on any callouts made to func.
* payload_size - size in bytes of the XPC message's payload area which
* contains a user-defined message. The user should make
* this large enough to hold their largest message.
* nentries - max #of XPC message entries a message queue can contain.
* The actual number, which is determined when a connection
* is established and may be less than requested, will be
* passed to the user via the xpcConnected callout.
* assigned_limit - max number of kthreads allowed to be processing
* messages (per connection) at any given instant.
* idle_limit - max number of kthreads allowed to be idle at any given
* instant.
*/
enum xpc_retval
xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
u16 nentries, u32 assigned_limit, u32 idle_limit)
{
struct xpc_registration *registration;
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
DBUG_ON(payload_size == 0 || nentries == 0);
DBUG_ON(func == NULL);
DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
registration = &xpc_registrations[ch_number];
if (down_interruptible(&registration->sema) != 0) {
return xpcInterrupted;
}
/* if XPC_CHANNEL_REGISTERED(ch_number) */
if (registration->func != NULL) {
up(&registration->sema);
return xpcAlreadyRegistered;
}
/* register the channel for connection */
registration->msg_size = XPC_MSG_SIZE(payload_size);
registration->nentries = nentries;
registration->assigned_limit = assigned_limit;
registration->idle_limit = idle_limit;
registration->key = key;
registration->func = func;
up(&registration->sema);
xpc_interface.connect(ch_number);
return xpcSuccess;
}
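/*
 * Editorial sketch, not part of this commit: a kernel-level user might
 * register a channel as below. The callback signature is an assumption
 * based on xpc_channel_func in xp.h (not shown in this diff), and every
 * name and number here is hypothetical.
 *
 *	static void
 *	my_channel_func(enum xpc_retval reason, partid_t partid,
 *			int ch_number, void *data, void *key)
 *	{
 *		// called on connection, disconnection, errors and
 *		// on the arrival of incoming messages
 *	}
 *
 *	static int __init
 *	my_xp_user_init(void)
 *	{
 *		// channel 3, 128-byte payloads, 64 entries, at most
 *		// 2 assigned kthreads and 1 idle kthread per connection
 *		if (xpc_connect(3, my_channel_func, NULL, 128, 64,
 *				2, 1) != xpcSuccess)
 *			return -EAGAIN;
 *		return 0;
 *	}
 */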
/*
* Remove the registration for automatic connection of the specified channel
* when a partition comes up.
*
* Before returning, xpc_disconnect() will wait until all connections on the
* specified channel have been closed/torn down, so the caller can be assured
* that they will not be receiving any more callouts from XPC to their
* function registered via xpc_connect().
*
* Arguments:
*
* ch_number - channel # to unregister.
*/
void
xpc_disconnect(int ch_number)
{
struct xpc_registration *registration;
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
registration = &xpc_registrations[ch_number];
/*
* We've decided not to make this a down_interruptible(), since we
* figured XPC's users will just turn around and call xpc_disconnect()
* again anyway, so we might as well wait, if need be.
*/
down(&registration->sema);
/* if !XPC_CHANNEL_REGISTERED(ch_number) */
if (registration->func == NULL) {
up(&registration->sema);
return;
}
/* remove the connection registration for the specified channel */
registration->func = NULL;
registration->key = NULL;
registration->nentries = 0;
registration->msg_size = 0;
registration->assigned_limit = 0;
registration->idle_limit = 0;
xpc_interface.disconnect(ch_number);
up(&registration->sema);
return;
}
int __init
xp_init(void)
{
int ret, ch_number;
u64 func_addr = *(u64 *) xp_nofault_PIOR;
u64 err_func_addr = *(u64 *) xp_error_PIOR;
if (!ia64_platform_is("sn2")) {
return -ENODEV;
}
/*
* Register a nofault code region which performs a cross-partition
* PIO read. If the PIO read times out, the MCA handler will consume
* the error and return to a kernel-provided instruction to indicate
* an error. This PIO read exists because it is guaranteed to time out
* if the destination is down (AMO operations do not time out on at
* least some CPUs on Shubs <= v1.2, which unfortunately we have to
* work around).
*/
if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
err_func_addr, 1, 1)) != 0) {
printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
ret);
}
/*
* Setup the nofault PIO read target. (There is no special reason why
* SH_IPI_ACCESS was selected.)
*/
if (is_shub2()) {
xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
} else {
xp_nofault_PIOR_target = SH1_IPI_ACCESS;
}
/* initialize the connection registration semaphores */
for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */
}
return 0;
}
module_init(xp_init);
void __exit
xp_exit(void)
{
u64 func_addr = *(u64 *) xp_nofault_PIOR;
u64 err_func_addr = *(u64 *) xp_error_PIOR;
/* unregister the PIO read nofault code region */
(void) sn_register_nofault_code(func_addr, err_func_addr,
err_func_addr, 1, 0);
}
module_exit(xp_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition (XP) base");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(xp_nofault_PIOR);
EXPORT_SYMBOL(xp_nofault_PIOR_target);
EXPORT_SYMBOL(xpc_registrations);
EXPORT_SYMBOL(xpc_interface);
EXPORT_SYMBOL(xpc_clear_interface);
EXPORT_SYMBOL(xpc_set_interface);
EXPORT_SYMBOL(xpc_connect);
EXPORT_SYMBOL(xpc_disconnect);
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* The xp_nofault_PIOR function takes a pointer to a remote PIO register
* and attempts to load and consume a value from it. This function
* will be registered as a nofault code block. In the event that the
* PIO read fails, the MCA handler will force the error to look
* corrected and vector to the xp_error_PIOR which will return an error.
*
* extern int xp_nofault_PIOR(void *remote_register);
*/
.global xp_nofault_PIOR
xp_nofault_PIOR:
mov r8=r0 // Stage a success return value
ld8.acq r9=[r32];; // PIO Read the specified register
adds r9=1,r9 // Add to force a consume
br.ret.sptk.many b0;; // Return success
.global xp_error_PIOR
xp_error_PIOR:
mov r8=1 // Return value of 1
br.ret.sptk.many b0;; // Return failure
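/*
 * Editorial sketch, not part of this commit, of the calling convention
 * documented above (mark_partition_down() and remote_reg are
 * hypothetical placeholders):
 *
 *	extern int xp_nofault_PIOR(void *remote_register);
 *
 *	// a nonzero return means the PIO read faulted and was vectored
 *	// to xp_error_PIOR, i.e. the remote partition is unreachable
 *	if (xp_nofault_PIOR((void *) remote_reg) != 0)
 *		mark_partition_down();
 */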
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition Communication (XPC) structures and macros.
*/
#ifndef _IA64_SN_KERNEL_XPC_H
#define _IA64_SN_KERNEL_XPC_H
#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sn/bte.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/addrs.h>
#include <asm/sn/mspec.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/xp.h>
/*
* XPC Version numbers consist of a major and minor number. XPC can always
* talk to versions with same major #, and never talk to versions with a
* different major #.
*/
#define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf))
#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)
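/*
 * Worked example (editorial): _XPC_VERSION(3,0) encodes as 0x30, from
 * which XPC_VERSION_MAJOR(0x30) recovers 3 and XPC_VERSION_MINOR(0x30)
 * recovers 0. This matches XPC_V_VERSION defined below.
 */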
/*
* The next macros define word or bit representations for given
* C-brick nasid in either the SAL provided bit array representing
* nasids in the partition/machine or the AMO_t array used for
* inter-partition initiation communications.
*
* For SN2 machines, C-Bricks are always even numbered NASIDs. As
* such, some space will be saved by insisting that nasid information
* passed from SAL always be packed for C-Bricks and that the
* cross-partition interrupts use the same packing scheme.
*/
#define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2)
#define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1))
#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \
(1UL << XPC_NASID_B_INDEX(_n)))
#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
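/*
 * Worked example (editorial): for C-brick nasid 6,
 * XPC_NASID_W_INDEX(6) == (6 / 64) / 2 == 0 and
 * XPC_NASID_B_INDEX(6) == (6 / 2) & 63 == 3, i.e. nasid 6 occupies
 * bit 3 of word 0, and XPC_NASID_FROM_W_B(0, 3) == (0 * 64 + 3) * 2
 * recovers 6.
 */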
#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */
#define XPC_HB_CHECK_DEFAULT_TIMEOUT 20 /* check HB every x secs */
/* define the process name of HB checker and the CPU it is pinned to */
#define XPC_HB_CHECK_THREAD_NAME "xpc_hb"
#define XPC_HB_CHECK_CPU 0
/* define the process name of the discovery thread */
#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery"
#define XPC_HB_ALLOWED(_p, _v) ((_v)->heartbeating_to_mask & (1UL << (_p)))
#define XPC_ALLOW_HB(_p, _v) (_v)->heartbeating_to_mask |= (1UL << (_p))
#define XPC_DISALLOW_HB(_p, _v) (_v)->heartbeating_to_mask &= (~(1UL << (_p)))
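/*
 * Worked example (editorial): XPC_ALLOW_HB(2, xpc_vars) sets bit 2 of
 * heartbeating_to_mask, making XPC_HB_ALLOWED(2, xpc_vars) nonzero
 * until XPC_DISALLOW_HB(2, xpc_vars) clears the bit again.
 */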
/*
* Reserved Page provided by SAL.
*
* SAL provides one page per partition of reserved memory. When SAL
* initialization is complete, SAL_signature, SAL_version, partid,
* part_nasids, and mach_nasids are set.
*
* Note: Until vars_pa is set, the partition XPC code has not been initialized.
*/
struct xpc_rsvd_page {
u64 SAL_signature; /* SAL unique signature */
u64 SAL_version; /* SAL specified version */
u8 partid; /* partition ID from SAL */
u8 version;
u8 pad[6]; /* pad to u64 align */
u64 vars_pa;
u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
};
#define XPC_RP_VERSION _XPC_VERSION(1,0) /* version 1.0 of the reserved page */
#define XPC_RSVD_PAGE_ALIGNED_SIZE \
(L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)))
/*
* Define the structures by which XPC variables can be exported to other
* partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
*/
/*
* The following structure describes the partition generic variables
* needed by other partitions in order to properly initialize.
*
* struct xpc_vars version number also applies to struct xpc_vars_part.
* Changes to either structure and/or related functionality should be
* reflected by incrementing either the major or minor version numbers
* of struct xpc_vars.
*/
struct xpc_vars {
u8 version;
u64 heartbeat;
u64 heartbeating_to_mask;
u64 kdb_status; /* 0 = machine running */
int act_nasid;
int act_phys_cpuid;
u64 vars_part_pa;
u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */
AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */
AMO_t *act_amos; /* pointer to the first activation AMO */
};
#define XPC_V_VERSION _XPC_VERSION(3,0) /* version 3.0 of the cross vars */
#define XPC_VARS_ALIGNED_SIZE (L1_CACHE_ALIGN(sizeof(struct xpc_vars)))
/*
* The following structure describes the per partition specific variables.
*
* An array of these structures, one per partition, will be defined. As a
* partition becomes active XPC will copy the array entry corresponding to
* itself from that partition. It is desirable that the size of this
* structure evenly divide into a cacheline, such that none of the entries
* in this array crosses a cacheline boundary. As it is now, each entry
* occupies half a cacheline.
*/
struct xpc_vars_part {
u64 magic;
u64 openclose_args_pa; /* physical address of open and close args */
u64 GPs_pa; /* physical address of Get/Put values */
u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */
int IPI_nasid; /* nasid of where to send IPIs */
int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */
u8 nchannels; /* #of defined channels supported */
u8 reserved[23]; /* pad to a full 64 bytes */
};
/*
* The vars_part MAGIC numbers play a part in the first contact protocol.
*
* MAGIC1 indicates that the per partition specific variables for a remote
* partition have been initialized by this partition.
*
* MAGIC2 indicates that this partition has pulled the remote partition's
* per partition variables that pertain to this partition.
*/
#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
/*
* Functions registered by add_timer() or called by kernel_thread() only
* allow for a single 64-bit argument. The following macros can be used to
* pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from
* the passed argument.
*/
#define XPC_PACK_ARGS(_arg1, _arg2) \
((((u64) _arg1) & 0xffffffff) | \
((((u64) _arg2) & 0xffffffff) << 32))
#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff)
#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff)
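/*
 * Worked example (editorial) of packing and unpacking two arguments:
 *
 *	u64 args = XPC_PACK_ARGS(partid, ch_number);
 *	...
 *	partid_t p = (partid_t) XPC_UNPACK_ARG1(args);
 *	int ch = (int) XPC_UNPACK_ARG2(args);
 */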
/*
* Define a Get/Put value pair (pointers) used with a message queue.
*/
struct xpc_gp {
s64 get; /* Get value */
s64 put; /* Put value */
};
#define XPC_GP_SIZE \
L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
/*
* Define a structure that contains arguments associated with opening and
* closing a channel.
*/
struct xpc_openclose_args {
u16 reason; /* reason why channel is closing */
u16 msg_size; /* sizeof each message entry */
u16 remote_nentries; /* #of message entries in remote msg queue */
u16 local_nentries; /* #of message entries in local msg queue */
u64 local_msgqueue_pa; /* physical address of local message queue */
};
#define XPC_OPENCLOSE_ARGS_SIZE \
L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
/* struct xpc_msg flags */
#define XPC_M_DONE 0x01 /* msg has been received/consumed */
#define XPC_M_READY 0x02 /* msg is ready to be sent */
#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */
#define XPC_MSG_ADDRESS(_payload) \
((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
/*
* Defines notify entry.
*
* This is used to notify a message's sender that their message was received
* and consumed by the intended recipient.
*/
struct xpc_notify {
struct semaphore sema; /* notify semaphore */
u8 type; /* type of notification */
/* the following two fields are only used if type == XPC_N_CALL */
xpc_notify_func func; /* user's notify function */
void *key; /* pointer to user's key */
};
/* struct xpc_notify type of notification */
#define XPC_N_CALL 0x01 /* notify function provided by user */
/*
* Define the structure that manages all the stuff required by a channel. In
* particular, they are used to manage the messages sent across the channel.
*
* This structure is private to a partition, and is NOT shared across the
* partition boundary.
*
* There is an array of these structures for each remote partition. It is
* allocated at the time a partition becomes active. The array contains one
* of these structures for each potential channel connection to that partition.
*
* Each of these structures manages two message queues (circular buffers).
* They are allocated at the time a channel connection is made. One of
* these message queues (local_msgqueue) holds the locally created messages
* that are destined for the remote partition. The other of these message
* queues (remote_msgqueue) is a locally cached copy of the remote partition's
* own local_msgqueue.
*
* The following is a description of the Get/Put pointers used to manage these
* two message queues. Consider the local_msgqueue to be on one partition
* and the remote_msgqueue to be its cached copy on another partition. A
* description of what each of the lettered areas contains is included.
*
*
* local_msgqueue remote_msgqueue
*
* |/////////| |/////////|
* w_remote_GP.get --> +---------+ |/////////|
* | F | |/////////|
* remote_GP.get --> +---------+ +---------+ <-- local_GP->get
* | | | |
* | | | E |
* | | | |
* | | +---------+ <-- w_local_GP.get
* | B | |/////////|
* | | |////D////|
* | | |/////////|
* | | +---------+ <-- w_remote_GP.put
* | | |////C////|
* local_GP->put --> +---------+ +---------+ <-- remote_GP.put
* | | |/////////|
* | A | |/////////|
* | | |/////////|
* w_local_GP.put --> +---------+ |/////////|
* |/////////| |/////////|
*
*
* ( remote_GP.[get|put] are cached copies of the remote
* partition's local_GP->[get|put], and thus their values can
* lag behind their counterparts on the remote partition. )
*
*
* A - Messages that have been allocated, but have not yet been sent to the
* remote partition.
*
* B - Messages that have been sent, but have not yet been acknowledged by the
* remote partition as having been received.
*
* C - Area that needs to be prepared for the copying of sent messages, by
* the clearing of the message flags of any previously received messages.
*
* D - Area into which sent messages are to be copied from the remote
* partition's local_msgqueue and then delivered to their intended
* recipients. [ To allow for a multi-message copy, another pointer
* (next_msg_to_pull) has been added to keep track of the next message
* number needing to be copied (pulled). It chases after w_remote_GP.put.
* Any messages lying between w_local_GP.get and next_msg_to_pull have
* been copied and are ready to be delivered. ]
*
* E - Messages that have been copied and delivered, but have not yet been
* acknowledged by the recipient as having been received.
*
* F - Messages that have been acknowledged, but XPC has not yet notified the
* sender that the message was received by its intended recipient.
* This is also an area that needs to be prepared for the allocating of
* new messages, by the clearing of the message flags of the acknowledged
* messages.
*/
struct xpc_channel {
partid_t partid; /* ID of remote partition connected */
spinlock_t lock; /* lock for updating this structure */
u32 flags; /* general flags */
enum xpc_retval reason; /* reason why channel is disconnect'g */
int reason_line; /* line# disconnect initiated from */
u16 number; /* channel # */
u16 msg_size; /* sizeof each msg entry */
u16 local_nentries; /* #of msg entries in local msg queue */
u16 remote_nentries; /* #of msg entries in remote msg queue*/
void *local_msgqueue_base; /* base address of kmalloc'd space */
struct xpc_msg *local_msgqueue; /* local message queue */
void *remote_msgqueue_base; /* base address of kmalloc'd space */
struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
/* local message queue */
u64 remote_msgqueue_pa; /* phys addr of remote partition's */
/* local message queue */
atomic_t references; /* #of external references to queues */
atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */
wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
/* queue of msg senders who want to be notified when msg received */
atomic_t n_to_notify; /* #of msg senders to notify */
struct xpc_notify *notify_queue;/* notify queue for messages sent */
xpc_channel_func func; /* user's channel function */
void *key; /* pointer to user's key */
struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
struct semaphore teardown_sema; /* wait for teardown completion */
struct xpc_openclose_args *local_openclose_args; /* args passed on */
/* opening or closing of channel */
/* various flavors of local and remote Get/Put values */
struct xpc_gp *local_GP; /* local Get/Put values */
struct xpc_gp remote_GP; /* remote Get/Put values */
struct xpc_gp w_local_GP; /* working local Get/Put values */
struct xpc_gp w_remote_GP; /* working remote Get/Put values */
s64 next_msg_to_pull; /* Put value of next msg to pull */
/* kthread management related fields */
// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
// >>> allow the assigned limit to be unbounded and let the idle limit be dynamic
// >>> dependent on activity over the last interval of time
atomic_t kthreads_assigned; /* #of kthreads assigned to channel */
u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */
atomic_t kthreads_idle; /* #of kthreads idle waiting for work */
u32 kthreads_idle_limit; /* limit on #of kthreads idle */
atomic_t kthreads_active; /* #of kthreads actively working */
// >>> following field is temporary
u32 kthreads_created; /* total #of kthreads created */
wait_queue_head_t idle_wq; /* idle kthread wait queue */
} ____cacheline_aligned;
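/*
 * Editorial sketch, not part of this commit: the Get/Put values above
 * grow monotonically, so the slot for a given value v in an
 * nentries-deep circular queue of msg_size-sized entries is
 * (v % nentries), e.g. for the local message queue:
 *
 *	static inline struct xpc_msg *
 *	example_slot(struct xpc_channel *ch, s64 v)
 *	{
 *		return (struct xpc_msg *)((u8 *) ch->local_msgqueue +
 *			(v % ch->local_nentries) * ch->msg_size);
 *	}
 */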
/* struct xpc_channel flags */
#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */
#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */
#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
#define XPC_C_CONNECTCALLOUT 0x00000040 /* channel connected callout made */
#define XPC_C_CONNECTED 0x00000080 /* local channel is connected */
#define XPC_C_CONNECTING 0x00000100 /* channel is being connected */
#define XPC_C_RCLOSEREPLY 0x00000200 /* remote close channel reply */
#define XPC_C_CLOSEREPLY 0x00000400 /* local close channel reply */
#define XPC_C_RCLOSEREQUEST 0x00000800 /* remote close channel request */
#define XPC_C_CLOSEREQUEST 0x00001000 /* local close channel request */
#define XPC_C_DISCONNECTED 0x00002000 /* channel is disconnected */
#define XPC_C_DISCONNECTING 0x00004000 /* channel is being disconnected */
/*
* Manages channels on a partition basis. There is one of these structures
* for each partition (a partition will never utilize the structure that
* represents itself).
*/
struct xpc_partition {
/* XPC HB infrastructure */
u64 remote_rp_pa; /* phys addr of partition's rsvd pg */
u64 remote_vars_pa; /* phys addr of partition's vars */
u64 remote_vars_part_pa; /* phys addr of partition's vars part */
u64 last_heartbeat; /* HB at last read */
u64 remote_amos_page_pa; /* phys addr of partition's amos page */
int remote_act_nasid; /* active part's act/deact nasid */
int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
u32 act_IRQ_rcvd; /* IRQs since activation */
spinlock_t act_lock; /* protect updating of act_state */
u8 act_state; /* from XPC HB viewpoint */
enum xpc_retval reason; /* reason partition is deactivating */
int reason_line; /* line# deactivation initiated from */
int reactivate_nasid; /* nasid in partition to reactivate */
/* XPC infrastructure referencing and teardown control */
u8 setup_state; /* infrastructure setup state */
wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
atomic_t references; /* #of references to infrastructure */
/*
* NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
* XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
* COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE
* 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
*/
u8 nchannels; /* #of defined channels supported */
atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
struct xpc_channel *channels;/* array of channel structures */
void *local_GPs_base; /* base address of kmalloc'd space */
struct xpc_gp *local_GPs; /* local Get/Put values */
void *remote_GPs_base; /* base address of kmalloc'd space */
struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
/* values */
u64 remote_GPs_pa; /* phys address of remote partition's local */
/* Get/Put values */
/* fields used to pass args when opening or closing a channel */
void *local_openclose_args_base; /* base address of kmalloc'd space */
struct xpc_openclose_args *local_openclose_args; /* local's args */
void *remote_openclose_args_base; /* base address of kmalloc'd space */
struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
/* args */
u64 remote_openclose_args_pa; /* phys addr of remote's args */
/* IPI sending, receiving and handling related fields */
int remote_IPI_nasid; /* nasid of where to send IPIs */
int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */
AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
u64 local_IPI_amo; /* IPI amo flags yet to be handled */
char IPI_owner[8]; /* IPI owner's name */
struct timer_list dropped_IPI_timer; /* dropped IPI timer */
spinlock_t IPI_lock; /* IPI handler lock */
/* channel manager related fields */
atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
} ____cacheline_aligned;
/* struct xpc_partition act_state values (for XPC HB) */
#define XPC_P_INACTIVE 0x00 /* partition is not active */
#define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */
#define XPC_P_ACTIVATING 0x02 /* activation thread started */
#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */
#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */
#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
xpc_deactivate_partition(__LINE__, (_p), (_reason))
/* struct xpc_partition setup_state values */
#define XPC_P_UNSET 0x00 /* infrastructure was never setup */
#define XPC_P_SETUP 0x01 /* infrastructure is setup */
#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
#define XPC_P_TORNDOWN 0x03 /* infrastructure is torn down */
/*
* struct xpc_partition IPI_timer #of seconds to wait before checking for
* dropped IPIs. These occur whenever an IPI amo write doesn't complete until
* after the IPI was received.
*/
#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
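/*
 * Worked example (editorial): XPC_PARTID(&xpc_partitions[3]) == 3; the
 * macro recovers a partition's ID from its slot in the array.
 */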
/* found in xp_main.c */
extern struct xpc_registration xpc_registrations[];
/* >>> found in xpc_main.c only */
extern struct device *xpc_part;
extern struct device *xpc_chan;
extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *);
extern void xpc_dropped_IPI_check(struct xpc_partition *);
extern void xpc_activate_kthreads(struct xpc_channel *, int);
extern void xpc_create_kthreads(struct xpc_channel *, int);
extern void xpc_disconnect_wait(int);
/* found in xpc_main.c and efi-xpc.c */
extern void xpc_activate_partition(struct xpc_partition *);
/* found in xpc_partition.c */
extern int xpc_exiting;
extern int xpc_hb_interval;
extern int xpc_hb_check_interval;
extern struct xpc_vars *xpc_vars;
extern struct xpc_rsvd_page *xpc_rsvd_page;
extern struct xpc_vars_part *xpc_vars_part;
extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
extern char xpc_remote_copy_buffer[];
extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
extern void xpc_allow_IPI_ops(void);
extern void xpc_restrict_IPI_ops(void);
extern int xpc_identify_act_IRQ_sender(void);
extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
extern void xpc_mark_partition_inactive(struct xpc_partition *);
extern void xpc_discovery(void);
extern void xpc_check_remote_hb(void);
extern void xpc_deactivate_partition(const int, struct xpc_partition *,
enum xpc_retval);
extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
/* found in xpc_channel.c */
extern void xpc_initiate_connect(int);
extern void xpc_initiate_disconnect(int);
extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **);
extern enum xpc_retval xpc_initiate_send(partid_t, int, void *);
extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *,
xpc_notify_func, void *);
extern void xpc_initiate_received(partid_t, int, void *);
extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *);
extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *);
extern void xpc_process_channel_activity(struct xpc_partition *);
extern void xpc_connected_callout(struct xpc_channel *);
extern void xpc_deliver_msg(struct xpc_channel *);
extern void xpc_disconnect_channel(const int, struct xpc_channel *,
enum xpc_retval, unsigned long *);
extern void xpc_disconnected_callout(struct xpc_channel *);
extern void xpc_partition_down(struct xpc_partition *, enum xpc_retval);
extern void xpc_teardown_infrastructure(struct xpc_partition *);
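/*
 * Wake the partition's channel manager, coalescing concurrent requests:
 * only the 0 -> 1 transition of channel_mgr_requests issues a wake_up(),
 * so a burst of requests costs a single pass of the manager.
 */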
static inline void
xpc_wakeup_channel_mgr(struct xpc_partition *part)
{
if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
wake_up(&part->channel_mgr_wq);
}
}
/*
* These next two inlines are used to keep us from tearing down a channel's
* msg queues while a thread may be referencing them.
*/
static inline void
xpc_msgqueue_ref(struct xpc_channel *ch)
{
atomic_inc(&ch->references);
}
static inline void
xpc_msgqueue_deref(struct xpc_channel *ch)
{
s32 refs = atomic_dec_return(&ch->references);
DBUG_ON(refs < 0);
if (refs == 0) {
xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
}
}
#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
/*
* These two inlines are used to keep us from tearing down a partition's
* setup infrastructure while a thread may be referencing it.
*/
static inline void
xpc_part_deref(struct xpc_partition *part)
{
s32 refs = atomic_dec_return(&part->references);
DBUG_ON(refs < 0);
if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
wake_up(&part->teardown_wq);
}
}
static inline int
xpc_part_ref(struct xpc_partition *part)
{
int setup;
atomic_inc(&part->references);
setup = (part->setup_state == XPC_P_SETUP);
if (!setup) {
xpc_part_deref(part);
}
return setup;
}
/*
* The following macro is to be used for the setting of the reason and
* reason_line fields in both the struct xpc_channel and struct xpc_partition
* structures.
*/
#define XPC_SET_REASON(_p, _reason, _line) \
{ \
(_p)->reason = _reason; \
(_p)->reason_line = _line; \
}
/*
* The following set of macros and inlines are used for the sending and
* receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
* one that is associated with partition activity (SGI_XPC_ACTIVATE) and
* the other that is associated with channel activity (SGI_XPC_NOTIFY).
*/
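/*
 * Atomically fetch and clear an AMO variable, returning whatever IPI
 * flags had accumulated since the last receive. The destructive read
 * (FETCHOP_CLEAR) guarantees that no flag is ever seen twice.
 */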
static inline u64
xpc_IPI_receive(AMO_t *amo)
{
return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
}
static inline enum xpc_retval
xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
{
int ret = 0;
unsigned long irq_flags;
local_irq_save(irq_flags);
FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag);
sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
/*
* We must always use the nofault function regardless of whether we
* are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
* didn't, we'd never know that the other partition is down and would
* keep sending IPIs and AMOs to it until the heartbeat times out.
*/
ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
xp_nofault_PIOR_target));
local_irq_restore(irq_flags);
return ((ret == 0) ? xpcSuccess : xpcPioReadError);
}
/*
* IPIs associated with SGI_XPC_ACTIVATE IRQ.
*/
/*
* Flag the appropriate AMO variable and send an IPI to the specified node.
*/
static inline void
xpc_activate_IRQ_send(u64 amos_page, int from_nasid, int to_nasid,
int to_phys_cpuid)
{
int w_index = XPC_NASID_W_INDEX(from_nasid);
int b_index = XPC_NASID_B_INDEX(from_nasid);
AMO_t *amos = (AMO_t *) __va(amos_page +
(XP_MAX_PARTITIONS * sizeof(AMO_t)));
(void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
to_phys_cpuid, SGI_XPC_ACTIVATE);
}
static inline void
xpc_IPI_send_activate(struct xpc_vars *vars)
{
xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
vars->act_nasid, vars->act_phys_cpuid);
}
static inline void
xpc_IPI_send_activated(struct xpc_partition *part)
{
xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
part->remote_act_nasid, part->remote_act_phys_cpuid);
}
static inline void
xpc_IPI_send_reactivate(struct xpc_partition *part)
{
xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
}
/*
* IPIs associated with SGI_XPC_NOTIFY IRQ.
*/
/*
* Send an IPI to the remote partition that is associated with the
* specified channel.
*/
#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \
xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f)
static inline void
xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
unsigned long *irq_flags)
{
struct xpc_partition *part = &xpc_partitions[ch->partid];
enum xpc_retval ret;
if (likely(part->act_state != XPC_P_DEACTIVATING)) {
ret = xpc_IPI_send(part->remote_IPI_amo_va,
(u64) ipi_flag << (ch->number * 8),
part->remote_IPI_nasid,
part->remote_IPI_phys_cpuid,
SGI_XPC_NOTIFY);
dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
ipi_flag_string, ch->partid, ch->number, ret);
if (unlikely(ret != xpcSuccess)) {
if (irq_flags != NULL) {
spin_unlock_irqrestore(&ch->lock, *irq_flags);
}
XPC_DEACTIVATE_PARTITION(part, ret);
if (irq_flags != NULL) {
spin_lock_irqsave(&ch->lock, *irq_flags);
}
}
}
}
/*
* Make it look like the remote partition, which is associated with the
* specified channel, sent us an IPI. This faked IPI will be handled
* by xpc_dropped_IPI_check().
*/
#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \
xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f)
static inline void
xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
char *ipi_flag_string)
{
struct xpc_partition *part = &xpc_partitions[ch->partid];
FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable),
FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
ipi_flag_string, ch->partid, ch->number);
}
/*
* The sending and receiving of IPIs includes the setting of an AMO variable
* to indicate the reason the IPI was sent. The 64-bit variable is divided
* up into eight bytes, ordered from right to left. Byte zero pertains to
* channel 0, byte one to channel 1, and so on. Each byte is described by
* the following IPI flags.
*/
#define XPC_IPI_CLOSEREQUEST 0x01
#define XPC_IPI_CLOSEREPLY 0x02
#define XPC_IPI_OPENREQUEST 0x04
#define XPC_IPI_OPENREPLY 0x08
#define XPC_IPI_MSGREQUEST 0x10
/* given an AMO variable and a channel#, get its associated IPI flags */
#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010)
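/*
 * Illustrative sketch (not part of the driver): packing and unpacking
 * the per-channel IPI flag bytes of the 64-bit AMO value. An
 * OPENREQUEST for channel 2, for example, lands in byte 2:
 *
 *	u64 amo = 0;
 *
 *	amo |= (u64) XPC_IPI_OPENREQUEST << (2 * 8);	// amo == 0x040000
 *	XPC_GET_IPI_FLAGS(amo, 2);		// == 0x04 (OPENREQUEST)
 *	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(amo);	// nonzero
 *	XPC_ANY_MSG_IPI_FLAGS_SET(amo);		// zero
 */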
static inline void
xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_openclose_args *args = ch->local_openclose_args;
args->reason = ch->reason;
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
}
static inline void
xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags)
{
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags);
}
static inline void
xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_openclose_args *args = ch->local_openclose_args;
args->msg_size = ch->msg_size;
args->local_nentries = ch->local_nentries;
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags);
}
static inline void
xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_openclose_args *args = ch->local_openclose_args;
args->remote_nentries = ch->remote_nentries;
args->local_nentries = ch->local_nentries;
args->local_msgqueue_pa = __pa(ch->local_msgqueue);
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags);
}
static inline void
xpc_IPI_send_msgrequest(struct xpc_channel *ch)
{
XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL);
}
static inline void
xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
{
XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
}
/*
* Memory for XPC's AMO variables is allocated by the MSPEC driver. These
* pages are located in the lowest granule. The lowest granule uses 4k pages
* for cached references and an alternate TLB handler to never provide a
* cacheable mapping for the entire region. This will prevent speculative
* reading of cached copies of our lines from being issued which will cause
* a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
* (XP_MAX_PARTITIONS) AMO variables for message notification (xpc_main.c)
* and an additional 16 AMO variables for partition activation (xpc_hb.c).
*/
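/*
 * Illustrative note: AMO slot `partid' of the page is dedicated to
 * notify IPIs from that partition. xpc_IPI_init() below performs a
 * destructive read of the slot, so any flags left over from a previous
 * incarnation of the partition are discarded before the AMO is used.
 */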
static inline AMO_t *
xpc_IPI_init(partid_t partid)
{
AMO_t *part_amo = xpc_vars->amos_page + partid;
xpc_IPI_receive(part_amo);
return part_amo;
}
static inline enum xpc_retval
xpc_map_bte_errors(bte_result_t error)
{
switch (error) {
case BTE_SUCCESS: return xpcSuccess;
case BTEFAIL_DIR: return xpcBteDirectoryError;
case BTEFAIL_POISON: return xpcBtePoisonError;
case BTEFAIL_WERR: return xpcBteWriteError;
case BTEFAIL_ACCESS: return xpcBteAccessError;
case BTEFAIL_PWERR: return xpcBtePWriteError;
case BTEFAIL_PRERR: return xpcBtePReadError;
case BTEFAIL_TOUT: return xpcBteTimeOutError;
case BTEFAIL_XTERR: return xpcBteXtalkError;
case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable;
default: return xpcBteUnmappedError;
}
}
static inline void *
xpc_kmalloc_cacheline_aligned(size_t size, int flags, void **base)
{
	/* see if kmalloc will give us cacheline aligned memory by default */
*base = kmalloc(size, flags);
if (*base == NULL) {
return NULL;
}
if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
return *base;
}
kfree(*base);
/* nope, we'll have to do it ourselves */
*base = kmalloc(size + L1_CACHE_BYTES, flags);
if (*base == NULL) {
return NULL;
}
return (void *) L1_CACHE_ALIGN((u64) *base);
}
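/*
 * Illustrative sketch (hypothetical caller, not part of the driver):
 * the returned pointer is the one to use, but *base is the one to
 * kfree(), since the aligned area may begin inside an over-allocated
 * buffer.
 *
 *	void *buf_base;
 *	void *buf = xpc_kmalloc_cacheline_aligned(512, GFP_KERNEL,
 *						  &buf_base);
 *
 *	if (buf != NULL) {
 *		... use buf ...
 *		kfree(buf_base);	// never kfree(buf)
 *	}
 */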
/*
* Check to see if there is any channel activity to/from the specified
* partition.
*/
static inline void
xpc_check_for_channel_activity(struct xpc_partition *part)
{
u64 IPI_amo;
unsigned long irq_flags;
IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
if (IPI_amo == 0) {
return;
}
spin_lock_irqsave(&part->IPI_lock, irq_flags);
part->local_IPI_amo |= IPI_amo;
spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
XPC_PARTID(part), IPI_amo);
xpc_wakeup_channel_mgr(part);
}
#endif /* _IA64_SN_KERNEL_XPC_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition Communication (XPC) channel support.
*
* This is the part of XPC that manages the channels and
* sends/receives messages across them to/from other partitions.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <asm/sn/bte.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"
/*
* Set up the initial values for the XPartition Communication channels.
*/
static void
xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
{
int ch_number;
struct xpc_channel *ch;
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch = &part->channels[ch_number];
ch->partid = partid;
ch->number = ch_number;
ch->flags = XPC_C_DISCONNECTED;
ch->local_GP = &part->local_GPs[ch_number];
ch->local_openclose_args =
&part->local_openclose_args[ch_number];
atomic_set(&ch->kthreads_assigned, 0);
atomic_set(&ch->kthreads_idle, 0);
atomic_set(&ch->kthreads_active, 0);
atomic_set(&ch->references, 0);
atomic_set(&ch->n_to_notify, 0);
spin_lock_init(&ch->lock);
sema_init(&ch->msg_to_pull_sema, 1); /* mutex */
atomic_set(&ch->n_on_msg_allocate_wq, 0);
init_waitqueue_head(&ch->msg_allocate_wq);
init_waitqueue_head(&ch->idle_wq);
}
}
/*
* Setup the infrastructure necessary to support XPartition Communication
* between the specified remote partition and the local one.
*/
enum xpc_retval
xpc_setup_infrastructure(struct xpc_partition *part)
{
int ret;
struct timer_list *timer;
partid_t partid = XPC_PARTID(part);
/*
* Zero out MOST of the entry for this partition. Only the fields
* starting with `nchannels' will be zeroed. The preceding fields must
* remain `viable' across partition ups and downs, since they may be
* referenced during this memset() operation.
*/
memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
offsetof(struct xpc_partition, nchannels));
/*
* Allocate all of the channel structures as a contiguous chunk of
* memory.
*/
part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
GFP_KERNEL);
if (part->channels == NULL) {
dev_err(xpc_chan, "can't get memory for channels\n");
return xpcNoMemory;
}
memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS);
part->nchannels = XPC_NCHANNELS;
/* allocate all the required GET/PUT values */
part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
GFP_KERNEL, &part->local_GPs_base);
if (part->local_GPs == NULL) {
kfree(part->channels);
part->channels = NULL;
dev_err(xpc_chan, "can't get memory for local get/put "
"values\n");
return xpcNoMemory;
}
memset(part->local_GPs, 0, XPC_GP_SIZE);
part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
GFP_KERNEL, &part->remote_GPs_base);
if (part->remote_GPs == NULL) {
kfree(part->channels);
part->channels = NULL;
kfree(part->local_GPs_base);
part->local_GPs = NULL;
dev_err(xpc_chan, "can't get memory for remote get/put "
"values\n");
return xpcNoMemory;
}
memset(part->remote_GPs, 0, XPC_GP_SIZE);
/* allocate all the required open and close args */
part->local_openclose_args = xpc_kmalloc_cacheline_aligned(
XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
&part->local_openclose_args_base);
if (part->local_openclose_args == NULL) {
kfree(part->channels);
part->channels = NULL;
kfree(part->local_GPs_base);
part->local_GPs = NULL;
kfree(part->remote_GPs_base);
part->remote_GPs = NULL;
dev_err(xpc_chan, "can't get memory for local connect args\n");
return xpcNoMemory;
}
memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
part->remote_openclose_args = xpc_kmalloc_cacheline_aligned(
XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
&part->remote_openclose_args_base);
if (part->remote_openclose_args == NULL) {
kfree(part->channels);
part->channels = NULL;
kfree(part->local_GPs_base);
part->local_GPs = NULL;
kfree(part->remote_GPs_base);
part->remote_GPs = NULL;
kfree(part->local_openclose_args_base);
part->local_openclose_args = NULL;
dev_err(xpc_chan, "can't get memory for remote connect args\n");
return xpcNoMemory;
}
memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
xpc_initialize_channels(part, partid);
atomic_set(&part->nchannels_active, 0);
	/* the local_IPI_amo field was set to 0 by an earlier memset() */
	/* Initialize this partition's AMO_t structure */
part->local_IPI_amo_va = xpc_IPI_init(partid);
spin_lock_init(&part->IPI_lock);
atomic_set(&part->channel_mgr_requests, 1);
init_waitqueue_head(&part->channel_mgr_wq);
sprintf(part->IPI_owner, "xpc%02d", partid);
ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ,
part->IPI_owner, (void *) (u64) partid);
if (ret != 0) {
kfree(part->channels);
part->channels = NULL;
kfree(part->local_GPs_base);
part->local_GPs = NULL;
kfree(part->remote_GPs_base);
part->remote_GPs = NULL;
kfree(part->local_openclose_args_base);
part->local_openclose_args = NULL;
kfree(part->remote_openclose_args_base);
part->remote_openclose_args = NULL;
dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
"errno=%d\n", -ret);
return xpcLackOfResources;
}
/* Setup a timer to check for dropped IPIs */
timer = &part->dropped_IPI_timer;
init_timer(timer);
timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check;
timer->data = (unsigned long) part;
timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
add_timer(timer);
/*
* With the setting of the partition setup_state to XPC_P_SETUP, we're
* declaring that this partition is ready to go.
*/
	*(volatile u8 *) &part->setup_state = XPC_P_SETUP;
/*
* Setup the per partition specific variables required by the
* remote partition to establish channel connections with us.
*
* The setting of the magic # indicates that these per partition
* specific variables are ready to be used.
*/
xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
xpc_vars_part[partid].openclose_args_pa =
__pa(part->local_openclose_args);
xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(smp_processor_id());
xpc_vars_part[partid].IPI_phys_cpuid =
cpu_physical_id(smp_processor_id());
xpc_vars_part[partid].nchannels = part->nchannels;
	*(volatile u64 *) &xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
return xpcSuccess;
}
/*
* Create a wrapper that hides the underlying mechanism for pulling a cacheline
* (or multiple cachelines) from a remote partition.
*
* src must be a cacheline aligned physical address on the remote partition.
* dst must be a cacheline aligned virtual address on this partition.
 * cnt must be a cacheline-sized multiple.
*/
static enum xpc_retval
xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
const void *src, size_t cnt)
{
bte_result_t bte_ret;
DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
if (part->act_state == XPC_P_DEACTIVATING) {
return part->reason;
}
bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
(u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
if (bte_ret == BTE_SUCCESS) {
return xpcSuccess;
}
dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
XPC_PARTID(part), bte_ret);
return xpc_map_bte_errors(bte_ret);
}
/*
 * Pull the remote per partition specific variables from the specified
* partition.
*/
enum xpc_retval
xpc_pull_remote_vars_part(struct xpc_partition *part)
{
u8 buffer[L1_CACHE_BYTES * 2];
struct xpc_vars_part *pulled_entry_cacheline =
(struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
struct xpc_vars_part *pulled_entry;
u64 remote_entry_cacheline_pa, remote_entry_pa;
partid_t partid = XPC_PARTID(part);
enum xpc_retval ret;
/* pull the cacheline that contains the variables we're interested in */
DBUG_ON(part->remote_vars_part_pa !=
L1_CACHE_ALIGN(part->remote_vars_part_pa));
DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
remote_entry_pa = part->remote_vars_part_pa +
sn_partition_id * sizeof(struct xpc_vars_part);
remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline +
(remote_entry_pa & (L1_CACHE_BYTES - 1)));
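	/*
	 * Worked example (assuming L1_CACHE_BYTES == 128, so each
	 * xpc_vars_part entry is 64 bytes per the DBUG_ON above): for
	 * sn_partition_id == 3,
	 *
	 *	remote_entry_pa           = remote_vars_part_pa + 3 * 64
	 *	remote_entry_cacheline_pa = remote_entry_pa & ~127
	 *	pulled_entry              = pulled_entry_cacheline + 64
	 *
	 * i.e. the entry sits in the upper half of its cacheline and the
	 * same offset is applied within the local buffer.
	 */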
ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
(void *) remote_entry_cacheline_pa,
L1_CACHE_BYTES);
if (ret != xpcSuccess) {
dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
"partition %d, ret=%d\n", partid, ret);
return ret;
}
/* see if they've been set up yet */
if (pulled_entry->magic != XPC_VP_MAGIC1 &&
pulled_entry->magic != XPC_VP_MAGIC2) {
if (pulled_entry->magic != 0) {
dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
"partition %d has bad magic value (=0x%lx)\n",
partid, sn_partition_id, pulled_entry->magic);
return xpcBadMagic;
}
/* they've not been initialized yet */
return xpcRetry;
}
if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {
/* validate the variables */
if (pulled_entry->GPs_pa == 0 ||
pulled_entry->openclose_args_pa == 0 ||
pulled_entry->IPI_amo_pa == 0) {
dev_err(xpc_chan, "partition %d's XPC vars_part for "
"partition %d are not valid\n", partid,
sn_partition_id);
return xpcInvalidAddress;
}
/* the variables we imported look to be valid */
part->remote_GPs_pa = pulled_entry->GPs_pa;
part->remote_openclose_args_pa =
pulled_entry->openclose_args_pa;
part->remote_IPI_amo_va =
(AMO_t *) __va(pulled_entry->IPI_amo_pa);
part->remote_IPI_nasid = pulled_entry->IPI_nasid;
part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
if (part->nchannels > pulled_entry->nchannels) {
part->nchannels = pulled_entry->nchannels;
}
/* let the other side know that we've pulled their variables */
		*(volatile u64 *) &xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
}
if (pulled_entry->magic == XPC_VP_MAGIC1) {
return xpcRetry;
}
return xpcSuccess;
}
/*
* Get the IPI flags and pull the openclose args and/or remote GPs as needed.
*/
static u64
xpc_get_IPI_flags(struct xpc_partition *part)
{
unsigned long irq_flags;
u64 IPI_amo;
enum xpc_retval ret;
/*
* See if there are any IPI flags to be handled.
*/
spin_lock_irqsave(&part->IPI_lock, irq_flags);
if ((IPI_amo = part->local_IPI_amo) != 0) {
part->local_IPI_amo = 0;
}
spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
ret = xpc_pull_remote_cachelines(part,
part->remote_openclose_args,
(void *) part->remote_openclose_args_pa,
XPC_OPENCLOSE_ARGS_SIZE);
if (ret != xpcSuccess) {
XPC_DEACTIVATE_PARTITION(part, ret);
dev_dbg(xpc_chan, "failed to pull openclose args from "
"partition %d, ret=%d\n", XPC_PARTID(part),
ret);
/* don't bother processing IPIs anymore */
IPI_amo = 0;
}
}
if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
(void *) part->remote_GPs_pa,
XPC_GP_SIZE);
if (ret != xpcSuccess) {
XPC_DEACTIVATE_PARTITION(part, ret);
dev_dbg(xpc_chan, "failed to pull GPs from partition "
"%d, ret=%d\n", XPC_PARTID(part), ret);
/* don't bother processing IPIs anymore */
IPI_amo = 0;
}
}
return IPI_amo;
}
/*
* Allocate the local message queue and the notify queue.
*/
static enum xpc_retval
xpc_allocate_local_msgqueue(struct xpc_channel *ch)
{
unsigned long irq_flags;
int nentries;
size_t nbytes;
// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
// >>> iterations of the for-loop, bail if set?
	// >>> should we impose a minimum #of entries? like 4 or 8?
for (nentries = ch->local_nentries; nentries > 0; nentries--) {
nbytes = nentries * ch->msg_size;
ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
(GFP_KERNEL | GFP_DMA),
&ch->local_msgqueue_base);
if (ch->local_msgqueue == NULL) {
continue;
}
memset(ch->local_msgqueue, 0, nbytes);
nbytes = nentries * sizeof(struct xpc_notify);
ch->notify_queue = kmalloc(nbytes, (GFP_KERNEL | GFP_DMA));
if (ch->notify_queue == NULL) {
kfree(ch->local_msgqueue_base);
ch->local_msgqueue = NULL;
continue;
}
memset(ch->notify_queue, 0, nbytes);
spin_lock_irqsave(&ch->lock, irq_flags);
if (nentries < ch->local_nentries) {
dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
"partid=%d, channel=%d\n", nentries,
ch->local_nentries, ch->partid, ch->number);
ch->local_nentries = nentries;
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpcSuccess;
}
dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
"queue, partid=%d, channel=%d\n", ch->partid, ch->number);
return xpcNoMemory;
}
/*
* Allocate the cached remote message queue.
*/
static enum xpc_retval
xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
{
unsigned long irq_flags;
int nentries;
size_t nbytes;
DBUG_ON(ch->remote_nentries <= 0);
// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
// >>> iterations of the for-loop, bail if set?
	// >>> should we impose a minimum #of entries? like 4 or 8?
for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
nbytes = nentries * ch->msg_size;
ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
(GFP_KERNEL | GFP_DMA),
&ch->remote_msgqueue_base);
if (ch->remote_msgqueue == NULL) {
continue;
}
memset(ch->remote_msgqueue, 0, nbytes);
spin_lock_irqsave(&ch->lock, irq_flags);
if (nentries < ch->remote_nentries) {
dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
"partid=%d, channel=%d\n", nentries,
ch->remote_nentries, ch->partid, ch->number);
ch->remote_nentries = nentries;
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpcSuccess;
}
dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
return xpcNoMemory;
}
/*
* Allocate message queues and other stuff associated with a channel.
*
* Note: Assumes all of the channel sizes are filled in.
*/
static enum xpc_retval
xpc_allocate_msgqueues(struct xpc_channel *ch)
{
unsigned long irq_flags;
int i;
enum xpc_retval ret;
DBUG_ON(ch->flags & XPC_C_SETUP);
if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
return ret;
}
if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
kfree(ch->local_msgqueue_base);
ch->local_msgqueue = NULL;
kfree(ch->notify_queue);
ch->notify_queue = NULL;
return ret;
}
for (i = 0; i < ch->local_nentries; i++) {
/* use a semaphore as an event wait queue */
sema_init(&ch->notify_queue[i].sema, 0);
}
sema_init(&ch->teardown_sema, 0); /* event wait */
spin_lock_irqsave(&ch->lock, irq_flags);
ch->flags |= XPC_C_SETUP;
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpcSuccess;
}
/*
* Process a connect message from a remote partition.
*
* Note: xpc_process_connect() is expecting to be called with the
 * channel's lock held (via spin_lock_irqsave()) and will leave it
 * locked upon return.
*/
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
enum xpc_retval ret;
DBUG_ON(!spin_is_locked(&ch->lock));
if (!(ch->flags & XPC_C_OPENREQUEST) ||
!(ch->flags & XPC_C_ROPENREQUEST)) {
/* nothing more to do for now */
return;
}
DBUG_ON(!(ch->flags & XPC_C_CONNECTING));
if (!(ch->flags & XPC_C_SETUP)) {
spin_unlock_irqrestore(&ch->lock, *irq_flags);
ret = xpc_allocate_msgqueues(ch);
spin_lock_irqsave(&ch->lock, *irq_flags);
if (ret != xpcSuccess) {
XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
}
if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
return;
}
DBUG_ON(!(ch->flags & XPC_C_SETUP));
DBUG_ON(ch->local_msgqueue == NULL);
DBUG_ON(ch->remote_msgqueue == NULL);
}
if (!(ch->flags & XPC_C_OPENREPLY)) {
ch->flags |= XPC_C_OPENREPLY;
xpc_IPI_send_openreply(ch, irq_flags);
}
if (!(ch->flags & XPC_C_ROPENREPLY)) {
return;
}
DBUG_ON(ch->remote_msgqueue_pa == 0);
ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
dev_info(xpc_chan, "channel %d to partition %d connected\n",
ch->number, ch->partid);
spin_unlock_irqrestore(&ch->lock, *irq_flags);
xpc_create_kthreads(ch, 1);
spin_lock_irqsave(&ch->lock, *irq_flags);
}
/*
 * Free up message queues and other stuff that was allocated for the specified
* channel.
*
* Note: ch->reason and ch->reason_line are left set for debugging purposes,
* they're cleared when XPC_C_DISCONNECTED is cleared.
*/
static void
xpc_free_msgqueues(struct xpc_channel *ch)
{
DBUG_ON(!spin_is_locked(&ch->lock));
DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
ch->remote_msgqueue_pa = 0;
ch->func = NULL;
ch->key = NULL;
ch->msg_size = 0;
ch->local_nentries = 0;
ch->remote_nentries = 0;
ch->kthreads_assigned_limit = 0;
ch->kthreads_idle_limit = 0;
ch->local_GP->get = 0;
ch->local_GP->put = 0;
ch->remote_GP.get = 0;
ch->remote_GP.put = 0;
ch->w_local_GP.get = 0;
ch->w_local_GP.put = 0;
ch->w_remote_GP.get = 0;
ch->w_remote_GP.put = 0;
ch->next_msg_to_pull = 0;
if (ch->flags & XPC_C_SETUP) {
ch->flags &= ~XPC_C_SETUP;
dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
ch->flags, ch->partid, ch->number);
kfree(ch->local_msgqueue_base);
ch->local_msgqueue = NULL;
kfree(ch->remote_msgqueue_base);
ch->remote_msgqueue = NULL;
kfree(ch->notify_queue);
ch->notify_queue = NULL;
/* in case someone is waiting for the teardown to complete */
up(&ch->teardown_sema);
}
}
/*
 * The channel's lock, taken via spin_lock_irqsave(), is expected to
 * be held on entry.
*/
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_partition *part = &xpc_partitions[ch->partid];
u32 ch_flags = ch->flags;
DBUG_ON(!spin_is_locked(&ch->lock));
if (!(ch->flags & XPC_C_DISCONNECTING)) {
return;
}
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
/* make sure all activity has settled down first */
if (atomic_read(&ch->references) > 0) {
return;
}
DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
/* it's now safe to free the channel's message queues */
xpc_free_msgqueues(ch);
DBUG_ON(ch->flags & XPC_C_SETUP);
if (part->act_state != XPC_P_DEACTIVATING) {
/* as long as the other side is up do the full protocol */
if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
return;
}
if (!(ch->flags & XPC_C_CLOSEREPLY)) {
ch->flags |= XPC_C_CLOSEREPLY;
xpc_IPI_send_closereply(ch, irq_flags);
}
if (!(ch->flags & XPC_C_RCLOSEREPLY)) {
return;
}
}
/* both sides are disconnected now */
ch->flags = XPC_C_DISCONNECTED; /* clear all flags, but this one */
atomic_dec(&part->nchannels_active);
if (ch_flags & XPC_C_WASCONNECTED) {
dev_info(xpc_chan, "channel %d to partition %d disconnected, "
"reason=%d\n", ch->number, ch->partid, ch->reason);
}
}
/*
* Process a change in the channel's remote connection state.
*/
static void
xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
u8 IPI_flags)
{
unsigned long irq_flags;
struct xpc_openclose_args *args =
&part->remote_openclose_args[ch_number];
struct xpc_channel *ch = &part->channels[ch_number];
enum xpc_retval reason;
spin_lock_irqsave(&ch->lock, irq_flags);
if (IPI_flags & XPC_IPI_CLOSEREQUEST) {
dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
"from partid=%d, channel=%d\n", args->reason,
ch->partid, ch->number);
/*
* If RCLOSEREQUEST is set, we're probably waiting for
* RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the IPI_flags.
*/
if (ch->flags & XPC_C_RCLOSEREQUEST) {
DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);
DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY));
IPI_flags &= ~XPC_IPI_CLOSEREPLY;
ch->flags |= XPC_C_RCLOSEREPLY;
/* both sides have finished disconnecting */
xpc_process_disconnect(ch, &irq_flags);
}
if (ch->flags & XPC_C_DISCONNECTED) {
			/*
			 * The channel is fully disconnected on our side.
			 * A CLOSEREQUEST packed with an OPENREQUEST means
			 * the remote side reopened and closed again before
			 * we processed either; revive the channel so the
			 * close can follow the full protocol. Without a
			 * packed OPENREQUEST, this can only happen while
			 * the partition is deactivating.
			 */
if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
DBUG_ON(part->act_state !=
XPC_P_DEACTIVATING);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
XPC_SET_REASON(ch, 0, 0);
ch->flags &= ~XPC_C_DISCONNECTED;
atomic_inc(&part->nchannels_active);
ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
}
IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY);
/*
* The meaningful CLOSEREQUEST connection state fields are:
* reason = reason connection is to be closed
*/
ch->flags |= XPC_C_RCLOSEREQUEST;
if (!(ch->flags & XPC_C_DISCONNECTING)) {
reason = args->reason;
if (reason <= xpcSuccess || reason > xpcUnknownReason) {
reason = xpcUnknownReason;
} else if (reason == xpcUnregistering) {
reason = xpcOtherUnregistering;
}
XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
} else {
xpc_process_disconnect(ch, &irq_flags);
}
}
if (IPI_flags & XPC_IPI_CLOSEREPLY) {
dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
" channel=%d\n", ch->partid, ch->number);
if (ch->flags & XPC_C_DISCONNECTED) {
DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
DBUG_ON(!(ch->flags & XPC_C_RCLOSEREQUEST));
ch->flags |= XPC_C_RCLOSEREPLY;
if (ch->flags & XPC_C_CLOSEREPLY) {
/* both sides have finished disconnecting */
xpc_process_disconnect(ch, &irq_flags);
}
}
if (IPI_flags & XPC_IPI_OPENREQUEST) {
dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
"local_nentries=%d) received from partid=%d, "
"channel=%d\n", args->msg_size, args->local_nentries,
ch->partid, ch->number);
if ((ch->flags & XPC_C_DISCONNECTING) ||
part->act_state == XPC_P_DEACTIVATING) {
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
XPC_C_OPENREQUEST)));
DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
XPC_C_OPENREPLY | XPC_C_CONNECTED));
/*
* The meaningful OPENREQUEST connection state fields are:
* msg_size = size of channel's messages in bytes
* local_nentries = remote partition's local_nentries
*/
DBUG_ON(args->msg_size == 0);
DBUG_ON(args->local_nentries == 0);
ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
ch->remote_nentries = args->local_nentries;
if (ch->flags & XPC_C_OPENREQUEST) {
if (args->msg_size != ch->msg_size) {
XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
} else {
ch->msg_size = args->msg_size;
XPC_SET_REASON(ch, 0, 0);
ch->flags &= ~XPC_C_DISCONNECTED;
atomic_inc(&part->nchannels_active);
}
xpc_process_connect(ch, &irq_flags);
}
if (IPI_flags & XPC_IPI_OPENREPLY) {
dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
"local_nentries=%d, remote_nentries=%d) received from "
"partid=%d, channel=%d\n", args->local_msgqueue_pa,
args->local_nentries, args->remote_nentries,
ch->partid, ch->number);
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
DBUG_ON(!(ch->flags & XPC_C_OPENREQUEST));
DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
DBUG_ON(ch->flags & XPC_C_CONNECTED);
/*
* The meaningful OPENREPLY connection state fields are:
* local_msgqueue_pa = physical address of remote
* partition's local_msgqueue
* local_nentries = remote partition's local_nentries
* remote_nentries = remote partition's remote_nentries
*/
DBUG_ON(args->local_msgqueue_pa == 0);
DBUG_ON(args->local_nentries == 0);
DBUG_ON(args->remote_nentries == 0);
ch->flags |= XPC_C_ROPENREPLY;
ch->remote_msgqueue_pa = args->local_msgqueue_pa;
if (args->local_nentries < ch->remote_nentries) {
dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
"remote_nentries=%d, old remote_nentries=%d, "
"partid=%d, channel=%d\n",
args->local_nentries, ch->remote_nentries,
ch->partid, ch->number);
ch->remote_nentries = args->local_nentries;
}
if (args->remote_nentries < ch->local_nentries) {
dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
"local_nentries=%d, old local_nentries=%d, "
"partid=%d, channel=%d\n",
args->remote_nentries, ch->local_nentries,
ch->partid, ch->number);
ch->local_nentries = args->remote_nentries;
}
xpc_process_connect(ch, &irq_flags);
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
/*
* Attempt to establish a channel connection to a remote partition.
*/
static enum xpc_retval
xpc_connect_channel(struct xpc_channel *ch)
{
unsigned long irq_flags;
struct xpc_registration *registration = &xpc_registrations[ch->number];
if (down_interruptible(&registration->sema) != 0) {
return xpcInterrupted;
}
if (!XPC_CHANNEL_REGISTERED(ch->number)) {
up(&registration->sema);
return xpcUnregistered;
}
spin_lock_irqsave(&ch->lock, irq_flags);
DBUG_ON(ch->flags & XPC_C_CONNECTED);
DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
if (ch->flags & XPC_C_DISCONNECTING) {
spin_unlock_irqrestore(&ch->lock, irq_flags);
up(&registration->sema);
return ch->reason;
}
/* add info from the channel connect registration to the channel */
ch->kthreads_assigned_limit = registration->assigned_limit;
ch->kthreads_idle_limit = registration->idle_limit;
DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
DBUG_ON(atomic_read(&ch->kthreads_active) != 0);
ch->func = registration->func;
DBUG_ON(registration->func == NULL);
ch->key = registration->key;
ch->local_nentries = registration->nentries;
if (ch->flags & XPC_C_ROPENREQUEST) {
if (registration->msg_size != ch->msg_size) {
/* the local and remote sides aren't the same */
/*
* Because XPC_DISCONNECT_CHANNEL() can block we're
* forced to up the registration sema before we unlock
* the channel lock. But that's okay here because we're
* done with the part that required the registration
* sema. XPC_DISCONNECT_CHANNEL() requires that the
* channel lock be locked and will unlock and relock
* the channel lock as needed.
*/
up(&registration->sema);
XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpcUnequalMsgSizes;
}
} else {
ch->msg_size = registration->msg_size;
XPC_SET_REASON(ch, 0, 0);
ch->flags &= ~XPC_C_DISCONNECTED;
atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
}
up(&registration->sema);
/* initiate the connection */
ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
xpc_IPI_send_openrequest(ch, &irq_flags);
xpc_process_connect(ch, &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpcSuccess;
}
/*
* Notify those who wanted to be notified upon delivery of their message.
*/
static void
xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
{
struct xpc_notify *notify;
u8 notify_type;
s64 get = ch->w_remote_GP.get - 1;
while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
notify = &ch->notify_queue[get % ch->local_nentries];
/*
* See if the notify entry indicates it was associated with
* a message who's sender wants to be notified. It is possible
* that it is, but someone else is doing or has done the
* notification.
*/
notify_type = notify->type;
if (notify_type == 0 ||
cmpxchg(&notify->type, notify_type, 0) !=
notify_type) {
continue;
}
DBUG_ON(notify_type != XPC_N_CALL);
atomic_dec(&ch->n_to_notify);
if (notify->func != NULL) {
dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
"msg_number=%ld, partid=%d, channel=%d\n",
(void *) notify, get, ch->partid, ch->number);
notify->func(reason, ch->partid, ch->number,
notify->key);
dev_dbg(xpc_chan, "notify->func() returned, "
"notify=0x%p, msg_number=%ld, partid=%d, "
"channel=%d\n", (void *) notify, get,
ch->partid, ch->number);
}
}
}
/*
* Clear some of the msg flags in the local message queue.
*/
static inline void
xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
{
struct xpc_msg *msg;
s64 get;
get = ch->w_remote_GP.get;
do {
msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
(get % ch->local_nentries) * ch->msg_size);
msg->flags = 0;
} while (++get < (volatile s64) ch->remote_GP.get);
}
/*
* Clear some of the msg flags in the remote message queue.
*/
static inline void
xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
{
struct xpc_msg *msg;
s64 put;
put = ch->w_remote_GP.put;
do {
msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
(put % ch->remote_nentries) * ch->msg_size);
msg->flags = 0;
} while (++put < (volatile s64) ch->remote_GP.put);
}
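/*
 * Act on a channel's newly pulled GP values: notify senders whose
 * messages the remote side has now received, recycle their message
 * queue entries, and wake or create kthreads to deliver any messages
 * newly sent by the other side.
 */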
static void
xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
{
struct xpc_channel *ch = &part->channels[ch_number];
int nmsgs_sent;
ch->remote_GP = part->remote_GPs[ch_number];
/* See what, if anything, has changed for each connected channel */
xpc_msgqueue_ref(ch);
if (ch->w_remote_GP.get == ch->remote_GP.get &&
ch->w_remote_GP.put == ch->remote_GP.put) {
/* nothing changed since GPs were last pulled */
xpc_msgqueue_deref(ch);
return;
}
	if (!(ch->flags & XPC_C_CONNECTED)) {
xpc_msgqueue_deref(ch);
return;
}
/*
* First check to see if messages recently sent by us have been
* received by the other side. (The remote GET value will have
* changed since we last looked at it.)
*/
if (ch->w_remote_GP.get != ch->remote_GP.get) {
/*
* We need to notify any senders that want to be notified
* that their sent messages have been received by their
* intended recipients. We need to do this before updating
* w_remote_GP.get so that we don't allocate the same message
* queue entries prematurely (see xpc_allocate_msg()).
*/
if (atomic_read(&ch->n_to_notify) > 0) {
/*
* Notify senders that messages sent have been
* received and delivered by the other side.
*/
xpc_notify_senders(ch, xpcMsgDelivered,
ch->remote_GP.get);
}
/*
* Clear msg->flags in previously sent messages, so that
* they're ready for xpc_allocate_msg().
*/
xpc_clear_local_msgqueue_flags(ch);
		*(volatile s64 *) &ch->w_remote_GP.get = ch->remote_GP.get;
dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
"channel=%d\n", ch->w_remote_GP.get, ch->partid,
ch->number);
/*
* If anyone was waiting for message queue entries to become
* available, wake them up.
*/
if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
wake_up(&ch->msg_allocate_wq);
}
}
/*
* Now check for newly sent messages by the other side. (The remote
* PUT value will have changed since we last looked at it.)
*/
if (ch->w_remote_GP.put != ch->remote_GP.put) {
/*
* Clear msg->flags in previously received messages, so that
* they're ready for xpc_get_deliverable_msg().
*/
xpc_clear_remote_msgqueue_flags(ch);
		*(volatile s64 *) &ch->w_remote_GP.put = ch->remote_GP.put;
dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
"channel=%d\n", ch->w_remote_GP.put, ch->partid,
ch->number);
nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get;
if (nmsgs_sent > 0) {
dev_dbg(xpc_chan, "msgs waiting to be copied and "
"delivered=%d, partid=%d, channel=%d\n",
nmsgs_sent, ch->partid, ch->number);
if (ch->flags & XPC_C_CONNECTCALLOUT) {
xpc_activate_kthreads(ch, nmsgs_sent);
}
}
}
xpc_msgqueue_deref(ch);
}
void
xpc_process_channel_activity(struct xpc_partition *part)
{
unsigned long irq_flags;
u64 IPI_amo, IPI_flags;
struct xpc_channel *ch;
int ch_number;
IPI_amo = xpc_get_IPI_flags(part);
/*
* Initiate channel connections for registered channels.
*
* For each connected channel that has pending messages activate idle
* kthreads and/or create new kthreads as needed.
*/
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch = &part->channels[ch_number];
/*
* Process any open or close related IPI flags, and then deal
* with connecting or disconnecting the channel as required.
*/
IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) {
xpc_process_openclose_IPI(part, ch_number, IPI_flags);
}
if (ch->flags & XPC_C_DISCONNECTING) {
spin_lock_irqsave(&ch->lock, irq_flags);
xpc_process_disconnect(ch, &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
continue;
}
if (part->act_state == XPC_P_DEACTIVATING) {
continue;
}
if (!(ch->flags & XPC_C_CONNECTED)) {
if (!(ch->flags & XPC_C_OPENREQUEST)) {
DBUG_ON(ch->flags & XPC_C_SETUP);
(void) xpc_connect_channel(ch);
} else {
spin_lock_irqsave(&ch->lock, irq_flags);
xpc_process_connect(ch, &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
continue;
}
/*
* Process any message related IPI flags, this may involve the
* activation of kthreads to deliver any pending messages sent
* from the other partition.
*/
if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) {
xpc_process_msg_IPI(part, ch_number);
}
}
}
/*
* XPC's heartbeat code calls this function to inform XPC that a partition has
* gone down. XPC responds by tearing down the XPartition Communication
* infrastructure used for the just downed partition.
*
* XPC's heartbeat code will never call this function and xpc_partition_up()
* at the same time. Nor will it ever make multiple calls to either function
* at the same time.
*/
void
xpc_partition_down(struct xpc_partition *part, enum xpc_retval reason)
{
unsigned long irq_flags;
int ch_number;
struct xpc_channel *ch;
dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
XPC_PARTID(part), reason);
if (!xpc_part_ref(part)) {
/* infrastructure for this partition isn't currently set up */
return;
}
/* disconnect all channels associated with the downed partition */
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch = &part->channels[ch_number];
xpc_msgqueue_ref(ch);
spin_lock_irqsave(&ch->lock, irq_flags);
XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
xpc_msgqueue_deref(ch);
}
xpc_wakeup_channel_mgr(part);
xpc_part_deref(part);
}
/*
* Teardown the infrastructure necessary to support XPartition Communication
* between the specified remote partition and the local one.
*/
void
xpc_teardown_infrastructure(struct xpc_partition *part)
{
partid_t partid = XPC_PARTID(part);
/*
* We start off by making this partition inaccessible to local
* processes by marking it as no longer setup. Then we make it
* inaccessible to remote processes by clearing the XPC per partition
* specific variable's magic # (which indicates that these variables
* are no longer valid) and by ignoring all XPC notify IPIs sent to
* this partition.
*/
DBUG_ON(atomic_read(&part->nchannels_active) != 0);
DBUG_ON(part->setup_state != XPC_P_SETUP);
part->setup_state = XPC_P_WTEARDOWN;
xpc_vars_part[partid].magic = 0;
free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);
/*
	 * Before proceeding with the teardown we have to wait until all
* existing references cease.
*/
wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
/* now we can begin tearing down the infrastructure */
part->setup_state = XPC_P_TORNDOWN;
/* in case we've still got outstanding timers registered... */
del_timer_sync(&part->dropped_IPI_timer);
kfree(part->remote_openclose_args_base);
part->remote_openclose_args = NULL;
kfree(part->local_openclose_args_base);
part->local_openclose_args = NULL;
kfree(part->remote_GPs_base);
part->remote_GPs = NULL;
kfree(part->local_GPs_base);
part->local_GPs = NULL;
kfree(part->channels);
part->channels = NULL;
part->local_IPI_amo_va = NULL;
}
/*
* Called by XP at the time of channel connection registration to cause
* XPC to establish connections to all currently active partitions.
*/
void
xpc_initiate_connect(int ch_number)
{
partid_t partid;
struct xpc_partition *part;
struct xpc_channel *ch;
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
ch = &part->channels[ch_number];
if (!(ch->flags & XPC_C_DISCONNECTING)) {
DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
DBUG_ON(ch->flags & XPC_C_CONNECTED);
DBUG_ON(ch->flags & XPC_C_SETUP);
/*
* Initiate the establishment of a connection
* on the newly registered channel to the
* remote partition.
*/
xpc_wakeup_channel_mgr(part);
}
xpc_part_deref(part);
}
}
}
void
xpc_connected_callout(struct xpc_channel *ch)
{
unsigned long irq_flags;
/* let the registerer know that a connection has been established */
if (ch->func != NULL) {
dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
ch->func(xpcConnected, ch->partid, ch->number,
(void *) (u64) ch->local_nentries, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
}
spin_lock_irqsave(&ch->lock, irq_flags);
ch->flags |= XPC_C_CONNECTCALLOUT;
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
/*
* Called by XP at the time of channel connection unregistration to cause
* XPC to teardown all current connections for the specified channel.
*
* Before returning xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed / torn down. So the caller can be
* assured that they will not be receiving any more callouts from XPC to the
* function they registered via xpc_connect().
*
* Arguments:
*
* ch_number - channel # to unregister.
*/
void
xpc_initiate_disconnect(int ch_number)
{
unsigned long irq_flags;
partid_t partid;
struct xpc_partition *part;
struct xpc_channel *ch;
DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
/* initiate the channel disconnect for every active partition */
for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
ch = &part->channels[ch_number];
xpc_msgqueue_ref(ch);
spin_lock_irqsave(&ch->lock, irq_flags);
XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
xpc_msgqueue_deref(ch);
xpc_part_deref(part);
}
}
xpc_disconnect_wait(ch_number);
}
/*
 * Disconnect a channel and reflect it back to all who may be waiting.
*
* >>> An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
* >>> xpc_free_msgqueues().
*
* THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
*/
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
enum xpc_retval reason, unsigned long *irq_flags)
{
u32 flags;
DBUG_ON(!spin_is_locked(&ch->lock));
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
return;
}
DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
reason, line, ch->partid, ch->number);
XPC_SET_REASON(ch, reason, line);
flags = ch->flags;
/* some of these may not have been set */
ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
XPC_C_CONNECTING | XPC_C_CONNECTED);
ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
xpc_IPI_send_closerequest(ch, irq_flags);
if (flags & XPC_C_CONNECTED) {
ch->flags |= XPC_C_WASCONNECTED;
}
if (atomic_read(&ch->kthreads_idle) > 0) {
/* wake all idle kthreads so they can exit */
wake_up_all(&ch->idle_wq);
}
spin_unlock_irqrestore(&ch->lock, *irq_flags);
/* wake those waiting to allocate an entry from the local msg queue */
if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
wake_up(&ch->msg_allocate_wq);
}
/* wake those waiting for notify completion */
if (atomic_read(&ch->n_to_notify) > 0) {
xpc_notify_senders(ch, reason, ch->w_local_GP.put);
}
spin_lock_irqsave(&ch->lock, *irq_flags);
}
void
xpc_disconnected_callout(struct xpc_channel *ch)
{
/*
* Let the channel's registerer know that the channel is now
* disconnected. We don't want to do this if the registerer was never
* informed of a connection being made, unless the disconnect was for
* abnormal reasons.
*/
if (ch->func != NULL) {
dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
"channel=%d\n", ch->reason, ch->partid, ch->number);
ch->func(ch->reason, ch->partid, ch->number, NULL, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
"channel=%d\n", ch->reason, ch->partid, ch->number);
}
}
/*
* Wait for a message entry to become available for the specified channel,
* but don't wait any longer than 1 jiffy.
*/
static enum xpc_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
enum xpc_retval ret;
if (ch->flags & XPC_C_DISCONNECTING) {
DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true?
return ch->reason;
}
atomic_inc(&ch->n_on_msg_allocate_wq);
ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
atomic_dec(&ch->n_on_msg_allocate_wq);
if (ch->flags & XPC_C_DISCONNECTING) {
ret = ch->reason;
DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true?
} else if (ret == 0) {
ret = xpcTimeout;
} else {
ret = xpcInterrupted;
}
return ret;
}
/*
* Allocate an entry for a message from the message queue associated with the
* specified channel.
*/
static enum xpc_retval
xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
struct xpc_msg **address_of_msg)
{
struct xpc_msg *msg;
enum xpc_retval ret;
s64 put;
/* this reference will be dropped in xpc_send_msg() */
xpc_msgqueue_ref(ch);
if (ch->flags & XPC_C_DISCONNECTING) {
xpc_msgqueue_deref(ch);
return ch->reason;
}
if (!(ch->flags & XPC_C_CONNECTED)) {
xpc_msgqueue_deref(ch);
return xpcNotConnected;
}
/*
* Get the next available message entry from the local message queue.
* If none are available, we'll make sure that we grab the latest
* GP values.
*/
ret = xpcTimeout;
while (1) {
put = (volatile s64) ch->w_local_GP.put;
if (put - (volatile s64) ch->w_remote_GP.get <
ch->local_nentries) {
/* There are available message entries. We need to try
* to secure one for ourselves. We'll do this by trying
* to increment w_local_GP.put as long as someone else
* doesn't beat us to it. If they do, we'll have to
* try again.
*/
if (cmpxchg(&ch->w_local_GP.put, put, put + 1) ==
put) {
/* we got the entry referenced by put */
break;
}
continue; /* try again */
}
/*
* There aren't any available msg entries at this time.
*
* In waiting for a message entry to become available,
* we set a timeout in case the other side is not
* sending completion IPIs. This lets us fake an IPI
* that will cause the IPI handler to fetch the latest
* GP values as if an IPI was sent by the other side.
*/
if (ret == xpcTimeout) {
xpc_IPI_send_local_msgrequest(ch);
}
if (flags & XPC_NOWAIT) {
xpc_msgqueue_deref(ch);
return xpcNoWait;
}
ret = xpc_allocate_msg_wait(ch);
if (ret != xpcInterrupted && ret != xpcTimeout) {
xpc_msgqueue_deref(ch);
return ret;
}
}
/* get the message's address and initialize it */
msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
(put % ch->local_nentries) * ch->msg_size);
DBUG_ON(msg->flags != 0);
msg->number = put;
dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
"msg_number=%ld, partid=%d, channel=%d\n", put + 1,
(void *) msg, msg->number, ch->partid, ch->number);
*address_of_msg = msg;
return xpcSuccess;
}
/*
* Allocate an entry for a message from the message queue associated with the
* specified channel. NOTE that this routine can sleep waiting for a message
* entry to become available. To not sleep, pass in the XPC_NOWAIT flag.
*
* Arguments:
*
* partid - ID of partition to which the channel is connected.
* ch_number - channel #.
* flags - see xpc.h for valid flags.
* payload - address of the allocated payload area pointer (filled in on
* return) in which the user-defined message is constructed.
*/
enum xpc_retval
xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
enum xpc_retval ret = xpcUnknownReason;
	struct xpc_msg *msg = NULL;	/* remains NULL if allocation fails */
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
*payload = NULL;
if (xpc_part_ref(part)) {
ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
xpc_part_deref(part);
if (msg != NULL) {
*payload = &msg->payload;
}
}
return ret;
}
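/*
 * Illustrative sketch (hypothetical XPC client, not part of the
 * driver): allocate a message entry, construct the payload in place,
 * then hand it back via xpc_initiate_send(). `my_partid', `MY_CHANNEL'
 * and `my_request' are assumptions made for the example.
 *
 *	void *payload;
 *	enum xpc_retval ret;
 *
 *	ret = xpc_initiate_allocate(my_partid, MY_CHANNEL, XPC_WAIT,
 *				    &payload);
 *	if (ret == xpcSuccess) {
 *		memcpy(payload, &my_request, sizeof(my_request));
 *		ret = xpc_initiate_send(my_partid, MY_CHANNEL, payload);
 *	}
 */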
/*
* Now we actually send the messages that are ready to be sent by advancing
* the local message queue's Put value and then send an IPI to the recipient
* partition.
*/
static void
xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
{
struct xpc_msg *msg;
s64 put = initial_put + 1;
int send_IPI = 0;
while (1) {
while (1) {
if (put == (volatile s64) ch->w_local_GP.put) {
break;
}
msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
(put % ch->local_nentries) * ch->msg_size);
if (!(msg->flags & XPC_M_READY)) {
break;
}
put++;
}
if (put == initial_put) {
/* nothing's changed */
break;
}
if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
initial_put) {
/* someone else beat us to it */
DBUG_ON((volatile s64) ch->local_GP->put < initial_put);
break;
}
/* we just set the new value of local_GP->put */
dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
"channel=%d\n", put, ch->partid, ch->number);
send_IPI = 1;
/*
* We need to ensure that the message referenced by
* local_GP->put is not XPC_M_READY or that local_GP->put
* equals w_local_GP.put, so we'll go have a look.
*/
initial_put = put;
}
if (send_IPI) {
xpc_IPI_send_msgrequest(ch);
}
}
/*
* Common code that does the actual sending of the message by advancing the
* local message queue's Put value and sends an IPI to the partition the
* message is being sent to.
*/
static enum xpc_retval
xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
xpc_notify_func func, void *key)
{
enum xpc_retval ret = xpcSuccess;
struct xpc_notify *notify = NULL; // >>> to keep the compiler happy!!
s64 put, msg_number = msg->number;
DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) !=
msg_number % ch->local_nentries);
DBUG_ON(msg->flags & XPC_M_READY);
if (ch->flags & XPC_C_DISCONNECTING) {
/* drop the reference grabbed in xpc_allocate_msg() */
xpc_msgqueue_deref(ch);
return ch->reason;
}
if (notify_type != 0) {
/*
* Tell the remote side to send an ACK interrupt when the
* message has been delivered.
*/
msg->flags |= XPC_M_INTERRUPT;
atomic_inc(&ch->n_to_notify);
notify = &ch->notify_queue[msg_number % ch->local_nentries];
notify->func = func;
notify->key = key;
		*(volatile u8 *) &notify->type = notify_type;
// >>> is a mb() needed here?
if (ch->flags & XPC_C_DISCONNECTING) {
/*
* An error occurred between our last error check and
* this one. We will try to clear the type field from
* the notify entry. If we succeed then
* xpc_disconnect_channel() didn't already process
* the notify entry.
*/
if (cmpxchg(&notify->type, notify_type, 0) ==
notify_type) {
atomic_dec(&ch->n_to_notify);
ret = ch->reason;
}
/* drop the reference grabbed in xpc_allocate_msg() */
xpc_msgqueue_deref(ch);
return ret;
}
}
msg->flags |= XPC_M_READY;
/*
* The preceding store of msg->flags must occur before the following
* load of ch->local_GP->put.
*/
mb();
/* see if the message is next in line to be sent; if so, send it */
put = ch->local_GP->put;
if (put == msg_number) {
xpc_send_msgs(ch, put);
}
/* drop the reference grabbed in xpc_allocate_msg() */
xpc_msgqueue_deref(ch);
return ret;
}
/*
* Send a message previously allocated using xpc_initiate_allocate() on the
* specified channel connected to the specified partition.
*
* This routine will not wait for the message to be received, nor will
* notification be given when it does happen. Once this routine has
* returned, the message entry allocated via xpc_initiate_allocate() is no
* longer accessible to the caller.
*
* This routine, although called by users, does not call xpc_part_ref() to
* ensure that the partition infrastructure is in place. It relies on the
* fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
*
* Arguments:
*
* partid - ID of partition to which the channel is connected.
* ch_number - channel # to send message on.
* payload - pointer to the payload area allocated via
* xpc_initiate_allocate().
*/
enum xpc_retval
xpc_initiate_send(partid_t partid, int ch_number, void *payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
enum xpc_retval ret;
dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
partid, ch_number);
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
DBUG_ON(msg == NULL);
ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL);
return ret;
}
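/*
 * A minimal usage sketch of the fire-and-forget send path (illustrative
 * only; guarded out of the build). The payload layout is an assumption;
 * note that the payload must not be touched once xpc_initiate_send()
 * has returned.
 */
#if 0
struct example_msg {
	u64 seq;
};

static enum xpc_retval
example_send(partid_t partid, int ch_number, u64 seq)
{
	struct example_msg *m;
	enum xpc_retval ret;

	ret = xpc_initiate_allocate(partid, ch_number, XPC_NOWAIT,
						(void **) &m);
	if (ret != xpcSuccess) {
		return ret;
	}
	m->seq = seq;
	/* ownership of the message entry passes back to XPC here */
	return xpc_initiate_send(partid, ch_number, m);
}
#endif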
/*
* Send a message previously allocated using xpc_initiate_allocate() on the
* specified channel connected to the specified partition.
*
* This routine will not wait for the message to be sent. Once this routine
* has returned, the message entry allocated via xpc_initiate_allocate() is
* no longer accessible to the caller.
*
* Once the remote end of the channel has received the message, the function
* passed as an argument to xpc_initiate_send_notify() will be called. This
* allows the sender to free up or re-use any buffers referenced by the
* message, but does NOT mean the message has been processed at the remote
* end by a receiver.
*
* If this routine returns an error, the caller's function will NOT be called.
*
* This routine, although called by users, does not call xpc_part_ref() to
* ensure that the partition infrastructure is in place. It relies on the
* fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
*
* Arguments:
*
* partid - ID of partition to which the channel is connected.
* ch_number - channel # to send message on.
* payload - pointer to the payload area allocated via
* xpc_initiate_allocate().
* func - function to call with asynchronous notification of message
* receipt. THIS FUNCTION MUST BE NON-BLOCKING.
* key - user-defined key to be passed to the function when it's called.
*/
enum xpc_retval
xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
xpc_notify_func func, void *key)
{
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
enum xpc_retval ret;
dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
partid, ch_number);
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
DBUG_ON(msg == NULL);
DBUG_ON(func == NULL);
ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
func, key);
return ret;
}
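/*
 * A sketch of the notified send path (illustrative only; guarded out of
 * the build). The callback may run from a context that MUST NOT block,
 * so it does nothing more than atomic accounting. All example_* names
 * are hypothetical.
 */
#if 0
static atomic_t example_msgs_in_flight = ATOMIC_INIT(0);

static void
example_delivered(enum xpc_retval reason, partid_t partid, int ch_number,
			void *key)
{
	/* non-blocking: just account for the delivered (or failed) msg */
	atomic_dec(&example_msgs_in_flight);
}

static enum xpc_retval
example_send_notify(partid_t partid, int ch_number, void *payload)
{
	enum xpc_retval ret;

	atomic_inc(&example_msgs_in_flight);
	ret = xpc_initiate_send_notify(partid, ch_number, payload,
						example_delivered, NULL);
	if (ret != xpcSuccess) {
		/* on error the callback will NOT be called; undo the count */
		atomic_dec(&example_msgs_in_flight);
	}
	return ret;
}
#endif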
static struct xpc_msg *
xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
{
struct xpc_partition *part = &xpc_partitions[ch->partid];
struct xpc_msg *remote_msg, *msg;
u32 msg_index, nmsgs;
u64 msg_offset;
enum xpc_retval ret;
if (down_interruptible(&ch->msg_to_pull_sema) != 0) {
/* we were interrupted by a signal */
return NULL;
}
while (get >= ch->next_msg_to_pull) {
/* pull as many messages as are ready and able to be pulled */
msg_index = ch->next_msg_to_pull % ch->remote_nentries;
DBUG_ON(ch->next_msg_to_pull >=
(volatile s64) ch->w_remote_GP.put);
nmsgs = (volatile s64) ch->w_remote_GP.put -
ch->next_msg_to_pull;
if (msg_index + nmsgs > ch->remote_nentries) {
/* ignore the ones that wrap the msg queue for now */
nmsgs = ch->remote_nentries - msg_index;
}
msg_offset = msg_index * ch->msg_size;
msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
msg_offset);
remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa +
msg_offset);
if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
nmsgs * ch->msg_size)) != xpcSuccess) {
dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
" msg %ld from partition %d, channel=%d, "
"ret=%d\n", nmsgs, ch->next_msg_to_pull,
ch->partid, ch->number, ret);
XPC_DEACTIVATE_PARTITION(part, ret);
up(&ch->msg_to_pull_sema);
return NULL;
}
mb(); /* >>> this may not be needed, we're not sure */
ch->next_msg_to_pull += nmsgs;
}
up(&ch->msg_to_pull_sema);
/* return the message we were looking for */
msg_offset = (get % ch->remote_nentries) * ch->msg_size;
msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset);
return msg;
}
/*
* Get a message to be delivered.
*/
static struct xpc_msg *
xpc_get_deliverable_msg(struct xpc_channel *ch)
{
struct xpc_msg *msg = NULL;
s64 get;
do {
if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) {
break;
}
get = (volatile s64) ch->w_local_GP.get;
if (get == (volatile s64) ch->w_remote_GP.put) {
break;
}
/* There are messages waiting to be pulled and delivered.
* We need to try to secure one for ourselves. We'll do this
* by trying to increment w_local_GP.get and hope that no one
* else beats us to it. If they do, we'll simply have
* to try again for the next one.
*/
if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
/* we got the entry referenced by get */
dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
"partid=%d, channel=%d\n", get + 1,
ch->partid, ch->number);
/* pull the message from the remote partition */
msg = xpc_pull_remote_msg(ch, get);
DBUG_ON(msg != NULL && msg->number != get);
DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE));
DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY));
break;
}
} while (1);
return msg;
}
/*
* Deliver a message to its intended recipient.
*/
void
xpc_deliver_msg(struct xpc_channel *ch)
{
struct xpc_msg *msg;
if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
/*
* This ref is taken to protect the payload itself from being
* freed before the user is finished with it, which the user
* indicates by calling xpc_initiate_received().
*/
xpc_msgqueue_ref(ch);
atomic_inc(&ch->kthreads_active);
if (ch->func != NULL) {
dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
"msg_number=%ld, partid=%d, channel=%d\n",
(void *) msg, msg->number, ch->partid,
ch->number);
/* deliver the message to its intended recipient */
ch->func(xpcMsgReceived, ch->partid, ch->number,
&msg->payload, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
"msg_number=%ld, partid=%d, channel=%d\n",
(void *) msg, msg->number, ch->partid,
ch->number);
}
atomic_dec(&ch->kthreads_active);
}
}
/*
* Now we actually acknowledge the messages that have been delivered and
* ack'd by advancing the cached remote message queue's Get value and, if
* requested, sending an IPI to the message sender's partition.
*/
static void
xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
{
struct xpc_msg *msg;
s64 get = initial_get + 1;
int send_IPI = 0;
while (1) {
while (1) {
if (get == (volatile s64) ch->w_local_GP.get) {
break;
}
msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
(get % ch->remote_nentries) * ch->msg_size);
if (!(msg->flags & XPC_M_DONE)) {
break;
}
msg_flags |= msg->flags;
get++;
}
if (get == initial_get) {
/* nothing's changed */
break;
}
if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
initial_get) {
/* someone else beat us to it */
DBUG_ON((volatile s64) ch->local_GP->get <=
initial_get);
break;
}
/* we just set the new value of local_GP->get */
dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
"channel=%d\n", get, ch->partid, ch->number);
send_IPI = (msg_flags & XPC_M_INTERRUPT);
/*
* We need to ensure that the message referenced by
* local_GP->get is not XPC_M_DONE or that local_GP->get
* equals w_local_GP.get, so we'll go have a look.
*/
initial_get = get;
}
if (send_IPI) {
xpc_IPI_send_msgrequest(ch);
}
}
/*
* Acknowledge receipt of a delivered message.
*
* If a message has XPC_M_INTERRUPT set, send an interrupt to the partition
* that sent the message.
*
* This function, although called by users, does not call xpc_part_ref() to
* ensure that the partition infrastructure is in place. It relies on the
* fact that we called xpc_msgqueue_ref() in xpc_deliver_msg().
*
* Arguments:
*
* partid - ID of partition to which the channel is connected.
* ch_number - channel # message received on.
* payload - pointer to the payload area allocated via
* xpc_initiate_allocate().
*/
void
xpc_initiate_received(partid_t partid, int ch_number, void *payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_channel *ch;
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
s64 get, msg_number = msg->number;
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
ch = &part->channels[ch_number];
dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
(void *) msg, msg_number, ch->partid, ch->number);
DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) !=
msg_number % ch->remote_nentries);
DBUG_ON(msg->flags & XPC_M_DONE);
msg->flags |= XPC_M_DONE;
/*
* The preceding store of msg->flags must occur before the following
* load of ch->local_GP->get.
*/
mb();
/*
* See if this message is next in line to be acknowledged as having
* been delivered.
*/
get = ch->local_GP->get;
if (get == msg_number) {
xpc_acknowledge_msgs(ch, get, msg->flags);
}
/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
xpc_msgqueue_deref(ch);
}
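/*
 * A sketch of the receive side (illustrative only; guarded out of the
 * build). A channel function registered via xpc_connect() is handed each
 * delivered payload by xpc_deliver_msg() and must eventually acknowledge
 * it with xpc_initiate_received(). The payload layout is an assumption.
 */
#if 0
static void
example_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
			void *data, void *key)
{
	u64 *seq = (u64 *) data;

	if (reason != xpcMsgReceived) {
		/* connection state callout, not a message delivery */
		return;
	}
	dev_dbg(xpc_chan, "received seq %ld\n", *seq);
	/* lets XPC advance Get and reclaim the message entry */
	xpc_initiate_received(partid, ch_number, seq);
}
#endif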
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition Communication (XPC) support - standard version.
*
* XPC provides a message passing capability that crosses partition
* boundaries. This module is made up of two parts:
*
* partition This part detects the presence/absence of other
* partitions. It provides a heartbeat and monitors
* the heartbeats of other partitions.
*
* channel This part manages the channels and sends/receives
* messages across them to/from other partitions.
*
* There are a couple of additional functions residing in XP, which
* provide an interface to XPC for its users.
*
*
* Caveats:
*
* . We currently have no way to determine which nasid an IPI came
* from. Thus, xpc_IPI_send() does a remote AMO write followed by
* an IPI. The AMO indicates where data is to be pulled from, so
* after the IPI arrives, the remote partition checks the AMO word.
* The IPI can actually arrive before the AMO, however, so other code
* must periodically check for this case. Also, remote AMO operations
* do not reliably time out. Thus we do a remote PIO read solely to
* know whether the remote partition is down and whether we should
* stop sending IPIs to it. This remote PIO read operation is set up
* in a special nofault region so SAL knows to ignore (and cleanup)
* any errors due to the remote AMO write, PIO read, and/or PIO
* write operations.
*
* If/when new hardware solves this IPI problem, we should abandon
* the current approach.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include <asm/uaccess.h>
#include "xpc.h"
/* define two XPC debug device structures to be used with dev_dbg() et al */
struct device_driver xpc_dbg_name = {
.name = "xpc"
};
struct device xpc_part_dbg_subname = {
.bus_id = {0}, /* set to "part" at xpc_init() time */
.driver = &xpc_dbg_name
};
struct device xpc_chan_dbg_subname = {
.bus_id = {0}, /* set to "chan" at xpc_init() time */
.driver = &xpc_dbg_name
};
struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;
/* systune related variables for /proc/sys directories */
static int xpc_hb_min = 1;
static int xpc_hb_max = 10;
static int xpc_hb_check_min = 10;
static int xpc_hb_check_max = 120;
static ctl_table xpc_sys_xpc_hb_dir[] = {
	{
		.ctl_name	= 1,
		.procname	= "hb_interval",
		.data		= &xpc_hb_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &xpc_hb_min,
		.extra2		= &xpc_hb_max
	},
	{
		.ctl_name	= 2,
		.procname	= "hb_check_interval",
		.data		= &xpc_hb_check_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &xpc_hb_check_min,
		.extra2		= &xpc_hb_check_max
	},
	{0}
};
static ctl_table xpc_sys_xpc_dir[] = {
	{
		.ctl_name	= 1,
		.procname	= "hb",
		.mode		= 0555,
		.child		= xpc_sys_xpc_hb_dir
	},
	{0}
};
static ctl_table xpc_sys_dir[] = {
	{
		.ctl_name	= 1,
		.procname	= "xpc",
		.mode		= 0555,
		.child		= xpc_sys_xpc_dir
	},
	{0}
};
static struct ctl_table_header *xpc_sysctl;
/* #of IRQs received */
static atomic_t xpc_act_IRQ_rcvd;
/* IRQ handler notifies this wait queue on receipt of an IRQ */
static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
static unsigned long xpc_hb_check_timeout;
/* xpc_hb_checker thread exited notification */
static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
/* xpc_discovery thread exited notification */
static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
static struct timer_list xpc_hb_timer;
static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
/*
* Notify the heartbeat check thread that an IRQ has been received.
*/
static irqreturn_t
xpc_act_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs)
{
atomic_inc(&xpc_act_IRQ_rcvd);
wake_up_interruptible(&xpc_act_IRQ_wq);
return IRQ_HANDLED;
}
/*
* Timer to produce the heartbeat. The timer structure's function is
* already set when this is initially called. A tunable is used to
* specify when the next timeout should occur.
*/
static void
xpc_hb_beater(unsigned long dummy)
{
xpc_vars->heartbeat++;
if (jiffies >= xpc_hb_check_timeout) {
wake_up_interruptible(&xpc_act_IRQ_wq);
}
xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
add_timer(&xpc_hb_timer);
}
/*
* This thread is responsible for nearly all of the partition
* activation/deactivation.
*/
static int
xpc_hb_checker(void *ignore)
{
int last_IRQ_count = 0;
int new_IRQ_count;
int force_IRQ=0;
/* this thread was marked active by xpc_hb_init() */
daemonize(XPC_HB_CHECK_THREAD_NAME);
set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
while (!(volatile int) xpc_exiting) {
/* wait for IRQ or timeout */
(void) wait_event_interruptible(xpc_act_IRQ_wq,
(last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
jiffies >= xpc_hb_check_timeout ||
(volatile int) xpc_exiting));
dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
"been received\n",
(int) (xpc_hb_check_timeout - jiffies),
atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
/* checking of remote heartbeats is skewed by IRQ handling */
if (jiffies >= xpc_hb_check_timeout) {
dev_dbg(xpc_part, "checking remote heartbeats\n");
xpc_check_remote_hb();
/*
* We need to periodically recheck to ensure no
* IPI/AMO pairs have been missed. That check
* must always reset xpc_hb_check_timeout.
*/
force_IRQ = 1;
}
new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
force_IRQ = 0;
dev_dbg(xpc_part, "found an IRQ to process; will be "
"resetting xpc_hb_check_timeout\n");
last_IRQ_count += xpc_identify_act_IRQ_sender();
if (last_IRQ_count < new_IRQ_count) {
/* retry once to help avoid missing AMO */
(void) xpc_identify_act_IRQ_sender();
}
last_IRQ_count = new_IRQ_count;
xpc_hb_check_timeout = jiffies +
(xpc_hb_check_interval * HZ);
}
}
dev_dbg(xpc_part, "heartbeat checker is exiting\n");
/* mark this thread as inactive */
up(&xpc_hb_checker_exited);
return 0;
}
/*
* This thread will attempt to discover other partitions to activate
* based on info provided by SAL. This new thread is short-lived and
* will exit once discovery is complete.
*/
static int
xpc_initiate_discovery(void *ignore)
{
daemonize(XPC_DISCOVERY_THREAD_NAME);
xpc_discovery();
dev_dbg(xpc_part, "discovery thread is exiting\n");
/* mark this thread as inactive */
up(&xpc_discovery_exited);
return 0;
}
/*
* Establish first contact with the remote partition. This involves pulling
* the XPC per partition variables from the remote partition and waiting for
* the remote partition to pull ours.
*/
static enum xpc_retval
xpc_make_first_contact(struct xpc_partition *part)
{
enum xpc_retval ret;
while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
if (ret != xpcRetry) {
XPC_DEACTIVATE_PARTITION(part, ret);
return ret;
}
dev_dbg(xpc_chan, "waiting to make first contact with "
"partition %d\n", XPC_PARTID(part));
/* wait a 1/4 of a second or so */
set_current_state(TASK_INTERRUPTIBLE);
(void) schedule_timeout(HZ / 4);
if (part->act_state == XPC_P_DEACTIVATING) {
return part->reason;
}
}
return xpc_mark_partition_active(part);
}
/*
* The first kthread assigned to a newly activated partition is the one
* created by XPC HB, which uses that kthread to call xpc_partition_up().
* XPC hangs on to that kthread until the partition is brought down, at
* which time the kthread returns to XPC HB. (The return of that kthread
* signifies to XPC HB that XPC has dismantled all communication
* infrastructure for the associated partition.) This kthread becomes the
* channel manager for that partition.
*
* Each active partition has a channel manager, which, besides connecting
* and disconnecting channels, ensures that each of the partition's
* connected channels has the required number of assigned kthreads to get
* the work done.
*/
static void
xpc_channel_mgr(struct xpc_partition *part)
{
while (part->act_state != XPC_P_DEACTIVATING ||
atomic_read(&part->nchannels_active) > 0) {
xpc_process_channel_activity(part);
/*
* Wait until we've been requested to activate kthreads or
* all of the channel's message queues have been torn down or
* a signal is pending.
*
* The channel_mgr_requests count is set to 1 after the channel mgr
* has been awakened. This is done to prevent it from making one pass
* through the loop for each request, since it will be servicing all
* the requests in one pass. The reason it's set to 1 instead of 0 is
* so that other kthreads will know that the channel mgr is running
* and won't bother trying to wake it up.
*/
atomic_dec(&part->channel_mgr_requests);
(void) wait_event_interruptible(part->channel_mgr_wq,
(atomic_read(&part->channel_mgr_requests) > 0 ||
(volatile u64) part->local_IPI_amo != 0 ||
((volatile u8) part->act_state ==
XPC_P_DEACTIVATING &&
atomic_read(&part->nchannels_active) == 0)));
atomic_set(&part->channel_mgr_requests, 1);
// >>> Does it need to wakeup periodically as well? In case we
// >>> miscalculated the #of kthreads to wakeup or create?
}
}
/*
* When XPC HB determines that a partition has come up, it will create a new
* kthread and that kthread will call this function to attempt to set up the
* basic infrastructure used for Cross Partition Communication with the newly
* upped partition.
*
* The kthread that was created by XPC HB and which set up the XPC
* infrastructure will remain assigned to the partition until the partition
* goes down, at which time the kthread will tear down the XPC
* infrastructure and then exit.
*
* XPC HB will put the remote partition's XPC per partition specific variables
* physical address into xpc_partitions[partid].remote_vars_part_pa prior to
* calling xpc_partition_up().
*/
static void
xpc_partition_up(struct xpc_partition *part)
{
DBUG_ON(part->channels != NULL);
dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
if (xpc_setup_infrastructure(part) != xpcSuccess) {
return;
}
/*
* The kthread that XPC HB called us with will become the
* channel manager for this partition. It will not return
* back to XPC HB until the partition's XPC infrastructure
* has been dismantled.
*/
(void) xpc_part_ref(part); /* this will always succeed */
if (xpc_make_first_contact(part) == xpcSuccess) {
xpc_channel_mgr(part);
}
xpc_part_deref(part);
xpc_teardown_infrastructure(part);
}
static int
xpc_activating(void *__partid)
{
partid_t partid = (u64) __partid;
struct xpc_partition *part = &xpc_partitions[partid];
unsigned long irq_flags;
struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO - 1 };
int ret;
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_DEACTIVATING) {
part->act_state = XPC_P_INACTIVE;
spin_unlock_irqrestore(&part->act_lock, irq_flags);
part->remote_rp_pa = 0;
return 0;
}
/* indicate the thread is activating */
DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
part->act_state = XPC_P_ACTIVATING;
XPC_SET_REASON(part, 0, 0);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
dev_dbg(xpc_part, "bringing partition %d up\n", partid);
daemonize("xpc%02d", partid);
/*
* This thread needs to run at a realtime priority to prevent a
* significant performance degradation.
*/
ret = sched_setscheduler(current, SCHED_FIFO, &param);
if (ret != 0) {
dev_warn(xpc_part, "unable to set pid %d to a realtime "
"priority, ret=%d\n", current->pid, ret);
}
/* allow this thread and its children to run on any CPU */
set_cpus_allowed(current, CPU_MASK_ALL);
/*
* Register the remote partition's AMOs with SAL so it can handle
* and cleanup errors within that address range should the remote
* partition go down. We don't unregister this range because it is
* difficult to tell when outstanding writes to the remote partition
* are finished and thus when it is safe to unregister. This should
* not result in wasted space in the SAL xp_addr_region table because
* we should get the same page for remote_amos_page_pa after module
* reloads and system reboots.
*/
if (sn_register_xp_addr_region(part->remote_amos_page_pa,
PAGE_SIZE, 1) < 0) {
dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
"xp_addr region\n", partid);
spin_lock_irqsave(&part->act_lock, irq_flags);
part->act_state = XPC_P_INACTIVE;
XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
part->remote_rp_pa = 0;
return 0;
}
XPC_ALLOW_HB(partid, xpc_vars);
xpc_IPI_send_activated(part);
/*
* xpc_partition_up() holds this thread and marks this partition as
* XPC_P_ACTIVE by calling xpc_hb_mark_active().
*/
(void) xpc_partition_up(part);
xpc_mark_partition_inactive(part);
if (part->reason == xpcReactivating) {
/* interrupting ourselves results in activating partition */
xpc_IPI_send_reactivate(part);
}
return 0;
}
void
xpc_activate_partition(struct xpc_partition *part)
{
partid_t partid = XPC_PARTID(part);
unsigned long irq_flags;
pid_t pid;
spin_lock_irqsave(&part->act_lock, irq_flags);
pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
DBUG_ON(part->act_state != XPC_P_INACTIVE);
if (pid > 0) {
part->act_state = XPC_P_ACTIVATION_REQ;
XPC_SET_REASON(part, xpcCloneKThread, __LINE__);
} else {
XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
}
spin_unlock_irqrestore(&part->act_lock, irq_flags);
}
/*
* Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
* partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
* than one partition, we use an AMO_t structure per partition to indicate
* whether a partition has sent an IPI or not. >>> If it has, then wake up the
* associated kthread to handle it.
*
* All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
* running on other partitions.
*
* Noteworthy Arguments:
*
* irq - Interrupt ReQuest number. NOT USED.
*
* dev_id - partid of IPI's potential sender.
*
* regs - processor's context before the processor entered
* interrupt code. NOT USED.
*/
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs)
{
partid_t partid = (partid_t) (u64) dev_id;
struct xpc_partition *part = &xpc_partitions[partid];
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
if (xpc_part_ref(part)) {
xpc_check_for_channel_activity(part);
xpc_part_deref(part);
}
return IRQ_HANDLED;
}
/*
* Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
* because the write to their associated IPI amo completed after the IRQ/IPI
* was received.
*/
void
xpc_dropped_IPI_check(struct xpc_partition *part)
{
if (xpc_part_ref(part)) {
xpc_check_for_channel_activity(part);
part->dropped_IPI_timer.expires = jiffies +
XPC_P_DROPPED_IPI_WAIT;
add_timer(&part->dropped_IPI_timer);
xpc_part_deref(part);
}
}
void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
int idle = atomic_read(&ch->kthreads_idle);
int assigned = atomic_read(&ch->kthreads_assigned);
int wakeup;
DBUG_ON(needed <= 0);
if (idle > 0) {
wakeup = (needed > idle) ? idle : needed;
needed -= wakeup;
dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
"channel=%d\n", wakeup, ch->partid, ch->number);
/* only wakeup the requested number of kthreads */
wake_up_nr(&ch->idle_wq, wakeup);
}
if (needed <= 0) {
return;
}
if (needed + assigned > ch->kthreads_assigned_limit) {
needed = ch->kthreads_assigned_limit - assigned;
// >>>should never be less than 0
if (needed <= 0) {
return;
}
}
dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
needed, ch->partid, ch->number);
xpc_create_kthreads(ch, needed);
}
/*
* This function is where XPC's kthreads wait for messages to deliver.
*/
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
do {
/* deliver messages to their intended recipients */
while ((volatile s64) ch->w_local_GP.get <
(volatile s64) ch->w_remote_GP.put &&
!((volatile u32) ch->flags &
XPC_C_DISCONNECTING)) {
xpc_deliver_msg(ch);
}
if (atomic_inc_return(&ch->kthreads_idle) >
ch->kthreads_idle_limit) {
/* too many idle kthreads on this channel */
atomic_dec(&ch->kthreads_idle);
break;
}
dev_dbg(xpc_chan, "idle kthread calling "
"wait_event_interruptible_exclusive()\n");
(void) wait_event_interruptible_exclusive(ch->idle_wq,
((volatile s64) ch->w_local_GP.get <
(volatile s64) ch->w_remote_GP.put ||
((volatile u32) ch->flags &
XPC_C_DISCONNECTING)));
atomic_dec(&ch->kthreads_idle);
} while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING));
}
static int
xpc_daemonize_kthread(void *args)
{
partid_t partid = XPC_UNPACK_ARG1(args);
u16 ch_number = XPC_UNPACK_ARG2(args);
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_channel *ch;
int n_needed;
daemonize("xpc%02dc%d", partid, ch_number);
dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
partid, ch_number);
ch = &part->channels[ch_number];
if (!(ch->flags & XPC_C_DISCONNECTING)) {
DBUG_ON(!(ch->flags & XPC_C_CONNECTED));
/* let registerer know that connection has been established */
if (atomic_read(&ch->kthreads_assigned) == 1) {
xpc_connected_callout(ch);
/*
* It is possible that while the callout was being
* made that the remote partition sent some messages.
* If that is the case, we may need to activate
* additional kthreads to help deliver them. We only
* need one less than total #of messages to deliver.
*/
n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
if (n_needed > 0 &&
!(ch->flags & XPC_C_DISCONNECTING)) {
xpc_activate_kthreads(ch, n_needed);
}
}
xpc_kthread_waitmsgs(part, ch);
}
if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
((ch->flags & XPC_C_CONNECTCALLOUT) ||
(ch->reason != xpcUnregistering &&
ch->reason != xpcOtherUnregistering))) {
xpc_disconnected_callout(ch);
}
xpc_msgqueue_deref(ch);
dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
partid, ch_number);
xpc_part_deref(part);
return 0;
}
/*
* For each partition that XPC has established communications with, there is
* a minimum of one kernel thread assigned to perform any operation that
* may potentially sleep or block (basically the callouts to the asynchronous
* functions registered via xpc_connect()).
*
* Additional kthreads are created and destroyed by XPC as the workload
* demands.
*
* A kthread is assigned to one of the active channels that exists for a given
* partition.
*/
void
xpc_create_kthreads(struct xpc_channel *ch, int needed)
{
unsigned long irq_flags;
pid_t pid;
u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
while (needed-- > 0) {
pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
if (pid < 0) {
/* the fork failed */
if (atomic_read(&ch->kthreads_assigned) <
ch->kthreads_idle_limit) {
/*
* Flag this as an error only if we have an
* insufficient #of kthreads for the channel
* to function.
*
* No xpc_msgqueue_ref() is needed here since
* the channel mgr is doing this.
*/
spin_lock_irqsave(&ch->lock, irq_flags);
XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
break;
}
/*
* The following is done on behalf of the newly created
* kthread. That kthread is responsible for doing the
* counterpart to the following before it exits.
*/
(void) xpc_part_ref(&xpc_partitions[ch->partid]);
xpc_msgqueue_ref(ch);
atomic_inc(&ch->kthreads_assigned);
ch->kthreads_created++; // >>> temporary debug only!!!
}
}
void
xpc_disconnect_wait(int ch_number)
{
partid_t partid;
struct xpc_partition *part;
struct xpc_channel *ch;
/* now wait for all callouts to the caller's function to cease */
for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
ch = &part->channels[ch_number];
// >>> how do we keep from falling into the window between our check and going
// >>> down and coming back up where sema is re-inited?
if (ch->flags & XPC_C_SETUP) {
(void) down(&ch->teardown_sema);
}
xpc_part_deref(part);
}
}
}
static void
xpc_do_exit(void)
{
partid_t partid;
int active_part_count;
struct xpc_partition *part;
/* now it's time to eliminate our heartbeat */
del_timer_sync(&xpc_hb_timer);
xpc_vars->heartbeating_to_mask = 0;
/* indicate to others that our reserved page is uninitialized */
xpc_rsvd_page->vars_pa = 0;
/*
* Ignore all incoming interrupts. Without interrupts, the heartbeat
* checker won't activate any new partitions that may come up.
*/
free_irq(SGI_XPC_ACTIVATE, NULL);
/*
* Cause the heartbeat checker and the discovery threads to exit.
* We don't want them attempting to activate new partitions as we
* try to deactivate the existing ones.
*/
xpc_exiting = 1;
wake_up_interruptible(&xpc_act_IRQ_wq);
/* wait for the heartbeat checker thread to mark itself inactive */
down(&xpc_hb_checker_exited);
/* wait for the discovery thread to mark itself inactive */
down(&xpc_discovery_exited);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(3 * HZ / 10);
set_current_state(TASK_RUNNING);
/* wait for all partitions to become inactive */
do {
active_part_count = 0;
for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
part = &xpc_partitions[partid];
if (part->act_state != XPC_P_INACTIVE) {
active_part_count++;
XPC_DEACTIVATE_PARTITION(part, xpcUnloading);
}
}
if (active_part_count) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(3 * HZ / 10);
set_current_state(TASK_RUNNING);
}
} while (active_part_count > 0);
/* close down protections for IPI operations */
xpc_restrict_IPI_ops();
/* clear the interface to XPC's functions */
xpc_clear_interface();
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
}
int __init
xpc_init(void)
{
int ret;
partid_t partid;
struct xpc_partition *part;
pid_t pid;
/*
* xpc_remote_copy_buffer is used as a temporary buffer for bte_copying
* both a partition's reserved page and its XPC variables. Its size was
* based on the size of a reserved page, so we need to ensure that the
* XPC variables will fit as well.
*/
if (XPC_VARS_ALIGNED_SIZE > XPC_RSVD_PAGE_ALIGNED_SIZE) {
dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
return -EPERM;
}
DBUG_ON((u64) xpc_remote_copy_buffer !=
L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer));
snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
xpc_sysctl = register_sysctl_table(xpc_sys_dir, 1);
/*
* The first few fields of each entry of xpc_partitions[] need to
* be initialized now so that calls to xpc_connect() and
* xpc_disconnect() can be made prior to the activation of any remote
* partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
* ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
* PARTITION HAS BEEN ACTIVATED.
*/
for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
part = &xpc_partitions[partid];
DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part));
part->act_IRQ_rcvd = 0;
spin_lock_init(&part->act_lock);
part->act_state = XPC_P_INACTIVE;
XPC_SET_REASON(part, 0, 0);
part->setup_state = XPC_P_UNSET;
init_waitqueue_head(&part->teardown_wq);
atomic_set(&part->references, 0);
}
/*
* Open up protections for IPI operations (and AMO operations on
* Shub 1.1 systems).
*/
xpc_allow_IPI_ops();
/*
* Interrupts being processed will increment this atomic variable and
* awaken the heartbeat thread which will process the interrupts.
*/
atomic_set(&xpc_act_IRQ_rcvd, 0);
/*
* This is safe to do before the xpc_hb_checker thread has started
* because the handler merely wakes up a wait queue. If an interrupt is
* received before the thread is waiting, it will not go to sleep,
* but rather immediately process the interrupt.
*/
ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
"xpc hb", NULL);
if (ret != 0) {
dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
"errno=%d\n", -ret);
xpc_restrict_IPI_ops();
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
return -EBUSY;
}
/*
* Fill the partition reserved page with the information needed by
* other partitions to discover we are alive and establish initial
* communications.
*/
xpc_rsvd_page = xpc_rsvd_page_init();
if (xpc_rsvd_page == NULL) {
dev_err(xpc_part, "could not setup our reserved page\n");
free_irq(SGI_XPC_ACTIVATE, NULL);
xpc_restrict_IPI_ops();
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
return -EBUSY;
}
/*
* Set the beating to other partitions into motion. This is
* the last requirement for other partitions' discovery to
* initiate communications with us.
*/
init_timer(&xpc_hb_timer);
xpc_hb_timer.function = xpc_hb_beater;
xpc_hb_beater(0);
/*
* The real work-horse behind xpc. This processes incoming
* interrupts and monitors remote heartbeats.
*/
pid = kernel_thread(xpc_hb_checker, NULL, 0);
if (pid < 0) {
dev_err(xpc_part, "failed while forking hb check thread\n");
/* indicate to others that our reserved page is uninitialized */
xpc_rsvd_page->vars_pa = 0;
del_timer_sync(&xpc_hb_timer);
free_irq(SGI_XPC_ACTIVATE, NULL);
xpc_restrict_IPI_ops();
if (xpc_sysctl) {
unregister_sysctl_table(xpc_sysctl);
}
return -EBUSY;
}
/*
* Startup a thread that will attempt to discover other partitions to
* activate based on info provided by SAL. This new thread is short-lived
* and will exit once discovery is complete.
*/
pid = kernel_thread(xpc_initiate_discovery, NULL, 0);
if (pid < 0) {
dev_err(xpc_part, "failed while forking discovery thread\n");
/* mark this new thread as a non-starter */
up(&xpc_discovery_exited);
xpc_do_exit();
return -EBUSY;
}
/* set the interface to point at XPC's functions */
xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
xpc_initiate_allocate, xpc_initiate_send,
xpc_initiate_send_notify, xpc_initiate_received,
xpc_initiate_partid_to_nasids);
return 0;
}
module_init(xpc_init);
void __exit
xpc_exit(void)
{
xpc_do_exit();
}
module_exit(xpc_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");
module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
"heartbeat increments.");
module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
"heartbeat checks.");
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition Communication (XPC) partition support.
*
* This is the part of XPC that detects the presence/absence of
* other partitions. It provides a heartbeat and monitors the
* heartbeats of other partitions.
*
*/
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/cache.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <asm/sn/bte.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
#include "xpc.h"
/* XPC is exiting flag */
int xpc_exiting;
/* SH_IPI_ACCESS shub register value on startup */
static u64 xpc_sh1_IPI_access;
static u64 xpc_sh2_IPI_access0;
static u64 xpc_sh2_IPI_access1;
static u64 xpc_sh2_IPI_access2;
static u64 xpc_sh2_IPI_access3;
/* original protection values for each node */
u64 xpc_prot_vec[MAX_COMPACT_NODES];
/* this partition's reserved page */
struct xpc_rsvd_page *xpc_rsvd_page;
/* this partition's XPC variables (within the reserved page) */
struct xpc_vars *xpc_vars;
struct xpc_vars_part *xpc_vars_part;
/*
* For performance reasons, each entry of xpc_partitions[] is cacheline
* aligned. And xpc_partitions[] is padded with an additional entry at the
* end so that the last legitimate entry doesn't share its cacheline with
* another variable.
*/
struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
/*
* Generic buffer used to store a local copy of the remote partition's
* reserved page or XPC variables.
*
* xpc_discovery runs only once and is a separate thread that is
* very likely to be processing in parallel with the receipt of
* interrupts.
*/
char ____cacheline_aligned
xpc_remote_copy_buffer[XPC_RSVD_PAGE_ALIGNED_SIZE];
/* systune related variables */
int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_TIMEOUT;
/*
* Given a nasid, get the physical address of the partition's reserved page
* for that nasid. This function returns 0 on any error.
*/
static u64
xpc_get_rsvd_page_pa(int nasid, u64 buf, u64 buf_size)
{
bte_result_t bte_res;
s64 status;
u64 cookie = 0;
u64 rp_pa = nasid; /* seed with nasid */
u64 len = 0;
while (1) {
status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
&len);
dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
"0x%016lx, address=0x%016lx, len=0x%016lx\n",
status, cookie, rp_pa, len);
if (status != SALRET_MORE_PASSES) {
break;
}
if (len > buf_size) {
dev_err(xpc_part, "len (=0x%016lx) > buf_size\n", len);
status = SALRET_ERROR;
break;
}
bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_size,
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bte_res != BTE_SUCCESS) {
dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
status = SALRET_ERROR;
break;
}
}
if (status != SALRET_OK) {
rp_pa = 0;
}
dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
return rp_pa;
}
/*
* Fill the partition reserved page with the information needed by
* other partitions to discover we are alive and establish initial
* communications.
*/
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
struct xpc_rsvd_page *rp;
AMO_t *amos_page;
u64 rp_pa, next_cl, nasid_array = 0;
int i, ret;
/* get the local reserved page's address */
rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0),
(u64) xpc_remote_copy_buffer,
XPC_RSVD_PAGE_ALIGNED_SIZE);
if (rp_pa == 0) {
dev_err(xpc_part, "SAL failed to locate the reserved page\n");
return NULL;
}
rp = (struct xpc_rsvd_page *) __va(rp_pa);
if (rp->partid != sn_partition_id) {
dev_err(xpc_part, "the reserved page's partid of %d should be "
"%d\n", rp->partid, sn_partition_id);
return NULL;
}
rp->version = XPC_RP_VERSION;
/*
* Place the XPC variables on the cache line following the
* reserved page structure.
*/
next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE;
xpc_vars = (struct xpc_vars *) next_cl;
/*
* Before clearing xpc_vars, see if a page of AMOs had been previously
* allocated. If not we'll need to allocate one and set permissions
* so that cross-partition AMOs are allowed.
*
* The allocated AMO page needs MCA reporting to remain disabled after
* XPC has unloaded. To make this work, we keep a copy of the pointer
* to this page (i.e., amos_page) in the struct xpc_vars structure,
* which is pointed to by the reserved page, and re-use that saved copy
* on subsequent loads of XPC. This AMO page is never freed, and its
* memory protections are never restricted.
*/
if ((amos_page = xpc_vars->amos_page) == NULL) {
amos_page = (AMO_t *) mspec_kalloc_page(0);
if (amos_page == NULL) {
dev_err(xpc_part, "can't allocate page of AMOs\n");
return NULL;
}
/*
* Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
* when xpc_allow_IPI_ops() is called via xpc_hb_init().
*/
if (!enable_shub_wars_1_1()) {
ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
&nasid_array);
if (ret != 0) {
dev_err(xpc_part, "can't change memory "
"protections\n");
mspec_kfree_page((unsigned long) amos_page);
return NULL;
}
}
} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
/*
* EFI's XPBOOT can also set amos_page in the reserved page,
* but it happens to leave it as an uncached physical address
* and we need it to be an uncached virtual address, so we'll have to
* convert it.
*/
if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
dev_err(xpc_part, "previously used amos_page address "
"is bad = 0x%p\n", (void *) amos_page);
return NULL;
}
amos_page = (AMO_t *) TO_AMO((u64) amos_page);
}
memset(xpc_vars, 0, sizeof(struct xpc_vars));
/*
* Place the XPC per partition specific variables on the cache line
* following the XPC variables structure.
*/
next_cl += XPC_VARS_ALIGNED_SIZE;
memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) *
XP_MAX_PARTITIONS);
xpc_vars_part = (struct xpc_vars_part *) next_cl;
xpc_vars->vars_part_pa = __pa(next_cl);
xpc_vars->version = XPC_V_VERSION;
xpc_vars->act_nasid = cpuid_to_nasid(0);
xpc_vars->act_phys_cpuid = cpu_physical_id(0);
xpc_vars->amos_page = amos_page; /* save for next load of XPC */
/*
* Initialize the activation related AMO variables.
*/
xpc_vars->act_amos = xpc_IPI_init(XP_MAX_PARTITIONS);
for (i = 1; i < XP_NASID_MASK_WORDS; i++) {
xpc_IPI_init(i + XP_MAX_PARTITIONS);
}
/* export AMO page's physical address to other partitions */
xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page);
/*
* This signifies to the remote partition that our reserved
* page is initialized.
*/
(volatile u64) rp->vars_pa = __pa(xpc_vars);
return rp;
}
/*
* Change protections to allow IPI operations (and AMO operations on
* Shub 1.1 systems).
*/
void
xpc_allow_IPI_ops(void)
{
int node;
int nasid;
// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
if (is_shub2()) {
xpc_sh2_IPI_access0 =
(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
xpc_sh2_IPI_access1 =
(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
xpc_sh2_IPI_access2 =
(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
xpc_sh2_IPI_access3 =
(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
for_each_online_node(node) {
nasid = cnodeid_to_nasid(node);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
-1UL);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
-1UL);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
-1UL);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
-1UL);
}
} else {
xpc_sh1_IPI_access =
(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
for_each_online_node(node) {
nasid = cnodeid_to_nasid(node);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
-1UL);
/*
* Since the BIST collides with memory operations on
* SHUB 1.1 sn_change_memprotect() cannot be used.
*/
if (enable_shub_wars_1_1()) {
/* open up everything */
xpc_prot_vec[node] = (u64) HUB_L((u64 *)
GLOBAL_MMR_ADDR(nasid,
SH1_MD_DQLP_MMR_DIR_PRIVEC0));
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
SH1_MD_DQLP_MMR_DIR_PRIVEC0),
-1UL);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
SH1_MD_DQRP_MMR_DIR_PRIVEC0),
-1UL);
}
}
}
}
/*
* Restrict protections to disallow IPI operations (and AMO operations on
* Shub 1.1 systems).
*/
void
xpc_restrict_IPI_ops(void)
{
int node;
int nasid;
// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
if (is_shub2()) {
for_each_online_node(node) {
nasid = cnodeid_to_nasid(node);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
xpc_sh2_IPI_access0);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
xpc_sh2_IPI_access1);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
xpc_sh2_IPI_access2);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
xpc_sh2_IPI_access3);
}
} else {
for_each_online_node(node) {
nasid = cnodeid_to_nasid(node);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
xpc_sh1_IPI_access);
if (enable_shub_wars_1_1()) {
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
SH1_MD_DQLP_MMR_DIR_PRIVEC0),
xpc_prot_vec[node]);
HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
SH1_MD_DQRP_MMR_DIR_PRIVEC0),
xpc_prot_vec[node]);
}
}
}
}
/*
* At periodic intervals, scan through all active partitions and ensure
* their heartbeats are still active. If not, the partition is deactivated.
*/
void
xpc_check_remote_hb(void)
{
struct xpc_vars *remote_vars;
struct xpc_partition *part;
partid_t partid;
bte_result_t bres;
remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
if (partid == sn_partition_id) {
continue;
}
part = &xpc_partitions[partid];
if (part->act_state == XPC_P_INACTIVE ||
part->act_state == XPC_P_DEACTIVATING) {
continue;
}
/* pull the remote_hb cache line */
bres = xp_bte_copy(part->remote_vars_pa,
ia64_tpa((u64) remote_vars),
XPC_VARS_ALIGNED_SIZE,
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bres != BTE_SUCCESS) {
XPC_DEACTIVATE_PARTITION(part,
xpc_map_bte_errors(bres));
continue;
}
dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
" = %ld, kdb_status = %ld, HB_mask = 0x%lx\n", partid,
remote_vars->heartbeat, part->last_heartbeat,
remote_vars->kdb_status,
remote_vars->heartbeating_to_mask);
if (((remote_vars->heartbeat == part->last_heartbeat) &&
(remote_vars->kdb_status == 0)) ||
!XPC_HB_ALLOWED(sn_partition_id, remote_vars)) {
XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
continue;
}
part->last_heartbeat = remote_vars->heartbeat;
}
}
/*
* Get a copy of the remote partition's rsvd page.
*
* remote_rp points to a buffer that is cacheline aligned for BTE copies and
* assumed to be of size XPC_RSVD_PAGE_ALIGNED_SIZE.
*/
static enum xpc_retval
xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
struct xpc_rsvd_page *remote_rp, u64 *remote_rsvd_page_pa)
{
int bres, i;
/* get the reserved page's physical address */
*remote_rsvd_page_pa = xpc_get_rsvd_page_pa(nasid, (u64) remote_rp,
XPC_RSVD_PAGE_ALIGNED_SIZE);
if (*remote_rsvd_page_pa == 0) {
return xpcNoRsvdPageAddr;
}
/* pull over the reserved page structure */
bres = xp_bte_copy(*remote_rsvd_page_pa, ia64_tpa((u64) remote_rp),
XPC_RSVD_PAGE_ALIGNED_SIZE,
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bres != BTE_SUCCESS) {
return xpc_map_bte_errors(bres);
}
if (discovered_nasids != NULL) {
for (i = 0; i < XP_NASID_MASK_WORDS; i++) {
discovered_nasids[i] |= remote_rp->part_nasids[i];
}
}
/* check that the partid is for another partition */
if (remote_rp->partid < 1 ||
remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
return xpcInvalidPartid;
}
if (remote_rp->partid == sn_partition_id) {
return xpcLocalPartid;
}
if (XPC_VERSION_MAJOR(remote_rp->version) !=
XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
return xpcBadVersion;
}
return xpcSuccess;
}
/*
* Get a copy of the remote partition's XPC variables.
*
* remote_vars points to a buffer that is cacheline aligned for BTE copies and
* assumed to be of size XPC_VARS_ALIGNED_SIZE.
*/
static enum xpc_retval
xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
{
int bres;
if (remote_vars_pa == 0) {
return xpcVarsNotSet;
}
/* pull over the cross partition variables */
bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
XPC_VARS_ALIGNED_SIZE,
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bres != BTE_SUCCESS) {
return xpc_map_bte_errors(bres);
}
if (XPC_VERSION_MAJOR(remote_vars->version) !=
XPC_VERSION_MAJOR(XPC_V_VERSION)) {
return xpcBadVersion;
}
return xpcSuccess;
}
/*
* Prior code has determined the nasid which generated an IPI. Inspect
* that nasid to determine if its partition needs to be activated or
* deactivated.
*
* A partition is considered "awaiting activation" if our partition
* flags indicate it is not active and it has a heartbeat. A
* partition is considered "awaiting deactivation" if our partition
* flags indicate it is active but it has no heartbeat or it is not
* sending its heartbeat to us.
*
* To determine the heartbeat, the remote nasid must have a properly
* initialized reserved page.
*/
static void
xpc_identify_act_IRQ_req(int nasid)
{
struct xpc_rsvd_page *remote_rp;
struct xpc_vars *remote_vars;
u64 remote_rsvd_page_pa;
u64 remote_vars_pa;
partid_t partid;
struct xpc_partition *part;
enum xpc_retval ret;
/* pull over the reserved page structure */
remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;
ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rsvd_page_pa);
if (ret != xpcSuccess) {
dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
"which sent interrupt, reason=%d\n", nasid, ret);
return;
}
remote_vars_pa = remote_rp->vars_pa;
partid = remote_rp->partid;
part = &xpc_partitions[partid];
/* pull over the cross partition variables */
remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
if (ret != xpcSuccess) {
dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
"which sent interrupt, reason=%d\n", nasid, ret);
XPC_DEACTIVATE_PARTITION(part, ret);
return;
}
part->act_IRQ_rcvd++;
dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
"%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd,
remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
if (part->act_state == XPC_P_INACTIVE) {
part->remote_rp_pa = remote_rsvd_page_pa;
dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n",
part->remote_rp_pa);
part->remote_vars_pa = remote_vars_pa;
dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
part->remote_vars_pa);
part->last_heartbeat = remote_vars->heartbeat;
dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
part->last_heartbeat);
part->remote_vars_part_pa = remote_vars->vars_part_pa;
dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
part->remote_vars_part_pa);
part->remote_act_nasid = remote_vars->act_nasid;
dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n",
part->remote_act_nasid);
part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid;
dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n",
part->remote_act_phys_cpuid);
part->remote_amos_page_pa = remote_vars->amos_page_pa;
dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
part->remote_amos_page_pa);
xpc_activate_partition(part);
} else if (part->remote_amos_page_pa != remote_vars->amos_page_pa ||
!XPC_HB_ALLOWED(sn_partition_id, remote_vars)) {
part->reactivate_nasid = nasid;
XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
}
}
/*
* Loop through the activation AMO variables and process any bits
* which are set. Each bit indicates a nasid sending a partition
* activation or deactivation request.
*
* Return #of IRQs detected.
*/
int
xpc_identify_act_IRQ_sender(void)
{
int word, bit;
u64 nasid_mask;
u64 nasid; /* remote nasid */
int n_IRQs_detected = 0;
AMO_t *act_amos;
struct xpc_rsvd_page *rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
act_amos = xpc_vars->act_amos;
/* scan through act AMO variable looking for non-zero entries */
for (word = 0; word < XP_NASID_MASK_WORDS; word++) {
nasid_mask = xpc_IPI_receive(&act_amos[word]);
if (nasid_mask == 0) {
/* no IRQs from nasids in this variable */
continue;
}
dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
nasid_mask);
/*
* If this nasid has been added to the machine since
* our partition was reset, this will retain the
* remote nasid in our reserved page's machine mask.
* This is used in the event of module reload.
*/
rp->mach_nasids[word] |= nasid_mask;
/* locate the nasid(s) which sent interrupts */
for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
if (nasid_mask & (1UL << bit)) {
n_IRQs_detected++;
nasid = XPC_NASID_FROM_W_B(word, bit);
dev_dbg(xpc_part, "interrupt from nasid %ld\n",
nasid);
xpc_identify_act_IRQ_req(nasid);
}
}
}
return n_IRQs_detected;
}
/*
* Mark specified partition as active.
*/
enum xpc_retval
xpc_mark_partition_active(struct xpc_partition *part)
{
unsigned long irq_flags;
enum xpc_retval ret;
dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_ACTIVATING) {
part->act_state = XPC_P_ACTIVE;
ret = xpcSuccess;
} else {
DBUG_ON(part->reason == xpcSuccess);
ret = part->reason;
}
spin_unlock_irqrestore(&part->act_lock, irq_flags);
return ret;
}
/*
* Notify XPC that the partition is down.
*/
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
enum xpc_retval reason)
{
unsigned long irq_flags;
partid_t partid = XPC_PARTID(part);
spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_INACTIVE) {
XPC_SET_REASON(part, reason, line);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
if (reason == xpcReactivating) {
/* we interrupt ourselves to reactivate partition */
xpc_IPI_send_reactivate(part);
}
return;
}
if (part->act_state == XPC_P_DEACTIVATING) {
if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
reason == xpcReactivating) {
XPC_SET_REASON(part, reason, line);
}
spin_unlock_irqrestore(&part->act_lock, irq_flags);
return;
}
part->act_state = XPC_P_DEACTIVATING;
XPC_SET_REASON(part, reason, line);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
XPC_DISALLOW_HB(partid, xpc_vars);
dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", partid,
reason);
xpc_partition_down(part, reason);
}
/*
* Mark specified partition as inactive.
*/
void
xpc_mark_partition_inactive(struct xpc_partition *part)
{
unsigned long irq_flags;
dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
XPC_PARTID(part));
spin_lock_irqsave(&part->act_lock, irq_flags);
part->act_state = XPC_P_INACTIVE;
spin_unlock_irqrestore(&part->act_lock, irq_flags);
part->remote_rp_pa = 0;
}
/*
* SAL has provided a partition and machine mask. The partition mask
* contains a bit for each even nasid in our partition. The machine
* mask contains a bit for each even nasid in the entire machine.
*
* Using those two bit arrays, we can determine which nasids are
* known in the machine. Each should also have a reserved page
* initialized if they are available for partitioning.
*/
void
xpc_discovery(void)
{
void *remote_rp_base;
struct xpc_rsvd_page *remote_rp;
struct xpc_vars *remote_vars;
u64 remote_rsvd_page_pa;
u64 remote_vars_pa;
int region;
int max_regions;
int nasid;
struct xpc_rsvd_page *rp;
partid_t partid;
struct xpc_partition *part;
u64 *discovered_nasids;
enum xpc_retval ret;
remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RSVD_PAGE_ALIGNED_SIZE,
GFP_KERNEL, &remote_rp_base);
if (remote_rp == NULL) {
return;
}
remote_vars = (struct xpc_vars *) remote_rp;
discovered_nasids = kmalloc(sizeof(u64) * XP_NASID_MASK_WORDS,
GFP_KERNEL);
if (discovered_nasids == NULL) {
kfree(remote_rp_base);
return;
}
memset(discovered_nasids, 0, sizeof(u64) * XP_NASID_MASK_WORDS);
rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
/*
* The term 'region' in this context refers to the minimum number of
* nodes that can comprise an access protection grouping. The access
* protection applies to memory, IOI, and IPI.
*/
//>>> move the next two #defines into either include/asm-ia64/sn/arch.h or
//>>> include/asm-ia64/sn/addrs.h
#define SH1_MAX_REGIONS 64
#define SH2_MAX_REGIONS 256
max_regions = is_shub2() ? SH2_MAX_REGIONS : SH1_MAX_REGIONS;
for (region = 0; region < max_regions; region++) {
if ((volatile int) xpc_exiting) {
break;
}
dev_dbg(xpc_part, "searching region %d\n", region);
for (nasid = (region * sn_region_size * 2);
nasid < ((region + 1) * sn_region_size * 2);
nasid += 2) {
if ((volatile int) xpc_exiting) {
break;
}
dev_dbg(xpc_part, "checking nasid %d\n", nasid);
if (XPC_NASID_IN_ARRAY(nasid, rp->part_nasids)) {
dev_dbg(xpc_part, "PROM indicates Nasid %d is "
"part of the local partition; skipping "
"region\n", nasid);
break;
}
if (!(XPC_NASID_IN_ARRAY(nasid, rp->mach_nasids))) {
dev_dbg(xpc_part, "PROM indicates Nasid %d was "
"not on Numa-Link network at reset\n",
nasid);
continue;
}
if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) {
dev_dbg(xpc_part, "Nasid %d is part of a "
"partition which was previously "
"discovered\n", nasid);
continue;
}
/* pull over the reserved page structure */
ret = xpc_get_remote_rp(nasid, discovered_nasids,
remote_rp, &remote_rsvd_page_pa);
if (ret != xpcSuccess) {
dev_dbg(xpc_part, "unable to get reserved page "
"from nasid %d, reason=%d\n", nasid,
ret);
if (ret == xpcLocalPartid) {
break;
}
continue;
}
remote_vars_pa = remote_rp->vars_pa;
partid = remote_rp->partid;
part = &xpc_partitions[partid];
/* pull over the cross partition variables */
ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
if (ret != xpcSuccess) {
dev_dbg(xpc_part, "unable to get XPC variables "
"from nasid %d, reason=%d\n", nasid,
ret);
XPC_DEACTIVATE_PARTITION(part, ret);
continue;
}
if (part->act_state != XPC_P_INACTIVE) {
dev_dbg(xpc_part, "partition %d on nasid %d is "
"already activating\n", partid, nasid);
break;
}
/*
* Register the remote partition's AMOs with SAL so it
* can handle and cleanup errors within that address
* range should the remote partition go down. We don't
* unregister this range because it is difficult to
* tell when outstanding writes to the remote partition
* are finished and thus when it is safe to
* unregister. This should not result in wasted space
* in the SAL xp_addr_region table because we should
* get the same page for remote_act_amos_pa after
* module reloads and system reboots.
*/
if (sn_register_xp_addr_region(
remote_vars->amos_page_pa,
PAGE_SIZE, 1) < 0) {
dev_dbg(xpc_part, "partition %d failed to "
"register xp_addr region 0x%016lx\n",
partid, remote_vars->amos_page_pa);
XPC_SET_REASON(part, xpcPhysAddrRegFailed,
__LINE__);
break;
}
/*
* The remote nasid is valid and available.
* Send an interrupt to that nasid to notify
* it that we are ready to begin activation.
*/
dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, "
"nasid %d, phys_cpuid 0x%x\n",
remote_vars->amos_page_pa,
remote_vars->act_nasid,
remote_vars->act_phys_cpuid);
xpc_IPI_send_activate(remote_vars);
}
}
kfree(discovered_nasids);
kfree(remote_rp_base);
}
/*
* Given a partid, get the nasids owned by that partition from the
* remote partition's reserved page.
*/
enum xpc_retval
xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
{
struct xpc_partition *part;
u64 part_nasid_pa;
int bte_res;
part = &xpc_partitions[partid];
if (part->remote_rp_pa == 0) {
return xpcPartitionDown;
}
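/* hand-rolled offsetof(struct xpc_rsvd_page, part_nasids) */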
part_nasid_pa = part->remote_rp_pa +
(u64) &((struct xpc_rsvd_page *) 0)->part_nasids;
bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
L1_CACHE_ALIGN(XP_NASID_MASK_BYTES),
(BTE_NOTIFY | BTE_WACQUIRE), NULL);
return xpc_map_bte_errors(bte_res);
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
*/
/*
* Cross Partition Network Interface (XPNET) support
*
* XPNET provides a virtual network layered on top of the Cross
* Partition communication layer.
*
* XPNET provides direct point-to-point and broadcast-like support
* for an ethernet-like device. The ethernet broadcast medium is
* replaced with a point-to-point message structure which passes
* pointers to a DMA-capable block that a remote partition should
* retrieve and pass to the upper level networking layer.
*
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <asm/sn/bte.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_sal.h>
#include <asm/types.h>
#include <asm/atomic.h>
#include <asm/sn/xp.h>
/*
* The message payload transferred by XPC.
*
* buf_pa is the physical address where the DMA should pull from.
*
* NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a
* cacheline boundary. To accomplish this, we record the number of
* bytes from the beginning of the first cacheline to the first useful
* byte of the skb (leadin_ignore) and the number of bytes from the
* last useful byte of the skb to the end of the last cacheline
* (tailout_ignore).
*
* size is the number of bytes to transfer; it includes skb->len
* (the useful bytes of the sender's skb) plus the leadin and tailout.
*/
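/*
 * Worked example (illustrative only, assuming L1_CACHE_BYTES == 128):
 * if skb->data begins 40 bytes into its first cacheline and the last
 * useful byte ends 100 bytes into the final cacheline, then
 *
 *	leadin_ignore  = 40
 *	tailout_ignore = 128 - 100 = 28
 *	size           = skb->len + 40 + 28
 *
 * so the BTE always moves whole cachelines and the receiver strips
 * the ignored bytes back off.
 */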
struct xpnet_message {
u16 version; /* Version for this message */
u16 embedded_bytes; /* #of bytes embedded in XPC message */
u32 magic; /* Special number indicating this is xpnet */
u64 buf_pa; /* phys address of buffer to retrieve */
u32 size; /* #of bytes in buffer */
u8 leadin_ignore; /* #of bytes to ignore at the beginning */
u8 tailout_ignore; /* #of bytes to ignore at the end */
unsigned char data; /* body of small packets */
};
/*
* Determine the size of our message, the cacheline-aligned size,
* and then the number of messages we will request from XPC.
*
* XPC expects each message to exist in an individual cacheline.
*/
#define XPNET_MSG_SIZE (L1_CACHE_BYTES - XPC_MSG_PAYLOAD_OFFSET)
#define XPNET_MSG_DATA_MAX \
(XPNET_MSG_SIZE - (u64)(&((struct xpnet_message *)0)->data))
#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE))
#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
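/*
 * Illustrative numbers only (assuming L1_CACHE_BYTES == 128, a 16KB
 * PAGE_SIZE, and a hypothetical XPC_MSG_PAYLOAD_OFFSET of 16):
 *
 *	XPNET_MSG_SIZE         = 128 - 16 = 112
 *	XPNET_MSG_ALIGNED_SIZE = 128
 *	XPNET_MSG_NENTRIES     = 16384 / 128 = 128
 */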
/*
* Version number of XPNET implementation. XPNET can always talk to versions
* with the same major number, and never to versions with a different major.
*/
#define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor))
#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */
#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */
#define XPNET_MAGIC 0x88786984 /* "XNET" */
#define XPNET_VALID_MSG(_m) \
((XPNET_VERSION_MAJOR((_m)->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
&& ((_m)->magic == XPNET_MAGIC))
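/*
 * Example of the compatibility rule (illustrative): a version 1.1
 * (0x11) sender and a version 1.0 (0x10) receiver share major number
 * 1, so they interoperate; a hypothetical 2.0 (0x20) peer would be
 * rejected by XPNET_VALID_MSG().
 */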
#define XPNET_DEVICE_NAME "xp0"
/*
* When messages are queued with xpc_send_notify, a kmalloc'd buffer
* of the following type is passed as a notification cookie. When the
* notification function is called, we use the cookie to decide
* whether all outstanding message sends have completed. The skb can
* then be released.
*/
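/*
 * Reference counting protocol used by the transmit path (sketch):
 *
 *	atomic_set(&queued_msg->use_count, 1);		sender's reference
 *	atomic_inc(&queued_msg->use_count);		per successful send
 *	if (atomic_dec_return(&queued_msg->use_count) == 0)
 *		free the skb and the cookie;		last one out cleans up
 */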
struct xpnet_pending_msg {
struct list_head free_list;
struct sk_buff *skb;
atomic_t use_count;
};
/* driver specific structure pointed to by the device structure */
struct xpnet_dev_private {
struct net_device_stats stats;
};
struct net_device *xpnet_device;
/*
* When we are notified of other partitions activating, we add them to
* our bitmask of partitions to which we broadcast.
*/
static u64 xpnet_broadcast_partitions;
/* protects xpnet_broadcast_partitions above */
static spinlock_t xpnet_broadcast_lock = SPIN_LOCK_UNLOCKED;
/*
* Since the Block Transfer Engine (BTE) is being used for the transfer
* and it relies upon cache-line size transfers, we need to reserve at
* least one cache-line for head and tail alignment. The BTE is
* limited to 8MB transfers.
*
* Testing has shown that changing MTU to greater than 64KB has no effect
* on TCP as the two sides negotiate a Max Segment Size that is limited
* to 64K. Other protocols may use packets greater than this, but for
* now, the default is 64KB.
*/
#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES)
/* 32KB has been determined to be the ideal default MTU */
#define XPNET_DEF_MTU (0x8000UL)
/*
* The partition id is encapsulated in the MAC address. The following
* define locates the octet the partid is in.
*/
#define XPNET_PARTID_OCTET 1
#define XPNET_LICENSE_OCTET 2
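/*
 * Resulting MAC layout (illustrative), as assembled in xpnet_init():
 *
 *	octet 0    : 0xfe (locally administered, unicast)
 *	octet 1    : partition id	(XPNET_PARTID_OCTET)
 *	octets 2-5 : license serial	(from XPNET_LICENSE_OCTET on)
 *
 * e.g. partition 3 with serial 0x12345678 yields fe:03:12:34:56:78.
 */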
/*
* Define the XPNET debug device structure that is to be used with dev_dbg(),
* dev_err(), dev_warn(), and dev_info().
*/
struct device_driver xpnet_dbg_name = {
.name = "xpnet"
};
struct device xpnet_dbg_subname = {
.bus_id = {0}, /* set to "" */
.driver = &xpnet_dbg_name
};
struct device *xpnet = &xpnet_dbg_subname;
/*
* Packet was received by XPC and forwarded to us.
*/
static void
xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
{
struct sk_buff *skb;
bte_result_t bret;
struct xpnet_dev_private *priv =
(struct xpnet_dev_private *) xpnet_device->priv;
if (!XPNET_VALID_MSG(msg)) {
/*
* Packet with a different XPC version. Ignore.
*/
xpc_received(partid, channel, (void *) msg);
priv->stats.rx_errors++;
return;
}
dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
msg->leadin_ignore, msg->tailout_ignore);
/* reserve an extra cache line */
skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
if (!skb) {
dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
msg->size + L1_CACHE_BYTES);
xpc_received(partid, channel, (void *) msg);
priv->stats.rx_errors++;
return;
}
/*
* The allocated skb has some reserved space.  In order to use
* bte_copy(), we need to move skb->data forward so that the
* destination of the transfer is cacheline aligned.
*/
skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
(L1_CACHE_BYTES - 1)) +
msg->leadin_ignore));
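/*
 * e.g. (illustrative, L1_CACHE_BYTES == 128): if skb->data landed at
 * cacheline offset 40 and leadin_ignore is 24, we reserve
 * (128 - 40) + 24 = 112 bytes; the bte_copy() destination below is
 * then cacheline aligned and skb->data points at the first useful
 * byte.
 */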
/*
* Update the tail pointer to indicate data actually
* transferred.
*/
skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore));
/*
* Move the data over from the other side.
*/
if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
(msg->embedded_bytes != 0)) {
dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
"%lu)\n", skb->data, &msg->data,
(size_t) msg->embedded_bytes);
memcpy(skb->data, &msg->data, (size_t) msg->embedded_bytes);
} else {
dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
"bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
(void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
msg->size);
bret = bte_copy(msg->buf_pa,
__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (bret != BTE_SUCCESS) {
// >>> Need better way of cleaning skb. Currently skb
// >>> appears in_use and we can't just call
// >>> dev_kfree_skb.
dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
"error=0x%x\n", (void *)msg->buf_pa,
(void *)__pa((u64)skb->data &
~(L1_CACHE_BYTES - 1)),
msg->size, bret);
xpc_received(partid, channel, (void *) msg);
priv->stats.rx_errors++;
return;
}
}
dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
"skb->end=0x%p skb->len=%d\n", (void *) skb->head,
(void *) skb->data, (void *) skb->tail, (void *) skb->end,
skb->len);
skb->dev = xpnet_device;
skb->protocol = eth_type_trans(skb, xpnet_device);
skb->ip_summed = CHECKSUM_UNNECESSARY;
dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p "
"skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n",
(void *) skb->head, (void *) skb->data, (void *) skb->tail,
(void *) skb->end, skb->len);
xpnet_device->last_rx = jiffies;
priv->stats.rx_packets++;
priv->stats.rx_bytes += skb->len + ETH_HLEN;
netif_rx_ni(skb);
xpc_received(partid, channel, (void *) msg);
}
/*
* This is the handler which XPC calls during any sort of change in
* state or message reception on a connection.
*/
static void
xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
void *data, void *key)
{
long bp;
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
DBUG_ON(channel != XPC_NET_CHANNEL);
switch(reason) {
case xpcMsgReceived: /* message received */
DBUG_ON(data == NULL);
xpnet_receive(partid, channel, (struct xpnet_message *) data);
break;
case xpcConnected: /* connection completed to a partition */
spin_lock_bh(&xpnet_broadcast_lock);
xpnet_broadcast_partitions |= 1UL << (partid - 1);
bp = xpnet_broadcast_partitions;
spin_unlock_bh(&xpnet_broadcast_lock);
netif_carrier_on(xpnet_device);
dev_dbg(xpnet, "%s connection created to partition %d; "
"xpnet_broadcast_partitions=0x%lx\n",
xpnet_device->name, partid, bp);
break;
default:
spin_lock_bh(&xpnet_broadcast_lock);
xpnet_broadcast_partitions &= ~(1UL << (partid - 1));
bp = xpnet_broadcast_partitions;
spin_unlock_bh(&xpnet_broadcast_lock);
if (bp == 0) {
netif_carrier_off(xpnet_device);
}
dev_dbg(xpnet, "%s disconnected from partition %d; "
"xpnet_broadcast_partitions=0x%lx\n",
xpnet_device->name, partid, bp);
break;
}
}
static int
xpnet_dev_open(struct net_device *dev)
{
enum xpc_retval ret;
dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %d, "
"%d)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
XPNET_MAX_IDLE_KTHREADS);
ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
if (ret != xpcSuccess) {
dev_err(xpnet, "ifconfig up of %s failed on XPC connect, "
"ret=%d\n", dev->name, ret);
return -ENOMEM;
}
dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name);
return 0;
}
static int
xpnet_dev_stop(struct net_device *dev)
{
xpc_disconnect(XPC_NET_CHANNEL);
dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name);
return 0;
}
static int
xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
{
/* 68 comes from min TCP+IP+MAC header */
if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) {
dev_err(xpnet, "ifconfig %s mtu %d failed; value must be "
"between 68 and %ld\n", dev->name, new_mtu,
XPNET_MAX_MTU);
return -EINVAL;
}
dev->mtu = new_mtu;
dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu);
return 0;
}
/*
* Required for the net_device structure.
*/
static int
xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map)
{
return 0;
}
/*
* Return statistics to the caller.
*/
static struct net_device_stats *
xpnet_dev_get_stats(struct net_device *dev)
{
struct xpnet_dev_private *priv;
priv = (struct xpnet_dev_private *) dev->priv;
return &priv->stats;
}
/*
* Notification that the other end has received the message and
* DMA'd the skb information. At this point, they are done with
* our side. When all recipients are done processing, we
* release the skb and then release our pending message structure.
*/
static void
xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
void *__qm)
{
struct xpnet_pending_msg *queued_msg =
(struct xpnet_pending_msg *) __qm;
DBUG_ON(queued_msg == NULL);
dev_dbg(xpnet, "message to %d notified with reason %d\n",
partid, reason);
if (atomic_dec_return(&queued_msg->use_count) == 0) {
dev_dbg(xpnet, "all acks for skb->head=-x%p\n",
(void *) queued_msg->skb->head);
dev_kfree_skb_any(queued_msg->skb);
kfree(queued_msg);
}
}
/*
* Network layer has formatted a packet (skb) and is ready to place it
* "on the wire". Prepare and send an xpnet_message to all partitions
* which have connected with us and are targets of this packet.
*
* MAC-NOTE: For the XPNET driver, the MAC address contains the
* destination partition_id.  If the destination partition id octet
* is 0xff, this packet is to be broadcast to all connected partitions.
*/
static int
xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xpnet_pending_msg *queued_msg;
enum xpc_retval ret;
struct xpnet_message *msg;
u64 start_addr, end_addr;
long dp;
u8 second_mac_octet;
partid_t dest_partid;
struct xpnet_dev_private *priv;
u16 embedded_bytes;
priv = (struct xpnet_dev_private *) dev->priv;
dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
"skb->end=0x%p skb->len=%d\n", (void *) skb->head,
(void *) skb->data, (void *) skb->tail, (void *) skb->end,
skb->len);
/*
* The xpnet_pending_msg tracks how many outstanding
* xpc_send_notifies are relying on this skb. When none
* remain, release the skb.
*/
queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
if (queued_msg == NULL) {
dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
"packet\n", sizeof(struct xpnet_pending_msg));
priv->stats.tx_errors++;
return -ENOMEM;
}
/* get the beginning of the first cacheline and end of last */
start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1));
end_addr = L1_CACHE_ALIGN((u64) skb->tail);
/* calculate how many bytes to embed in the XPC message */
embedded_bytes = 0;
if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) {
/* skb->data does fit so embed */
embedded_bytes = skb->len;
}
/*
* Since the send occurs asynchronously, we set the count to one
* and begin sending.  Any sends that complete before we are done
* sending will not free the skb; the final atomic_dec_return() at
* the end of this routine does that instead.  This also handles the
* case of a packet destined for a partition which is no longer up.
*/
atomic_set(&queued_msg->use_count, 1);
queued_msg->skb = skb;
second_mac_octet = skb->data[XPNET_PARTID_OCTET];
if (second_mac_octet == 0xff) {
/* we are being asked to broadcast to all partitions */
dp = xpnet_broadcast_partitions;
} else if (second_mac_octet != 0) {
dp = xpnet_broadcast_partitions &
(1UL << (second_mac_octet - 1));
} else {
/* 0 is an invalid partid. Ignore */
dp = 0;
}
dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp);
/*
* If we wanted to allow promiscuous mode to work like an
* unswitched network, this would be a good point to OR in a
* mask of partitions which should be receiving all packets.
*/
/*
* Main send loop.
*/
for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
dest_partid++) {
if (!(dp & (1UL << (dest_partid - 1)))) {
/* not destined for this partition */
continue;
}
/* remove this partition from the destinations mask */
dp &= ~(1UL << (dest_partid - 1));
/* found a partition to send to */
ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
XPC_NOWAIT, (void **)&msg);
if (unlikely(ret != xpcSuccess)) {
continue;
}
msg->embedded_bytes = embedded_bytes;
if (unlikely(embedded_bytes != 0)) {
msg->version = XPNET_VERSION_EMBED;
dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
&msg->data, skb->data, (size_t) embedded_bytes);
memcpy(&msg->data, skb->data, (size_t) embedded_bytes);
} else {
msg->version = XPNET_VERSION;
}
msg->magic = XPNET_MAGIC;
msg->size = end_addr - start_addr;
msg->leadin_ignore = (u64) skb->data - start_addr;
msg->tailout_ignore = end_addr - (u64) skb->tail;
msg->buf_pa = __pa(start_addr);
dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa="
"0x%lx, msg->size=%u, msg->leadin_ignore=%u, "
"msg->tailout_ignore=%u\n", dest_partid,
XPC_NET_CHANNEL, msg->buf_pa, msg->size,
msg->leadin_ignore, msg->tailout_ignore);
atomic_inc(&queued_msg->use_count);
ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
xpnet_send_completed, queued_msg);
if (unlikely(ret != xpcSuccess)) {
atomic_dec(&queued_msg->use_count);
continue;
}
}
/*
 * Update the stats before dropping our reference; the final
 * atomic_dec_return() below may free the skb.
 */
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
if (atomic_dec_return(&queued_msg->use_count) == 0) {
dev_dbg(xpnet, "no partitions to receive packet\n");
dev_kfree_skb(skb);
kfree(queued_msg);
}
return 0;
}
/*
* Deal with transmit timeouts coming from the network layer.
*/
static void
xpnet_dev_tx_timeout(struct net_device *dev)
{
struct xpnet_dev_private *priv;
priv = (struct xpnet_dev_private *) dev->priv;
priv->stats.tx_errors++;
return;
}
static int __init
xpnet_init(void)
{
int i;
u32 license_num;
int result = -ENOMEM;
dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
/*
* use ether_setup() to init the majority of our device
* structure and then override the necessary pieces.
*/
xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
XPNET_DEVICE_NAME, ether_setup);
if (xpnet_device == NULL) {
return -ENOMEM;
}
netif_carrier_off(xpnet_device);
xpnet_device->mtu = XPNET_DEF_MTU;
xpnet_device->change_mtu = xpnet_dev_change_mtu;
xpnet_device->open = xpnet_dev_open;
xpnet_device->get_stats = xpnet_dev_get_stats;
xpnet_device->stop = xpnet_dev_stop;
xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit;
xpnet_device->tx_timeout = xpnet_dev_tx_timeout;
xpnet_device->set_config = xpnet_dev_set_config;
/*
* Multicast MAC addresses have the LSB of the first octet set.  We
* chose 0xfe for the first octet: the multicast bit is clear, the
* locally administered bit is set, and a collision with any vendor's
* officially issued MAC is unlikely.
*/
xpnet_device->dev_addr[0] = 0xfe;
xpnet_device->dev_addr[XPNET_PARTID_OCTET] = sn_partition_id;
license_num = sn_partition_serial_number_val();
for (i = 3; i >= 0; i--) {
xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
license_num & 0xff;
license_num = license_num >> 8;
}
/*
* ether_setup() sets this to a multicast device. We are
* really not supporting multicast at this time.
*/
xpnet_device->flags &= ~IFF_MULTICAST;
/*
* No need to checksum as it is a DMA transfer. The BTE will
* report an error if the data is not retrievable and the
* packet will be dropped.
*/
xpnet_device->features = NETIF_F_NO_CSUM;
result = register_netdev(xpnet_device);
if (result != 0) {
free_netdev(xpnet_device);
}
return result;
}
module_init(xpnet_init);
static void __exit
xpnet_exit(void)
{
dev_info(xpnet, "unregistering network device %s\n",
xpnet_device->name);
unregister_netdev(xpnet_device);
free_netdev(xpnet_device);
}
module_exit(xpnet_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
MODULE_LICENSE("GPL");
......@@ -301,7 +301,7 @@ void sn_dma_flush(uint64_t addr)
spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
sfdl_flush_lock, flags);
p->sfdl_flush_value = 0;
*p->sfdl_flush_addr = 0;
/* force an interrupt. */
*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
......
......@@ -431,7 +431,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
ca_dmamap->cad_dma_addr = bus_addr;
ca_dmamap->cad_gart_size = entries;
ca_dmamap->cad_gart_entry = entry;
list_add(&ca_dmamap->cad_list, &tioca_kern->ca_list);
list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);
if (xio_addr % ps) {
tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
......
......@@ -405,9 +405,8 @@ void device_release_driver(struct device * dev)
static void driver_detach(struct device_driver * drv)
{
struct list_head * entry, * next;
list_for_each_safe(entry, next, &drv->devices) {
struct device * dev = container_of(entry, struct device, driver_list);
while (!list_empty(&drv->devices)) {
struct device * dev = container_of(drv->devices.next, struct device, driver_list);
device_release_driver(dev);
}
}
......
......@@ -139,7 +139,7 @@ static int dev_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
buffer = &buffer[length];
buffer_size -= length;
if (dev->bus->hotplug) {
if (dev->bus && dev->bus->hotplug) {
/* have the bus specific function add its stuff */
retval = dev->bus->hotplug (dev, envp, num_envp, buffer, buffer_size);
if (retval) {
......
......@@ -408,7 +408,7 @@ config SGI_TIOCX
config SGI_MBCS
tristate "SGI FPGA Core Services driver support"
depends on (IA64_SGI_SN2 || IA64_GENERIC)
depends on SGI_TIOCX
help
If you have an SGI Altix with an attached SABrick
say Y or M here, otherwise say N.
......
......@@ -136,6 +136,7 @@
*/
#define CAC_BASE (CACHED | AS_CAC_SPACE)
#define AMO_BASE (UNCACHED | AS_AMO_SPACE)
#define AMO_PHYS_BASE (UNCACHED_PHYS | AS_AMO_SPACE)
#define GET_BASE (CACHED | AS_GET_SPACE)
/*
......@@ -160,6 +161,13 @@
#define PHYS_TO_DMA(x) ( (((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
/*
* Macros to test for address type.
*/
#define IS_AMO_ADDRESS(x) (((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_BASE)
#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_PHYS_BASE)
/*
* The following definitions pertain to the IO special address
* space. They define the location of the big and little windows
......
......@@ -5,7 +5,7 @@
*
* SGI specific setup.
*
* Copyright (C) 1995-1997,1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
*/
#ifndef _ASM_IA64_SN_ARCH_H
......@@ -47,6 +47,21 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
#define MAX_COMPACT_NODES 2048
#define CPUS_PER_NODE 4
/*
* Compact node ID to nasid mappings kept in the per-cpu data areas of each
* cpu.
*/
DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]);
#define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
extern u8 sn_partition_id;
extern u8 sn_system_size;
extern u8 sn_sharing_domain_size;
extern u8 sn_region_size;
extern void sn_flush_all_caches(long addr, long bytes);
#endif /* _ASM_IA64_SN_ARCH_H */
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_FETCHOP_H
#define _ASM_IA64_SN_FETCHOP_H
#include <linux/config.h>
#define FETCHOP_BASENAME "sgi_fetchop"
#define FETCHOP_FULLNAME "/dev/sgi_fetchop"
#define FETCHOP_VAR_SIZE 64 /* 64 byte per fetchop variable */
#define FETCHOP_LOAD 0
#define FETCHOP_INCREMENT 8
#define FETCHOP_DECREMENT 16
#define FETCHOP_CLEAR 24
#define FETCHOP_STORE 0
#define FETCHOP_AND 24
#define FETCHOP_OR 32
#define FETCHOP_CLEAR_CACHE 56
#define FETCHOP_LOAD_OP(addr, op) ( \
*(volatile long *)((char*) (addr) + (op)))
#define FETCHOP_STORE_OP(addr, op, x) ( \
*(volatile long *)((char*) (addr) + (op)) = (long) (x))
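/*
 * Usage sketch (illustrative only; 'amo' stands for a kernel mapping
 * of one fetchop variable, e.g. obtained via fetchop_kalloc_page()
 * declared below):
 *
 *	long old;
 *
 *	FETCHOP_STORE_OP(amo, FETCHOP_STORE, 0);	initialize to 0
 *	old = FETCHOP_LOAD_OP(amo, FETCHOP_INCREMENT);	atomic fetch-and-inc
 */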
#ifdef __KERNEL__
/*
* Convert a region 6 (kaddr) address to the address of the fetchop variable
*/
#define FETCHOP_KADDR_TO_MSPEC_ADDR(kaddr) TO_MSPEC(kaddr)
/*
* Each Atomic Memory Operation (AMO formerly known as fetchop)
* variable is 64 bytes long. The first 8 bytes are used. The
* remaining 56 bytes are unaddressable because the operation to be
* performed is encoded in that portion of the address.
*
* NOTE: The AMO_t _MUST_ be placed in either the first or second half
* of the cache line. The cache line _MUST NOT_ be used for anything
* other than additional AMO_t entries. This is because there are two
* addresses which reference the same physical cache line. One will
* be a cached entry with the memory type bits all set. This address
* may be loaded into processor cache. The AMO_t will be referenced
* uncached via the memory special (MSPEC) memory type. If any portion of the
* cached cache-line is modified, when that line is flushed, it will
* overwrite the uncached value in physical memory and lead to
* inconsistency.
*/
typedef struct {
u64 variable;
u64 unused[7];
} AMO_t;
/*
* The following APIs are externalized to the kernel to allocate/free pages of
* fetchop variables.
* fetchop_kalloc_page - Allocate/initialize 1 fetchop page on the
* specified cnode.
* fetchop_kfree_page - Free a previously allocated fetchop page
*/
unsigned long fetchop_kalloc_page(int nid);
void fetchop_kfree_page(unsigned long maddr);
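/*
 * Lifecycle sketch (illustrative; error handling omitted):
 *
 *	unsigned long maddr = fetchop_kalloc_page(nid);	one page of AMO_t
 *	AMO_t *amo = (AMO_t *)maddr;
 *	... FETCHOP_LOAD_OP()/FETCHOP_STORE_OP() on &amo->variable ...
 *	fetchop_kfree_page(maddr);
 */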
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_SN_FETCHOP_H */
......@@ -29,8 +29,9 @@
#define L1_BRICKTYPE_CHI_CG 0x76 /* v */
#define L1_BRICKTYPE_X 0x78 /* x */
#define L1_BRICKTYPE_X2 0x79 /* y */
#define L1_BRICKTYPE_SA 0x5e /* ^ */ /* TIO bringup brick */
#define L1_BRICKTYPE_SA 0x5e /* ^ */
#define L1_BRICKTYPE_PA 0x6a /* j */
#define L1_BRICKTYPE_IA 0x6b /* k */
#define L1_BRICKTYPE_ATHENA 0x2b /* + */
#endif /* _ASM_IA64_SN_L1_H */
......@@ -13,7 +13,6 @@
#include <asm/irq.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/pda.h>
#include <asm/sn/bte.h>
/*
......@@ -67,20 +66,18 @@ typedef struct nodepda_s nodepda_t;
* The next set of definitions provides this.
* Routines are expected to use
*
* nodepda -> to access node PDA for the node on which code is running
* subnodepda -> to access subnode PDA for the subnode on which code is running
*
* NODEPDA(cnode) -> to access node PDA for cnodeid
* SUBNODEPDA(cnode,sn) -> to access subnode PDA for cnodeid/subnode
* sn_nodepda - to access node PDA for the node on which code is running
* NODEPDA(cnodeid) - to access node PDA for cnodeid
*/
#define nodepda pda->p_nodepda /* Ptr to this node's PDA */
#define NODEPDA(cnode) (nodepda->pernode_pdaindr[cnode])
DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
#define sn_nodepda (__get_cpu_var(__sn_nodepda))
#define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid])
/*
* Check if given a compact node id the corresponding node has all the
* cpus disabled.
*/
#define is_headless_node(cnode) (nr_cpus_node(cnode) == 0)
#define is_headless_node(cnodeid) (nr_cpus_node(cnodeid) == 0)
#endif /* _ASM_IA64_SN_NODEPDA_H */
......@@ -24,14 +24,6 @@
typedef struct pda_s {
/* Having a pointer at the beginning of the PDA tends to increase
* the chance of having this pointer in cache. (Yes, something
* else gets pushed out.) Doing this reduces the number of memory
* accesses needed to reach any nodepda variable to one.
*/
struct nodepda_s *p_nodepda; /* Pointer to Per node PDA */
struct subnodepda_s *p_subnodepda; /* Pointer to CPU subnode PDA */
/*
* Support for SN LEDs
*/
......@@ -49,7 +41,6 @@ typedef struct pda_s {
unsigned long sn_soft_irr[4];
unsigned long sn_in_service_ivecs[4];
short cnodeid_to_nasid_table[MAX_NUMNODES];
int sn_lb_int_war_ticks;
int sn_last_irq;
int sn_first_irq;
......
......@@ -384,6 +384,17 @@
#define SH_EVENT_OCCURRED_RTC3_INT_SHFT 26
#define SH_EVENT_OCCURRED_RTC3_INT_MASK 0x0000000004000000
/* ==================================================================== */
/* Register "SH_IPI_ACCESS" */
/* CPU interrupt Access Permission Bits */
/* ==================================================================== */
#define SH1_IPI_ACCESS 0x0000000110060480
#define SH2_IPI_ACCESS0 0x0000000010060c00
#define SH2_IPI_ACCESS1 0x0000000010060c80
#define SH2_IPI_ACCESS2 0x0000000010060d00
#define SH2_IPI_ACCESS3 0x0000000010060d80
/* ==================================================================== */
/* Register "SH_INT_CMPB" */
/* RTC Compare Value for Processor B */
......@@ -429,6 +440,19 @@
#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
#define SH_INT_CMPD_REAL_TIME_CMPD_MASK 0x007fffffffffffff
/* ==================================================================== */
/* Register "SH_MD_DQLP_MMR_DIR_PRIVEC0" */
/* privilege vector for acc=0 */
/* ==================================================================== */
#define SH1_MD_DQLP_MMR_DIR_PRIVEC0 0x0000000100030300
/* ==================================================================== */
/* Register "SH_MD_DQRP_MMR_DIR_PRIVEC0" */
/* privilege vector for acc=0 */
/* ==================================================================== */
#define SH1_MD_DQRP_MMR_DIR_PRIVEC0 0x0000000100050300
/* ==================================================================== */
/* Some MMRs are functionally identical (or close enough) on both SHUB1 */
......
......@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SHUBIO_H
......@@ -65,7 +65,6 @@
#define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */
#define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */
#define IIO_ISLAPR 0x00400230 /* IO SXB Local Access Protection Regster */
#define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */
......@@ -216,7 +215,6 @@
#define IIO_IPCR 0x00430000 /* IO Performance Control */
#define IIO_IPPR 0x00430008 /* IO Performance Profiling */
/************************************************************************
* *
* Description: This register echoes some information from the *
......@@ -231,15 +229,14 @@
typedef union ii_wid_u {
uint64_t ii_wid_regval;
struct {
uint64_t w_rsvd_1 : 1;
uint64_t w_mfg_num : 11;
uint64_t w_part_num : 16;
uint64_t w_rev_num : 4;
uint64_t w_rsvd : 32;
uint64_t w_rsvd_1:1;
uint64_t w_mfg_num:11;
uint64_t w_part_num:16;
uint64_t w_rev_num:4;
uint64_t w_rsvd:32;
} ii_wid_fld_s;
} ii_wid_u_t;
/************************************************************************
* *
* The fields in this register are set upon detection of an error *
......@@ -251,20 +248,19 @@ typedef union ii_wid_u {
typedef union ii_wstat_u {
uint64_t ii_wstat_regval;
struct {
uint64_t w_pending : 4;
uint64_t w_xt_crd_to : 1;
uint64_t w_xt_tail_to : 1;
uint64_t w_rsvd_3 : 3;
uint64_t w_tx_mx_rty : 1;
uint64_t w_rsvd_2 : 6;
uint64_t w_llp_tx_cnt : 8;
uint64_t w_rsvd_1 : 8;
uint64_t w_crazy : 1;
uint64_t w_rsvd : 31;
uint64_t w_pending:4;
uint64_t w_xt_crd_to:1;
uint64_t w_xt_tail_to:1;
uint64_t w_rsvd_3:3;
uint64_t w_tx_mx_rty:1;
uint64_t w_rsvd_2:6;
uint64_t w_llp_tx_cnt:8;
uint64_t w_rsvd_1:8;
uint64_t w_crazy:1;
uint64_t w_rsvd:31;
} ii_wstat_fld_s;
} ii_wstat_u_t;
/************************************************************************
* *
* Description: This is a read-write enabled register. It controls *
......@@ -275,18 +271,17 @@ typedef union ii_wstat_u {
typedef union ii_wcr_u {
uint64_t ii_wcr_regval;
struct {
uint64_t w_wid : 4;
uint64_t w_tag : 1;
uint64_t w_rsvd_1 : 8;
uint64_t w_dst_crd : 3;
uint64_t w_f_bad_pkt : 1;
uint64_t w_dir_con : 1;
uint64_t w_e_thresh : 5;
uint64_t w_rsvd : 41;
uint64_t w_wid:4;
uint64_t w_tag:1;
uint64_t w_rsvd_1:8;
uint64_t w_dst_crd:3;
uint64_t w_f_bad_pkt:1;
uint64_t w_dir_con:1;
uint64_t w_e_thresh:5;
uint64_t w_rsvd:41;
} ii_wcr_fld_s;
} ii_wcr_u_t;
/************************************************************************
* *
* Description: This register's value is a bit vector that guards *
......@@ -317,13 +312,10 @@ typedef union ii_wcr_u {
typedef union ii_ilapr_u {
uint64_t ii_ilapr_regval;
struct {
uint64_t i_region : 64;
uint64_t i_region:64;
} ii_ilapr_fld_s;
} ii_ilapr_u_t;
/************************************************************************
* *
* Description: A write to this register of the 64-bit value *
......@@ -340,12 +332,10 @@ typedef union ii_ilapr_u {
typedef union ii_ilapo_u {
uint64_t ii_ilapo_regval;
struct {
uint64_t i_io_ovrride : 64;
uint64_t i_io_ovrride:64;
} ii_ilapo_fld_s;
} ii_ilapo_u_t;
/************************************************************************
* *
* This register qualifies all the PIO and Graphics writes launched *
......@@ -356,14 +346,13 @@ typedef union ii_ilapo_u {
typedef union ii_iowa_u {
uint64_t ii_iowa_regval;
struct {
uint64_t i_w0_oac : 1;
uint64_t i_rsvd_1 : 7;
uint64_t i_wx_oac : 8;
uint64_t i_rsvd : 48;
uint64_t i_w0_oac:1;
uint64_t i_rsvd_1:7;
uint64_t i_wx_oac:8;
uint64_t i_rsvd:48;
} ii_iowa_fld_s;
} ii_iowa_u_t;
/************************************************************************
* *
* Description: This register qualifies all the requests launched *
......@@ -376,15 +365,13 @@ typedef union ii_iowa_u {
typedef union ii_iiwa_u {
uint64_t ii_iiwa_regval;
struct {
uint64_t i_w0_iac : 1;
uint64_t i_rsvd_1 : 7;
uint64_t i_wx_iac : 8;
uint64_t i_rsvd : 48;
uint64_t i_w0_iac:1;
uint64_t i_rsvd_1:7;
uint64_t i_wx_iac:8;
uint64_t i_rsvd:48;
} ii_iiwa_fld_s;
} ii_iiwa_u_t;
/************************************************************************
* *
* Description: This register qualifies all the operations launched *
......@@ -407,18 +394,17 @@ typedef union ii_iiwa_u {
typedef union ii_iidem_u {
uint64_t ii_iidem_regval;
struct {
uint64_t i_w8_dxs : 8;
uint64_t i_w9_dxs : 8;
uint64_t i_wa_dxs : 8;
uint64_t i_wb_dxs : 8;
uint64_t i_wc_dxs : 8;
uint64_t i_wd_dxs : 8;
uint64_t i_we_dxs : 8;
uint64_t i_wf_dxs : 8;
uint64_t i_w8_dxs:8;
uint64_t i_w9_dxs:8;
uint64_t i_wa_dxs:8;
uint64_t i_wb_dxs:8;
uint64_t i_wc_dxs:8;
uint64_t i_wd_dxs:8;
uint64_t i_we_dxs:8;
uint64_t i_wf_dxs:8;
} ii_iidem_fld_s;
} ii_iidem_u_t;
/************************************************************************
* *
* This register contains the various programmable fields necessary *
......@@ -429,25 +415,24 @@ typedef union ii_iidem_u {
typedef union ii_ilcsr_u {
uint64_t ii_ilcsr_regval;
struct {
uint64_t i_nullto : 6;
uint64_t i_rsvd_4 : 2;
uint64_t i_wrmrst : 1;
uint64_t i_rsvd_3 : 1;
uint64_t i_llp_en : 1;
uint64_t i_bm8 : 1;
uint64_t i_llp_stat : 2;
uint64_t i_remote_power : 1;
uint64_t i_rsvd_2 : 1;
uint64_t i_maxrtry : 10;
uint64_t i_d_avail_sel : 2;
uint64_t i_rsvd_1 : 4;
uint64_t i_maxbrst : 10;
uint64_t i_rsvd : 22;
uint64_t i_nullto:6;
uint64_t i_rsvd_4:2;
uint64_t i_wrmrst:1;
uint64_t i_rsvd_3:1;
uint64_t i_llp_en:1;
uint64_t i_bm8:1;
uint64_t i_llp_stat:2;
uint64_t i_remote_power:1;
uint64_t i_rsvd_2:1;
uint64_t i_maxrtry:10;
uint64_t i_d_avail_sel:2;
uint64_t i_rsvd_1:4;
uint64_t i_maxbrst:10;
uint64_t i_rsvd:22;
} ii_ilcsr_fld_s;
} ii_ilcsr_u_t;
/************************************************************************
* *
* This is simply a status registers that monitors the LLP error *
......@@ -458,13 +443,12 @@ typedef union ii_ilcsr_u {
typedef union ii_illr_u {
uint64_t ii_illr_regval;
struct {
uint64_t i_sn_cnt : 16;
uint64_t i_cb_cnt : 16;
uint64_t i_rsvd : 32;
uint64_t i_sn_cnt:16;
uint64_t i_cb_cnt:16;
uint64_t i_rsvd:32;
} ii_illr_fld_s;
} ii_illr_u_t;
/************************************************************************
* *
* Description: All II-detected non-BTE error interrupts are *
......@@ -482,22 +466,20 @@ typedef union ii_illr_u {
typedef union ii_iidsr_u {
uint64_t ii_iidsr_regval;
struct {
uint64_t i_level : 8;
uint64_t i_pi_id : 1;
uint64_t i_node : 11;
uint64_t i_rsvd_3 : 4;
uint64_t i_enable : 1;
uint64_t i_rsvd_2 : 3;
uint64_t i_int_sent : 2;
uint64_t i_rsvd_1 : 2;
uint64_t i_pi0_forward_int : 1;
uint64_t i_pi1_forward_int : 1;
uint64_t i_rsvd : 30;
uint64_t i_level:8;
uint64_t i_pi_id:1;
uint64_t i_node:11;
uint64_t i_rsvd_3:4;
uint64_t i_enable:1;
uint64_t i_rsvd_2:3;
uint64_t i_int_sent:2;
uint64_t i_rsvd_1:2;
uint64_t i_pi0_forward_int:1;
uint64_t i_pi1_forward_int:1;
uint64_t i_rsvd:30;
} ii_iidsr_fld_s;
} ii_iidsr_u_t;
/************************************************************************
* *
* There are two instances of this register. This register is used *
......@@ -512,15 +494,14 @@ typedef union ii_iidsr_u {
typedef union ii_igfx0_u {
uint64_t ii_igfx0_regval;
struct {
uint64_t i_w_num : 4;
uint64_t i_pi_id : 1;
uint64_t i_n_num : 12;
uint64_t i_p_num : 1;
uint64_t i_rsvd : 46;
uint64_t i_w_num:4;
uint64_t i_pi_id:1;
uint64_t i_n_num:12;
uint64_t i_p_num:1;
uint64_t i_rsvd:46;
} ii_igfx0_fld_s;
} ii_igfx0_u_t;
/************************************************************************
* *
* There are two instances of this register. This register is used *
......@@ -535,15 +516,14 @@ typedef union ii_igfx0_u {
typedef union ii_igfx1_u {
uint64_t ii_igfx1_regval;
struct {
uint64_t i_w_num : 4;
uint64_t i_pi_id : 1;
uint64_t i_n_num : 12;
uint64_t i_p_num : 1;
uint64_t i_rsvd : 46;
uint64_t i_w_num:4;
uint64_t i_pi_id:1;
uint64_t i_n_num:12;
uint64_t i_p_num:1;
uint64_t i_rsvd:46;
} ii_igfx1_fld_s;
} ii_igfx1_u_t;
/************************************************************************
* *
* There are two instances of this registers. These registers are *
......@@ -554,12 +534,10 @@ typedef union ii_igfx1_u {
typedef union ii_iscr0_u {
uint64_t ii_iscr0_regval;
struct {
uint64_t i_scratch : 64;
uint64_t i_scratch:64;
} ii_iscr0_fld_s;
} ii_iscr0_u_t;
/************************************************************************
* *
* There are two instances of this registers. These registers are *
......@@ -570,11 +548,10 @@ typedef union ii_iscr0_u {
typedef union ii_iscr1_u {
uint64_t ii_iscr1_regval;
struct {
uint64_t i_scratch : 64;
uint64_t i_scratch:64;
} ii_iscr1_fld_s;
} ii_iscr1_u_t;
/************************************************************************
* *
* Description: There are seven instances of translation table entry *
......@@ -605,15 +582,14 @@ typedef union ii_iscr1_u {
typedef union ii_itte1_u {
uint64_t ii_itte1_regval;
struct {
uint64_t i_offset : 5;
uint64_t i_rsvd_1 : 3;
uint64_t i_w_num : 4;
uint64_t i_iosp : 1;
uint64_t i_rsvd : 51;
uint64_t i_offset:5;
uint64_t i_rsvd_1:3;
uint64_t i_w_num:4;
uint64_t i_iosp:1;
uint64_t i_rsvd:51;
} ii_itte1_fld_s;
} ii_itte1_u_t;
/************************************************************************
* *
* Description: There are seven instances of translation table entry *
......@@ -644,15 +620,14 @@ typedef union ii_itte1_u {
typedef union ii_itte2_u {
uint64_t ii_itte2_regval;
struct {
uint64_t i_offset : 5;
uint64_t i_rsvd_1 : 3;
uint64_t i_w_num : 4;
uint64_t i_iosp : 1;
uint64_t i_rsvd : 51;
uint64_t i_offset:5;
uint64_t i_rsvd_1:3;
uint64_t i_w_num:4;
uint64_t i_iosp:1;
uint64_t i_rsvd:51;
} ii_itte2_fld_s;
} ii_itte2_u_t;
/************************************************************************
* *
* Description: There are seven instances of translation table entry *
......@@ -683,15 +658,14 @@ typedef union ii_itte2_u {
typedef union ii_itte3_u {
uint64_t ii_itte3_regval;
struct {
uint64_t i_offset : 5;
uint64_t i_rsvd_1 : 3;
uint64_t i_w_num : 4;
uint64_t i_iosp : 1;
uint64_t i_rsvd : 51;
uint64_t i_offset:5;
uint64_t i_rsvd_1:3;
uint64_t i_w_num:4;
uint64_t i_iosp:1;
uint64_t i_rsvd:51;
} ii_itte3_fld_s;
} ii_itte3_u_t;
/************************************************************************
* *
* Description: There are seven instances of translation table entry *
......@@ -722,15 +696,14 @@ typedef union ii_itte3_u {
typedef union ii_itte4_u {
uint64_t ii_itte4_regval;
struct {
uint64_t i_offset : 5;
uint64_t i_rsvd_1 : 3;
uint64_t i_w_num : 4;
uint64_t i_iosp : 1;
uint64_t i_rsvd : 51;
uint64_t i_offset:5;
uint64_t i_rsvd_1:3;
uint64_t i_w_num:4;
uint64_t i_iosp:1;
uint64_t i_rsvd:51;
} ii_itte4_fld_s;
} ii_itte4_u_t;
/************************************************************************
* *
* Description: There are seven instances of translation table entry *
......@@ -761,15 +734,14 @@ typedef union ii_itte4_u {
typedef union ii_itte5_u {
uint64_t ii_itte5_regval;
struct {
uint64_t i_offset : 5;
uint64_t i_rsvd_1 : 3;
uint64_t i_w_num : 4;
uint64_t i_iosp : 1;
uint64_t i_rsvd : 51;
uint64_t i_offset:5;
uint64_t i_rsvd_1:3;
uint64_t i_w_num:4;
uint64_t i_iosp:1;
uint64_t i_rsvd:51;
} ii_itte5_fld_s;
} ii_itte5_u_t;
/************************************************************************
* *
* Description: There are seven instances of translation table entry *
......@@ -800,15 +772,14 @@ typedef union ii_itte5_u {
typedef union ii_itte6_u {
uint64_t ii_itte6_regval;
struct {
uint64_t i_offset : 5;
uint64_t i_rsvd_1 : 3;
uint64_t i_w_num : 4;
uint64_t i_iosp : 1;
uint64_t i_rsvd : 51;
uint64_t i_offset:5;
uint64_t i_rsvd_1:3;
uint64_t i_w_num:4;
uint64_t i_iosp:1;
uint64_t i_rsvd:51;
} ii_itte6_fld_s;
} ii_itte6_u_t;
/************************************************************************
* *
* Description: There are seven instances of translation table entry *
......@@ -839,15 +810,14 @@ typedef union ii_itte6_u {
typedef union ii_itte7_u {
uint64_t ii_itte7_regval;
struct {
uint64_t i_offset : 5;
uint64_t i_rsvd_1 : 3;
uint64_t i_w_num : 4;
uint64_t i_iosp : 1;
uint64_t i_rsvd : 51;
uint64_t i_offset:5;
uint64_t i_rsvd_1:3;
uint64_t i_w_num:4;
uint64_t i_iosp:1;
uint64_t i_rsvd:51;
} ii_itte7_fld_s;
} ii_itte7_u_t;
/************************************************************************
* *
* Description: There are 9 instances of this register, one per *
......@@ -875,24 +845,23 @@ typedef union ii_itte7_u {
typedef union ii_iprb0_u {
uint64_t ii_iprb0_regval;
struct {
uint64_t i_c : 8;
uint64_t i_na : 14;
uint64_t i_rsvd_2 : 2;
uint64_t i_nb : 14;
uint64_t i_rsvd_1 : 2;
uint64_t i_m : 2;
uint64_t i_f : 1;
uint64_t i_of_cnt : 5;
uint64_t i_error : 1;
uint64_t i_rd_to : 1;
uint64_t i_spur_wr : 1;
uint64_t i_spur_rd : 1;
uint64_t i_rsvd : 11;
uint64_t i_mult_err : 1;
uint64_t i_c:8;
uint64_t i_na:14;
uint64_t i_rsvd_2:2;
uint64_t i_nb:14;
uint64_t i_rsvd_1:2;
uint64_t i_m:2;
uint64_t i_f:1;
uint64_t i_of_cnt:5;
uint64_t i_error:1;
uint64_t i_rd_to:1;
uint64_t i_spur_wr:1;
uint64_t i_spur_rd:1;
uint64_t i_rsvd:11;
uint64_t i_mult_err:1;
} ii_iprb0_fld_s;
} ii_iprb0_u_t;
/************************************************************************
* *
* Description: There are 9 instances of this register, one per *
......@@ -920,24 +889,23 @@ typedef union ii_iprb0_u {
typedef union ii_iprb8_u {
uint64_t ii_iprb8_regval;
struct {
uint64_t i_c : 8;
uint64_t i_na : 14;
uint64_t i_rsvd_2 : 2;
uint64_t i_nb : 14;
uint64_t i_rsvd_1 : 2;
uint64_t i_m : 2;
uint64_t i_f : 1;
uint64_t i_of_cnt : 5;
uint64_t i_error : 1;
uint64_t i_rd_to : 1;
uint64_t i_spur_wr : 1;
uint64_t i_spur_rd : 1;
uint64_t i_rsvd : 11;
uint64_t i_mult_err : 1;
uint64_t i_c:8;
uint64_t i_na:14;
uint64_t i_rsvd_2:2;
uint64_t i_nb:14;
uint64_t i_rsvd_1:2;
uint64_t i_m:2;
uint64_t i_f:1;
uint64_t i_of_cnt:5;
uint64_t i_error:1;
uint64_t i_rd_to:1;
uint64_t i_spur_wr:1;
uint64_t i_spur_rd:1;
uint64_t i_rsvd:11;
uint64_t i_mult_err:1;
} ii_iprb8_fld_s;
} ii_iprb8_u_t;
/************************************************************************
* *
* Description: There are 9 instances of this register, one per *
......@@ -965,24 +933,23 @@ typedef union ii_iprb8_u {
typedef union ii_iprb9_u {
uint64_t ii_iprb9_regval;
struct {
uint64_t i_c : 8;
uint64_t i_na : 14;
uint64_t i_rsvd_2 : 2;
uint64_t i_nb : 14;
uint64_t i_rsvd_1 : 2;
uint64_t i_m : 2;
uint64_t i_f : 1;
uint64_t i_of_cnt : 5;
uint64_t i_error : 1;
uint64_t i_rd_to : 1;
uint64_t i_spur_wr : 1;
uint64_t i_spur_rd : 1;
uint64_t i_rsvd : 11;
uint64_t i_mult_err : 1;
uint64_t i_c:8;
uint64_t i_na:14;
uint64_t i_rsvd_2:2;
uint64_t i_nb:14;
uint64_t i_rsvd_1:2;
uint64_t i_m:2;
uint64_t i_f:1;
uint64_t i_of_cnt:5;
uint64_t i_error:1;
uint64_t i_rd_to:1;
uint64_t i_spur_wr:1;
uint64_t i_spur_rd:1;
uint64_t i_rsvd:11;
uint64_t i_mult_err:1;
} ii_iprb9_fld_s;
} ii_iprb9_u_t;
/************************************************************************
* *
* Description: There are 9 instances of this register, one per *
......@@ -1010,24 +977,23 @@ typedef union ii_iprb9_u {
typedef union ii_iprba_u {
uint64_t ii_iprba_regval;
struct {
uint64_t i_c : 8;
uint64_t i_na : 14;
uint64_t i_rsvd_2 : 2;
uint64_t i_nb : 14;
uint64_t i_rsvd_1 : 2;
uint64_t i_m : 2;
uint64_t i_f : 1;
uint64_t i_of_cnt : 5;
uint64_t i_error : 1;
uint64_t i_rd_to : 1;
uint64_t i_spur_wr : 1;
uint64_t i_spur_rd : 1;
uint64_t i_rsvd : 11;
uint64_t i_mult_err : 1;
uint64_t i_c:8;
uint64_t i_na:14;
uint64_t i_rsvd_2:2;
uint64_t i_nb:14;
uint64_t i_rsvd_1:2;
uint64_t i_m:2;
uint64_t i_f:1;
uint64_t i_of_cnt:5;
uint64_t i_error:1;
uint64_t i_rd_to:1;
uint64_t i_spur_wr:1;
uint64_t i_spur_rd:1;
uint64_t i_rsvd:11;
uint64_t i_mult_err:1;
} ii_iprba_fld_s;
} ii_iprba_u_t;
/************************************************************************
* *
* Description: There are 9 instances of this register, one per *
......@@ -1055,24 +1021,23 @@ typedef union ii_iprba_u {
typedef union ii_iprbb_u {
uint64_t ii_iprbb_regval;
struct {
uint64_t i_c : 8;
uint64_t i_na : 14;
uint64_t i_rsvd_2 : 2;
uint64_t i_nb : 14;
uint64_t i_rsvd_1 : 2;
uint64_t i_m : 2;
uint64_t i_f : 1;
uint64_t i_of_cnt : 5;
uint64_t i_error : 1;
uint64_t i_rd_to : 1;
uint64_t i_spur_wr : 1;
uint64_t i_spur_rd : 1;
uint64_t i_rsvd : 11;
uint64_t i_mult_err : 1;
uint64_t i_c:8;
uint64_t i_na:14;
uint64_t i_rsvd_2:2;
uint64_t i_nb:14;
uint64_t i_rsvd_1:2;
uint64_t i_m:2;
uint64_t i_f:1;
uint64_t i_of_cnt:5;
uint64_t i_error:1;
uint64_t i_rd_to:1;
uint64_t i_spur_wr:1;
uint64_t i_spur_rd:1;
uint64_t i_rsvd:11;
uint64_t i_mult_err:1;
} ii_iprbb_fld_s;
} ii_iprbb_u_t;
/************************************************************************
* *
* Description: There are 9 instances of this register, one per *
......@@ -1100,24 +1065,23 @@ typedef union ii_iprbb_u {
typedef union ii_iprbc_u {
uint64_t ii_iprbc_regval;
struct {
uint64_t i_c : 8;
uint64_t i_na : 14;
uint64_t i_rsvd_2 : 2;
uint64_t i_nb : 14;
uint64_t i_rsvd_1 : 2;
uint64_t i_m : 2;
uint64_t i_f : 1;
uint64_t i_of_cnt : 5;
uint64_t i_error : 1;
uint64_t i_rd_to : 1;
uint64_t i_spur_wr : 1;
uint64_t i_spur_rd : 1;
uint64_t i_rsvd : 11;
uint64_t i_mult_err : 1;
uint64_t i_c:8;
uint64_t i_na:14;
uint64_t i_rsvd_2:2;
uint64_t i_nb:14;
uint64_t i_rsvd_1:2;
uint64_t i_m:2;
uint64_t i_f:1;
uint64_t i_of_cnt:5;
uint64_t i_error:1;
uint64_t i_rd_to:1;
uint64_t i_spur_wr:1;
uint64_t i_spur_rd:1;
uint64_t i_rsvd:11;
uint64_t i_mult_err:1;
} ii_iprbc_fld_s;
} ii_iprbc_u_t;
/************************************************************************
* *
* Description: There are 9 instances of this register, one per *
......@@ -1145,24 +1109,23 @@ typedef union ii_iprbc_u {
typedef union ii_iprbd_u {
uint64_t ii_iprbd_regval;
struct {
uint64_t i_c : 8;
uint64_t i_na : 14;
uint64_t i_rsvd_2 : 2;
uint64_t i_nb : 14;
uint64_t i_rsvd_1 : 2;
uint64_t i_m : 2;
uint64_t i_f : 1;
uint64_t i_of_cnt : 5;
uint64_t i_error : 1;
uint64_t i_rd_to : 1;
uint64_t i_spur_wr : 1;
uint64_t i_spur_rd : 1;
uint64_t i_rsvd : 11;
uint64_t i_mult_err : 1;
uint64_t i_c:8;
uint64_t i_na:14;
uint64_t i_rsvd_2:2;
uint64_t i_nb:14;
uint64_t i_rsvd_1:2;
uint64_t i_m:2;
uint64_t i_f:1;
uint64_t i_of_cnt:5;
uint64_t i_error:1;
uint64_t i_rd_to:1;
uint64_t i_spur_wr:1;
uint64_t i_spur_rd:1;
uint64_t i_rsvd:11;
uint64_t i_mult_err:1;
} ii_iprbd_fld_s;
} ii_iprbd_u_t;
/************************************************************************
* *
* Description: There are 9 instances of this register, one per *
......@@ -1190,24 +1153,23 @@ typedef union ii_iprbd_u {
typedef union ii_iprbe_u {
uint64_t ii_iprbe_regval;
struct {
uint64_t i_c : 8;
uint64_t i_na : 14;
uint64_t i_rsvd_2 : 2;
uint64_t i_nb : 14;
uint64_t i_rsvd_1 : 2;
uint64_t i_m : 2;
uint64_t i_f : 1;
uint64_t i_of_cnt : 5;
uint64_t i_error : 1;
uint64_t i_rd_to : 1;
uint64_t i_spur_wr : 1;
uint64_t i_spur_rd : 1;
uint64_t i_rsvd : 11;
uint64_t i_mult_err : 1;
uint64_t i_c:8;
uint64_t i_na:14;
uint64_t i_rsvd_2:2;
uint64_t i_nb:14;
uint64_t i_rsvd_1:2;
uint64_t i_m:2;
uint64_t i_f:1;
uint64_t i_of_cnt:5;
uint64_t i_error:1;
uint64_t i_rd_to:1;
uint64_t i_spur_wr:1;
uint64_t i_spur_rd:1;
uint64_t i_rsvd:11;
uint64_t i_mult_err:1;
} ii_iprbe_fld_s;
} ii_iprbe_u_t;
/************************************************************************
* *
* Description: There are 9 instances of this register, one per *
......@@ -1235,24 +1197,23 @@ typedef union ii_iprbe_u {
typedef union ii_iprbf_u {
uint64_t ii_iprbf_regval;
struct {
uint64_t i_c : 8;
uint64_t i_na : 14;
uint64_t i_rsvd_2 : 2;
uint64_t i_nb : 14;
uint64_t i_rsvd_1 : 2;
uint64_t i_m : 2;
uint64_t i_f : 1;
uint64_t i_of_cnt : 5;
uint64_t i_error : 1;
uint64_t i_rd_to : 1;
uint64_t i_spur_wr : 1;
uint64_t i_spur_rd : 1;
uint64_t i_rsvd : 11;
uint64_t i_mult_err : 1;
uint64_t i_c:8;
uint64_t i_na:14;
uint64_t i_rsvd_2:2;
uint64_t i_nb:14;
uint64_t i_rsvd_1:2;
uint64_t i_m:2;
uint64_t i_f:1;
uint64_t i_of_cnt:5;
uint64_t i_error:1;
uint64_t i_rd_to:1;
uint64_t i_spur_wr:1;
uint64_t i_spur_rd:1;
uint64_t i_rsvd:11;
uint64_t i_mult_err:1;
} ii_iprbf_fld_s;
} ii_iprbf_u_t;
/************************************************************************
* *
* This register specifies the timeout value to use for monitoring *
......@@ -1273,12 +1234,11 @@ typedef union ii_iprbf_u {
typedef union ii_ixcc_u {
uint64_t ii_ixcc_regval;
struct {
uint64_t i_time_out : 26;
uint64_t i_rsvd : 38;
uint64_t i_time_out:26;
uint64_t i_rsvd:38;
} ii_ixcc_fld_s;
} ii_ixcc_u_t;
/************************************************************************
* *
* Description: This register qualifies all the PIO and DMA *
......@@ -1298,19 +1258,17 @@ typedef union ii_ixcc_u {
typedef union ii_imem_u {
uint64_t ii_imem_regval;
struct {
uint64_t i_w0_esd : 1;
uint64_t i_rsvd_3 : 3;
uint64_t i_b0_esd : 1;
uint64_t i_rsvd_2 : 3;
uint64_t i_b1_esd : 1;
uint64_t i_rsvd_1 : 3;
uint64_t i_clr_precise : 1;
uint64_t i_rsvd : 51;
uint64_t i_w0_esd:1;
uint64_t i_rsvd_3:3;
uint64_t i_b0_esd:1;
uint64_t i_rsvd_2:3;
uint64_t i_b1_esd:1;
uint64_t i_rsvd_1:3;
uint64_t i_clr_precise:1;
uint64_t i_rsvd:51;
} ii_imem_fld_s;
} ii_imem_u_t;
/************************************************************************
* *
* Description: This register specifies the timeout value to use for *
......@@ -1338,15 +1296,14 @@ typedef union ii_imem_u {
typedef union ii_ixtt_u {
uint64_t ii_ixtt_regval;
struct {
uint64_t i_tail_to : 26;
uint64_t i_rsvd_1 : 6;
uint64_t i_rrsp_ps : 23;
uint64_t i_rrsp_to : 5;
uint64_t i_rsvd : 4;
uint64_t i_tail_to:26;
uint64_t i_rsvd_1:6;
uint64_t i_rrsp_ps:23;
uint64_t i_rrsp_to:5;
uint64_t i_rsvd:4;
} ii_ixtt_fld_s;
} ii_ixtt_u_t;
/************************************************************************
* *
* Writing a 1 to the fields of this register clears the appropriate *
......@@ -1361,39 +1318,38 @@ typedef union ii_ixtt_u {
typedef union ii_ieclr_u {
uint64_t ii_ieclr_regval;
struct {
uint64_t i_e_prb_0 : 1;
uint64_t i_rsvd : 7;
uint64_t i_e_prb_8 : 1;
uint64_t i_e_prb_9 : 1;
uint64_t i_e_prb_a : 1;
uint64_t i_e_prb_b : 1;
uint64_t i_e_prb_c : 1;
uint64_t i_e_prb_d : 1;
uint64_t i_e_prb_e : 1;
uint64_t i_e_prb_f : 1;
uint64_t i_e_crazy : 1;
uint64_t i_e_bte_0 : 1;
uint64_t i_e_bte_1 : 1;
uint64_t i_reserved_1 : 10;
uint64_t i_spur_rd_hdr : 1;
uint64_t i_cam_intr_to : 1;
uint64_t i_cam_overflow : 1;
uint64_t i_cam_read_miss : 1;
uint64_t i_ioq_rep_underflow : 1;
uint64_t i_ioq_req_underflow : 1;
uint64_t i_ioq_rep_overflow : 1;
uint64_t i_ioq_req_overflow : 1;
uint64_t i_iiq_rep_overflow : 1;
uint64_t i_iiq_req_overflow : 1;
uint64_t i_ii_xn_rep_cred_overflow : 1;
uint64_t i_ii_xn_req_cred_overflow : 1;
uint64_t i_ii_xn_invalid_cmd : 1;
uint64_t i_xn_ii_invalid_cmd : 1;
uint64_t i_reserved_2 : 21;
uint64_t i_e_prb_0:1;
uint64_t i_rsvd:7;
uint64_t i_e_prb_8:1;
uint64_t i_e_prb_9:1;
uint64_t i_e_prb_a:1;
uint64_t i_e_prb_b:1;
uint64_t i_e_prb_c:1;
uint64_t i_e_prb_d:1;
uint64_t i_e_prb_e:1;
uint64_t i_e_prb_f:1;
uint64_t i_e_crazy:1;
uint64_t i_e_bte_0:1;
uint64_t i_e_bte_1:1;
uint64_t i_reserved_1:10;
uint64_t i_spur_rd_hdr:1;
uint64_t i_cam_intr_to:1;
uint64_t i_cam_overflow:1;
uint64_t i_cam_read_miss:1;
uint64_t i_ioq_rep_underflow:1;
uint64_t i_ioq_req_underflow:1;
uint64_t i_ioq_rep_overflow:1;
uint64_t i_ioq_req_overflow:1;
uint64_t i_iiq_rep_overflow:1;
uint64_t i_iiq_req_overflow:1;
uint64_t i_ii_xn_rep_cred_overflow:1;
uint64_t i_ii_xn_req_cred_overflow:1;
uint64_t i_ii_xn_invalid_cmd:1;
uint64_t i_xn_ii_invalid_cmd:1;
uint64_t i_reserved_2:21;
} ii_ieclr_fld_s;
} ii_ieclr_u_t;
/************************************************************************
* *
* This register controls both BTEs. SOFT_RESET is intended for *
......@@ -1406,14 +1362,13 @@ typedef union ii_ieclr_u {
typedef union ii_ibcr_u {
uint64_t ii_ibcr_regval;
struct {
uint64_t i_count : 4;
uint64_t i_rsvd_1 : 4;
uint64_t i_soft_reset : 1;
uint64_t i_rsvd : 55;
uint64_t i_count:4;
uint64_t i_rsvd_1:4;
uint64_t i_soft_reset:1;
uint64_t i_rsvd:55;
} ii_ibcr_fld_s;
} ii_ibcr_u_t;
/************************************************************************
* *
* This register contains the header of a spurious read response *
......@@ -1446,24 +1401,23 @@ typedef union ii_ibcr_u {
typedef union ii_ixsm_u {
uint64_t ii_ixsm_regval;
struct {
uint64_t i_byte_en : 32;
uint64_t i_reserved : 1;
uint64_t i_tag : 3;
uint64_t i_alt_pactyp : 4;
uint64_t i_bo : 1;
uint64_t i_error : 1;
uint64_t i_vbpm : 1;
uint64_t i_gbr : 1;
uint64_t i_ds : 2;
uint64_t i_ct : 1;
uint64_t i_tnum : 5;
uint64_t i_pactyp : 4;
uint64_t i_sidn : 4;
uint64_t i_didn : 4;
uint64_t i_byte_en:32;
uint64_t i_reserved:1;
uint64_t i_tag:3;
uint64_t i_alt_pactyp:4;
uint64_t i_bo:1;
uint64_t i_error:1;
uint64_t i_vbpm:1;
uint64_t i_gbr:1;
uint64_t i_ds:2;
uint64_t i_ct:1;
uint64_t i_tnum:5;
uint64_t i_pactyp:4;
uint64_t i_sidn:4;
uint64_t i_didn:4;
} ii_ixsm_fld_s;
} ii_ixsm_u_t;
/************************************************************************
* *
* This register contains the sideband bits of a spurious read *
......@@ -1474,13 +1428,12 @@ typedef union ii_ixsm_u {
typedef union ii_ixss_u {
uint64_t ii_ixss_regval;
struct {
uint64_t i_sideband : 8;
uint64_t i_rsvd : 55;
uint64_t i_valid : 1;
uint64_t i_sideband:8;
uint64_t i_rsvd:55;
uint64_t i_valid:1;
} ii_ixss_fld_s;
} ii_ixss_u_t;
/************************************************************************
* *
* This register enables software to access the II LLP's test port. *
......@@ -1496,19 +1449,18 @@ typedef union ii_ixss_u {
typedef union ii_ilct_u {
uint64_t ii_ilct_regval;
struct {
uint64_t i_test_seed : 20;
uint64_t i_test_mask : 8;
uint64_t i_test_data : 20;
uint64_t i_test_valid : 1;
uint64_t i_test_cberr : 1;
uint64_t i_test_flit : 3;
uint64_t i_test_clear : 1;
uint64_t i_test_err_capture : 1;
uint64_t i_rsvd : 9;
uint64_t i_test_seed:20;
uint64_t i_test_mask:8;
uint64_t i_test_data:20;
uint64_t i_test_valid:1;
uint64_t i_test_cberr:1;
uint64_t i_test_flit:3;
uint64_t i_test_clear:1;
uint64_t i_test_err_capture:1;
uint64_t i_rsvd:9;
} ii_ilct_fld_s;
} ii_ilct_u_t;
/************************************************************************
* *
* If the II detects an illegal incoming Duplonet packet (request or *
......@@ -1532,22 +1484,21 @@ typedef union ii_ilct_u {
typedef union ii_iieph1_u {
uint64_t ii_iieph1_regval;
struct {
uint64_t i_command : 7;
uint64_t i_rsvd_5 : 1;
uint64_t i_suppl : 14;
uint64_t i_rsvd_4 : 1;
uint64_t i_source : 14;
uint64_t i_rsvd_3 : 1;
uint64_t i_err_type : 4;
uint64_t i_rsvd_2 : 4;
uint64_t i_overrun : 1;
uint64_t i_rsvd_1 : 3;
uint64_t i_valid : 1;
uint64_t i_rsvd : 13;
uint64_t i_command:7;
uint64_t i_rsvd_5:1;
uint64_t i_suppl:14;
uint64_t i_rsvd_4:1;
uint64_t i_source:14;
uint64_t i_rsvd_3:1;
uint64_t i_err_type:4;
uint64_t i_rsvd_2:4;
uint64_t i_overrun:1;
uint64_t i_rsvd_1:3;
uint64_t i_valid:1;
uint64_t i_rsvd:13;
} ii_iieph1_fld_s;
} ii_iieph1_u_t;
/************************************************************************
* *
* This register holds the Address field from the header flit of an *
......@@ -1562,19 +1513,16 @@ typedef union ii_iieph1_u {
typedef union ii_iieph2_u {
uint64_t ii_iieph2_regval;
struct {
uint64_t i_rsvd_0 : 3;
uint64_t i_address : 47;
uint64_t i_rsvd_1 : 10;
uint64_t i_tail : 1;
uint64_t i_rsvd : 3;
uint64_t i_rsvd_0:3;
uint64_t i_address:47;
uint64_t i_rsvd_1:10;
uint64_t i_tail:1;
uint64_t i_rsvd:3;
} ii_iieph2_fld_s;
} ii_iieph2_u_t;
/******************************/
/************************************************************************
* *
* This register's value is a bit vector that guards access from SXBs *
......@@ -1586,11 +1534,10 @@ typedef union ii_iieph2_u {
typedef union ii_islapr_u {
uint64_t ii_islapr_regval;
struct {
uint64_t i_region : 64;
uint64_t i_region:64;
} ii_islapr_fld_s;
} ii_islapr_u_t;
/************************************************************************
* *
* A write to this register of the 56-bit value "Pup+Bun" will cause *
......@@ -1602,8 +1549,8 @@ typedef union ii_islapr_u {
typedef union ii_islapo_u {
uint64_t ii_islapo_regval;
struct {
uint64_t i_io_sbx_ovrride : 56;
uint64_t i_rsvd : 8;
uint64_t i_io_sbx_ovrride:56;
uint64_t i_rsvd:8;
} ii_islapo_fld_s;
} ii_islapo_u_t;
......@@ -1618,12 +1565,12 @@ typedef union ii_islapo_u {
typedef union ii_iwi_u {
uint64_t ii_iwi_regval;
struct {
uint64_t i_prescale : 24;
uint64_t i_rsvd : 8;
uint64_t i_timeout : 8;
uint64_t i_rsvd1 : 8;
uint64_t i_intrpt_retry_period : 8;
uint64_t i_rsvd2 : 8;
uint64_t i_prescale:24;
uint64_t i_rsvd:8;
uint64_t i_timeout:8;
uint64_t i_rsvd1:8;
uint64_t i_intrpt_retry_period:8;
uint64_t i_rsvd2:8;
} ii_iwi_fld_s;
} ii_iwi_u_t;
......@@ -1637,24 +1584,24 @@ typedef union ii_iwi_u {
typedef union ii_iwel_u {
uint64_t ii_iwel_regval;
struct {
uint64_t i_intr_timed_out : 1;
uint64_t i_rsvd : 7;
uint64_t i_cam_overflow : 1;
uint64_t i_cam_read_miss : 1;
uint64_t i_rsvd1 : 2;
uint64_t i_ioq_rep_underflow : 1;
uint64_t i_ioq_req_underflow : 1;
uint64_t i_ioq_rep_overflow : 1;
uint64_t i_ioq_req_overflow : 1;
uint64_t i_iiq_rep_overflow : 1;
uint64_t i_iiq_req_overflow : 1;
uint64_t i_rsvd2 : 6;
uint64_t i_ii_xn_rep_cred_over_under: 1;
uint64_t i_ii_xn_req_cred_over_under: 1;
uint64_t i_rsvd3 : 6;
uint64_t i_ii_xn_invalid_cmd : 1;
uint64_t i_xn_ii_invalid_cmd : 1;
uint64_t i_rsvd4 : 30;
uint64_t i_intr_timed_out:1;
uint64_t i_rsvd:7;
uint64_t i_cam_overflow:1;
uint64_t i_cam_read_miss:1;
uint64_t i_rsvd1:2;
uint64_t i_ioq_rep_underflow:1;
uint64_t i_ioq_req_underflow:1;
uint64_t i_ioq_rep_overflow:1;
uint64_t i_ioq_req_overflow:1;
uint64_t i_iiq_rep_overflow:1;
uint64_t i_iiq_req_overflow:1;
uint64_t i_rsvd2:6;
uint64_t i_ii_xn_rep_cred_over_under:1;
uint64_t i_ii_xn_req_cred_over_under:1;
uint64_t i_rsvd3:6;
uint64_t i_ii_xn_invalid_cmd:1;
uint64_t i_xn_ii_invalid_cmd:1;
uint64_t i_rsvd4:30;
} ii_iwel_fld_s;
} ii_iwel_u_t;
......@@ -1667,20 +1614,20 @@ typedef union ii_iwel_u {
typedef union ii_iwc_u {
uint64_t ii_iwc_regval;
struct {
uint64_t i_dma_byte_swap : 1;
uint64_t i_rsvd : 3;
uint64_t i_cam_read_lines_reset : 1;
uint64_t i_rsvd1 : 3;
uint64_t i_ii_xn_cred_over_under_log: 1;
uint64_t i_rsvd2 : 19;
uint64_t i_xn_rep_iq_depth : 5;
uint64_t i_rsvd3 : 3;
uint64_t i_xn_req_iq_depth : 5;
uint64_t i_rsvd4 : 3;
uint64_t i_iiq_depth : 6;
uint64_t i_rsvd5 : 12;
uint64_t i_force_rep_cred : 1;
uint64_t i_force_req_cred : 1;
uint64_t i_dma_byte_swap:1;
uint64_t i_rsvd:3;
uint64_t i_cam_read_lines_reset:1;
uint64_t i_rsvd1:3;
uint64_t i_ii_xn_cred_over_under_log:1;
uint64_t i_rsvd2:19;
uint64_t i_xn_rep_iq_depth:5;
uint64_t i_rsvd3:3;
uint64_t i_xn_req_iq_depth:5;
uint64_t i_rsvd4:3;
uint64_t i_iiq_depth:6;
uint64_t i_rsvd5:12;
uint64_t i_force_rep_cred:1;
uint64_t i_force_req_cred:1;
} ii_iwc_fld_s;
} ii_iwc_u_t;
......@@ -1693,10 +1640,10 @@ typedef union ii_iwc_u {
typedef union ii_iws_u {
uint64_t ii_iws_regval;
struct {
uint64_t i_xn_rep_iq_credits : 5;
uint64_t i_rsvd : 3;
uint64_t i_xn_req_iq_credits : 5;
uint64_t i_rsvd1 : 51;
uint64_t i_xn_rep_iq_credits:5;
uint64_t i_rsvd:3;
uint64_t i_xn_req_iq_credits:5;
uint64_t i_rsvd1:51;
} ii_iws_fld_s;
} ii_iws_u_t;
......@@ -1709,28 +1656,27 @@ typedef union ii_iws_u {
typedef union ii_iweim_u {
uint64_t ii_iweim_regval;
struct {
uint64_t i_intr_timed_out : 1;
uint64_t i_rsvd : 7;
uint64_t i_cam_overflow : 1;
uint64_t i_cam_read_miss : 1;
uint64_t i_rsvd1 : 2;
uint64_t i_ioq_rep_underflow : 1;
uint64_t i_ioq_req_underflow : 1;
uint64_t i_ioq_rep_overflow : 1;
uint64_t i_ioq_req_overflow : 1;
uint64_t i_iiq_rep_overflow : 1;
uint64_t i_iiq_req_overflow : 1;
uint64_t i_rsvd2 : 6;
uint64_t i_ii_xn_rep_cred_overflow : 1;
uint64_t i_ii_xn_req_cred_overflow : 1;
uint64_t i_rsvd3 : 6;
uint64_t i_ii_xn_invalid_cmd : 1;
uint64_t i_xn_ii_invalid_cmd : 1;
uint64_t i_rsvd4 : 30;
uint64_t i_intr_timed_out:1;
uint64_t i_rsvd:7;
uint64_t i_cam_overflow:1;
uint64_t i_cam_read_miss:1;
uint64_t i_rsvd1:2;
uint64_t i_ioq_rep_underflow:1;
uint64_t i_ioq_req_underflow:1;
uint64_t i_ioq_rep_overflow:1;
uint64_t i_ioq_req_overflow:1;
uint64_t i_iiq_rep_overflow:1;
uint64_t i_iiq_req_overflow:1;
uint64_t i_rsvd2:6;
uint64_t i_ii_xn_rep_cred_overflow:1;
uint64_t i_ii_xn_req_cred_overflow:1;
uint64_t i_rsvd3:6;
uint64_t i_ii_xn_invalid_cmd:1;
uint64_t i_xn_ii_invalid_cmd:1;
uint64_t i_rsvd4:30;
} ii_iweim_fld_s;
} ii_iweim_u_t;
/************************************************************************
* *
* A write to this register causes a particular field in the *
......@@ -1744,15 +1690,14 @@ typedef union ii_iweim_u {
typedef union ii_ipca_u {
uint64_t ii_ipca_regval;
struct {
uint64_t i_wid : 4;
uint64_t i_adjust : 1;
uint64_t i_rsvd_1 : 3;
uint64_t i_field : 2;
uint64_t i_rsvd : 54;
uint64_t i_wid:4;
uint64_t i_adjust:1;
uint64_t i_rsvd_1:3;
uint64_t i_field:2;
uint64_t i_rsvd:54;
} ii_ipca_fld_s;
} ii_ipca_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -1763,18 +1708,16 @@ typedef union ii_ipca_u {
* *
************************************************************************/
typedef union ii_iprte0a_u {
uint64_t ii_iprte0a_regval;
struct {
uint64_t i_rsvd_1 : 54;
uint64_t i_widget : 4;
uint64_t i_to_cnt : 5;
uint64_t i_vld : 1;
uint64_t i_rsvd_1:54;
uint64_t i_widget:4;
uint64_t i_to_cnt:5;
uint64_t i_vld:1;
} ii_iprte0a_fld_s;
} ii_iprte0a_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -1788,14 +1731,13 @@ typedef union ii_iprte0a_u {
typedef union ii_iprte1a_u {
uint64_t ii_iprte1a_regval;
struct {
uint64_t i_rsvd_1 : 54;
uint64_t i_widget : 4;
uint64_t i_to_cnt : 5;
uint64_t i_vld : 1;
uint64_t i_rsvd_1:54;
uint64_t i_widget:4;
uint64_t i_to_cnt:5;
uint64_t i_vld:1;
} ii_iprte1a_fld_s;
} ii_iprte1a_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -1809,14 +1751,13 @@ typedef union ii_iprte1a_u {
typedef union ii_iprte2a_u {
uint64_t ii_iprte2a_regval;
struct {
uint64_t i_rsvd_1 : 54;
uint64_t i_widget : 4;
uint64_t i_to_cnt : 5;
uint64_t i_vld : 1;
uint64_t i_rsvd_1:54;
uint64_t i_widget:4;
uint64_t i_to_cnt:5;
uint64_t i_vld:1;
} ii_iprte2a_fld_s;
} ii_iprte2a_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -1830,14 +1771,13 @@ typedef union ii_iprte2a_u {
typedef union ii_iprte3a_u {
uint64_t ii_iprte3a_regval;
struct {
uint64_t i_rsvd_1 : 54;
uint64_t i_widget : 4;
uint64_t i_to_cnt : 5;
uint64_t i_vld : 1;
uint64_t i_rsvd_1:54;
uint64_t i_widget:4;
uint64_t i_to_cnt:5;
uint64_t i_vld:1;
} ii_iprte3a_fld_s;
} ii_iprte3a_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -1851,14 +1791,13 @@ typedef union ii_iprte3a_u {
typedef union ii_iprte4a_u {
uint64_t ii_iprte4a_regval;
struct {
uint64_t i_rsvd_1 : 54;
uint64_t i_widget : 4;
uint64_t i_to_cnt : 5;
uint64_t i_vld : 1;
uint64_t i_rsvd_1:54;
uint64_t i_widget:4;
uint64_t i_to_cnt:5;
uint64_t i_vld:1;
} ii_iprte4a_fld_s;
} ii_iprte4a_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -1872,14 +1811,13 @@ typedef union ii_iprte4a_u {
typedef union ii_iprte5a_u {
uint64_t ii_iprte5a_regval;
struct {
uint64_t i_rsvd_1 : 54;
uint64_t i_widget : 4;
uint64_t i_to_cnt : 5;
uint64_t i_vld : 1;
uint64_t i_rsvd_1:54;
uint64_t i_widget:4;
uint64_t i_to_cnt:5;
uint64_t i_vld:1;
} ii_iprte5a_fld_s;
} ii_iprte5a_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -1893,14 +1831,13 @@ typedef union ii_iprte5a_u {
typedef union ii_iprte6a_u {
uint64_t ii_iprte6a_regval;
struct {
uint64_t i_rsvd_1 : 54;
uint64_t i_widget : 4;
uint64_t i_to_cnt : 5;
uint64_t i_vld : 1;
uint64_t i_rsvd_1:54;
uint64_t i_widget:4;
uint64_t i_to_cnt:5;
uint64_t i_vld:1;
} ii_iprte6a_fld_s;
} ii_iprte6a_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -1914,15 +1851,13 @@ typedef union ii_iprte6a_u {
typedef union ii_iprte7a_u {
uint64_t ii_iprte7a_regval;
struct {
uint64_t i_rsvd_1 : 54;
uint64_t i_widget : 4;
uint64_t i_to_cnt : 5;
uint64_t i_vld : 1;
uint64_t i_rsvd_1:54;
uint64_t i_widget:4;
uint64_t i_to_cnt:5;
uint64_t i_vld:1;
} ii_iprtea7_fld_s;
} ii_iprte7a_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -1933,18 +1868,16 @@ typedef union ii_iprte7a_u {
* *
************************************************************************/
typedef union ii_iprte0b_u {
uint64_t ii_iprte0b_regval;
struct {
uint64_t i_rsvd_1 : 3;
uint64_t i_address : 47;
uint64_t i_init : 3;
uint64_t i_source : 11;
uint64_t i_rsvd_1:3;
uint64_t i_address:47;
uint64_t i_init:3;
uint64_t i_source:11;
} ii_iprte0b_fld_s;
} ii_iprte0b_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -1958,14 +1891,13 @@ typedef union ii_iprte0b_u {
typedef union ii_iprte1b_u {
uint64_t ii_iprte1b_regval;
struct {
uint64_t i_rsvd_1 : 3;
uint64_t i_address : 47;
uint64_t i_init : 3;
uint64_t i_source : 11;
uint64_t i_rsvd_1:3;
uint64_t i_address:47;
uint64_t i_init:3;
uint64_t i_source:11;
} ii_iprte1b_fld_s;
} ii_iprte1b_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -1979,14 +1911,13 @@ typedef union ii_iprte1b_u {
typedef union ii_iprte2b_u {
uint64_t ii_iprte2b_regval;
struct {
uint64_t i_rsvd_1 : 3;
uint64_t i_address : 47;
uint64_t i_init : 3;
uint64_t i_source : 11;
uint64_t i_rsvd_1:3;
uint64_t i_address:47;
uint64_t i_init:3;
uint64_t i_source:11;
} ii_iprte2b_fld_s;
} ii_iprte2b_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -2000,14 +1931,13 @@ typedef union ii_iprte2b_u {
typedef union ii_iprte3b_u {
uint64_t ii_iprte3b_regval;
struct {
uint64_t i_rsvd_1 : 3;
uint64_t i_address : 47;
uint64_t i_init : 3;
uint64_t i_source : 11;
uint64_t i_rsvd_1:3;
uint64_t i_address:47;
uint64_t i_init:3;
uint64_t i_source:11;
} ii_iprte3b_fld_s;
} ii_iprte3b_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -2021,14 +1951,13 @@ typedef union ii_iprte3b_u {
typedef union ii_iprte4b_u {
uint64_t ii_iprte4b_regval;
struct {
uint64_t i_rsvd_1 : 3;
uint64_t i_address : 47;
uint64_t i_init : 3;
uint64_t i_source : 11;
uint64_t i_rsvd_1:3;
uint64_t i_address:47;
uint64_t i_init:3;
uint64_t i_source:11;
} ii_iprte4b_fld_s;
} ii_iprte4b_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -2042,14 +1971,13 @@ typedef union ii_iprte4b_u {
typedef union ii_iprte5b_u {
uint64_t ii_iprte5b_regval;
struct {
uint64_t i_rsvd_1 : 3;
uint64_t i_address : 47;
uint64_t i_init : 3;
uint64_t i_source : 11;
uint64_t i_rsvd_1:3;
uint64_t i_address:47;
uint64_t i_init:3;
uint64_t i_source:11;
} ii_iprte5b_fld_s;
} ii_iprte5b_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -2063,15 +1991,14 @@ typedef union ii_iprte5b_u {
typedef union ii_iprte6b_u {
uint64_t ii_iprte6b_regval;
struct {
uint64_t i_rsvd_1 : 3;
uint64_t i_address : 47;
uint64_t i_init : 3;
uint64_t i_source : 11;
uint64_t i_rsvd_1:3;
uint64_t i_address:47;
uint64_t i_init:3;
uint64_t i_source:11;
} ii_iprte6b_fld_s;
} ii_iprte6b_u_t;
/************************************************************************
* *
* There are 8 instances of this register. This register contains *
......@@ -2085,14 +2012,13 @@ typedef union ii_iprte6b_u {
typedef union ii_iprte7b_u {
uint64_t ii_iprte7b_regval;
struct {
uint64_t i_rsvd_1 : 3;
uint64_t i_address : 47;
uint64_t i_init : 3;
uint64_t i_source : 11;
uint64_t i_rsvd_1:3;
uint64_t i_address:47;
uint64_t i_init:3;
uint64_t i_source:11;
} ii_iprte7b_fld_s;
} ii_iprte7b_u_t;
/************************************************************************
* *
* Description: SHub II contains a feature which did not exist in *
......@@ -2114,15 +2040,14 @@ typedef union ii_iprte7b_u {
typedef union ii_ipdr_u {
uint64_t ii_ipdr_regval;
struct {
uint64_t i_te : 3;
uint64_t i_rsvd_1 : 1;
uint64_t i_pnd : 1;
uint64_t i_init_rpcnt : 1;
uint64_t i_rsvd : 58;
uint64_t i_te:3;
uint64_t i_rsvd_1:1;
uint64_t i_pnd:1;
uint64_t i_init_rpcnt:1;
uint64_t i_rsvd:58;
} ii_ipdr_fld_s;
} ii_ipdr_u_t;
/************************************************************************
* *
* A write to this register causes a CRB entry to be returned to the *
......@@ -2143,13 +2068,12 @@ typedef union ii_ipdr_u {
typedef union ii_icdr_u {
uint64_t ii_icdr_regval;
struct {
uint64_t i_crb_num : 4;
uint64_t i_pnd : 1;
uint64_t i_rsvd : 59;
uint64_t i_crb_num:4;
uint64_t i_pnd:1;
uint64_t i_rsvd:59;
} ii_icdr_fld_s;
} ii_icdr_u_t;
/************************************************************************
* *
* This register provides debug access to two FIFOs inside of II. *
......@@ -2170,15 +2094,14 @@ typedef union ii_icdr_u {
typedef union ii_ifdr_u {
uint64_t ii_ifdr_regval;
struct {
uint64_t i_ioq_max_rq : 7;
uint64_t i_set_ioq_rq : 1;
uint64_t i_ioq_max_rp : 7;
uint64_t i_set_ioq_rp : 1;
uint64_t i_rsvd : 48;
uint64_t i_ioq_max_rq:7;
uint64_t i_set_ioq_rq:1;
uint64_t i_ioq_max_rp:7;
uint64_t i_set_ioq_rp:1;
uint64_t i_rsvd:48;
} ii_ifdr_fld_s;
} ii_ifdr_u_t;
/************************************************************************
* *
* This register allows the II to become sluggish in removing *
......@@ -2193,14 +2116,13 @@ typedef union ii_ifdr_u {
typedef union ii_iiap_u {
uint64_t ii_iiap_regval;
struct {
uint64_t i_rq_mls : 6;
uint64_t i_rsvd_1 : 2;
uint64_t i_rp_mls : 6;
uint64_t i_rsvd : 50;
uint64_t i_rq_mls:6;
uint64_t i_rsvd_1:2;
uint64_t i_rp_mls:6;
uint64_t i_rsvd:50;
} ii_iiap_fld_s;
} ii_iiap_u_t;
/************************************************************************
* *
* This register allows several parameters of CRB operation to be *
......@@ -2213,24 +2135,23 @@ typedef union ii_iiap_u {
typedef union ii_icmr_u {
uint64_t ii_icmr_regval;
struct {
uint64_t i_sp_msg : 1;
uint64_t i_rd_hdr : 1;
uint64_t i_rsvd_4 : 2;
uint64_t i_c_cnt : 4;
uint64_t i_rsvd_3 : 4;
uint64_t i_clr_rqpd : 1;
uint64_t i_clr_rppd : 1;
uint64_t i_rsvd_2 : 2;
uint64_t i_fc_cnt : 4;
uint64_t i_crb_vld : 15;
uint64_t i_crb_mark : 15;
uint64_t i_rsvd_1 : 2;
uint64_t i_precise : 1;
uint64_t i_rsvd : 11;
uint64_t i_sp_msg:1;
uint64_t i_rd_hdr:1;
uint64_t i_rsvd_4:2;
uint64_t i_c_cnt:4;
uint64_t i_rsvd_3:4;
uint64_t i_clr_rqpd:1;
uint64_t i_clr_rppd:1;
uint64_t i_rsvd_2:2;
uint64_t i_fc_cnt:4;
uint64_t i_crb_vld:15;
uint64_t i_crb_mark:15;
uint64_t i_rsvd_1:2;
uint64_t i_precise:1;
uint64_t i_rsvd:11;
} ii_icmr_fld_s;
} ii_icmr_u_t;
/************************************************************************
* *
* This register allows control of the table portion of the CRB *
......@@ -2242,15 +2163,14 @@ typedef union ii_icmr_u {
typedef union ii_iccr_u {
uint64_t ii_iccr_regval;
struct {
uint64_t i_crb_num : 4;
uint64_t i_rsvd_1 : 4;
uint64_t i_cmd : 8;
uint64_t i_pending : 1;
uint64_t i_rsvd : 47;
uint64_t i_crb_num:4;
uint64_t i_rsvd_1:4;
uint64_t i_cmd:8;
uint64_t i_pending:1;
uint64_t i_rsvd:47;
} ii_iccr_fld_s;
} ii_iccr_u_t;
/************************************************************************
* *
* This register allows the maximum timeout value to be programmed. *
......@@ -2260,12 +2180,11 @@ typedef union ii_iccr_u {
typedef union ii_icto_u {
uint64_t ii_icto_regval;
struct {
uint64_t i_timeout : 8;
uint64_t i_rsvd : 56;
uint64_t i_timeout:8;
uint64_t i_rsvd:56;
} ii_icto_fld_s;
} ii_icto_u_t;
/************************************************************************
* *
* This register allows the timeout prescalar to be programmed. An *
......@@ -2280,12 +2199,11 @@ typedef union ii_icto_u {
typedef union ii_ictp_u {
uint64_t ii_ictp_regval;
struct {
uint64_t i_prescale : 24;
uint64_t i_rsvd : 40;
uint64_t i_prescale:24;
uint64_t i_rsvd:40;
} ii_ictp_fld_s;
} ii_ictp_u_t;
/************************************************************************
* *
* Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
......@@ -2312,16 +2230,15 @@ typedef union ii_ictp_u {
typedef union ii_icrb0_a_u {
uint64_t ii_icrb0_a_regval;
struct {
uint64_t ia_iow : 1;
uint64_t ia_vld : 1;
uint64_t ia_addr : 47;
uint64_t ia_tnum : 5;
uint64_t ia_sidn : 4;
uint64_t ia_rsvd : 6;
uint64_t ia_iow:1;
uint64_t ia_vld:1;
uint64_t ia_addr:47;
uint64_t ia_tnum:5;
uint64_t ia_sidn:4;
uint64_t ia_rsvd:6;
} ii_icrb0_a_fld_s;
} ii_icrb0_a_u_t;
/************************************************************************
* *
* Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
......@@ -2334,32 +2251,31 @@ typedef union ii_icrb0_a_u {
typedef union ii_icrb0_b_u {
uint64_t ii_icrb0_b_regval;
struct {
uint64_t ib_xt_err : 1;
uint64_t ib_mark : 1;
uint64_t ib_ln_uce : 1;
uint64_t ib_errcode : 3;
uint64_t ib_error : 1;
uint64_t ib_stall__bte_1 : 1;
uint64_t ib_stall__bte_0 : 1;
uint64_t ib_stall__intr : 1;
uint64_t ib_stall_ib : 1;
uint64_t ib_intvn : 1;
uint64_t ib_wb : 1;
uint64_t ib_hold : 1;
uint64_t ib_ack : 1;
uint64_t ib_resp : 1;
uint64_t ib_ack_cnt : 11;
uint64_t ib_rsvd : 7;
uint64_t ib_exc : 5;
uint64_t ib_init : 3;
uint64_t ib_imsg : 8;
uint64_t ib_imsgtype : 2;
uint64_t ib_use_old : 1;
uint64_t ib_rsvd_1 : 11;
uint64_t ib_xt_err:1;
uint64_t ib_mark:1;
uint64_t ib_ln_uce:1;
uint64_t ib_errcode:3;
uint64_t ib_error:1;
uint64_t ib_stall__bte_1:1;
uint64_t ib_stall__bte_0:1;
uint64_t ib_stall__intr:1;
uint64_t ib_stall_ib:1;
uint64_t ib_intvn:1;
uint64_t ib_wb:1;
uint64_t ib_hold:1;
uint64_t ib_ack:1;
uint64_t ib_resp:1;
uint64_t ib_ack_cnt:11;
uint64_t ib_rsvd:7;
uint64_t ib_exc:5;
uint64_t ib_init:3;
uint64_t ib_imsg:8;
uint64_t ib_imsgtype:2;
uint64_t ib_use_old:1;
uint64_t ib_rsvd_1:11;
} ii_icrb0_b_fld_s;
} ii_icrb0_b_u_t;
/************************************************************************
* *
* Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
......@@ -2372,19 +2288,18 @@ typedef union ii_icrb0_b_u {
typedef union ii_icrb0_c_u {
uint64_t ii_icrb0_c_regval;
struct {
uint64_t ic_source : 15;
uint64_t ic_size : 2;
uint64_t ic_ct : 1;
uint64_t ic_bte_num : 1;
uint64_t ic_gbr : 1;
uint64_t ic_resprqd : 1;
uint64_t ic_bo : 1;
uint64_t ic_suppl : 15;
uint64_t ic_rsvd : 27;
uint64_t ic_source:15;
uint64_t ic_size:2;
uint64_t ic_ct:1;
uint64_t ic_bte_num:1;
uint64_t ic_gbr:1;
uint64_t ic_resprqd:1;
uint64_t ic_bo:1;
uint64_t ic_suppl:15;
uint64_t ic_rsvd:27;
} ii_icrb0_c_fld_s;
} ii_icrb0_c_u_t;
/************************************************************************
* *
* Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
......@@ -2397,16 +2312,15 @@ typedef union ii_icrb0_c_u {
typedef union ii_icrb0_d_u {
uint64_t ii_icrb0_d_regval;
struct {
uint64_t id_pa_be : 43;
uint64_t id_bte_op : 1;
uint64_t id_pr_psc : 4;
uint64_t id_pr_cnt : 4;
uint64_t id_sleep : 1;
uint64_t id_rsvd : 11;
uint64_t id_pa_be:43;
uint64_t id_bte_op:1;
uint64_t id_pr_psc:4;
uint64_t id_pr_cnt:4;
uint64_t id_sleep:1;
uint64_t id_rsvd:11;
} ii_icrb0_d_fld_s;
} ii_icrb0_d_u_t;
/************************************************************************
* *
* Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
......@@ -2419,16 +2333,15 @@ typedef union ii_icrb0_d_u {
typedef union ii_icrb0_e_u {
uint64_t ii_icrb0_e_regval;
struct {
uint64_t ie_timeout : 8;
uint64_t ie_context : 15;
uint64_t ie_rsvd : 1;
uint64_t ie_tvld : 1;
uint64_t ie_cvld : 1;
uint64_t ie_rsvd_0 : 38;
uint64_t ie_timeout:8;
uint64_t ie_context:15;
uint64_t ie_rsvd:1;
uint64_t ie_tvld:1;
uint64_t ie_cvld:1;
uint64_t ie_rsvd_0:38;
} ii_icrb0_e_fld_s;
} ii_icrb0_e_u_t;
/************************************************************************
* *
* This register contains the lower 64 bits of the header of the *
......@@ -2440,14 +2353,13 @@ typedef union ii_icrb0_e_u {
typedef union ii_icsml_u {
uint64_t ii_icsml_regval;
struct {
uint64_t i_tt_addr : 47;
uint64_t i_newsuppl_ex : 14;
uint64_t i_reserved : 2;
uint64_t i_overflow : 1;
uint64_t i_tt_addr:47;
uint64_t i_newsuppl_ex:14;
uint64_t i_reserved:2;
uint64_t i_overflow:1;
} ii_icsml_fld_s;
} ii_icsml_u_t;
/************************************************************************
* *
* This register contains the middle 64 bits of the header of the *
......@@ -2459,12 +2371,11 @@ typedef union ii_icsml_u {
typedef union ii_icsmm_u {
uint64_t ii_icsmm_regval;
struct {
uint64_t i_tt_ack_cnt : 11;
uint64_t i_reserved : 53;
uint64_t i_tt_ack_cnt:11;
uint64_t i_reserved:53;
} ii_icsmm_fld_s;
} ii_icsmm_u_t;
/************************************************************************
* *
* This register contains the microscopic state, all the inputs to *
......@@ -2476,50 +2387,49 @@ typedef union ii_icsmm_u {
typedef union ii_icsmh_u {
uint64_t ii_icsmh_regval;
struct {
uint64_t i_tt_vld : 1;
uint64_t i_xerr : 1;
uint64_t i_ft_cwact_o : 1;
uint64_t i_ft_wact_o : 1;
uint64_t i_ft_active_o : 1;
uint64_t i_sync : 1;
uint64_t i_mnusg : 1;
uint64_t i_mnusz : 1;
uint64_t i_plusz : 1;
uint64_t i_plusg : 1;
uint64_t i_tt_exc : 5;
uint64_t i_tt_wb : 1;
uint64_t i_tt_hold : 1;
uint64_t i_tt_ack : 1;
uint64_t i_tt_resp : 1;
uint64_t i_tt_intvn : 1;
uint64_t i_g_stall_bte1 : 1;
uint64_t i_g_stall_bte0 : 1;
uint64_t i_g_stall_il : 1;
uint64_t i_g_stall_ib : 1;
uint64_t i_tt_imsg : 8;
uint64_t i_tt_imsgtype : 2;
uint64_t i_tt_use_old : 1;
uint64_t i_tt_respreqd : 1;
uint64_t i_tt_bte_num : 1;
uint64_t i_cbn : 1;
uint64_t i_match : 1;
uint64_t i_rpcnt_lt_34 : 1;
uint64_t i_rpcnt_ge_34 : 1;
uint64_t i_rpcnt_lt_18 : 1;
uint64_t i_rpcnt_ge_18 : 1;
uint64_t i_rpcnt_lt_2 : 1;
uint64_t i_rpcnt_ge_2 : 1;
uint64_t i_rqcnt_lt_18 : 1;
uint64_t i_rqcnt_ge_18 : 1;
uint64_t i_rqcnt_lt_2 : 1;
uint64_t i_rqcnt_ge_2 : 1;
uint64_t i_tt_device : 7;
uint64_t i_tt_init : 3;
uint64_t i_reserved : 5;
uint64_t i_tt_vld:1;
uint64_t i_xerr:1;
uint64_t i_ft_cwact_o:1;
uint64_t i_ft_wact_o:1;
uint64_t i_ft_active_o:1;
uint64_t i_sync:1;
uint64_t i_mnusg:1;
uint64_t i_mnusz:1;
uint64_t i_plusz:1;
uint64_t i_plusg:1;
uint64_t i_tt_exc:5;
uint64_t i_tt_wb:1;
uint64_t i_tt_hold:1;
uint64_t i_tt_ack:1;
uint64_t i_tt_resp:1;
uint64_t i_tt_intvn:1;
uint64_t i_g_stall_bte1:1;
uint64_t i_g_stall_bte0:1;
uint64_t i_g_stall_il:1;
uint64_t i_g_stall_ib:1;
uint64_t i_tt_imsg:8;
uint64_t i_tt_imsgtype:2;
uint64_t i_tt_use_old:1;
uint64_t i_tt_respreqd:1;
uint64_t i_tt_bte_num:1;
uint64_t i_cbn:1;
uint64_t i_match:1;
uint64_t i_rpcnt_lt_34:1;
uint64_t i_rpcnt_ge_34:1;
uint64_t i_rpcnt_lt_18:1;
uint64_t i_rpcnt_ge_18:1;
uint64_t i_rpcnt_lt_2:1;
uint64_t i_rpcnt_ge_2:1;
uint64_t i_rqcnt_lt_18:1;
uint64_t i_rqcnt_ge_18:1;
uint64_t i_rqcnt_lt_2:1;
uint64_t i_rqcnt_ge_2:1;
uint64_t i_tt_device:7;
uint64_t i_tt_init:3;
uint64_t i_reserved:5;
} ii_icsmh_fld_s;
} ii_icsmh_u_t;
/************************************************************************
* *
* The Shub DEBUG unit provides a 3-bit selection signal to the *
......@@ -2531,16 +2441,15 @@ typedef union ii_icsmh_u {
typedef union ii_idbss_u {
uint64_t ii_idbss_regval;
struct {
uint64_t i_iioclk_core_submenu : 3;
uint64_t i_rsvd : 5;
uint64_t i_fsbclk_wrapper_submenu : 3;
uint64_t i_rsvd_1 : 5;
uint64_t i_iioclk_menu : 5;
uint64_t i_rsvd_2 : 43;
uint64_t i_iioclk_core_submenu:3;
uint64_t i_rsvd:5;
uint64_t i_fsbclk_wrapper_submenu:3;
uint64_t i_rsvd_1:5;
uint64_t i_iioclk_menu:5;
uint64_t i_rsvd_2:43;
} ii_idbss_fld_s;
} ii_idbss_u_t;
/************************************************************************
* *
* Description: This register is used to set up the length for a *
......@@ -2559,15 +2468,14 @@ typedef union ii_idbss_u {
typedef union ii_ibls0_u {
uint64_t ii_ibls0_regval;
struct {
uint64_t i_length : 16;
uint64_t i_error : 1;
uint64_t i_rsvd_1 : 3;
uint64_t i_busy : 1;
uint64_t i_rsvd : 43;
uint64_t i_length:16;
uint64_t i_error:1;
uint64_t i_rsvd_1:3;
uint64_t i_busy:1;
uint64_t i_rsvd:43;
} ii_ibls0_fld_s;
} ii_ibls0_u_t;
/************************************************************************
* *
* This register should be loaded before a transfer is started. The *
......@@ -2581,13 +2489,12 @@ typedef union ii_ibls0_u {
typedef union ii_ibsa0_u {
uint64_t ii_ibsa0_regval;
struct {
uint64_t i_rsvd_1 : 7;
uint64_t i_addr : 42;
uint64_t i_rsvd : 15;
uint64_t i_rsvd_1:7;
uint64_t i_addr:42;
uint64_t i_rsvd:15;
} ii_ibsa0_fld_s;
} ii_ibsa0_u_t;
/************************************************************************
* *
* This register should be loaded before a transfer is started. The *
......@@ -2601,13 +2508,12 @@ typedef union ii_ibsa0_u {
typedef union ii_ibda0_u {
uint64_t ii_ibda0_regval;
struct {
uint64_t i_rsvd_1 : 7;
uint64_t i_addr : 42;
uint64_t i_rsvd : 15;
uint64_t i_rsvd_1:7;
uint64_t i_addr:42;
uint64_t i_rsvd:15;
} ii_ibda0_fld_s;
} ii_ibda0_u_t;
/************************************************************************
* *
* Writing to this register sets up the attributes of the transfer *
......@@ -2623,16 +2529,15 @@ typedef union ii_ibda0_u {
typedef union ii_ibct0_u {
uint64_t ii_ibct0_regval;
struct {
uint64_t i_zerofill : 1;
uint64_t i_rsvd_2 : 3;
uint64_t i_notify : 1;
uint64_t i_rsvd_1 : 3;
uint64_t i_poison : 1;
uint64_t i_rsvd : 55;
uint64_t i_zerofill:1;
uint64_t i_rsvd_2:3;
uint64_t i_notify:1;
uint64_t i_rsvd_1:3;
uint64_t i_poison:1;
uint64_t i_rsvd:55;
} ii_ibct0_fld_s;
} ii_ibct0_u_t;
/************************************************************************
* *
* This register contains the address to which the WINV is sent. *
......@@ -2643,13 +2548,12 @@ typedef union ii_ibct0_u {
typedef union ii_ibna0_u {
uint64_t ii_ibna0_regval;
struct {
uint64_t i_rsvd_1 : 7;
uint64_t i_addr : 42;
uint64_t i_rsvd : 15;
uint64_t i_rsvd_1:7;
uint64_t i_addr:42;
uint64_t i_rsvd:15;
} ii_ibna0_fld_s;
} ii_ibna0_u_t;
/************************************************************************
* *
* This register contains the programmable level as well as the node *
......@@ -2661,15 +2565,14 @@ typedef union ii_ibna0_u {
typedef union ii_ibia0_u {
uint64_t ii_ibia0_regval;
struct {
uint64_t i_rsvd_2 : 1;
uint64_t i_node_id : 11;
uint64_t i_rsvd_1 : 4;
uint64_t i_level : 7;
uint64_t i_rsvd : 41;
uint64_t i_rsvd_2:1;
uint64_t i_node_id:11;
uint64_t i_rsvd_1:4;
uint64_t i_level:7;
uint64_t i_rsvd:41;
} ii_ibia0_fld_s;
} ii_ibia0_u_t;
/************************************************************************
* *
* Description: This register is used to set up the length for a *
......@@ -2688,15 +2591,14 @@ typedef union ii_ibia0_u {
typedef union ii_ibls1_u {
uint64_t ii_ibls1_regval;
struct {
uint64_t i_length : 16;
uint64_t i_error : 1;
uint64_t i_rsvd_1 : 3;
uint64_t i_busy : 1;
uint64_t i_rsvd : 43;
uint64_t i_length:16;
uint64_t i_error:1;
uint64_t i_rsvd_1:3;
uint64_t i_busy:1;
uint64_t i_rsvd:43;
} ii_ibls1_fld_s;
} ii_ibls1_u_t;
/************************************************************************
* *
* This register should be loaded before a transfer is started. The *
......@@ -2710,13 +2612,12 @@ typedef union ii_ibls1_u {
typedef union ii_ibsa1_u {
uint64_t ii_ibsa1_regval;
struct {
uint64_t i_rsvd_1 : 7;
uint64_t i_addr : 33;
uint64_t i_rsvd : 24;
uint64_t i_rsvd_1:7;
uint64_t i_addr:33;
uint64_t i_rsvd:24;
} ii_ibsa1_fld_s;
} ii_ibsa1_u_t;
/************************************************************************
* *
* This register should be loaded before a transfer is started. The *
......@@ -2730,13 +2631,12 @@ typedef union ii_ibsa1_u {
typedef union ii_ibda1_u {
uint64_t ii_ibda1_regval;
struct {
uint64_t i_rsvd_1 : 7;
uint64_t i_addr : 33;
uint64_t i_rsvd : 24;
uint64_t i_rsvd_1:7;
uint64_t i_addr:33;
uint64_t i_rsvd:24;
} ii_ibda1_fld_s;
} ii_ibda1_u_t;
/************************************************************************
* *
* Writing to this register sets up the attributes of the transfer *
......@@ -2752,16 +2652,15 @@ typedef union ii_ibda1_u {
typedef union ii_ibct1_u {
uint64_t ii_ibct1_regval;
struct {
uint64_t i_zerofill : 1;
uint64_t i_rsvd_2 : 3;
uint64_t i_notify : 1;
uint64_t i_rsvd_1 : 3;
uint64_t i_poison : 1;
uint64_t i_rsvd : 55;
uint64_t i_zerofill:1;
uint64_t i_rsvd_2:3;
uint64_t i_notify:1;
uint64_t i_rsvd_1:3;
uint64_t i_poison:1;
uint64_t i_rsvd:55;
} ii_ibct1_fld_s;
} ii_ibct1_u_t;
/************************************************************************
* *
* This register contains the address to which the WINV is sent. *
......@@ -2772,13 +2671,12 @@ typedef union ii_ibct1_u {
typedef union ii_ibna1_u {
uint64_t ii_ibna1_regval;
struct {
uint64_t i_rsvd_1 : 7;
uint64_t i_addr : 33;
uint64_t i_rsvd : 24;
uint64_t i_rsvd_1:7;
uint64_t i_addr:33;
uint64_t i_rsvd:24;
} ii_ibna1_fld_s;
} ii_ibna1_u_t;
/************************************************************************
* *
* This register contains the programmable level as well as the node *
......@@ -2790,15 +2688,14 @@ typedef union ii_ibna1_u {
typedef union ii_ibia1_u {
uint64_t ii_ibia1_regval;
struct {
uint64_t i_pi_id : 1;
uint64_t i_node_id : 8;
uint64_t i_rsvd_1 : 7;
uint64_t i_level : 7;
uint64_t i_rsvd : 41;
uint64_t i_pi_id:1;
uint64_t i_node_id:8;
uint64_t i_rsvd_1:7;
uint64_t i_level:7;
uint64_t i_rsvd:41;
} ii_ibia1_fld_s;
} ii_ibia1_u_t;
/************************************************************************
* *
* This register defines the resources that feed information into *
......@@ -2817,14 +2714,13 @@ typedef union ii_ibia1_u {
typedef union ii_ipcr_u {
uint64_t ii_ipcr_regval;
struct {
uint64_t i_ippr0_c : 4;
uint64_t i_ippr1_c : 4;
uint64_t i_icct : 8;
uint64_t i_rsvd : 48;
uint64_t i_ippr0_c:4;
uint64_t i_ippr1_c:4;
uint64_t i_icct:8;
uint64_t i_rsvd:48;
} ii_ipcr_fld_s;
} ii_ipcr_u_t;
/************************************************************************
* *
* *
......@@ -2834,14 +2730,12 @@ typedef union ii_ipcr_u {
typedef union ii_ippr_u {
uint64_t ii_ippr_regval;
struct {
uint64_t i_ippr0 : 32;
uint64_t i_ippr1 : 32;
uint64_t i_ippr0:32;
uint64_t i_ippr1:32;
} ii_ippr_fld_s;
} ii_ippr_u_t;
/**************************************************************************
/************************************************************************
* *
* The following defines which were not formed into structures are *
* probably identical to another register, and the name of the *
......@@ -2919,8 +2813,7 @@ typedef union ii_ippr_u {
* IIO_ICRBE_D IIO_ICRB0_D *
* IIO_ICRBE_E IIO_ICRB0_E *
* *
**************************************************************************/
************************************************************************/
/*
* Slightly friendlier names for some common registers.
......@@ -2935,7 +2828,7 @@ typedef union ii_ippr_u {
#define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */
#define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */
#define IIO_LLP_LOG IIO_ILLR /* LLP log */
#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout*/
#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout */
#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */
#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */
#define IIO_IGFX_0 IIO_IGFX0
......@@ -2960,7 +2853,7 @@ typedef union ii_ippr_u {
#define IIO_WIDPRTE_A(x) IIO_PRTE_A(((x) - 8)) /* widget ID to its PRTE num */
#define IIO_WIDPRTE_B(x) IIO_PRTE_B(((x) - 8)) /* widget ID to its PRTE num */
#define IIO_NUM_IPRBS (9)
#define IIO_NUM_IPRBS 9
#define IIO_LLP_CSR_IS_UP 0x00002000
#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000
......@@ -2990,7 +2883,6 @@ typedef union ii_ippr_u {
#define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0)
#define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0)
/* names used in shub diags */
#define IIO_BASE_BTE0 IIO_IBLS_0
#define IIO_BASE_BTE1 IIO_IBLS_1
......@@ -3005,7 +2897,6 @@ typedef union ii_ippr_u {
(_x) : \
(_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
/* GFX Flow Control Node/Widget Register */
#define IIO_IGFX_W_NUM_BITS 4 /* size of widget num field */
#define IIO_IGFX_W_NUM_MASK ((1<<IIO_IGFX_W_NUM_BITS)-1)
......@@ -3025,7 +2916,6 @@ typedef union ii_ippr_u {
(((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \
(((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT))
/* Scratch registers (all bits available) */
#define IIO_SCRATCH_REG0 IIO_ISCR0
#define IIO_SCRATCH_REG1 IIO_ISCR1
......@@ -3242,16 +3132,16 @@ typedef union ii_ippr_u {
/*
* IIO CRB control register Fields: IIO_ICCR
*/
#define IIO_ICCR_PENDING (0x10000)
#define IIO_ICCR_CMD_MASK (0xFF)
#define IIO_ICCR_CMD_SHFT (7)
#define IIO_ICCR_CMD_NOP (0x0) /* No Op */
#define IIO_ICCR_CMD_WAKE (0x100) /* Reactivate CRB entry and process */
#define IIO_ICCR_CMD_TIMEOUT (0x200) /* Make CRB timeout & mark invalid */
#define IIO_ICCR_CMD_EJECT (0x400) /* Contents of entry written to memory
#define IIO_ICCR_PENDING 0x10000
#define IIO_ICCR_CMD_MASK 0xFF
#define IIO_ICCR_CMD_SHFT 7
#define IIO_ICCR_CMD_NOP 0x0 /* No Op */
#define IIO_ICCR_CMD_WAKE 0x100 /* Reactivate CRB entry and process */
#define IIO_ICCR_CMD_TIMEOUT 0x200 /* Make CRB timeout & mark invalid */
#define IIO_ICCR_CMD_EJECT 0x400 /* Contents of entry written to memory
* via a WB
*/
#define IIO_ICCR_CMD_FLUSH (0x800)
#define IIO_ICCR_CMD_FLUSH 0x800
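/*
 * Hedged sketch of how these values compose (a similar flush
 * handshake appears in the SN hub error-handling code): store the CRB
 * number, the command and PENDING in one write, then poll until the
 * hub clears PENDING.  REMOTE_HUB_S()/REMOTE_HUB_L() are assumed from
 * the surrounding SN2 headers.
 *
 *	REMOTE_HUB_S(nasid, IIO_ICCR,
 *		     IIO_ICCR_PENDING | IIO_ICCR_CMD_FLUSH | crb_num);
 *	while (REMOTE_HUB_L(nasid, IIO_ICCR) & IIO_ICCR_PENDING)
 *		udelay(1);
 */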
/*
*
......@@ -3324,14 +3214,13 @@ typedef ii_icrb0_c_u_t icrbc_t;
#define c_source ii_icrb0_c_fld_s.ic_source
#define c_regvalue ii_icrb0_c_regval
typedef ii_icrb0_d_u_t icrbd_t;
#define d_sleep ii_icrb0_d_fld_s.id_sleep
#define d_pricnt ii_icrb0_d_fld_s.id_pr_cnt
#define d_pripsc ii_icrb0_d_fld_s.id_pr_psc
#define d_bteop ii_icrb0_d_fld_s.id_bte_op
#define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names*/
#define d_benable ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names*/
#define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names */
#define d_benable ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names */
#define d_regvalue ii_icrb0_d_regval
typedef ii_icrb0_e_u_t icrbe_t;
......@@ -3341,7 +3230,6 @@ typedef ii_icrb0_e_u_t icrbe_t;
#define icrbe_timeout ii_icrb0_e_fld_s.ie_timeout
#define e_regvalue ii_icrb0_e_regval
/* Number of widgets supported by shub */
#define HUB_NUM_WIDGET 9
#define HUB_WIDGET_ID_MIN 0x8
......@@ -3369,8 +3257,8 @@ typedef ii_icrb0_e_u_t icrbe_t;
#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */
#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
#define IIO_WSTAT_TXRETRY_MASK (0x7F) /* should be 0xFF?? */
#define IIO_WSTAT_TXRETRY_SHFT (16)
#define IIO_WSTAT_TXRETRY_MASK 0x7F /* should be 0xFF?? */
#define IIO_WSTAT_TXRETRY_SHFT 16
#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
IIO_WSTAT_TXRETRY_MASK)
......@@ -3416,14 +3304,14 @@ typedef ii_icrb0_e_u_t icrbe_t;
typedef union hubii_wcr_u {
uint64_t wcr_reg_value;
struct {
uint64_t wcr_widget_id: 4, /* LLP crossbar credit */
wcr_tag_mode: 1, /* Tag mode */
wcr_rsvd1: 8, /* Reserved */
wcr_xbar_crd: 3, /* LLP crossbar credit */
wcr_f_bad_pkt: 1, /* Force bad llp pkt enable */
wcr_dir_con: 1, /* widget direct connect */
wcr_e_thresh: 5, /* elasticity threshold */
wcr_rsvd: 41; /* unused */
uint64_t wcr_widget_id:4, /* Widget ID */
wcr_tag_mode:1, /* Tag mode */
wcr_rsvd1:8, /* Reserved */
wcr_xbar_crd:3, /* LLP crossbar credit */
wcr_f_bad_pkt:1, /* Force bad llp pkt enable */
wcr_dir_con:1, /* widget direct connect */
wcr_e_thresh:5, /* elasticity threshold */
wcr_rsvd:41; /* unused */
} wcr_fields_s;
} hubii_wcr_t;
......@@ -3438,10 +3326,7 @@ performance registers */
typedef union io_perf_sel {
uint64_t perf_sel_reg;
struct {
uint64_t perf_ippr0 : 4,
perf_ippr1 : 4,
perf_icct : 8,
perf_rsvd : 48;
uint64_t perf_ippr0:4, perf_ippr1:4, perf_icct:8, perf_rsvd:48;
} perf_sel_bits;
} io_perf_sel_t;
......@@ -3451,9 +3336,7 @@ typedef union io_perf_sel {
typedef union io_perf_cnt {
uint64_t perf_cnt;
struct {
uint64_t perf_cnt : 20,
perf_rsvd2 : 12,
perf_rsvd1 : 32;
uint64_t perf_cnt:20, perf_rsvd2:12, perf_rsvd1:32;
} perf_cnt_bits;
} io_perf_cnt_t;
......@@ -3461,16 +3344,15 @@ typedef union io_perf_cnt {
typedef union iprte_a {
uint64_t entry;
struct {
uint64_t i_rsvd_1 : 3;
uint64_t i_addr : 38;
uint64_t i_init : 3;
uint64_t i_source : 8;
uint64_t i_rsvd : 2;
uint64_t i_widget : 4;
uint64_t i_to_cnt : 5;
uint64_t i_vld : 1;
uint64_t i_rsvd_1:3;
uint64_t i_addr:38;
uint64_t i_init:3;
uint64_t i_source:8;
uint64_t i_rsvd:2;
uint64_t i_widget:4;
uint64_t i_to_cnt:5;
uint64_t i_vld:1;
} iprte_fields;
} iprte_a_t;
#endif /* _ASM_IA64_SN_SHUBIO_H */
......@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
......@@ -97,19 +97,19 @@ extern short physical_node_map[]; /* indexed by nasid to get cnode */
/*
* Macros for retrieving info about current cpu
*/
#define get_nasid() (nodepda->phys_cpuid[smp_processor_id()].nasid)
#define get_subnode() (nodepda->phys_cpuid[smp_processor_id()].subnode)
#define get_slice() (nodepda->phys_cpuid[smp_processor_id()].slice)
#define get_cnode() (nodepda->phys_cpuid[smp_processor_id()].cnode)
#define get_nasid() (sn_nodepda->phys_cpuid[smp_processor_id()].nasid)
#define get_subnode() (sn_nodepda->phys_cpuid[smp_processor_id()].subnode)
#define get_slice() (sn_nodepda->phys_cpuid[smp_processor_id()].slice)
#define get_cnode() (sn_nodepda->phys_cpuid[smp_processor_id()].cnode)
#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
/*
* Macros for retrieving info about an arbitrary cpu
* cpuid - logical cpu id
*/
#define cpuid_to_nasid(cpuid) (nodepda->phys_cpuid[cpuid].nasid)
#define cpuid_to_subnode(cpuid) (nodepda->phys_cpuid[cpuid].subnode)
#define cpuid_to_slice(cpuid) (nodepda->phys_cpuid[cpuid].slice)
#define cpuid_to_nasid(cpuid) (sn_nodepda->phys_cpuid[cpuid].nasid)
#define cpuid_to_subnode(cpuid) (sn_nodepda->phys_cpuid[cpuid].subnode)
#define cpuid_to_slice(cpuid) (sn_nodepda->phys_cpuid[cpuid].slice)
#define cpuid_to_cnodeid(cpuid) (physical_node_map[cpuid_to_nasid(cpuid)])
......@@ -123,11 +123,8 @@ extern int nasid_slice_to_cpuid(int, int);
/*
* cnodeid_to_nasid - convert a cnodeid to a NASID
* Macro relies on pg_data for a node being on the node itself.
* Just extract the NASID from the pointer.
*
*/
#define cnodeid_to_nasid(cnodeid) pda->cnodeid_to_nasid_table[cnodeid]
#define cnodeid_to_nasid(cnodeid) (sn_cnodeid_to_nasid[cnodeid])
/*
* nasid_to_cnodeid - convert a NASID to a cnodeid
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,1999-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN_FRU_H
#define _ASM_IA64_SN_SN_FRU_H
#define MAX_DIMMS 8 /* max # of dimm banks */
#define MAX_PCIDEV 8 /* max # of pci devices on a pci bus */
typedef unsigned char confidence_t;
typedef struct kf_mem_s {
confidence_t km_confidence; /* confidence level that the memory is bad
* is this necessary ?
*/
confidence_t km_dimm[MAX_DIMMS];
/* confidence level that dimm[i] is bad
* I think this is the right number
*/
} kf_mem_t;
typedef struct kf_cpu_s {
confidence_t kc_confidence; /* confidence level that cpu is bad */
confidence_t kc_icache; /* confidence level that instr. cache is bad */
confidence_t kc_dcache; /* confidence level that data cache is bad */
confidence_t kc_scache; /* confidence level that sec. cache is bad */
confidence_t kc_sysbus; /* confidence level that sysad/cmd/state bus is bad */
} kf_cpu_t;
typedef struct kf_pci_bus_s {
confidence_t kpb_belief; /* confidence level that the pci bus is bad */
confidence_t kpb_pcidev_belief[MAX_PCIDEV];
/* confidence level that the pci dev is bad */
} kf_pci_bus_t;
#endif /* _ASM_IA64_SN_SN_FRU_H */
......@@ -557,7 +557,8 @@ static inline u64
ia64_sn_partition_serial_get(void)
{
struct ia64_sal_retval ret_stuff;
SAL_CALL(ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 0, 0, 0, 0, 0, 0);
ia64_sal_oemcall_reentrant(&ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0,
0, 0, 0, 0, 0, 0);
if (ret_stuff.status != 0)
return 0;
return ret_stuff.v0;
......@@ -565,11 +566,10 @@ ia64_sn_partition_serial_get(void)
static inline u64
sn_partition_serial_number_val(void) {
if (sn_partition_serial_number) {
return(sn_partition_serial_number);
} else {
return(sn_partition_serial_number = ia64_sn_partition_serial_get());
if (unlikely(sn_partition_serial_number == 0)) {
sn_partition_serial_number = ia64_sn_partition_serial_get();
}
return sn_partition_serial_number;
}
/*
......@@ -580,7 +580,7 @@ static inline partid_t
ia64_sn_sysctl_partition_get(nasid_t nasid)
{
struct ia64_sal_retval ret_stuff;
SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
0, 0, 0, 0, 0, 0);
if (ret_stuff.status != 0)
return INVALID_PARTID;
......@@ -595,11 +595,38 @@ extern partid_t sn_partid;
static inline partid_t
sn_local_partid(void) {
if (sn_partid < 0) {
return (sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id())));
} else {
return sn_partid;
if (unlikely(sn_partid < 0)) {
sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id()));
}
return sn_partid;
}
/*
* Returns the physical address of the partition's reserved page through
* an iterative series of calls.
*
* On first call, 'cookie' and 'len' should be set to 0, and 'addr'
* set to the nasid of the partition whose reserved page's address is
* being sought.
* On subsequent calls, pass the values that were passed back on the
* previous call.
*
* While the return status equals SALRET_MORE_PASSES, keep calling
* this function after first copying 'len' bytes starting at 'addr'
* into 'buf'. Once the return status equals SALRET_OK, 'addr' will
* be the physical address of the partition's reserved page. If the
* return status equals neither of these, an error has occurred.
*/
static inline s64
sn_partition_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
{
struct ia64_sal_retval rv;
ia64_sal_oemcall_reentrant(&rv, SN_SAL_GET_PARTITION_ADDR, *cookie,
*addr, buf, *len, 0, 0, 0);
*cookie = rv.v0;
*addr = rv.v1;
*len = rv.v2;
return rv.status;
}
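/*
 * A minimal caller sketch for the iterative protocol above; the
 * buffer handling is elided and SALRET_MORE_PASSES/SALRET_OK are
 * assumed from the SAL return-status defines:
 *
 *	u64 cookie = 0, addr = nasid, len = 0;
 *	s64 status;
 *
 *	while ((status = sn_partition_reserved_page_pa(buf, &cookie,
 *			&addr, &len)) == SALRET_MORE_PASSES) {
 *		(grow 'buf' to at least 'len' bytes if needed, then
 *		 copy 'len' bytes starting at physical address 'addr'
 *		 into it)
 *	}
 *	if (status == SALRET_OK)
 *		('addr' is now the reserved page's physical address)
 */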
/*
......@@ -621,8 +648,8 @@ static inline int
sn_register_xp_addr_region(u64 paddr, u64 len, int operation)
{
struct ia64_sal_retval ret_stuff;
SAL_CALL(ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, (u64)operation,
0, 0, 0, 0);
ia64_sal_oemcall(&ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len,
(u64)operation, 0, 0, 0, 0);
return ret_stuff.status;
}
......@@ -646,8 +673,8 @@ sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr,
} else {
call = SN_SAL_NO_FAULT_ZONE_PHYSICAL;
}
SAL_CALL(ret_stuff, call, start_addr, end_addr, return_addr, (u64)1,
0, 0, 0);
ia64_sal_oemcall(&ret_stuff, call, start_addr, end_addr, return_addr,
(u64)1, 0, 0, 0);
return ret_stuff.status;
}
......@@ -668,8 +695,8 @@ static inline int
sn_change_coherence(u64 *new_domain, u64 *old_domain)
{
struct ia64_sal_retval ret_stuff;
SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0,
0, 0, 0);
ia64_sal_oemcall(&ret_stuff, SN_SAL_COHERENCE, (u64)new_domain,
(u64)old_domain, 0, 0, 0, 0, 0);
return ret_stuff.status;
}
......@@ -688,8 +715,8 @@ sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
cnodeid = nasid_to_cnodeid(get_node_number(paddr));
// spin_lock(&NODEPDA(cnodeid)->bist_lock);
local_irq_save(irq_flags);
SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array,
perms, 0, 0, 0);
ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len,
(u64)nasid_array, perms, 0, 0, 0);
local_irq_restore(irq_flags);
// spin_unlock(&NODEPDA(cnodeid)->bist_lock);
return ret_stuff.status;
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_SNDRV_H
#define _ASM_IA64_SN_SNDRV_H
/* ioctl commands */
#define SNDRV_GET_ROUTERINFO 1
#define SNDRV_GET_INFOSIZE 2
#define SNDRV_GET_HUBINFO 3
#define SNDRV_GET_FLASHLOGSIZE 4
#define SNDRV_SET_FLASHSYNC 5
#define SNDRV_GET_FLASHLOGDATA 6
#define SNDRV_GET_FLASHLOGALL 7
#define SNDRV_SET_HISTOGRAM_TYPE 14
#define SNDRV_ELSC_COMMAND 19
#define SNDRV_CLEAR_LOG 20
#define SNDRV_INIT_LOG 21
#define SNDRV_GET_PIMM_PSC 22
#define SNDRV_SET_PARTITION 23
#define SNDRV_GET_PARTITION 24
/* see synergy_perf_ioctl() */
#define SNDRV_GET_SYNERGY_VERSION 30
#define SNDRV_GET_SYNERGY_STATUS 31
#define SNDRV_GET_SYNERGYINFO 32
#define SNDRV_SYNERGY_APPEND 33
#define SNDRV_SYNERGY_ENABLE 34
#define SNDRV_SYNERGY_FREQ 35
/* Devices */
#define SNDRV_UKNOWN_DEVICE -1
#define SNDRV_ROUTER_DEVICE 1
#define SNDRV_HUB_DEVICE 2
#define SNDRV_ELSC_NVRAM_DEVICE 3
#define SNDRV_ELSC_CONTROLLER_DEVICE 4
#define SNDRV_SYSCTL_SUBCH 5
#define SNDRV_SYNERGY_DEVICE 6
#endif /* _ASM_IA64_SN_SNDRV_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
*/
/*
* External Cross Partition (XP) structures and defines.
*/
#ifndef _ASM_IA64_SN_XP_H
#define _ASM_IA64_SN_XP_H
#include <linux/version.h>
#include <linux/cache.h>
#include <linux/hardirq.h>
#include <asm/sn/types.h>
#include <asm/sn/bte.h>
#ifdef USE_DBUG_ON
#define DBUG_ON(condition) BUG_ON(condition)
#else
#define DBUG_ON(condition)
#endif
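/*
 * For example (illustrative only), building with USE_DBUG_ON defined
 * turns a consistency check like
 *
 *	DBUG_ON(len & (L1_CACHE_BYTES - 1));
 *
 * into a real BUG_ON(); in production builds it compiles away to
 * nothing.
 */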
/*
* Define the maximum number of logically defined partitions the system
* can support. It is constrained by the maximum number of hardware
* partitionable regions. The term 'region' in this context refers to the
* minimum number of nodes that can comprise an access protection grouping.
* The access protection applies to memory, IPI and IOI.
*
* The maximum number of hardware partitionable regions is equal to the
* maximum number of nodes in the entire system divided by the minimum number
* of nodes that comprise an access protection grouping.
*/
#define XP_MAX_PARTITIONS 64
/*
* Define the number of u64s required to represent all the C-brick nasids
* as a bitmap. The cross-partition kernel modules deal only with
* C-brick nasids, thus the need for bitmaps which don't account for
* odd-numbered (non C-brick) nasids.
*/
#define XP_MAX_PHYSNODE_ID (MAX_PHYSNODE_ID / 2)
#define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8)
#define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64)
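/*
 * Worked numbers, assuming MAX_PHYSNODE_ID is 2048 (its sn2 value):
 * XP_MAX_PHYSNODE_ID covers 1024 C-brick nasids, so a full mask is
 * XP_NASID_MASK_BYTES = 128 bytes, i.e. XP_NASID_MASK_WORDS = 16 u64s.
 */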
/*
* Wrapper for bte_copy() that, should it return a failure status, will retry
* the bte_copy() once in the hope that the failure was due to a temporary
* aberration (i.e., the link going down temporarily).
*
* See bte_copy for definition of the input parameters.
*
* Note: xp_bte_copy() should never be called while holding a spinlock.
*/
static inline bte_result_t
xp_bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
bte_result_t ret;
ret = bte_copy(src, dest, len, mode, notification);
if (ret != BTE_SUCCESS) {
if (!in_interrupt()) {
cond_resched();
}
ret = bte_copy(src, dest, len, mode, notification);
}
return ret;
}
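/*
 * Typical (illustrative) use, pulling one cacheline from a remote
 * partition; the mode flags are assumed from <asm/sn/bte.h>, and both
 * physical addresses must be cacheline aligned as bte_copy()
 * requires:
 *
 *	if (xp_bte_copy(remote_pa, local_pa, L1_CACHE_BYTES,
 *			(BTE_NORMAL | BTE_WACQUIRE), NULL) != BTE_SUCCESS)
 *		(both attempts failed; treat the pull as unsuccessful)
 */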
/*
* XPC establishes channel connections between the local partition and any
* other partition that is currently up. Over these channels, kernel-level
* `users' can communicate with their counterparts on the other partitions.
*
* The maximum number of channels is limited to eight. For performance reasons,
* the internal cross partition structures require sixteen bytes per channel,
* and eight allows all of this interface-shared info to fit in one cache line.
*
* XPC_NCHANNELS reflects the total number of channels currently defined.
* If the need for additional channels arises, one can simply increase
* XPC_NCHANNELS accordingly. If the day should come when that number
* exceeds the MAXIMUM number of channels allowed (eight), then one will need
* to make changes to the XPC code to allow for this.
*/
#define XPC_MEM_CHANNEL 0 /* memory channel number */
#define XPC_NET_CHANNEL 1 /* network channel number */
#define XPC_NCHANNELS 2 /* #of defined channels */
#define XPC_MAX_NCHANNELS 8 /* max #of channels allowed */
#if XPC_NCHANNELS > XPC_MAX_NCHANNELS
#error XPC_NCHANNELS exceeds MAXIMUM allowed.
#endif
/*
* The format of an XPC message is as follows:
*
* +-------+--------------------------------+
* | flags |////////////////////////////////|
* +-------+--------------------------------+
* | message # |
* +----------------------------------------+
* | payload (user-defined message) |
* | |
* :
* | |
* +----------------------------------------+
*
* The size of the payload is defined by the user via xpc_connect(). A user-
* defined message resides in the payload area.
*
* The user should have no dealings with the message header, but only the
* message's payload. When a message entry is allocated (via xpc_allocate())
* a pointer to the payload area is returned and not the actual beginning of
* the XPC message. The user then constructs a message in the payload area
* and passes that pointer as an argument on xpc_send() or xpc_send_notify().
*
* The size of a message entry (within a message queue) must be a cacheline
* sized multiple in order to facilitate the BTE transfer of messages from one
* message queue to another. A macro, XPC_MSG_SIZE(), is provided for the user
* that wants to fit as many msg entries as possible in a given memory size
* (e.g. a memory page).
*/
struct xpc_msg {
u8 flags; /* FOR XPC INTERNAL USE ONLY */
u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */
s64 number; /* FOR XPC INTERNAL USE ONLY */
u64 payload; /* user defined portion of message */
};
#define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload)
#define XPC_MSG_SIZE(_payload_size) \
L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))
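/*
 * Worked example, assuming a 128-byte L1 cacheline and 16KB pages (the
 * sn2 defaults): the message header occupies XPC_MSG_PAYLOAD_OFFSET =
 * 16 bytes, so for a 128-byte payload XPC_MSG_SIZE(128) =
 * L1_CACHE_ALIGN(16 + 128) = 256, and a one-page message queue holds
 * 16384 / 256 = 64 entries.
 */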
/*
* Define the return values and values passed to user's callout functions.
* (It is important to add new value codes at the end just preceding
* xpcUnknownReason, which must have the highest numerical value.)
*/
enum xpc_retval {
xpcSuccess = 0,
xpcNotConnected, /* 1: channel is not connected */
xpcConnected, /* 2: channel connected (opened) */
xpcRETIRED1, /* 3: (formerly xpcDisconnected) */
xpcMsgReceived, /* 4: message received */
xpcMsgDelivered, /* 5: message delivered and acknowledged */
xpcRETIRED2, /* 6: (formerly xpcTransferFailed) */
xpcNoWait, /* 7: operation would require wait */
xpcRetry, /* 8: retry operation */
xpcTimeout, /* 9: timeout in xpc_allocate_msg_wait() */
xpcInterrupted, /* 10: interrupted wait */
xpcUnequalMsgSizes, /* 11: message size disparity between sides */
xpcInvalidAddress, /* 12: invalid address */
xpcNoMemory, /* 13: no memory available for XPC structures */
xpcLackOfResources, /* 14: insufficient resources for operation */
xpcUnregistered, /* 15: channel is not registered */
xpcAlreadyRegistered, /* 16: channel is already registered */
xpcPartitionDown, /* 17: remote partition is down */
xpcNotLoaded, /* 18: XPC module is not loaded */
xpcUnloading, /* 19: this side is unloading XPC module */
xpcBadMagic, /* 20: XPC MAGIC string not found */
xpcReactivating, /* 21: remote partition was reactivated */
xpcUnregistering, /* 22: this side is unregistering channel */
xpcOtherUnregistering, /* 23: other side is unregistering channel */
xpcCloneKThread, /* 24: cloning kernel thread */
xpcCloneKThreadFailed, /* 25: cloning kernel thread failed */
xpcNoHeartbeat, /* 26: remote partition has no heartbeat */
xpcPioReadError, /* 27: PIO read error */
xpcPhysAddrRegFailed, /* 28: registration of phys addr range failed */
xpcBteDirectoryError, /* 29: maps to BTEFAIL_DIR */
xpcBtePoisonError, /* 30: maps to BTEFAIL_POISON */
xpcBteWriteError, /* 31: maps to BTEFAIL_WERR */
xpcBteAccessError, /* 32: maps to BTEFAIL_ACCESS */
xpcBtePWriteError, /* 33: maps to BTEFAIL_PWERR */
xpcBtePReadError, /* 34: maps to BTEFAIL_PRERR */
xpcBteTimeOutError, /* 35: maps to BTEFAIL_TOUT */
xpcBteXtalkError, /* 36: maps to BTEFAIL_XTERR */
xpcBteNotAvailable, /* 37: maps to BTEFAIL_NOTAVAIL */
xpcBteUnmappedError, /* 38: unmapped BTEFAIL_ error */
xpcBadVersion, /* 39: bad version number */
xpcVarsNotSet, /* 40: the XPC variables are not set up */
xpcNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */
xpcInvalidPartid, /* 42: invalid partition ID */
xpcLocalPartid, /* 43: local partition ID */
xpcUnknownReason /* 44: unknown reason -- must be last in list */
};
/*
* Define the callout function types used by XPC to update the user on
* connection activity and state changes (via the user function registered by
* xpc_connect()) and to notify them of messages received and delivered (via
* the user function registered by xpc_send_notify()).
*
* The two function types are xpc_channel_func and xpc_notify_func and
* both share the following arguments, with the exception of "data", which
* only xpc_channel_func has.
*
* Arguments:
*
* reason - reason code. (See following table.)
* partid - partition ID associated with condition.
* ch_number - channel # associated with condition.
* data - pointer to optional data. (See following table.)
* key - pointer to optional user-defined value provided as the "key"
* argument to xpc_connect() or xpc_send_notify().
*
* In the following table the "Optional Data" column applies to callouts made
* to functions registered by xpc_connect(). A "NA" in that column indicates
* that this reason code can be passed to functions registered by
* xpc_send_notify() (i.e. they don't have data arguments).
*
* Also, the first three reason codes in the following table indicate
* success, whereas the others indicate failure. When a failure reason code
* is received, one can assume that the channel is not connected.
*
*
* Reason Code | Cause | Optional Data
* =====================+================================+=====================
* xpcConnected | connection has been established| max #of entries
* | to the specified partition on | allowed in message
* | the specified channel | queue
* ---------------------+--------------------------------+---------------------
* xpcMsgReceived | an XPC message arrived from | address of payload
* | the specified partition on the |
* | specified channel | [the user must call
* | | xpc_received() when
* | | finished with the
* | | payload]
* ---------------------+--------------------------------+---------------------
* xpcMsgDelivered | notification that the message | NA
* | was delivered to the intended |
* | recipient and that they have |
* | acknowledged its receipt by |
* | calling xpc_received() |
* =====================+================================+=====================
* xpcUnequalMsgSizes | can't connect to the specified | NULL
* | partition on the specified |
* | channel because of mismatched |
* | message sizes |
* ---------------------+--------------------------------+---------------------
 * xpcNoMemory | insufficient memory available | NULL
* | to allocate message queue |
* ---------------------+--------------------------------+---------------------
* xpcLackOfResources | lack of resources to create | NULL
* | the necessary kthreads to |
* | support the channel |
* ---------------------+--------------------------------+---------------------
* xpcUnregistering | this side's user has | NULL or NA
* | unregistered by calling |
* | xpc_disconnect() |
* ---------------------+--------------------------------+---------------------
* xpcOtherUnregistering| the other side's user has | NULL or NA
* | unregistered by calling |
* | xpc_disconnect() |
* ---------------------+--------------------------------+---------------------
* xpcNoHeartbeat | the other side's XPC is no | NULL or NA
* | longer heartbeating |
* | |
* ---------------------+--------------------------------+---------------------
* xpcUnloading | this side's XPC module is | NULL or NA
* | being unloaded |
* | |
* ---------------------+--------------------------------+---------------------
* xpcOtherUnloading | the other side's XPC module is | NULL or NA
 * | being unloaded |
* | |
* ---------------------+--------------------------------+---------------------
* xpcPioReadError | xp_nofault_PIOR() returned an | NULL or NA
* | error while sending an IPI |
* | |
* ---------------------+--------------------------------+---------------------
* xpcInvalidAddress | the address either received or | NULL or NA
* | sent by the specified partition|
* | is invalid |
* ---------------------+--------------------------------+---------------------
* xpcBteNotAvailable | attempt to pull data from the | NULL or NA
* xpcBtePoisonError | specified partition over the |
* xpcBteWriteError | specified channel via a |
* xpcBteAccessError | bte_copy() failed |
* xpcBteTimeOutError | |
* xpcBteXtalkError | |
* xpcBteDirectoryError | |
 * xpcBtePWriteError | |
 * xpcBtePReadError | |
* xpcBteUnmappedError | |
* ---------------------+--------------------------------+---------------------
* xpcUnknownReason | the specified channel to the | NULL or NA
* | specified partition was |
* | unavailable for unknown reasons|
* =====================+================================+=====================
*/
typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid,
int ch_number, void *data, void *key);
typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
int ch_number, void *key);
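/*
 * Hedged sketch (not part of this header): what a user-registered
 * xpc_channel_func might look like, following the reason-code table
 * above.  The "my_*" names and process_payload() are hypothetical.
 */
extern void process_payload(void *);	/* hypothetical consumer */

static void
my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
		void *data, void *key)
{
	switch (reason) {
	case xpcConnected:
		/* per the table above, data carries the max #of
		 * message-queue entries, cast into the pointer */
		printk(KERN_INFO "ch %d to partid %d up, %lu entries\n",
		       ch_number, partid, (unsigned long) data);
		break;
	case xpcMsgReceived:
		/* data points at the payload; xpc_received() must be
		 * called once we are finished with it */
		process_payload(data);
		xpc_received(partid, ch_number, data);
		break;
	default:
		/* all other reason codes imply the channel is down */
		printk(KERN_WARNING "ch %d to partid %d closed (%d)\n",
		       ch_number, partid, reason);
		break;
	}
}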
/*
* The following is a registration entry. There is a global array of these,
* one per channel. It is used to record the connection registration made
* by the users of XPC. As long as a registration entry exists, for any
* partition that comes up, XPC will attempt to establish a connection on
* that channel. Notification that a connection has been made will occur via
* the xpc_channel_func function.
*
 * The 'func' field points to the function to call when asynchronous
 * notification is required for such events as a connection being
 * established or lost, an incoming message arriving, or an error
 * condition being encountered. A non-NULL 'func' field indicates that
 * there is an active registration for the channel.
*/
struct xpc_registration {
struct semaphore sema;
xpc_channel_func func; /* function to call */
void *key; /* pointer to user's key */
u16 nentries; /* #of msg entries in local msg queue */
u16 msg_size; /* message queue's message size */
u32 assigned_limit; /* limit on #of assigned kthreads */
u32 idle_limit; /* limit on #of idle kthreads */
} ____cacheline_aligned;
#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL)
/* the following are valid xpc_allocate() flags */
#define XPC_WAIT 0 /* wait flag */
#define XPC_NOWAIT 1 /* no wait flag */
struct xpc_interface {
void (*connect)(int);
void (*disconnect)(int);
enum xpc_retval (*allocate)(partid_t, int, u32, void **);
enum xpc_retval (*send)(partid_t, int, void *);
enum xpc_retval (*send_notify)(partid_t, int, void *,
xpc_notify_func, void *);
void (*received)(partid_t, int, void *);
enum xpc_retval (*partid_to_nasids)(partid_t, void *);
};
extern struct xpc_interface xpc_interface;
extern void xpc_set_interface(void (*)(int),
void (*)(int),
enum xpc_retval (*)(partid_t, int, u32, void **),
enum xpc_retval (*)(partid_t, int, void *),
enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func,
void *),
void (*)(partid_t, int, void *),
enum xpc_retval (*)(partid_t, void *));
extern void xpc_clear_interface(void);
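/*
 * Hedged sketch (not part of this header): the expected pattern is
 * that the loadable XPC module installs its real handlers with
 * xpc_set_interface() and removes them with xpc_clear_interface(),
 * leaving stubs like the following in place meanwhile.  The stub
 * name is hypothetical.
 */
static enum xpc_retval
xpc_notloaded_send(partid_t partid, int ch_number, void *payload)
{
	return xpcNotLoaded;	/* XPC module not (yet) loaded */
}
/* ...with xpc_interface.send (and, analogously, the other function
 * pointers) initialized to such stubs so that the inline wrappers
 * below never chase a NULL pointer. */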
extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
u16, u32, u32);
extern void xpc_disconnect(int);
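/*
 * Hedged sketch (not part of this header): registering interest in a
 * channel.  The argument order is inferred from struct xpc_registration
 * above (payload size, #of queue entries, kthread limits) and is an
 * assumption, as are MY_CHANNEL and the numeric choices; the callback
 * is the my_channel_func sketch shown earlier.
 */
#define MY_CHANNEL	3		/* hypothetical channel # */

static enum xpc_retval
my_register(void)
{
	enum xpc_retval ret;

	ret = xpc_connect(MY_CHANNEL,
			  my_channel_func,	/* see sketch above */
			  NULL,			/* user key, echoed on callouts */
			  128,			/* message payload size */
			  64,			/* #of message-queue entries */
			  1,			/* limit on assigned kthreads */
			  1);			/* limit on idle kthreads */
	if (ret != xpcSuccess)
		printk(KERN_ERR "xpc_connect failed, reason %d\n", ret);
	return ret;
}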
static inline enum xpc_retval
xpc_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
{
return xpc_interface.allocate(partid, ch_number, flags, payload);
}
static inline enum xpc_retval
xpc_send(partid_t partid, int ch_number, void *payload)
{
return xpc_interface.send(partid, ch_number, payload);
}
static inline enum xpc_retval
xpc_send_notify(partid_t partid, int ch_number, void *payload,
xpc_notify_func func, void *key)
{
return xpc_interface.send_notify(partid, ch_number, payload, func, key);
}
static inline void
xpc_received(partid_t partid, int ch_number, void *payload)
{
return xpc_interface.received(partid, ch_number, payload);
}
static inline enum xpc_retval
xpc_partid_to_nasids(partid_t partid, void *nasids)
{
return xpc_interface.partid_to_nasids(partid, nasids);
}
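/*
 * Hedged sketch (not part of this header): the send path implied by
 * the inline wrappers above -- reserve a message entry, fill the
 * payload, then queue it.  DST_PARTID and my_data are hypothetical,
 * <linux/string.h> is assumed for memcpy(), and the error handling is
 * illustrative.
 */
static enum xpc_retval
my_send(u64 my_data)
{
	enum xpc_retval ret;
	void *payload;

	/* reserve an entry in the outbound message queue; XPC_WAIT
	 * blocks until one is free, XPC_NOWAIT would fail fast */
	ret = xpc_allocate(DST_PARTID, MY_CHANNEL, XPC_WAIT, &payload);
	if (ret != xpcSuccess)
		return ret;

	memcpy(payload, &my_data, sizeof(my_data));	/* fill payload */

	/* queue for delivery; xpc_send_notify() could be used instead
	 * to learn (via xpcMsgDelivered) when the far side has called
	 * xpc_received() on it */
	return xpc_send(DST_PARTID, MY_CHANNEL, payload);
}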
extern u64 xp_nofault_PIOR_target;
extern int xp_nofault_PIOR(void *);
extern int xp_error_PIOR(void);
#endif /* _ASM_IA64_SN_XP_H */
......@@ -846,6 +846,8 @@ fastcall NORET_TYPE void do_exit(long code)
for (;;) ;
}
EXPORT_SYMBOL_GPL(do_exit);
NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
if (comp)
......
......@@ -43,7 +43,9 @@
* initializer cleaner
*/
nodemask_t node_online_map = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
struct pglist_data *pgdat_list;
unsigned long totalram_pages;
unsigned long totalhigh_pages;
......