Commit 49799291 authored by Linus Torvalds
parents 7e732bfc 386d1d50
@@ -52,9 +52,9 @@
 #include <linux/compat.h>
 #include <linux/vfs.h>
 #include <linux/mman.h>
+#include <linux/mutex.h>
 #include <asm/intrinsics.h>
-#include <asm/semaphore.h>
 #include <asm/types.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -86,7 +86,7 @@
  * while doing so.
  */
 /* XXX make per-mm: */
-static DECLARE_MUTEX(ia32_mmap_sem);
+static DEFINE_MUTEX(ia32_mmap_mutex);
 asmlinkage long
 sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
@@ -895,11 +895,11 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot
     prot = get_prot32(prot);
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-    down(&ia32_mmap_sem);
+    mutex_lock(&ia32_mmap_mutex);
     {
         addr = emulate_mmap(file, addr, len, prot, flags, offset);
     }
-    up(&ia32_mmap_sem);
+    mutex_unlock(&ia32_mmap_mutex);
 #else
     down_write(&current->mm->mmap_sem);
     {
@@ -1000,11 +1000,9 @@ sys32_munmap (unsigned int start, unsigned int len)
     if (start >= end)
         return 0;
-    down(&ia32_mmap_sem);
-    {
-        ret = sys_munmap(start, end - start);
-    }
-    up(&ia32_mmap_sem);
+    mutex_lock(&ia32_mmap_mutex);
+    ret = sys_munmap(start, end - start);
+    mutex_unlock(&ia32_mmap_mutex);
 #endif
     return ret;
 }
@@ -1056,7 +1054,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot)
     if (retval < 0)
         return retval;
-    down(&ia32_mmap_sem);
+    mutex_lock(&ia32_mmap_mutex);
     {
         if (offset_in_page(start)) {
             /* start address is 4KB aligned but not page aligned. */
@@ -1080,7 +1078,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot)
         retval = sys_mprotect(start, end - start, prot);
     }
   out:
-    up(&ia32_mmap_sem);
+    mutex_unlock(&ia32_mmap_mutex);
     return retval;
 #endif
 }
@@ -1124,11 +1122,9 @@ sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
     old_len = PAGE_ALIGN(old_end) - addr;
     new_len = PAGE_ALIGN(new_end) - addr;
-    down(&ia32_mmap_sem);
-    {
-        ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
-    }
-    up(&ia32_mmap_sem);
+    mutex_lock(&ia32_mmap_mutex);
+    ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
+    mutex_unlock(&ia32_mmap_mutex);
     if ((ret >= 0) && (old_len < new_len)) {
         /* mremap expanded successfully */
......
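Every hunk in the diff above applies the same conversion: a semaphore used purely for mutual exclusion becomes a struct mutex. A minimal sketch of the before/after locking pattern, assuming a hypothetical lock name and a trivial critical section (illustrative only, not code from this commit):

#include <linux/mutex.h>

static DEFINE_MUTEX(my_mutex);      /* old style: static DECLARE_MUTEX(my_sem); */
static int shared_counter;          /* example data the lock protects */

static void protected_op(void)
{
    mutex_lock(&my_mutex);          /* old style: down(&my_sem); may sleep, so not for interrupt context */
    shared_counter++;               /* critical section */
    mutex_unlock(&my_mutex);        /* old style: up(&my_sem); */
}

Unlike a counting semaphore, a mutex must be released by the task that acquired it, which matches the usage in ia32_do_mmap(), sys32_munmap(), sys32_mprotect() and sys32_mremap() above.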
@@ -40,6 +40,7 @@
 #include <linux/bitops.h>
 #include <linux/capability.h>
 #include <linux/rcupdate.h>
+#include <linux/completion.h>
 #include <asm/errno.h>
 #include <asm/intrinsics.h>
@@ -286,7 +287,7 @@ typedef struct pfm_context {
     unsigned long ctx_ovfl_regs[4];     /* which registers overflowed (notification) */
-    struct semaphore ctx_restart_sem;   /* use for blocking notification mode */
+    struct completion ctx_restart_done; /* use for blocking notification mode */
     unsigned long ctx_used_pmds[4];     /* bitmask of PMD used */
     unsigned long ctx_all_pmds[4];      /* bitmask of all accessible PMDs */
@@ -1991,7 +1992,7 @@ pfm_close(struct inode *inode, struct file *filp)
     /*
      * force task to wake up from MASKED state
      */
-    up(&ctx->ctx_restart_sem);
+    complete(&ctx->ctx_restart_done);
     DPRINT(("waking up ctx_state=%d\n", state));
@@ -2706,7 +2707,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
     /*
      * init restart semaphore to locked
      */
-    sema_init(&ctx->ctx_restart_sem, 0);
+    init_completion(&ctx->ctx_restart_done);
     /*
      * activation is used in SMP only
@@ -3687,7 +3688,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
      */
     if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
         DPRINT(("unblocking [%d] \n", task->pid));
-        up(&ctx->ctx_restart_sem);
+        complete(&ctx->ctx_restart_done);
     } else {
         DPRINT(("[%d] armed exit trap\n", task->pid));
@@ -5089,7 +5090,7 @@ pfm_handle_work(void)
      * may go through without blocking on SMP systems
      * if restart has been received already by the time we call down()
      */
-    ret = down_interruptible(&ctx->ctx_restart_sem);
+    ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
     DPRINT(("after block sleeping ret=%d\n", ret));
......
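The perfmon hunks replace a semaphore used as an event-wait (initialized locked, released by the waker, acquired by the sleeper) with a completion, which is the primitive intended for that pattern. A minimal sketch of the wait/signal shape used above, with a made-up context struct (illustrative, not the perfmon code):

#include <linux/completion.h>

struct my_ctx {
    struct completion restart_done;
};

static void my_ctx_init(struct my_ctx *ctx)
{
    init_completion(&ctx->restart_done);    /* starts "not done", like a semaphore born locked */
}

static int my_wait_for_restart(struct my_ctx *ctx)
{
    /* 0 once complete() has been called, -ERESTARTSYS if a signal arrived first */
    return wait_for_completion_interruptible(&ctx->restart_done);
}

static void my_signal_restart(struct my_ctx *ctx)
{
    complete(&ctx->restart_done);           /* wakes one waiter */
}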
@@ -210,6 +210,7 @@ uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
     dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
+    touch_softlockup_watchdog();
     memset((char *)start, 0, length);
     node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
......
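The single added call tells the soft-lockup detector that a long uninterrupted stretch of work is expected here (zeroing a large uncached region), so it is not reported as a hung CPU. A minimal sketch of the same idiom with placeholder buffer and length arguments (in kernels of this vintage the declaration lives in <linux/sched.h>):

#include <linux/sched.h>    /* touch_softlockup_watchdog() */
#include <linux/string.h>

static void clear_large_region(void *buf, size_t len)
{
    touch_softlockup_watchdog();    /* reset the watchdog timestamp before a long memset */
    memset(buf, 0, len);
}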
@@ -51,6 +51,15 @@ struct sn_flush_device_kernel {
     struct sn_flush_device_common *common;
 };
+/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included
+ * for older official PROMs to function on the new kernel base. This struct
+ * will be removed when the next official PROM release occurs. */
+struct sn_flush_device_war {
+    struct sn_flush_device_common common;
+    u32 filler; /* older PROMs expect the default size of a spinlock_t */
+};
 /*
  * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
  */
......
@@ -165,6 +165,43 @@ sn_pcidev_info_get(struct pci_dev *dev)
     return NULL;
 }
+/* Older PROM flush WAR
+ *
+ * 01/16/06 -- This war will be in place until a new official PROM is released.
+ * Additionally note that the struct sn_flush_device_war also has to be
+ * removed from arch/ia64/sn/include/xtalk/hubdev.h
+ */
+static u8 war_implemented = 0;
+static void sn_device_fixup_war(u64 nasid, u64 widget, int device,
+                                struct sn_flush_device_common *common)
+{
+    struct sn_flush_device_war *war_list;
+    struct sn_flush_device_war *dev_entry;
+    struct ia64_sal_retval isrv = {0,0,0,0};
+    if (!war_implemented) {
+        printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
+               "PROM flush WAR\n");
+        war_implemented = 1;
+    }
+    war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
+    if (!war_list)
+        BUG();
+    SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
+                    nasid, widget, __pa(war_list), 0, 0, 0 ,0);
+    if (isrv.status)
+        panic("sn_device_fixup_war failed: %s\n",
+              ia64_sal_strerror(isrv.status));
+    dev_entry = war_list + device;
+    memcpy(common,dev_entry, sizeof(*common));
+    kfree(war_list);
+}
 /*
  * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for
  *     each node in the system.
@@ -246,8 +283,19 @@ static void sn_fixup_ionodes(void)
                                        widget,
                                        device,
                                        (u64)(dev_entry->common));
-            if (status)
+            if (status) {
+                if (sn_sal_rev() < 0x0450) {
+                    /* shortlived WAR for older
+                     * PROM images
+                     */
+                    sn_device_fixup_war(nasid,
+                                        widget,
+                                        device,
+                                        dev_entry->common);
+                }
+                else
                     BUG();
+            }
             spin_lock_init(&dev_entry->sfdl_flush_lock);
         }
......
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/timer.h>
 #include <linux/vmalloc.h>
+#include <linux/mutex.h>
 #include <asm/mca.h>
 #include <asm/sal.h>
 #include <asm/sn/sn_sal.h>
@@ -27,7 +28,7 @@ void sn_init_cpei_timer(void);
 /* Printing oemdata from mca uses data that is not passed through SAL, it is
  * global. Only one user at a time.
  */
-static DECLARE_MUTEX(sn_oemdata_mutex);
+static DEFINE_MUTEX(sn_oemdata_mutex);
 static u8 **sn_oemdata;
 static u64 *sn_oemdata_size, sn_oemdata_bufsize;
@@ -89,7 +90,7 @@ static int
 sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
                                     u64 * oemdata_size)
 {
-    down(&sn_oemdata_mutex);
+    mutex_lock(&sn_oemdata_mutex);
     sn_oemdata = oemdata;
     sn_oemdata_size = oemdata_size;
     sn_oemdata_bufsize = 0;
@@ -107,7 +108,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
         *sn_oemdata_size = 0;
         ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
     }
-    up(&sn_oemdata_mutex);
+    mutex_unlock(&sn_oemdata_mutex);
     return 0;
 }
......
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/xp.h>
@@ -136,13 +137,13 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
     registration = &xpc_registrations[ch_number];
-    if (down_interruptible(&registration->sema) != 0) {
+    if (mutex_lock_interruptible(&registration->mutex) != 0) {
         return xpcInterrupted;
     }
     /* if XPC_CHANNEL_REGISTERED(ch_number) */
     if (registration->func != NULL) {
-        up(&registration->sema);
+        mutex_unlock(&registration->mutex);
         return xpcAlreadyRegistered;
     }
@@ -154,7 +155,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
     registration->key = key;
     registration->func = func;
-    up(&registration->sema);
+    mutex_unlock(&registration->mutex);
     xpc_interface.connect(ch_number);
@@ -190,11 +191,11 @@ xpc_disconnect(int ch_number)
      * figured XPC's users will just turn around and call xpc_disconnect()
      * again anyways, so we might as well wait, if need be.
      */
-    down(&registration->sema);
+    mutex_lock(&registration->mutex);
     /* if !XPC_CHANNEL_REGISTERED(ch_number) */
     if (registration->func == NULL) {
-        up(&registration->sema);
+        mutex_unlock(&registration->mutex);
         return;
     }
@@ -208,7 +209,7 @@ xpc_disconnect(int ch_number)
     xpc_interface.disconnect(ch_number);
-    up(&registration->sema);
+    mutex_unlock(&registration->mutex);
     return;
 }
@@ -250,9 +251,9 @@ xp_init(void)
         xp_nofault_PIOR_target = SH1_IPI_ACCESS;
     }
-    /* initialize the connection registration semaphores */
+    /* initialize the connection registration mutex */
     for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
-        sema_init(&xpc_registrations[ch_number].sema, 1);  /* mutex */
+        mutex_init(&xpc_registrations[ch_number].mutex);
     }
     return 0;
......
@@ -22,6 +22,8 @@
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/xpc.h>
@@ -56,8 +58,8 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
         atomic_set(&ch->n_to_notify, 0);
         spin_lock_init(&ch->lock);
-        sema_init(&ch->msg_to_pull_sema, 1);    /* mutex */
-        sema_init(&ch->wdisconnect_sema, 0);    /* event wait */
+        mutex_init(&ch->msg_to_pull_mutex);
+        init_completion(&ch->wdisconnect_wait);
         atomic_set(&ch->n_on_msg_allocate_wq, 0);
         init_waitqueue_head(&ch->msg_allocate_wq);
@@ -534,7 +536,6 @@ static enum xpc_retval
 xpc_allocate_msgqueues(struct xpc_channel *ch)
 {
     unsigned long irq_flags;
-    int i;
     enum xpc_retval ret;
@@ -552,11 +553,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
         return ret;
     }
-    for (i = 0; i < ch->local_nentries; i++) {
-        /* use a semaphore as an event wait queue */
-        sema_init(&ch->notify_queue[i].sema, 0);
-    }
     spin_lock_irqsave(&ch->lock, irq_flags);
     ch->flags |= XPC_C_SETUP;
     spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -799,10 +795,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
     }
     if (ch->flags & XPC_C_WDISCONNECT) {
-        spin_unlock_irqrestore(&ch->lock, *irq_flags);
-        up(&ch->wdisconnect_sema);
-        spin_lock_irqsave(&ch->lock, *irq_flags);
+        /* we won't lose the CPU since we're holding ch->lock */
+        complete(&ch->wdisconnect_wait);
     } else if (ch->delayed_IPI_flags) {
         if (part->act_state != XPC_P_DEACTIVATING) {
             /* time to take action on any delayed IPI flags */
@@ -1092,12 +1086,12 @@ xpc_connect_channel(struct xpc_channel *ch)
     struct xpc_registration *registration = &xpc_registrations[ch->number];
-    if (down_trylock(&registration->sema) != 0) {
+    if (mutex_trylock(&registration->mutex) == 0) {
         return xpcRetry;
     }
     if (!XPC_CHANNEL_REGISTERED(ch->number)) {
-        up(&registration->sema);
+        mutex_unlock(&registration->mutex);
         return xpcUnregistered;
     }
@@ -1108,7 +1102,7 @@ xpc_connect_channel(struct xpc_channel *ch)
     if (ch->flags & XPC_C_DISCONNECTING) {
         spin_unlock_irqrestore(&ch->lock, irq_flags);
-        up(&registration->sema);
+        mutex_unlock(&registration->mutex);
         return ch->reason;
     }
@@ -1140,7 +1134,7 @@ xpc_connect_channel(struct xpc_channel *ch)
              * channel lock be locked and will unlock and relock
              * the channel lock as needed.
              */
-            up(&registration->sema);
+            mutex_unlock(&registration->mutex);
             XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
                                    &irq_flags);
             spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1155,7 +1149,7 @@ xpc_connect_channel(struct xpc_channel *ch)
         atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
     }
-    up(&registration->sema);
+    mutex_unlock(&registration->mutex);
     /* initiate the connection */
@@ -2089,7 +2083,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
     enum xpc_retval ret;
-    if (down_interruptible(&ch->msg_to_pull_sema) != 0) {
+    if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
         /* we were interrupted by a signal */
         return NULL;
     }
@@ -2125,7 +2119,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
             XPC_DEACTIVATE_PARTITION(part, ret);
-            up(&ch->msg_to_pull_sema);
+            mutex_unlock(&ch->msg_to_pull_mutex);
             return NULL;
         }
@@ -2134,7 +2128,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
         ch->next_msg_to_pull += nmsgs;
     }
-    up(&ch->msg_to_pull_sema);
+    mutex_unlock(&ch->msg_to_pull_mutex);
     /* return the message we were looking for */
     msg_offset = (get % ch->remote_nentries) * ch->msg_size;
......
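One detail worth noting in the hunks above is the inverted test on the trylock conversion: down_trylock() returns 0 on success and nonzero on failure, whereas mutex_trylock() returns 1 on success and 0 on failure, so "if (down_trylock(...) != 0)" becomes "if (mutex_trylock(...) == 0)". mutex_lock_interruptible() keeps the down_interruptible() convention of returning 0 on success and nonzero when interrupted by a signal. A small sketch of both checks with a hypothetical lock (illustrative only, not code from this commit):

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(reg_mutex);

static int try_register(void)
{
    if (mutex_trylock(&reg_mutex) == 0)    /* 0: lock NOT acquired (old: down_trylock() != 0) */
        return -EBUSY;
    /* ... registration work ... */
    mutex_unlock(&reg_mutex);
    return 0;
}

static int register_interruptible(void)
{
    if (mutex_lock_interruptible(&reg_mutex) != 0)    /* nonzero: interrupted by a signal */
        return -EINTR;
    /* ... registration work ... */
    mutex_unlock(&reg_mutex);
    return 0;
}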
@@ -55,6 +55,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
+#include <linux/completion.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/kdebug.h>
@@ -177,10 +178,10 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
 static unsigned long xpc_hb_check_timeout;
 /* notification that the xpc_hb_checker thread has exited */
-static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
+static DECLARE_COMPLETION(xpc_hb_checker_exited);
 /* notification that the xpc_discovery thread has exited */
-static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
+static DECLARE_COMPLETION(xpc_discovery_exited);
 static struct timer_list xpc_hb_timer;
@@ -321,7 +322,7 @@ xpc_hb_checker(void *ignore)
     /* mark this thread as having exited */
-    up(&xpc_hb_checker_exited);
+    complete(&xpc_hb_checker_exited);
     return 0;
 }
@@ -341,7 +342,7 @@ xpc_initiate_discovery(void *ignore)
     dev_dbg(xpc_part, "discovery thread is exiting\n");
     /* mark this thread as having exited */
-    up(&xpc_discovery_exited);
+    complete(&xpc_discovery_exited);
     return 0;
 }
@@ -893,7 +894,7 @@ xpc_disconnect_wait(int ch_number)
             continue;
         }
-        (void) down(&ch->wdisconnect_sema);
+        wait_for_completion(&ch->wdisconnect_wait);
         spin_lock_irqsave(&ch->lock, irq_flags);
         DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
@@ -946,10 +947,10 @@ xpc_do_exit(enum xpc_retval reason)
     free_irq(SGI_XPC_ACTIVATE, NULL);
     /* wait for the discovery thread to exit */
-    down(&xpc_discovery_exited);
+    wait_for_completion(&xpc_discovery_exited);
     /* wait for the heartbeat checker thread to exit */
-    down(&xpc_hb_checker_exited);
+    wait_for_completion(&xpc_hb_checker_exited);
     /* sleep for a 1/3 of a second or so */
@@ -1367,7 +1368,7 @@ xpc_init(void)
         dev_err(xpc_part, "failed while forking discovery thread\n");
         /* mark this new thread as a non-starter */
-        up(&xpc_discovery_exited);
+        complete(&xpc_discovery_exited);
         xpc_do_exit(xpcUnloading);
         return -EBUSY;
......
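DECLARE_MUTEX_LOCKED(), a semaphore created already locked and released exactly once by the exiting thread, has no struct mutex equivalent, since a mutex must be unlocked by its owner; a completion is the matching primitive, which is why the two thread-exit notifications above become DECLARE_COMPLETION(). A minimal sketch of that exit-notification pattern with a hypothetical worker (illustrative only):

#include <linux/completion.h>

static DECLARE_COMPLETION(worker_exited);    /* starts "not done" */

static int worker_fn(void *ignore)
{
    /* ... do work until told to stop ... */
    complete(&worker_exited);    /* mark this thread as having exited */
    return 0;
}

static void wait_for_worker_exit(void)
{
    wait_for_completion(&worker_exited);    /* sleeps until worker_fn() calls complete() */
}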
@@ -24,13 +24,15 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
 {
     struct ia64_sal_retval ret_stuff;
     u64 busnum;
+    u64 segment;
     ret_stuff.status = 0;
     ret_stuff.v0 = 0;
+    segment = soft->pbi_buscommon.bs_persist_segment;
     busnum = soft->pbi_buscommon.bs_persist_busnum;
-    SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, (u64) busnum,
-                    (u64) device, (u64) resp, 0, 0, 0, 0);
+    SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
+                    busnum, (u64) device, (u64) resp, 0, 0, 0);
     return (int)ret_stuff.v0;
 }
@@ -41,14 +43,16 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
 {
     struct ia64_sal_retval ret_stuff;
     u64 busnum;
+    u64 segment;
     ret_stuff.status = 0;
     ret_stuff.v0 = 0;
+    segment = soft->pbi_buscommon.bs_persist_segment;
     busnum = soft->pbi_buscommon.bs_persist_busnum;
     SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE,
-                    (u64) busnum, (u64) device, (u64) action,
-                    (u64) resp, 0, 0, 0);
+                    segment, busnum, (u64) device, (u64) action,
+                    (u64) resp, 0, 0);
     return (int)ret_stuff.v0;
 }
......
@@ -6,7 +6,7 @@
  * driver for that.
  *
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License
@@ -829,8 +829,8 @@ static int __init sn_sal_module_init(void)
     misc.name = DEVICE_NAME_DYNAMIC;
     retval = misc_register(&misc);
     if (retval != 0) {
-        printk
-            ("Failed to register console device using misc_register.\n");
+        printk(KERN_WARNING "Failed to register console "
+               "device using misc_register.\n");
         return -ENODEV;
     }
     sal_console_uart.major = MISC_MAJOR;
@@ -942,20 +942,21 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
     unsigned long flags = 0;
     struct sn_cons_port *port = &sal_console_port;
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
     static int stole_lock = 0;
-#endif
     BUG_ON(!port->sc_is_asynch);
     /* We can't look at the xmit buffer if we're not registered with serial core
      * yet. So only do the fancy recovery after registering
      */
-    if (port->sc_port.info) {
+    if (!port->sc_port.info) {
+        /* Not yet registered with serial core - simple case */
+        puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
+        return;
+    }
     /* somebody really wants this output, might be an
      * oops, kdb, panic, etc. make sure they get it. */
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
     if (spin_is_locked(&port->sc_port.lock)) {
         int lhead = port->sc_port.info->xmit.head;
         int ltail = port->sc_port.info->xmit.tail;
@@ -976,25 +977,20 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
             if (!spin_is_locked(&port->sc_port.lock)
                 || stole_lock) {
                 if (!stole_lock) {
-                    spin_lock_irqsave(&port->
-                                      sc_port.lock,
+                    spin_lock_irqsave(&port->sc_port.lock,
                                       flags);
                     got_lock = 1;
                 }
                 break;
             } else {
                 /* still locked */
-                if ((lhead !=
-                     port->sc_port.info->xmit.head)
+                if ((lhead != port->sc_port.info->xmit.head)
                     || (ltail !=
-                        port->sc_port.info->xmit.
-                        tail)) {
+                        port->sc_port.info->xmit.tail)) {
                     lhead =
-                        port->sc_port.info->xmit.
-                        head;
+                        port->sc_port.info->xmit.head;
                     ltail =
-                        port->sc_port.info->xmit.
-                        tail;
+                        port->sc_port.info->xmit.tail;
                     counter = 0;
                 }
             }
@@ -1002,8 +998,7 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
         /* flush anything in the serial core xmit buffer, raw */
         sn_transmit_chars(port, 1);
         if (got_lock) {
-            spin_unlock_irqrestore(&port->sc_port.lock,
-                                   flags);
+            spin_unlock_irqrestore(&port->sc_port.lock, flags);
             stole_lock = 0;
         } else {
             /* fell thru */
@@ -1012,18 +1007,10 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
         puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
     } else {
         stole_lock = 0;
-#endif
         spin_lock_irqsave(&port->sc_port.lock, flags);
         sn_transmit_chars(port, 1);
         spin_unlock_irqrestore(&port->sc_port.lock, flags);
-        puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
-        }
-#endif
-    }
-    else {
-        /* Not yet registered with serial core - simple case */
         puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
     }
 }
......
@@ -61,7 +61,7 @@ static inline void
 down (struct semaphore *sem)
 {
     might_sleep();
-    if (atomic_dec_return(&sem->count) < 0)
+    if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
         __down(sem);
 }
@@ -75,7 +75,7 @@ down_interruptible (struct semaphore * sem)
     int ret = 0;
     might_sleep();
-    if (atomic_dec_return(&sem->count) < 0)
+    if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
         ret = __down_interruptible(sem);
     return ret;
 }
@@ -85,7 +85,7 @@ down_trylock (struct semaphore *sem)
 {
     int ret = 0;
-    if (atomic_dec_return(&sem->count) < 0)
+    if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
         ret = __down_trylock(sem);
     return ret;
 }
@@ -93,7 +93,7 @@ down_trylock (struct semaphore *sem)
 static inline void
 up (struct semaphore * sem)
 {
-    if (atomic_inc_return(&sem->count) <= 0)
+    if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
         __up(sem);
 }
......
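The thresholds change because the two primitives return different values: atomic_dec_return() yields the counter's new value, while ia64_fetchadd(-1, ...) yields the value the counter held before the decrement, so "went negative" is written < 0 in the old form and < 1 in the new one (and symmetrically <= 0 becomes <= -1 in up()); the acq/rel arguments select acquire/release ordering on the atomic. A plain C model of the two checks (a user-space sketch with fetch-and-add emulated by a helper, not the kernel implementation):

/* Models ia64_fetchadd(): returns the value *count held before the update. */
static int fetch_and_add(int *count, int delta)
{
    int old = *count;    /* the real primitive does this atomically */
    *count += delta;
    return old;
}

static int model_down(int *count)
{
    /* pre-decrement value < 1 is the same condition as post-decrement value < 0 */
    return fetch_and_add(count, -1) < 1;    /* true: no free slot, caller would sleep in __down() */
}

static int model_up(int *count)
{
    /* pre-increment value <= -1 is the same condition as post-increment value <= 0 */
    return fetch_and_add(count, 1) <= -1;   /* true: sleepers exist, caller would wake one via __up() */
}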
@@ -18,6 +18,7 @@
 #include <linux/cache.h>
 #include <linux/hardirq.h>
+#include <linux/mutex.h>
 #include <asm/sn/types.h>
 #include <asm/sn/bte.h>
@@ -359,7 +360,7 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
  * the channel.
  */
 struct xpc_registration {
-    struct semaphore sema;
+    struct mutex mutex;
     xpc_channel_func func;    /* function to call */
     void *key;                /* pointer to user's key */
     u16 nentries;             /* #of msg entries in local msg queue */
......
@@ -19,6 +19,8 @@
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/sn/bte.h>
@@ -335,7 +337,6 @@ struct xpc_openclose_args {
  * and consumed by the intended recipient.
  */
 struct xpc_notify {
-    struct semaphore sema;    /* notify semaphore */
     volatile u8 type;         /* type of notification */
     /* the following two fields are only used if type == XPC_N_CALL */
@@ -465,8 +466,8 @@ struct xpc_channel {
     xpc_channel_func func;    /* user's channel function */
     void *key;                /* pointer to user's key */
-    struct semaphore msg_to_pull_sema;  /* next msg to pull serialization */
-    struct semaphore wdisconnect_sema;  /* wait for channel disconnect */
+    struct mutex msg_to_pull_mutex;     /* next msg to pull serialization */
+    struct completion wdisconnect_wait; /* wait for channel disconnect */
     struct xpc_openclose_args *local_openclose_args; /* args passed on */
                               /* opening or closing of channel */
......
@@ -18,6 +18,10 @@
 #include <asm/smp.h>
 #ifdef CONFIG_NUMA
+/* Nodes w/o CPUs are preferred for memory allocations, see build_zonelists */
+#define PENALTY_FOR_NODE_WITH_CPUS 255
 /*
  * Returns the number of the node containing CPU 'cpu'
  */
......