Commit 7e992711 authored by Dave Anglin, committed by Helge Deller

parisc: Don't disable interrupts in cmpxchg and futex operations

I no longer think interrupts can be disabled in the futex and cmpxchg
operations because of COW breaks.  This is not ideal, but I suspect it's
the best we can do.

For the cmpxchg operations in syscall.S, we rely on the code to not
schedule off the gateway page.  For the futex, I added code to disable
preemption.

So far, I haven't seen the warnings with the attached change, but the
change is only lightly tested.
Signed-off-by: Dave Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
parent 014966dc
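
For readers skimming the diff: the futex side of the change boils down to the
sketch below, which mirrors futex_atomic_cmpxchg_inatomic() as modified by this
patch. The hashed LWS lock is now taken with preemption disabled rather than
interrupts, so a COW break taken by get_user() or put_user() while the lock is
held can still be serviced. The wrapper name futex_cmpxchg_sketch is made up
for illustration; the helpers and types are the kernel's own.

	/* Sketch only, not a drop-in: shows the post-patch locking discipline. */
	static int futex_cmpxchg_sketch(u32 *uval, u32 __user *uaddr,
					u32 oldval, u32 newval)
	{
		u32 val;

		/* preempt_disable() + arch_spin_lock() on the lock selected by
		 * hashing uaddr; interrupts stay enabled throughout. */
		_futex_spin_lock(uaddr);

		if (unlikely(get_user(val, uaddr) != 0)) {	/* may take a COW break */
			_futex_spin_unlock(uaddr);
			return -EFAULT;
		}

		if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
			_futex_spin_unlock(uaddr);
			return -EFAULT;
		}

		*uval = val;
		_futex_spin_unlock(uaddr);	/* arch_spin_unlock() + preempt_enable() */
		return 0;
	}

The assembly side in syscall.S has no preempt_disable() equivalent; as the
message above notes, it instead relies on the LWS code never scheduling while
executing on the gateway page.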
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -11,35 +11,34 @@
    sixteen four-word locks. */

 static inline void
-_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
+_futex_spin_lock(u32 __user *uaddr)
 {
 	extern u32 lws_lock_start[];
 	long index = ((long)uaddr & 0x3f8) >> 1;
 	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
-	local_irq_save(*flags);
+	preempt_disable();
 	arch_spin_lock(s);
 }

 static inline void
-_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
+_futex_spin_unlock(u32 __user *uaddr)
 {
 	extern u32 lws_lock_start[];
 	long index = ((long)uaddr & 0x3f8) >> 1;
 	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
 	arch_spin_unlock(s);
-	local_irq_restore(*flags);
+	preempt_enable();
 }

 static inline int
 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	unsigned long int flags;
 	int oldval, ret;
 	u32 tmp;

-	_futex_spin_lock_irqsave(uaddr, &flags);
 	ret = -EFAULT;
+	_futex_spin_lock(uaddr);
 	if (unlikely(get_user(oldval, uaddr) != 0))
 		goto out_pagefault_enable;
@@ -70,7 +69,7 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 	ret = -EFAULT;

 out_pagefault_enable:
-	_futex_spin_unlock_irqrestore(uaddr, &flags);
+	_futex_spin_unlock(uaddr);
 	if (!ret)
 		*oval = oldval;
@@ -83,7 +82,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
 	u32 val;
-	unsigned long flags;

 	/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
 	 * our gateway page, and causes no end of trouble...
@@ -100,19 +98,19 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	 * address. This should scale to a couple of CPUs.
 	 */
-	_futex_spin_lock_irqsave(uaddr, &flags);
+	_futex_spin_lock(uaddr);
 	if (unlikely(get_user(val, uaddr) != 0)) {
-		_futex_spin_unlock_irqrestore(uaddr, &flags);
+		_futex_spin_unlock(uaddr);
 		return -EFAULT;
 	}

 	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
-		_futex_spin_unlock_irqrestore(uaddr, &flags);
+		_futex_spin_unlock(uaddr);
 		return -EFAULT;
 	}

 	*uval = val;
-	_futex_spin_unlock_irqrestore(uaddr, &flags);
+	_futex_spin_unlock(uaddr);
 	return 0;
 }
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -597,13 +597,11 @@ cas_nocontend:
 # endif
 /* ENABLE_LWS_DEBUG */

-	rsm	PSW_SM_I, %r0			/* Disable interrupts */
 	/* COW breaks can cause contention on UP systems */
 	LDCW	0(%sr2,%r20), %r28		/* Try to acquire the lock */
 	cmpb,<>,n %r0, %r28, cas_action		/* Did we get it? */
 cas_wouldblock:
 	ldo	2(%r0), %r28			/* 2nd case */
-	ssm	PSW_SM_I, %r0
 	b	lws_exit			/* Contended... */
 	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
@@ -639,8 +637,6 @@ cas_action:
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
 #endif
-	/* Enable interrupts */
-	ssm	PSW_SM_I, %r0
 	/* Return to userspace, set no error */
 	b	lws_exit
 	copy	%r0, %r21
@@ -652,7 +648,6 @@ cas_action:
 #if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
 #endif
-	ssm	PSW_SM_I, %r0
 	b	lws_exit
 	ldo	-EFAULT(%r0),%r21		/* set errno */
 	nop
@@ -764,13 +759,11 @@ cas2_lock_start:
 	shlw	%r20, 4, %r20
 	add	%r20, %r28, %r20

-	rsm	PSW_SM_I, %r0			/* Disable interrupts */
 	/* COW breaks can cause contention on UP systems */
 	LDCW	0(%sr2,%r20), %r28		/* Try to acquire the lock */
 	cmpb,<>,n %r0, %r28, cas2_action	/* Did we get it? */
 cas2_wouldblock:
 	ldo	2(%r0), %r28			/* 2nd case */
-	ssm	PSW_SM_I, %r0
 	b	lws_exit			/* Contended... */
 	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
@@ -850,8 +843,6 @@ cas2_action:
 cas2_end:
 	/* Free lock */
 	stw,ma	%r20, 0(%sr2,%r20)
-	/* Enable interrupts */
-	ssm	PSW_SM_I, %r0
 	/* Return to userspace, set no error */
 	b	lws_exit
 	copy	%r0, %r21
@@ -860,7 +851,6 @@ cas2_end:
 	/* Error occurred on load or store */
 	/* Free lock */
 	stw,ma	%r20, 0(%sr2,%r20)
-	ssm	PSW_SM_I, %r0
 	ldo	1(%r0),%r28
 	b	lws_exit
 	ldo	-EFAULT(%r0),%r21		/* set errno */