Commit fbdc8f0f authored by Helge Deller

parisc: Rework arch_rw locking functions

Clean up the arch read/write locking functions based on the arc
implementation. This improves readability of those functions.
Signed-off-by: Helge Deller <deller@gmx.de>
parent 2772f0ef
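
For orientation before the diff: the counter scheme this patch adopts from arc can be modeled in ordinary user-space C. The following is a sketch only, not the kernel code — pthread_mutex_t stands in for arch_spinlock_t, the kernel's local_irq_save()/local_irq_restore() pairs are omitted, and all model_* names are invented for illustration.

	/*
	 * Illustrative user-space model of the counter scheme (a sketch:
	 * pthread_mutex_t replaces arch_spinlock_t, IRQ handling omitted).
	 */
	#include <pthread.h>

	#define RW_UNLOCKED 0x01000000		/* mirrors __ARCH_RW_LOCK_UNLOCKED__ */

	struct model_rwlock {
		pthread_mutex_t lock_mutex;	/* serializes every access to counter */
		unsigned int counter;		/* RW_UNLOCKED = free, 0 = write-locked */
	};

	/* Returns 1 if the read lock was taken, 0 if a writer holds the lock. */
	static int model_read_trylock(struct model_rwlock *rw)
	{
		int ret = 0;

		pthread_mutex_lock(&rw->lock_mutex);
		if (rw->counter > 0) {	/* 0 would mean a writer holds it */
			rw->counter--;	/* each reader consumes one slot */
			ret = 1;
		}
		pthread_mutex_unlock(&rw->lock_mutex);
		return ret;
	}

	/* Returns 1 only when no reader and no writer holds the lock. */
	static int model_write_trylock(struct model_rwlock *rw)
	{
		int ret = 0;

		pthread_mutex_lock(&rw->lock_mutex);
		if (rw->counter == RW_UNLOCKED) {
			rw->counter = 0;	/* claim it exclusively */
			ret = 1;
		}
		pthread_mutex_unlock(&rw->lock_mutex);
		return ret;
	}

The blocking lock operations then simply spin on these trylock primitives, exactly as the patch does below.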
@@ -67,116 +67,93 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
- * Linux rwlocks are unfair to writers; they can be starved for an indefinite
- * time by readers.  With care, they can also be taken in interrupt context.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
- * In the PA-RISC implementation, we have a spinlock and a counter.
- * Readers use the lock to serialise their access to the counter (which
- * records how many readers currently hold the lock).
- * Writers hold the spinlock, preventing any readers or other writers from
- * grabbing the rwlock.
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
  */
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_lock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
+	int ret = 0;
 	unsigned long flags;
-	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter++;
-	arch_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned long flags;
 	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter--;
-	arch_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
+	arch_spin_lock(&(rw->lock_mutex));
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned long flags;
- retry:
-	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		rw->counter++;
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-		return 1;
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 */
+	if (rw->counter > 0) {
+		rw->counter--;
+		ret = 1;
 	}
 
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
-	/* If write-locked, we fail to acquire the lock */
-	if (rw->counter < 0)
-		return 0;
 
-	/* Wait until we have a realistic chance at the lock */
-	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
-		cpu_relax();
-
-	goto retry;
+	return ret;
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void arch_write_lock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
+	int ret = 0;
 	unsigned long flags;
-retry:
+
 	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
+	arch_spin_lock(&(rw->lock_mutex));
 
-	if (rw->counter != 0) {
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-
-		while (rw->counter != 0)
-			cpu_relax();
-
-		goto retry;
-	}
-
-	rw->counter = -1; /* mark as write-locked */
-	mb();
-	local_irq_restore(flags);
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 */
+	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+		rw->counter = 0;
+		ret = 1;
+	}
+
+	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
+
+	return ret;
 }
 
-static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	rw->counter = 0;
-	arch_spin_unlock(&rw->lock);
+	while (!arch_read_trylock(rw))
+		cpu_relax();
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	unsigned long flags;
-	int result = 0;
+	while (!arch_write_trylock(rw))
+		cpu_relax();
+}
 
-	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		if (rw->counter == 0) {
-			rw->counter = -1;
-			result = 1;
-		} else {
-			/* Read-locked.  Oh well. */
-			arch_spin_unlock(&rw->lock);
-		}
-	}
-	local_irq_restore(flags);
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
 
-	return result;
+	local_irq_save(flags);
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter++;
+	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
 }
 
 #endif /* __ASM_SPINLOCK_H */
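
A usage note on the hunk above: kernel code never calls these arch_* functions directly; they back the generic rwlock API. A minimal caller might look like the sketch below, in which example_lock and example_value are invented for illustration (DEFINE_RWLOCK, read_lock() and friends are the standard primitives reached via <linux/spinlock.h>).

	#include <linux/spinlock.h>	/* pulls in the rwlock_t API */

	static DEFINE_RWLOCK(example_lock);	/* hypothetical lock */
	static int example_value;		/* hypothetical shared data */

	static int example_read(void)
	{
		int v;

		read_lock(&example_lock);	/* many readers may enter at once */
		v = example_value;
		read_unlock(&example_lock);
		return v;
	}

	static void example_write(int v)
	{
		write_lock(&example_lock);	/* excludes readers and other writers */
		example_value = v;
		write_unlock(&example_lock);
	}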
@@ -12,11 +12,19 @@ typedef struct {
 #endif
 } arch_spinlock_t;
 
+/* counter:
+ * Unlocked     : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock   : 0x0, but only if prior value is "unlocked" 0x0100_0000
+ */
 typedef struct {
-	arch_spinlock_t lock;
-	volatile int counter;
+	arch_spinlock_t		lock_mutex;
+	volatile unsigned int	counter;
 } arch_rwlock_t;
 
-#define __ARCH_RW_LOCK_UNLOCKED	{ __ARCH_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED__	0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED	{ .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED,	\
+					.counter = __ARCH_RW_LOCK_UNLOCKED__ }
 
 #endif
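
To make the new encoding concrete: the counter starts at the unlocked value 0x01000000, and each reader decrements it, so N concurrent readers leave it at 0x01000000 - N (a single reader reads 0x00FFFFFF). A writer succeeds only on seeing the exact unlocked value and drops the counter to 0, which is why at most 2^24 - 1 simultaneous readers are representable.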