Commit 9d40ee20 authored by Tony Luck

[IA64] Squeeze ticket locks back into 4 bytes.

Linus pointed out that other people have spent large amounts of time
and effort to optimize the layout of frequently used structures. Often
these have embedded locks, and the assumption is that a lock takes
4 bytes.  Linus also pointed out how to work with the limited options
for atomic instructions on Itanium.
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 0eca52a9
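To make the new layout concrete before reading the diff, here is a minimal user-space model of the scheme — a sketch, not the kernel code. It assumes little-endian layout (as on Linux/ia64) and uses GCC's __atomic builtins as stand-ins for ia64_fetchadd and the ld4.c.nc/ld2.bias loads; the model_lock names are hypothetical. next_ticket lives in bits 14..0, now_serving in bits 31..17, and the two pad bits in between absorb the carry when next_ticket wraps.

#include <stdint.h>

#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1u << TICKET_BITS) - 1)

/* Hypothetical user-space stand-in for the kernel's raw_spinlock_t. */
struct model_lock {
	volatile uint32_t lock;	/* now_serving:15 | pad:2 | next_ticket:15 */
};

static void model_spin_lock(struct model_lock *l)
{
	/* Take a ticket: atomically bump next_ticket in the low 15 bits.
	 * __atomic_fetch_add stands in for ia64_fetchadd(1, p, acq). */
	uint32_t ticket = __atomic_fetch_add(&l->lock, 1, __ATOMIC_ACQUIRE);

	/* Spin until now_serving (bits 31..17) equals our ticket number
	 * (the low 15 bits of the pre-increment value). */
	while (((l->lock >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)
		;	/* the kernel relaxes here and re-loads with ld4.c.nc */
}

static void model_spin_unlock(struct model_lock *l)
{
	/* now_serving sits in bits 15..1 of the upper halfword (little-endian
	 * assumed).  Adding 2 advances it by one; clearing bit 0 drops any
	 * carry that next_ticket has pushed into the pad bit (word bit 16). */
	volatile uint16_t *p = (volatile uint16_t *)&l->lock + 1;
	*p = (uint16_t)((*p + 2) & ~1);
}

The XOR-and-mask test is the same one the commit uses throughout: the lock is ours exactly when the two 15-bit fields agree.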
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -25,61 +25,68 @@
  * by atomically noting the tail and incrementing it by one (thus adding
  * ourself to the queue and noting our position), then waiting until the head
  * becomes equal to the the initial value of the tail.
+ * The pad bits in the middle are used to prevent the next_ticket number
+ * overflowing into the now_serving number.
  *
- *   63                     32  31                      0
+ *   31             17  16    15  14                    0
  *  +----------------------------------------------------+
- *  |  next_ticket_number      |  now_serving            |
+ *  |  now_serving     | padding |   next_ticket         |
  *  +----------------------------------------------------+
  */
 
-#define TICKET_SHIFT	32
+#define TICKET_SHIFT	17
+#define TICKET_BITS	15
+#define TICKET_MASK	((1 << TICKET_BITS) - 1)
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock, turn, now_serving;
+	int	*p = (int *)&lock->lock, ticket, serve;
 
-	now_serving = *p;
-	turn = ia64_fetchadd(1, p+1, acq);
+	ticket = ia64_fetchadd(1, p, acq);
 
-	if (turn == now_serving)
+	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
 		return;
 
-	do {
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
+
+		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
 		cpu_relax();
-	} while (ACCESS_ONCE(*p) != turn);
+	}
 }
 
 static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock), try;
-
-	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
-		try = tmp + (1L << TICKET_SHIFT);
+	int tmp = ACCESS_ONCE(lock->lock);
 
-		return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
-	}
+	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
 	return 0;
 }
 
 static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock;
+	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
-	(void)ia64_fetchadd(1, p, rel);
+	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
 static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
 static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
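The unlock arithmetic deserves a second look: the upper halfword of the lock word holds now_serving in bits 15..1, with one pad bit (word bit 16) in bit 0, so adding 2 advances now_serving by one while masking off bit 0 discards any carry that next_ticket overflow has pushed into the padding. A hypothetical user-space check of that arithmetic, not part of the commit:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Upper halfword of the lock word: now_serving in bits 15..1,
	 * one pad bit (word bit 16) in bit 0. */
	uint16_t half = 0xFFFE;			/* now_serving == 0x7fff */

	half = (uint16_t)((half + 2) & ~1);
	assert(half == 0);			/* serving wrapped cleanly to 0 */

	half = (uint16_t)((0x0003 + 2) & ~1);	/* serving == 1, pad bit set */
	assert(half == 4);			/* serving == 2, pad stripped */
	return 0;
}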
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -6,7 +6,7 @@
 #endif
 
 typedef struct {
-	volatile unsigned long lock;
+	volatile unsigned int lock;
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
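This spinlock_types.h hunk is what actually delivers the four bytes: unsigned long is 8 bytes on ia64, so every structure embedding a raw_spinlock_t shrinks and neighboring fields can pack into the freed slot. A sketch of the effect; struct hot_struct and its fields are made up for illustration:

#include <stdio.h>

/* The new 4-byte lock from this commit. */
typedef struct {
	volatile unsigned int lock;
} raw_spinlock_t;

/* Hypothetical frequently-used structure embedding a lock. */
struct hot_struct {
	raw_spinlock_t	lock;	/* 4 bytes, down from 8 */
	unsigned int	count;	/* now shares the same 8-byte slot */
};

int main(void)
{
	printf("sizeof(raw_spinlock_t) = %zu\n", sizeof(raw_spinlock_t));
	printf("sizeof(struct hot_struct) = %zu\n", sizeof(struct hot_struct));
	return 0;
}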