Commit 883a3acf authored by Tony Luck

[IA64] Re-implement spinaphores using ticket lock concepts

Bound the wait time for the ptcg_sem by using similar idea to the
ticket spin locks.  In this case we have only one instance of a
spinaphore, so make it 8 bytes rather than try to squeeze it into
4-bytes to keep the code simpler (and shorter).
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 36a07902
@@ -100,24 +100,36 @@ wrap_mmu_context (struct mm_struct *mm)
 * this primitive it can be moved up to a spinaphore.h header.
 */
struct spinaphore { struct spinaphore {
atomic_t cur; unsigned long ticket;
unsigned long serve;
}; };
static inline void spinaphore_init(struct spinaphore *ss, int val) static inline void spinaphore_init(struct spinaphore *ss, int val)
{ {
atomic_set(&ss->cur, val); ss->ticket = 0;
ss->serve = val;
} }
static inline void down_spin(struct spinaphore *ss) static inline void down_spin(struct spinaphore *ss)
{ {
while (unlikely(!atomic_add_unless(&ss->cur, -1, 0))) unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;
while (atomic_read(&ss->cur) == 0)
cpu_relax(); if (time_before(t, ss->serve))
return;
ia64_invala();
for (;;) {
asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
if (time_before(t, serve))
return;
cpu_relax();
}
} }
static inline void up_spin(struct spinaphore *ss) static inline void up_spin(struct spinaphore *ss)
{ {
atomic_add(1, &ss->cur); ia64_fetchadd(1, &ss->serve, rel);
} }
static struct spinaphore ptcg_sem;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment