Commit ef08f522 authored by Len Brown

[ACPI] global lock macro fixes (Paul Menage, Luming Yu)

  http://bugzilla.kernel.org/show_bug.cgi?id=1669
parent 8ec01320
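For context: the lock word these macros manipulate is the global-lock field in the FACS, where bit 0 is the pending flag and bit 1 the owned flag (per the ACPI spec). The acquire step computes new = ((old & ~0x3) + 2) + ((old >> 1) & 0x1), which always sets owned and additionally sets pending when the lock was already owned; the attempt succeeds exactly when the resulting word is below 3 (hence the return of -1 for success, 0 for contention). A minimal user-space sketch of those transitions (function and variable names are illustrative, not from the patch):

```c
#include <stdio.h>

/* FACS global-lock word: bit 0 = pending, bit 1 = owned. */
static unsigned int acquire_next(unsigned int old)
{
	/* Always set "owned"; also set "pending" if already owned. */
	return ((old & ~0x3) + 2) + ((old >> 1) & 0x1);
}

int main(void)
{
	unsigned int old;

	for (old = 0; old <= 3; old++) {
		unsigned int new = acquire_next(old);

		printf("old=%u -> new=%u (%s)\n", old, new,
		       new < 3 ? "acquired" : "contended, must wait");
	}
	return 0;
}
```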
@@ -61,33 +61,36 @@
  * Immediate values in the assembly are preceded by "$" as in "$0x1"
  * The final asm parameter are the operation altered non-output registers.
  */
+
+static inline int
+__acpi_acquire_global_lock (unsigned int *lock)
+{
+	unsigned int old, new, val;
+	do {
+		old = *lock;
+		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
+		val = cmpxchg(lock, old, new);
+	} while (unlikely (val != old));
+	return (new < 3) ? -1 : 0;
+}
+
+static inline int
+__acpi_release_global_lock (unsigned int *lock)
+{
+	unsigned int old, new, val;
+	do {
+		old = *lock;
+		new = old & ~0x3;
+		val = cmpxchg(lock, old, new);
+	} while (unlikely (val != old));
+	return old & 0x1;
+}
 
 #define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
-    do { \
-        int dummy; \
-        asm("1: movl (%1),%%eax;" \
-            "movl %%eax,%%edx;" \
-            "andl %2,%%edx;" \
-            "btsl $0x1,%%edx;" \
-            "adcl $0x0,%%edx;" \
-            "lock; cmpxchgl %%edx,(%1);" \
-            "jnz 1b;" \
-            "cmpb $0x3,%%dl;" \
-            "sbbl %%eax,%%eax" \
-            :"=a"(Acq),"=c"(dummy):"c"(GLptr),"i"(~1L):"dx"); \
-    } while(0)
+	((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr))
 
 #define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
-    do { \
-        int dummy; \
-        asm("1: movl (%1),%%eax;" \
-            "movl %%eax,%%edx;" \
-            "andl %2,%%edx;" \
-            "lock; cmpxchgl %%edx,(%1);" \
-            "jnz 1b;" \
-            "andl $0x1,%%eax" \
-            :"=a"(Acq),"=c"(dummy):"c"(GLptr),"i"(~3L):"dx"); \
-    } while(0)
+	((Acq) = __acpi_release_global_lock((unsigned int *) GLptr))
 
 /*
  * Math helper asm macros
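On i386 the hand-rolled inline assembly is replaced by plain C built on the kernel's cmpxchg(), which is easier to audit and leaves register allocation to the compiler. Note what the release path returns: it clears both bits and hands back the old pending bit, so the caller knows whether it must signal waiters (by writing the GBL_RLS bit). A hedged user-space sketch of just that logic, without the atomicity the real cmpxchg() loop provides:

```c
#include <stdio.h>

/* Sketch only: the kernel version retries with cmpxchg() so the
 * read-modify-write is atomic; this plain version is not. */
static int release_global_lock(unsigned int *lock)
{
	unsigned int old = *lock;

	*lock = old & ~0x3;	/* clear owned (bit 1) and pending (bit 0) */
	return old & 0x1;	/* nonzero: a waiter set pending; signal it */
}

int main(void)
{
	unsigned int lock = 0x3;	/* owned, with a waiter pending */

	printf("pending=%d, lock now %u\n",
	       release_global_lock(&lock), lock);
	return 0;
}
```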
@@ -60,7 +60,7 @@ __acpi_acquire_global_lock (unsigned int *lock)
 	do {
 		old = *lock;
 		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
-		val = cmpxchg4_locked(lock, new, old);
+		val = cmpxchg(lock, old, new);
 	} while (unlikely (val != old));
 	return (new < 3) ? -1 : 0;
 }
@@ -72,7 +72,7 @@ __acpi_release_global_lock (unsigned int *lock)
 	do {
 		old = *lock;
 		new = old & ~0x3;
-		val = cmpxchg4_locked(lock, new, old);
+		val = cmpxchg(lock, old, new);
 	} while (unlikely (val != old));
 	return old & 0x1;
 }
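The x86-64 side had the same loop structure but called cmpxchg4_locked(lock, new, old), i.e. with old and new swapped relative to the generic cmpxchg(ptr, old, new) that the patch switches to (and the helper itself was unsound; see the system.h hunk below). The corrected loop is the standard compare-and-swap retry idiom. Here is a user-space equivalent using a GCC builtin as a stand-in for the kernel's cmpxchg(), an assumption for illustration only:

```c
#include <stdio.h>

static unsigned int cmpxchg_u32(unsigned int *ptr, unsigned int old,
				unsigned int new)
{
	/* Stand-in for the kernel's cmpxchg(); note the (ptr, old, new)
	 * argument order that the patched callers now follow. */
	return __sync_val_compare_and_swap(ptr, old, new);
}

int main(void)
{
	unsigned int lock = 0, old, new, val;

	do {	/* same retry shape as the patched acquire path */
		old = lock;
		new = ((old & ~0x3) + 2) + ((old >> 1) & 0x1);
		val = cmpxchg_u32(&lock, old, new);
	} while (val != old);

	printf("lock=%u, acquired=%s\n", lock, new < 3 ? "yes" : "no");
	return 0;
}
```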
@@ -226,13 +226,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
 					(unsigned long)(n),sizeof(*(ptr))))
 
-static inline __u32 cmpxchg4_locked(__u32 *ptr, __u32 old, __u32 new)
-{
-	asm volatile("lock ; cmpxchgl %k1,%2" :
-		     "=r" (new) : "0" (old), "m" (*(__u32 *)ptr) : "memory");
-	return new;
-}
-
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
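Removing the helper outright, rather than fixing it, looks right: reading its constraints, lock cmpxchgl implicitly compares against %eax, yet no operand is bound to %eax (nor is it clobbered), and new is never passed to the instruction at all, so on a successful compare the register holding old is stored back and the function returns old unchanged. The generic cmpxchg() above already emits a correct lock cmpxchgl for 4-byte operands. An annotated copy of the removed body (the comments are editorial, not from the patch):

```c
static inline __u32 cmpxchg4_locked(__u32 *ptr, __u32 old, __u32 new)
{
	/* Broken: %0 and %1 share one arbitrary register ("=r"/"0"), so
	 * the comparand never reaches %eax, which cmpxchgl uses
	 * implicitly, and 'new' is not an asm operand at all, so it is
	 * never written to memory. */
	asm volatile("lock ; cmpxchgl %k1,%2" :
		     "=r" (new) : "0" (old), "m" (*(__u32 *)ptr) : "memory");
	return new;
}
```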