Commit 77ba93a7 authored by Paul Mundt

sh: Fix up the SH-4A mutex fastpath semantics.

This fixes up the __mutex_fastpath_xxx() routines to match the semantics
noted in the comment. Previously these were looping rather than making a
single pass, which is counter-intuitive, as the slow path takes care of
the looping for us in the event of contention.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent c6f17cb2
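
As a reference for the intended behaviour, below is a minimal user-space
sketch of the single-pass fastpath, using C11 atomics rather than the
kernel's primitives: a single atomic_compare_exchange_strong() stands in
for the SH-4A movli.l/movco.l (LL/SC) pair, and folding the store's
success flag into the result mirrors the new "__res |= !__ex_flag" step.
The function name fastpath_lock_sketch is illustrative, not from the patch.

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Illustrative only: one LL/SC-style attempt, no looping here. */
	static inline void fastpath_lock_sketch(atomic_int *count,
						void (*fail_fn)(atomic_int *))
	{
		int expected = atomic_load(count);	/* "movli.l" (load-linked) */
		int desired  = expected - 1;		/* "add #-1"                */
		bool stored  = atomic_compare_exchange_strong(count, &expected,
							      desired); /* "movco.l" */

		/*
		 * Fold the store's success flag into the result, as the
		 * patch's "__res |= !__ex_flag" does: a lost reservation or
		 * a contended count both yield a nonzero result.
		 */
		if ((desired | !stored) != 0)
			fail_fn(count);		/* slow path does the looping */
	}

In the uncontended case the count goes 1 -> 0, the result is zero, and
fail_fn() is never called; any other outcome falls through to the slow path
exactly once rather than spinning in the fastpath.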
@@ -21,16 +21,18 @@
 static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	int __res;
+	int __ex_flag, __res;
 
 	__asm__ __volatile__ (
-		"movli.l	@%1, %0	\n"
-		"dt		%0	\n"
-		"movco.l	%0, @%1	\n"
-		: "=&z" (__res)
+		"movli.l	@%2, %0	\n"
+		"add		#-1, %0	\n"
+		"movco.l	%0, @%2	\n"
+		"movt		%1	\n"
+		: "=&z" (__res), "=&r" (__ex_flag)
 		: "r" (&(count)->counter)
 		: "t");
 
+	__res |= !__ex_flag;
 	if (unlikely(__res != 0))
 		fail_fn(count);
 }
@@ -38,16 +40,18 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	int __res;
+	int __ex_flag, __res;
 
 	__asm__ __volatile__ (
-		"movli.l	@%1, %0	\n"
-		"dt		%0	\n"
-		"movco.l	%0, @%1	\n"
-		: "=&z" (__res)
+		"movli.l	@%2, %0	\n"
+		"add		#-1, %0	\n"
+		"movco.l	%0, @%2	\n"
+		"movt		%1	\n"
+		: "=&z" (__res), "=&r" (__ex_flag)
 		: "r" (&(count)->counter)
 		: "t");
 
+	__res |= !__ex_flag;
 	if (unlikely(__res != 0))
 		__res = fail_fn(count);
 
@@ -57,18 +61,19 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	int __res;
+	int __ex_flag, __res;
 
 	__asm__ __volatile__ (
-		"1: movli.l	@%1, %0	\n\t"
+		"movli.l	@%2, %0	\n\t"
 		"add		#1, %0	\n\t"
-		"movco.l	%0, @%1	\n\t"
-		"bf		1b\n\t"
-		: "=&z" (__res)
+		"movco.l	%0, @%2	\n\t"
+		"movt		%1	\n\t"
+		: "=&z" (__res), "=&r" (__ex_flag)
 		: "r" (&(count)->counter)
 		: "t");
 
-	if (unlikely(__res <= 0))
+	__res |= !__ex_flag;
+	if (unlikely(__res != 0))
 		fail_fn(count);
 }