Commit 0bc671d3 authored by Will Deacon

arm64: cmpxchg: avoid "cc" clobber in ll/sc routines

We can perform the cmpxchg comparison using eor and cbnz, which avoids
the "cc" clobber for the ll/sc case and, consequently, for the LSE case,
where we may have to fall back on the ll/sc code at runtime.
Reviewed-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent e9a4b795
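
The point of the change: cmp is an alias of subs and therefore writes the
NZCV condition flags, which forces the asm to declare a "cc" clobber; eor
computes the same equality test as a plain data operation, and cbnz branches
on its result without reading or writing the flags. Below is a minimal
user-space sketch of the pattern, assuming an AArch64 target and GNU
extended asm (the cas32 name and file are hypothetical, and the kernel's
smp_mb() barriers are omitted):

/* cas32.c: flag-free compare-and-swap built from ldxr/stxr.
 * Build on an AArch64 machine: gcc -O2 -o cas32 cas32.c
 */
#include <stdio.h>

static inline int cas32(int *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	asm volatile("// cas32\n"
	"1:	ldxr	%w1, %2\n"		/* load-exclusive *ptr */
	"	eor	%w0, %w1, %w3\n"	/* tmp = oldval ^ old */
	"	cbnz	%w0, 2f\n"		/* nonzero => values differ, bail out */
	"	stxr	%w0, %w4, %2\n"		/* store-exclusive; %w0 = 0 on success */
	"	cbnz	%w0, 1b\n"		/* exclusivity lost, retry */
	"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (*ptr)
	: "Lr" (old), "r" (new));		/* note: no "cc" clobber needed */

	return oldval;
}

int main(void)
{
	int v = 42;

	printf("%d\n", cas32(&v, 42, 43));	/* prints 42; v becomes 43 */
	printf("%d\n", cas32(&v, 99, 7));	/* prints 43; v unchanged */
	return 0;
}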
@@ -101,14 +101,13 @@ __LL_SC_PREFIX(atomic_cmpxchg(atomic_t *ptr, int old, int new))
 	asm volatile("// atomic_cmpxchg\n"
 "1:	ldxr	%w1, %2\n"
-"	cmp	%w1, %w3\n"
-"	b.ne	2f\n"
+"	eor	%w0, %w1, %w3\n"
+"	cbnz	%w0, 2f\n"
 "	stxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
+	: "Lr" (old), "r" (new));
 
 	smp_mb();
 	return oldval;
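
Note the immediate-operand constraint changing along with the instruction:
GCC's AArch64 "I" constraint accepts the add/sub-class immediates that cmp
can encode, whereas eor needs a logical (bitmask) immediate, matched by "L",
hence "Ir" (old) becoming "Lr" (old).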
@@ -179,14 +178,13 @@ __LL_SC_PREFIX(atomic64_cmpxchg(atomic64_t *ptr, long old, long new))
 	asm volatile("// atomic64_cmpxchg\n"
 "1:	ldxr	%1, %2\n"
-"	cmp	%1, %3\n"
-"	b.ne	2f\n"
+"	eor	%0, %1, %3\n"
+"	cbnz	%0, 2f\n"
 "	stxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
+	: "Lr" (old), "r" (new));
 
 	smp_mb();
 	return oldval;
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	"	mov	%w[ret], w30")
 	: [ret] "+r" (x0), [v] "+Q" (ptr->counter)
 	: [old] "r" (w1), [new] "r" (w2)
-	: "x30", "cc", "memory");
+	: "x30", "memory");
 
 	return x0;
 }
@@ -313,7 +313,7 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 	"	mov	%[ret], x30")
 	: [ret] "+r" (x0), [v] "+Q" (ptr->counter)
 	: [old] "r" (x1), [new] "r" (x2)
-	: "x30", "cc", "memory");
+	: "x30", "memory");
 
 	return x0;
 }
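
With the ll/sc sequences no longer writing the flags, the LSE variants can
drop "cc" from their clobber lists too: as the commit message says, the
clobber was only needed because these routines may branch-and-link (note the
x30 clobber) to the out-of-line ll/sc fallback at runtime, and that fallback
now leaves the flags untouched.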