Commit e0d5896b authored by Sami Tolvanen, committed by Will Deacon

arm64: lse: fix LSE atomics with LLVM's integrated assembler

Unlike gcc, clang considers each inline assembly block to be independent
and therefore, when using the integrated assembler for inline assembly,
any preambles that enable features must be repeated in each block.

This change defines __LSE_PREAMBLE and adds it to each inline assembly
block that contains LSE instructions, which allows those blocks to be
assembled with clang's integrated assembler as well.
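
As a minimal standalone illustration of the behaviour described above
(not kernel code; the file and function names below are made up), the
pattern the patch applies looks like this. GNU as also accepts a single
file-scope __asm__(".arch_extension lse") that covers every later block,
but clang's integrated assembler assembles each inline assembly block on
its own, so the directive has to appear inside the block itself:

/*
 * lse_preamble_demo.c - illustrative only, not part of this patch.
 * Builds with either toolchain, e.g.:
 *   clang --target=aarch64-linux-gnu -O2 -c lse_preamble_demo.c
 *   aarch64-linux-gnu-gcc -O2 -c lse_preamble_demo.c
 */
#define LSE_PREAMBLE ".arch armv8-a+lse\n"

static inline void lse_stadd(int i, int *counter)
{
        /*
         * The ".arch" directive is emitted inside this asm block, so the
         * integrated assembler enables LSE for exactly this block and
         * accepts the stadd instruction below.  Dropping LSE_PREAMBLE
         * here would still work with GNU as (given a file-scope
         * directive), but clang's integrated assembler would reject the
         * instruction.
         */
        asm volatile(LSE_PREAMBLE
                     "stadd %w[i], %[v]"
                     : [i] "+r" (i), [v] "+Q" (*counter));
}

void add_one(int *counter)
{
        lse_stadd(1, counter);
}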

Link: https://github.com/ClangBuiltLinux/linux/issues/671
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Tested-by: Andrew Murray <andrew.murray@arm.com>
Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Andrew Murray <andrew.murray@arm.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 46cf053e
arch/arm64/include/asm/atomic_lse.h

@@ -14,6 +14,7 @@
 static inline void __lse_atomic_##op(int i, atomic_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
 " " #asm_op " %w[i], %[v]\n" \
         : [i] "+r" (i), [v] "+Q" (v->counter) \
         : "r" (v)); \
@@ -30,6 +31,7 @@ ATOMIC_OP(add, stadd)
 static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
 " " #asm_op #mb " %w[i], %w[i], %[v]" \
         : [i] "+r" (i), [v] "+Q" (v->counter) \
         : "r" (v) \
@@ -58,6 +60,7 @@ static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
         u32 tmp; \
 \
         asm volatile( \
+        __LSE_PREAMBLE \
 " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
 " add %w[i], %w[i], %w[tmp]" \
         : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
@@ -77,6 +80,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory")
 static inline void __lse_atomic_and(int i, atomic_t *v)
 {
         asm volatile(
+        __LSE_PREAMBLE
 " mvn %w[i], %w[i]\n"
 " stclr %w[i], %[v]"
         : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -87,6 +91,7 @@ static inline void __lse_atomic_and(int i, atomic_t *v)
 static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
 " mvn %w[i], %w[i]\n" \
 " ldclr" #mb " %w[i], %w[i], %[v]" \
         : [i] "+&r" (i), [v] "+Q" (v->counter) \
@@ -106,6 +111,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
 static inline void __lse_atomic_sub(int i, atomic_t *v)
 {
         asm volatile(
+        __LSE_PREAMBLE
 " neg %w[i], %w[i]\n"
 " stadd %w[i], %[v]"
         : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -118,6 +124,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
         u32 tmp; \
 \
         asm volatile( \
+        __LSE_PREAMBLE \
 " neg %w[i], %w[i]\n" \
 " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
 " add %w[i], %w[i], %w[tmp]" \
@@ -139,6 +146,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory")
 static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
 " neg %w[i], %w[i]\n" \
 " ldadd" #mb " %w[i], %w[i], %[v]" \
         : [i] "+&r" (i), [v] "+Q" (v->counter) \
@@ -159,6 +167,7 @@ ATOMIC_FETCH_OP_SUB( , al, "memory")
 static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
 " " #asm_op " %[i], %[v]\n" \
         : [i] "+r" (i), [v] "+Q" (v->counter) \
         : "r" (v)); \
@@ -175,6 +184,7 @@ ATOMIC64_OP(add, stadd)
 static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
 " " #asm_op #mb " %[i], %[i], %[v]" \
         : [i] "+r" (i), [v] "+Q" (v->counter) \
         : "r" (v) \
@@ -203,6 +213,7 @@ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
         unsigned long tmp; \
 \
         asm volatile( \
+        __LSE_PREAMBLE \
 " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
 " add %[i], %[i], %x[tmp]" \
         : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
@@ -222,6 +233,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory")
 static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 {
         asm volatile(
+        __LSE_PREAMBLE
 " mvn %[i], %[i]\n"
 " stclr %[i], %[v]"
         : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -232,6 +244,7 @@ static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
 " mvn %[i], %[i]\n" \
 " ldclr" #mb " %[i], %[i], %[v]" \
         : [i] "+&r" (i), [v] "+Q" (v->counter) \
@@ -251,6 +264,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
 static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 {
         asm volatile(
+        __LSE_PREAMBLE
 " neg %[i], %[i]\n"
 " stadd %[i], %[v]"
         : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -263,6 +277,7 @@ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
         unsigned long tmp; \
 \
         asm volatile( \
+        __LSE_PREAMBLE \
 " neg %[i], %[i]\n" \
 " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
 " add %[i], %[i], %x[tmp]" \
@@ -284,6 +299,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
 static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
 { \
         asm volatile( \
+        __LSE_PREAMBLE \
 " neg %[i], %[i]\n" \
 " ldadd" #mb " %[i], %[i], %[v]" \
         : [i] "+&r" (i), [v] "+Q" (v->counter) \
@@ -305,6 +321,7 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
         unsigned long tmp;
 
         asm volatile(
+        __LSE_PREAMBLE
 "1: ldr %x[tmp], %[v]\n"
 " subs %[ret], %x[tmp], #1\n"
 " b.lt 2f\n"
@@ -332,6 +349,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
         unsigned long tmp; \
 \
         asm volatile( \
+        __LSE_PREAMBLE \
 " mov %" #w "[tmp], %" #w "[old]\n" \
 " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
 " mov %" #w "[ret], %" #w "[tmp]" \
@@ -379,6 +397,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \
         register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
 \
         asm volatile( \
+        __LSE_PREAMBLE \
 " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n" \
 " eor %[old1], %[old1], %[oldval1]\n" \
 " eor %[old2], %[old2], %[oldval2]\n" \
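For reference, here is roughly what one instance of the macro in the
first hunk expands to after this change, hand-expanded for
ATOMIC_OP(add, stadd); whitespace is simplified and the kernel's
atomic_t type is assumed:

static inline void __lse_atomic_add(int i, atomic_t *v)
{
        /*
         * __LSE_PREAMBLE is just a string literal, so it is concatenated
         * in front of the instruction template and the ".arch" directive
         * is emitted at the top of this one asm block only.
         */
        asm volatile(
        ".arch armv8-a+lse\n"
        " stadd %w[i], %[v]\n"
        : [i] "+r" (i), [v] "+Q" (v->counter)
        : "r" (v));
}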
arch/arm64/include/asm/lse.h

@@ -6,6 +6,8 @@
 
 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
 
+#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
+
 #include <linux/compiler_types.h>
 #include <linux/export.h>
 #include <linux/jump_label.h>
@@ -14,8 +16,6 @@
 #include <asm/atomic_lse.h>
 #include <asm/cpucaps.h>
 
-__asm__(".arch_extension lse");
-
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
 extern struct static_key_false arm64_const_caps_ready;
@@ -34,7 +34,7 @@ static inline bool system_uses_lse_atomics(void)
 
 /* In-line patching at runtime */
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
-        ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+        ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
 
 #else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
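The net effect on lse.h, reconstructed from the hunks above with
unchanged lines elided, is that the file-scope ".arch_extension lse"
directive is gone and the preamble is instead prepended, via C
string-literal concatenation, to the LSE alternative used by every
ARM64_LSE_ATOMIC_INSN call site:

#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)

#define __LSE_PREAMBLE ".arch armv8-a+lse\n"

/* ... includes and static key declarations unchanged ... */

/* In-line patching at runtime */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
        ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)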