Commit d791a4da authored by Linus Torvalds

Merge tag 'x86-asm-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asm updates from Ingo Molnar:

 - Clean up & fix asm() operand modifiers & constraints

 - Misc cleanups

* tag 'x86-asm-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/alternatives: Remove a superfluous newline in _static_cpu_has()
  x86/asm/64: Clean up memset16(), memset32(), memset64() assembly constraints in <asm/string_64.h>
  x86/asm: Use "m" operand constraint in WRUSSQ asm template
  x86/asm: Use %a instead of %P operand modifier in asm templates
  x86/asm: Use %c/%n instead of %P operand modifier in asm templates
  x86/asm: Remove %P operand modifier from altinstr asm templates
parents 019040fb a0c8cf97
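
Background note (not part of the commit message): %P is an undocumented, x86-specific operand modifier, whereas %c, %n and %a are documented GCC operand modifiers, which is why this series moves the asm templates over to them. Below is a minimal sketch of what %c and %n emit for an "i" operand; STACK_SZ and my_func are made-up names for illustration, and a non-PIC x86-64 build is assumed:

	#define STACK_SZ 4096			/* illustrative constant */
	extern void my_func(void);		/* illustrative symbol   */

	static inline char *stack_end_sketch(void)
	{
		char *end;

		/* %n1 prints the negated constant, so a positive value can
		 * be passed in: this emits "lea -4096(%rsp), <reg>". */
		asm("lea %n1(%%rsp), %0" : "=r" (end) : "i" (STACK_SZ));
		return end;
	}

	static inline void call_sketch(void)
	{
		/* %c0 prints the bare symbol with no '$' prefix, so this
		 * emits "call my_func" rather than the invalid
		 * "call $my_func".  Call clobbers are omitted here; this
		 * only illustrates what the modifier prints. */
		asm volatile("call %c0" : : "i" (my_func));
	}
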
@@ -119,8 +119,8 @@ static void init_heap(void)
 	char *stack_end;
 
 	if (boot_params.hdr.loadflags & CAN_USE_HEAP) {
-		asm("leal %P1(%%esp),%0"
-		    : "=r" (stack_end) : "i" (-STACK_SIZE));
+		asm("leal %n1(%%esp),%0"
+		    : "=r" (stack_end) : "i" (STACK_SIZE));
 
 		heap_end = (char *)
 			((size_t)boot_params.hdr.heap_end_ptr + 0x200);
@@ -307,7 +307,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
 /* Like alternative_io, but for replacing a direct call with another one. */
 #define alternative_call(oldfunc, newfunc, ft_flags, output, input...) \
-	asm_inline volatile (ALTERNATIVE("call %P[old]", "call %P[new]", ft_flags) \
+	asm_inline volatile (ALTERNATIVE("call %c[old]", "call %c[new]", ft_flags) \
 		: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
 
 /*
@@ -318,8 +318,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
  */
 #define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
 			   output, input...) \
-	asm_inline volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", ft_flags1,\
-		"call %P[new2]", ft_flags2) \
+	asm_inline volatile (ALTERNATIVE_2("call %c[old]", "call %c[new1]", ft_flags1, \
+		"call %c[new2]", ft_flags2) \
 		: output, ASM_CALL_CONSTRAINT \
 		: [old] "i" (oldfunc), [new1] "i" (newfunc1), \
 		  [new2] "i" (newfunc2), ## input)
@@ -92,7 +92,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
 {
 	volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
 
-	alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP,
+	alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP,
 		       ASM_OUTPUT2("=r" (v), "=m" (*addr)),
 		       ASM_OUTPUT2("0" (v), "m" (*addr)));
 }
@@ -50,7 +50,7 @@ static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
 #ifdef CONFIG_X86_CMPXCHG64
 #define __alternative_atomic64(f, g, out, in...) \
-	asm volatile("call %P[func]" \
+	asm volatile("call %c[func]" \
 		     : out : [func] "i" (atomic64_##g##_cx8), ## in)
 
 #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
@@ -172,11 +172,10 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
  */
 static __always_inline bool _static_cpu_has(u16 bit)
 {
-	asm goto(
-		ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
+	asm goto(ALTERNATIVE_TERNARY("jmp 6f", %c[feature], "", "jmp %l[t_no]")
 		".pushsection .altinstr_aux,\"ax\"\n"
 		"6:\n"
-		" testb %[bitnum]," _ASM_RIP(%P[cap_byte]) "\n"
+		" testb %[bitnum], %a[cap_byte]\n"
 		" jnz %l[t_yes]\n"
 		" jmp %l[t_no]\n"
 		".popsection\n"
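
Side note on the %a modifier used above (not from the diff itself): %a substitutes a memory reference with the operand treated as an address, so a symbolic "i" operand typically comes out as "sym(%rip)" on 64-bit builds, which is what the open-coded _ASM_RIP() used to provide. A rough sketch with a made-up flags_byte symbol, again assuming a non-PIC x86-64 build:

	extern unsigned char flags_byte;	/* illustrative symbol */

	static inline unsigned char read_flags_sketch(void)
	{
		unsigned char out;

		/* "%a1" prints the address operand as a memory reference,
		 * typically "flags_byte(%rip)" here, instead of the
		 * "$flags_byte" that a plain "%1" would produce.  This only
		 * illustrates the addressing form, not a full read barrier. */
		asm("movb %a1, %0" : "=r" (out) : "i" (&flags_byte));
		return out;
	}
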
@@ -100,7 +100,7 @@
 }
 
 #define ASM_CALL_ARG0 \
-	"call %P[__func]	\n" \
+	"call %c[__func]	\n" \
 	ASM_REACHABLE
 
 #define ASM_CALL_ARG1 \
@@ -586,7 +586,7 @@ extern char ignore_fpu_irq;
 # define BASE_PREFETCH		""
 # define ARCH_HAS_PREFETCH
 #else
-# define BASE_PREFETCH		"prefetcht0 %P1"
+# define BASE_PREFETCH		"prefetcht0 %1"
 #endif
 
 /*
@@ -597,7 +597,7 @@ extern char ignore_fpu_irq;
  */
 static inline void prefetch(const void *x)
 {
-	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
+	alternative_input(BASE_PREFETCH, "prefetchnta %1",
 			  X86_FEATURE_XMM,
 			  "m" (*(const char *)x));
 }
@@ -609,7 +609,7 @@ static inline void prefetch(const void *x)
  */
 static __always_inline void prefetchw(const void *x)
 {
-	alternative_input(BASE_PREFETCH, "prefetchw %P1",
+	alternative_input(BASE_PREFETCH, "prefetchw %1",
 			  X86_FEATURE_3DNOWPREFETCH,
 			  "m" (*(const char *)x));
 }
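
Side note on the %P1 -> %1 changes above and in the clflush hunk below (not from the diff): when the operand already has an "m" constraint, a plain %1 expands to the complete memory reference, so no modifier is needed at all. A tiny illustrative helper, not taken from the header:

	/* A plain "%0" already expands to a full memory operand
	 * (base, index, displacement) for an "m" constraint. */
	static inline void prefetch_sketch(const void *p)
	{
		asm volatile("prefetcht0 %0" : : "m" (*(const char *)p));
	}
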
@@ -182,8 +182,8 @@ static __always_inline void clflush(volatile void *__p)
 static inline void clflushopt(volatile void *__p)
 {
-	alternative_io(".byte 0x3e; clflush %P0",
-		       ".byte 0x66; clflush %P0",
+	alternative_io(".byte 0x3e; clflush %0",
+		       ".byte 0x66; clflush %0",
 		       X86_FEATURE_CLFLUSHOPT,
 		       "+m" (*(volatile char __force *)__p));
 }
@@ -205,9 +205,9 @@ static inline void clwb(volatile void *__p)
 #ifdef CONFIG_X86_USER_SHADOW_STACK
 static inline int write_user_shstk_64(u64 __user *addr, u64 val)
 {
-	asm goto("1: wrussq %[val], (%[addr])\n"
+	asm goto("1: wrussq %[val], %[addr]\n"
 		 _ASM_EXTABLE(1b, %l[fail])
-		 :: [addr] "r" (addr), [val] "r" (val)
+		 :: [addr] "m" (*addr), [val] "r" (val)
 		 :: fail);
 	return 0;
 fail:
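
Background on the WRUSSQ constraint change above (not part of the diff): an "m" operand lets the compiler choose the addressing mode itself instead of forcing the pointer into a register and hand-writing "(%[addr])" in the template. A hedged sketch of the same pattern, using an ordinary store in place of wrussq so it runs on any x86-64; store64_sketch is a made-up name:

	static inline void store64_sketch(unsigned long long *addr,
					  unsigned long long val)
	{
		/* The slot is passed with an "m" constraint so the compiler
		 * picks the addressing mode; since it is an input operand
		 * (as in the asm goto above), a "memory" clobber tells the
		 * compiler about the store. */
		asm volatile("movq %[val], %[slot]"
			     : : [slot] "m" (*addr), [val] "r" (val)
			     : "memory");
	}
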
@@ -30,37 +30,40 @@ void *__memset(void *s, int c, size_t n);
 #define __HAVE_ARCH_MEMSET16
 static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
 {
-	long d0, d1;
-	asm volatile("rep\n\t"
-		     "stosw"
-		     : "=&c" (d0), "=&D" (d1)
-		     : "a" (v), "1" (s), "0" (n)
-		     : "memory");
-	return s;
+	const __auto_type s0 = s;
+	asm volatile (
+		"rep stosw"
+		: "+D" (s), "+c" (n)
+		: "a" (v)
+		: "memory"
+	);
+	return s0;
 }
 
 #define __HAVE_ARCH_MEMSET32
 static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
 {
-	long d0, d1;
-	asm volatile("rep\n\t"
-		     "stosl"
-		     : "=&c" (d0), "=&D" (d1)
-		     : "a" (v), "1" (s), "0" (n)
-		     : "memory");
-	return s;
+	const __auto_type s0 = s;
+	asm volatile (
+		"rep stosl"
+		: "+D" (s), "+c" (n)
+		: "a" (v)
+		: "memory"
+	);
+	return s0;
 }
 
 #define __HAVE_ARCH_MEMSET64
 static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
 {
-	long d0, d1;
-	asm volatile("rep\n\t"
-		     "stosq"
-		     : "=&c" (d0), "=&D" (d1)
-		     : "a" (v), "1" (s), "0" (n)
-		     : "memory");
-	return s;
+	const __auto_type s0 = s;
+	asm volatile (
+		"rep stosq"
+		: "+D" (s), "+c" (n)
+		: "a" (v)
+		: "memory"
+	);
+	return s0;
 }
 #endif
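
For illustration of the new constraint style above (mirroring, not copying, the header): "+D" and "+c" mark RDI and RCX as read-write, since rep stos advances both, so the old dummy outputs d0/d1 are no longer needed, and a saved copy of the pointer provides the memset-style return value. A standalone user-space sketch of the same pattern; memset32_sketch is a made-up name:

	#include <stddef.h>
	#include <stdint.h>

	static inline void *memset32_sketch(uint32_t *s, uint32_t v, size_t n)
	{
		void *ret = s;	/* rep stosl advances s, keep the original */

		asm volatile("rep stosl"
			     : "+D" (s), "+c" (n)
			     : "a" (v)
			     : "memory");
		return ret;
	}
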
@@ -78,7 +78,7 @@ extern int __get_user_bad(void);
 	int __ret_gu; \
 	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
 	__chk_user_ptr(ptr); \
-	asm volatile("call __" #fn "_%P4" \
+	asm volatile("call __" #fn "_%c4" \
 		     : "=a" (__ret_gu), "=r" (__val_gu), \
 		       ASM_CALL_CONSTRAINT \
 		     : "0" (ptr), "i" (sizeof(*(ptr)))); \
@@ -177,7 +177,7 @@ extern void __put_user_nocheck_8(void);
 	__chk_user_ptr(__ptr); \
 	__ptr_pu = __ptr; \
 	__val_pu = __x; \
-	asm volatile("call __" #fn "_%P[size]" \
+	asm volatile("call __" #fn "_%c[size]" \
 		     : "=c" (__ret_pu), \
 		       ASM_CALL_CONSTRAINT \
 		     : "0" (__ptr_pu), \