Commit 87b26406 authored by Brian Gerst, committed by Tejun Heo

x86-64: Use absolute displacements for per-cpu accesses.

Accessing memory through %gs should not use rip-relative addressing:
the displacement would then be relative to the instruction pointer
rather than an absolute offset from the %gs base, so the wrong address
would be computed.  Adding the P operand modifier tells gcc not to
append (%rip) to the memory references.
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent c2558e0e
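
To see the problem the patch addresses, consider a minimal sketch (not
part of the patch; my_var and read_gs are hypothetical names used only
for illustration):

long my_var;

long read_gs(void)
{
	long v;

	/* Without the P modifier, gcc may print operand 1 as
	 * my_var(%rip).  A %gs: override on top of that addresses
	 * "segment base + rip-relative offset", which is not where
	 * the per-cpu copy lives.  %P1 prints the bare symbol, so
	 * the access becomes  movq %gs:my_var,%rax  -- an absolute
	 * displacement from the %gs base. */
	asm("movq %%gs:%P1,%0" : "=r" (v) : "m" (my_var));
	return v;
}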
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -39,10 +39,10 @@
 #include <linux/stringify.h>

 #ifdef CONFIG_SMP
-#define __percpu_seg_str	"%%"__stringify(__percpu_seg)":"
+#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
 #define __my_cpu_offset		percpu_read(this_cpu_off)
 #else
-#define __percpu_seg_str
+#define __percpu_arg(x)		"%" #x
 #endif

 /* For arch-specific code, we can use direct single-insn ops (they
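
For reference, the expansions the new macro produces (a sketch; on
x86-64 __percpu_seg is gs):

#ifdef CONFIG_SMP
__percpu_arg(0)   /* string-pastes to "%%gs:%P0" */
#else
__percpu_arg(0)   /* string-pastes to "%0" -- no segment override */
#endif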
@@ -58,22 +58,22 @@ do {						\
 	}						\
 	switch (sizeof(var)) {				\
 	case 1:						\
-		asm(op "b %1,"__percpu_seg_str"%0"	\
+		asm(op "b %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
 		    : "ri" ((T__)val));			\
 		break;					\
 	case 2:						\
-		asm(op "w %1,"__percpu_seg_str"%0"	\
+		asm(op "w %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
 		    : "ri" ((T__)val));			\
 		break;					\
 	case 4:						\
-		asm(op "l %1,"__percpu_seg_str"%0"	\
+		asm(op "l %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
 		    : "ri" ((T__)val));			\
 		break;					\
 	case 8:						\
-		asm(op "q %1,"__percpu_seg_str"%0"	\
+		asm(op "q %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
 		    : "r" ((T__)val));			\
 		break;					\
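
Note that the operand reference itself ("%0") moved into the macro.
After preprocessing, the 8-byte store case reads, schematically
(per_cpu__foo is a hypothetical variable name):

	asm("movq %1,%%gs:%P0"
	    : "+m" (per_cpu__foo)
	    : "r" ((unsigned long)val));
	/* assembles to e.g.:  movq %rax,%gs:per_cpu__foo */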
@@ -86,22 +86,22 @@ do {						\
 	typeof(var) ret__;				\
 	switch (sizeof(var)) {				\
 	case 1:						\
-		asm(op "b "__percpu_seg_str"%1,%0"	\
+		asm(op "b "__percpu_arg(1)",%0"		\
 		    : "=r" (ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 2:						\
-		asm(op "w "__percpu_seg_str"%1,%0"	\
+		asm(op "w "__percpu_arg(1)",%0"		\
 		    : "=r" (ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 4:						\
-		asm(op "l "__percpu_seg_str"%1,%0"	\
+		asm(op "l "__percpu_arg(1)",%0"		\
 		    : "=r" (ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 8:						\
-		asm(op "q "__percpu_seg_str"%1,%0"	\
+		asm(op "q "__percpu_arg(1)",%0"		\
 		    : "=r" (ret__)			\
 		    : "m" (var));			\
 		break;					\
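
The !CONFIG_SMP expansion keeps these templates valid as well:
__percpu_arg(1) becomes plain "%1", so the 8-byte read degenerates to
an ordinary load (sketch, hypothetical variable name):

	asm("movq %1,%0" : "=r" (ret__) : "m" (per_cpu__foo));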
@@ -122,9 +122,9 @@ do {						\
 #define x86_test_and_clear_bit_percpu(bit, var)			\
 ({									\
 	int old__;							\
-	asm volatile("btr %1,"__percpu_seg_str"%c2\n\tsbbl %0,%0"	\
-		     : "=r" (old__)					\
-		     : "dIr" (bit), "i" (&per_cpu__##var) : "memory");	\
+	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
+		     : "=r" (old__), "+m" (per_cpu__##var)		\
+		     : "dIr" (bit));					\
 	old__;								\
 })
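
Beyond the addressing change, this hunk also tightens the constraints:
the old code passed the address as an "i" operand and relied on a
"memory" clobber, while the new code makes the variable a proper "+m"
operand, so gcc knows exactly which object btr modifies.  A standalone
sketch with hypothetical names:

unsigned long per_cpu__flags;

static inline int test_and_clear_flag(int bit)
{
	int old;

	/* btr copies the bit into CF and clears it; sbbl turns CF
	 * into 0 or -1, so "old" is nonzero iff the bit was set. */
	asm volatile("btr %2,%%gs:%P1\n\tsbbl %0,%0"
		     : "=r" (old), "+m" (per_cpu__flags)
		     : "dIr" (bit));
	return old;
}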
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -94,7 +94,7 @@ do {						\
 	     "call __switch_to\n\t"					  \
 	     ".globl thread_return\n"					  \
 	     "thread_return:\n\t"					  \
-	     "movq "__percpu_seg_str"%P[current_task],%%rsi\n\t"	  \
+	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
 	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
 	     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
 	     "movq %%rax,%%rdi\n\t"					  \
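
In the switch_to path the effect is the same: the reload of
current_task after __switch_to now uses an absolute displacement from
the %gs base.  Schematically, as a hypothetical standalone wrapper
(not the real macro):

struct task_struct;
extern struct task_struct *per_cpu__current_task;

static inline struct task_struct *reload_current(void)
{
	struct task_struct *t;

	/* emits:  movq %gs:per_cpu__current_task,%rax  */
	asm("movq %%gs:%P[cur],%0"
	    : "=r" (t)
	    : [cur] "m" (per_cpu__current_task));
	return t;
}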