Commit c5386c20 authored by Joe Perches, committed by Ingo Molnar

include/asm-x86/system.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 26b7fcc4
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -38,17 +38,16 @@ do { \
 	 */ \
 	unsigned long ebx, ecx, edx, esi, edi; \
 	\
-	asm volatile( \
-	"pushfl \n\t" /* save flags */ \
-	"pushl %%ebp \n\t" /* save EBP */ \
-	"movl %%esp,%[prev_sp] \n\t" /* save ESP */ \
-	"movl %[next_sp],%%esp \n\t" /* restore ESP */ \
-	"movl $1f,%[prev_ip] \n\t" /* save EIP */ \
-	"pushl %[next_ip] \n\t" /* restore EIP */ \
-	"jmp __switch_to \n" /* regparm call */ \
-	"1: \t" \
-	"popl %%ebp \n\t" /* restore EBP */ \
-	"popfl \n" /* restore flags */ \
+	asm volatile("pushfl\n\t" /* save flags */ \
+		     "pushl %%ebp\n\t" /* save EBP */ \
+		     "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
+		     "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
+		     "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
+		     "pushl %[next_ip]\n\t" /* restore EIP */ \
+		     "jmp __switch_to\n" /* regparm call */ \
+		     "1:\t" \
+		     "popl %%ebp\n\t" /* restore EBP */ \
+		     "popfl\n" /* restore flags */ \
 	\
 	/* output parameters */ \
 	: [prev_sp] "=m" (prev->thread.sp), \
@@ -65,8 +64,7 @@ do { \
 	\
 	/* regparm parameters for __switch_to(): */ \
 	[prev] "a" (prev), \
-	[next] "d" (next) \
-	); \
+	[next] "d" (next)); \
 } while (0)
 
 /*
@@ -167,14 +165,13 @@ extern void load_gs_index(unsigned);
 static inline unsigned long get_limit(unsigned long segment)
 {
 	unsigned long __limit;
-	__asm__("lsll %1,%0"
-		:"=r" (__limit):"r" (segment));
-	return __limit+1;
+	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+	return __limit + 1;
 }
 
 static inline void native_clts(void)
 {
-	asm volatile ("clts");
+	asm volatile("clts");
 }
 
 /*
@@ -189,43 +186,43 @@ static unsigned long __force_order;
 static inline unsigned long native_read_cr0(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
 
 static inline void native_write_cr0(unsigned long val)
 {
-	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
 }
 
 static inline unsigned long native_read_cr2(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
 
 static inline void native_write_cr2(unsigned long val)
 {
-	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
 }
 
 static inline unsigned long native_read_cr3(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
 
 static inline void native_write_cr3(unsigned long val)
 {
-	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
 }
 
 static inline unsigned long native_read_cr4(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
+	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
 	return val;
 }
@@ -237,7 +234,7 @@ static inline unsigned long native_read_cr4_safe(void)
 #ifdef CONFIG_X86_32
 	asm volatile("1: mov %%cr4, %0\n"
 		     "2:\n"
-		     _ASM_EXTABLE(1b,2b)
+		     _ASM_EXTABLE(1b, 2b)
 		     : "=r" (val), "=m" (__force_order) : "0" (0));
 #else
 	val = native_read_cr4();
@@ -247,7 +244,7 @@ static inline unsigned long native_read_cr4_safe(void)
 
 static inline void native_write_cr4(unsigned long val)
 {
-	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
+	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
 }
 
 #ifdef CONFIG_X86_64
@@ -268,6 +265,7 @@ static inline void native_wbinvd(void)
 {
 	asm volatile("wbinvd": : :"memory");
 }
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -300,7 +298,7 @@ static inline void clflush(volatile void *__p)
 	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
 }
 
-#define nop() __asm__ __volatile__ ("nop")
+#define nop() asm volatile ("nop")
 
 void disable_hlt(void);
 void enable_hlt(void);
@@ -399,7 +397,7 @@ void default_idle(void);
 # define smp_wmb()	barrier()
 #endif
 #define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
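For readers unfamiliar with the checkpatch.pl rules involved: the patch is purely mechanical. It replaces the __asm__ and __volatile__ spellings with asm and asm volatile, folds the asm template string into the asm volatile(...) opening, adds a space after each colon separating output, input, and clobber constraint groups, and drops the space after a cast. A minimal sketch of the resulting style, assuming a hypothetical helper that is not part of this patch:

	/*
	 * Illustrative sketch only -- not from this patch. read_eflags()
	 * is a made-up name; it shows the checkpatch-preferred style:
	 * "asm volatile(" with no space before the parenthesis, and a
	 * space after each constraint-group colon.
	 */
	static inline unsigned long read_eflags(void)
	{
		unsigned long flags;

		asm volatile("pushfl\n\t"	/* push EFLAGS onto the stack */
			     "popl %0"		/* pop it into a register */
			     : "=r" (flags) : : "memory");
		return flags;
	}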