Commit f69ca629 authored by Ricardo Neri, committed by Ingo Molnar

x86/cpu: Refactor sync_core() for readability

Instead of having #ifdef/#endif blocks inside sync_core() for X86_64 and
X86_32, implement the new function iret_to_self() with two versions.

In this manner, avoid having to use even more #ifdef/#endif blocks
when adding support for SERIALIZE in sync_core().
Co-developed-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20200727043132.15082-4-ricardo.neri-calderon@linux.intel.com
parent 9998a983
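Before the diff, a minimal sketch of where this refactor is headed: with iret_to_self() factored out, sync_core() can later gate on SERIALIZE with a plain feature check instead of more #ifdef blocks. The static_cpu_has()/X86_FEATURE_SERIALIZE check and the serialize() wrapper shown here are assumptions drawn from the follow-up patch in this series, not part of this commit:

static inline void sync_core(void)
{
	/*
	 * Sketch only: if the CPU enumerates SERIALIZE, a single
	 * instruction serializes the instruction stream; otherwise fall
	 * back to IRET-to-self, now behind one call that covers both
	 * X86_32 and X86_64.
	 */
	if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
		serialize();	/* assumed wrapper around the SERIALIZE insn */
		return;
	}

	iret_to_self();
}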
arch/x86/include/asm/special_insns.h
@@ -234,7 +234,6 @@ static inline void clwb(volatile void *__p)
 #define nop() asm volatile ("nop")
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_SPECIAL_INSNS_H */
arch/x86/include/asm/sync_core.h
@@ -6,6 +6,37 @@
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 
+#ifdef CONFIG_X86_32
+static inline void iret_to_self(void)
+{
+	asm volatile (
+		"pushfl\n\t"
+		"pushl %%cs\n\t"
+		"pushl $1f\n\t"
+		"iret\n\t"
+		"1:"
+		: ASM_CALL_CONSTRAINT : : "memory");
+}
+#else
+static inline void iret_to_self(void)
+{
+	unsigned int tmp;
+
+	asm volatile (
+		"mov %%ss, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq %%rsp\n\t"
+		"addq $8, (%%rsp)\n\t"
+		"pushfq\n\t"
+		"mov %%cs, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq $1f\n\t"
+		"iretq\n\t"
+		"1:"
+		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
+}
+#endif /* CONFIG_X86_32 */
+
 /*
  * This function forces the icache and prefetched instruction stream to
  * catch up with reality in two very specific cases:
arch/x86/include/asm/sync_core.h
@@ -44,30 +75,7 @@ static inline void sync_core(void)
 	 * Like all of Linux's memory ordering operations, this is a
 	 * compiler barrier as well.
 	 */
-#ifdef CONFIG_X86_32
-	asm volatile (
-		"pushfl\n\t"
-		"pushl %%cs\n\t"
-		"pushl $1f\n\t"
-		"iret\n\t"
-		"1:"
-		: ASM_CALL_CONSTRAINT : : "memory");
-#else
-	unsigned int tmp;
-
-	asm volatile (
-		"mov %%ss, %0\n\t"
-		"pushq %q0\n\t"
-		"pushq %%rsp\n\t"
-		"addq $8, (%%rsp)\n\t"
-		"pushfq\n\t"
-		"mov %%cs, %0\n\t"
-		"pushq %q0\n\t"
-		"pushq $1f\n\t"
-		"iretq\n\t"
-		"1:"
-		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
-#endif
+	iret_to_self();
 }
 
 /*
...
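A reading aid for the 64-bit iret_to_self() above (not part of the commit): the sequence of pushes builds, by hand, exactly the five-word frame that IRETQ pops, so the "interrupt return" lands on the local label right after the instruction.

/*
 * Reading aid, not part of the commit: frame built before IRETQ,
 * in push order (IRETQ pops it in reverse):
 *
 *   mov %ss, %0 ; pushq %q0      -> SS
 *   pushq %rsp ; addq $8,(%rsp)  -> RSP as it was before the frame was built
 *   pushfq                       -> RFLAGS
 *   mov %cs, %0 ; pushq %q0      -> CS
 *   pushq $1f                    -> RIP = label 1:, just past iretq
 *
 * Executing IRETQ reloads all five registers and, because IRET is a
 * serializing instruction, forces the prefetched instruction stream to
 * catch up, which is what sync_core() needs.
 */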