Commit 1a338ac3 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

sched, x86: Optimize the preempt_schedule() call

Remove the bloat of the C calling convention out of the
preempt_enable() sites by creating an ASM wrapper which allows us to
do an asm("call ___preempt_schedule") instead.

calling.h bits by Andi Kleen
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-tk7xdi1cvvxewixzke8t8le1@git.kernel.org
[ Fixed build error. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c2daa3be
...@@ -48,6 +48,8 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -48,6 +48,8 @@ For 32-bit we have the following conventions - kernel is built with
#include <asm/dwarf2.h> #include <asm/dwarf2.h>
#ifdef CONFIG_X86_64
/* /*
* 64-bit system call stack frame layout defines and helpers, * 64-bit system call stack frame layout defines and helpers,
* for assembly code: * for assembly code:
...@@ -192,3 +194,51 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -192,3 +194,51 @@ For 32-bit we have the following conventions - kernel is built with
.macro icebp .macro icebp
.byte 0xf1 .byte 0xf1
.endm .endm
#else /* CONFIG_X86_64 */
/*
* For 32bit only simplified versions of SAVE_ALL/RESTORE_ALL. These
* are different from the entry_32.S versions in not changing the segment
* registers. So only suitable for in kernel use, not when transitioning
* from or to user space. The resulting stack frame is not a standard
* pt_regs frame. The main use case is calling C code from assembler
* when all the registers need to be preserved.
*/
.macro SAVE_ALL
pushl_cfi %eax
CFI_REL_OFFSET eax, 0
pushl_cfi %ebp
CFI_REL_OFFSET ebp, 0
pushl_cfi %edi
CFI_REL_OFFSET edi, 0
pushl_cfi %esi
CFI_REL_OFFSET esi, 0
pushl_cfi %edx
CFI_REL_OFFSET edx, 0
pushl_cfi %ecx
CFI_REL_OFFSET ecx, 0
pushl_cfi %ebx
CFI_REL_OFFSET ebx, 0
.endm
.macro RESTORE_ALL
popl_cfi %ebx
CFI_RESTORE ebx
popl_cfi %ecx
CFI_RESTORE ecx
popl_cfi %edx
CFI_RESTORE edx
popl_cfi %esi
CFI_RESTORE esi
popl_cfi %edi
CFI_RESTORE edi
popl_cfi %ebp
CFI_RESTORE ebp
popl_cfi %eax
CFI_RESTORE eax
.endm
#endif /* CONFIG_X86_64 */
...@@ -95,4 +95,14 @@ static __always_inline bool should_resched(void) ...@@ -95,4 +95,14 @@ static __always_inline bool should_resched(void)
return unlikely(!__this_cpu_read_4(__preempt_count)); return unlikely(!__this_cpu_read_4(__preempt_count));
} }
#ifdef CONFIG_PREEMPT
extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() asm ("call ___preempt_schedule")
extern asmlinkage void preempt_schedule(void);
# ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void ___preempt_schedule_context(void);
# define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
# endif
#endif
#endif /* __ASM_PREEMPT_H */ #endif /* __ASM_PREEMPT_H */
...@@ -36,6 +36,8 @@ obj-y += tsc.o io_delay.o rtc.o ...@@ -36,6 +36,8 @@ obj-y += tsc.o io_delay.o rtc.o
obj-y += pci-iommu_table.o obj-y += pci-iommu_table.o
obj-y += resource.o obj-y += resource.o
obj-$(CONFIG_PREEMPT) += preempt.o
obj-y += process.o obj-y += process.o
obj-y += i387.o xsave.o obj-y += i387.o xsave.o
obj-y += ptrace.o obj-y += ptrace.o
......
...@@ -37,3 +37,10 @@ EXPORT_SYMBOL(strstr); ...@@ -37,3 +37,10 @@ EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_PREEMPT
EXPORT_SYMBOL(___preempt_schedule);
#ifdef CONFIG_CONTEXT_TRACKING
EXPORT_SYMBOL(___preempt_schedule_context);
#endif
#endif
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/asm.h>
#include <asm/calling.h>
ENTRY(___preempt_schedule)
CFI_STARTPROC
SAVE_ALL
call preempt_schedule
RESTORE_ALL
ret
CFI_ENDPROC
#ifdef CONFIG_CONTEXT_TRACKING
ENTRY(___preempt_schedule_context)
CFI_STARTPROC
SAVE_ALL
call preempt_schedule_context
RESTORE_ALL
ret
CFI_ENDPROC
#endif
...@@ -66,3 +66,10 @@ EXPORT_SYMBOL(empty_zero_page); ...@@ -66,3 +66,10 @@ EXPORT_SYMBOL(empty_zero_page);
#ifndef CONFIG_PARAVIRT #ifndef CONFIG_PARAVIRT
EXPORT_SYMBOL(native_load_gs_index); EXPORT_SYMBOL(native_load_gs_index);
#endif #endif
#ifdef CONFIG_PREEMPT
EXPORT_SYMBOL(___preempt_schedule);
#ifdef CONFIG_CONTEXT_TRACKING
EXPORT_SYMBOL(___preempt_schedule_context);
#endif
#endif
...@@ -100,4 +100,14 @@ static __always_inline bool should_resched(void) ...@@ -100,4 +100,14 @@ static __always_inline bool should_resched(void)
return unlikely(!*preempt_count_ptr()); return unlikely(!*preempt_count_ptr());
} }
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
#ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void preempt_schedule_context(void);
#define __preempt_schedule_context() preempt_schedule_context()
#endif
#endif /* CONFIG_PREEMPT */
#endif /* __ASM_PREEMPT_H */ #endif /* __ASM_PREEMPT_H */
...@@ -50,18 +50,17 @@ do { \ ...@@ -50,18 +50,17 @@ do { \
#define preempt_enable_no_resched() sched_preempt_enable_no_resched() #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
asmlinkage void preempt_schedule(void);
#define preempt_enable() \ #define preempt_enable() \
do { \ do { \
barrier(); \ barrier(); \
if (unlikely(preempt_count_dec_and_test())) \ if (unlikely(preempt_count_dec_and_test())) \
preempt_schedule(); \ __preempt_schedule(); \
} while (0) } while (0)
#define preempt_check_resched() \ #define preempt_check_resched() \
do { \ do { \
if (should_resched()) \ if (should_resched()) \
preempt_schedule(); \ __preempt_schedule(); \
} while (0) } while (0)
#else #else
...@@ -83,17 +82,15 @@ do { \ ...@@ -83,17 +82,15 @@ do { \
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
#ifdef CONFIG_CONTEXT_TRACKING #ifndef CONFIG_CONTEXT_TRACKING
asmlinkage void preempt_schedule_context(void); #define __preempt_schedule_context() __preempt_schedule()
#else
#define preempt_schedule_context() preempt_schedule()
#endif #endif
#define preempt_enable_notrace() \ #define preempt_enable_notrace() \
do { \ do { \
barrier(); \ barrier(); \
if (unlikely(__preempt_count_dec_and_test())) \ if (unlikely(__preempt_count_dec_and_test())) \
preempt_schedule_context(); \ __preempt_schedule_context(); \
} while (0) } while (0)
#else #else
#define preempt_enable_notrace() preempt_enable_no_resched_notrace() #define preempt_enable_notrace() preempt_enable_no_resched_notrace()
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment