Commit 0da5db31 authored by Rusty Russell, committed by Andi Kleen

[PATCH] i386: Abstract sensitive instructions

Abstract sensitive instructions in assembler code, replacing them with macros
(which are currently #defined to the native versions).  We use long names:
the assembler is case-insensitive, so if something went wrong and a
short-named macro failed to expand, it would assemble as the native
instruction anyway; a long name fails the build instead.
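
For illustration, a hypothetical failure mode (not part of the patch):

    CLI                     # macro missing: gas still assembles the native cli
    DISABLE_INTERRUPTS      # macro missing: assembler error, no such instruction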

Resulting object files are exactly the same as before.
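
One way to check this (an assumed workflow, not described in the commit):
build entry.o before and after the patch and compare the two objects byte
for byte:

    cmp arch/i386/kernel/entry.o entry.o.before   # no output means identical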
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Andi Kleen <ak@suse.de>
parent 7b0bda74
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -76,8 +76,15 @@ DF_MASK	= 0x00000400
 NT_MASK	= 0x00004000
 VM_MASK	= 0x00020000
 
+/* These are replacements for paravirtualization */
+#define DISABLE_INTERRUPTS		cli
+#define ENABLE_INTERRUPTS		sti
+#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
+#define INTERRUPT_RETURN		iret
+#define GET_CR0_INTO_EAX		movl %cr0, %eax
+
 #ifdef CONFIG_PREEMPT
-#define preempt_stop		cli; TRACE_IRQS_OFF
+#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
 #else
 #define preempt_stop
 #define resume_kernel		restore_nocheck
@@ -236,7 +243,7 @@ check_userspace:
 	testl $(VM_MASK | 3), %eax
 	jz resume_kernel
 ENTRY(resume_userspace)
-	cli				# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	movl TI_flags(%ebp), %ecx
@@ -247,7 +254,7 @@ ENTRY(resume_userspace)
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-	cli
+	DISABLE_INTERRUPTS
 	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
 	jnz restore_nocheck
 need_resched:
@@ -275,7 +282,7 @@ sysenter_past_esp:
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs and here we enable it straight after entry:
 	 */
-	sti
+	ENABLE_INTERRUPTS
 	pushl $(__USER_DS)
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET ss, 0*/
@@ -320,7 +327,7 @@ sysenter_past_esp:
 	jae syscall_badsys
 	call *sys_call_table(,%eax,4)
 	movl %eax,EAX(%esp)
-	cli
+	DISABLE_INTERRUPTS
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
@@ -330,8 +337,7 @@ sysenter_past_esp:
 	movl OLDESP(%esp), %ecx
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
-	sti
-	sysexit
+	ENABLE_INTERRUPTS_SYSEXIT
 	CFI_ENDPROC
@@ -356,7 +362,7 @@ syscall_call:
 	call *sys_call_table(,%eax,4)
 	movl %eax,EAX(%esp)		# store the return value
 syscall_exit:
-	cli				# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
@@ -381,11 +387,11 @@ restore_nocheck_notrace:
 	RESTORE_REGS
 	addl $4, %esp
 	CFI_ADJUST_CFA_OFFSET -4
-1:	iret
+1:	INTERRUPT_RETURN
 .section .fixup,"ax"
 iret_exc:
 	TRACE_IRQS_ON
-	sti
+	ENABLE_INTERRUPTS
 	pushl $0			# no error code
 	pushl $do_iret_error
 	jmp error_code
@@ -409,7 +415,7 @@ ldt_ss:
 	 * dosemu and wine happy. */
 	subl $8, %esp			# reserve space for switch16 pointer
 	CFI_ADJUST_CFA_OFFSET 8
-	cli
+	DISABLE_INTERRUPTS
 	TRACE_IRQS_OFF
 	movl %esp, %eax
 	/* Set up the 16bit stack frame with switch32 pointer on top,
@@ -419,7 +425,7 @@ ldt_ss:
 	TRACE_IRQS_IRET
 	RESTORE_REGS
 	lss 20+4(%esp), %esp		# switch to 16bit stack
-1:	iret
+1:	INTERRUPT_RETURN
 .section __ex_table,"a"
 	.align 4
 	.long 1b,iret_exc
@@ -434,7 +440,7 @@ work_pending:
 	jz work_notifysig
 work_resched:
 	call schedule
-	cli				# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
@@ -490,7 +496,7 @@ syscall_exit_work:
 	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
 	jz work_pending
 	TRACE_IRQS_ON
-	sti				# could let do_syscall_trace() call
+	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
 					# schedule() instead
 	movl %esp, %eax
 	movl $1, %edx
@@ -668,7 +674,7 @@ ENTRY(device_not_available)
 	pushl $-1			# mark this as an int
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
-	movl %cr0, %eax
+	GET_CR0_INTO_EAX
 	testl $0x4, %eax		# EM (math emulation bit)
 	jne device_not_available_emulate
 	preempt_stop
@@ -811,7 +817,7 @@ nmi_16bit_stack:
 	call do_nmi
 	RESTORE_REGS
 	lss 12+4(%esp), %esp		# back to 16bit stack
-1:	iret
+1:	INTERRUPT_RETURN
 	CFI_ENDPROC
 .section __ex_table,"a"
 	.align 4
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -7,6 +7,9 @@
 #include <asm/processor.h>
 #include <linux/compiler.h>
 
+#define CLI_STRING	"cli"
+#define STI_STRING	"sti"
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
@@ -55,12 +58,12 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 		"2:\t"
 		"testl $0x200, %1\n\t"
 		"jz 4f\n\t"
-		"sti\n"
+		STI_STRING "\n"
 		"3:\t"
 		"rep;nop\n\t"
 		"cmpb $0, %0\n\t"
 		"jle 3b\n\t"
-		"cli\n\t"
+		CLI_STRING "\n\t"
 		"jmp 1b\n"
 		"4:\t"
 		"rep;nop\n\t"
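
The point of the indirection (a hypothetical follow-on, not in this patch):
a paravirtualized build could later redefine the same macros to call into a
hypervisor instead of executing the privileged instruction directly, e.g.:

    #ifdef CONFIG_PARAVIRT
    /* hypothetical names: route sensitive ops through hypervisor stubs */
    #define DISABLE_INTERRUPTS		call paravirt_irq_disable
    #define ENABLE_INTERRUPTS		call paravirt_irq_enable
    #define INTERRUPT_RETURN		jmp paravirt_iret
    #else
    /* the native versions introduced by this patch */
    #define DISABLE_INTERRUPTS		cli
    #define ENABLE_INTERRUPTS		sti
    #define INTERRUPT_RETURN		iret
    #endif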