Commit 9dda1658 authored by Ingo Molnar

Merge branch 'x86/asm' into x86/core, to prepare for new patch

Collect all changes to arch/x86/entry/entry_64.S, before applying
patch that changes most of the file.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

parents b72e7464 a49976d1
@@ -18,10 +18,10 @@ Some of these entries are:

  - system_call: syscall instruction from 64-bit code.

- - ia32_syscall: int 0x80 from 32-bit or 64-bit code; compat syscall
+ - entry_INT80_compat: int 0x80 from 32-bit or 64-bit code; compat syscall
    either way.

- - ia32_syscall, ia32_sysenter: syscall and sysenter from 32-bit
+ - entry_INT80_compat, ia32_sysenter: syscall and sysenter from 32-bit
    code

  - interrupt: An array of entries.  Every IDT vector that doesn't
......
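The hunk above renames the int 0x80 entry point to entry_INT80_compat; int 0x80 stays on the compat path whether it is issued from 32-bit or 64-bit code. A minimal userspace sketch of that behaviour follows (illustration only, not part of this merge; it assumes an x86-64 host with CONFIG_IA32_EMULATION enabled, and 20 is __NR_getpid in the 32-bit syscall table):

/*
 * int80_demo.c - invoke a syscall via "int $0x80" from a 64-bit process.
 * The kernel dispatches this through the compat entry point, so the
 * syscall number comes from the 32-bit table.
 *
 * Build: gcc -O2 int80_demo.c -o int80_demo
 */
#include <stdio.h>

int main(void)
{
	long ret;

	/* 20 is __NR_getpid in the i386 syscall table. */
	asm volatile("int $0x80"
		     : "=a" (ret)	/* return value comes back in eax */
		     : "a" (20)		/* syscall number goes in eax */
		     : "memory");

	printf("getpid via int 0x80: %ld\n", ret);
	return 0;
}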
@@ -10893,7 +10893,7 @@ M:	Andy Lutomirski <luto@amacapital.net>
 L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
 S:	Maintained
-F:	arch/x86/vdso/
+F:	arch/x86/entry/vdso/

 XC2028/3028 TUNER DRIVER
 M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
......
+obj-y += entry/
 obj-$(CONFIG_KVM) += kvm/

 # Xen paravirtualization support
@@ -11,7 +14,7 @@ obj-y += kernel/
 obj-y += mm/
 obj-y += crypto/
-obj-y += vdso/
 obj-$(CONFIG_IA32_EMULATION) += ia32/
 obj-y += platform/
......
@@ -149,12 +149,6 @@ endif
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp

-# do binutils support CFI?
-cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
-# is .cfi_signal_frame supported too?
-cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
-cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
-
 # does binutils support specific instructions?
 asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
 asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
@@ -162,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
 avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
 avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)

-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)

 LDFLAGS := -m elf_$(UTS_MACHINE)
@@ -187,7 +181,7 @@ archscripts: scripts_basic

 # Syscall table generation
 archheaders:
-	$(Q)$(MAKE) $(build)=arch/x86/syscalls all
+	$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all

 archprepare:
 ifeq ($(CONFIG_KEXEC_FILE),y)
@@ -250,7 +244,7 @@ install:

 PHONY += vdso_install
 vdso_install:
-	$(Q)$(MAKE) $(build)=arch/x86/vdso $@
+	$(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@

 archclean:
 	$(Q)rm -rf $(objtree)/arch/i386
......
#
# Makefile for the x86 low level entry code
#
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += vdso/
obj-y += vsyscall/
obj-$(CONFIG_IA32_EMULATION) += entry_64_compat.o syscall_32.o
@@ -46,8 +46,6 @@ For 32-bit we have the following conventions - kernel is built with
  */

-#include <asm/dwarf2.h>
-
 #ifdef CONFIG_X86_64

 /*
...@@ -91,28 +89,27 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -91,28 +89,27 @@ For 32-bit we have the following conventions - kernel is built with
#define SIZEOF_PTREGS 21*8 #define SIZEOF_PTREGS 21*8
.macro ALLOC_PT_GPREGS_ON_STACK addskip=0 .macro ALLOC_PT_GPREGS_ON_STACK addskip=0
subq $15*8+\addskip, %rsp addq $-(15*8+\addskip), %rsp
CFI_ADJUST_CFA_OFFSET 15*8+\addskip
.endm .endm
.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1 .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
.if \r11 .if \r11
movq_cfi r11, 6*8+\offset movq %r11, 6*8+\offset(%rsp)
.endif .endif
.if \r8910 .if \r8910
movq_cfi r10, 7*8+\offset movq %r10, 7*8+\offset(%rsp)
movq_cfi r9, 8*8+\offset movq %r9, 8*8+\offset(%rsp)
movq_cfi r8, 9*8+\offset movq %r8, 9*8+\offset(%rsp)
.endif .endif
.if \rax .if \rax
movq_cfi rax, 10*8+\offset movq %rax, 10*8+\offset(%rsp)
.endif .endif
.if \rcx .if \rcx
movq_cfi rcx, 11*8+\offset movq %rcx, 11*8+\offset(%rsp)
.endif .endif
movq_cfi rdx, 12*8+\offset movq %rdx, 12*8+\offset(%rsp)
movq_cfi rsi, 13*8+\offset movq %rsi, 13*8+\offset(%rsp)
movq_cfi rdi, 14*8+\offset movq %rdi, 14*8+\offset(%rsp)
.endm .endm
.macro SAVE_C_REGS offset=0 .macro SAVE_C_REGS offset=0
SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
...@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with
.endm .endm
.macro SAVE_EXTRA_REGS offset=0 .macro SAVE_EXTRA_REGS offset=0
movq_cfi r15, 0*8+\offset movq %r15, 0*8+\offset(%rsp)
movq_cfi r14, 1*8+\offset movq %r14, 1*8+\offset(%rsp)
movq_cfi r13, 2*8+\offset movq %r13, 2*8+\offset(%rsp)
movq_cfi r12, 3*8+\offset movq %r12, 3*8+\offset(%rsp)
movq_cfi rbp, 4*8+\offset movq %rbp, 4*8+\offset(%rsp)
movq_cfi rbx, 5*8+\offset movq %rbx, 5*8+\offset(%rsp)
.endm .endm
.macro SAVE_EXTRA_REGS_RBP offset=0 .macro SAVE_EXTRA_REGS_RBP offset=0
movq_cfi rbp, 4*8+\offset movq %rbp, 4*8+\offset(%rsp)
.endm .endm
.macro RESTORE_EXTRA_REGS offset=0 .macro RESTORE_EXTRA_REGS offset=0
movq_cfi_restore 0*8+\offset, r15 movq 0*8+\offset(%rsp), %r15
movq_cfi_restore 1*8+\offset, r14 movq 1*8+\offset(%rsp), %r14
movq_cfi_restore 2*8+\offset, r13 movq 2*8+\offset(%rsp), %r13
movq_cfi_restore 3*8+\offset, r12 movq 3*8+\offset(%rsp), %r12
movq_cfi_restore 4*8+\offset, rbp movq 4*8+\offset(%rsp), %rbp
movq_cfi_restore 5*8+\offset, rbx movq 5*8+\offset(%rsp), %rbx
.endm .endm
.macro ZERO_EXTRA_REGS .macro ZERO_EXTRA_REGS
...@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with
.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1 .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
.if \rstor_r11 .if \rstor_r11
movq_cfi_restore 6*8, r11 movq 6*8(%rsp), %r11
.endif .endif
.if \rstor_r8910 .if \rstor_r8910
movq_cfi_restore 7*8, r10 movq 7*8(%rsp), %r10
movq_cfi_restore 8*8, r9 movq 8*8(%rsp), %r9
movq_cfi_restore 9*8, r8 movq 9*8(%rsp), %r8
.endif .endif
.if \rstor_rax .if \rstor_rax
movq_cfi_restore 10*8, rax movq 10*8(%rsp), %rax
.endif .endif
.if \rstor_rcx .if \rstor_rcx
movq_cfi_restore 11*8, rcx movq 11*8(%rsp), %rcx
.endif .endif
.if \rstor_rdx .if \rstor_rdx
movq_cfi_restore 12*8, rdx movq 12*8(%rsp), %rdx
.endif .endif
movq_cfi_restore 13*8, rsi movq 13*8(%rsp), %rsi
movq_cfi_restore 14*8, rdi movq 14*8(%rsp), %rdi
.endm .endm
.macro RESTORE_C_REGS .macro RESTORE_C_REGS
RESTORE_C_REGS_HELPER 1,1,1,1,1 RESTORE_C_REGS_HELPER 1,1,1,1,1
...@@ -204,8 +201,7 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -204,8 +201,7 @@ For 32-bit we have the following conventions - kernel is built with
.endm .endm
.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0 .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
addq $15*8+\addskip, %rsp subq $-(15*8+\addskip), %rsp
CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
.endm .endm
.macro icebp .macro icebp
...@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with
*/ */
.macro SAVE_ALL .macro SAVE_ALL
pushl_cfi_reg eax pushl %eax
pushl_cfi_reg ebp pushl %ebp
pushl_cfi_reg edi pushl %edi
pushl_cfi_reg esi pushl %esi
pushl_cfi_reg edx pushl %edx
pushl_cfi_reg ecx pushl %ecx
pushl_cfi_reg ebx pushl %ebx
.endm .endm
.macro RESTORE_ALL .macro RESTORE_ALL
popl_cfi_reg ebx popl %ebx
popl_cfi_reg ecx popl %ecx
popl_cfi_reg edx popl %edx
popl_cfi_reg esi popl %esi
popl_cfi_reg edi popl %edi
popl_cfi_reg ebp popl %ebp
popl_cfi_reg eax popl %eax
.endm .endm
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
......
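The calling.h changes above replace the movq_cfi/movq_cfi_restore wrappers with plain movq instructions at fixed offsets from %rsp. For reference, a small C sketch of the register save area those offsets describe - r15 at 0*8 up to rdi at 14*8, plus the six entry/hardware-pushed slots that bring the total to SIZEOF_PTREGS = 21*8. Illustration only: the authoritative layout is struct pt_regs in arch/x86/include/asm/ptrace.h, and the struct/field names below are made up for the sketch.

/* Build with: gcc -std=c11 -c ptregs_layout.c */
#include <assert.h>
#include <stdint.h>

struct ptregs_layout {
	/* saved by SAVE_EXTRA_REGS, offsets 0*8 .. 5*8 */
	uint64_t r15, r14, r13, r12, rbp, rbx;
	/* saved by SAVE_C_REGS, offsets 6*8 .. 14*8 */
	uint64_t r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
	/* pushed by the entry code / the CPU before the GP registers */
	uint64_t orig_rax, rip, cs, rflags, rsp, ss;
};

/* SIZEOF_PTREGS in calling.h is 21*8 bytes. */
static_assert(sizeof(struct ptregs_layout) == 21 * 8,
	      "register save area must match SIZEOF_PTREGS");

int main(void) { return 0; }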
 /*
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-/*
- * entry.S contains the system-call and fault low-level handling routines.
- * This also contains the timer-interrupt handler, as well as all interrupts
- * and faults that can result in a task-switch.
+ *  Copyright (C) 1991,1992  Linus Torvalds
  *
- * NOTE: This code handles signal-recognition, which happens every time
- * after a timer-interrupt and after each system call.
- *
- * I changed all the .align's to 4 (16 byte alignment), as that's faster
- * on a 486.
+ * entry_32.S contains the system-call and low-level fault and trap handling routines.
  *
  * Stack layout in 'syscall_exit':
- *	ptrace needs to have all regs on the stack.
- *	if the order here is changed, it needs to be
- *	updated in fork.c:copy_process, signal.c:do_signal,
+ *	ptrace needs to have all registers on the stack.
+ *	If the order here is changed, it needs to be
+ *	updated in fork.c:copy_process(), signal.c:do_signal(),
  *	ptrace.c and ptrace.h
  *
  *	 0(%esp) - %ebx
@@ -37,8 +26,6 @@
  *	38(%esp) - %eflags
  *	3C(%esp) - %oldesp
  *	40(%esp) - %oldss
- *
- * "current" is in register %ebx during any slow entries.
  */
 #include <linux/linkage.h>
@@ -50,7 +37,6 @@
 #include <asm/smp.h>
 #include <asm/page_types.h>
 #include <asm/percpu.h>
-#include <asm/dwarf2.h>
 #include <asm/processor-flags.h>
 #include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
...@@ -62,11 +48,11 @@ ...@@ -62,11 +48,11 @@
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h> #include <linux/elf-em.h>
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE 0x40000000 #define __AUDIT_ARCH_LE 0x40000000
#ifndef CONFIG_AUDITSYSCALL #ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit syscall_trace_entry # define sysenter_audit syscall_trace_entry
#define sysexit_audit syscall_exit_work # define sysexit_audit syscall_exit_work
#endif #endif
.section .entry.text, "ax" .section .entry.text, "ax"
...@@ -85,16 +71,16 @@ ...@@ -85,16 +71,16 @@
*/ */
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF # define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else #else
#define preempt_stop(clobbers) # define preempt_stop(clobbers)
#define resume_kernel restore_all # define resume_kernel restore_all
#endif #endif
.macro TRACE_IRQS_IRET .macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off? testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off?
jz 1f jz 1f
TRACE_IRQS_ON TRACE_IRQS_ON
1: 1:
#endif #endif
...@@ -113,11 +99,10 @@ ...@@ -113,11 +99,10 @@
/* unfortunately push/pop can't be no-op */ /* unfortunately push/pop can't be no-op */
.macro PUSH_GS .macro PUSH_GS
pushl_cfi $0 pushl $0
.endm .endm
.macro POP_GS pop=0 .macro POP_GS pop=0
addl $(4 + \pop), %esp addl $(4 + \pop), %esp
CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm .endm
.macro POP_GS_EX .macro POP_GS_EX
.endm .endm
...@@ -137,181 +122,119 @@ ...@@ -137,181 +122,119 @@
#else /* CONFIG_X86_32_LAZY_GS */ #else /* CONFIG_X86_32_LAZY_GS */
.macro PUSH_GS .macro PUSH_GS
pushl_cfi %gs pushl %gs
/*CFI_REL_OFFSET gs, 0*/
.endm .endm
.macro POP_GS pop=0 .macro POP_GS pop=0
98: popl_cfi %gs 98: popl %gs
/*CFI_RESTORE gs*/
.if \pop <> 0 .if \pop <> 0
add $\pop, %esp add $\pop, %esp
CFI_ADJUST_CFA_OFFSET -\pop
.endif .endif
.endm .endm
.macro POP_GS_EX .macro POP_GS_EX
.pushsection .fixup, "ax" .pushsection .fixup, "ax"
99: movl $0, (%esp) 99: movl $0, (%esp)
jmp 98b jmp 98b
.popsection .popsection
_ASM_EXTABLE(98b,99b) _ASM_EXTABLE(98b, 99b)
.endm .endm
.macro PTGS_TO_GS .macro PTGS_TO_GS
98: mov PT_GS(%esp), %gs 98: mov PT_GS(%esp), %gs
.endm .endm
.macro PTGS_TO_GS_EX .macro PTGS_TO_GS_EX
.pushsection .fixup, "ax" .pushsection .fixup, "ax"
99: movl $0, PT_GS(%esp) 99: movl $0, PT_GS(%esp)
jmp 98b jmp 98b
.popsection .popsection
_ASM_EXTABLE(98b,99b) _ASM_EXTABLE(98b, 99b)
.endm .endm
.macro GS_TO_REG reg .macro GS_TO_REG reg
movl %gs, \reg movl %gs, \reg
/*CFI_REGISTER gs, \reg*/
.endm .endm
.macro REG_TO_PTGS reg .macro REG_TO_PTGS reg
movl \reg, PT_GS(%esp) movl \reg, PT_GS(%esp)
/*CFI_REL_OFFSET gs, PT_GS*/
.endm .endm
.macro SET_KERNEL_GS reg .macro SET_KERNEL_GS reg
movl $(__KERNEL_STACK_CANARY), \reg movl $(__KERNEL_STACK_CANARY), \reg
movl \reg, %gs movl \reg, %gs
.endm .endm
#endif /* CONFIG_X86_32_LAZY_GS */ #endif /* CONFIG_X86_32_LAZY_GS */
 .macro SAVE_ALL
 	cld
 	PUSH_GS
-	pushl_cfi %fs
-	/*CFI_REL_OFFSET fs, 0;*/
-	pushl_cfi %es
-	/*CFI_REL_OFFSET es, 0;*/
-	pushl_cfi %ds
-	/*CFI_REL_OFFSET ds, 0;*/
-	pushl_cfi %eax
-	CFI_REL_OFFSET eax, 0
-	pushl_cfi %ebp
-	CFI_REL_OFFSET ebp, 0
-	pushl_cfi %edi
-	CFI_REL_OFFSET edi, 0
-	pushl_cfi %esi
-	CFI_REL_OFFSET esi, 0
-	pushl_cfi %edx
-	CFI_REL_OFFSET edx, 0
-	pushl_cfi %ecx
-	CFI_REL_OFFSET ecx, 0
-	pushl_cfi %ebx
-	CFI_REL_OFFSET ebx, 0
-	movl $(__USER_DS), %edx
-	movl %edx, %ds
-	movl %edx, %es
-	movl $(__KERNEL_PERCPU), %edx
-	movl %edx, %fs
+	pushl	%fs
+	pushl	%es
+	pushl	%ds
+	pushl	%eax
+	pushl	%ebp
+	pushl	%edi
+	pushl	%esi
+	pushl	%edx
+	pushl	%ecx
+	pushl	%ebx
+	movl	$(__USER_DS), %edx
+	movl	%edx, %ds
+	movl	%edx, %es
+	movl	$(__KERNEL_PERCPU), %edx
+	movl	%edx, %fs
 	SET_KERNEL_GS %edx
 .endm
.macro RESTORE_INT_REGS .macro RESTORE_INT_REGS
popl_cfi %ebx popl %ebx
CFI_RESTORE ebx popl %ecx
popl_cfi %ecx popl %edx
CFI_RESTORE ecx popl %esi
popl_cfi %edx popl %edi
CFI_RESTORE edx popl %ebp
popl_cfi %esi popl %eax
CFI_RESTORE esi
popl_cfi %edi
CFI_RESTORE edi
popl_cfi %ebp
CFI_RESTORE ebp
popl_cfi %eax
CFI_RESTORE eax
.endm .endm
.macro RESTORE_REGS pop=0 .macro RESTORE_REGS pop=0
RESTORE_INT_REGS RESTORE_INT_REGS
1: popl_cfi %ds 1: popl %ds
/*CFI_RESTORE ds;*/ 2: popl %es
2: popl_cfi %es 3: popl %fs
/*CFI_RESTORE es;*/
3: popl_cfi %fs
/*CFI_RESTORE fs;*/
POP_GS \pop POP_GS \pop
.pushsection .fixup, "ax" .pushsection .fixup, "ax"
4: movl $0, (%esp) 4: movl $0, (%esp)
jmp 1b jmp 1b
5: movl $0, (%esp) 5: movl $0, (%esp)
jmp 2b jmp 2b
6: movl $0, (%esp) 6: movl $0, (%esp)
jmp 3b jmp 3b
.popsection .popsection
_ASM_EXTABLE(1b,4b) _ASM_EXTABLE(1b, 4b)
_ASM_EXTABLE(2b,5b) _ASM_EXTABLE(2b, 5b)
_ASM_EXTABLE(3b,6b) _ASM_EXTABLE(3b, 6b)
POP_GS_EX POP_GS_EX
.endm .endm
.macro RING0_INT_FRAME
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, 3*4
/*CFI_OFFSET cs, -2*4;*/
CFI_OFFSET eip, -3*4
.endm
.macro RING0_EC_FRAME
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, 4*4
/*CFI_OFFSET cs, -2*4;*/
CFI_OFFSET eip, -3*4
.endm
.macro RING0_PTREGS_FRAME
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
CFI_OFFSET eip, PT_EIP-PT_OLDESP
/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
CFI_OFFSET eax, PT_EAX-PT_OLDESP
CFI_OFFSET ebp, PT_EBP-PT_OLDESP
CFI_OFFSET edi, PT_EDI-PT_OLDESP
CFI_OFFSET esi, PT_ESI-PT_OLDESP
CFI_OFFSET edx, PT_EDX-PT_OLDESP
CFI_OFFSET ecx, PT_ECX-PT_OLDESP
CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
CFI_STARTPROC pushl %eax
pushl_cfi %eax call schedule_tail
call schedule_tail
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
popl_cfi %eax popl %eax
pushl_cfi $0x0202 # Reset kernel eflags pushl $0x0202 # Reset kernel eflags
popfl_cfi popfl
jmp syscall_exit jmp syscall_exit
CFI_ENDPROC
END(ret_from_fork) END(ret_from_fork)
ENTRY(ret_from_kernel_thread) ENTRY(ret_from_kernel_thread)
CFI_STARTPROC pushl %eax
pushl_cfi %eax call schedule_tail
call schedule_tail
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
popl_cfi %eax popl %eax
pushl_cfi $0x0202 # Reset kernel eflags pushl $0x0202 # Reset kernel eflags
popfl_cfi popfl
movl PT_EBP(%esp),%eax movl PT_EBP(%esp), %eax
call *PT_EBX(%esp) call *PT_EBX(%esp)
movl $0,PT_EAX(%esp) movl $0, PT_EAX(%esp)
jmp syscall_exit jmp syscall_exit
CFI_ENDPROC
ENDPROC(ret_from_kernel_thread) ENDPROC(ret_from_kernel_thread)
/* /*
...@@ -323,76 +246,70 @@ ENDPROC(ret_from_kernel_thread) ...@@ -323,76 +246,70 @@ ENDPROC(ret_from_kernel_thread)
# userspace resumption stub bypassing syscall exit tracing # userspace resumption stub bypassing syscall exit tracing
ALIGN ALIGN
RING0_PTREGS_FRAME
ret_from_exception: ret_from_exception:
preempt_stop(CLBR_ANY) preempt_stop(CLBR_ANY)
ret_from_intr: ret_from_intr:
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86 #ifdef CONFIG_VM86
movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
movb PT_CS(%esp), %al movb PT_CS(%esp), %al
andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else #else
/* /*
* We can be coming here from child spawned by kernel_thread(). * We can be coming here from child spawned by kernel_thread().
*/ */
movl PT_CS(%esp), %eax movl PT_CS(%esp), %eax
andl $SEGMENT_RPL_MASK, %eax andl $SEGMENT_RPL_MASK, %eax
#endif #endif
cmpl $USER_RPL, %eax cmpl $USER_RPL, %eax
jb resume_kernel # not returning to v8086 or userspace jb resume_kernel # not returning to v8086 or userspace
ENTRY(resume_userspace) ENTRY(resume_userspace)
LOCKDEP_SYS_EXIT LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending # setting need_resched or sigpending
# between sampling and the iret # between sampling and the iret
TRACE_IRQS_OFF TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done on andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
# int/exception return? # int/exception return?
jne work_pending jne work_pending
jmp restore_all jmp restore_all
END(ret_from_exception) END(ret_from_exception)
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
ENTRY(resume_kernel) ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY) DISABLE_INTERRUPTS(CLBR_ANY)
need_resched: need_resched:
cmpl $0,PER_CPU_VAR(__preempt_count) cmpl $0, PER_CPU_VAR(__preempt_count)
jnz restore_all jnz restore_all
testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all jz restore_all
call preempt_schedule_irq call preempt_schedule_irq
jmp need_resched jmp need_resched
END(resume_kernel) END(resume_kernel)
#endif #endif
-	CFI_ENDPROC
-
-/* SYSENTER_RETURN points to after the "sysenter" instruction in
-   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
+/*
+ * SYSENTER_RETURN points to after the SYSENTER instruction
+ * in the vsyscall page.  See vsyscall-sysentry.S, which defines
+ * the symbol.
+ */

-	# sysenter call handler stub
-ENTRY(ia32_sysenter_target)
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 0
-	CFI_REGISTER esp, ebp
-	movl TSS_sysenter_sp0(%esp),%esp
+	# SYSENTER call handler stub
+ENTRY(entry_SYSENTER_32)
+	movl	TSS_sysenter_sp0(%esp), %esp
 sysenter_past_esp:
/* /*
* Interrupts are disabled here, but we can't trace it until * Interrupts are disabled here, but we can't trace it until
* enough kernel state to call TRACE_IRQS_OFF can be called - but * enough kernel state to call TRACE_IRQS_OFF can be called - but
* we immediately enable interrupts at that point anyway. * we immediately enable interrupts at that point anyway.
*/ */
pushl_cfi $__USER_DS pushl $__USER_DS
/*CFI_REL_OFFSET ss, 0*/ pushl %ebp
pushl_cfi %ebp pushfl
CFI_REL_OFFSET esp, 0 orl $X86_EFLAGS_IF, (%esp)
pushfl_cfi pushl $__USER_CS
orl $X86_EFLAGS_IF, (%esp)
pushl_cfi $__USER_CS
/*CFI_REL_OFFSET cs, 0*/
/* /*
* Push current_thread_info()->sysenter_return to the stack. * Push current_thread_info()->sysenter_return to the stack.
* A tiny bit of offset fixup is necessary: TI_sysenter_return * A tiny bit of offset fixup is necessary: TI_sysenter_return
...@@ -401,10 +318,9 @@ sysenter_past_esp: ...@@ -401,10 +318,9 @@ sysenter_past_esp:
* TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack; * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
* and THREAD_SIZE takes us to the bottom. * and THREAD_SIZE takes us to the bottom.
*/ */
pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp) pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
CFI_REL_OFFSET eip, 0
pushl_cfi %eax pushl %eax
SAVE_ALL SAVE_ALL
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
...@@ -412,138 +328,136 @@ sysenter_past_esp: ...@@ -412,138 +328,136 @@ sysenter_past_esp:
* Load the potential sixth argument from user stack. * Load the potential sixth argument from user stack.
* Careful about security. * Careful about security.
*/ */
cmpl $__PAGE_OFFSET-3,%ebp cmpl $__PAGE_OFFSET-3, %ebp
jae syscall_fault jae syscall_fault
ASM_STAC ASM_STAC
1: movl (%ebp),%ebp 1: movl (%ebp), %ebp
ASM_CLAC ASM_CLAC
movl %ebp,PT_EBP(%esp) movl %ebp, PT_EBP(%esp)
_ASM_EXTABLE(1b,syscall_fault) _ASM_EXTABLE(1b, syscall_fault)
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
jnz sysenter_audit jnz sysenter_audit
sysenter_do_call: sysenter_do_call:
cmpl $(NR_syscalls), %eax cmpl $(NR_syscalls), %eax
jae sysenter_badsys jae sysenter_badsys
call *sys_call_table(,%eax,4) call *sys_call_table(, %eax, 4)
sysenter_after_call: sysenter_after_call:
movl %eax,PT_EAX(%esp) movl %eax, PT_EAX(%esp)
LOCKDEP_SYS_EXIT LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY) DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx movl TI_flags(%ebp), %ecx
testl $_TIF_ALLWORK_MASK, %ecx testl $_TIF_ALLWORK_MASK, %ecx
jnz sysexit_audit jnz sysexit_audit
sysenter_exit: sysenter_exit:
/* if something modifies registers it must also disable sysexit */ /* if something modifies registers it must also disable sysexit */
movl PT_EIP(%esp), %edx movl PT_EIP(%esp), %edx
movl PT_OLDESP(%esp), %ecx movl PT_OLDESP(%esp), %ecx
xorl %ebp,%ebp xorl %ebp, %ebp
TRACE_IRQS_ON TRACE_IRQS_ON
1: mov PT_FS(%esp), %fs 1: mov PT_FS(%esp), %fs
PTGS_TO_GS PTGS_TO_GS
ENABLE_INTERRUPTS_SYSEXIT ENABLE_INTERRUPTS_SYSEXIT
#ifdef CONFIG_AUDITSYSCALL #ifdef CONFIG_AUDITSYSCALL
sysenter_audit: sysenter_audit:
testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp) testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
jnz syscall_trace_entry jnz syscall_trace_entry
/* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */ /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */ movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */
/* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */ /* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */
pushl_cfi PT_ESI(%esp) /* a3: 5th arg */ pushl PT_ESI(%esp) /* a3: 5th arg */
pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */ pushl PT_EDX+4(%esp) /* a2: 4th arg */
call __audit_syscall_entry call __audit_syscall_entry
popl_cfi %ecx /* get that remapped edx off the stack */ popl %ecx /* get that remapped edx off the stack */
popl_cfi %ecx /* get that remapped esi off the stack */ popl %ecx /* get that remapped esi off the stack */
movl PT_EAX(%esp),%eax /* reload syscall number */ movl PT_EAX(%esp), %eax /* reload syscall number */
jmp sysenter_do_call jmp sysenter_do_call
sysexit_audit: sysexit_audit:
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
jnz syscall_exit_work jnz syscall_exit_work
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY) ENABLE_INTERRUPTS(CLBR_ANY)
movl %eax,%edx /* second arg, syscall return value */ movl %eax, %edx /* second arg, syscall return value */
cmpl $-MAX_ERRNO,%eax /* is it an error ? */ cmpl $-MAX_ERRNO, %eax /* is it an error ? */
setbe %al /* 1 if so, 0 if not */ setbe %al /* 1 if so, 0 if not */
movzbl %al,%eax /* zero-extend that */ movzbl %al, %eax /* zero-extend that */
call __audit_syscall_exit call __audit_syscall_exit
DISABLE_INTERRUPTS(CLBR_ANY) DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx movl TI_flags(%ebp), %ecx
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
jnz syscall_exit_work jnz syscall_exit_work
movl PT_EAX(%esp),%eax /* reload syscall return value */ movl PT_EAX(%esp), %eax /* reload syscall return value */
jmp sysenter_exit jmp sysenter_exit
#endif #endif
-	CFI_ENDPROC
-.pushsection .fixup,"ax"
-2:	movl $0,PT_FS(%esp)
-	jmp 1b
+.pushsection .fixup, "ax"
+2:	movl	$0, PT_FS(%esp)
+	jmp	1b
 .popsection
-	_ASM_EXTABLE(1b,2b)
+	_ASM_EXTABLE(1b, 2b)
 	PTGS_TO_GS_EX
-ENDPROC(ia32_sysenter_target)
+ENDPROC(entry_SYSENTER_32)

 	# system call handler stub
-ENTRY(system_call)
-	RING0_INT_FRAME			# can't unwind into user space anyway
+ENTRY(entry_INT80_32)
 	ASM_CLAC
-	pushl_cfi %eax			# save orig_eax
+	pushl	%eax			# save orig_eax
 	SAVE_ALL
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
# system call tracing in operation / emulation # system call tracing in operation / emulation
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
jnz syscall_trace_entry jnz syscall_trace_entry
cmpl $(NR_syscalls), %eax cmpl $(NR_syscalls), %eax
jae syscall_badsys jae syscall_badsys
syscall_call: syscall_call:
call *sys_call_table(,%eax,4) call *sys_call_table(, %eax, 4)
syscall_after_call: syscall_after_call:
movl %eax,PT_EAX(%esp) # store the return value movl %eax, PT_EAX(%esp) # store the return value
syscall_exit: syscall_exit:
LOCKDEP_SYS_EXIT LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending # setting need_resched or sigpending
# between sampling and the iret # between sampling and the iret
TRACE_IRQS_OFF TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx movl TI_flags(%ebp), %ecx
testl $_TIF_ALLWORK_MASK, %ecx # current->work testl $_TIF_ALLWORK_MASK, %ecx # current->work
jnz syscall_exit_work jnz syscall_exit_work
restore_all: restore_all:
TRACE_IRQS_IRET TRACE_IRQS_IRET
restore_all_notrace: restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32 #ifdef CONFIG_X86_ESPFIX32
movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
-	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
-	# are returning to the kernel.
-	# See comments in process.c:copy_thread() for details.
-	movb PT_OLDSS(%esp), %ah
-	movb PT_CS(%esp), %al
-	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
-	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
-	CFI_REMEMBER_STATE
-	je ldt_ss			# returning to user-space with LDT SS
+	/*
+	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+	 * are returning to the kernel.
+	 * See comments in process.c:copy_thread() for details.
+	 */
+	movb	PT_OLDSS(%esp), %ah
+	movb	PT_CS(%esp), %al
+	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
+	je ldt_ss				# returning to user-space with LDT SS
#endif #endif
restore_nocheck: restore_nocheck:
RESTORE_REGS 4 # skip orig_eax/error_code RESTORE_REGS 4 # skip orig_eax/error_code
irq_return: irq_return:
INTERRUPT_RETURN INTERRUPT_RETURN
.section .fixup,"ax" .section .fixup, "ax"
ENTRY(iret_exc) ENTRY(iret_exc )
pushl $0 # no error code pushl $0 # no error code
pushl $do_iret_error pushl $do_iret_error
jmp error_code jmp error_code
.previous .previous
_ASM_EXTABLE(irq_return,iret_exc) _ASM_EXTABLE(irq_return, iret_exc)
#ifdef CONFIG_X86_ESPFIX32 #ifdef CONFIG_X86_ESPFIX32
CFI_RESTORE_STATE
ldt_ss: ldt_ss:
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
/* /*
...@@ -554,8 +468,8 @@ ldt_ss: ...@@ -554,8 +468,8 @@ ldt_ss:
* is still available to implement the setting of the high * is still available to implement the setting of the high
* 16-bits in the INTERRUPT_RETURN paravirt-op. * 16-bits in the INTERRUPT_RETURN paravirt-op.
*/ */
cmpl $0, pv_info+PARAVIRT_enabled cmpl $0, pv_info+PARAVIRT_enabled
jne restore_nocheck jne restore_nocheck
#endif #endif
/* /*
...@@ -570,122 +484,118 @@ ldt_ss: ...@@ -570,122 +484,118 @@ ldt_ss:
* a base address that matches for the difference. * a base address that matches for the difference.
*/ */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
mov %esp, %edx /* load kernel esp */ mov %esp, %edx /* load kernel esp */
mov PT_OLDESP(%esp), %eax /* load userspace esp */ mov PT_OLDESP(%esp), %eax /* load userspace esp */
mov %dx, %ax /* eax: new kernel esp */ mov %dx, %ax /* eax: new kernel esp */
sub %eax, %edx /* offset (low word is 0) */ sub %eax, %edx /* offset (low word is 0) */
shr $16, %edx shr $16, %edx
mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
-	pushl_cfi $__ESPFIX_SS
-	pushl_cfi %eax			/* new kernel esp */
-	/* Disable interrupts, but do not irqtrace this section: we
+	pushl	$__ESPFIX_SS
+	pushl	%eax			/* new kernel esp */
+	/*
+	 * Disable interrupts, but do not irqtrace this section: we
 	 * will soon execute iret and the tracer was already set to
-	 * the irqstate after the iret */
+	 * the irqstate after the IRET:
+	 */
 	DISABLE_INTERRUPTS(CLBR_EAX)
 	lss (%esp), %esp		/* switch to espfix segment */
-	CFI_ADJUST_CFA_OFFSET -8
-	jmp restore_nocheck
+	jmp	restore_nocheck
 #endif
-	CFI_ENDPROC
-ENDPROC(system_call)
+ENDPROC(entry_INT80_32)
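The ldt_ss path above builds an espfix stack segment: it keeps only the low 16 bits of the kernel %esp and folds the remaining difference into bits 16..31 of the GDT_ESPFIX_SS descriptor base, so that base + truncated esp still equals the original kernel %esp. A worked C sketch of that arithmetic follows (illustration only; the two stack-pointer values are hypothetical):

/* Build with: gcc -O2 espfix_math.c -o espfix_math */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t kernel_esp = 0xc1234a10;	/* hypothetical kernel stack pointer */
	uint32_t user_esp   = 0xbf80fff0;	/* hypothetical userspace %esp */

	/* eax: user esp with its low 16 bits replaced by the kernel esp's */
	uint32_t new_esp = (kernel_esp & 0xffffu) | (user_esp & 0xffff0000u);
	/* edx: offset the segment base must absorb (low word is 0) */
	uint32_t offset  = kernel_esp - new_esp;

	uint8_t base_16_23 = (offset >> 16) & 0xff;	/* stored at GDT_ESPFIX_SS + 4 */
	uint8_t base_24_31 = (offset >> 24) & 0xff;	/* stored at GDT_ESPFIX_SS + 7 */

	printf("new esp 0x%08x, base bytes %02x %02x\n",
	       new_esp, base_16_23, base_24_31);
	printf("base + new esp = 0x%08x (equals the kernel esp)\n",
	       (base_16_23 << 16 | base_24_31 << 24) + new_esp);
	return 0;
}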
# perform work that needs to be done immediately before resumption # perform work that needs to be done immediately before resumption
ALIGN ALIGN
RING0_PTREGS_FRAME # can't unwind into user space anyway
work_pending: work_pending:
testb $_TIF_NEED_RESCHED, %cl testb $_TIF_NEED_RESCHED, %cl
jz work_notifysig jz work_notifysig
work_resched: work_resched:
call schedule call schedule
LOCKDEP_SYS_EXIT LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending # setting need_resched or sigpending
# between sampling and the iret # between sampling and the iret
TRACE_IRQS_OFF TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing? # than syscall tracing?
jz restore_all jz restore_all
testb $_TIF_NEED_RESCHED, %cl testb $_TIF_NEED_RESCHED, %cl
jnz work_resched jnz work_resched
work_notifysig: # deal with pending signals and work_notifysig: # deal with pending signals and
# notify-resume requests # notify-resume requests
#ifdef CONFIG_VM86 #ifdef CONFIG_VM86
testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
movl %esp, %eax movl %esp, %eax
jnz work_notifysig_v86 # returning to kernel-space or jnz work_notifysig_v86 # returning to kernel-space or
# vm86-space # vm86-space
1: 1:
#else #else
movl %esp, %eax movl %esp, %eax
#endif #endif
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
movb PT_CS(%esp), %bl movb PT_CS(%esp), %bl
andb $SEGMENT_RPL_MASK, %bl andb $SEGMENT_RPL_MASK, %bl
cmpb $USER_RPL, %bl cmpb $USER_RPL, %bl
jb resume_kernel jb resume_kernel
xorl %edx, %edx xorl %edx, %edx
call do_notify_resume call do_notify_resume
jmp resume_userspace jmp resume_userspace
#ifdef CONFIG_VM86 #ifdef CONFIG_VM86
ALIGN ALIGN
work_notifysig_v86: work_notifysig_v86:
pushl_cfi %ecx # save ti_flags for do_notify_resume pushl %ecx # save ti_flags for do_notify_resume
call save_v86_state # %eax contains pt_regs pointer call save_v86_state # %eax contains pt_regs pointer
popl_cfi %ecx popl %ecx
movl %eax, %esp movl %eax, %esp
jmp 1b jmp 1b
#endif #endif
END(work_pending) END(work_pending)
# perform syscall exit tracing # perform syscall exit tracing
ALIGN ALIGN
syscall_trace_entry: syscall_trace_entry:
movl $-ENOSYS,PT_EAX(%esp) movl $-ENOSYS, PT_EAX(%esp)
movl %esp, %eax movl %esp, %eax
call syscall_trace_enter call syscall_trace_enter
/* What it returned is what we'll actually use. */ /* What it returned is what we'll actually use. */
cmpl $(NR_syscalls), %eax cmpl $(NR_syscalls), %eax
jnae syscall_call jnae syscall_call
jmp syscall_exit jmp syscall_exit
END(syscall_trace_entry) END(syscall_trace_entry)
# perform syscall exit tracing # perform syscall exit tracing
ALIGN ALIGN
syscall_exit_work: syscall_exit_work:
testl $_TIF_WORK_SYSCALL_EXIT, %ecx testl $_TIF_WORK_SYSCALL_EXIT, %ecx
jz work_pending jz work_pending
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
# schedule() instead # schedule() instead
movl %esp, %eax movl %esp, %eax
call syscall_trace_leave call syscall_trace_leave
jmp resume_userspace jmp resume_userspace
END(syscall_exit_work) END(syscall_exit_work)
CFI_ENDPROC
RING0_INT_FRAME # can't unwind into user space anyway
syscall_fault: syscall_fault:
ASM_CLAC ASM_CLAC
GET_THREAD_INFO(%ebp) GET_THREAD_INFO(%ebp)
movl $-EFAULT,PT_EAX(%esp) movl $-EFAULT, PT_EAX(%esp)
jmp resume_userspace jmp resume_userspace
END(syscall_fault) END(syscall_fault)
syscall_badsys: syscall_badsys:
movl $-ENOSYS,%eax movl $-ENOSYS, %eax
jmp syscall_after_call jmp syscall_after_call
END(syscall_badsys) END(syscall_badsys)
sysenter_badsys: sysenter_badsys:
movl $-ENOSYS,%eax movl $-ENOSYS, %eax
jmp sysenter_after_call jmp sysenter_after_call
END(sysenter_badsys) END(sysenter_badsys)
CFI_ENDPROC
.macro FIXUP_ESPFIX_STACK .macro FIXUP_ESPFIX_STACK
/* /*
...@@ -697,25 +607,24 @@ END(sysenter_badsys) ...@@ -697,25 +607,24 @@ END(sysenter_badsys)
*/ */
#ifdef CONFIG_X86_ESPFIX32 #ifdef CONFIG_X86_ESPFIX32
/* fixup the stack */ /* fixup the stack */
mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
shl $16, %eax shl $16, %eax
addl %esp, %eax /* the adjusted stack pointer */ addl %esp, %eax /* the adjusted stack pointer */
pushl_cfi $__KERNEL_DS pushl $__KERNEL_DS
pushl_cfi %eax pushl %eax
lss (%esp), %esp /* switch to the normal stack segment */ lss (%esp), %esp /* switch to the normal stack segment */
CFI_ADJUST_CFA_OFFSET -8
#endif #endif
.endm .endm
.macro UNWIND_ESPFIX_STACK .macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32 #ifdef CONFIG_X86_ESPFIX32
movl %ss, %eax movl %ss, %eax
/* see if on espfix stack */ /* see if on espfix stack */
cmpw $__ESPFIX_SS, %ax cmpw $__ESPFIX_SS, %ax
jne 27f jne 27f
movl $__KERNEL_DS, %eax movl $__KERNEL_DS, %eax
movl %eax, %ds movl %eax, %ds
movl %eax, %es movl %eax, %es
/* switch to normal stack */ /* switch to normal stack */
FIXUP_ESPFIX_STACK FIXUP_ESPFIX_STACK
27: 27:
...@@ -728,13 +637,11 @@ END(sysenter_badsys) ...@@ -728,13 +637,11 @@ END(sysenter_badsys)
*/ */
.align 8 .align 8
ENTRY(irq_entries_start) ENTRY(irq_entries_start)
RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */ pushl $(~vector+0x80) /* Note: always in signed byte range */
vector=vector+1 vector=vector+1
jmp common_interrupt jmp common_interrupt
CFI_ADJUST_CFA_OFFSET -4
.align 8 .align 8
.endr .endr
END(irq_entries_start) END(irq_entries_start)
...@@ -746,75 +653,65 @@ END(irq_entries_start) ...@@ -746,75 +653,65 @@ END(irq_entries_start)
.p2align CONFIG_X86_L1_CACHE_SHIFT .p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt: common_interrupt:
ASM_CLAC ASM_CLAC
addl $-0x80,(%esp) /* Adjust vector into the [-256,-1] range */ addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
SAVE_ALL SAVE_ALL
TRACE_IRQS_OFF TRACE_IRQS_OFF
movl %esp,%eax movl %esp, %eax
call do_IRQ call do_IRQ
jmp ret_from_intr jmp ret_from_intr
ENDPROC(common_interrupt) ENDPROC(common_interrupt)
CFI_ENDPROC
#define BUILD_INTERRUPT3(name, nr, fn) \ #define BUILD_INTERRUPT3(name, nr, fn) \
ENTRY(name) \ ENTRY(name) \
RING0_INT_FRAME; \
ASM_CLAC; \ ASM_CLAC; \
pushl_cfi $~(nr); \ pushl $~(nr); \
SAVE_ALL; \ SAVE_ALL; \
TRACE_IRQS_OFF \ TRACE_IRQS_OFF \
movl %esp,%eax; \ movl %esp, %eax; \
call fn; \ call fn; \
jmp ret_from_intr; \ jmp ret_from_intr; \
CFI_ENDPROC; \
ENDPROC(name) ENDPROC(name)
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
#define TRACE_BUILD_INTERRUPT(name, nr) \ # define TRACE_BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else #else
#define TRACE_BUILD_INTERRUPT(name, nr) # define TRACE_BUILD_INTERRUPT(name, nr)
#endif #endif
#define BUILD_INTERRUPT(name, nr) \ #define BUILD_INTERRUPT(name, nr) \
BUILD_INTERRUPT3(name, nr, smp_##name); \ BUILD_INTERRUPT3(name, nr, smp_##name); \
TRACE_BUILD_INTERRUPT(name, nr) TRACE_BUILD_INTERRUPT(name, nr)
/* The include is where all of the SMP etc. interrupts come from */ /* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h> #include <asm/entry_arch.h>
ENTRY(coprocessor_error) ENTRY(coprocessor_error)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $0 pushl $0
pushl_cfi $do_coprocessor_error pushl $do_coprocessor_error
jmp error_code jmp error_code
CFI_ENDPROC
END(coprocessor_error) END(coprocessor_error)
ENTRY(simd_coprocessor_error) ENTRY(simd_coprocessor_error)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $0 pushl $0
#ifdef CONFIG_X86_INVD_BUG #ifdef CONFIG_X86_INVD_BUG
/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
ALTERNATIVE "pushl_cfi $do_general_protection", \ ALTERNATIVE "pushl $do_general_protection", \
"pushl $do_simd_coprocessor_error", \ "pushl $do_simd_coprocessor_error", \
X86_FEATURE_XMM X86_FEATURE_XMM
#else #else
pushl_cfi $do_simd_coprocessor_error pushl $do_simd_coprocessor_error
#endif #endif
jmp error_code jmp error_code
CFI_ENDPROC
END(simd_coprocessor_error) END(simd_coprocessor_error)
ENTRY(device_not_available) ENTRY(device_not_available)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $-1 # mark this as an int pushl $-1 # mark this as an int
pushl_cfi $do_device_not_available pushl $do_device_not_available
jmp error_code jmp error_code
CFI_ENDPROC
END(device_not_available) END(device_not_available)
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
...@@ -830,196 +727,172 @@ END(native_irq_enable_sysexit) ...@@ -830,196 +727,172 @@ END(native_irq_enable_sysexit)
#endif #endif
ENTRY(overflow) ENTRY(overflow)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $0 pushl $0
pushl_cfi $do_overflow pushl $do_overflow
jmp error_code jmp error_code
CFI_ENDPROC
END(overflow) END(overflow)
ENTRY(bounds) ENTRY(bounds)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $0 pushl $0
pushl_cfi $do_bounds pushl $do_bounds
jmp error_code jmp error_code
CFI_ENDPROC
END(bounds) END(bounds)
ENTRY(invalid_op) ENTRY(invalid_op)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $0 pushl $0
pushl_cfi $do_invalid_op pushl $do_invalid_op
jmp error_code jmp error_code
CFI_ENDPROC
END(invalid_op) END(invalid_op)
ENTRY(coprocessor_segment_overrun) ENTRY(coprocessor_segment_overrun)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $0 pushl $0
pushl_cfi $do_coprocessor_segment_overrun pushl $do_coprocessor_segment_overrun
jmp error_code jmp error_code
CFI_ENDPROC
END(coprocessor_segment_overrun) END(coprocessor_segment_overrun)
ENTRY(invalid_TSS) ENTRY(invalid_TSS)
RING0_EC_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $do_invalid_TSS pushl $do_invalid_TSS
jmp error_code jmp error_code
CFI_ENDPROC
END(invalid_TSS) END(invalid_TSS)
ENTRY(segment_not_present) ENTRY(segment_not_present)
RING0_EC_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $do_segment_not_present pushl $do_segment_not_present
jmp error_code jmp error_code
CFI_ENDPROC
END(segment_not_present) END(segment_not_present)
ENTRY(stack_segment) ENTRY(stack_segment)
RING0_EC_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $do_stack_segment pushl $do_stack_segment
jmp error_code jmp error_code
CFI_ENDPROC
END(stack_segment) END(stack_segment)
ENTRY(alignment_check) ENTRY(alignment_check)
RING0_EC_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $do_alignment_check pushl $do_alignment_check
jmp error_code jmp error_code
CFI_ENDPROC
END(alignment_check) END(alignment_check)
ENTRY(divide_error) ENTRY(divide_error)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $0 # no error code pushl $0 # no error code
pushl_cfi $do_divide_error pushl $do_divide_error
jmp error_code jmp error_code
CFI_ENDPROC
END(divide_error) END(divide_error)
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE
ENTRY(machine_check) ENTRY(machine_check)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $0 pushl $0
pushl_cfi machine_check_vector pushl machine_check_vector
jmp error_code jmp error_code
CFI_ENDPROC
END(machine_check) END(machine_check)
#endif #endif
ENTRY(spurious_interrupt_bug) ENTRY(spurious_interrupt_bug)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $0 pushl $0
pushl_cfi $do_spurious_interrupt_bug pushl $do_spurious_interrupt_bug
jmp error_code jmp error_code
CFI_ENDPROC
END(spurious_interrupt_bug) END(spurious_interrupt_bug)
#ifdef CONFIG_XEN #ifdef CONFIG_XEN
-/* Xen doesn't set %esp to be precisely what the normal sysenter
-   entrypoint expects, so fix it up before using the normal path. */
+/*
+ * Xen doesn't set %esp to be precisely what the normal SYSENTER
+ * entry point expects, so fix it up before using the normal path.
+ */
 ENTRY(xen_sysenter_target)
-	RING0_INT_FRAME
-	addl $5*4, %esp		/* remove xen-provided frame */
-	CFI_ADJUST_CFA_OFFSET -5*4
-	jmp sysenter_past_esp
-	CFI_ENDPROC
+	addl	$5*4, %esp			/* remove xen-provided frame */
+	jmp	sysenter_past_esp
ENTRY(xen_hypervisor_callback) ENTRY(xen_hypervisor_callback)
-	CFI_STARTPROC
-	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+	pushl	$-1				/* orig_ax = -1 => not a system call */
 	SAVE_ALL
 	TRACE_IRQS_OFF
-	/* Check to see if we got the event in the critical
-	   region in xen_iret_direct, after we've reenabled
-	   events and checked for pending events.  This simulates
-	   iret instruction's behaviour where it delivers a
-	   pending interrupt when enabling interrupts. */
-	movl PT_EIP(%esp),%eax
-	cmpl $xen_iret_start_crit,%eax
-	jb   1f
-	cmpl $xen_iret_end_crit,%eax
-	jae  1f
+	/*
+	 * Check to see if we got the event in the critical
+	 * region in xen_iret_direct, after we've reenabled
+	 * events and checked for pending events.  This simulates
+	 * iret instruction's behaviour where it delivers a
+	 * pending interrupt when enabling interrupts:
+	 */
+	movl	PT_EIP(%esp), %eax
+	cmpl	$xen_iret_start_crit, %eax
+	jb	1f
+	cmpl	$xen_iret_end_crit, %eax
+	jae	1f
 	jmp  xen_iret_crit_fixup
ENTRY(xen_do_upcall) ENTRY(xen_do_upcall)
1: mov %esp, %eax 1: mov %esp, %eax
call xen_evtchn_do_upcall call xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT #ifndef CONFIG_PREEMPT
call xen_maybe_preempt_hcall call xen_maybe_preempt_hcall
#endif #endif
jmp ret_from_intr jmp ret_from_intr
CFI_ENDPROC
ENDPROC(xen_hypervisor_callback) ENDPROC(xen_hypervisor_callback)
-# Hypervisor uses this for application faults while it executes.
-# We get here for two reasons:
-#  1. Fault while reloading DS, ES, FS or GS
-#  2. Fault while executing IRET
-# Category 1 we fix up by reattempting the load, and zeroing the segment
-# register if the load fails.
-# Category 2 we fix up by jumping to do_iret_error. We cannot use the
-# normal Linux return path in this case because if we use the IRET hypercall
-# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
-# We distinguish between categories by maintaining a status value in EAX.
+/*
+ * Hypervisor uses this for application faults while it executes.
+ * We get here for two reasons:
+ *  1. Fault while reloading DS, ES, FS or GS
+ *  2. Fault while executing IRET
+ * Category 1 we fix up by reattempting the load, and zeroing the segment
+ * register if the load fails.
+ * Category 2 we fix up by jumping to do_iret_error. We cannot use the
+ * normal Linux return path in this case because if we use the IRET hypercall
+ * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
+ * We distinguish between categories by maintaining a status value in EAX.
+ */
 ENTRY(xen_failsafe_callback)
-	CFI_STARTPROC
-	pushl_cfi %eax
-	movl $1,%eax
-1:	mov 4(%esp),%ds
-2:	mov 8(%esp),%es
-3:	mov 12(%esp),%fs
-4:	mov 16(%esp),%gs
+	pushl	%eax
+	movl	$1, %eax
+1:	mov	4(%esp), %ds
+2:	mov	8(%esp), %es
+3:	mov	12(%esp), %fs
+4:	mov	16(%esp), %gs
 	/* EAX == 0 => Category 1 (Bad segment)
 	   EAX != 0 => Category 2 (Bad IRET) */
-	testl %eax,%eax
-	popl_cfi %eax
-	lea 16(%esp),%esp
-	CFI_ADJUST_CFA_OFFSET -16
-	jz 5f
-	jmp iret_exc
-5:	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+	testl	%eax, %eax
+	popl	%eax
+	lea	16(%esp), %esp
+	jz	5f
+	jmp	iret_exc
+5:	pushl	$-1				/* orig_ax = -1 => not a system call */
 	SAVE_ALL
 	jmp ret_from_exception
-	CFI_ENDPROC
.section .fixup, "ax"
.section .fixup,"ax" 6: xorl %eax, %eax
6: xorl %eax,%eax movl %eax, 4(%esp)
movl %eax,4(%esp) jmp 1b
jmp 1b 7: xorl %eax, %eax
7: xorl %eax,%eax movl %eax, 8(%esp)
movl %eax,8(%esp) jmp 2b
jmp 2b 8: xorl %eax, %eax
8: xorl %eax,%eax movl %eax, 12(%esp)
movl %eax,12(%esp) jmp 3b
jmp 3b 9: xorl %eax, %eax
9: xorl %eax,%eax movl %eax, 16(%esp)
movl %eax,16(%esp) jmp 4b
jmp 4b
.previous .previous
_ASM_EXTABLE(1b,6b) _ASM_EXTABLE(1b, 6b)
_ASM_EXTABLE(2b,7b) _ASM_EXTABLE(2b, 7b)
_ASM_EXTABLE(3b,8b) _ASM_EXTABLE(3b, 8b)
_ASM_EXTABLE(4b,9b) _ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback) ENDPROC(xen_failsafe_callback)
BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
xen_evtchn_do_upcall) xen_evtchn_do_upcall)
#endif /* CONFIG_XEN */ #endif /* CONFIG_XEN */
#if IS_ENABLED(CONFIG_HYPERV) #if IS_ENABLED(CONFIG_HYPERV)
...@@ -1036,28 +909,28 @@ ENTRY(mcount) ...@@ -1036,28 +909,28 @@ ENTRY(mcount)
END(mcount) END(mcount)
ENTRY(ftrace_caller) ENTRY(ftrace_caller)
pushl %eax pushl %eax
pushl %ecx pushl %ecx
pushl %edx pushl %edx
pushl $0 /* Pass NULL as regs pointer */ pushl $0 /* Pass NULL as regs pointer */
movl 4*4(%esp), %eax movl 4*4(%esp), %eax
movl 0x4(%ebp), %edx movl 0x4(%ebp), %edx
movl function_trace_op, %ecx movl function_trace_op, %ecx
subl $MCOUNT_INSN_SIZE, %eax subl $MCOUNT_INSN_SIZE, %eax
.globl ftrace_call .globl ftrace_call
ftrace_call: ftrace_call:
call ftrace_stub call ftrace_stub
addl $4,%esp /* skip NULL pointer */ addl $4, %esp /* skip NULL pointer */
popl %edx popl %edx
popl %ecx popl %ecx
popl %eax popl %eax
ftrace_ret: ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call .globl ftrace_graph_call
ftrace_graph_call: ftrace_graph_call:
jmp ftrace_stub jmp ftrace_stub
#endif #endif
.globl ftrace_stub .globl ftrace_stub
...@@ -1075,72 +948,72 @@ ENTRY(ftrace_regs_caller) ...@@ -1075,72 +948,72 @@ ENTRY(ftrace_regs_caller)
* as the current return ip is. We move the return ip into the * as the current return ip is. We move the return ip into the
* ip location, and move flags into the return ip location. * ip location, and move flags into the return ip location.
*/ */
pushl 4(%esp) /* save return ip into ip slot */ pushl 4(%esp) /* save return ip into ip slot */
pushl $0 /* Load 0 into orig_ax */ pushl $0 /* Load 0 into orig_ax */
pushl %gs pushl %gs
pushl %fs pushl %fs
pushl %es pushl %es
pushl %ds pushl %ds
pushl %eax pushl %eax
pushl %ebp pushl %ebp
pushl %edi pushl %edi
pushl %esi pushl %esi
pushl %edx pushl %edx
pushl %ecx pushl %ecx
pushl %ebx pushl %ebx
movl 13*4(%esp), %eax /* Get the saved flags */ movl 13*4(%esp), %eax /* Get the saved flags */
movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */ movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
/* clobbering return ip */ /* clobbering return ip */
movl $__KERNEL_CS,13*4(%esp) movl $__KERNEL_CS, 13*4(%esp)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */ movl 12*4(%esp), %eax /* Load ip (1st parameter) */
subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
pushl %esp /* Save pt_regs as 4th parameter */ pushl %esp /* Save pt_regs as 4th parameter */
GLOBAL(ftrace_regs_call) GLOBAL(ftrace_regs_call)
call ftrace_stub call ftrace_stub
addl $4, %esp /* Skip pt_regs */ addl $4, %esp /* Skip pt_regs */
movl 14*4(%esp), %eax /* Move flags back into cs */ movl 14*4(%esp), %eax /* Move flags back into cs */
movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
movl 12*4(%esp), %eax /* Get return ip from regs->ip */ movl 12*4(%esp), %eax /* Get return ip from regs->ip */
movl %eax, 14*4(%esp) /* Put return ip back for ret */ movl %eax, 14*4(%esp) /* Put return ip back for ret */
popl %ebx popl %ebx
popl %ecx popl %ecx
popl %edx popl %edx
popl %esi popl %esi
popl %edi popl %edi
popl %ebp popl %ebp
popl %eax popl %eax
popl %ds popl %ds
popl %es popl %es
popl %fs popl %fs
popl %gs popl %gs
addl $8, %esp /* Skip orig_ax and ip */ addl $8, %esp /* Skip orig_ax and ip */
popf /* Pop flags at end (no addl to corrupt flags) */ popf /* Pop flags at end (no addl to corrupt flags) */
jmp ftrace_ret jmp ftrace_ret
popf popf
jmp ftrace_stub jmp ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */ #else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount) ENTRY(mcount)
cmpl $__PAGE_OFFSET, %esp cmpl $__PAGE_OFFSET, %esp
jb ftrace_stub /* Paging not enabled yet? */ jb ftrace_stub /* Paging not enabled yet? */
cmpl $ftrace_stub, ftrace_trace_function cmpl $ftrace_stub, ftrace_trace_function
jnz trace jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
cmpl $ftrace_stub, ftrace_graph_return cmpl $ftrace_stub, ftrace_graph_return
jnz ftrace_graph_caller jnz ftrace_graph_caller
cmpl $ftrace_graph_entry_stub, ftrace_graph_entry cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
jnz ftrace_graph_caller jnz ftrace_graph_caller
#endif #endif
.globl ftrace_stub .globl ftrace_stub
ftrace_stub: ftrace_stub:
...@@ -1148,99 +1021,92 @@ ftrace_stub: ...@@ -1148,99 +1021,92 @@ ftrace_stub:
/* taken from glibc */ /* taken from glibc */
trace: trace:
pushl %eax pushl %eax
pushl %ecx pushl %ecx
pushl %edx pushl %edx
movl 0xc(%esp), %eax movl 0xc(%esp), %eax
movl 0x4(%ebp), %edx movl 0x4(%ebp), %edx
subl $MCOUNT_INSN_SIZE, %eax subl $MCOUNT_INSN_SIZE, %eax
call *ftrace_trace_function call *ftrace_trace_function
popl %edx popl %edx
popl %ecx popl %ecx
popl %eax popl %eax
jmp ftrace_stub jmp ftrace_stub
END(mcount) END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller) ENTRY(ftrace_graph_caller)
pushl %eax pushl %eax
pushl %ecx pushl %ecx
pushl %edx pushl %edx
movl 0xc(%esp), %eax movl 0xc(%esp), %eax
lea 0x4(%ebp), %edx lea 0x4(%ebp), %edx
movl (%ebp), %ecx movl (%ebp), %ecx
subl $MCOUNT_INSN_SIZE, %eax subl $MCOUNT_INSN_SIZE, %eax
call prepare_ftrace_return call prepare_ftrace_return
popl %edx popl %edx
popl %ecx popl %ecx
popl %eax popl %eax
ret ret
END(ftrace_graph_caller) END(ftrace_graph_caller)
.globl return_to_handler .globl return_to_handler
return_to_handler: return_to_handler:
pushl %eax pushl %eax
pushl %edx pushl %edx
movl %ebp, %eax movl %ebp, %eax
call ftrace_return_to_handler call ftrace_return_to_handler
movl %eax, %ecx movl %eax, %ecx
popl %edx popl %edx
popl %eax popl %eax
jmp *%ecx jmp *%ecx
#endif #endif
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
ENTRY(trace_page_fault) ENTRY(trace_page_fault)
RING0_EC_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $trace_do_page_fault pushl $trace_do_page_fault
jmp error_code jmp error_code
CFI_ENDPROC
END(trace_page_fault) END(trace_page_fault)
#endif #endif
ENTRY(page_fault) ENTRY(page_fault)
RING0_EC_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $do_page_fault pushl $do_page_fault
ALIGN ALIGN
error_code: error_code:
/* the function address is in %gs's slot on the stack */ /* the function address is in %gs's slot on the stack */
pushl_cfi %fs pushl %fs
/*CFI_REL_OFFSET fs, 0*/ pushl %es
pushl_cfi %es pushl %ds
/*CFI_REL_OFFSET es, 0*/ pushl %eax
pushl_cfi %ds pushl %ebp
/*CFI_REL_OFFSET ds, 0*/ pushl %edi
pushl_cfi_reg eax pushl %esi
pushl_cfi_reg ebp pushl %edx
pushl_cfi_reg edi pushl %ecx
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg edx
pushl_cfi_reg ecx
pushl_cfi_reg ebx
cld cld
movl $(__KERNEL_PERCPU), %ecx movl $(__KERNEL_PERCPU), %ecx
movl %ecx, %fs movl %ecx, %fs
UNWIND_ESPFIX_STACK UNWIND_ESPFIX_STACK
GS_TO_REG %ecx GS_TO_REG %ecx
movl PT_GS(%esp), %edi # get the function address movl PT_GS(%esp), %edi # get the function address
movl PT_ORIG_EAX(%esp), %edx # get the error code movl PT_ORIG_EAX(%esp), %edx # get the error code
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
REG_TO_PTGS %ecx REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx SET_KERNEL_GS %ecx
movl $(__USER_DS), %ecx movl $(__USER_DS), %ecx
movl %ecx, %ds movl %ecx, %ds
movl %ecx, %es movl %ecx, %es
TRACE_IRQS_OFF TRACE_IRQS_OFF
movl %esp,%eax # pt_regs pointer movl %esp, %eax # pt_regs pointer
call *%edi call *%edi
jmp ret_from_exception jmp ret_from_exception
CFI_ENDPROC
END(page_fault) END(page_fault)
/* /*
...@@ -1257,33 +1123,28 @@ END(page_fault) ...@@ -1257,33 +1123,28 @@ END(page_fault)
* the instruction that would have done it for sysenter. * the instruction that would have done it for sysenter.
*/ */
.macro FIX_STACK offset ok label .macro FIX_STACK offset ok label
cmpw $__KERNEL_CS, 4(%esp) cmpw $__KERNEL_CS, 4(%esp)
jne \ok jne \ok
\label: \label:
movl TSS_sysenter_sp0 + \offset(%esp), %esp movl TSS_sysenter_sp0 + \offset(%esp), %esp
CFI_DEF_CFA esp, 0 pushfl
CFI_UNDEFINED eip pushl $__KERNEL_CS
pushfl_cfi pushl $sysenter_past_esp
pushl_cfi $__KERNEL_CS
pushl_cfi $sysenter_past_esp
CFI_REL_OFFSET eip, 0
.endm .endm
ENTRY(debug) ENTRY(debug)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
cmpl $ia32_sysenter_target,(%esp) cmpl $entry_SYSENTER_32, (%esp)
jne debug_stack_correct jne debug_stack_correct
FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct: debug_stack_correct:
pushl_cfi $-1 # mark this as an int pushl $-1 # mark this as an int
SAVE_ALL SAVE_ALL
TRACE_IRQS_OFF TRACE_IRQS_OFF
xorl %edx,%edx # error code 0 xorl %edx, %edx # error code 0
movl %esp,%eax # pt_regs pointer movl %esp, %eax # pt_regs pointer
call do_debug call do_debug
jmp ret_from_exception jmp ret_from_exception
CFI_ENDPROC
END(debug) END(debug)
/* /*
...@@ -1295,107 +1156,93 @@ END(debug) ...@@ -1295,107 +1156,93 @@ END(debug)
* fault happened on the sysenter path. * fault happened on the sysenter path.
*/ */
ENTRY(nmi) ENTRY(nmi)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32 #ifdef CONFIG_X86_ESPFIX32
pushl_cfi %eax pushl %eax
movl %ss, %eax movl %ss, %eax
cmpw $__ESPFIX_SS, %ax cmpw $__ESPFIX_SS, %ax
popl_cfi %eax popl %eax
je nmi_espfix_stack je nmi_espfix_stack
#endif #endif
cmpl $ia32_sysenter_target,(%esp) cmpl $entry_SYSENTER_32, (%esp)
je nmi_stack_fixup je nmi_stack_fixup
pushl_cfi %eax pushl %eax
movl %esp,%eax movl %esp, %eax
/* Do not access memory above the end of our stack page, /*
* Do not access memory above the end of our stack page,
* it might not exist. * it might not exist.
*/ */
andl $(THREAD_SIZE-1),%eax andl $(THREAD_SIZE-1), %eax
cmpl $(THREAD_SIZE-20),%eax cmpl $(THREAD_SIZE-20), %eax
popl_cfi %eax popl %eax
jae nmi_stack_correct jae nmi_stack_correct
cmpl $ia32_sysenter_target,12(%esp) cmpl $entry_SYSENTER_32, 12(%esp)
je nmi_debug_stack_check je nmi_debug_stack_check
nmi_stack_correct: nmi_stack_correct:
/* We have a RING0_INT_FRAME here */ pushl %eax
pushl_cfi %eax
SAVE_ALL SAVE_ALL
xorl %edx,%edx # zero error code xorl %edx, %edx # zero error code
movl %esp,%eax # pt_regs pointer movl %esp, %eax # pt_regs pointer
call do_nmi call do_nmi
jmp restore_all_notrace jmp restore_all_notrace
CFI_ENDPROC
nmi_stack_fixup: nmi_stack_fixup:
RING0_INT_FRAME
FIX_STACK 12, nmi_stack_correct, 1 FIX_STACK 12, nmi_stack_correct, 1
jmp nmi_stack_correct jmp nmi_stack_correct
nmi_debug_stack_check: nmi_debug_stack_check:
/* We have a RING0_INT_FRAME here */ cmpw $__KERNEL_CS, 16(%esp)
cmpw $__KERNEL_CS,16(%esp) jne nmi_stack_correct
jne nmi_stack_correct cmpl $debug, (%esp)
cmpl $debug,(%esp) jb nmi_stack_correct
jb nmi_stack_correct cmpl $debug_esp_fix_insn, (%esp)
cmpl $debug_esp_fix_insn,(%esp) ja nmi_stack_correct
ja nmi_stack_correct
FIX_STACK 24, nmi_stack_correct, 1 FIX_STACK 24, nmi_stack_correct, 1
jmp nmi_stack_correct jmp nmi_stack_correct
#ifdef CONFIG_X86_ESPFIX32 #ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack: nmi_espfix_stack:
/* We have a RING0_INT_FRAME here. /*
*
* create the pointer to lss back * create the pointer to lss back
*/ */
pushl_cfi %ss pushl %ss
pushl_cfi %esp pushl %esp
addl $4, (%esp) addl $4, (%esp)
/* copy the iret frame of 12 bytes */ /* copy the iret frame of 12 bytes */
.rept 3 .rept 3
pushl_cfi 16(%esp) pushl 16(%esp)
.endr .endr
pushl_cfi %eax pushl %eax
SAVE_ALL SAVE_ALL
FIXUP_ESPFIX_STACK # %eax == %esp FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx,%edx # zero error code xorl %edx, %edx # zero error code
call do_nmi call do_nmi
RESTORE_REGS RESTORE_REGS
lss 12+4(%esp), %esp # back to espfix stack lss 12+4(%esp), %esp # back to espfix stack
CFI_ADJUST_CFA_OFFSET -24 jmp irq_return
jmp irq_return
#endif #endif
CFI_ENDPROC
END(nmi) END(nmi)
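A quick aside on the THREAD_SIZE masking near the top of this nmi entry ("Do not access memory above the end of our stack page, it might not exist"): the entry only dereferences 12(%esp) to look for the sysenter fixup case when that slot is guaranteed to lie inside the current stack allocation. A minimal C rendering of the same bounds test, assuming a two-page THREAD_SIZE purely for illustration:

#include <stdbool.h>
#include <stdint.h>

#define THREAD_SIZE 8192u	/* assumed two-page 32-bit kernel stack, illustration only */

/* Mirrors the andl/cmpl/jae triple: keep only the offset of %esp within its
 * stack allocation and ask whether 12(%esp) can be read without running off
 * the end.  The asm takes the nmi_stack_correct branch when this is false. */
static bool safe_to_peek_above_esp(uint32_t esp)
{
	uint32_t offset = esp & (THREAD_SIZE - 1);

	return offset < THREAD_SIZE - 20;
}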
ENTRY(int3) ENTRY(int3)
RING0_INT_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $-1 # mark this as an int pushl $-1 # mark this as an int
SAVE_ALL SAVE_ALL
TRACE_IRQS_OFF TRACE_IRQS_OFF
xorl %edx,%edx # zero error code xorl %edx, %edx # zero error code
movl %esp,%eax # pt_regs pointer movl %esp, %eax # pt_regs pointer
call do_int3 call do_int3
jmp ret_from_exception jmp ret_from_exception
CFI_ENDPROC
END(int3) END(int3)
ENTRY(general_protection) ENTRY(general_protection)
RING0_EC_FRAME pushl $do_general_protection
pushl_cfi $do_general_protection jmp error_code
jmp error_code
CFI_ENDPROC
END(general_protection) END(general_protection)
#ifdef CONFIG_KVM_GUEST #ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault) ENTRY(async_page_fault)
RING0_EC_FRAME
ASM_CLAC ASM_CLAC
pushl_cfi $do_async_page_fault pushl $do_async_page_fault
jmp error_code jmp error_code
CFI_ENDPROC
END(async_page_fault) END(async_page_fault)
#endif #endif
...@@ -19,8 +19,6 @@ ...@@ -19,8 +19,6 @@
* at the top of the kernel process stack. * at the top of the kernel process stack.
* *
* Some macro usage: * Some macro usage:
* - CFI macros are used to generate dwarf2 unwind information for better
* backtraces. They don't change any code.
* - ENTRY/END Define functions in the symbol table. * - ENTRY/END Define functions in the symbol table.
* - TRACE_IRQ_* - Trace hard interrupt state for lock debugging. * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
* - idtentry - Define exception entry points. * - idtentry - Define exception entry points.
...@@ -30,8 +28,7 @@ ...@@ -30,8 +28,7 @@
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/dwarf2.h> #include "calling.h"
#include <asm/calling.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/unistd.h> #include <asm/unistd.h>
...@@ -112,61 +109,6 @@ ENDPROC(native_usergs_sysret64) ...@@ -112,61 +109,6 @@ ENDPROC(native_usergs_sysret64)
# define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ # define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ
#endif #endif
/*
* empty frame
*/
.macro EMPTY_FRAME start=1 offset=0
.if \start
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,8+\offset
.else
CFI_DEF_CFA_OFFSET 8+\offset
.endif
.endm
/*
* initial frame state for interrupts (and exceptions without error code)
*/
.macro INTR_FRAME start=1 offset=0
EMPTY_FRAME \start, 5*8+\offset
/*CFI_REL_OFFSET ss, 4*8+\offset*/
CFI_REL_OFFSET rsp, 3*8+\offset
/*CFI_REL_OFFSET rflags, 2*8+\offset*/
/*CFI_REL_OFFSET cs, 1*8+\offset*/
CFI_REL_OFFSET rip, 0*8+\offset
.endm
/*
* initial frame state for exceptions with error code (and interrupts
* with vector already pushed)
*/
.macro XCPT_FRAME start=1 offset=0
INTR_FRAME \start, 1*8+\offset
.endm
/*
* frame that enables passing a complete pt_regs to a C function.
*/
.macro DEFAULT_FRAME start=1 offset=0
XCPT_FRAME \start, ORIG_RAX+\offset
CFI_REL_OFFSET rdi, RDI+\offset
CFI_REL_OFFSET rsi, RSI+\offset
CFI_REL_OFFSET rdx, RDX+\offset
CFI_REL_OFFSET rcx, RCX+\offset
CFI_REL_OFFSET rax, RAX+\offset
CFI_REL_OFFSET r8, R8+\offset
CFI_REL_OFFSET r9, R9+\offset
CFI_REL_OFFSET r10, R10+\offset
CFI_REL_OFFSET r11, R11+\offset
CFI_REL_OFFSET rbx, RBX+\offset
CFI_REL_OFFSET rbp, RBP+\offset
CFI_REL_OFFSET r12, R12+\offset
CFI_REL_OFFSET r13, R13+\offset
CFI_REL_OFFSET r14, R14+\offset
CFI_REL_OFFSET r15, R15+\offset
.endm
/* /*
* 64bit SYSCALL instruction entry. Up to 6 arguments in registers. * 64bit SYSCALL instruction entry. Up to 6 arguments in registers.
* *
...@@ -195,13 +137,7 @@ ENDPROC(native_usergs_sysret64) ...@@ -195,13 +137,7 @@ ENDPROC(native_usergs_sysret64)
* with them due to bugs in both AMD and Intel CPUs. * with them due to bugs in both AMD and Intel CPUs.
*/ */
ENTRY(system_call) ENTRY(entry_SYSCALL_64)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,0
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
/* /*
* Interrupts are off on entry. * Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
...@@ -213,14 +149,14 @@ ENTRY(system_call) ...@@ -213,14 +149,14 @@ ENTRY(system_call)
* after the swapgs, so that it can do the swapgs * after the swapgs, so that it can do the swapgs
* for the guest and jump here on syscall. * for the guest and jump here on syscall.
*/ */
GLOBAL(system_call_after_swapgs) GLOBAL(entry_SYSCALL_64_after_swapgs)
movq %rsp,PER_CPU_VAR(rsp_scratch) movq %rsp,PER_CPU_VAR(rsp_scratch)
movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp
/* Construct struct pt_regs on stack */ /* Construct struct pt_regs on stack */
pushq_cfi $__USER_DS /* pt_regs->ss */ pushq $__USER_DS /* pt_regs->ss */
pushq_cfi PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
/* /*
* Re-enable interrupts. * Re-enable interrupts.
* We use 'rsp_scratch' as a scratch space, hence irq-off block above * We use 'rsp_scratch' as a scratch space, hence irq-off block above
...@@ -229,26 +165,24 @@ GLOBAL(system_call_after_swapgs) ...@@ -229,26 +165,24 @@ GLOBAL(system_call_after_swapgs)
* with using rsp_scratch: * with using rsp_scratch:
*/ */
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %r11 /* pt_regs->flags */ pushq %r11 /* pt_regs->flags */
pushq_cfi $__USER_CS /* pt_regs->cs */ pushq $__USER_CS /* pt_regs->cs */
pushq_cfi %rcx /* pt_regs->ip */ pushq %rcx /* pt_regs->ip */
CFI_REL_OFFSET rip,0 pushq %rax /* pt_regs->orig_ax */
pushq_cfi_reg rax /* pt_regs->orig_ax */ pushq %rdi /* pt_regs->di */
pushq_cfi_reg rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */
pushq_cfi_reg rsi /* pt_regs->si */ pushq %rdx /* pt_regs->dx */
pushq_cfi_reg rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */
pushq_cfi_reg rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */
pushq_cfi $-ENOSYS /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */
pushq_cfi_reg r8 /* pt_regs->r8 */ pushq %r9 /* pt_regs->r9 */
pushq_cfi_reg r9 /* pt_regs->r9 */ pushq %r10 /* pt_regs->r10 */
pushq_cfi_reg r10 /* pt_regs->r10 */ pushq %r11 /* pt_regs->r11 */
pushq_cfi_reg r11 /* pt_regs->r11 */
sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */ sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
CFI_ADJUST_CFA_OFFSET 6*8
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz tracesys jnz tracesys
system_call_fastpath: entry_SYSCALL_64_fastpath:
#if __SYSCALL_MASK == ~0 #if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max,%rax cmpq $__NR_syscall_max,%rax
#else #else
...@@ -282,13 +216,9 @@ system_call_fastpath: ...@@ -282,13 +216,9 @@ system_call_fastpath:
testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */ jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */
CFI_REMEMBER_STATE
RESTORE_C_REGS_EXCEPT_RCX_R11 RESTORE_C_REGS_EXCEPT_RCX_R11
movq RIP(%rsp),%rcx movq RIP(%rsp),%rcx
CFI_REGISTER rip,rcx
movq EFLAGS(%rsp),%r11 movq EFLAGS(%rsp),%r11
/*CFI_REGISTER rflags,r11*/
movq RSP(%rsp),%rsp movq RSP(%rsp),%rsp
/* /*
* 64bit SYSRET restores rip from rcx, * 64bit SYSRET restores rip from rcx,
...@@ -307,8 +237,6 @@ system_call_fastpath: ...@@ -307,8 +237,6 @@ system_call_fastpath:
*/ */
USERGS_SYSRET64 USERGS_SYSRET64
CFI_RESTORE_STATE
/* Do syscall entry tracing */ /* Do syscall entry tracing */
tracesys: tracesys:
movq %rsp, %rdi movq %rsp, %rdi
...@@ -318,7 +246,7 @@ tracesys: ...@@ -318,7 +246,7 @@ tracesys:
jnz tracesys_phase2 /* if needed, run the slow path */ jnz tracesys_phase2 /* if needed, run the slow path */
RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */ RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */
movq ORIG_RAX(%rsp), %rax movq ORIG_RAX(%rsp), %rax
jmp system_call_fastpath /* and return to the fast path */ jmp entry_SYSCALL_64_fastpath /* and return to the fast path */
tracesys_phase2: tracesys_phase2:
SAVE_EXTRA_REGS SAVE_EXTRA_REGS
...@@ -374,9 +302,9 @@ int_careful: ...@@ -374,9 +302,9 @@ int_careful:
jnc int_very_careful jnc int_very_careful
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi pushq %rdi
SCHEDULE_USER SCHEDULE_USER
popq_cfi %rdi popq %rdi
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
jmp int_with_check jmp int_with_check
...@@ -389,10 +317,10 @@ int_very_careful: ...@@ -389,10 +317,10 @@ int_very_careful:
/* Check for syscall exit trace */ /* Check for syscall exit trace */
testl $_TIF_WORK_SYSCALL_EXIT,%edx testl $_TIF_WORK_SYSCALL_EXIT,%edx
jz int_signal jz int_signal
pushq_cfi %rdi pushq %rdi
leaq 8(%rsp),%rdi # &ptregs -> arg1 leaq 8(%rsp),%rdi # &ptregs -> arg1
call syscall_trace_leave call syscall_trace_leave
popq_cfi %rdi popq %rdi
andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
jmp int_restore_rest jmp int_restore_rest
...@@ -475,27 +403,21 @@ syscall_return: ...@@ -475,27 +403,21 @@ syscall_return:
* perf profiles. Nothing jumps here. * perf profiles. Nothing jumps here.
*/ */
syscall_return_via_sysret: syscall_return_via_sysret:
CFI_REMEMBER_STATE
/* rcx and r11 are already restored (see code above) */ /* rcx and r11 are already restored (see code above) */
RESTORE_C_REGS_EXCEPT_RCX_R11 RESTORE_C_REGS_EXCEPT_RCX_R11
movq RSP(%rsp),%rsp movq RSP(%rsp),%rsp
USERGS_SYSRET64 USERGS_SYSRET64
CFI_RESTORE_STATE
opportunistic_sysret_failed: opportunistic_sysret_failed:
SWAPGS SWAPGS
jmp restore_c_regs_and_iret jmp restore_c_regs_and_iret
CFI_ENDPROC END(entry_SYSCALL_64)
END(system_call)
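As a reading aid for the "Construct struct pt_regs on stack" pushes above: they walk struct pt_regs from its highest field (ss) down to r11, and the final sub $(6*8),%rsp only reserves space for the six fields the fast path does not save. The sketch below reproduces the field order as I recall it from arch/x86/include/asm/ptrace.h; treat it as illustrative, not as a definitive copy of the header.

/* x86-64 pt_regs layout, lowest address first (assumed from ptrace.h). */
struct pt_regs_sketch {
	unsigned long r15, r14, r13, r12, bp, bx;	/* space reserved by sub $(6*8),%rsp */
	unsigned long r11, r10, r9, r8;			/* pushed last */
	unsigned long ax;				/* pre-loaded with -ENOSYS */
	unsigned long cx, dx, si, di;
	unsigned long orig_ax;				/* user %rax, i.e. the syscall number */
	unsigned long ip, cs, flags, sp, ss;		/* SYSCALL-saved state, pushed first */
};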
.macro FORK_LIKE func .macro FORK_LIKE func
ENTRY(stub_\func) ENTRY(stub_\func)
CFI_STARTPROC
DEFAULT_FRAME 0, 8 /* offset 8: return address */
SAVE_EXTRA_REGS 8 SAVE_EXTRA_REGS 8
jmp sys_\func jmp sys_\func
CFI_ENDPROC
END(stub_\func) END(stub_\func)
.endm .endm
...@@ -504,8 +426,6 @@ END(stub_\func) ...@@ -504,8 +426,6 @@ END(stub_\func)
FORK_LIKE vfork FORK_LIKE vfork
ENTRY(stub_execve) ENTRY(stub_execve)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
call sys_execve call sys_execve
return_from_execve: return_from_execve:
testl %eax, %eax testl %eax, %eax
...@@ -515,11 +435,9 @@ return_from_execve: ...@@ -515,11 +435,9 @@ return_from_execve:
1: 1:
/* must use IRET code path (pt_regs->cs may have changed) */ /* must use IRET code path (pt_regs->cs may have changed) */
addq $8, %rsp addq $8, %rsp
CFI_ADJUST_CFA_OFFSET -8
ZERO_EXTRA_REGS ZERO_EXTRA_REGS
movq %rax,RAX(%rsp) movq %rax,RAX(%rsp)
jmp int_ret_from_sys_call jmp int_ret_from_sys_call
CFI_ENDPROC
END(stub_execve) END(stub_execve)
/* /*
* Remaining execve stubs are only 7 bytes long. * Remaining execve stubs are only 7 bytes long.
...@@ -527,32 +445,23 @@ END(stub_execve) ...@@ -527,32 +445,23 @@ END(stub_execve)
*/ */
.align 8 .align 8
GLOBAL(stub_execveat) GLOBAL(stub_execveat)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
call sys_execveat call sys_execveat
jmp return_from_execve jmp return_from_execve
CFI_ENDPROC
END(stub_execveat) END(stub_execveat)
#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
.align 8 .align 8
GLOBAL(stub_x32_execve) GLOBAL(stub_x32_execve)
GLOBAL(stub32_execve) GLOBAL(stub32_execve)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
call compat_sys_execve call compat_sys_execve
jmp return_from_execve jmp return_from_execve
CFI_ENDPROC
END(stub32_execve) END(stub32_execve)
END(stub_x32_execve) END(stub_x32_execve)
.align 8 .align 8
GLOBAL(stub_x32_execveat) GLOBAL(stub_x32_execveat)
GLOBAL(stub32_execveat) GLOBAL(stub32_execveat)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
call compat_sys_execveat call compat_sys_execveat
jmp return_from_execve jmp return_from_execve
CFI_ENDPROC
END(stub32_execveat) END(stub32_execveat)
END(stub_x32_execveat) END(stub_x32_execveat)
#endif #endif
...@@ -562,8 +471,6 @@ END(stub_x32_execveat) ...@@ -562,8 +471,6 @@ END(stub_x32_execveat)
* This cannot be done with SYSRET, so use the IRET return path instead. * This cannot be done with SYSRET, so use the IRET return path instead.
*/ */
ENTRY(stub_rt_sigreturn) ENTRY(stub_rt_sigreturn)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
/* /*
* SAVE_EXTRA_REGS result is not normally needed: * SAVE_EXTRA_REGS result is not normally needed:
* sigreturn overwrites all pt_regs->GPREGS. * sigreturn overwrites all pt_regs->GPREGS.
...@@ -575,21 +482,16 @@ ENTRY(stub_rt_sigreturn) ...@@ -575,21 +482,16 @@ ENTRY(stub_rt_sigreturn)
call sys_rt_sigreturn call sys_rt_sigreturn
return_from_stub: return_from_stub:
addq $8, %rsp addq $8, %rsp
CFI_ADJUST_CFA_OFFSET -8
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
movq %rax,RAX(%rsp) movq %rax,RAX(%rsp)
jmp int_ret_from_sys_call jmp int_ret_from_sys_call
CFI_ENDPROC
END(stub_rt_sigreturn) END(stub_rt_sigreturn)
#ifdef CONFIG_X86_X32_ABI #ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn) ENTRY(stub_x32_rt_sigreturn)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
SAVE_EXTRA_REGS 8 SAVE_EXTRA_REGS 8
call sys32_x32_rt_sigreturn call sys32_x32_rt_sigreturn
jmp return_from_stub jmp return_from_stub
CFI_ENDPROC
END(stub_x32_rt_sigreturn) END(stub_x32_rt_sigreturn)
#endif #endif
...@@ -599,12 +501,11 @@ END(stub_x32_rt_sigreturn) ...@@ -599,12 +501,11 @@ END(stub_x32_rt_sigreturn)
* rdi: prev task we switched from * rdi: prev task we switched from
*/ */
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
DEFAULT_FRAME
LOCK ; btr $TIF_FORK,TI_flags(%r8) LOCK ; btr $TIF_FORK,TI_flags(%r8)
pushq_cfi $0x0002 pushq $0x0002
popfq_cfi # reset kernel eflags popfq # reset kernel eflags
call schedule_tail # rdi: 'prev' task parameter call schedule_tail # rdi: 'prev' task parameter
...@@ -615,7 +516,7 @@ ENTRY(ret_from_fork) ...@@ -615,7 +516,7 @@ ENTRY(ret_from_fork)
/* /*
* By the time we get here, we have no idea whether our pt_regs, * By the time we get here, we have no idea whether our pt_regs,
* ti flags, and ti status came from the 64-bit SYSCALL fast path, * ti flags, and ti status came from the 64-bit SYSCALL fast path,
* the slow path, or one of the ia32entry paths. * the slow path, or one of the 32-bit compat paths.
* Use IRET code path to return, since it can safely handle * Use IRET code path to return, since it can safely handle
* all of the above. * all of the above.
*/ */
...@@ -628,7 +529,6 @@ ENTRY(ret_from_fork) ...@@ -628,7 +529,6 @@ ENTRY(ret_from_fork)
movl $0, RAX(%rsp) movl $0, RAX(%rsp)
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
jmp int_ret_from_sys_call jmp int_ret_from_sys_call
CFI_ENDPROC
END(ret_from_fork) END(ret_from_fork)
/* /*
...@@ -637,16 +537,13 @@ END(ret_from_fork) ...@@ -637,16 +537,13 @@ END(ret_from_fork)
*/ */
.align 8 .align 8
ENTRY(irq_entries_start) ENTRY(irq_entries_start)
INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */ pushq $(~vector+0x80) /* Note: always in signed byte range */
vector=vector+1 vector=vector+1
jmp common_interrupt jmp common_interrupt
CFI_ADJUST_CFA_OFFSET -8
.align 8 .align 8
.endr .endr
CFI_ENDPROC
END(irq_entries_start) END(irq_entries_start)
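The "always in signed byte range" note above and the later addq $-0x80,(%rsp) in common_interrupt form a small encoding trick: each stub pushes ~vector + 0x80 (which fits in an imm8, keeping every stub within its 8-byte slot), and the adjustment recovers ~vector, a value in [-256, -1]. A throwaway C check of that arithmetic (the 0x20..0xff range is used here purely for illustration):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (int vector = 0x20; vector <= 0xff; vector++) {
		int pushed = ~vector + 0x80;		/* what the stub pushes */

		assert(pushed >= -128 && pushed <= 127);	/* fits in a signed byte */

		int adjusted = pushed - 0x80;		/* addq $-0x80,(%rsp) */

		assert(adjusted == ~vector);
		assert(adjusted >= -256 && adjusted <= -1);
	}
	printf("vector encoding round-trips for 0x20..0xff\n");
	return 0;
}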
/* /*
...@@ -688,17 +585,7 @@ END(irq_entries_start) ...@@ -688,17 +585,7 @@ END(irq_entries_start)
movq %rsp, %rsi movq %rsp, %rsi
incl PER_CPU_VAR(irq_count) incl PER_CPU_VAR(irq_count)
cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
CFI_DEF_CFA_REGISTER rsi
pushq %rsi pushq %rsi
/*
* For debugger:
* "CFA (Current Frame Address) is the value on stack + offset"
*/
CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
0x77 /* DW_OP_breg7 (rsp) */, 0, \
0x06 /* DW_OP_deref */, \
0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
0x22 /* DW_OP_plus */
/* We entered an interrupt context - irqs are off: */ /* We entered an interrupt context - irqs are off: */
TRACE_IRQS_OFF TRACE_IRQS_OFF
...@@ -711,7 +598,6 @@ END(irq_entries_start) ...@@ -711,7 +598,6 @@ END(irq_entries_start)
*/ */
.p2align CONFIG_X86_L1_CACHE_SHIFT .p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt: common_interrupt:
XCPT_FRAME
ASM_CLAC ASM_CLAC
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
interrupt do_IRQ interrupt do_IRQ
...@@ -723,16 +609,13 @@ ret_from_intr: ...@@ -723,16 +609,13 @@ ret_from_intr:
/* Restore saved previous stack */ /* Restore saved previous stack */
popq %rsi popq %rsi
CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
/* return code expects complete pt_regs - adjust rsp accordingly: */ /* return code expects complete pt_regs - adjust rsp accordingly: */
leaq -RBP(%rsi),%rsp leaq -RBP(%rsi),%rsp
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET RBP
testb $3, CS(%rsp) testb $3, CS(%rsp)
jz retint_kernel jz retint_kernel
/* Interrupt came from user space */ /* Interrupt came from user space */
retint_user:
GET_THREAD_INFO(%rcx) GET_THREAD_INFO(%rcx)
/* /*
* %rcx: thread info. Interrupts off. * %rcx: thread info. Interrupts off.
...@@ -743,7 +626,6 @@ retint_check: ...@@ -743,7 +626,6 @@ retint_check:
LOCKDEP_SYS_EXIT_IRQ LOCKDEP_SYS_EXIT_IRQ
movl TI_flags(%rcx),%edx movl TI_flags(%rcx),%edx
andl %edi,%edx andl %edi,%edx
CFI_REMEMBER_STATE
jnz retint_careful jnz retint_careful
retint_swapgs: /* return to user-space */ retint_swapgs: /* return to user-space */
...@@ -781,8 +663,6 @@ retint_kernel: ...@@ -781,8 +663,6 @@ retint_kernel:
restore_c_regs_and_iret: restore_c_regs_and_iret:
RESTORE_C_REGS RESTORE_C_REGS
REMOVE_PT_GPREGS_FROM_STACK 8 REMOVE_PT_GPREGS_FROM_STACK 8
irq_return:
INTERRUPT_RETURN INTERRUPT_RETURN
ENTRY(native_iret) ENTRY(native_iret)
...@@ -807,8 +687,8 @@ native_irq_return_iret: ...@@ -807,8 +687,8 @@ native_irq_return_iret:
#ifdef CONFIG_X86_ESPFIX64 #ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt: native_irq_return_ldt:
pushq_cfi %rax pushq %rax
pushq_cfi %rdi pushq %rdi
SWAPGS SWAPGS
movq PER_CPU_VAR(espfix_waddr),%rdi movq PER_CPU_VAR(espfix_waddr),%rdi
movq %rax,(0*8)(%rdi) /* RAX */ movq %rax,(0*8)(%rdi) /* RAX */
...@@ -823,24 +703,23 @@ native_irq_return_ldt: ...@@ -823,24 +703,23 @@ native_irq_return_ldt:
movq (5*8)(%rsp),%rax /* RSP */ movq (5*8)(%rsp),%rax /* RSP */
movq %rax,(4*8)(%rdi) movq %rax,(4*8)(%rdi)
andl $0xffff0000,%eax andl $0xffff0000,%eax
popq_cfi %rdi popq %rdi
orq PER_CPU_VAR(espfix_stack),%rax orq PER_CPU_VAR(espfix_stack),%rax
SWAPGS SWAPGS
movq %rax,%rsp movq %rax,%rsp
popq_cfi %rax popq %rax
jmp native_irq_return_iret jmp native_irq_return_iret
#endif #endif
/* edi: workmask, edx: work */ /* edi: workmask, edx: work */
retint_careful: retint_careful:
CFI_RESTORE_STATE
bt $TIF_NEED_RESCHED,%edx bt $TIF_NEED_RESCHED,%edx
jnc retint_signal jnc retint_signal
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi pushq %rdi
SCHEDULE_USER SCHEDULE_USER
popq_cfi %rdi popq %rdi
GET_THREAD_INFO(%rcx) GET_THREAD_INFO(%rcx)
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
...@@ -862,7 +741,6 @@ retint_signal: ...@@ -862,7 +741,6 @@ retint_signal:
GET_THREAD_INFO(%rcx) GET_THREAD_INFO(%rcx)
jmp retint_with_reschedule jmp retint_with_reschedule
CFI_ENDPROC
END(common_interrupt) END(common_interrupt)
/* /*
...@@ -870,13 +748,11 @@ END(common_interrupt) ...@@ -870,13 +748,11 @@ END(common_interrupt)
*/ */
.macro apicinterrupt3 num sym do_sym .macro apicinterrupt3 num sym do_sym
ENTRY(\sym) ENTRY(\sym)
INTR_FRAME
ASM_CLAC ASM_CLAC
pushq_cfi $~(\num) pushq $~(\num)
.Lcommon_\sym: .Lcommon_\sym:
interrupt \do_sym interrupt \do_sym
jmp ret_from_intr jmp ret_from_intr
CFI_ENDPROC
END(\sym) END(\sym)
.endm .endm
...@@ -966,24 +842,17 @@ ENTRY(\sym) ...@@ -966,24 +842,17 @@ ENTRY(\sym)
.error "using shift_ist requires paranoid=1" .error "using shift_ist requires paranoid=1"
.endif .endif
.if \has_error_code
XCPT_FRAME
.else
INTR_FRAME
.endif
ASM_CLAC ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME
.ifeq \has_error_code .ifeq \has_error_code
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ pushq $-1 /* ORIG_RAX: no syscall to restart */
.endif .endif
ALLOC_PT_GPREGS_ON_STACK ALLOC_PT_GPREGS_ON_STACK
.if \paranoid .if \paranoid
.if \paranoid == 1 .if \paranoid == 1
CFI_REMEMBER_STATE
testb $3, CS(%rsp) /* If coming from userspace, switch */ testb $3, CS(%rsp) /* If coming from userspace, switch */
jnz 1f /* stacks. */ jnz 1f /* stacks. */
.endif .endif
...@@ -993,8 +862,6 @@ ENTRY(\sym) ...@@ -993,8 +862,6 @@ ENTRY(\sym)
.endif .endif
/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */ /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
DEFAULT_FRAME 0
.if \paranoid .if \paranoid
.if \shift_ist != -1 .if \shift_ist != -1
TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */ TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */
...@@ -1030,7 +897,6 @@ ENTRY(\sym) ...@@ -1030,7 +897,6 @@ ENTRY(\sym)
.endif .endif
.if \paranoid == 1 .if \paranoid == 1
CFI_RESTORE_STATE
/* /*
* Paranoid entry from userspace. Switch stacks and treat it * Paranoid entry from userspace. Switch stacks and treat it
* as a normal entry. This means that paranoid handlers * as a normal entry. This means that paranoid handlers
...@@ -1039,7 +905,6 @@ ENTRY(\sym) ...@@ -1039,7 +905,6 @@ ENTRY(\sym)
1: 1:
call error_entry call error_entry
DEFAULT_FRAME 0
movq %rsp,%rdi /* pt_regs pointer */ movq %rsp,%rdi /* pt_regs pointer */
call sync_regs call sync_regs
...@@ -1058,8 +923,6 @@ ENTRY(\sym) ...@@ -1058,8 +923,6 @@ ENTRY(\sym)
jmp error_exit /* %ebx: no swapgs flag */ jmp error_exit /* %ebx: no swapgs flag */
.endif .endif
CFI_ENDPROC
END(\sym) END(\sym)
.endm .endm
...@@ -1092,17 +955,15 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 ...@@ -1092,17 +955,15 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
/* Reload gs selector with exception handling */ /* Reload gs selector with exception handling */
/* edi: new selector */ /* edi: new selector */
ENTRY(native_load_gs_index) ENTRY(native_load_gs_index)
CFI_STARTPROC pushfq
pushfq_cfi
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
SWAPGS SWAPGS
gs_change: gs_change:
movl %edi,%gs movl %edi,%gs
2: mfence /* workaround */ 2: mfence /* workaround */
SWAPGS SWAPGS
popfq_cfi popfq
ret ret
CFI_ENDPROC
END(native_load_gs_index) END(native_load_gs_index)
_ASM_EXTABLE(gs_change,bad_gs) _ASM_EXTABLE(gs_change,bad_gs)
...@@ -1117,22 +978,15 @@ bad_gs: ...@@ -1117,22 +978,15 @@ bad_gs:
/* Call softirq on interrupt stack. Interrupts are off. */ /* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack) ENTRY(do_softirq_own_stack)
CFI_STARTPROC pushq %rbp
pushq_cfi %rbp
CFI_REL_OFFSET rbp,0
mov %rsp,%rbp mov %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
incl PER_CPU_VAR(irq_count) incl PER_CPU_VAR(irq_count)
cmove PER_CPU_VAR(irq_stack_ptr),%rsp cmove PER_CPU_VAR(irq_stack_ptr),%rsp
push %rbp # backlink for old unwinder push %rbp # backlink for old unwinder
call __do_softirq call __do_softirq
leaveq leaveq
CFI_RESTORE rbp
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count) decl PER_CPU_VAR(irq_count)
ret ret
CFI_ENDPROC
END(do_softirq_own_stack) END(do_softirq_own_stack)
#ifdef CONFIG_XEN #ifdef CONFIG_XEN
...@@ -1152,28 +1006,22 @@ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 ...@@ -1152,28 +1006,22 @@ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
* activation and restart the handler using the previous one. * activation and restart the handler using the previous one.
*/ */
ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
CFI_STARTPROC
/* /*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
* see the correct pointer to the pt_regs * see the correct pointer to the pt_regs
*/ */
movq %rdi, %rsp # we don't return, adjust the stack frame movq %rdi, %rsp # we don't return, adjust the stack frame
CFI_ENDPROC
DEFAULT_FRAME
11: incl PER_CPU_VAR(irq_count) 11: incl PER_CPU_VAR(irq_count)
movq %rsp,%rbp movq %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
pushq %rbp # backlink for old unwinder pushq %rbp # backlink for old unwinder
call xen_evtchn_do_upcall call xen_evtchn_do_upcall
popq %rsp popq %rsp
CFI_DEF_CFA_REGISTER rsp
decl PER_CPU_VAR(irq_count) decl PER_CPU_VAR(irq_count)
#ifndef CONFIG_PREEMPT #ifndef CONFIG_PREEMPT
call xen_maybe_preempt_hcall call xen_maybe_preempt_hcall
#endif #endif
jmp error_exit jmp error_exit
CFI_ENDPROC
END(xen_do_hypervisor_callback) END(xen_do_hypervisor_callback)
/* /*
...@@ -1190,16 +1038,8 @@ END(xen_do_hypervisor_callback) ...@@ -1190,16 +1038,8 @@ END(xen_do_hypervisor_callback)
 * with its current contents: any discrepancy means we are in category 1. * with its current contents: any discrepancy means we are in category 1.
*/ */
ENTRY(xen_failsafe_callback) ENTRY(xen_failsafe_callback)
INTR_FRAME 1 (6*8)
/*CFI_REL_OFFSET gs,GS*/
/*CFI_REL_OFFSET fs,FS*/
/*CFI_REL_OFFSET es,ES*/
/*CFI_REL_OFFSET ds,DS*/
CFI_REL_OFFSET r11,8
CFI_REL_OFFSET rcx,0
movl %ds,%ecx movl %ds,%ecx
cmpw %cx,0x10(%rsp) cmpw %cx,0x10(%rsp)
CFI_REMEMBER_STATE
jne 1f jne 1f
movl %es,%ecx movl %es,%ecx
cmpw %cx,0x18(%rsp) cmpw %cx,0x18(%rsp)
...@@ -1212,29 +1052,21 @@ ENTRY(xen_failsafe_callback) ...@@ -1212,29 +1052,21 @@ ENTRY(xen_failsafe_callback)
jne 1f jne 1f
/* All segments match their saved values => Category 2 (Bad IRET). */ /* All segments match their saved values => Category 2 (Bad IRET). */
movq (%rsp),%rcx movq (%rsp),%rcx
CFI_RESTORE rcx
movq 8(%rsp),%r11 movq 8(%rsp),%r11
CFI_RESTORE r11
addq $0x30,%rsp addq $0x30,%rsp
CFI_ADJUST_CFA_OFFSET -0x30 pushq $0 /* RIP */
pushq_cfi $0 /* RIP */ pushq %r11
pushq_cfi %r11 pushq %rcx
pushq_cfi %rcx
jmp general_protection jmp general_protection
CFI_RESTORE_STATE
1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
movq (%rsp),%rcx movq (%rsp),%rcx
CFI_RESTORE rcx
movq 8(%rsp),%r11 movq 8(%rsp),%r11
CFI_RESTORE r11
addq $0x30,%rsp addq $0x30,%rsp
CFI_ADJUST_CFA_OFFSET -0x30 pushq $-1 /* orig_ax = -1 => not a system call */
pushq_cfi $-1 /* orig_ax = -1 => not a system call */
ALLOC_PT_GPREGS_ON_STACK ALLOC_PT_GPREGS_ON_STACK
SAVE_C_REGS SAVE_C_REGS
SAVE_EXTRA_REGS SAVE_EXTRA_REGS
jmp error_exit jmp error_exit
CFI_ENDPROC
END(xen_failsafe_callback) END(xen_failsafe_callback)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
...@@ -1270,7 +1102,6 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector( ...@@ -1270,7 +1102,6 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/ */
ENTRY(paranoid_entry) ENTRY(paranoid_entry)
XCPT_FRAME 1 15*8
cld cld
SAVE_C_REGS 8 SAVE_C_REGS 8
SAVE_EXTRA_REGS 8 SAVE_EXTRA_REGS 8
...@@ -1282,7 +1113,6 @@ ENTRY(paranoid_entry) ...@@ -1282,7 +1113,6 @@ ENTRY(paranoid_entry)
SWAPGS SWAPGS
xorl %ebx,%ebx xorl %ebx,%ebx
1: ret 1: ret
CFI_ENDPROC
END(paranoid_entry) END(paranoid_entry)
/* /*
...@@ -1297,7 +1127,6 @@ END(paranoid_entry) ...@@ -1297,7 +1127,6 @@ END(paranoid_entry)
*/ */
/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
ENTRY(paranoid_exit) ENTRY(paranoid_exit)
DEFAULT_FRAME
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF_DEBUG
testl %ebx,%ebx /* swapgs needed? */ testl %ebx,%ebx /* swapgs needed? */
...@@ -1312,7 +1141,6 @@ paranoid_exit_restore: ...@@ -1312,7 +1141,6 @@ paranoid_exit_restore:
RESTORE_C_REGS RESTORE_C_REGS
REMOVE_PT_GPREGS_FROM_STACK 8 REMOVE_PT_GPREGS_FROM_STACK 8
INTERRUPT_RETURN INTERRUPT_RETURN
CFI_ENDPROC
END(paranoid_exit) END(paranoid_exit)
/* /*
...@@ -1320,7 +1148,6 @@ END(paranoid_exit) ...@@ -1320,7 +1148,6 @@ END(paranoid_exit)
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/ */
ENTRY(error_entry) ENTRY(error_entry)
XCPT_FRAME 1 15*8
cld cld
SAVE_C_REGS 8 SAVE_C_REGS 8
SAVE_EXTRA_REGS 8 SAVE_EXTRA_REGS 8
...@@ -1340,7 +1167,6 @@ error_sti: ...@@ -1340,7 +1167,6 @@ error_sti:
* for these here too. * for these here too.
*/ */
error_kernelspace: error_kernelspace:
CFI_REL_OFFSET rcx, RCX+8
incl %ebx incl %ebx
leaq native_irq_return_iret(%rip),%rcx leaq native_irq_return_iret(%rip),%rcx
cmpq %rcx,RIP+8(%rsp) cmpq %rcx,RIP+8(%rsp)
...@@ -1364,32 +1190,22 @@ error_bad_iret: ...@@ -1364,32 +1190,22 @@ error_bad_iret:
mov %rax,%rsp mov %rax,%rsp
decl %ebx /* Return to usergs */ decl %ebx /* Return to usergs */
jmp error_sti jmp error_sti
CFI_ENDPROC
END(error_entry) END(error_entry)
/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit) ENTRY(error_exit)
DEFAULT_FRAME
movl %ebx,%eax movl %ebx,%eax
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
GET_THREAD_INFO(%rcx)
testl %eax,%eax testl %eax,%eax
jnz retint_kernel jnz retint_kernel
LOCKDEP_SYS_EXIT_IRQ jmp retint_user
movl TI_flags(%rcx),%edx
movl $_TIF_WORK_MASK,%edi
andl %edi,%edx
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
END(error_exit) END(error_exit)
/* Runs on exception stack */ /* Runs on exception stack */
ENTRY(nmi) ENTRY(nmi)
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME
/* /*
* We allow breakpoints in NMIs. If a breakpoint occurs, then * We allow breakpoints in NMIs. If a breakpoint occurs, then
...@@ -1424,8 +1240,7 @@ ENTRY(nmi) ...@@ -1424,8 +1240,7 @@ ENTRY(nmi)
*/ */
/* Use %rdx as our temp variable throughout */ /* Use %rdx as our temp variable throughout */
pushq_cfi %rdx pushq %rdx
CFI_REL_OFFSET rdx, 0
/* /*
* If %cs was not the kernel segment, then the NMI triggered in user * If %cs was not the kernel segment, then the NMI triggered in user
...@@ -1459,8 +1274,6 @@ ENTRY(nmi) ...@@ -1459,8 +1274,6 @@ ENTRY(nmi)
jb first_nmi jb first_nmi
/* Ah, it is within the NMI stack, treat it as nested */ /* Ah, it is within the NMI stack, treat it as nested */
CFI_REMEMBER_STATE
nested_nmi: nested_nmi:
/* /*
* Do nothing if we interrupted the fixup in repeat_nmi. * Do nothing if we interrupted the fixup in repeat_nmi.
...@@ -1478,26 +1291,22 @@ nested_nmi: ...@@ -1478,26 +1291,22 @@ nested_nmi:
/* Set up the interrupted NMIs stack to jump to repeat_nmi */ /* Set up the interrupted NMIs stack to jump to repeat_nmi */
leaq -1*8(%rsp), %rdx leaq -1*8(%rsp), %rdx
movq %rdx, %rsp movq %rdx, %rsp
CFI_ADJUST_CFA_OFFSET 1*8
leaq -10*8(%rsp), %rdx leaq -10*8(%rsp), %rdx
pushq_cfi $__KERNEL_DS pushq $__KERNEL_DS
pushq_cfi %rdx pushq %rdx
pushfq_cfi pushfq
pushq_cfi $__KERNEL_CS pushq $__KERNEL_CS
pushq_cfi $repeat_nmi pushq $repeat_nmi
/* Put stack back */ /* Put stack back */
addq $(6*8), %rsp addq $(6*8), %rsp
CFI_ADJUST_CFA_OFFSET -6*8
nested_nmi_out: nested_nmi_out:
popq_cfi %rdx popq %rdx
CFI_RESTORE rdx
/* No need to check faults here */ /* No need to check faults here */
INTERRUPT_RETURN INTERRUPT_RETURN
CFI_RESTORE_STATE
first_nmi: first_nmi:
/* /*
* Because nested NMIs will use the pushed location that we * Because nested NMIs will use the pushed location that we
...@@ -1536,22 +1345,19 @@ first_nmi: ...@@ -1536,22 +1345,19 @@ first_nmi:
*/ */
/* Do not pop rdx, nested NMIs will corrupt that part of the stack */ /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
movq (%rsp), %rdx movq (%rsp), %rdx
CFI_RESTORE rdx
/* Set the NMI executing variable on the stack. */ /* Set the NMI executing variable on the stack. */
pushq_cfi $1 pushq $1
/* /*
* Leave room for the "copied" frame * Leave room for the "copied" frame
*/ */
subq $(5*8), %rsp subq $(5*8), %rsp
CFI_ADJUST_CFA_OFFSET 5*8
/* Copy the stack frame to the Saved frame */ /* Copy the stack frame to the Saved frame */
.rept 5 .rept 5
pushq_cfi 11*8(%rsp) pushq 11*8(%rsp)
.endr .endr
CFI_DEF_CFA_OFFSET 5*8
/* Everything up to here is safe from nested NMIs */ /* Everything up to here is safe from nested NMIs */
...@@ -1574,12 +1380,10 @@ repeat_nmi: ...@@ -1574,12 +1380,10 @@ repeat_nmi:
/* Make another copy, this one may be modified by nested NMIs */ /* Make another copy, this one may be modified by nested NMIs */
addq $(10*8), %rsp addq $(10*8), %rsp
CFI_ADJUST_CFA_OFFSET -10*8
.rept 5 .rept 5
pushq_cfi -6*8(%rsp) pushq -6*8(%rsp)
.endr .endr
subq $(5*8), %rsp subq $(5*8), %rsp
CFI_DEF_CFA_OFFSET 5*8
end_repeat_nmi: end_repeat_nmi:
/* /*
...@@ -1587,7 +1391,7 @@ end_repeat_nmi: ...@@ -1587,7 +1391,7 @@ end_repeat_nmi:
* NMI if the first NMI took an exception and reset our iret stack * NMI if the first NMI took an exception and reset our iret stack
* so that we repeat another NMI. * so that we repeat another NMI.
*/ */
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ pushq $-1 /* ORIG_RAX: no syscall to restart */
ALLOC_PT_GPREGS_ON_STACK ALLOC_PT_GPREGS_ON_STACK
/* /*
...@@ -1598,7 +1402,6 @@ end_repeat_nmi: ...@@ -1598,7 +1402,6 @@ end_repeat_nmi:
* exceptions might do. * exceptions might do.
*/ */
call paranoid_entry call paranoid_entry
DEFAULT_FRAME 0
/* /*
* Save off the CR2 register. If we take a page fault in the NMI then * Save off the CR2 register. If we take a page fault in the NMI then
...@@ -1634,14 +1437,11 @@ nmi_restore: ...@@ -1634,14 +1437,11 @@ nmi_restore:
/* Clear the NMI executing stack variable */ /* Clear the NMI executing stack variable */
movq $0, 5*8(%rsp) movq $0, 5*8(%rsp)
jmp irq_return INTERRUPT_RETURN
CFI_ENDPROC
END(nmi) END(nmi)
ENTRY(ignore_sysret) ENTRY(ignore_sysret)
CFI_STARTPROC
mov $-ENOSYS,%eax mov $-ENOSYS,%eax
sysret sysret
CFI_ENDPROC
END(ignore_sysret) END(ignore_sysret)
/* /*
* Compatibility mode system call entry point for x86-64. * Compatibility mode system call entry point for x86-64.
* *
* Copyright 2000-2002 Andi Kleen, SuSE Labs. * Copyright 2000-2002 Andi Kleen, SuSE Labs.
*/ */
#include "calling.h"
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/current.h> #include <asm/current.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/ia32_unistd.h> #include <asm/ia32_unistd.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/irqflags.h> #include <asm/irqflags.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -21,57 +19,15 @@ ...@@ -21,57 +19,15 @@
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h> #include <linux/elf-em.h>
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE 0x40000000 #define __AUDIT_ARCH_LE 0x40000000
#ifndef CONFIG_AUDITSYSCALL #ifndef CONFIG_AUDITSYSCALL
#define sysexit_audit ia32_ret_from_sys_call # define sysexit_audit ia32_ret_from_sys_call
#define sysretl_audit ia32_ret_from_sys_call # define sysretl_audit ia32_ret_from_sys_call
#endif #endif
.section .entry.text, "ax" .section .entry.text, "ax"
/* clobbers %rax */
.macro CLEAR_RREGS _r9=rax
xorl %eax,%eax
movq %rax,R11(%rsp)
movq %rax,R10(%rsp)
movq %\_r9,R9(%rsp)
movq %rax,R8(%rsp)
.endm
/*
* Reload arg registers from stack in case ptrace changed them.
* We don't reload %eax because syscall_trace_enter() returned
* the %rax value we should see. Instead, we just truncate that
* value to 32 bits again as we did on entry from user mode.
* If it's a new value set by user_regset during entry tracing,
* this matches the normal truncation of the user-mode value.
* If it's -1 to make us punt the syscall, then (u32)-1 is still
* an appropriately invalid value.
*/
.macro LOAD_ARGS32 _r9=0
.if \_r9
movl R9(%rsp),%r9d
.endif
movl RCX(%rsp),%ecx
movl RDX(%rsp),%edx
movl RSI(%rsp),%esi
movl RDI(%rsp),%edi
movl %eax,%eax /* zero extension */
.endm
.macro CFI_STARTPROC32 simple
CFI_STARTPROC \simple
CFI_UNDEFINED r8
CFI_UNDEFINED r9
CFI_UNDEFINED r10
CFI_UNDEFINED r11
CFI_UNDEFINED r12
CFI_UNDEFINED r13
CFI_UNDEFINED r14
CFI_UNDEFINED r15
.endm
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret32) ENTRY(native_usergs_sysret32)
swapgs swapgs
...@@ -80,7 +36,7 @@ ENDPROC(native_usergs_sysret32) ...@@ -80,7 +36,7 @@ ENDPROC(native_usergs_sysret32)
#endif #endif
/* /*
* 32bit SYSENTER instruction entry. * 32-bit SYSENTER instruction entry.
* *
* SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs. * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
* IF and VM in rflags are cleared (IOW: interrupts are off). * IF and VM in rflags are cleared (IOW: interrupts are off).
...@@ -101,12 +57,7 @@ ENDPROC(native_usergs_sysret32) ...@@ -101,12 +57,7 @@ ENDPROC(native_usergs_sysret32)
* path below. We set up a complete hardware stack frame to share code * path below. We set up a complete hardware stack frame to share code
* with the int 0x80 path. * with the int 0x80 path.
*/ */
ENTRY(ia32_sysenter_target) ENTRY(entry_SYSENTER_compat)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,0
CFI_REGISTER rsp,rbp
/* /*
* Interrupts are off on entry. * Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
...@@ -121,33 +72,29 @@ ENTRY(ia32_sysenter_target) ...@@ -121,33 +72,29 @@ ENTRY(ia32_sysenter_target)
movl %eax, %eax movl %eax, %eax
movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
CFI_REGISTER rip,r10
/* Construct struct pt_regs on stack */ /* Construct struct pt_regs on stack */
pushq_cfi $__USER32_DS /* pt_regs->ss */ pushq $__USER32_DS /* pt_regs->ss */
pushq_cfi %rbp /* pt_regs->sp */ pushq %rbp /* pt_regs->sp */
CFI_REL_OFFSET rsp,0 pushfq /* pt_regs->flags */
pushfq_cfi /* pt_regs->flags */ pushq $__USER32_CS /* pt_regs->cs */
pushq_cfi $__USER32_CS /* pt_regs->cs */ pushq %r10 /* pt_regs->ip = thread_info->sysenter_return */
pushq_cfi %r10 /* pt_regs->ip = thread_info->sysenter_return */ pushq %rax /* pt_regs->orig_ax */
CFI_REL_OFFSET rip,0 pushq %rdi /* pt_regs->di */
pushq_cfi_reg rax /* pt_regs->orig_ax */ pushq %rsi /* pt_regs->si */
pushq_cfi_reg rdi /* pt_regs->di */ pushq %rdx /* pt_regs->dx */
pushq_cfi_reg rsi /* pt_regs->si */ pushq %rcx /* pt_regs->cx */
pushq_cfi_reg rdx /* pt_regs->dx */ pushq $-ENOSYS /* pt_regs->ax */
pushq_cfi_reg rcx /* pt_regs->cx */
pushq_cfi $-ENOSYS /* pt_regs->ax */
cld cld
sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */ sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
CFI_ADJUST_CFA_OFFSET 10*8
/* /*
* no need to do an access_ok check here because rbp has been * no need to do an access_ok check here because rbp has been
* 32bit zero extended * 32-bit zero extended
*/ */
ASM_STAC ASM_STAC
1: movl (%rbp),%ebp 1: movl (%rbp), %ebp
_ASM_EXTABLE(1b,ia32_badarg) _ASM_EXTABLE(1b, ia32_badarg)
ASM_CLAC ASM_CLAC
/* /*
...@@ -155,26 +102,26 @@ ENTRY(ia32_sysenter_target) ...@@ -155,26 +102,26 @@ ENTRY(ia32_sysenter_target)
* ourselves. To save a few cycles, we can check whether * ourselves. To save a few cycles, we can check whether
* NT was set instead of doing an unconditional popfq. * NT was set instead of doing an unconditional popfq.
*/ */
testl $X86_EFLAGS_NT,EFLAGS(%rsp) testl $X86_EFLAGS_NT, EFLAGS(%rsp)
jnz sysenter_fix_flags jnz sysenter_fix_flags
sysenter_flags_fixed: sysenter_flags_fixed:
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
CFI_REMEMBER_STATE jnz sysenter_tracesys
jnz sysenter_tracesys
sysenter_do_call: sysenter_do_call:
/* 32bit syscall -> 64bit C ABI argument conversion */ /* 32-bit syscall -> 64-bit C ABI argument conversion */
movl %edi,%r8d /* arg5 */ movl %edi, %r8d /* arg5 */
movl %ebp,%r9d /* arg6 */ movl %ebp, %r9d /* arg6 */
xchg %ecx,%esi /* rsi:arg2, rcx:arg4 */ xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
movl %ebx,%edi /* arg1 */ movl %ebx, %edi /* arg1 */
movl %edx,%edx /* arg3 (zero extension) */ movl %edx, %edx /* arg3 (zero extension) */
sysenter_dispatch: sysenter_dispatch:
cmpq $(IA32_NR_syscalls-1),%rax cmpq $(IA32_NR_syscalls-1), %rax
ja 1f ja 1f
call *ia32_sys_call_table(,%rax,8) call *ia32_sys_call_table(, %rax, 8)
movq %rax,RAX(%rsp) movq %rax, RAX(%rsp)
1: 1:
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
...@@ -191,23 +138,21 @@ sysexit_from_sys_call: ...@@ -191,23 +138,21 @@ sysexit_from_sys_call:
* This code path is still called 'sysexit' because it pairs * This code path is still called 'sysexit' because it pairs
* with 'sysenter' and it uses the SYSENTER calling convention. * with 'sysenter' and it uses the SYSENTER calling convention.
*/ */
andl $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
movl RIP(%rsp),%ecx /* User %eip */ movl RIP(%rsp), %ecx /* User %eip */
CFI_REGISTER rip,rcx
RESTORE_RSI_RDI RESTORE_RSI_RDI
xorl %edx,%edx /* avoid info leaks */ xorl %edx, %edx /* Do not leak kernel information */
xorq %r8,%r8 xorq %r8, %r8
xorq %r9,%r9 xorq %r9, %r9
xorq %r10,%r10 xorq %r10, %r10
movl EFLAGS(%rsp),%r11d /* User eflags */ movl EFLAGS(%rsp), %r11d /* User eflags */
/*CFI_RESTORE rflags*/
TRACE_IRQS_ON TRACE_IRQS_ON
/* /*
* SYSRETL works even on Intel CPUs. Use it in preference to SYSEXIT, * SYSRETL works even on Intel CPUs. Use it in preference to SYSEXIT,
* since it avoids a dicey window with interrupts enabled. * since it avoids a dicey window with interrupts enabled.
*/ */
movl RSP(%rsp),%esp movl RSP(%rsp), %esp
/* /*
* USERGS_SYSRET32 does: * USERGS_SYSRET32 does:
...@@ -231,60 +176,62 @@ sysexit_from_sys_call: ...@@ -231,60 +176,62 @@ sysexit_from_sys_call:
*/ */
USERGS_SYSRET32 USERGS_SYSRET32
CFI_RESTORE_STATE
#ifdef CONFIG_AUDITSYSCALL #ifdef CONFIG_AUDITSYSCALL
.macro auditsys_entry_common .macro auditsys_entry_common
movl %esi,%r8d /* 5th arg: 4th syscall arg */ movl %esi, %r8d /* 5th arg: 4th syscall arg */
movl %ecx,%r9d /*swap with edx*/ movl %ecx, %r9d /* swap with edx */
movl %edx,%ecx /* 4th arg: 3rd syscall arg */ movl %edx, %ecx /* 4th arg: 3rd syscall arg */
movl %r9d,%edx /* 3rd arg: 2nd syscall arg */ movl %r9d, %edx /* 3rd arg: 2nd syscall arg */
movl %ebx,%esi /* 2nd arg: 1st syscall arg */ movl %ebx, %esi /* 2nd arg: 1st syscall arg */
movl %eax,%edi /* 1st arg: syscall number */ movl %eax, %edi /* 1st arg: syscall number */
call __audit_syscall_entry call __audit_syscall_entry
movl ORIG_RAX(%rsp),%eax /* reload syscall number */ movl ORIG_RAX(%rsp), %eax /* reload syscall number */
movl %ebx,%edi /* reload 1st syscall arg */ movl %ebx, %edi /* reload 1st syscall arg */
movl RCX(%rsp),%esi /* reload 2nd syscall arg */ movl RCX(%rsp), %esi /* reload 2nd syscall arg */
movl RDX(%rsp),%edx /* reload 3rd syscall arg */ movl RDX(%rsp), %edx /* reload 3rd syscall arg */
movl RSI(%rsp),%ecx /* reload 4th syscall arg */ movl RSI(%rsp), %ecx /* reload 4th syscall arg */
movl RDI(%rsp),%r8d /* reload 5th syscall arg */ movl RDI(%rsp), %r8d /* reload 5th syscall arg */
.endm .endm
.macro auditsys_exit exit .macro auditsys_exit exit
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz ia32_ret_from_sys_call jnz ia32_ret_from_sys_call
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
movl %eax,%esi /* second arg, syscall return value */ movl %eax, %esi /* second arg, syscall return value */
cmpl $-MAX_ERRNO,%eax /* is it an error ? */ cmpl $-MAX_ERRNO, %eax /* is it an error ? */
jbe 1f jbe 1f
movslq %eax, %rsi /* if error sign extend to 64 bits */ movslq %eax, %rsi /* if error sign extend to 64 bits */
1: setbe %al /* 1 if error, 0 if not */ 1: setbe %al /* 1 if error, 0 if not */
movzbl %al,%edi /* zero-extend that into %edi */ movzbl %al, %edi /* zero-extend that into %edi */
call __audit_syscall_exit call __audit_syscall_exit
movq RAX(%rsp),%rax /* reload syscall return value */ movq RAX(%rsp), %rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jz \exit jz \exit
CLEAR_RREGS xorl %eax, %eax /* Do not leak kernel information */
jmp int_with_check movq %rax, R11(%rsp)
movq %rax, R10(%rsp)
movq %rax, R9(%rsp)
movq %rax, R8(%rsp)
jmp int_with_check
.endm .endm
sysenter_auditsys: sysenter_auditsys:
auditsys_entry_common auditsys_entry_common
movl %ebp,%r9d /* reload 6th syscall arg */ movl %ebp, %r9d /* reload 6th syscall arg */
jmp sysenter_dispatch jmp sysenter_dispatch
sysexit_audit: sysexit_audit:
auditsys_exit sysexit_from_sys_call auditsys_exit sysexit_from_sys_call
#endif #endif
sysenter_fix_flags: sysenter_fix_flags:
pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
popfq_cfi popfq
jmp sysenter_flags_fixed jmp sysenter_flags_fixed
sysenter_tracesys: sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL #ifdef CONFIG_AUDITSYSCALL
...@@ -292,26 +239,36 @@ sysenter_tracesys: ...@@ -292,26 +239,36 @@ sysenter_tracesys:
jz sysenter_auditsys jz sysenter_auditsys
#endif #endif
SAVE_EXTRA_REGS SAVE_EXTRA_REGS
CLEAR_RREGS xorl %eax, %eax /* Do not leak kernel information */
movq %rsp,%rdi /* &pt_regs -> arg1 */ movq %rax, R11(%rsp)
movq %rax, R10(%rsp)
movq %rax, R9(%rsp)
movq %rax, R8(%rsp)
movq %rsp, %rdi /* &pt_regs -> arg1 */
call syscall_trace_enter call syscall_trace_enter
LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
/* Reload arg registers from stack. (see sysenter_tracesys) */
movl RCX(%rsp), %ecx
movl RDX(%rsp), %edx
movl RSI(%rsp), %esi
movl RDI(%rsp), %edi
movl %eax, %eax /* zero extension */
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
jmp sysenter_do_call jmp sysenter_do_call
CFI_ENDPROC ENDPROC(entry_SYSENTER_compat)
ENDPROC(ia32_sysenter_target)
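The cmpq $(IA32_NR_syscalls-1)/ja/call *ia32_sys_call_table(,%rax,8) sequence above is the whole compat dispatch. A rough C rendering follows; the function-pointer type and the explicit -ENOSYS return are assumptions made for the sketch (in the asm, pt_regs->ax was pre-loaded with -ENOSYS, so the out-of-range case simply skips the call):

#include <errno.h>

#define IA32_NR_SYSCALLS 512	/* placeholder; the real IA32_NR_syscalls comes from the build */

typedef long (*ia32_sys_call_ptr)(unsigned long, unsigned long, unsigned long,
				  unsigned long, unsigned long, unsigned long);

extern const ia32_sys_call_ptr ia32_sys_call_table[IA32_NR_SYSCALLS];

/* Unsigned compare, as in the asm: a "negative" nr is also out of range. */
static long ia32_dispatch(unsigned long nr,
			  unsigned long a1, unsigned long a2, unsigned long a3,
			  unsigned long a4, unsigned long a5, unsigned long a6)
{
	if (nr > IA32_NR_SYSCALLS - 1)
		return -ENOSYS;
	return ia32_sys_call_table[nr](a1, a2, a3, a4, a5, a6);
}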
/* /*
* 32bit SYSCALL instruction entry. * 32-bit SYSCALL instruction entry.
* *
* 32bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11, * 32-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
* then loads new ss, cs, and rip from previously programmed MSRs. * then loads new ss, cs, and rip from previously programmed MSRs.
* rflags gets masked by a value from another MSR (so CLD and CLAC * rflags gets masked by a value from another MSR (so CLD and CLAC
* are not needed). SYSCALL does not save anything on the stack * are not needed). SYSCALL does not save anything on the stack
* and does not change rsp. * and does not change rsp.
* *
* Note: rflags saving+masking-with-MSR happens only in Long mode * Note: rflags saving+masking-with-MSR happens only in Long mode
* (in legacy 32bit mode, IF, RF and VM bits are cleared and that's it). * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it).
* Don't get confused: rflags saving+masking depends on Long Mode Active bit * Don't get confused: rflags saving+masking depends on Long Mode Active bit
* (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
* or target CS descriptor's L bit (SYSCALL does not read segment descriptors). * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
...@@ -331,92 +288,83 @@ ENDPROC(ia32_sysenter_target) ...@@ -331,92 +288,83 @@ ENDPROC(ia32_sysenter_target)
* path below. We set up a complete hardware stack frame to share code * path below. We set up a complete hardware stack frame to share code
* with the int 0x80 path. * with the int 0x80 path.
*/ */
ENTRY(ia32_cstar_target) ENTRY(entry_SYSCALL_compat)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,0
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
/* /*
* Interrupts are off on entry. * Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
* it is too small to ever cause noticeable irq latency. * it is too small to ever cause noticeable irq latency.
*/ */
SWAPGS_UNSAFE_STACK SWAPGS_UNSAFE_STACK
movl %esp,%r8d movl %esp, %r8d
CFI_REGISTER rsp,r8 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
/* Zero-extending 32-bit regs, do not remove */ /* Zero-extending 32-bit regs, do not remove */
movl %eax,%eax movl %eax, %eax
/* Construct struct pt_regs on stack */ /* Construct struct pt_regs on stack */
pushq_cfi $__USER32_DS /* pt_regs->ss */ pushq $__USER32_DS /* pt_regs->ss */
pushq_cfi %r8 /* pt_regs->sp */ pushq %r8 /* pt_regs->sp */
CFI_REL_OFFSET rsp,0 pushq %r11 /* pt_regs->flags */
pushq_cfi %r11 /* pt_regs->flags */ pushq $__USER32_CS /* pt_regs->cs */
pushq_cfi $__USER32_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */
pushq_cfi %rcx /* pt_regs->ip */ pushq %rax /* pt_regs->orig_ax */
CFI_REL_OFFSET rip,0 pushq %rdi /* pt_regs->di */
pushq_cfi_reg rax /* pt_regs->orig_ax */ pushq %rsi /* pt_regs->si */
pushq_cfi_reg rdi /* pt_regs->di */ pushq %rdx /* pt_regs->dx */
pushq_cfi_reg rsi /* pt_regs->si */ pushq %rbp /* pt_regs->cx */
pushq_cfi_reg rdx /* pt_regs->dx */ movl %ebp, %ecx
pushq_cfi_reg rbp /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */
movl %ebp,%ecx sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
pushq_cfi $-ENOSYS /* pt_regs->ax */
sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
CFI_ADJUST_CFA_OFFSET 10*8
/* /*
* no need to do an access_ok check here because r8 has been * No need to do an access_ok check here because r8 has been
* 32bit zero extended * 32-bit zero extended:
*/ */
ASM_STAC ASM_STAC
1: movl (%r8),%r9d 1: movl (%r8), %ebp
_ASM_EXTABLE(1b,ia32_badarg) _ASM_EXTABLE(1b, ia32_badarg)
ASM_CLAC ASM_CLAC
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
CFI_REMEMBER_STATE
jnz cstar_tracesys jnz cstar_tracesys
cstar_do_call: cstar_do_call:
/* 32bit syscall -> 64bit C ABI argument conversion */ /* 32-bit syscall -> 64-bit C ABI argument conversion */
movl %edi,%r8d /* arg5 */ movl %edi, %r8d /* arg5 */
/* r9 already loaded */ /* arg6 */ movl %ebp, %r9d /* arg6 */
xchg %ecx,%esi /* rsi:arg2, rcx:arg4 */ xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
movl %ebx,%edi /* arg1 */ movl %ebx, %edi /* arg1 */
movl %edx,%edx /* arg3 (zero extension) */ movl %edx, %edx /* arg3 (zero extension) */
cstar_dispatch: cstar_dispatch:
cmpq $(IA32_NR_syscalls-1),%rax cmpq $(IA32_NR_syscalls-1), %rax
ja 1f ja 1f
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX(%rsp) call *ia32_sys_call_table(, %rax, 8)
movq %rax, RAX(%rsp)
1: 1:
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz sysretl_audit jnz sysretl_audit
sysretl_from_sys_call: sysretl_from_sys_call:
andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
movl RCX(%rsp), %ebp
RESTORE_RSI_RDI_RDX RESTORE_RSI_RDI_RDX
movl RIP(%rsp),%ecx movl RIP(%rsp), %ecx
CFI_REGISTER rip,rcx movl EFLAGS(%rsp), %r11d
movl EFLAGS(%rsp),%r11d xorq %r10, %r10
/*CFI_REGISTER rflags,r11*/ xorq %r9, %r9
xorq %r10,%r10 xorq %r8, %r8
xorq %r9,%r9
xorq %r8,%r8
TRACE_IRQS_ON TRACE_IRQS_ON
movl RSP(%rsp),%esp movl RSP(%rsp), %esp
CFI_RESTORE rsp
/* /*
* 64bit->32bit SYSRET restores eip from ecx, * 64-bit->32-bit SYSRET restores eip from ecx,
* eflags from r11 (but RF and VM bits are forced to 0), * eflags from r11 (but RF and VM bits are forced to 0),
* cs and ss are loaded from MSRs. * cs and ss are loaded from MSRs.
* (Note: 32bit->32bit SYSRET is different: since r11 * (Note: 32-bit->32-bit SYSRET is different: since r11
* does not exist, it merely sets eflags.IF=1). * does not exist, it merely sets eflags.IF=1).
* *
* NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
...@@ -430,11 +378,9 @@ sysretl_from_sys_call: ...@@ -430,11 +378,9 @@ sysretl_from_sys_call:
#ifdef CONFIG_AUDITSYSCALL #ifdef CONFIG_AUDITSYSCALL
cstar_auditsys: cstar_auditsys:
CFI_RESTORE_STATE
movl %r9d,R9(%rsp) /* register to be clobbered by call */
auditsys_entry_common auditsys_entry_common
movl R9(%rsp),%r9d /* reload 6th syscall arg */ movl %ebp, %r9d /* reload 6th syscall arg */
jmp cstar_dispatch jmp cstar_dispatch
sysretl_audit: sysretl_audit:
auditsys_exit sysretl_from_sys_call auditsys_exit sysretl_from_sys_call
...@@ -442,25 +388,41 @@ sysretl_audit: ...@@ -442,25 +388,41 @@ sysretl_audit:
cstar_tracesys: cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL #ifdef CONFIG_AUDITSYSCALL
testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jz cstar_auditsys jz cstar_auditsys
#endif #endif
xchgl %r9d,%ebp
SAVE_EXTRA_REGS SAVE_EXTRA_REGS
CLEAR_RREGS r9 xorl %eax, %eax /* Do not leak kernel information */
movq %rsp,%rdi /* &pt_regs -> arg1 */ movq %rax, R11(%rsp)
call syscall_trace_enter movq %rax, R10(%rsp)
LOAD_ARGS32 1 /* reload args from stack in case ptrace changed it */ movq %rax, R9(%rsp)
movq %rax, R8(%rsp)
movq %rsp, %rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
/* Reload arg registers from stack. (see sysenter_tracesys) */
movl RCX(%rsp), %ecx
movl RDX(%rsp), %edx
movl RSI(%rsp), %esi
movl RDI(%rsp), %edi
movl %eax, %eax /* zero extension */
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
xchgl %ebp,%r9d jmp cstar_do_call
jmp cstar_do_call END(entry_SYSCALL_compat)
END(ia32_cstar_target)
ia32_badarg: ia32_badarg:
ASM_CLAC ASM_CLAC
movq $-EFAULT,%rax movq $-EFAULT, %rax
jmp ia32_sysret jmp ia32_sysret
CFI_ENDPROC
ia32_ret_from_sys_call:
xorl %eax, %eax /* Do not leak kernel information */
movq %rax, R11(%rsp)
movq %rax, R10(%rsp)
movq %rax, R9(%rsp)
movq %rax, R8(%rsp)
jmp int_ret_from_sys_call
/* /*
* Emulated IA32 system calls via int 0x80. * Emulated IA32 system calls via int 0x80.
...@@ -483,16 +445,7 @@ ia32_badarg: ...@@ -483,16 +445,7 @@ ia32_badarg:
* Assumes it is only called from user space and entered with interrupts off. * Assumes it is only called from user space and entered with interrupts off.
*/ */
ENTRY(ia32_syscall) ENTRY(entry_INT80_compat)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,5*8
/*CFI_REL_OFFSET ss,4*8 */
CFI_REL_OFFSET rsp,3*8
/*CFI_REL_OFFSET rflags,2*8 */
/*CFI_REL_OFFSET cs,1*8 */
CFI_REL_OFFSET rip,0*8
/* /*
* Interrupts are off on entry. * Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
...@@ -503,89 +456,92 @@ ENTRY(ia32_syscall) ...@@ -503,89 +456,92 @@ ENTRY(ia32_syscall)
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
/* Zero-extending 32-bit regs, do not remove */ /* Zero-extending 32-bit regs, do not remove */
movl %eax,%eax movl %eax, %eax
/* Construct struct pt_regs on stack (iret frame is already on stack) */ /* Construct struct pt_regs on stack (iret frame is already on stack) */
pushq_cfi_reg rax /* pt_regs->orig_ax */ pushq %rax /* pt_regs->orig_ax */
pushq_cfi_reg rdi /* pt_regs->di */ pushq %rdi /* pt_regs->di */
pushq_cfi_reg rsi /* pt_regs->si */ pushq %rsi /* pt_regs->si */
pushq_cfi_reg rdx /* pt_regs->dx */ pushq %rdx /* pt_regs->dx */
pushq_cfi_reg rcx /* pt_regs->cx */ pushq %rcx /* pt_regs->cx */
pushq_cfi $-ENOSYS /* pt_regs->ax */ pushq $-ENOSYS /* pt_regs->ax */
pushq $0 /* pt_regs->r8 */
pushq $0 /* pt_regs->r9 */
pushq $0 /* pt_regs->r10 */
pushq $0 /* pt_regs->r11 */
cld cld
sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */ sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
CFI_ADJUST_CFA_OFFSET 10*8
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz ia32_tracesys
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz ia32_tracesys
ia32_do_call: ia32_do_call:
/* 32bit syscall -> 64bit C ABI argument conversion */ /* 32-bit syscall -> 64-bit C ABI argument conversion */
movl %edi,%r8d /* arg5 */ movl %edi, %r8d /* arg5 */
movl %ebp,%r9d /* arg6 */ movl %ebp, %r9d /* arg6 */
xchg %ecx,%esi /* rsi:arg2, rcx:arg4 */ xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
movl %ebx,%edi /* arg1 */ movl %ebx, %edi /* arg1 */
movl %edx,%edx /* arg3 (zero extension) */ movl %edx, %edx /* arg3 (zero extension) */
cmpq $(IA32_NR_syscalls-1),%rax cmpq $(IA32_NR_syscalls-1), %rax
ja 1f ja 1f
call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
call *ia32_sys_call_table(, %rax, 8) /* RIP relative */
ia32_sysret: ia32_sysret:
movq %rax,RAX(%rsp) movq %rax, RAX(%rsp)
1: 1:
ia32_ret_from_sys_call: jmp int_ret_from_sys_call
CLEAR_RREGS
jmp int_ret_from_sys_call
ia32_tracesys: ia32_tracesys:
SAVE_EXTRA_REGS SAVE_EXTRA_REGS
CLEAR_RREGS movq %rsp, %rdi /* &pt_regs -> arg1 */
movq %rsp,%rdi /* &pt_regs -> arg1 */ call syscall_trace_enter
call syscall_trace_enter /*
LOAD_ARGS32 /* reload args from stack in case ptrace changed it */ * Reload arg registers from stack in case ptrace changed them.
* Don't reload %eax because syscall_trace_enter() returned
* the %rax value we should see. But do truncate it to 32 bits.
* If it's -1 to make us punt the syscall, then (u32)-1 is still
* an appropriately invalid value.
*/
movl RCX(%rsp), %ecx
movl RDX(%rsp), %edx
movl RSI(%rsp), %esi
movl RDI(%rsp), %edi
movl %eax, %eax /* zero extension */
RESTORE_EXTRA_REGS RESTORE_EXTRA_REGS
jmp ia32_do_call jmp ia32_do_call
CFI_ENDPROC END(entry_INT80_compat)
END(ia32_syscall)
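The "truncate it to 32 bits" remark in ia32_tracesys above works because of the range check performed before the table call; a hedged arithmetic sketch (illustrative only, nr is a hypothetical local):

	unsigned long nr = (u32)syscall_trace_enter(regs);	/* -1 becomes 0xffffffff   */
	if (nr <= IA32_NR_syscalls - 1)				/* 0xffffffff never passes */
		ia32_sys_call_table[nr]();			/* else ax keeps -ENOSYS   */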
.macro PTREGSCALL label, func .macro PTREGSCALL label, func
ALIGN ALIGN
GLOBAL(\label) GLOBAL(\label)
leaq \func(%rip),%rax leaq \func(%rip), %rax
jmp ia32_ptregs_common jmp ia32_ptregs_common
.endm .endm
CFI_STARTPROC32 PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
PTREGSCALL stub32_sigreturn, sys32_sigreturn
PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn PTREGSCALL stub32_fork, sys_fork
PTREGSCALL stub32_sigreturn, sys32_sigreturn PTREGSCALL stub32_vfork, sys_vfork
PTREGSCALL stub32_fork, sys_fork
PTREGSCALL stub32_vfork, sys_vfork
ALIGN ALIGN
GLOBAL(stub32_clone) GLOBAL(stub32_clone)
leaq sys_clone(%rip),%rax leaq sys_clone(%rip), %rax
mov %r8, %rcx /*
jmp ia32_ptregs_common * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
* The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
*
* The native 64-bit kernel's sys_clone() implements the latter,
* so we need to swap arguments here before calling it:
*/
xchg %r8, %rcx
jmp ia32_ptregs_common
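A minimal sketch of the argument-order difference the comment describes; clone32/clone64 are made-up names and the real kernel signatures depend on the CONFIG_CLONE_BACKWARDS variants:

	long clone32(unsigned long flags, void *sp, int *parent_tid,
		     int tls_val, int *child_tid);	/* order the 32-bit caller used */
	long clone64(unsigned long flags, void *sp, int *parent_tid,
		     int *child_tid, int tls_val);	/* order sys_clone() expects    */
	/* In the 64-bit C ABI arg4 is %rcx and arg5 is %r8, so the xchg above
	 * converts one ordering into the other before the tail jump.          */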
ALIGN ALIGN
ia32_ptregs_common: ia32_ptregs_common:
CFI_ENDPROC
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,SIZEOF_PTREGS
CFI_REL_OFFSET rax,RAX
CFI_REL_OFFSET rcx,RCX
CFI_REL_OFFSET rdx,RDX
CFI_REL_OFFSET rsi,RSI
CFI_REL_OFFSET rdi,RDI
CFI_REL_OFFSET rip,RIP
/* CFI_REL_OFFSET cs,CS*/
/* CFI_REL_OFFSET rflags,EFLAGS*/
CFI_REL_OFFSET rsp,RSP
/* CFI_REL_OFFSET ss,SS*/
SAVE_EXTRA_REGS 8 SAVE_EXTRA_REGS 8
call *%rax call *%rax
RESTORE_EXTRA_REGS 8 RESTORE_EXTRA_REGS 8
ret ret
CFI_ENDPROC
END(ia32_ptregs_common) END(ia32_ptregs_common)
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
#else #else
#define SYM(sym, compat) sym #define SYM(sym, compat) sym
#define ia32_sys_call_table sys_call_table #define ia32_sys_call_table sys_call_table
#define __NR_ia32_syscall_max __NR_syscall_max #define __NR_entry_INT80_compat_max __NR_syscall_max
#endif #endif
#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void SYM(sym, compat)(void) ; #define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void SYM(sym, compat)(void) ;
...@@ -23,11 +23,11 @@ typedef asmlinkage void (*sys_call_ptr_t)(void); ...@@ -23,11 +23,11 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
extern asmlinkage void sys_ni_syscall(void); extern asmlinkage void sys_ni_syscall(void);
__visible const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = { __visible const sys_call_ptr_t ia32_sys_call_table[__NR_entry_INT80_compat_max+1] = {
/* /*
* Smells like a compiler bug -- it doesn't work * Smells like a compiler bug -- it doesn't work
* when the & below is removed. * when the & below is removed.
*/ */
[0 ... __NR_ia32_syscall_max] = &sys_ni_syscall, [0 ... __NR_entry_INT80_compat_max] = &sys_ni_syscall,
#include <asm/syscalls_32.h> #include <asm/syscalls_32.h>
}; };
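The [0 ... __NR_entry_INT80_compat_max] line uses GCC's range-designator extension, so every slot defaults to sys_ni_syscall before asm/syscalls_32.h overrides the implemented entries. A tiny standalone illustration (not kernel code):

	static int fallback(void) { return -1; }
	static int (*table[4])(void) = { [0 ... 3] = &fallback };	/* all four slots point at fallback */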
out := $(obj)/../include/generated/asm out := $(obj)/../../include/generated/asm
uapi := $(obj)/../include/generated/uapi/asm uapi := $(obj)/../../include/generated/uapi/asm
# Create output directory if not already present # Create output directory if not already present
_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \ _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
......
...@@ -6,16 +6,14 @@ ...@@ -6,16 +6,14 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/dwarf2.h>
/* put return address in eax (arg1) */ /* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0 .macro THUNK name, func, put_ret_addr_in_eax=0
.globl \name .globl \name
\name: \name:
CFI_STARTPROC pushl %eax
pushl_cfi_reg eax pushl %ecx
pushl_cfi_reg ecx pushl %edx
pushl_cfi_reg edx
.if \put_ret_addr_in_eax .if \put_ret_addr_in_eax
/* Place EIP in the arg1 */ /* Place EIP in the arg1 */
...@@ -23,11 +21,10 @@ ...@@ -23,11 +21,10 @@
.endif .endif
call \func call \func
popl_cfi_reg edx popl %edx
popl_cfi_reg ecx popl %ecx
popl_cfi_reg eax popl %eax
ret ret
CFI_ENDPROC
_ASM_NOKPROBE(\name) _ASM_NOKPROBE(\name)
.endm .endm
......
...@@ -6,35 +6,32 @@ ...@@ -6,35 +6,32 @@
* Subject to the GNU public license, v.2. No warranty of any kind. * Subject to the GNU public license, v.2. No warranty of any kind.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h> #include "calling.h"
#include <asm/calling.h>
#include <asm/asm.h> #include <asm/asm.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */ /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0 .macro THUNK name, func, put_ret_addr_in_rdi=0
.globl \name .globl \name
\name: \name:
CFI_STARTPROC
/* this one pushes 9 elems, the next one would be %rIP */ /* this one pushes 9 elems, the next one would be %rIP */
pushq_cfi_reg rdi pushq %rdi
pushq_cfi_reg rsi pushq %rsi
pushq_cfi_reg rdx pushq %rdx
pushq_cfi_reg rcx pushq %rcx
pushq_cfi_reg rax pushq %rax
pushq_cfi_reg r8 pushq %r8
pushq_cfi_reg r9 pushq %r9
pushq_cfi_reg r10 pushq %r10
pushq_cfi_reg r11 pushq %r11
.if \put_ret_addr_in_rdi .if \put_ret_addr_in_rdi
/* 9*8(%rsp) is return addr on stack */ /* 9*8(%rsp) is return addr on stack */
movq_cfi_restore 9*8, rdi movq 9*8(%rsp), %rdi
.endif .endif
call \func call \func
jmp restore jmp restore
CFI_ENDPROC
_ASM_NOKPROBE(\name) _ASM_NOKPROBE(\name)
.endm .endm

...@@ -57,19 +54,16 @@ ...@@ -57,19 +54,16 @@
#if defined(CONFIG_TRACE_IRQFLAGS) \ #if defined(CONFIG_TRACE_IRQFLAGS) \
|| defined(CONFIG_DEBUG_LOCK_ALLOC) \ || defined(CONFIG_DEBUG_LOCK_ALLOC) \
|| defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT)
CFI_STARTPROC
CFI_ADJUST_CFA_OFFSET 9*8
restore: restore:
popq_cfi_reg r11 popq %r11
popq_cfi_reg r10 popq %r10
popq_cfi_reg r9 popq %r9
popq_cfi_reg r8 popq %r8
popq_cfi_reg rax popq %rax
popq_cfi_reg rcx popq %rcx
popq_cfi_reg rdx popq %rdx
popq_cfi_reg rsi popq %rsi
popq_cfi_reg rdi popq %rdi
ret ret
CFI_ENDPROC
_ASM_NOKPROBE(restore) _ASM_NOKPROBE(restore)
#endif #endif
#
# Makefile for the x86 low level vsyscall code
#
obj-y := vsyscall_gtod.o
obj-$(CONFIG_X86_VSYSCALL_EMULATION) += vsyscall_64.o vsyscall_emu_64.o
...@@ -24,6 +24,6 @@ TRACE_EVENT(emulate_vsyscall, ...@@ -24,6 +24,6 @@ TRACE_EVENT(emulate_vsyscall,
#endif #endif
#undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../arch/x86/kernel #define TRACE_INCLUDE_PATH ../../arch/x86/entry/vsyscall/
#define TRACE_INCLUDE_FILE vsyscall_trace #define TRACE_INCLUDE_FILE vsyscall_trace
#include <trace/define_trace.h> #include <trace/define_trace.h>
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
# Makefile for the ia32 kernel emulation subsystem. # Makefile for the ia32 kernel emulation subsystem.
# #
obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o obj-$(CONFIG_IA32_EMULATION) := sys_ia32.o ia32_signal.o
obj-$(CONFIG_IA32_AOUT) += ia32_aout.o obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
......
#ifndef _ASM_X86_DWARF2_H
#define _ASM_X86_DWARF2_H
#ifndef __ASSEMBLY__
#warning "asm/dwarf2.h should be only included in pure assembly files"
#endif
/*
* Macros for dwarf2 CFI unwind table entries.
* See "as.info" for details on these pseudo ops. Unfortunately
* they are only supported in very new binutils, so define them
* away for older versions.
*/
#ifdef CONFIG_AS_CFI
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_OFFSET .cfi_offset
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_REGISTER .cfi_register
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
#define CFI_UNDEFINED .cfi_undefined
#define CFI_ESCAPE .cfi_escape
#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
#define CFI_SIGNAL_FRAME .cfi_signal_frame
#else
#define CFI_SIGNAL_FRAME
#endif
#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
/*
* Emit CFI data in .debug_frame sections, not .eh_frame sections.
* The latter we currently just discard since we don't do DWARF
* unwinding at runtime. So only the offline DWARF information is
* useful to anyone. Note we should not use this directive if this
* file is used in the vDSO assembly, or if vmlinux.lds.S gets
* changed so it doesn't discard .eh_frame.
*/
.cfi_sections .debug_frame
#endif
#else
/*
* Due to the structure of pre-existing code, don't use assembler line
* comment character # to ignore the arguments. Instead, use a dummy macro.
*/
.macro cfi_ignore a=0, b=0, c=0, d=0
.endm
#define CFI_STARTPROC cfi_ignore
#define CFI_ENDPROC cfi_ignore
#define CFI_DEF_CFA cfi_ignore
#define CFI_DEF_CFA_REGISTER cfi_ignore
#define CFI_DEF_CFA_OFFSET cfi_ignore
#define CFI_ADJUST_CFA_OFFSET cfi_ignore
#define CFI_OFFSET cfi_ignore
#define CFI_REL_OFFSET cfi_ignore
#define CFI_REGISTER cfi_ignore
#define CFI_RESTORE cfi_ignore
#define CFI_REMEMBER_STATE cfi_ignore
#define CFI_RESTORE_STATE cfi_ignore
#define CFI_UNDEFINED cfi_ignore
#define CFI_ESCAPE cfi_ignore
#define CFI_SIGNAL_FRAME cfi_ignore
#endif
/*
* An attempt to make CFI annotations more or less
* correct and shorter. It is implied that you know
* what you're doing if you use them.
*/
#ifdef __ASSEMBLY__
#ifdef CONFIG_X86_64
.macro pushq_cfi reg
pushq \reg
CFI_ADJUST_CFA_OFFSET 8
.endm
.macro pushq_cfi_reg reg
pushq %\reg
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET \reg, 0
.endm
.macro popq_cfi reg
popq \reg
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro popq_cfi_reg reg
popq %\reg
CFI_ADJUST_CFA_OFFSET -8
CFI_RESTORE \reg
.endm
.macro pushfq_cfi
pushfq
CFI_ADJUST_CFA_OFFSET 8
.endm
.macro popfq_cfi
popfq
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro movq_cfi reg offset=0
movq %\reg, \offset(%rsp)
CFI_REL_OFFSET \reg, \offset
.endm
.macro movq_cfi_restore offset reg
movq \offset(%rsp), %\reg
CFI_RESTORE \reg
.endm
#else /*!CONFIG_X86_64*/
.macro pushl_cfi reg
pushl \reg
CFI_ADJUST_CFA_OFFSET 4
.endm
.macro pushl_cfi_reg reg
pushl %\reg
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET \reg, 0
.endm
.macro popl_cfi reg
popl \reg
CFI_ADJUST_CFA_OFFSET -4
.endm
.macro popl_cfi_reg reg
popl %\reg
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE \reg
.endm
.macro pushfl_cfi
pushfl
CFI_ADJUST_CFA_OFFSET 4
.endm
.macro popfl_cfi
popfl
CFI_ADJUST_CFA_OFFSET -4
.endm
.macro movl_cfi reg offset=0
movl %\reg, \offset(%esp)
CFI_REL_OFFSET \reg, \offset
.endm
.macro movl_cfi_restore offset reg
movl \offset(%esp), %\reg
CFI_RESTORE \reg
.endm
#endif /*!CONFIG_X86_64*/
#endif /*__ASSEMBLY__*/
#endif /* _ASM_X86_DWARF2_H */
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/dwarf2.h>
/* The annotation hides the frame from the unwinder and makes it look /* The annotation hides the frame from the unwinder and makes it look
like an ordinary ebp save/restore. This avoids some special cases for like an ordinary ebp save/restore. This avoids some special cases for
frame pointer later */ frame pointer later */
#ifdef CONFIG_FRAME_POINTER #ifdef CONFIG_FRAME_POINTER
.macro FRAME .macro FRAME
__ASM_SIZE(push,_cfi) %__ASM_REG(bp) __ASM_SIZE(push,) %__ASM_REG(bp)
CFI_REL_OFFSET __ASM_REG(bp), 0
__ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp) __ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp)
.endm .endm
.macro ENDFRAME .macro ENDFRAME
__ASM_SIZE(pop,_cfi) %__ASM_REG(bp) __ASM_SIZE(pop,) %__ASM_REG(bp)
CFI_RESTORE __ASM_REG(bp)
.endm .endm
#else #else
.macro FRAME .macro FRAME
......
...@@ -206,8 +206,13 @@ do { \ ...@@ -206,8 +206,13 @@ do { \
#endif /* !CONFIG_PARAVIRT */ #endif /* !CONFIG_PARAVIRT */
#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \ /*
(u32)((val) >> 32)) * 64-bit version of wrmsr_safe():
*/
static inline int wrmsrl_safe(u32 msr, u64 val)
{
return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}
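With wrmsrl_safe() now a real inline, the 64-bit value is evaluated once and split into the low/high halves wrmsr_safe() takes; an illustrative call (the error check is added only for this sketch, the callers below ignore the return value):

	if (wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS))
		pr_warn("SYSENTER CS MSR write faulted\n");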
#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high)) #define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
......
...@@ -5,12 +5,14 @@ ...@@ -5,12 +5,14 @@
/* misc architecture specific prototypes */ /* misc architecture specific prototypes */
void system_call(void);
void syscall_init(void); void syscall_init(void);
void ia32_syscall(void); void entry_SYSCALL_64(void);
void ia32_cstar_target(void); void entry_SYSCALL_compat(void);
void ia32_sysenter_target(void); void entry_INT80_32(void);
void entry_INT80_compat(void);
void entry_SYSENTER_32(void);
void entry_SYSENTER_compat(void);
void x86_configure_nx(void); void x86_configure_nx(void);
void x86_report_nx(void); void x86_report_nx(void);
......
...@@ -231,11 +231,21 @@ ...@@ -231,11 +231,21 @@
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8) #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8)
#ifdef __KERNEL__ #ifdef __KERNEL__
/*
* early_idt_handler_array is an array of entry points referenced in the
* early IDT. For simplicity, it's a real array with one entry point
* every nine bytes. That leaves room for an optional 'push $0' if the
* vector has no error code (two bytes), a 'push $vector_number' (two
* bytes), and a jump to the common entry code (up to five bytes).
*/
#define EARLY_IDT_HANDLER_SIZE 9
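The nine bytes are exactly the 2 + 2 + 5 budget listed in the comment, and because the stride is constant, vector i's stub is plain array element i. A sketch of how the early boot code indexes it (this mirrors the x86_64_start_kernel() hunk later in this patch):

	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
		set_intr_gate(i, early_idt_handler_array[i]);	/* == &early_idt_handler_array[i][0] */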
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
# define trace_early_idt_handlers early_idt_handlers # define trace_early_idt_handler_array early_idt_handler_array
#endif #endif
/* /*
......
...@@ -22,7 +22,7 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n ...@@ -22,7 +22,7 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n
CFLAGS_irq.o := -I$(src)/../include/asm/trace CFLAGS_irq.o := -I$(src)/../include/asm/trace
obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y := process_$(BITS).o signal.o
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
...@@ -31,9 +31,6 @@ obj-y += probe_roms.o ...@@ -31,9 +31,6 @@ obj-y += probe_roms.o
obj-$(CONFIG_X86_32) += i386_ksyms_32.o obj-$(CONFIG_X86_32) += i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-$(CONFIG_X86_64) += mcount_64.o obj-$(CONFIG_X86_64) += mcount_64.o
obj-y += syscall_$(BITS).o vsyscall_gtod.o
obj-$(CONFIG_IA32_EMULATION) += syscall_32.o
obj-$(CONFIG_X86_VSYSCALL_EMULATION) += vsyscall_64.o vsyscall_emu_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
obj-$(CONFIG_SYSFS) += ksysfs.o obj-$(CONFIG_SYSFS) += ksysfs.o
obj-y += bootflag.o e820.o obj-y += bootflag.o e820.o
......
...@@ -66,7 +66,7 @@ int main(void) ...@@ -66,7 +66,7 @@ int main(void)
DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1); DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
DEFINE(NR_syscalls, sizeof(syscalls_64)); DEFINE(NR_syscalls, sizeof(syscalls_64));
DEFINE(__NR_ia32_syscall_max, sizeof(syscalls_ia32) - 1); DEFINE(__NR_entry_INT80_compat_max, sizeof(syscalls_ia32) - 1);
DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32)); DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32));
return 0; return 0;
......
...@@ -1026,7 +1026,7 @@ void enable_sep_cpu(void) ...@@ -1026,7 +1026,7 @@ void enable_sep_cpu(void)
(unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack), (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
0); 0);
wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target, 0); wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
out: out:
put_cpu(); put_cpu();
...@@ -1204,10 +1204,10 @@ void syscall_init(void) ...@@ -1204,10 +1204,10 @@ void syscall_init(void)
* set CS/DS but only a 32bit target. LSTAR sets the 64bit rip. * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
*/ */
wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
wrmsrl(MSR_LSTAR, system_call); wrmsrl(MSR_LSTAR, entry_SYSCALL_64);
#ifdef CONFIG_IA32_EMULATION #ifdef CONFIG_IA32_EMULATION
wrmsrl(MSR_CSTAR, ia32_cstar_target); wrmsrl(MSR_CSTAR, entry_SYSCALL_compat);
/* /*
* This only works on Intel CPUs. * This only works on Intel CPUs.
* On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
...@@ -1216,7 +1216,7 @@ void syscall_init(void) ...@@ -1216,7 +1216,7 @@ void syscall_init(void)
*/ */
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else #else
wrmsrl(MSR_CSTAR, ignore_sysret); wrmsrl(MSR_CSTAR, ignore_sysret);
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
......
...@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) ...@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
clear_bss(); clear_bss();
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
set_intr_gate(i, early_idt_handlers[i]); set_intr_gate(i, early_idt_handler_array[i]);
load_idt((const struct desc_ptr *)&idt_descr); load_idt((const struct desc_ptr *)&idt_descr);
copy_bootdata(__va(real_mode_data)); copy_bootdata(__va(real_mode_data));
......
...@@ -478,21 +478,22 @@ is486: ...@@ -478,21 +478,22 @@ is486:
__INIT __INIT
setup_once: setup_once:
/* /*
* Set up an idt with 256 entries pointing to ignore_int, * Set up an idt with 256 interrupt gates that push zero if there
* interrupt gates. It doesn't actually load idt - that needs * is no error code and then jump to early_idt_handler_common.
* to be done on each CPU. Interrupts are enabled elsewhere, * It doesn't actually load the idt - that needs to be done on
* when we can be relatively sure everything is ok. * each CPU. Interrupts are enabled elsewhere, when we can be
* relatively sure everything is ok.
*/ */
movl $idt_table,%edi movl $idt_table,%edi
movl $early_idt_handlers,%eax movl $early_idt_handler_array,%eax
movl $NUM_EXCEPTION_VECTORS,%ecx movl $NUM_EXCEPTION_VECTORS,%ecx
1: 1:
movl %eax,(%edi) movl %eax,(%edi)
movl %eax,4(%edi) movl %eax,4(%edi)
/* interrupt gate, dpl=0, present */ /* interrupt gate, dpl=0, present */
movl $(0x8E000000 + __KERNEL_CS),2(%edi) movl $(0x8E000000 + __KERNEL_CS),2(%edi)
addl $9,%eax addl $EARLY_IDT_HANDLER_SIZE,%eax
addl $8,%edi addl $8,%edi
loop 1b loop 1b
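The three overlapping stores above build each 8-byte gate with only two registers: the first two movl's drop the handler address at offsets 0 and 4, then the constant store to offset 2 overwrites the middle four bytes with the selector and attribute, leaving just the low and high halves of the address in place. A hedged C view of the resulting descriptor (field names illustrative):

	struct early_idt_gate {		/* sketch of the final layout  */
		u16 offset_low;		/* handler & 0xffff            */
		u16 selector;		/* __KERNEL_CS                 */
		u16 attr;		/* 0x8e00: present, DPL 0,
					 * 32-bit interrupt gate       */
		u16 offset_high;	/* handler >> 16               */
	};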
...@@ -524,26 +525,28 @@ setup_once: ...@@ -524,26 +525,28 @@ setup_once:
andl $0,setup_once_ref /* Once is enough, thanks */ andl $0,setup_once_ref /* Once is enough, thanks */
ret ret
ENTRY(early_idt_handlers) ENTRY(early_idt_handler_array)
# 36(%esp) %eflags # 36(%esp) %eflags
# 32(%esp) %cs # 32(%esp) %cs
# 28(%esp) %eip # 28(%esp) %eip
# 24(%rsp) error code # 24(%rsp) error code
i = 0 i = 0
.rept NUM_EXCEPTION_VECTORS .rept NUM_EXCEPTION_VECTORS
.if (EXCEPTION_ERRCODE_MASK >> i) & 1 .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
ASM_NOP2
.else
pushl $0 # Dummy error code, to make stack frame uniform pushl $0 # Dummy error code, to make stack frame uniform
.endif .endif
pushl $i # 20(%esp) Vector number pushl $i # 20(%esp) Vector number
jmp early_idt_handler jmp early_idt_handler_common
i = i + 1 i = i + 1
.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
.endr .endr
ENDPROC(early_idt_handlers) ENDPROC(early_idt_handler_array)
/* This is global to keep gas from relaxing the jumps */ early_idt_handler_common:
ENTRY(early_idt_handler) /*
* The stack is the hardware frame, an error code or zero, and the
* vector number.
*/
cld cld
cmpl $2,(%esp) # X86_TRAP_NMI cmpl $2,(%esp) # X86_TRAP_NMI
...@@ -603,7 +606,7 @@ ex_entry: ...@@ -603,7 +606,7 @@ ex_entry:
.Lis_nmi: .Lis_nmi:
addl $8,%esp /* drop vector number and error code */ addl $8,%esp /* drop vector number and error code */
iret iret
ENDPROC(early_idt_handler) ENDPROC(early_idt_handler_common)
/* This is the default interrupt "handler" :-) */ /* This is the default interrupt "handler" :-) */
ALIGN ALIGN
......
...@@ -321,26 +321,28 @@ bad_address: ...@@ -321,26 +321,28 @@ bad_address:
jmp bad_address jmp bad_address
__INIT __INIT
.globl early_idt_handlers ENTRY(early_idt_handler_array)
early_idt_handlers:
# 104(%rsp) %rflags # 104(%rsp) %rflags
# 96(%rsp) %cs # 96(%rsp) %cs
# 88(%rsp) %rip # 88(%rsp) %rip
# 80(%rsp) error code # 80(%rsp) error code
i = 0 i = 0
.rept NUM_EXCEPTION_VECTORS .rept NUM_EXCEPTION_VECTORS
.if (EXCEPTION_ERRCODE_MASK >> i) & 1 .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
ASM_NOP2
.else
pushq $0 # Dummy error code, to make stack frame uniform pushq $0 # Dummy error code, to make stack frame uniform
.endif .endif
pushq $i # 72(%rsp) Vector number pushq $i # 72(%rsp) Vector number
jmp early_idt_handler jmp early_idt_handler_common
i = i + 1 i = i + 1
.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
.endr .endr
ENDPROC(early_idt_handler_array)
/* This is global to keep gas from relaxing the jumps */ early_idt_handler_common:
ENTRY(early_idt_handler) /*
* The stack is the hardware frame, an error code or zero, and the
* vector number.
*/
cld cld
cmpl $2,(%rsp) # X86_TRAP_NMI cmpl $2,(%rsp) # X86_TRAP_NMI
...@@ -412,7 +414,7 @@ ENTRY(early_idt_handler) ...@@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
.Lis_nmi: .Lis_nmi:
addq $16,%rsp # drop vector number and error code addq $16,%rsp # drop vector number and error code
INTERRUPT_RETURN INTERRUPT_RETURN
ENDPROC(early_idt_handler) ENDPROC(early_idt_handler_common)
__INITDATA __INITDATA
......
...@@ -72,8 +72,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss; ...@@ -72,8 +72,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else #else
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/proto.h>
asmlinkage int system_call(void);
#endif #endif
/* Must be page-aligned because the real IDT is used in a fixmap. */ /* Must be page-aligned because the real IDT is used in a fixmap. */
...@@ -980,12 +979,12 @@ void __init trap_init(void) ...@@ -980,12 +979,12 @@ void __init trap_init(void)
set_bit(i, used_vectors); set_bit(i, used_vectors);
#ifdef CONFIG_IA32_EMULATION #ifdef CONFIG_IA32_EMULATION
set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall); set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
set_bit(IA32_SYSCALL_VECTOR, used_vectors); set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif #endif
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
set_system_trap_gate(IA32_SYSCALL_VECTOR, &system_call); set_system_trap_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
set_bit(IA32_SYSCALL_VECTOR, used_vectors); set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif #endif
......
...@@ -17,7 +17,6 @@ clean-files := inat-tables.c ...@@ -17,7 +17,6 @@ clean-files := inat-tables.c
obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
lib-y := delay.o misc.o cmdline.o lib-y := delay.o misc.o cmdline.o
lib-y += thunk_$(BITS).o
lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o lib-y += memcpy_$(BITS).o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
......
...@@ -11,26 +11,23 @@ ...@@ -11,26 +11,23 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
/* if you want SMP support, implement these with real spinlocks */ /* if you want SMP support, implement these with real spinlocks */
.macro LOCK reg .macro LOCK reg
pushfl_cfi pushfl
cli cli
.endm .endm
.macro UNLOCK reg .macro UNLOCK reg
popfl_cfi popfl
.endm .endm
#define BEGIN(op) \ #define BEGIN(op) \
.macro endp; \ .macro endp; \
CFI_ENDPROC; \
ENDPROC(atomic64_##op##_386); \ ENDPROC(atomic64_##op##_386); \
.purgem endp; \ .purgem endp; \
.endm; \ .endm; \
ENTRY(atomic64_##op##_386); \ ENTRY(atomic64_##op##_386); \
CFI_STARTPROC; \
LOCK v; LOCK v;
#define ENDP endp #define ENDP endp
......
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
.macro read64 reg .macro read64 reg
movl %ebx, %eax movl %ebx, %eax
...@@ -22,16 +21,11 @@ ...@@ -22,16 +21,11 @@
.endm .endm
ENTRY(atomic64_read_cx8) ENTRY(atomic64_read_cx8)
CFI_STARTPROC
read64 %ecx read64 %ecx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_read_cx8) ENDPROC(atomic64_read_cx8)
ENTRY(atomic64_set_cx8) ENTRY(atomic64_set_cx8)
CFI_STARTPROC
1: 1:
/* we don't need LOCK_PREFIX since aligned 64-bit writes /* we don't need LOCK_PREFIX since aligned 64-bit writes
* are atomic on 586 and newer */ * are atomic on 586 and newer */
...@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8) ...@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8)
jne 1b jne 1b
ret ret
CFI_ENDPROC
ENDPROC(atomic64_set_cx8) ENDPROC(atomic64_set_cx8)
ENTRY(atomic64_xchg_cx8) ENTRY(atomic64_xchg_cx8)
CFI_STARTPROC
1: 1:
LOCK_PREFIX LOCK_PREFIX
cmpxchg8b (%esi) cmpxchg8b (%esi)
jne 1b jne 1b
ret ret
CFI_ENDPROC
ENDPROC(atomic64_xchg_cx8) ENDPROC(atomic64_xchg_cx8)
.macro addsub_return func ins insc .macro addsub_return func ins insc
ENTRY(atomic64_\func\()_return_cx8) ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC pushl %ebp
pushl_cfi_reg ebp pushl %ebx
pushl_cfi_reg ebx pushl %esi
pushl_cfi_reg esi pushl %edi
pushl_cfi_reg edi
movl %eax, %esi movl %eax, %esi
movl %edx, %edi movl %edx, %edi
...@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8) ...@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8)
10: 10:
movl %ebx, %eax movl %ebx, %eax
movl %ecx, %edx movl %ecx, %edx
popl_cfi_reg edi popl %edi
popl_cfi_reg esi popl %esi
popl_cfi_reg ebx popl %ebx
popl_cfi_reg ebp popl %ebp
ret ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8) ENDPROC(atomic64_\func\()_return_cx8)
.endm .endm
...@@ -93,8 +81,7 @@ addsub_return sub sub sbb ...@@ -93,8 +81,7 @@ addsub_return sub sub sbb
.macro incdec_return func ins insc .macro incdec_return func ins insc
ENTRY(atomic64_\func\()_return_cx8) ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx
read64 %esi read64 %esi
1: 1:
...@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8) ...@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8)
10: 10:
movl %ebx, %eax movl %ebx, %eax
movl %ecx, %edx movl %ecx, %edx
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8) ENDPROC(atomic64_\func\()_return_cx8)
.endm .endm
...@@ -119,8 +105,7 @@ incdec_return inc add adc ...@@ -119,8 +105,7 @@ incdec_return inc add adc
incdec_return dec sub sbb incdec_return dec sub sbb
ENTRY(atomic64_dec_if_positive_cx8) ENTRY(atomic64_dec_if_positive_cx8)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx
read64 %esi read64 %esi
1: 1:
...@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8) ...@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8)
2: 2:
movl %ebx, %eax movl %ebx, %eax
movl %ecx, %edx movl %ecx, %edx
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_dec_if_positive_cx8) ENDPROC(atomic64_dec_if_positive_cx8)
ENTRY(atomic64_add_unless_cx8) ENTRY(atomic64_add_unless_cx8)
CFI_STARTPROC pushl %ebp
pushl_cfi_reg ebp pushl %ebx
pushl_cfi_reg ebx
/* these just push these two parameters on the stack */ /* these just push these two parameters on the stack */
pushl_cfi_reg edi pushl %edi
pushl_cfi_reg ecx pushl %ecx
movl %eax, %ebp movl %eax, %ebp
movl %edx, %edi movl %edx, %edi
...@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8) ...@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8)
movl $1, %eax movl $1, %eax
3: 3:
addl $8, %esp addl $8, %esp
CFI_ADJUST_CFA_OFFSET -8 popl %ebx
popl_cfi_reg ebx popl %ebp
popl_cfi_reg ebp
ret ret
4: 4:
cmpl %edx, 4(%esp) cmpl %edx, 4(%esp)
jne 2b jne 2b
xorl %eax, %eax xorl %eax, %eax
jmp 3b jmp 3b
CFI_ENDPROC
ENDPROC(atomic64_add_unless_cx8) ENDPROC(atomic64_add_unless_cx8)
ENTRY(atomic64_inc_not_zero_cx8) ENTRY(atomic64_inc_not_zero_cx8)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx
read64 %esi read64 %esi
1: 1:
...@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8) ...@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8)
movl $1, %eax movl $1, %eax
3: 3:
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(atomic64_inc_not_zero_cx8) ENDPROC(atomic64_inc_not_zero_cx8)
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) ...@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
* alignment for the unrolled loop. * alignment for the unrolled loop.
*/ */
ENTRY(csum_partial) ENTRY(csum_partial)
CFI_STARTPROC pushl %esi
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg ebx
movl 20(%esp),%eax # Function arg: unsigned int sum movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: unsigned char *buff movl 12(%esp),%esi # Function arg: unsigned char *buff
...@@ -129,10 +127,9 @@ ENTRY(csum_partial) ...@@ -129,10 +127,9 @@ ENTRY(csum_partial)
jz 8f jz 8f
roll $8, %eax roll $8, %eax
8: 8:
popl_cfi_reg ebx popl %ebx
popl_cfi_reg esi popl %esi
ret ret
CFI_ENDPROC
ENDPROC(csum_partial) ENDPROC(csum_partial)
#else #else
...@@ -140,9 +137,8 @@ ENDPROC(csum_partial) ...@@ -140,9 +137,8 @@ ENDPROC(csum_partial)
/* Version for PentiumII/PPro */ /* Version for PentiumII/PPro */
ENTRY(csum_partial) ENTRY(csum_partial)
CFI_STARTPROC pushl %esi
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg ebx
movl 20(%esp),%eax # Function arg: unsigned int sum movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: const unsigned char *buf movl 12(%esp),%esi # Function arg: const unsigned char *buf
...@@ -249,10 +245,9 @@ ENTRY(csum_partial) ...@@ -249,10 +245,9 @@ ENTRY(csum_partial)
jz 90f jz 90f
roll $8, %eax roll $8, %eax
90: 90:
popl_cfi_reg ebx popl %ebx
popl_cfi_reg esi popl %esi
ret ret
CFI_ENDPROC
ENDPROC(csum_partial) ENDPROC(csum_partial)
#endif #endif
...@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, ...@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
#define FP 12 #define FP 12
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
subl $4,%esp subl $4,%esp
CFI_ADJUST_CFA_OFFSET 4 pushl %edi
pushl_cfi_reg edi pushl %esi
pushl_cfi_reg esi pushl %ebx
pushl_cfi_reg ebx
movl ARGBASE+16(%esp),%eax # sum movl ARGBASE+16(%esp),%eax # sum
movl ARGBASE+12(%esp),%ecx # len movl ARGBASE+12(%esp),%ecx # len
movl ARGBASE+4(%esp),%esi # src movl ARGBASE+4(%esp),%esi # src
...@@ -401,12 +394,11 @@ DST( movb %cl, (%edi) ) ...@@ -401,12 +394,11 @@ DST( movb %cl, (%edi) )
.previous .previous
popl_cfi_reg ebx popl %ebx
popl_cfi_reg esi popl %esi
popl_cfi_reg edi popl %edi
popl_cfi %ecx # equivalent to addl $4,%esp popl %ecx # equivalent to addl $4,%esp
ret ret
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic) ENDPROC(csum_partial_copy_generic)
#else #else
...@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic) ...@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic)
#define ARGBASE 12 #define ARGBASE 12
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx pushl %edi
pushl_cfi_reg edi pushl %esi
pushl_cfi_reg esi
movl ARGBASE+4(%esp),%esi #src movl ARGBASE+4(%esp),%esi #src
movl ARGBASE+8(%esp),%edi #dst movl ARGBASE+8(%esp),%edi #dst
movl ARGBASE+12(%esp),%ecx #len movl ARGBASE+12(%esp),%ecx #len
...@@ -489,11 +480,10 @@ DST( movb %dl, (%edi) ) ...@@ -489,11 +480,10 @@ DST( movb %dl, (%edi) )
jmp 7b jmp 7b
.previous .previous
popl_cfi_reg esi popl %esi
popl_cfi_reg edi popl %edi
popl_cfi_reg ebx popl %ebx
ret ret
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic) ENDPROC(csum_partial_copy_generic)
#undef ROUND #undef ROUND
......
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -15,7 +14,6 @@ ...@@ -15,7 +14,6 @@
* %rdi - page * %rdi - page
*/ */
ENTRY(clear_page) ENTRY(clear_page)
CFI_STARTPROC
ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \ ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
"jmp clear_page_c_e", X86_FEATURE_ERMS "jmp clear_page_c_e", X86_FEATURE_ERMS
...@@ -24,11 +22,9 @@ ENTRY(clear_page) ...@@ -24,11 +22,9 @@ ENTRY(clear_page)
xorl %eax,%eax xorl %eax,%eax
rep stosq rep stosq
ret ret
CFI_ENDPROC
ENDPROC(clear_page) ENDPROC(clear_page)
ENTRY(clear_page_orig) ENTRY(clear_page_orig)
CFI_STARTPROC
xorl %eax,%eax xorl %eax,%eax
movl $4096/64,%ecx movl $4096/64,%ecx
...@@ -48,14 +44,11 @@ ENTRY(clear_page_orig) ...@@ -48,14 +44,11 @@ ENTRY(clear_page_orig)
jnz .Lloop jnz .Lloop
nop nop
ret ret
CFI_ENDPROC
ENDPROC(clear_page_orig) ENDPROC(clear_page_orig)
ENTRY(clear_page_c_e) ENTRY(clear_page_c_e)
CFI_STARTPROC
movl $4096,%ecx movl $4096,%ecx
xorl %eax,%eax xorl %eax,%eax
rep stosb rep stosb
ret ret
CFI_ENDPROC
ENDPROC(clear_page_c_e) ENDPROC(clear_page_c_e)
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* *
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/percpu.h> #include <asm/percpu.h>
.text .text
...@@ -21,7 +20,6 @@ ...@@ -21,7 +20,6 @@
* %al : Operation successful * %al : Operation successful
*/ */
ENTRY(this_cpu_cmpxchg16b_emu) ENTRY(this_cpu_cmpxchg16b_emu)
CFI_STARTPROC
# #
# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
...@@ -32,7 +30,7 @@ CFI_STARTPROC ...@@ -32,7 +30,7 @@ CFI_STARTPROC
# *atomic* on a single cpu (as provided by the this_cpu_xx class of # *atomic* on a single cpu (as provided by the this_cpu_xx class of
# macros). # macros).
# #
pushfq_cfi pushfq
cli cli
cmpq PER_CPU_VAR((%rsi)), %rax cmpq PER_CPU_VAR((%rsi)), %rax
...@@ -43,17 +41,13 @@ CFI_STARTPROC ...@@ -43,17 +41,13 @@ CFI_STARTPROC
movq %rbx, PER_CPU_VAR((%rsi)) movq %rbx, PER_CPU_VAR((%rsi))
movq %rcx, PER_CPU_VAR(8(%rsi)) movq %rcx, PER_CPU_VAR(8(%rsi))
CFI_REMEMBER_STATE popfq
popfq_cfi
mov $1, %al mov $1, %al
ret ret
CFI_RESTORE_STATE
.Lnot_same: .Lnot_same:
popfq_cfi popfq
xor %al,%al xor %al,%al
ret ret
CFI_ENDPROC
ENDPROC(this_cpu_cmpxchg16b_emu) ENDPROC(this_cpu_cmpxchg16b_emu)
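A rough C model of the emulation, just to make the semantics explicit (the real routine takes its operands in registers, addresses the variable through %gs, and returns success in %al; names and types here are illustrative):

	static bool cmpxchg16b_emu_model(u64 *var, u64 old_lo, u64 old_hi,
					 u64 new_lo, u64 new_hi)
	{
		bool ok = false;
		unsigned long flags;

		local_irq_save(flags);	/* the asm brackets this with pushfq/cli ... popfq */
		if (var[0] == old_lo && var[1] == old_hi) {
			var[0] = new_lo;
			var[1] = new_hi;
			ok = true;
		}
		local_irq_restore(flags);
		return ok;
	}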
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
.text .text
...@@ -20,14 +19,13 @@ ...@@ -20,14 +19,13 @@
* %ecx : high 32 bits of new value * %ecx : high 32 bits of new value
*/ */
ENTRY(cmpxchg8b_emu) ENTRY(cmpxchg8b_emu)
CFI_STARTPROC
# #
# Emulate 'cmpxchg8b (%esi)' on UP except we don't # Emulate 'cmpxchg8b (%esi)' on UP except we don't
# set the whole ZF thing (caller will just compare # set the whole ZF thing (caller will just compare
# eax:edx with the expected value) # eax:edx with the expected value)
# #
pushfl_cfi pushfl
cli cli
cmpl (%esi), %eax cmpl (%esi), %eax
...@@ -38,18 +36,15 @@ CFI_STARTPROC ...@@ -38,18 +36,15 @@ CFI_STARTPROC
movl %ebx, (%esi) movl %ebx, (%esi)
movl %ecx, 4(%esi) movl %ecx, 4(%esi)
CFI_REMEMBER_STATE popfl
popfl_cfi
ret ret
CFI_RESTORE_STATE
.Lnot_same: .Lnot_same:
movl (%esi), %eax movl (%esi), %eax
.Lhalf_same: .Lhalf_same:
movl 4(%esi), %edx movl 4(%esi), %edx
popfl_cfi popfl
ret ret
CFI_ENDPROC
ENDPROC(cmpxchg8b_emu) ENDPROC(cmpxchg8b_emu)
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */ /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -13,22 +12,16 @@ ...@@ -13,22 +12,16 @@
*/ */
ALIGN ALIGN
ENTRY(copy_page) ENTRY(copy_page)
CFI_STARTPROC
ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
movl $4096/8, %ecx movl $4096/8, %ecx
rep movsq rep movsq
ret ret
CFI_ENDPROC
ENDPROC(copy_page) ENDPROC(copy_page)
ENTRY(copy_page_regs) ENTRY(copy_page_regs)
CFI_STARTPROC
subq $2*8, %rsp subq $2*8, %rsp
CFI_ADJUST_CFA_OFFSET 2*8
movq %rbx, (%rsp) movq %rbx, (%rsp)
CFI_REL_OFFSET rbx, 0
movq %r12, 1*8(%rsp) movq %r12, 1*8(%rsp)
CFI_REL_OFFSET r12, 1*8
movl $(4096/64)-5, %ecx movl $(4096/64)-5, %ecx
.p2align 4 .p2align 4
...@@ -87,11 +80,7 @@ ENTRY(copy_page_regs) ...@@ -87,11 +80,7 @@ ENTRY(copy_page_regs)
jnz .Loop2 jnz .Loop2
movq (%rsp), %rbx movq (%rsp), %rbx
CFI_RESTORE rbx
movq 1*8(%rsp), %r12 movq 1*8(%rsp), %r12
CFI_RESTORE r12
addq $2*8, %rsp addq $2*8, %rsp
CFI_ADJUST_CFA_OFFSET -2*8
ret ret
CFI_ENDPROC
ENDPROC(copy_page_regs) ENDPROC(copy_page_regs)
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/current.h> #include <asm/current.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
...@@ -18,7 +17,6 @@ ...@@ -18,7 +17,6 @@
/* Standard copy_to_user with segment limit checking */ /* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user) ENTRY(_copy_to_user)
CFI_STARTPROC
GET_THREAD_INFO(%rax) GET_THREAD_INFO(%rax)
movq %rdi,%rcx movq %rdi,%rcx
addq %rdx,%rcx addq %rdx,%rcx
...@@ -30,12 +28,10 @@ ENTRY(_copy_to_user) ...@@ -30,12 +28,10 @@ ENTRY(_copy_to_user)
X86_FEATURE_REP_GOOD, \ X86_FEATURE_REP_GOOD, \
"jmp copy_user_enhanced_fast_string", \ "jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS X86_FEATURE_ERMS
CFI_ENDPROC
ENDPROC(_copy_to_user) ENDPROC(_copy_to_user)
/* Standard copy_from_user with segment limit checking */ /* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user) ENTRY(_copy_from_user)
CFI_STARTPROC
GET_THREAD_INFO(%rax) GET_THREAD_INFO(%rax)
movq %rsi,%rcx movq %rsi,%rcx
addq %rdx,%rcx addq %rdx,%rcx
...@@ -47,14 +43,12 @@ ENTRY(_copy_from_user) ...@@ -47,14 +43,12 @@ ENTRY(_copy_from_user)
X86_FEATURE_REP_GOOD, \ X86_FEATURE_REP_GOOD, \
"jmp copy_user_enhanced_fast_string", \ "jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS X86_FEATURE_ERMS
CFI_ENDPROC
ENDPROC(_copy_from_user) ENDPROC(_copy_from_user)
.section .fixup,"ax" .section .fixup,"ax"
/* must zero dest */ /* must zero dest */
ENTRY(bad_from_user) ENTRY(bad_from_user)
bad_from_user: bad_from_user:
CFI_STARTPROC
movl %edx,%ecx movl %edx,%ecx
xorl %eax,%eax xorl %eax,%eax
rep rep
...@@ -62,7 +56,6 @@ bad_from_user: ...@@ -62,7 +56,6 @@ bad_from_user:
bad_to_user: bad_to_user:
movl %edx,%eax movl %edx,%eax
ret ret
CFI_ENDPROC
ENDPROC(bad_from_user) ENDPROC(bad_from_user)
.previous .previous
...@@ -80,7 +73,6 @@ ENDPROC(bad_from_user) ...@@ -80,7 +73,6 @@ ENDPROC(bad_from_user)
* eax uncopied bytes or 0 if successful. * eax uncopied bytes or 0 if successful.
*/ */
ENTRY(copy_user_generic_unrolled) ENTRY(copy_user_generic_unrolled)
CFI_STARTPROC
ASM_STAC ASM_STAC
cmpl $8,%edx cmpl $8,%edx
jb 20f /* less than 8 bytes, go to byte copy loop */ jb 20f /* less than 8 bytes, go to byte copy loop */
...@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled) ...@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE(19b,40b) _ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b) _ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b) _ASM_EXTABLE(22b,50b)
CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled) ENDPROC(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions. /* Some CPUs run faster using the string copy instructions.
...@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled) ...@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled)
* eax uncopied bytes or 0 if successful. * eax uncopied bytes or 0 if successful.
*/ */
ENTRY(copy_user_generic_string) ENTRY(copy_user_generic_string)
CFI_STARTPROC
ASM_STAC ASM_STAC
cmpl $8,%edx cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */ jb 2f /* less than 8 bytes, go to byte copy loop */
...@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string) ...@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string)
_ASM_EXTABLE(1b,11b) _ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b) _ASM_EXTABLE(3b,12b)
CFI_ENDPROC
ENDPROC(copy_user_generic_string) ENDPROC(copy_user_generic_string)
/* /*
...@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string) ...@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string)
* eax uncopied bytes or 0 if successful. * eax uncopied bytes or 0 if successful.
*/ */
ENTRY(copy_user_enhanced_fast_string) ENTRY(copy_user_enhanced_fast_string)
CFI_STARTPROC
ASM_STAC ASM_STAC
movl %edx,%ecx movl %edx,%ecx
1: rep 1: rep
...@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string) ...@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string)
.previous .previous
_ASM_EXTABLE(1b,12b) _ASM_EXTABLE(1b,12b)
CFI_ENDPROC
ENDPROC(copy_user_enhanced_fast_string) ENDPROC(copy_user_enhanced_fast_string)
/* /*
...@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string) ...@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string)
* This will force destination/source out of cache for more performance. * This will force destination/source out of cache for more performance.
*/ */
ENTRY(__copy_user_nocache) ENTRY(__copy_user_nocache)
CFI_STARTPROC
ASM_STAC ASM_STAC
cmpl $8,%edx cmpl $8,%edx
jb 20f /* less than 8 bytes, go to byte copy loop */ jb 20f /* less than 8 bytes, go to byte copy loop */
...@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache) ...@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache)
_ASM_EXTABLE(19b,40b) _ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b) _ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b) _ASM_EXTABLE(22b,50b)
CFI_ENDPROC
ENDPROC(__copy_user_nocache) ENDPROC(__copy_user_nocache)
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* for more details. No warranty for anything given at all. * for more details. No warranty for anything given at all.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -47,23 +46,16 @@ ...@@ -47,23 +46,16 @@
ENTRY(csum_partial_copy_generic) ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
cmpl $3*64, %edx cmpl $3*64, %edx
jle .Lignore jle .Lignore
.Lignore: .Lignore:
subq $7*8, %rsp subq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET 7*8
movq %rbx, 2*8(%rsp) movq %rbx, 2*8(%rsp)
CFI_REL_OFFSET rbx, 2*8
movq %r12, 3*8(%rsp) movq %r12, 3*8(%rsp)
CFI_REL_OFFSET r12, 3*8
movq %r14, 4*8(%rsp) movq %r14, 4*8(%rsp)
CFI_REL_OFFSET r14, 4*8
movq %r13, 5*8(%rsp) movq %r13, 5*8(%rsp)
CFI_REL_OFFSET r13, 5*8
movq %rbp, 6*8(%rsp) movq %rbp, 6*8(%rsp)
CFI_REL_OFFSET rbp, 6*8
movq %r8, (%rsp) movq %r8, (%rsp)
movq %r9, 1*8(%rsp) movq %r9, 1*8(%rsp)
...@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic) ...@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic)
addl %ebx, %eax addl %ebx, %eax
adcl %r9d, %eax /* carry */ adcl %r9d, %eax /* carry */
CFI_REMEMBER_STATE
.Lende: .Lende:
movq 2*8(%rsp), %rbx movq 2*8(%rsp), %rbx
CFI_RESTORE rbx
movq 3*8(%rsp), %r12 movq 3*8(%rsp), %r12
CFI_RESTORE r12
movq 4*8(%rsp), %r14 movq 4*8(%rsp), %r14
CFI_RESTORE r14
movq 5*8(%rsp), %r13 movq 5*8(%rsp), %r13
CFI_RESTORE r13
movq 6*8(%rsp), %rbp movq 6*8(%rsp), %rbp
CFI_RESTORE rbp
addq $7*8, %rsp addq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET -7*8
ret ret
CFI_RESTORE_STATE
/* Exception handlers. Very simple, zeroing is done in the wrappers */ /* Exception handlers. Very simple, zeroing is done in the wrappers */
.Lbad_source: .Lbad_source:
...@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic) ...@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic)
jz .Lende jz .Lende
movl $-EFAULT, (%rax) movl $-EFAULT, (%rax)
jmp .Lende jmp .Lende
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic) ENDPROC(csum_partial_copy_generic)
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/page_types.h> #include <asm/page_types.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -36,7 +35,6 @@ ...@@ -36,7 +35,6 @@
.text .text
ENTRY(__get_user_1) ENTRY(__get_user_1)
CFI_STARTPROC
GET_THREAD_INFO(%_ASM_DX) GET_THREAD_INFO(%_ASM_DX)
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user jae bad_get_user
...@@ -45,11 +43,9 @@ ENTRY(__get_user_1) ...@@ -45,11 +43,9 @@ ENTRY(__get_user_1)
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
ENDPROC(__get_user_1) ENDPROC(__get_user_1)
ENTRY(__get_user_2) ENTRY(__get_user_2)
CFI_STARTPROC
add $1,%_ASM_AX add $1,%_ASM_AX
jc bad_get_user jc bad_get_user
GET_THREAD_INFO(%_ASM_DX) GET_THREAD_INFO(%_ASM_DX)
...@@ -60,11 +56,9 @@ ENTRY(__get_user_2) ...@@ -60,11 +56,9 @@ ENTRY(__get_user_2)
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
ENDPROC(__get_user_2) ENDPROC(__get_user_2)
ENTRY(__get_user_4) ENTRY(__get_user_4)
CFI_STARTPROC
add $3,%_ASM_AX add $3,%_ASM_AX
jc bad_get_user jc bad_get_user
GET_THREAD_INFO(%_ASM_DX) GET_THREAD_INFO(%_ASM_DX)
...@@ -75,11 +69,9 @@ ENTRY(__get_user_4) ...@@ -75,11 +69,9 @@ ENTRY(__get_user_4)
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
ENDPROC(__get_user_4) ENDPROC(__get_user_4)
ENTRY(__get_user_8) ENTRY(__get_user_8)
CFI_STARTPROC
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
add $7,%_ASM_AX add $7,%_ASM_AX
jc bad_get_user jc bad_get_user
...@@ -104,28 +96,23 @@ ENTRY(__get_user_8) ...@@ -104,28 +96,23 @@ ENTRY(__get_user_8)
ASM_CLAC ASM_CLAC
ret ret
#endif #endif
CFI_ENDPROC
ENDPROC(__get_user_8) ENDPROC(__get_user_8)
bad_get_user: bad_get_user:
CFI_STARTPROC
xor %edx,%edx xor %edx,%edx
mov $(-EFAULT),%_ASM_AX mov $(-EFAULT),%_ASM_AX
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
END(bad_get_user) END(bad_get_user)
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
bad_get_user_8: bad_get_user_8:
CFI_STARTPROC
xor %edx,%edx xor %edx,%edx
xor %ecx,%ecx xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX mov $(-EFAULT),%_ASM_AX
ASM_CLAC ASM_CLAC
ret ret
CFI_ENDPROC
END(bad_get_user_8) END(bad_get_user_8)
#endif #endif
......
...@@ -16,15 +16,12 @@ ...@@ -16,15 +16,12 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
/* /*
* override generic version in lib/iomap_copy.c * override generic version in lib/iomap_copy.c
*/ */
ENTRY(__iowrite32_copy) ENTRY(__iowrite32_copy)
CFI_STARTPROC
movl %edx,%ecx movl %edx,%ecx
rep movsd rep movsd
ret ret
CFI_ENDPROC
ENDPROC(__iowrite32_copy) ENDPROC(__iowrite32_copy)
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
/* /*
...@@ -53,7 +52,6 @@ ENTRY(memcpy_erms) ...@@ -53,7 +52,6 @@ ENTRY(memcpy_erms)
ENDPROC(memcpy_erms) ENDPROC(memcpy_erms)
ENTRY(memcpy_orig) ENTRY(memcpy_orig)
CFI_STARTPROC
movq %rdi, %rax movq %rdi, %rax
cmpq $0x20, %rdx cmpq $0x20, %rdx
...@@ -178,5 +176,4 @@ ENTRY(memcpy_orig) ...@@ -178,5 +176,4 @@ ENTRY(memcpy_orig)
.Lend: .Lend:
retq retq
CFI_ENDPROC
ENDPROC(memcpy_orig) ENDPROC(memcpy_orig)
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com> * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -27,7 +26,6 @@ ...@@ -27,7 +26,6 @@
ENTRY(memmove) ENTRY(memmove)
ENTRY(__memmove) ENTRY(__memmove)
CFI_STARTPROC
/* Handle more than 32 bytes in a loop */ /* Handle more than 32 bytes in a loop */
mov %rdi, %rax mov %rdi, %rax
...@@ -207,6 +205,5 @@ ENTRY(__memmove) ...@@ -207,6 +205,5 @@ ENTRY(__memmove)
movb %r11b, (%rdi) movb %r11b, (%rdi)
13: 13:
retq retq
CFI_ENDPROC
ENDPROC(__memmove) ENDPROC(__memmove)
ENDPROC(memmove) ENDPROC(memmove)
/* Copyright 2002 Andi Kleen, SuSE Labs */ /* Copyright 2002 Andi Kleen, SuSE Labs */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
...@@ -66,7 +65,6 @@ ENTRY(memset_erms) ...@@ -66,7 +65,6 @@ ENTRY(memset_erms)
ENDPROC(memset_erms) ENDPROC(memset_erms)
ENTRY(memset_orig) ENTRY(memset_orig)
CFI_STARTPROC
movq %rdi,%r10 movq %rdi,%r10
/* expand byte value */ /* expand byte value */
...@@ -78,7 +76,6 @@ ENTRY(memset_orig) ...@@ -78,7 +76,6 @@ ENTRY(memset_orig)
movl %edi,%r9d movl %edi,%r9d
andl $7,%r9d andl $7,%r9d
jnz .Lbad_alignment jnz .Lbad_alignment
CFI_REMEMBER_STATE
.Lafter_bad_alignment: .Lafter_bad_alignment:
movq %rdx,%rcx movq %rdx,%rcx
...@@ -128,7 +125,6 @@ ENTRY(memset_orig) ...@@ -128,7 +125,6 @@ ENTRY(memset_orig)
movq %r10,%rax movq %r10,%rax
ret ret
CFI_RESTORE_STATE
.Lbad_alignment: .Lbad_alignment:
cmpq $7,%rdx cmpq $7,%rdx
jbe .Lhandle_7 jbe .Lhandle_7
...@@ -139,5 +135,4 @@ ENTRY(memset_orig) ...@@ -139,5 +135,4 @@ ENTRY(memset_orig)
subq %r8,%rdx subq %r8,%rdx
jmp .Lafter_bad_alignment jmp .Lafter_bad_alignment
.Lfinal: .Lfinal:
CFI_ENDPROC
ENDPROC(memset_orig) ENDPROC(memset_orig)
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <asm/dwarf2.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/msr.h> #include <asm/msr.h>
...@@ -13,9 +12,8 @@ ...@@ -13,9 +12,8 @@
*/ */
.macro op_safe_regs op .macro op_safe_regs op
ENTRY(\op\()_safe_regs) ENTRY(\op\()_safe_regs)
CFI_STARTPROC pushq %rbx
pushq_cfi_reg rbx pushq %rbp
pushq_cfi_reg rbp
movq %rdi, %r10 /* Save pointer */ movq %rdi, %r10 /* Save pointer */
xorl %r11d, %r11d /* Return value */ xorl %r11d, %r11d /* Return value */
movl (%rdi), %eax movl (%rdi), %eax
...@@ -25,7 +23,6 @@ ENTRY(\op\()_safe_regs) ...@@ -25,7 +23,6 @@ ENTRY(\op\()_safe_regs)
movl 20(%rdi), %ebp movl 20(%rdi), %ebp
movl 24(%rdi), %esi movl 24(%rdi), %esi
movl 28(%rdi), %edi movl 28(%rdi), %edi
CFI_REMEMBER_STATE
1: \op 1: \op
2: movl %eax, (%r10) 2: movl %eax, (%r10)
movl %r11d, %eax /* Return value */ movl %r11d, %eax /* Return value */
...@@ -35,16 +32,14 @@ ENTRY(\op\()_safe_regs) ...@@ -35,16 +32,14 @@ ENTRY(\op\()_safe_regs)
movl %ebp, 20(%r10) movl %ebp, 20(%r10)
movl %esi, 24(%r10) movl %esi, 24(%r10)
movl %edi, 28(%r10) movl %edi, 28(%r10)
popq_cfi_reg rbp popq %rbp
popq_cfi_reg rbx popq %rbx
ret ret
3: 3:
CFI_RESTORE_STATE
movl $-EIO, %r11d movl $-EIO, %r11d
jmp 2b jmp 2b
_ASM_EXTABLE(1b, 3b) _ASM_EXTABLE(1b, 3b)
CFI_ENDPROC
ENDPROC(\op\()_safe_regs) ENDPROC(\op\()_safe_regs)
.endm .endm
...@@ -52,13 +47,12 @@ ENDPROC(\op\()_safe_regs) ...@@ -52,13 +47,12 @@ ENDPROC(\op\()_safe_regs)
.macro op_safe_regs op .macro op_safe_regs op
ENTRY(\op\()_safe_regs) ENTRY(\op\()_safe_regs)
CFI_STARTPROC pushl %ebx
pushl_cfi_reg ebx pushl %ebp
pushl_cfi_reg ebp pushl %esi
pushl_cfi_reg esi pushl %edi
pushl_cfi_reg edi pushl $0 /* Return value */
pushl_cfi $0 /* Return value */ pushl %eax
pushl_cfi %eax
movl 4(%eax), %ecx movl 4(%eax), %ecx
movl 8(%eax), %edx movl 8(%eax), %edx
movl 12(%eax), %ebx movl 12(%eax), %ebx
...@@ -66,32 +60,28 @@ ENTRY(\op\()_safe_regs) ...@@ -66,32 +60,28 @@ ENTRY(\op\()_safe_regs)
movl 24(%eax), %esi movl 24(%eax), %esi
movl 28(%eax), %edi movl 28(%eax), %edi
movl (%eax), %eax movl (%eax), %eax
CFI_REMEMBER_STATE
1: \op 1: \op
2: pushl_cfi %eax 2: pushl %eax
movl 4(%esp), %eax movl 4(%esp), %eax
popl_cfi (%eax) popl (%eax)
addl $4, %esp addl $4, %esp
CFI_ADJUST_CFA_OFFSET -4
movl %ecx, 4(%eax) movl %ecx, 4(%eax)
movl %edx, 8(%eax) movl %edx, 8(%eax)
movl %ebx, 12(%eax) movl %ebx, 12(%eax)
movl %ebp, 20(%eax) movl %ebp, 20(%eax)
movl %esi, 24(%eax) movl %esi, 24(%eax)
movl %edi, 28(%eax) movl %edi, 28(%eax)
popl_cfi %eax popl %eax
popl_cfi_reg edi popl %edi
popl_cfi_reg esi popl %esi
popl_cfi_reg ebp popl %ebp
popl_cfi_reg ebx popl %ebx
ret ret
3: 3:
CFI_RESTORE_STATE
movl $-EIO, 4(%esp) movl $-EIO, 4(%esp)
jmp 2b jmp 2b
_ASM_EXTABLE(1b, 3b) _ASM_EXTABLE(1b, 3b)
CFI_ENDPROC
ENDPROC(\op\()_safe_regs) ENDPROC(\op\()_safe_regs)
.endm .endm
......
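The op_safe_regs macro above generates rdmsr_safe_regs() and wrmsr_safe_regs(), which pass a full register image through a u32[8] array (offsets 0..28 map to eax, ecx, edx, ebx, esp, ebp, esi, edi) and return 0, or -EIO if the MSR access faults. A hedged sketch of a caller, with the wrapper name purely illustrative:

	#include <linux/types.h>
	#include <asm/msr.h>

	/*
	 * Illustrative wrapper: read an MSR through the register-image
	 * interface.  regs[] indices follow the 0..28(%rdi) offsets in
	 * the macro: [0]=eax [1]=ecx [2]=edx [3]=ebx [4]=esp (unused)
	 * [5]=ebp [6]=esi [7]=edi.
	 */
	static int read_msr_image(u32 msr, u64 *val)
	{
		u32 regs[8] = { 0 };
		int err;

		regs[1] = msr;			/* MSR index goes in ecx */
		err = rdmsr_safe_regs(regs);	/* 0 on success, -EIO on #GP */
		if (!err)
			*val = ((u64)regs[2] << 32) | regs[0];	/* edx:eax */
		return err;
	}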
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
* return value. * return value.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -30,11 +29,9 @@ ...@@ -30,11 +29,9 @@
* as they get called from within inline assembly. * as they get called from within inline assembly.
*/ */
#define ENTER CFI_STARTPROC ; \ #define ENTER GET_THREAD_INFO(%_ASM_BX)
GET_THREAD_INFO(%_ASM_BX)
#define EXIT ASM_CLAC ; \ #define EXIT ASM_CLAC ; \
ret ; \ ret
CFI_ENDPROC
.text .text
ENTRY(__put_user_1) ENTRY(__put_user_1)
...@@ -87,7 +84,6 @@ ENTRY(__put_user_8) ...@@ -87,7 +84,6 @@ ENTRY(__put_user_8)
ENDPROC(__put_user_8) ENDPROC(__put_user_8)
bad_put_user: bad_put_user:
CFI_STARTPROC
movl $-EFAULT,%eax movl $-EFAULT,%eax
EXIT EXIT
END(bad_put_user) END(bad_put_user)
......
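__put_user_1/2/4/8 are the out-of-line targets of the put_user() macro; inline assembly at the call site places the value and the user pointer in fixed registers, which is why ENTER/EXIT previously had to open and close the CFI region themselves. From C the interface is simply put_user(), as in this sketch (the function and its arguments are illustrative):

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	/*
	 * Illustrative: store one int at a user-supplied address.
	 * put_user() returns 0 on success or -EFAULT if the user
	 * address is not writable; for an int it reaches __put_user_4.
	 */
	static long report_value(int __user *uptr, int value)
	{
		if (put_user(value, uptr))
			return -EFAULT;
		return 0;
	}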
...@@ -15,7 +15,6 @@ ...@@ -15,7 +15,6 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg) #define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg)
#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l) #define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l)
...@@ -34,10 +33,10 @@ ...@@ -34,10 +33,10 @@
*/ */
#define save_common_regs \ #define save_common_regs \
pushl_cfi_reg ecx pushl %ecx
#define restore_common_regs \ #define restore_common_regs \
popl_cfi_reg ecx popl %ecx
/* Avoid uglifying the argument copying x86-64 needs to do. */ /* Avoid uglifying the argument copying x86-64 needs to do. */
.macro movq src, dst .macro movq src, dst
...@@ -64,50 +63,45 @@ ...@@ -64,50 +63,45 @@
*/ */
#define save_common_regs \ #define save_common_regs \
pushq_cfi_reg rdi; \ pushq %rdi; \
pushq_cfi_reg rsi; \ pushq %rsi; \
pushq_cfi_reg rcx; \ pushq %rcx; \
pushq_cfi_reg r8; \ pushq %r8; \
pushq_cfi_reg r9; \ pushq %r9; \
pushq_cfi_reg r10; \ pushq %r10; \
pushq_cfi_reg r11 pushq %r11
#define restore_common_regs \ #define restore_common_regs \
popq_cfi_reg r11; \ popq %r11; \
popq_cfi_reg r10; \ popq %r10; \
popq_cfi_reg r9; \ popq %r9; \
popq_cfi_reg r8; \ popq %r8; \
popq_cfi_reg rcx; \ popq %rcx; \
popq_cfi_reg rsi; \ popq %rsi; \
popq_cfi_reg rdi popq %rdi
#endif #endif
/* Fix up special calling conventions */ /* Fix up special calling conventions */
ENTRY(call_rwsem_down_read_failed) ENTRY(call_rwsem_down_read_failed)
CFI_STARTPROC
save_common_regs save_common_regs
__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx) __ASM_SIZE(push,) %__ASM_REG(dx)
movq %rax,%rdi movq %rax,%rdi
call rwsem_down_read_failed call rwsem_down_read_failed
__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx) __ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs restore_common_regs
ret ret
CFI_ENDPROC
ENDPROC(call_rwsem_down_read_failed) ENDPROC(call_rwsem_down_read_failed)
ENTRY(call_rwsem_down_write_failed) ENTRY(call_rwsem_down_write_failed)
CFI_STARTPROC
save_common_regs save_common_regs
movq %rax,%rdi movq %rax,%rdi
call rwsem_down_write_failed call rwsem_down_write_failed
restore_common_regs restore_common_regs
ret ret
CFI_ENDPROC
ENDPROC(call_rwsem_down_write_failed) ENDPROC(call_rwsem_down_write_failed)
ENTRY(call_rwsem_wake) ENTRY(call_rwsem_wake)
CFI_STARTPROC
/* do nothing if still outstanding active readers */ /* do nothing if still outstanding active readers */
__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx) __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
jnz 1f jnz 1f
...@@ -116,17 +110,14 @@ ENTRY(call_rwsem_wake) ...@@ -116,17 +110,14 @@ ENTRY(call_rwsem_wake)
call rwsem_wake call rwsem_wake
restore_common_regs restore_common_regs
1: ret 1: ret
CFI_ENDPROC
ENDPROC(call_rwsem_wake) ENDPROC(call_rwsem_wake)
ENTRY(call_rwsem_downgrade_wake) ENTRY(call_rwsem_downgrade_wake)
CFI_STARTPROC
save_common_regs save_common_regs
__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx) __ASM_SIZE(push,) %__ASM_REG(dx)
movq %rax,%rdi movq %rax,%rdi
call rwsem_downgrade_wake call rwsem_downgrade_wake
__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx) __ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs restore_common_regs
ret ret
CFI_ENDPROC
ENDPROC(call_rwsem_downgrade_wake) ENDPROC(call_rwsem_downgrade_wake)
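These thunks exist because the rw-semaphore fast path is inline assembly that promises not to clobber the usual scratch registers; on contention the thunk saves them, calls the C slow path (e.g. rwsem_down_read_failed), and restores them. At the C level the interface is just the normal rwsem API, sketched here with an illustrative semaphore and counter:

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(example_sem);	/* illustrative lock */
	static int example_counter;		/* illustrative shared data */

	/*
	 * Readers take the lock shared; when the inline-asm fast path
	 * in down_read() loses the race, it lands in
	 * call_rwsem_down_read_failed above.
	 */
	static int read_counter(void)
	{
		int v;

		down_read(&example_sem);
		v = example_counter;
		up_read(&example_sem);
		return v;
	}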
...@@ -8,7 +8,6 @@ ...@@ -8,7 +8,6 @@
* of the License. * of the License.
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/dwarf2.h>
/* /*
* Calling convention : * Calling convention :
......
...@@ -26,7 +26,7 @@ else ...@@ -26,7 +26,7 @@ else
obj-y += syscalls_64.o vdso/ obj-y += syscalls_64.o vdso/
subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../lib/thunk_64.o \ subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o \
../lib/rwsem.o ../lib/rwsem.o
endif endif
......
...@@ -114,7 +114,7 @@ RELOC(xen_sysret32, 1b+1) ...@@ -114,7 +114,7 @@ RELOC(xen_sysret32, 1b+1)
/* Normal 64-bit system call target */ /* Normal 64-bit system call target */
ENTRY(xen_syscall_target) ENTRY(xen_syscall_target)
undo_xen_syscall undo_xen_syscall
jmp system_call_after_swapgs jmp entry_SYSCALL_64_after_swapgs
ENDPROC(xen_syscall_target) ENDPROC(xen_syscall_target)
#ifdef CONFIG_IA32_EMULATION #ifdef CONFIG_IA32_EMULATION
...@@ -122,13 +122,13 @@ ENDPROC(xen_syscall_target) ...@@ -122,13 +122,13 @@ ENDPROC(xen_syscall_target)
/* 32-bit compat syscall target */ /* 32-bit compat syscall target */
ENTRY(xen_syscall32_target) ENTRY(xen_syscall32_target)
undo_xen_syscall undo_xen_syscall
jmp ia32_cstar_target jmp entry_SYSCALL_compat
ENDPROC(xen_syscall32_target) ENDPROC(xen_syscall32_target)
/* 32-bit compat sysenter target */ /* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target) ENTRY(xen_sysenter_target)
undo_xen_syscall undo_xen_syscall
jmp ia32_sysenter_target jmp entry_SYSENTER_compat
ENDPROC(xen_sysenter_target) ENDPROC(xen_sysenter_target)
#else /* !CONFIG_IA32_EMULATION */ #else /* !CONFIG_IA32_EMULATION */
......
...@@ -212,5 +212,5 @@ EOF ...@@ -212,5 +212,5 @@ EOF
) )
} }
(ignore_list && syscall_list $(dirname $0)/../arch/x86/syscalls/syscall_32.tbl) | \ (ignore_list && syscall_list $(dirname $0)/../arch/x86/entry/syscalls/syscall_32.tbl) | \
$* -E -x c - > /dev/null $* -E -x c - > /dev/null