Commit 6d685e53 authored by Jiri Slaby's avatar Jiri Slaby Committed by Borislav Petkov

x86/asm/32: Change all ENTRY+ENDPROC to SYM_FUNC_*

These are all functions which are invoked from elsewhere, so annotate
them as global using the new SYM_FUNC_START macro, and annotate their
corresponding ENDPROCs with SYM_FUNC_END.

Now, ENTRY/ENDPROC can be forced to be undefined on X86, so do so.
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Allison Randal <allison@lohutok.net>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Andy Shevchenko <andy@infradead.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Bill Metzenthen <billm@melbpc.org.au>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-crypto@vger.kernel.org
Cc: linux-efi <linux-efi@vger.kernel.org>
Cc: linux-efi@vger.kernel.org
Cc: linux-pm@vger.kernel.org
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: platform-driver-x86@vger.kernel.org
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20191011115108.12392-28-jslaby@suse.cz
parent 5e63306f
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
*/ */
.text .text
ENTRY(efi_call_phys) SYM_FUNC_START(efi_call_phys)
/* /*
* 0. The function can only be called in Linux kernel. So CS has been * 0. The function can only be called in Linux kernel. So CS has been
* set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
...@@ -77,7 +77,7 @@ ENTRY(efi_call_phys) ...@@ -77,7 +77,7 @@ ENTRY(efi_call_phys)
movl saved_return_addr(%edx), %ecx movl saved_return_addr(%edx), %ecx
pushl %ecx pushl %ecx
ret ret
ENDPROC(efi_call_phys) SYM_FUNC_END(efi_call_phys)
.previous .previous
.data .data
......
...@@ -61,7 +61,7 @@ ...@@ -61,7 +61,7 @@
.hidden _egot .hidden _egot
__HEAD __HEAD
ENTRY(startup_32) SYM_FUNC_START(startup_32)
cld cld
/* /*
* Test KEEP_SEGMENTS flag to see if the bootloader is asking * Test KEEP_SEGMENTS flag to see if the bootloader is asking
...@@ -142,14 +142,14 @@ ENTRY(startup_32) ...@@ -142,14 +142,14 @@ ENTRY(startup_32)
*/ */
leal .Lrelocated(%ebx), %eax leal .Lrelocated(%ebx), %eax
jmp *%eax jmp *%eax
ENDPROC(startup_32) SYM_FUNC_END(startup_32)
#ifdef CONFIG_EFI_STUB #ifdef CONFIG_EFI_STUB
/* /*
* We don't need the return address, so set up the stack so efi_main() can find * We don't need the return address, so set up the stack so efi_main() can find
* its arguments. * its arguments.
*/ */
ENTRY(efi_pe_entry) SYM_FUNC_START(efi_pe_entry)
add $0x4, %esp add $0x4, %esp
call 1f call 1f
...@@ -174,9 +174,9 @@ ENTRY(efi_pe_entry) ...@@ -174,9 +174,9 @@ ENTRY(efi_pe_entry)
pushl %eax pushl %eax
pushl %ecx pushl %ecx
jmp 2f /* Skip efi_config initialization */ jmp 2f /* Skip efi_config initialization */
ENDPROC(efi_pe_entry) SYM_FUNC_END(efi_pe_entry)
ENTRY(efi32_stub_entry) SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp add $0x4, %esp
popl %ecx popl %ecx
popl %edx popl %edx
...@@ -205,7 +205,7 @@ fail: ...@@ -205,7 +205,7 @@ fail:
movl BP_code32_start(%esi), %eax movl BP_code32_start(%esi), %eax
leal startup_32(%eax), %eax leal startup_32(%eax), %eax
jmp *%eax jmp *%eax
ENDPROC(efi32_stub_entry) SYM_FUNC_END(efi32_stub_entry)
#endif #endif
.text .text
......
...@@ -497,7 +497,7 @@ ...@@ -497,7 +497,7 @@
pxor t0, x3; \ pxor t0, x3; \
movdqu x3, (3*4*4)(out); movdqu x3, (3*4*4)(out);
ENTRY(__serpent_enc_blk_4way) SYM_FUNC_START(__serpent_enc_blk_4way)
/* input: /* input:
* arg_ctx(%esp): ctx, CTX * arg_ctx(%esp): ctx, CTX
* arg_dst(%esp): dst * arg_dst(%esp): dst
...@@ -559,9 +559,9 @@ ENTRY(__serpent_enc_blk_4way) ...@@ -559,9 +559,9 @@ ENTRY(__serpent_enc_blk_4way)
xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
ret; ret;
ENDPROC(__serpent_enc_blk_4way) SYM_FUNC_END(__serpent_enc_blk_4way)
ENTRY(serpent_dec_blk_4way) SYM_FUNC_START(serpent_dec_blk_4way)
/* input: /* input:
* arg_ctx(%esp): ctx, CTX * arg_ctx(%esp): ctx, CTX
* arg_dst(%esp): dst * arg_dst(%esp): dst
...@@ -613,4 +613,4 @@ ENTRY(serpent_dec_blk_4way) ...@@ -613,4 +613,4 @@ ENTRY(serpent_dec_blk_4way)
write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA); write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA);
ret; ret;
ENDPROC(serpent_dec_blk_4way) SYM_FUNC_END(serpent_dec_blk_4way)
...@@ -207,7 +207,7 @@ ...@@ -207,7 +207,7 @@
xor %esi, d ## D;\ xor %esi, d ## D;\
ror $1, d ## D; ror $1, d ## D;
ENTRY(twofish_enc_blk) SYM_FUNC_START(twofish_enc_blk)
push %ebp /* save registers according to calling convention*/ push %ebp /* save registers according to calling convention*/
push %ebx push %ebx
push %esi push %esi
...@@ -261,9 +261,9 @@ ENTRY(twofish_enc_blk) ...@@ -261,9 +261,9 @@ ENTRY(twofish_enc_blk)
pop %ebp pop %ebp
mov $1, %eax mov $1, %eax
ret ret
ENDPROC(twofish_enc_blk) SYM_FUNC_END(twofish_enc_blk)
ENTRY(twofish_dec_blk) SYM_FUNC_START(twofish_dec_blk)
push %ebp /* save registers according to calling convention*/ push %ebp /* save registers according to calling convention*/
push %ebx push %ebx
push %esi push %esi
...@@ -318,4 +318,4 @@ ENTRY(twofish_dec_blk) ...@@ -318,4 +318,4 @@ ENTRY(twofish_dec_blk)
pop %ebp pop %ebp
mov $1, %eax mov $1, %eax
ret ret
ENDPROC(twofish_dec_blk) SYM_FUNC_END(twofish_dec_blk)
...@@ -757,7 +757,7 @@ SYM_CODE_END(__switch_to_asm) ...@@ -757,7 +757,7 @@ SYM_CODE_END(__switch_to_asm)
* asmlinkage function so its argument has to be pushed on the stack. This * asmlinkage function so its argument has to be pushed on the stack. This
* wrapper creates a proper "end of stack" frame header before the call. * wrapper creates a proper "end of stack" frame header before the call.
*/ */
ENTRY(schedule_tail_wrapper) SYM_FUNC_START(schedule_tail_wrapper)
FRAME_BEGIN FRAME_BEGIN
pushl %eax pushl %eax
...@@ -766,7 +766,7 @@ ENTRY(schedule_tail_wrapper) ...@@ -766,7 +766,7 @@ ENTRY(schedule_tail_wrapper)
FRAME_END FRAME_END
ret ret
ENDPROC(schedule_tail_wrapper) SYM_FUNC_END(schedule_tail_wrapper)
/* /*
* A newly forked process directly context switches into this address. * A newly forked process directly context switches into this address.
* *
...@@ -885,7 +885,7 @@ SYM_CODE_END(xen_sysenter_target) ...@@ -885,7 +885,7 @@ SYM_CODE_END(xen_sysenter_target)
* ebp user stack * ebp user stack
* 0(%ebp) arg6 * 0(%ebp) arg6
*/ */
ENTRY(entry_SYSENTER_32) SYM_FUNC_START(entry_SYSENTER_32)
/* /*
* On entry-stack with all userspace-regs live - save and * On entry-stack with all userspace-regs live - save and
* restore eflags and %eax to use it as scratch-reg for the cr3 * restore eflags and %eax to use it as scratch-reg for the cr3
...@@ -1013,7 +1013,7 @@ ENTRY(entry_SYSENTER_32) ...@@ -1013,7 +1013,7 @@ ENTRY(entry_SYSENTER_32)
popfl popfl
jmp .Lsysenter_flags_fixed jmp .Lsysenter_flags_fixed
SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
ENDPROC(entry_SYSENTER_32) SYM_FUNC_END(entry_SYSENTER_32)
/* /*
* 32-bit legacy system call entry. * 32-bit legacy system call entry.
...@@ -1043,7 +1043,7 @@ ENDPROC(entry_SYSENTER_32) ...@@ -1043,7 +1043,7 @@ ENDPROC(entry_SYSENTER_32)
* edi arg5 * edi arg5
* ebp arg6 * ebp arg6
*/ */
ENTRY(entry_INT80_32) SYM_FUNC_START(entry_INT80_32)
ASM_CLAC ASM_CLAC
pushl %eax /* pt_regs->orig_ax */ pushl %eax /* pt_regs->orig_ax */
...@@ -1120,7 +1120,7 @@ SYM_CODE_START(iret_exc) ...@@ -1120,7 +1120,7 @@ SYM_CODE_START(iret_exc)
SYM_CODE_END(iret_exc) SYM_CODE_END(iret_exc)
.previous .previous
_ASM_EXTABLE(.Lirq_return, iret_exc) _ASM_EXTABLE(.Lirq_return, iret_exc)
ENDPROC(entry_INT80_32) SYM_FUNC_END(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK .macro FIXUP_ESPFIX_STACK
/* /*
...@@ -1213,7 +1213,7 @@ SYM_CODE_START_LOCAL(common_interrupt) ...@@ -1213,7 +1213,7 @@ SYM_CODE_START_LOCAL(common_interrupt)
SYM_CODE_END(common_interrupt) SYM_CODE_END(common_interrupt)
#define BUILD_INTERRUPT3(name, nr, fn) \ #define BUILD_INTERRUPT3(name, nr, fn) \
ENTRY(name) \ SYM_FUNC_START(name) \
ASM_CLAC; \ ASM_CLAC; \
pushl $~(nr); \ pushl $~(nr); \
SAVE_ALL switch_stacks=1; \ SAVE_ALL switch_stacks=1; \
...@@ -1222,7 +1222,7 @@ ENTRY(name) \ ...@@ -1222,7 +1222,7 @@ ENTRY(name) \
movl %esp, %eax; \ movl %esp, %eax; \
call fn; \ call fn; \
jmp ret_from_intr; \ jmp ret_from_intr; \
ENDPROC(name) SYM_FUNC_END(name)
#define BUILD_INTERRUPT(name, nr) \ #define BUILD_INTERRUPT(name, nr) \
BUILD_INTERRUPT3(name, nr, smp_##name); \ BUILD_INTERRUPT3(name, nr, smp_##name); \
...@@ -1341,7 +1341,7 @@ SYM_CODE_START(spurious_interrupt_bug) ...@@ -1341,7 +1341,7 @@ SYM_CODE_START(spurious_interrupt_bug)
SYM_CODE_END(spurious_interrupt_bug) SYM_CODE_END(spurious_interrupt_bug)
#ifdef CONFIG_XEN_PV #ifdef CONFIG_XEN_PV
ENTRY(xen_hypervisor_callback) SYM_FUNC_START(xen_hypervisor_callback)
pushl $-1 /* orig_ax = -1 => not a system call */ pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL SAVE_ALL
ENCODE_FRAME_POINTER ENCODE_FRAME_POINTER
...@@ -1369,7 +1369,7 @@ SYM_INNER_LABEL_ALIGN(xen_do_upcall, SYM_L_GLOBAL) ...@@ -1369,7 +1369,7 @@ SYM_INNER_LABEL_ALIGN(xen_do_upcall, SYM_L_GLOBAL)
call xen_maybe_preempt_hcall call xen_maybe_preempt_hcall
#endif #endif
jmp ret_from_intr jmp ret_from_intr
ENDPROC(xen_hypervisor_callback) SYM_FUNC_END(xen_hypervisor_callback)
/* /*
* Hypervisor uses this for application faults while it executes. * Hypervisor uses this for application faults while it executes.
...@@ -1383,7 +1383,7 @@ ENDPROC(xen_hypervisor_callback) ...@@ -1383,7 +1383,7 @@ ENDPROC(xen_hypervisor_callback)
* to pop the stack frame we end up in an infinite loop of failsafe callbacks. * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
* We distinguish between categories by maintaining a status value in EAX. * We distinguish between categories by maintaining a status value in EAX.
*/ */
ENTRY(xen_failsafe_callback) SYM_FUNC_START(xen_failsafe_callback)
pushl %eax pushl %eax
movl $1, %eax movl $1, %eax
1: mov 4(%esp), %ds 1: mov 4(%esp), %ds
...@@ -1420,7 +1420,7 @@ ENTRY(xen_failsafe_callback) ...@@ -1420,7 +1420,7 @@ ENTRY(xen_failsafe_callback)
_ASM_EXTABLE(2b, 7b) _ASM_EXTABLE(2b, 7b)
_ASM_EXTABLE(3b, 8b) _ASM_EXTABLE(3b, 8b)
_ASM_EXTABLE(4b, 9b) _ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback) SYM_FUNC_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */ #endif /* CONFIG_XEN_PV */
#ifdef CONFIG_XEN_PVHVM #ifdef CONFIG_XEN_PVHVM
......
...@@ -180,12 +180,12 @@ SYM_CODE_END(startup_32) ...@@ -180,12 +180,12 @@ SYM_CODE_END(startup_32)
* up already except stack. We just set up stack here. Then call * up already except stack. We just set up stack here. Then call
* start_secondary(). * start_secondary().
*/ */
ENTRY(start_cpu0) SYM_FUNC_START(start_cpu0)
movl initial_stack, %ecx movl initial_stack, %ecx
movl %ecx, %esp movl %ecx, %esp
call *(initial_code) call *(initial_code)
1: jmp 1b 1: jmp 1b
ENDPROC(start_cpu0) SYM_FUNC_END(start_cpu0)
#endif #endif
/* /*
...@@ -196,7 +196,7 @@ ENDPROC(start_cpu0) ...@@ -196,7 +196,7 @@ ENDPROC(start_cpu0)
* If cpu hotplug is not supported then this code can go in init section * If cpu hotplug is not supported then this code can go in init section
* which will be freed later * which will be freed later
*/ */
ENTRY(startup_32_smp) SYM_FUNC_START(startup_32_smp)
cld cld
movl $(__BOOT_DS),%eax movl $(__BOOT_DS),%eax
movl %eax,%ds movl %eax,%ds
...@@ -363,7 +363,7 @@ ENTRY(startup_32_smp) ...@@ -363,7 +363,7 @@ ENTRY(startup_32_smp)
call *(initial_code) call *(initial_code)
1: jmp 1b 1: jmp 1b
ENDPROC(startup_32_smp) SYM_FUNC_END(startup_32_smp)
#include "verify_cpu.S" #include "verify_cpu.S"
...@@ -393,7 +393,7 @@ setup_once: ...@@ -393,7 +393,7 @@ setup_once:
andl $0,setup_once_ref /* Once is enough, thanks */ andl $0,setup_once_ref /* Once is enough, thanks */
ret ret
ENTRY(early_idt_handler_array) SYM_FUNC_START(early_idt_handler_array)
# 36(%esp) %eflags # 36(%esp) %eflags
# 32(%esp) %cs # 32(%esp) %cs
# 28(%esp) %eip # 28(%esp) %eip
...@@ -408,7 +408,7 @@ ENTRY(early_idt_handler_array) ...@@ -408,7 +408,7 @@ ENTRY(early_idt_handler_array)
i = i + 1 i = i + 1
.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
.endr .endr
ENDPROC(early_idt_handler_array) SYM_FUNC_END(early_idt_handler_array)
SYM_CODE_START_LOCAL(early_idt_handler_common) SYM_CODE_START_LOCAL(early_idt_handler_common)
/* /*
...@@ -464,7 +464,7 @@ SYM_CODE_START_LOCAL(early_idt_handler_common) ...@@ -464,7 +464,7 @@ SYM_CODE_START_LOCAL(early_idt_handler_common)
SYM_CODE_END(early_idt_handler_common) SYM_CODE_END(early_idt_handler_common)
/* This is the default interrupt "handler" :-) */ /* This is the default interrupt "handler" :-) */
ENTRY(early_ignore_irq) SYM_FUNC_START(early_ignore_irq)
cld cld
#ifdef CONFIG_PRINTK #ifdef CONFIG_PRINTK
pushl %eax pushl %eax
...@@ -499,7 +499,7 @@ ENTRY(early_ignore_irq) ...@@ -499,7 +499,7 @@ ENTRY(early_ignore_irq)
hlt_loop: hlt_loop:
hlt hlt
jmp hlt_loop jmp hlt_loop
ENDPROC(early_ignore_irq) SYM_FUNC_END(early_ignore_irq)
__INITDATA __INITDATA
.align 4 .align 4
......
...@@ -20,10 +20,10 @@ ...@@ -20,10 +20,10 @@
#define BEGIN(op) \ #define BEGIN(op) \
.macro endp; \ .macro endp; \
ENDPROC(atomic64_##op##_386); \ SYM_FUNC_END(atomic64_##op##_386); \
.purgem endp; \ .purgem endp; \
.endm; \ .endm; \
ENTRY(atomic64_##op##_386); \ SYM_FUNC_START(atomic64_##op##_386); \
LOCK v; LOCK v;
#define ENDP endp #define ENDP endp
......
...@@ -16,12 +16,12 @@ ...@@ -16,12 +16,12 @@
cmpxchg8b (\reg) cmpxchg8b (\reg)
.endm .endm
ENTRY(atomic64_read_cx8) SYM_FUNC_START(atomic64_read_cx8)
read64 %ecx read64 %ecx
ret ret
ENDPROC(atomic64_read_cx8) SYM_FUNC_END(atomic64_read_cx8)
ENTRY(atomic64_set_cx8) SYM_FUNC_START(atomic64_set_cx8)
1: 1:
/* we don't need LOCK_PREFIX since aligned 64-bit writes /* we don't need LOCK_PREFIX since aligned 64-bit writes
* are atomic on 586 and newer */ * are atomic on 586 and newer */
...@@ -29,19 +29,19 @@ ENTRY(atomic64_set_cx8) ...@@ -29,19 +29,19 @@ ENTRY(atomic64_set_cx8)
jne 1b jne 1b
ret ret
ENDPROC(atomic64_set_cx8) SYM_FUNC_END(atomic64_set_cx8)
ENTRY(atomic64_xchg_cx8) SYM_FUNC_START(atomic64_xchg_cx8)
1: 1:
LOCK_PREFIX LOCK_PREFIX
cmpxchg8b (%esi) cmpxchg8b (%esi)
jne 1b jne 1b
ret ret
ENDPROC(atomic64_xchg_cx8) SYM_FUNC_END(atomic64_xchg_cx8)
.macro addsub_return func ins insc .macro addsub_return func ins insc
ENTRY(atomic64_\func\()_return_cx8) SYM_FUNC_START(atomic64_\func\()_return_cx8)
pushl %ebp pushl %ebp
pushl %ebx pushl %ebx
pushl %esi pushl %esi
...@@ -69,14 +69,14 @@ ENTRY(atomic64_\func\()_return_cx8) ...@@ -69,14 +69,14 @@ ENTRY(atomic64_\func\()_return_cx8)
popl %ebx popl %ebx
popl %ebp popl %ebp
ret ret
ENDPROC(atomic64_\func\()_return_cx8) SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm .endm
addsub_return add add adc addsub_return add add adc
addsub_return sub sub sbb addsub_return sub sub sbb
.macro incdec_return func ins insc .macro incdec_return func ins insc
ENTRY(atomic64_\func\()_return_cx8) SYM_FUNC_START(atomic64_\func\()_return_cx8)
pushl %ebx pushl %ebx
read64 %esi read64 %esi
...@@ -94,13 +94,13 @@ ENTRY(atomic64_\func\()_return_cx8) ...@@ -94,13 +94,13 @@ ENTRY(atomic64_\func\()_return_cx8)
movl %ecx, %edx movl %ecx, %edx
popl %ebx popl %ebx
ret ret
ENDPROC(atomic64_\func\()_return_cx8) SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm .endm
incdec_return inc add adc incdec_return inc add adc
incdec_return dec sub sbb incdec_return dec sub sbb
ENTRY(atomic64_dec_if_positive_cx8) SYM_FUNC_START(atomic64_dec_if_positive_cx8)
pushl %ebx pushl %ebx
read64 %esi read64 %esi
...@@ -119,9 +119,9 @@ ENTRY(atomic64_dec_if_positive_cx8) ...@@ -119,9 +119,9 @@ ENTRY(atomic64_dec_if_positive_cx8)
movl %ecx, %edx movl %ecx, %edx
popl %ebx popl %ebx
ret ret
ENDPROC(atomic64_dec_if_positive_cx8) SYM_FUNC_END(atomic64_dec_if_positive_cx8)
ENTRY(atomic64_add_unless_cx8) SYM_FUNC_START(atomic64_add_unless_cx8)
pushl %ebp pushl %ebp
pushl %ebx pushl %ebx
/* these just push these two parameters on the stack */ /* these just push these two parameters on the stack */
...@@ -155,9 +155,9 @@ ENTRY(atomic64_add_unless_cx8) ...@@ -155,9 +155,9 @@ ENTRY(atomic64_add_unless_cx8)
jne 2b jne 2b
xorl %eax, %eax xorl %eax, %eax
jmp 3b jmp 3b
ENDPROC(atomic64_add_unless_cx8) SYM_FUNC_END(atomic64_add_unless_cx8)
ENTRY(atomic64_inc_not_zero_cx8) SYM_FUNC_START(atomic64_inc_not_zero_cx8)
pushl %ebx pushl %ebx
read64 %esi read64 %esi
...@@ -177,4 +177,4 @@ ENTRY(atomic64_inc_not_zero_cx8) ...@@ -177,4 +177,4 @@ ENTRY(atomic64_inc_not_zero_cx8)
3: 3:
popl %ebx popl %ebx
ret ret
ENDPROC(atomic64_inc_not_zero_cx8) SYM_FUNC_END(atomic64_inc_not_zero_cx8)
...@@ -46,7 +46,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) ...@@ -46,7 +46,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
* Fortunately, it is easy to convert 2-byte alignment to 4-byte * Fortunately, it is easy to convert 2-byte alignment to 4-byte
* alignment for the unrolled loop. * alignment for the unrolled loop.
*/ */
ENTRY(csum_partial) SYM_FUNC_START(csum_partial)
pushl %esi pushl %esi
pushl %ebx pushl %ebx
movl 20(%esp),%eax # Function arg: unsigned int sum movl 20(%esp),%eax # Function arg: unsigned int sum
...@@ -128,13 +128,13 @@ ENTRY(csum_partial) ...@@ -128,13 +128,13 @@ ENTRY(csum_partial)
popl %ebx popl %ebx
popl %esi popl %esi
ret ret
ENDPROC(csum_partial) SYM_FUNC_END(csum_partial)
#else #else
/* Version for PentiumII/PPro */ /* Version for PentiumII/PPro */
ENTRY(csum_partial) SYM_FUNC_START(csum_partial)
pushl %esi pushl %esi
pushl %ebx pushl %ebx
movl 20(%esp),%eax # Function arg: unsigned int sum movl 20(%esp),%eax # Function arg: unsigned int sum
...@@ -246,7 +246,7 @@ ENTRY(csum_partial) ...@@ -246,7 +246,7 @@ ENTRY(csum_partial)
popl %ebx popl %ebx
popl %esi popl %esi
ret ret
ENDPROC(csum_partial) SYM_FUNC_END(csum_partial)
#endif #endif
EXPORT_SYMBOL(csum_partial) EXPORT_SYMBOL(csum_partial)
......
...@@ -75,7 +75,7 @@ FPU_result_1: ...@@ -75,7 +75,7 @@ FPU_result_1:
.text .text
ENTRY(div_Xsig) SYM_FUNC_START(div_Xsig)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
#ifndef NON_REENTRANT_FPU #ifndef NON_REENTRANT_FPU
...@@ -364,4 +364,4 @@ L_bugged_2: ...@@ -364,4 +364,4 @@ L_bugged_2:
pop %ebx pop %ebx
jmp L_exit jmp L_exit
#endif /* PARANOID */ #endif /* PARANOID */
ENDPROC(div_Xsig) SYM_FUNC_END(div_Xsig)
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include "fpu_emu.h" #include "fpu_emu.h"
.text .text
ENTRY(FPU_div_small) SYM_FUNC_START(FPU_div_small)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
...@@ -45,4 +45,4 @@ ENTRY(FPU_div_small) ...@@ -45,4 +45,4 @@ ENTRY(FPU_div_small)
leave leave
ret ret
ENDPROC(FPU_div_small) SYM_FUNC_END(FPU_div_small)
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#include "fpu_emu.h" #include "fpu_emu.h"
.text .text
ENTRY(mul32_Xsig) SYM_FUNC_START(mul32_Xsig)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
subl $16,%esp subl $16,%esp
...@@ -63,10 +63,10 @@ ENTRY(mul32_Xsig) ...@@ -63,10 +63,10 @@ ENTRY(mul32_Xsig)
popl %esi popl %esi
leave leave
ret ret
ENDPROC(mul32_Xsig) SYM_FUNC_END(mul32_Xsig)
ENTRY(mul64_Xsig) SYM_FUNC_START(mul64_Xsig)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
subl $16,%esp subl $16,%esp
...@@ -116,11 +116,11 @@ ENTRY(mul64_Xsig) ...@@ -116,11 +116,11 @@ ENTRY(mul64_Xsig)
popl %esi popl %esi
leave leave
ret ret
ENDPROC(mul64_Xsig) SYM_FUNC_END(mul64_Xsig)
ENTRY(mul_Xsig_Xsig) SYM_FUNC_START(mul_Xsig_Xsig)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
subl $16,%esp subl $16,%esp
...@@ -176,4 +176,4 @@ ENTRY(mul_Xsig_Xsig) ...@@ -176,4 +176,4 @@ ENTRY(mul_Xsig_Xsig)
popl %esi popl %esi
leave leave
ret ret
ENDPROC(mul_Xsig_Xsig) SYM_FUNC_END(mul_Xsig_Xsig)
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
#define OVERFLOWED -16(%ebp) /* addition overflow flag */ #define OVERFLOWED -16(%ebp) /* addition overflow flag */
.text .text
ENTRY(polynomial_Xsig) SYM_FUNC_START(polynomial_Xsig)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
subl $32,%esp subl $32,%esp
...@@ -134,4 +134,4 @@ L_accum_done: ...@@ -134,4 +134,4 @@ L_accum_done:
popl %esi popl %esi
leave leave
ret ret
ENDPROC(polynomial_Xsig) SYM_FUNC_END(polynomial_Xsig)
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
.text .text
ENTRY(FPU_normalize) SYM_FUNC_START(FPU_normalize)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
pushl %ebx pushl %ebx
...@@ -95,12 +95,12 @@ L_overflow: ...@@ -95,12 +95,12 @@ L_overflow:
call arith_overflow call arith_overflow
pop %ebx pop %ebx
jmp L_exit jmp L_exit
ENDPROC(FPU_normalize) SYM_FUNC_END(FPU_normalize)
/* Normalise without reporting underflow or overflow */ /* Normalise without reporting underflow or overflow */
ENTRY(FPU_normalize_nuo) SYM_FUNC_START(FPU_normalize_nuo)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
pushl %ebx pushl %ebx
...@@ -147,4 +147,4 @@ L_exit_nuo_zero: ...@@ -147,4 +147,4 @@ L_exit_nuo_zero:
popl %ebx popl %ebx
leave leave
ret ret
ENDPROC(FPU_normalize_nuo) SYM_FUNC_END(FPU_normalize_nuo)
...@@ -109,7 +109,7 @@ FPU_denormal: ...@@ -109,7 +109,7 @@ FPU_denormal:
.globl fpu_Arith_exit .globl fpu_Arith_exit
/* Entry point when called from C */ /* Entry point when called from C */
ENTRY(FPU_round) SYM_FUNC_START(FPU_round)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
pushl %esi pushl %esi
...@@ -708,4 +708,4 @@ L_exception_exit: ...@@ -708,4 +708,4 @@ L_exception_exit:
jmp fpu_reg_round_special_exit jmp fpu_reg_round_special_exit
#endif /* PARANOID */ #endif /* PARANOID */
ENDPROC(FPU_round) SYM_FUNC_END(FPU_round)
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
#include "control_w.h" #include "control_w.h"
.text .text
ENTRY(FPU_u_add) SYM_FUNC_START(FPU_u_add)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
pushl %esi pushl %esi
...@@ -166,4 +166,4 @@ L_exit: ...@@ -166,4 +166,4 @@ L_exit:
leave leave
ret ret
#endif /* PARANOID */ #endif /* PARANOID */
ENDPROC(FPU_u_add) SYM_FUNC_END(FPU_u_add)
...@@ -75,7 +75,7 @@ FPU_ovfl_flag: ...@@ -75,7 +75,7 @@ FPU_ovfl_flag:
#define DEST PARAM3 #define DEST PARAM3
.text .text
ENTRY(FPU_u_div) SYM_FUNC_START(FPU_u_div)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
#ifndef NON_REENTRANT_FPU #ifndef NON_REENTRANT_FPU
...@@ -471,4 +471,4 @@ L_exit: ...@@ -471,4 +471,4 @@ L_exit:
ret ret
#endif /* PARANOID */ #endif /* PARANOID */
ENDPROC(FPU_u_div) SYM_FUNC_END(FPU_u_div)
...@@ -45,7 +45,7 @@ FPU_accum_1: ...@@ -45,7 +45,7 @@ FPU_accum_1:
.text .text
ENTRY(FPU_u_mul) SYM_FUNC_START(FPU_u_mul)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
#ifndef NON_REENTRANT_FPU #ifndef NON_REENTRANT_FPU
...@@ -147,4 +147,4 @@ L_exit: ...@@ -147,4 +147,4 @@ L_exit:
ret ret
#endif /* PARANOID */ #endif /* PARANOID */
ENDPROC(FPU_u_mul) SYM_FUNC_END(FPU_u_mul)
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
#include "control_w.h" #include "control_w.h"
.text .text
ENTRY(FPU_u_sub) SYM_FUNC_START(FPU_u_sub)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
pushl %esi pushl %esi
...@@ -271,4 +271,4 @@ L_exit: ...@@ -271,4 +271,4 @@ L_exit:
popl %esi popl %esi
leave leave
ret ret
ENDPROC(FPU_u_sub) SYM_FUNC_END(FPU_u_sub)
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
.text .text
ENTRY(round_Xsig) SYM_FUNC_START(round_Xsig)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
pushl %ebx /* Reserve some space */ pushl %ebx /* Reserve some space */
...@@ -79,11 +79,11 @@ L_exit: ...@@ -79,11 +79,11 @@ L_exit:
popl %ebx popl %ebx
leave leave
ret ret
ENDPROC(round_Xsig) SYM_FUNC_END(round_Xsig)
ENTRY(norm_Xsig) SYM_FUNC_START(norm_Xsig)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
pushl %ebx /* Reserve some space */ pushl %ebx /* Reserve some space */
...@@ -139,4 +139,4 @@ L_n_exit: ...@@ -139,4 +139,4 @@ L_n_exit:
popl %ebx popl %ebx
leave leave
ret ret
ENDPROC(norm_Xsig) SYM_FUNC_END(norm_Xsig)
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
#include "fpu_emu.h" #include "fpu_emu.h"
.text .text
ENTRY(shr_Xsig) SYM_FUNC_START(shr_Xsig)
push %ebp push %ebp
movl %esp,%ebp movl %esp,%ebp
pushl %esi pushl %esi
...@@ -86,4 +86,4 @@ L_more_than_95: ...@@ -86,4 +86,4 @@ L_more_than_95:
popl %esi popl %esi
leave leave
ret ret
ENDPROC(shr_Xsig) SYM_FUNC_END(shr_Xsig)
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
| Results returned in the 64 bit arg and eax. | | Results returned in the 64 bit arg and eax. |
+---------------------------------------------------------------------------*/ +---------------------------------------------------------------------------*/
ENTRY(FPU_shrx) SYM_FUNC_START(FPU_shrx)
push %ebp push %ebp
movl %esp,%ebp movl %esp,%ebp
pushl %esi pushl %esi
...@@ -93,7 +93,7 @@ L_more_than_95: ...@@ -93,7 +93,7 @@ L_more_than_95:
popl %esi popl %esi
leave leave
ret ret
ENDPROC(FPU_shrx) SYM_FUNC_END(FPU_shrx)
/*---------------------------------------------------------------------------+ /*---------------------------------------------------------------------------+
...@@ -112,7 +112,7 @@ ENDPROC(FPU_shrx) ...@@ -112,7 +112,7 @@ ENDPROC(FPU_shrx)
| part which has been shifted out of the arg. | | part which has been shifted out of the arg. |
| Results returned in the 64 bit arg and eax. | | Results returned in the 64 bit arg and eax. |
+---------------------------------------------------------------------------*/ +---------------------------------------------------------------------------*/
ENTRY(FPU_shrxs) SYM_FUNC_START(FPU_shrxs)
push %ebp push %ebp
movl %esp,%ebp movl %esp,%ebp
pushl %esi pushl %esi
...@@ -204,4 +204,4 @@ Ls_more_than_95: ...@@ -204,4 +204,4 @@ Ls_more_than_95:
popl %esi popl %esi
leave leave
ret ret
ENDPROC(FPU_shrxs) SYM_FUNC_END(FPU_shrxs)
...@@ -75,7 +75,7 @@ FPU_fsqrt_arg_0: ...@@ -75,7 +75,7 @@ FPU_fsqrt_arg_0:
.text .text
ENTRY(wm_sqrt) SYM_FUNC_START(wm_sqrt)
pushl %ebp pushl %ebp
movl %esp,%ebp movl %esp,%ebp
#ifndef NON_REENTRANT_FPU #ifndef NON_REENTRANT_FPU
...@@ -469,4 +469,4 @@ sqrt_more_prec_large: ...@@ -469,4 +469,4 @@ sqrt_more_prec_large:
/* Our estimate is too large */ /* Our estimate is too large */
movl $0x7fffff00,%eax movl $0x7fffff00,%eax
jmp sqrt_round_result jmp sqrt_round_result
ENDPROC(wm_sqrt) SYM_FUNC_END(wm_sqrt)
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
*/ */
.text .text
ENTRY(efi_call_phys) SYM_FUNC_START(efi_call_phys)
/* /*
* 0. The function can only be called in Linux kernel. So CS has been * 0. The function can only be called in Linux kernel. So CS has been
* set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
...@@ -114,7 +114,7 @@ ENTRY(efi_call_phys) ...@@ -114,7 +114,7 @@ ENTRY(efi_call_phys)
movl (%edx), %ecx movl (%edx), %ecx
pushl %ecx pushl %ecx
ret ret
ENDPROC(efi_call_phys) SYM_FUNC_END(efi_call_phys)
.previous .previous
.data .data
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
.text .text
ENTRY(swsusp_arch_suspend) SYM_FUNC_START(swsusp_arch_suspend)
movl %esp, saved_context_esp movl %esp, saved_context_esp
movl %ebx, saved_context_ebx movl %ebx, saved_context_ebx
movl %ebp, saved_context_ebp movl %ebp, saved_context_ebp
...@@ -33,7 +33,7 @@ ENTRY(swsusp_arch_suspend) ...@@ -33,7 +33,7 @@ ENTRY(swsusp_arch_suspend)
call swsusp_save call swsusp_save
FRAME_END FRAME_END
ret ret
ENDPROC(swsusp_arch_suspend) SYM_FUNC_END(swsusp_arch_suspend)
SYM_CODE_START(restore_image) SYM_CODE_START(restore_image)
/* prepare to jump to the image kernel */ /* prepare to jump to the image kernel */
...@@ -82,7 +82,7 @@ SYM_CODE_END(core_restore_code) ...@@ -82,7 +82,7 @@ SYM_CODE_END(core_restore_code)
/* code below belongs to the image kernel */ /* code below belongs to the image kernel */
.align PAGE_SIZE .align PAGE_SIZE
ENTRY(restore_registers) SYM_FUNC_START(restore_registers)
/* go back to the original page tables */ /* go back to the original page tables */
movl %ebp, %cr3 movl %ebp, %cr3
movl mmu_cr4_features, %ecx movl mmu_cr4_features, %ecx
...@@ -109,4 +109,4 @@ ENTRY(restore_registers) ...@@ -109,4 +109,4 @@ ENTRY(restore_registers)
movl %eax, in_suspend movl %eax, in_suspend
ret ret
ENDPROC(restore_registers) SYM_FUNC_END(restore_registers)
...@@ -112,15 +112,13 @@ ...@@ -112,15 +112,13 @@
.globl name ASM_NL \ .globl name ASM_NL \
name: name:
#endif #endif
#endif
#ifndef CONFIG_X86_64
#ifndef ENTRY #ifndef ENTRY
/* deprecated, use SYM_FUNC_START */ /* deprecated, use SYM_FUNC_START */
#define ENTRY(name) \ #define ENTRY(name) \
SYM_FUNC_START(name) SYM_FUNC_START(name)
#endif #endif
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86 */
#endif /* LINKER_SCRIPT */ #endif /* LINKER_SCRIPT */
#ifndef WEAK #ifndef WEAK
...@@ -135,9 +133,7 @@ ...@@ -135,9 +133,7 @@
#define END(name) \ #define END(name) \
.size name, .-name .size name, .-name
#endif #endif
#endif /* CONFIG_X86 */
#ifndef CONFIG_X86_64
/* If symbol 'name' is treated as a subroutine (gets called, and returns) /* If symbol 'name' is treated as a subroutine (gets called, and returns)
* then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of
* static analysis tools such as stack depth analyzer. * static analysis tools such as stack depth analyzer.
...@@ -147,7 +143,7 @@ ...@@ -147,7 +143,7 @@
#define ENDPROC(name) \ #define ENDPROC(name) \
SYM_FUNC_END(name) SYM_FUNC_END(name)
#endif #endif
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86 */
/* === generic annotations === */ /* === generic annotations === */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment