perf bench: Update the copies of x86's mem{cpy,set}_64.S

And update linux/linkage.h, which requires in turn that we make these
files switch from ENTRY()/ENDPROC() to SYM_FUNC_START()/SYM_FUNC_END():

  tools/perf/arch/arm64/tests/regs_load.S
  tools/perf/arch/arm/tests/regs_load.S
  tools/perf/arch/powerpc/tests/regs_load.S
  tools/perf/arch/x86/tests/regs_load.S

We also need to switch SYM_FUNC_START_LOCAL() to SYM_FUNC_START() for
the functions used directly by 'perf bench', and update
tools/perf/check-headers.sh to ignore those changes when checking if the
kernel original files drifted from the copies we carry.
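
With the definitions carried in the updated tools copy of
linux/linkage.h (see its diff below), the two annotations expand
roughly as follows (a sketch of the macro expansion, not verbatim
preprocessor output):

  SYM_FUNC_START(name)        ->  .globl name ; .align 4,0x90 ; name:
  SYM_FUNC_START_LOCAL(name)  ->                .align 4,0x90 ; name:

Only the former emits .globl, which is why the tools copies keep
memcpy_orig(), memcpy_erms(), etc. global: 'perf bench' resolves those
symbols at link time.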

This is to get the changes from:

  6dcc5627 ("x86/asm: Change all ENTRY+ENDPROC to SYM_FUNC_*")
  ef1e0315 ("x86/asm: Make some functions local")
  e9b9d020 ("x86/asm: Annotate aliases")

And address these tools/perf build warnings:

  Warning: Kernel ABI header at 'tools/arch/x86/lib/memcpy_64.S' differs from latest version at 'arch/x86/lib/memcpy_64.S'
  diff -u tools/arch/x86/lib/memcpy_64.S arch/x86/lib/memcpy_64.S
  Warning: Kernel ABI header at 'tools/arch/x86/lib/memset_64.S' differs from latest version at 'arch/x86/lib/memset_64.S'
  diff -u tools/arch/x86/lib/memset_64.S arch/x86/lib/memset_64.S
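
To illustrate the mechanism (a sketch of what the check() helper in
check-headers.sh ends up running, not a verbatim transcript), the
expected tools-only differences are now filtered with extra
'diff -I' regexps:

  $ diff -u -I '^EXPORT_SYMBOL' -I '^#include <asm/export.h>' \
        -I '^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))' \
        tools/arch/x86/lib/memcpy_64.S arch/x86/lib/memcpy_64.S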

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-tay3l8x8k11p7y3qcpqh9qh5@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
--- a/tools/arch/x86/lib/memcpy_64.S
+++ b/tools/arch/x86/lib/memcpy_64.S
@@ -28,8 +28,8 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
+SYM_FUNC_START_LOCAL(memcpy)
 	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
 		      "jmp memcpy_erms", X86_FEATURE_ERMS
 
@@ -41,8 +41,8 @@ ENTRY(memcpy)
 	movl %edx, %ecx
 	rep movsb
 	ret
-ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END(memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
@@ -50,14 +50,14 @@ EXPORT_SYMBOL(__memcpy)
  * memcpy_erms() - enhanced fast string memcpy. This is faster and
  * simpler than memcpy. Use memcpy_erms when possible.
  */
-ENTRY(memcpy_erms)
+SYM_FUNC_START(memcpy_erms)
 	movq %rdi, %rax
 	movq %rdx, %rcx
 	rep movsb
 	ret
-ENDPROC(memcpy_erms)
+SYM_FUNC_END(memcpy_erms)
 
-ENTRY(memcpy_orig)
+SYM_FUNC_START(memcpy_orig)
 	movq %rdi, %rax
 
 	cmpq $0x20, %rdx
@@ -182,7 +182,7 @@ ENTRY(memcpy_orig)
 
 .Lend:
 	retq
-ENDPROC(memcpy_orig)
+SYM_FUNC_END(memcpy_orig)
 
 #ifndef CONFIG_UML
 
@@ -193,7 +193,7 @@ MCSAFE_TEST_CTL
  * Note that we only catch machine checks when reading the source addresses.
  * Writes to target are posted and don't generate machine checks.
  */
-ENTRY(__memcpy_mcsafe)
+SYM_FUNC_START(__memcpy_mcsafe)
 	cmpl $8, %edx
 	/* Less than 8 bytes? Go to byte copy loop */
 	jb .L_no_whole_words
@@ -260,7 +260,7 @@ ENTRY(__memcpy_mcsafe)
 	xorl %eax, %eax
 .L_done:
 	ret
-ENDPROC(__memcpy_mcsafe)
+SYM_FUNC_END(__memcpy_mcsafe)
 EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
 
 	.section .fixup, "ax"
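(For orientation, the ALIAS annotations above implement the upstream
"one body, two names" pattern from e9b9d020 cited above; schematically:

  SYM_FUNC_START_ALIAS(__memcpy)    <- opens the second, global name
  SYM_FUNC_START_LOCAL(memcpy)      <- opens the primary name
  	...body...
  SYM_FUNC_END(memcpy)
  SYM_FUNC_END_ALIAS(__memcpy)

The same pairing appears in memset_64.S below.)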
--- a/tools/arch/x86/lib/memset_64.S
+++ b/tools/arch/x86/lib/memset_64.S
@@ -18,8 +18,8 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
-ENTRY(__memset)
+SYM_FUNC_START_ALIAS(memset)
+SYM_FUNC_START(__memset)
 	/*
 	 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
 	 * to use it when possible. If not available, use fast string instructions.
@@ -42,8 +42,8 @@ ENTRY(__memset)
 	rep stosb
 	movq %r9,%rax
 	ret
-ENDPROC(memset)
-ENDPROC(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_END_ALIAS(memset)
 EXPORT_SYMBOL(memset)
 EXPORT_SYMBOL(__memset)
 
@@ -56,16 +56,16 @@ ENDPROC(__memset)
  * rsi   value (char)
 * rdx   count (bytes)
  *
  * rax   original destination
  */
-ENTRY(memset_erms)
+SYM_FUNC_START(memset_erms)
 	movq %rdi,%r9
 	movb %sil,%al
 	movq %rdx,%rcx
 	rep stosb
 	movq %r9,%rax
 	ret
-ENDPROC(memset_erms)
+SYM_FUNC_END(memset_erms)
 
-ENTRY(memset_orig)
+SYM_FUNC_START(memset_orig)
 	movq %rdi,%r10
 
 	/* expand byte value  */
@@ -136,4 +136,4 @@ ENTRY(memset_orig)
 	subq %r8,%rdx
 	jmp .Lafter_bad_alignment
 .Lfinal:
-ENDPROC(memset_orig)
+SYM_FUNC_END(memset_orig)
--- a/tools/perf/arch/arm/tests/regs_load.S
+++ b/tools/perf/arch/arm/tests/regs_load.S
@@ -37,7 +37,7 @@
 
 .text
 .type perf_regs_load, %function
-ENTRY(perf_regs_load)
+SYM_FUNC_START(perf_regs_load)
 	str r0, [r0, #R0]
 	str r1, [r0, #R1]
 	str r2, [r0, #R2]
@@ -56,4 +56,4 @@ ENTRY(perf_regs_load)
 	str lr, [r0, #PC]	// store pc as lr in order to skip the call
 				//  to this function
 	mov pc, lr
-ENDPROC(perf_regs_load)
+SYM_FUNC_END(perf_regs_load)
--- a/tools/perf/arch/arm64/tests/regs_load.S
+++ b/tools/perf/arch/arm64/tests/regs_load.S
@@ -7,7 +7,7 @@
 #define LDR_REG(r)	ldr x##r, [x0, 8 * r]
 #define SP	(8 * 31)
 #define PC	(8 * 32)
-ENTRY(perf_regs_load)
+SYM_FUNC_START(perf_regs_load)
 	STR_REG(0)
 	STR_REG(1)
 	STR_REG(2)
@@ -44,4 +44,4 @@ ENTRY(perf_regs_load)
 	str x30, [x0, #PC]
 	LDR_REG(1)
 	ret
-ENDPROC(perf_regs_load)
+SYM_FUNC_END(perf_regs_load)
--- a/tools/perf/arch/x86/tests/regs_load.S
+++ b/tools/perf/arch/x86/tests/regs_load.S
@@ -28,7 +28,7 @@
 
 .text
 #ifdef HAVE_ARCH_X86_64_SUPPORT
-ENTRY(perf_regs_load)
+SYM_FUNC_START(perf_regs_load)
 	movq %rax, AX(%rdi)
 	movq %rbx, BX(%rdi)
 	movq %rcx, CX(%rdi)
@@ -60,9 +60,9 @@ ENTRY(perf_regs_load)
 	movq %r14, R14(%rdi)
 	movq %r15, R15(%rdi)
 	ret
-ENDPROC(perf_regs_load)
+SYM_FUNC_END(perf_regs_load)
 #else
-ENTRY(perf_regs_load)
+SYM_FUNC_START(perf_regs_load)
 	push %edi
 	movl 8(%esp), %edi
 	movl %eax, AX(%edi)
@@ -88,7 +88,7 @@ ENTRY(perf_regs_load)
 	movl $0, FS(%edi)
 	movl $0, GS(%edi)
 	ret
-ENDPROC(perf_regs_load)
+SYM_FUNC_END(perf_regs_load)
 #endif
 
 /*
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -110,8 +110,8 @@ for i in $FILES; do
 done
 
 # diff with extra ignore lines
-check arch/x86/lib/memcpy_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
-check arch/x86/lib/memset_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
+check arch/x86/lib/memcpy_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))"'
+check arch/x86/lib/memset_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"'
 check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
 check include/uapi/linux/mman.h       '-I "^#include <\(uapi/\)*asm/mman.h>"'
 check include/linux/ctype.h           '-I "isdigit("'
--- a/tools/perf/util/include/linux/linkage.h
+++ b/tools/perf/util/include/linux/linkage.h
@@ -5,10 +5,93 @@
 
 /* linkage.h ... for including arch/x86/lib/memcpy_64.S */
 
-#define ENTRY(name) \
-	.globl name; \
+/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
+#ifndef ASM_NL
+#define ASM_NL		 ;
+#endif
+
+#ifndef __ALIGN
+#define __ALIGN		.align 4,0x90
+#define __ALIGN_STR	".align 4,0x90"
+#endif
+
+/* SYM_T_FUNC -- type used by assembler to mark functions */
+#ifndef SYM_T_FUNC
+#define SYM_T_FUNC		STT_FUNC
+#endif
+
+/* SYM_A_* -- align the symbol? */
+#define SYM_A_ALIGN		ALIGN
+
+/* SYM_L_* -- linkage of symbols */
+#define SYM_L_GLOBAL(name)	.globl name
+#define SYM_L_LOCAL(name)	/* nothing */
+
+#define ALIGN		__ALIGN
+
+/* === generic annotations === */
+
+/* SYM_ENTRY -- use only if you have to for non-paired symbols */
+#ifndef SYM_ENTRY
+#define SYM_ENTRY(name, linkage, align...)		\
+	linkage(name) ASM_NL				\
+	align ASM_NL					\
 	name:
+#endif
+
+/* SYM_START -- use only if you have to */
+#ifndef SYM_START
+#define SYM_START(name, linkage, align...)		\
+	SYM_ENTRY(name, linkage, align)
+#endif
+
+/* SYM_END -- use only if you have to */
+#ifndef SYM_END
+#define SYM_END(name, sym_type)				\
+	.type name sym_type ASM_NL			\
+	.size name, .-name
+#endif
+
+/*
+ * SYM_FUNC_START_ALIAS -- use where there are two global names for one
+ * function
+ */
+#ifndef SYM_FUNC_START_ALIAS
+#define SYM_FUNC_START_ALIAS(name)			\
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START -- use for global functions */
+#ifndef SYM_FUNC_START
+/*
+ * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two
+ * later.
+ */
+#define SYM_FUNC_START(name)				\
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_LOCAL -- use for local functions */
+#ifndef SYM_FUNC_START_LOCAL
+/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_START_LOCAL(name)			\
+	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */
+#ifndef SYM_FUNC_END_ALIAS
+#define SYM_FUNC_END_ALIAS(name)			\
+	SYM_END(name, SYM_T_FUNC)
+#endif
 
-#define ENDPROC(name)
+/*
+ * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
+ * SYM_FUNC_START_WEAK, ...
+ */
+#ifndef SYM_FUNC_END
+/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_END(name)				\
+	SYM_END(name, SYM_T_FUNC)
+#endif
 
 #endif	/* PERF_LINUX_LINKAGE_H_ */
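
For completeness, a minimal, hypothetical user of the new annotations
(bench_nop is made up for illustration; the expansions in the comments
follow from the definitions above):

  #include <linux/linkage.h>

  SYM_FUNC_START(bench_nop)  /* .globl bench_nop ; .align 4,0x90 ; bench_nop: */
  	ret
  SYM_FUNC_END(bench_nop)    /* .type bench_nop STT_FUNC ; .size bench_nop, .-bench_nop */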