Commit 494b5168 authored by Nadav Amit, committed by Ingo Molnar

x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops

As described in:

  77b0bf55: ("kbuild/Makefile: Prepare for using macros in inline assembly code to work around asm() related GCC inlining bugs")

GCC's inlining heuristics are broken with common asm() patterns used in
kernel code, resulting in the effective disabling of inlining.

The workaround is to set an assembly macro and call it from the inline
assembly block. As a result GCC considers the inline assembly block as
a single instruction. (Which it isn't, but that's the best we can get.)

In this patch we wrap the paravirt call section tricks in a macro,
to hide it from GCC.

The effect of the patch is more aggressive inlining, which also
causes a size increase of the kernel:

      text     data     bss      dec     hex  filename
  18147336 10226688 2957312 31331336 1de1408  ./vmlinux before
  18162555 10226288 2957312 31346155 1de4deb  ./vmlinux after (+14819)

The number of static text symbols (non-inlined functions) goes down:

  Before: 40053
  After:  39942 (-111)

[ mingo: Rewrote the changelog. ]
Tested-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Nadav Amit <namit@vmware.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20181003213100.189959-8-namit@vmware.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f81f8ad5
...@@ -346,23 +346,11 @@ extern struct pv_lock_ops pv_lock_ops; ...@@ -346,23 +346,11 @@ extern struct pv_lock_ops pv_lock_ops;
#define paravirt_clobber(clobber) \ #define paravirt_clobber(clobber) \
[paravirt_clobber] "i" (clobber) [paravirt_clobber] "i" (clobber)
/*
* Generate some code, and mark it as patchable by the
* apply_paravirt() alternate instruction patcher.
*/
#define _paravirt_alt(insn_string, type, clobber) \
"771:\n\t" insn_string "\n" "772:\n" \
".pushsection .parainstructions,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR " 771b\n" \
" .byte " type "\n" \
" .byte 772b-771b\n" \
" .short " clobber "\n" \
".popsection\n"
/* Generate patchable code, with the default asm parameters. */ /* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string) \ #define paravirt_call \
_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") "PARAVIRT_CALL type=\"%c[paravirt_typenum]\"" \
" clobber=\"%c[paravirt_clobber]\"" \
" pv_opptr=\"%c[paravirt_opptr]\";"
/* Simple instruction patching code. */ /* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t" #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
...@@ -390,16 +378,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, ...@@ -390,16 +378,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
int paravirt_disable_iospace(void); int paravirt_disable_iospace(void);
/*
* This generates an indirect call based on the operation type number.
* The type number, computed in PARAVIRT_PATCH, is derived from the
* offset into the paravirt_patch_template structure, and can therefore be
* freely converted back into a structure offset.
*/
#define PARAVIRT_CALL \
ANNOTATE_RETPOLINE_SAFE \
"call *%c[paravirt_opptr];"
/* /*
* These macros are intended to wrap calls through one of the paravirt * These macros are intended to wrap calls through one of the paravirt
* ops structs, so that they can be later identified and patched at * ops structs, so that they can be later identified and patched at
...@@ -537,7 +515,7 @@ int paravirt_disable_iospace(void); ...@@ -537,7 +515,7 @@ int paravirt_disable_iospace(void);
/* since this condition will never hold */ \ /* since this condition will never hold */ \
if (sizeof(rettype) > sizeof(unsigned long)) { \ if (sizeof(rettype) > sizeof(unsigned long)) { \
asm volatile(pre \ asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \ paravirt_call \
post \ post \
: call_clbr, ASM_CALL_CONSTRAINT \ : call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \ : paravirt_type(op), \
...@@ -547,7 +525,7 @@ int paravirt_disable_iospace(void); ...@@ -547,7 +525,7 @@ int paravirt_disable_iospace(void);
__ret = (rettype)((((u64)__edx) << 32) | __eax); \ __ret = (rettype)((((u64)__edx) << 32) | __eax); \
} else { \ } else { \
asm volatile(pre \ asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \ paravirt_call \
post \ post \
: call_clbr, ASM_CALL_CONSTRAINT \ : call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \ : paravirt_type(op), \
...@@ -574,7 +552,7 @@ int paravirt_disable_iospace(void); ...@@ -574,7 +552,7 @@ int paravirt_disable_iospace(void);
PVOP_VCALL_ARGS; \ PVOP_VCALL_ARGS; \
PVOP_TEST_NULL(op); \ PVOP_TEST_NULL(op); \
asm volatile(pre \ asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \ paravirt_call \
post \ post \
: call_clbr, ASM_CALL_CONSTRAINT \ : call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \ : paravirt_type(op), \
...@@ -694,6 +672,26 @@ struct paravirt_patch_site { ...@@ -694,6 +672,26 @@ struct paravirt_patch_site {
extern struct paravirt_patch_site __parainstructions[], extern struct paravirt_patch_site __parainstructions[],
__parainstructions_end[]; __parainstructions_end[];
#else /* __ASSEMBLY__ */
/*
* This generates an indirect call based on the operation type number.
* The type number, computed in PARAVIRT_PATCH, is derived from the
* offset into the paravirt_patch_template structure, and can therefore be
* freely converted back into a structure offset.
*/
/*
 * Emit the indirect call and record it as a patch site in the
 * .parainstructions section, so apply_paravirt() can later find and
 * patch it.  The record layout mirrors struct paravirt_patch_site
 * (pointer to the instruction, op type, instruction length, clobber
 * mask) and matches the layout previously emitted by the removed
 * C macro _paravirt_alt().
 */
.macro PARAVIRT_CALL type:req clobber:req pv_opptr:req
771:	ANNOTATE_RETPOLINE_SAFE
	call *\pv_opptr
	/* 771/772 bracket the call so its length can be computed below. */
772:	.pushsection .parainstructions,"a"
	_ASM_ALIGN
	_ASM_PTR 771b		/* address of the instruction to patch */
	.byte \type		/* paravirt op type number */
	.byte 772b-771b		/* length of the call instruction */
	.short \clobber		/* registers clobbered by the call */
	.popsection
.endm
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_TYPES_H */ #endif /* _ASM_X86_PARAVIRT_TYPES_H */
...@@ -10,3 +10,4 @@ ...@@ -10,3 +10,4 @@
#include <asm/refcount.h> #include <asm/refcount.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/bug.h> #include <asm/bug.h>
#include <asm/paravirt.h>
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment