Commit fc5e5c59 authored by Linus Torvalds

Merge tag 'x86_paravirt_for_v6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 paravirt updates from Borislav Petkov:

 - Replace the paravirt patching functionality with the alternatives
   infrastructure and remove the former

 - Misc other improvements

* tag 'x86_paravirt_for_v6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/alternative: Correct feature bit debug output
  x86/paravirt: Remove no longer needed paravirt patching code
  x86/paravirt: Switch mixed paravirt/alternative calls to alternatives
  x86/alternative: Add indirect call patching
  x86/paravirt: Move some functions and defines to alternative.c
  x86/paravirt: Introduce ALT_NOT_XEN
  x86/paravirt: Make the struct paravirt_patch_site packed
  x86/paravirt: Use relative reference for the original instruction offset
parents 41a80ca4 7991ed43
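
The series below turns each paravirt call site into an ordinary alternatives entry: the default body is the indirect call through pv_ops, and the replacements are either a direct call to the current pv_ops target (ALT_CALL_ALWAYS) or a short native sequence (ALT_NOT_XEN on bare metal). A rough userspace model of that one-time boot decision, with invented names and plain function pointers standing in for instruction patching:

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-ins for a pv_ops slot -- names are invented for illustration. */
static unsigned long native_read_cr2(void) { return 0x1000; }
static unsigned long xen_read_cr2(void)    { return 0x2000; }

/* The "indirect call": a pv_ops-like function pointer. */
static unsigned long (*pv_read_cr2)(void) = native_read_cr2;

/* What the patched call site ends up invoking. */
static unsigned long (*site)(void);

static void toy_apply_alternatives(bool feature_xenpv)
{
	/* ALT_CALL_ALWAYS: turn the indirect call into a direct call. */
	site = pv_read_cr2;

	/* ALT_NOT(XENPV): on bare metal, use the inline native sequence. */
	if (!feature_xenpv)
		site = native_read_cr2;	/* stands in for "mov %cr2, %rax" */
}

int main(void)
{
	pv_read_cr2 = xen_read_cr2;		/* hypervisor setup ran earlier */
	toy_apply_alternatives(true);
	printf("Xen PV:     %#lx\n", site());	/* 0x2000, direct call */
	toy_apply_alternatives(false);
	printf("bare metal: %#lx\n", site());	/* 0x1000, native sequence */
	return 0;
}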
@@ -10,6 +10,9 @@
#define ALT_FLAG_NOT (1 << 0)
#define ALT_NOT(feature) ((ALT_FLAG_NOT << ALT_FLAGS_SHIFT) | (feature))
+#define ALT_FLAG_DIRECT_CALL (1 << 1)
+#define ALT_DIRECT_CALL(feature) ((ALT_FLAG_DIRECT_CALL << ALT_FLAGS_SHIFT) | (feature))
+#define ALT_CALL_ALWAYS ALT_DIRECT_CALL(X86_FEATURE_ALWAYS)
#ifndef __ASSEMBLY__
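
The two new flags ride in the high bits of the feature word passed to the ALTERNATIVE() macros. A minimal sketch of the encoding, assuming ALT_FLAGS_SHIFT is 16 and using a made-up feature number (the real constants live in <asm/alternative.h> and <asm/cpufeatures.h>):

#include <stdio.h>

#define ALT_FLAGS_SHIFT      16		/* assumed here; see <asm/alternative.h> */
#define ALT_FLAG_NOT         (1 << 0)
#define ALT_FLAG_DIRECT_CALL (1 << 1)
#define FEATURE_DEMO         0x2ab	/* hypothetical X86_FEATURE_* value */

int main(void)
{
	/* Flags occupy the high half of the word, the feature bit the low half. */
	printf("ALT_NOT(demo):         %#x\n",
	       (ALT_FLAG_NOT << ALT_FLAGS_SHIFT) | FEATURE_DEMO);
	printf("ALT_DIRECT_CALL(demo): %#x\n",
	       (ALT_FLAG_DIRECT_CALL << ALT_FLAGS_SHIFT) | FEATURE_DEMO);
	return 0;
}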
@@ -86,6 +89,8 @@ struct alt_instr {
u8 replacementlen; /* length of new instruction */
} __packed;
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
/*
* Debug flag that can be tested to see whether alternative
* instructions were patched in already:
@@ -101,11 +106,10 @@ extern void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
s32 *start_cfi, s32 *end_cfi);
struct module;
-struct paravirt_patch_site;
struct callthunk_sites {
s32 *call_start, *call_end;
-struct paravirt_patch_site *pv_start, *pv_end;
+struct alt_instr *alt_start, *alt_end;
};
#ifdef CONFIG_CALL_THUNKS
@@ -150,6 +154,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
}
#endif /* CONFIG_SMP */
+#define ALT_CALL_INSTR "call BUG_func"
#define b_replacement(num) "664"#num
#define e_replacement(num) "665"#num
@@ -330,6 +336,22 @@ static inline int alternatives_text_reserved(void *start, void *end)
*/
#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
+/* Macro for creating assembler functions avoiding any C magic. */
+#define DEFINE_ASM_FUNC(func, instr, sec) \
+asm (".pushsection " #sec ", \"ax\"\n" \
+".global " #func "\n\t" \
+".type " #func ", @function\n\t" \
+ASM_FUNC_ALIGN "\n" \
+#func ":\n\t" \
+ASM_ENDBR \
+instr "\n\t" \
+ASM_RET \
+".size " #func ", . - " #func "\n\t" \
+".popsection")
+void BUG_func(void);
+void nop_func(void);
#else /* __ASSEMBLY__ */
#ifdef CONFIG_SMP
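
For reference, DEFINE_ASM_FUNC(nop_func, "", .entry.text) from the hunk above pastes together a single top-level asm() statement along these lines; ASM_FUNC_ALIGN, ASM_ENDBR and ASM_RET are config-dependent, so the .balign/endbr64/ret shown here are assumptions for one particular configuration:

asm (".pushsection .entry.text, \"ax\"\n"
     ".global nop_func\n\t"
     ".type nop_func, @function\n\t"
     ".balign 16\n"			/* ASM_FUNC_ALIGN, assumed value */
     "nop_func:\n\t"
     "endbr64\n\t"			/* ASM_ENDBR, only with CONFIG_X86_KERNEL_IBT */
					/* instr is the empty string here */
     "ret\n\t"				/* ASM_RET; may be a return-thunk jmp instead */
     ".size nop_func, . - nop_func\n\t"
     ".popsection");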
@@ -370,6 +392,10 @@ static inline int alternatives_text_reserved(void *start, void *end)
.byte \alt_len
.endm
+.macro ALT_CALL_INSTR
+call BUG_func
+.endm
/*
* Define an alternative between two instructions. If @feature is
* present, early code in apply_alternatives() replaces @oldinstr with
@@ -142,8 +142,7 @@ static inline void write_cr0(unsigned long x)
static __always_inline unsigned long read_cr2(void)
{
return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
"mov %%cr2, %%rax;",
ALT_NOT(X86_FEATURE_XENPV));
"mov %%cr2, %%rax;", ALT_NOT_XEN);
}
static __always_inline void write_cr2(unsigned long x)
@@ -154,13 +153,12 @@ static __always_inline void write_cr2(unsigned long x)
static inline unsigned long __read_cr3(void)
{
return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
"mov %%cr3, %%rax;", ALT_NOT(X86_FEATURE_XENPV));
"mov %%cr3, %%rax;", ALT_NOT_XEN);
}
static inline void write_cr3(unsigned long x)
{
-PVOP_ALT_VCALL1(mmu.write_cr3, x,
-"mov %%rdi, %%cr3", ALT_NOT(X86_FEATURE_XENPV));
+PVOP_ALT_VCALL1(mmu.write_cr3, x, "mov %%rdi, %%cr3", ALT_NOT_XEN);
}
static inline void __write_cr4(unsigned long x)
@@ -182,7 +180,7 @@ extern noinstr void pv_native_wbinvd(void);
static __always_inline void wbinvd(void)
{
-PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV));
+PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT_XEN);
}
static inline u64 paravirt_read_msr(unsigned msr)
@@ -390,27 +388,25 @@ static inline void paravirt_release_p4d(unsigned long pfn)
static inline pte_t __pte(pteval_t val)
{
return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val,
"mov %%rdi, %%rax",
ALT_NOT(X86_FEATURE_XENPV)) };
"mov %%rdi, %%rax", ALT_NOT_XEN) };
}
static inline pteval_t pte_val(pte_t pte)
{
return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte,
"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
"mov %%rdi, %%rax", ALT_NOT_XEN);
}
static inline pgd_t __pgd(pgdval_t val)
{
return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val,
"mov %%rdi, %%rax",
ALT_NOT(X86_FEATURE_XENPV)) };
"mov %%rdi, %%rax", ALT_NOT_XEN) };
}
static inline pgdval_t pgd_val(pgd_t pgd)
{
return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd,
"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
"mov %%rdi, %%rax", ALT_NOT_XEN);
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@@ -444,14 +440,13 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
static inline pmd_t __pmd(pmdval_t val)
{
return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val,
"mov %%rdi, %%rax",
ALT_NOT(X86_FEATURE_XENPV)) };
"mov %%rdi, %%rax", ALT_NOT_XEN) };
}
static inline pmdval_t pmd_val(pmd_t pmd)
{
return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
"mov %%rdi, %%rax", ALT_NOT_XEN);
}
static inline void set_pud(pud_t *pudp, pud_t pud)
@@ -464,7 +459,7 @@ static inline pud_t __pud(pudval_t val)
pudval_t ret;
ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val,
"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
"mov %%rdi, %%rax", ALT_NOT_XEN);
return (pud_t) { ret };
}
@@ -472,7 +467,7 @@ static inline pud_t __pud(pudval_t val)
static inline pudval_t pud_val(pud_t pud)
{
return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud,
"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
"mov %%rdi, %%rax", ALT_NOT_XEN);
}
static inline void pud_clear(pud_t *pudp)
@@ -492,8 +487,7 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
static inline p4d_t __p4d(p4dval_t val)
{
p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
"mov %%rdi, %%rax",
ALT_NOT(X86_FEATURE_XENPV));
"mov %%rdi, %%rax", ALT_NOT_XEN);
return (p4d_t) { ret };
}
@@ -501,7 +495,7 @@ static inline p4d_t __p4d(p4dval_t val)
static inline p4dval_t p4d_val(p4d_t p4d)
{
return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d,
"mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
"mov %%rdi, %%rax", ALT_NOT_XEN);
}
static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
@@ -687,17 +681,17 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
static __always_inline unsigned long arch_local_save_flags(void)
{
return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
-ALT_NOT(X86_FEATURE_XENPV));
+ALT_NOT_XEN);
}
static __always_inline void arch_local_irq_disable(void)
{
-PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT(X86_FEATURE_XENPV));
+PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT_XEN);
}
static __always_inline void arch_local_irq_enable(void)
{
-PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT(X86_FEATURE_XENPV));
+PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT_XEN);
}
static __always_inline unsigned long arch_local_irq_save(void)
@@ -726,52 +720,25 @@ static __always_inline unsigned long arch_local_irq_save(void)
#undef PVOP_VCALL4
#undef PVOP_CALL4
-#define DEFINE_PARAVIRT_ASM(func, instr, sec) \
-asm (".pushsection " #sec ", \"ax\"\n" \
-".global " #func "\n\t" \
-".type " #func ", @function\n\t" \
-ASM_FUNC_ALIGN "\n" \
-#func ":\n\t" \
-ASM_ENDBR \
-instr "\n\t" \
-ASM_RET \
-".size " #func ", . - " #func "\n\t" \
-".popsection")
extern void default_banner(void);
void native_pv_lock_init(void) __init;
#else /* __ASSEMBLY__ */
-#define _PVSITE(ptype, ops, word, algn) \
-771:; \
-ops; \
-772:; \
-.pushsection .parainstructions,"a"; \
-.align algn; \
-word 771b; \
-.byte ptype; \
-.byte 772b-771b; \
-_ASM_ALIGN; \
-.popsection
#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
+#ifdef CONFIG_DEBUG_ENTRY
-#define PARA_PATCH(off) ((off) / 8)
-#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr) *addr(%rip)
-#ifdef CONFIG_DEBUG_ENTRY
.macro PARA_IRQ_save_fl
-PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),
ANNOTATE_RETPOLINE_SAFE;
-call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);)
+call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);
.endm
#define SAVE_FLAGS ALTERNATIVE "PARA_IRQ_save_fl;", "pushf; pop %rax;", \
ALT_NOT(X86_FEATURE_XENPV)
#define SAVE_FLAGS ALTERNATIVE_2 "PARA_IRQ_save_fl;", \
"ALT_CALL_INSTR;", ALT_CALL_ALWAYS, \
"pushf; pop %rax;", ALT_NOT_XEN
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */
@@ -2,15 +2,6 @@
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H
-#ifndef __ASSEMBLY__
-/* These all sit in the .parainstructions section to tell us what to patch. */
-struct paravirt_patch_site {
-u8 *instr; /* original instructions */
-u8 type; /* type of this instruction */
-u8 len; /* length of original instruction */
-};
-#endif
#ifdef CONFIG_PARAVIRT
#ifndef __ASSEMBLY__
@@ -250,43 +241,11 @@ struct paravirt_patch_template {
extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;
-#define PARAVIRT_PATCH(x) \
-(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
-#define paravirt_type(op) \
-[paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
-[paravirt_opptr] "m" (pv_ops.op)
-/*
- * Generate some code, and mark it as patchable by the
- * apply_paravirt() alternate instruction patcher.
- */
-#define _paravirt_alt(insn_string, type) \
-"771:\n\t" insn_string "\n" "772:\n" \
-".pushsection .parainstructions,\"a\"\n" \
-_ASM_ALIGN "\n" \
-_ASM_PTR " 771b\n" \
-" .byte " type "\n" \
-" .byte 772b-771b\n" \
-_ASM_ALIGN "\n" \
-".popsection\n"
-/* Generate patchable code, with the default asm parameters. */
-#define paravirt_alt(insn_string) \
-_paravirt_alt(insn_string, "%c[paravirt_typenum]")
/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
-unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, unsigned int len);
+#define paravirt_ptr(op) [paravirt_opptr] "m" (pv_ops.op)
int paravirt_disable_iospace(void);
-/*
- * This generates an indirect call based on the operation type number.
- * The type number, computed in PARAVIRT_PATCH, is derived from the
- * offset into the paravirt_patch_template structure, and can therefore be
- * freely converted back into a structure offset.
- */
+/* This generates an indirect call based on the operation type number. */
#define PARAVIRT_CALL \
ANNOTATE_RETPOLINE_SAFE \
"call *%[paravirt_opptr];"
@@ -319,12 +278,6 @@ int paravirt_disable_iospace(void);
* However, x86_64 also has to clobber all caller saved registers, which
* unfortunately, are quite a bit (r8 - r11)
*
- * The call instruction itself is marked by placing its start address
- * and size into the .parainstructions section, so that
- * apply_paravirt() in arch/i386/kernel/alternative.c can do the
- * appropriate patching under the control of the backend pv_init_ops
- * implementation.
- *
* Unfortunately there's no way to get gcc to generate the args setup
* for the call, and then allow the call itself to be generated by an
* inline asm. Because of this, we must do the complete arg setup and
@@ -423,14 +376,27 @@ int paravirt_disable_iospace(void);
__mask & __eax; \
})
+/*
+ * Use alternative patching for paravirt calls:
+ * - For replacing an indirect call with a direct one, use the "normal"
+ *   ALTERNATIVE() macro with the indirect call as the initial code sequence,
+ *   which will be replaced with the related direct call by using the
+ *   ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
+ * - In case the replacement is either a direct call or a short code sequence
+ *   depending on a feature bit, the ALTERNATIVE_2() macro is being used.
+ *   The indirect call is the initial code sequence again, while the special
+ *   code sequence is selected with the specified feature bit. In case the
+ *   feature is not active, the direct call is used as above via the
+ *   ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
+ */
#define ____PVOP_CALL(ret, op, call_clbr, extra_clbr, ...) \
({ \
PVOP_CALL_ARGS; \
PVOP_TEST_NULL(op); \
-asm volatile(paravirt_alt(PARAVIRT_CALL) \
+asm volatile(ALTERNATIVE(PARAVIRT_CALL, ALT_CALL_INSTR, \
+ALT_CALL_ALWAYS) \
: call_clbr, ASM_CALL_CONSTRAINT \
-: paravirt_type(op), \
+: paravirt_ptr(op), \
##__VA_ARGS__ \
: "memory", "cc" extra_clbr); \
ret; \
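
Per the comment above, a PVOP_ALT site emits the indirect call plus two replacements, and because X86_FEATURE_ALWAYS is set by definition, the indirect call never survives patching. A toy model of which outcome wins, with plain C standing in for byte patching (replacements apply in order, so a matching later entry overrides an earlier one):

#include <stdio.h>
#include <stdbool.h>

enum outcome { INDIRECT_CALL, DIRECT_CALL, NATIVE_SEQUENCE };

static enum outcome patch_site(bool feature_always, bool feature_xenpv)
{
	enum outcome out = INDIRECT_CALL;	/* initial code sequence */

	if (feature_always)			/* ALT_CALL_ALWAYS */
		out = DIRECT_CALL;
	if (!feature_xenpv)			/* ALT_NOT_XEN */
		out = NATIVE_SEQUENCE;		/* e.g. "pushf; pop %rax" */
	return out;
}

int main(void)
{
	printf("Xen PV:     %d\n", patch_site(true, true));	/* DIRECT_CALL */
	printf("bare metal: %d\n", patch_site(true, false));	/* NATIVE_SEQUENCE */
	return 0;
}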
@@ -441,10 +407,11 @@ int paravirt_disable_iospace(void);
({ \
PVOP_CALL_ARGS; \
PVOP_TEST_NULL(op); \
-asm volatile(ALTERNATIVE(paravirt_alt(PARAVIRT_CALL), \
+asm volatile(ALTERNATIVE_2(PARAVIRT_CALL, \
+ALT_CALL_INSTR, ALT_CALL_ALWAYS, \
alt, cond) \
: call_clbr, ASM_CALL_CONSTRAINT \
-: paravirt_type(op), \
+: paravirt_ptr(op), \
##__VA_ARGS__ \
: "memory", "cc" extra_clbr); \
ret; \
@@ -542,8 +509,6 @@ int paravirt_disable_iospace(void);
__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-void _paravirt_nop(void);
-void paravirt_BUG(void);
unsigned long paravirt_ret0(void);
#ifdef CONFIG_PARAVIRT_XXL
u64 _paravirt_ident_64(u64);
@@ -553,11 +518,11 @@ void pv_native_irq_enable(void);
unsigned long pv_native_read_cr2(void);
#endif
-#define paravirt_nop ((void *)_paravirt_nop)
-extern struct paravirt_patch_site __parainstructions[],
-__parainstructions_end[];
+#define paravirt_nop ((void *)nop_func)
#endif /* __ASSEMBLY__ */
+#define ALT_NOT_XEN ALT_NOT(X86_FEATURE_XENPV)
#endif /* CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_TYPES_H */
@@ -56,7 +56,7 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
"pop %rdx\n\t" \
FRAME_END
-DEFINE_PARAVIRT_ASM(__raw_callee_save___pv_queued_spin_unlock,
+DEFINE_ASM_FUNC(__raw_callee_save___pv_queued_spin_unlock,
PV_UNLOCK_ASM, .spinlock.text);
#else /* CONFIG_64BIT */
@@ -6,18 +6,6 @@
#include <linux/stddef.h>
#include <asm/ptrace.h>
-struct paravirt_patch_site;
-#ifdef CONFIG_PARAVIRT
-void apply_paravirt(struct paravirt_patch_site *start,
-struct paravirt_patch_site *end);
-#else
-static inline void apply_paravirt(struct paravirt_patch_site *start,
-struct paravirt_patch_site *end)
-{}
-#define __parainstructions NULL
-#define __parainstructions_end NULL
-#endif
/*
* Currently, the max observed size in the kernel code is
* JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
@@ -160,7 +160,6 @@ extern s32 __retpoline_sites[], __retpoline_sites_end[];
extern s32 __return_sites[], __return_sites_end[];
extern s32 __cfi_sites[], __cfi_sites_end[];
extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
-extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);
@@ -395,6 +394,63 @@ apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
}
}
+/* Low-level backend functions usable from alternative code replacements. */
+DEFINE_ASM_FUNC(nop_func, "", .entry.text);
+EXPORT_SYMBOL_GPL(nop_func);
+noinstr void BUG_func(void)
+{
+BUG();
+}
+EXPORT_SYMBOL_GPL(BUG_func);
+#define CALL_RIP_REL_OPCODE 0xff
+#define CALL_RIP_REL_MODRM 0x15
+/*
+ * Rewrite the "call BUG_func" replacement to point to the target of the
+ * indirect pv_ops call "call *disp(%ip)".
+ */
+static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
+{
+void *target, *bug = &BUG_func;
+s32 disp;
+if (a->replacementlen != 5 || insn_buff[0] != CALL_INSN_OPCODE) {
+pr_err("ALT_FLAG_DIRECT_CALL set for a non-call replacement instruction\n");
+BUG();
+}
+if (a->instrlen != 6 ||
+instr[0] != CALL_RIP_REL_OPCODE ||
+instr[1] != CALL_RIP_REL_MODRM) {
+pr_err("ALT_FLAG_DIRECT_CALL set for unrecognized indirect call\n");
+BUG();
+}
+/* Skip CALL_RIP_REL_OPCODE and CALL_RIP_REL_MODRM */
+disp = *(s32 *)(instr + 2);
+#ifdef CONFIG_X86_64
+/* ff 15 00 00 00 00 call *0x0(%rip) */
+/* target address is stored at "next instruction + disp". */
+target = *(void **)(instr + a->instrlen + disp);
+#else
+/* ff 15 00 00 00 00 call *0x0 */
+/* target address is stored at disp. */
+target = *(void **)disp;
+#endif
+if (!target)
+target = bug;
+/* (BUG_func - .) + (target - BUG_func) := target - . */
+*(s32 *)(insn_buff + 1) += target - bug;
+if (target == &nop_func)
+return 0;
+return 5;
+}
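
The displacement fixup above works because a CALL rel32 encodes the target relative to the end of the 5-byte instruction, so adding (target - BUG_func) retargets a call that was emitted as "call BUG_func". A worked example of the arithmetic with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t site   = 0xffffffff81000100ULL;	/* "." in the comment */
	uint64_t bug    = 0xffffffff81234000ULL;	/* BUG_func */
	uint64_t target = 0xffffffff81567000ULL;	/* real pv_ops target */

	int32_t rel = (int32_t)(bug - (site + 5));	/* as emitted: BUG_func - . */
	rel += (int32_t)(target - bug);			/* the += fixup above */

	/* (BUG_func - .) + (target - BUG_func) == target - . */
	printf("fixup correct: %d\n", rel == (int32_t)(target - (site + 5)));
	return 0;
}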
/*
* Replace instructions with better alternatives for this CPU type. This runs
* before SMP is initialized to avoid SMP problems with self modifying code.
@@ -452,16 +508,21 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
continue;
}
DPRINTK(ALT, "feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
(a->flags & ALT_FLAG_NOT) ? "!" : "",
DPRINTK(ALT, "feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d) flags: 0x%x",
a->cpuid >> 5,
a->cpuid & 0x1f,
instr, instr, a->instrlen,
replacement, a->replacementlen);
replacement, a->replacementlen, a->flags);
memcpy(insn_buff, replacement, a->replacementlen);
insn_buff_sz = a->replacementlen;
+if (a->flags & ALT_FLAG_DIRECT_CALL) {
+insn_buff_sz = alt_replace_call(instr, insn_buff, a);
+if (insn_buff_sz < 0)
+continue;
+}
for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
insn_buff[insn_buff_sz] = 0x90;
@@ -1421,46 +1482,6 @@ int alternatives_text_reserved(void *start, void *end)
}
#endif /* CONFIG_SMP */
-#ifdef CONFIG_PARAVIRT
-/* Use this to add nops to a buffer, then text_poke the whole buffer. */
-static void __init_or_module add_nops(void *insns, unsigned int len)
-{
-while (len > 0) {
-unsigned int noplen = len;
-if (noplen > ASM_NOP_MAX)
-noplen = ASM_NOP_MAX;
-memcpy(insns, x86_nops[noplen], noplen);
-insns += noplen;
-len -= noplen;
-}
-}
-void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
-struct paravirt_patch_site *end)
-{
-struct paravirt_patch_site *p;
-char insn_buff[MAX_PATCH_LEN];
-for (p = start; p < end; p++) {
-unsigned int used;
-BUG_ON(p->len > MAX_PATCH_LEN);
-/* prep the buffer with the original instructions */
-memcpy(insn_buff, p->instr, p->len);
-used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
-BUG_ON(used > p->len);
-/* Pad the rest with nops */
-add_nops(insn_buff + used, p->len - used);
-text_poke_early(p->instr, insn_buff, p->len);
-}
-}
-extern struct paravirt_patch_site __start_parainstructions[],
-__stop_parainstructions[];
-#endif /* CONFIG_PARAVIRT */
/*
* Self-test for the INT3 based CALL emulation code.
*
@@ -1596,28 +1617,11 @@ void __init alternative_instructions(void)
*/
/*
- * Paravirt patching and alternative patching can be combined to
- * replace a function call with a short direct code sequence (e.g.
- * by setting a constant return value instead of doing that in an
- * external function).
- * In order to make this work the following sequence is required:
- * 1. set (artificial) features depending on used paravirt
- *    functions which can later influence alternative patching
- * 2. apply paravirt patching (generally replacing an indirect
- *    function call with a direct one)
- * 3. apply alternative patching (e.g. replacing a direct function
- *    call with a custom code sequence)
- * Doing paravirt patching after alternative patching would clobber
- * the optimization of the custom code with a function call again.
+ * Make sure to set (artificial) features depending on used paravirt
+ * functions which can later influence alternative patching.
*/
paravirt_set_cap();
-/*
- * First patch paravirt functions, such that we overwrite the indirect
- * call with the direct call.
- */
-apply_paravirt(__parainstructions, __parainstructions_end);
__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
__cfi_sites, __cfi_sites_end, true);
@@ -1628,10 +1632,6 @@ void __init alternative_instructions(void)
apply_retpolines(__retpoline_sites, __retpoline_sites_end);
apply_returns(__return_sites, __return_sites_end);
-/*
- * Then patch alternatives, such that those paravirt calls that are in
- * alternatives can be overwritten by their immediate fragments.
- */
apply_alternatives(__alt_instructions, __alt_instructions_end);
/*
@@ -233,14 +233,13 @@ patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
}
static __init_or_module void
-patch_paravirt_call_sites(struct paravirt_patch_site *start,
-struct paravirt_patch_site *end,
+patch_alt_call_sites(struct alt_instr *start, struct alt_instr *end,
const struct core_text *ct)
{
-struct paravirt_patch_site *p;
+struct alt_instr *a;
-for (p = start; p < end; p++)
-patch_call(p->instr, ct);
+for (a = start; a < end; a++)
+patch_call((void *)&a->instr_offset + a->instr_offset, ct);
}
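
The expression (void *)&a->instr_offset + a->instr_offset decodes a self-relative reference: instr_offset stores the distance from the field itself to the original instruction, which is what makes struct alt_instr position-independent. A minimal userspace demo of the idiom with an invented struct (GNU C void-pointer arithmetic, as the kernel uses):

#include <stdio.h>
#include <stdint.h>

struct rel_ref {
	int32_t offset;		/* target address minus address of this field */
};

static int value = 42;
static struct rel_ref ref;	/* both static, so the s32 distance fits */

static void *deref(struct rel_ref *r)
{
	return (void *)&r->offset + r->offset;	/* same idiom as above */
}

int main(void)
{
	ref.offset = (int32_t)((intptr_t)&value - (intptr_t)&ref.offset);
	printf("%d\n", *(int *)deref(&ref));	/* prints 42 */
	return 0;
}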
static __init_or_module void
@@ -248,7 +247,7 @@ callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
prdbg("Patching call sites %s\n", ct->name);
patch_call_sites(cs->call_start, cs->call_end, ct);
-patch_paravirt_call_sites(cs->pv_start, cs->pv_end, ct);
+patch_alt_call_sites(cs->alt_start, cs->alt_end, ct);
prdbg("Patching call sites done%s\n", ct->name);
}
@@ -257,8 +256,8 @@ void __init callthunks_patch_builtin_calls(void)
struct callthunk_sites cs = {
.call_start = __call_sites,
.call_end = __call_sites_end,
-.pv_start = __parainstructions,
-.pv_end = __parainstructions_end
+.alt_start = __alt_instructions,
+.alt_end = __alt_instructions_end
};
if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
......
@@ -803,7 +803,7 @@ extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t" \
"setne %al\n\t"
-DEFINE_PARAVIRT_ASM(__raw_callee_save___kvm_vcpu_is_preempted,
+DEFINE_ASM_FUNC(__raw_callee_save___kvm_vcpu_is_preempted,
PV_VCPU_PREEMPTED_ASM, .text);
#endif
......
@@ -276,7 +276,7 @@ int module_finalize(const Elf_Ehdr *hdr,
struct module *me)
{
const Elf_Shdr *s, *alt = NULL, *locks = NULL,
-*para = NULL, *orc = NULL, *orc_ip = NULL,
+*orc = NULL, *orc_ip = NULL,
*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
*calls = NULL, *cfi = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
@@ -286,8 +286,6 @@ int module_finalize(const Elf_Ehdr *hdr,
alt = s;
if (!strcmp(".smp_locks", secstrings + s->sh_name))
locks = s;
if (!strcmp(".parainstructions", secstrings + s->sh_name))
para = s;
if (!strcmp(".orc_unwind", secstrings + s->sh_name))
orc = s;
if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
@@ -304,14 +302,6 @@ int module_finalize(const Elf_Ehdr *hdr,
ibt_endbr = s;
}
-/*
- * See alternative_instructions() for the ordering rules between the
- * various patching types.
- */
-if (para) {
-void *pseg = (void *)para->sh_addr;
-apply_paravirt(pseg, pseg + para->sh_size);
-}
if (retpolines || cfi) {
void *rseg = NULL, *cseg = NULL;
unsigned int rsize = 0, csize = 0;
@@ -341,7 +331,7 @@ int module_finalize(const Elf_Ehdr *hdr,
void *aseg = (void *)alt->sh_addr;
apply_alternatives(aseg, aseg + alt->sh_size);
}
-if (calls || para) {
+if (calls || alt) {
struct callthunk_sites cs = {};
if (calls) {
@@ -349,9 +339,9 @@ int module_finalize(const Elf_Ehdr *hdr,
cs.call_end = (void *)calls->sh_addr + calls->sh_size;
}
-if (para) {
-cs.pv_start = (void *)para->sh_addr;
-cs.pv_end = (void *)para->sh_addr + para->sh_size;
+if (alt) {
+cs.alt_start = (void *)alt->sh_addr;
+cs.alt_end = (void *)alt->sh_addr + alt->sh_size;
}
callthunks_patch_module_calls(&cs, me);
......
@@ -34,14 +34,8 @@
#include <asm/io_bitmap.h>
#include <asm/gsseg.h>
-/*
- * nop stub, which must not clobber anything *including the stack* to
- * avoid confusing the entry prologues.
- */
-DEFINE_PARAVIRT_ASM(_paravirt_nop, "", .entry.text);
/* stub always returning 0. */
-DEFINE_PARAVIRT_ASM(paravirt_ret0, "xor %eax,%eax", .entry.text);
+DEFINE_ASM_FUNC(paravirt_ret0, "xor %eax,%eax", .entry.text);
void __init default_banner(void)
{
@@ -49,26 +43,12 @@ void __init default_banner(void)
pv_info.name);
}
-/* Undefined instruction for dealing with missing ops pointers. */
-noinstr void paravirt_BUG(void)
-{
-BUG();
-}
-static unsigned paravirt_patch_call(void *insn_buff, const void *target,
-unsigned long addr, unsigned len)
-{
-__text_gen_insn(insn_buff, CALL_INSN_OPCODE,
-(void *)addr, target, CALL_INSN_SIZE);
-return CALL_INSN_SIZE;
-}
#ifdef CONFIG_PARAVIRT_XXL
-DEFINE_PARAVIRT_ASM(_paravirt_ident_64, "mov %rdi, %rax", .text);
-DEFINE_PARAVIRT_ASM(pv_native_save_fl, "pushf; pop %rax", .noinstr.text);
-DEFINE_PARAVIRT_ASM(pv_native_irq_disable, "cli", .noinstr.text);
-DEFINE_PARAVIRT_ASM(pv_native_irq_enable, "sti", .noinstr.text);
-DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
+DEFINE_ASM_FUNC(_paravirt_ident_64, "mov %rdi, %rax", .text);
+DEFINE_ASM_FUNC(pv_native_save_fl, "pushf; pop %rax", .noinstr.text);
+DEFINE_ASM_FUNC(pv_native_irq_disable, "cli", .noinstr.text);
+DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
+DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
#endif
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
@@ -85,28 +65,6 @@ static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
tlb_remove_page(tlb, table);
}
-unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr,
-unsigned int len)
-{
-/*
- * Neat trick to map patch type back to the call within the
- * corresponding structure.
- */
-void *opfunc = *((void **)&pv_ops + type);
-unsigned ret;
-if (opfunc == NULL)
-/* If there's no function, patch it with paravirt_BUG() */
-ret = paravirt_patch_call(insn_buff, paravirt_BUG, addr, len);
-else if (opfunc == _paravirt_nop)
-ret = 0;
-else
-/* Otherwise call the function. */
-ret = paravirt_patch_call(insn_buff, opfunc, addr, len);
-return ret;
-}
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
......
@@ -267,19 +267,6 @@
}
#endif
-/*
- * start address and size of operations which during runtime
- * can be patched with virtualization friendly instructions or
- * baremetal native ones. Think page table operations.
- * Details in paravirt_types.h
- */
-. = ALIGN(8);
-.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
-__parainstructions = .;
-*(.parainstructions)
-__parainstructions_end = .;
-}
#ifdef CONFIG_RETPOLINE
/*
* List of instructions that call/jmp/jcc to retpoline thunks
@@ -66,7 +66,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
[S_REL] =
"^(__init_(begin|end)|"
"__x86_cpu_dev_(start|end)|"
"(__parainstructions|__alt_instructions)(_end)?|"
"__alt_instructions(_end)?|"
"(__iommu_table|__apicdrivers|__smp_locks)(_end)?|"
"__(start|end)_pci_.*|"
#if CONFIG_FW_LOADER
@@ -45,7 +45,7 @@ static const typeof(pv_ops) xen_irq_ops __initconst = {
/* Initial interrupt flag handling only called while interrupts off. */
.save_fl = __PV_IS_CALLEE_SAVE(paravirt_ret0),
.irq_disable = __PV_IS_CALLEE_SAVE(paravirt_nop),
-.irq_enable = __PV_IS_CALLEE_SAVE(paravirt_BUG),
+.irq_enable = __PV_IS_CALLEE_SAVE(BUG_func),
.safe_halt = xen_safe_halt,
.halt = xen_halt,