Commit 4e629211 authored by Juergen Gross, committed by Borislav Petkov

x86/paravirt: Add new features for paravirt patching

To switch paravirt patching from special-cased custom code sequences to
ALTERNATIVE handling, some new X86_FEATURE_* flags are needed. This makes it
possible to have the standard indirect pv call as the default code and to
patch it with the non-Xen custom code sequence via ALTERNATIVE patching later.

Make sure paravirt patching is performed before alternatives patching.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210311142319.4723-9-jgross@suse.com
parent 2fe2a2c7
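
A minimal sketch of where these feature bits are headed (not part of this patch; it assumes the ALT_NOT() helper introduced earlier in this series, and the stub symbol name is hypothetical): the indirect pv call stays the compile-time default, and alternatives patching replaces it with a short native sequence when X86_FEATURE_VCPUPREEMPT is not forced, i.e. when the native vcpu_is_preempted() is in use.

#include <asm/alternative.h>
#include <asm/cpufeatures.h>

/*
 * Sketch only: a pv call site expressed as an ALTERNATIVE keyed on the
 * new synthetic feature.  The call is the default; when the feature is
 * NOT set (native case), apply_alternatives() patches in
 * "xor %eax,%eax" - the native answer "never preempted".
 */
static __always_inline bool vcpu_is_preempted_sketch(long cpu)
{
	bool ret;

	asm volatile (ALTERNATIVE("call pv_vcpu_is_preempted_stub",	/* hypothetical symbol */
				  "xor %%eax, %%eax",
				  ALT_NOT(X86_FEATURE_VCPUPREEMPT))
		      : "=a" (ret) : "D" (cpu) : "memory");
	return ret;
}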
arch/x86/include/asm/cpufeatures.h
@@ -236,6 +236,8 @@
 #define X86_FEATURE_EPT_AD		( 8*32+17) /* Intel Extended Page Table access-dirty bit */
 #define X86_FEATURE_VMCALL		( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
 #define X86_FEATURE_VMW_VMMCALL	( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
+#define X86_FEATURE_PVUNLOCK		( 8*32+20) /* "" PV unlock function */
+#define X86_FEATURE_VCPUPREEMPT	( 8*32+21) /* "" PV vcpu_is_preempted function */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
 #define X86_FEATURE_FSGSBASE		( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/

arch/x86/include/asm/paravirt.h
@@ -45,6 +45,10 @@ static inline u64 paravirt_steal_clock(int cpu)
 	return static_call(pv_steal_clock)(cpu);
 }
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init paravirt_set_cap(void);
+#endif
+
 /* The paravirtualized I/O functions */
 static inline void slow_down_io(void)
 {
@@ -809,5 +813,11 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 {
 }
 #endif
+
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static inline void paravirt_set_cap(void)
+{
+}
+#endif
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PARAVIRT_H */

arch/x86/kernel/alternative.c
@@ -28,6 +28,7 @@
 #include <asm/insn.h>
 #include <asm/io.h>
 #include <asm/fixmap.h>
+#include <asm/paravirt.h>
 
 int __read_mostly alternatives_patched;
@@ -733,6 +734,33 @@ void __init alternative_instructions(void)
 	 * patching.
 	 */
 
+	/*
+	 * Paravirt patching and alternative patching can be combined to
+	 * replace a function call with a short direct code sequence (e.g.
+	 * by setting a constant return value instead of doing that in an
+	 * external function).
+	 * In order to make this work the following sequence is required:
+	 * 1. set (artificial) features depending on used paravirt
+	 *    functions which can later influence alternative patching
+	 * 2. apply paravirt patching (generally replacing an indirect
+	 *    function call with a direct one)
+	 * 3. apply alternative patching (e.g. replacing a direct function
+	 *    call with a custom code sequence)
+	 * Doing paravirt patching after alternative patching would clobber
+	 * the optimization of the custom code with a function call again.
+	 */
+	paravirt_set_cap();
+
+	/*
+	 * First patch paravirt functions, such that we overwrite the indirect
+	 * call with the direct call.
+	 */
+	apply_paravirt(__parainstructions, __parainstructions_end);
+
+	/*
+	 * Then patch alternatives, such that those paravirt calls that are in
+	 * alternatives can be overwritten by their immediate fragments.
+	 */
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
 #ifdef CONFIG_SMP
@@ -751,8 +779,6 @@ void __init alternative_instructions(void)
 	}
 #endif
 
-	apply_paravirt(__parainstructions, __parainstructions_end);
-
 	restart_nmi();
 	alternatives_patched = 1;
 }
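
To make the ordering requirement concrete, here is a minimal sketch of a pv unlock call site as later patches in this series shape it (the stub symbol name is hypothetical, and the real implementation uses the pv op macros and callee-save thunks rather than open-coded asm): apply_paravirt() turns the indirect pv_ops call into a direct call, and apply_alternatives() may then replace the whole call with the one-instruction native unlock, which is why it has to run second.

#include <asm/alternative.h>
#include <asm/cpufeatures.h>

struct qspinlock;	/* opaque here; the real type lives elsewhere in the kernel */

static __always_inline void pv_unlock_sketch(struct qspinlock *lock)
{
	/*
	 * In the real pv macros this call site is also recorded in
	 * .parainstructions, so step 2 (apply_paravirt) can rewrite the
	 * indirect pv_ops call into a direct call to the active backend.
	 * Step 3 (apply_alternatives) then replaces the call with the
	 * native "movb $0, (%rdi)" when X86_FEATURE_PVUNLOCK is not
	 * forced, i.e. when no PV unlock implementation is in use.
	 * Reversing steps 2 and 3 would patch the movb back into a call,
	 * undoing the optimization.
	 */
	asm volatile (ALTERNATIVE("call pv_queued_spin_unlock_stub",	/* hypothetical symbol */
				  "movb $0, (%%rdi)",
				  ALT_NOT(X86_FEATURE_PVUNLOCK))
		      : : "D" (lock) : "memory");
}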

arch/x86/kernel/paravirt-spinlocks.c
@@ -32,3 +32,12 @@ bool pv_is_native_vcpu_is_preempted(void)
 	return pv_ops.lock.vcpu_is_preempted.func ==
 		__raw_callee_save___native_vcpu_is_preempted;
 }
+
+void __init paravirt_set_cap(void)
+{
+	if (!pv_is_native_spin_unlock())
+		setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);
+
+	if (!pv_is_native_vcpu_is_preempted())
+		setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
+}
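
For reference, a small sketch (the function name and the prints are illustrative only, assuming the usual printk/cpufeature includes) of what the forced bits mean once paravirt_set_cap() has run, before apply_alternatives(): they behave like any other synthetic CPU feature, so they can be queried with boot_cpu_has() and, after this series, used as ALTERNATIVE selectors.

static void __init report_pv_patch_caps(void)
{
	/* Forced above when the queued_spin_unlock pv op is not the native one. */
	if (boot_cpu_has(X86_FEATURE_PVUNLOCK))
		pr_info("paravirt: PV queued_spin_unlock in use\n");

	/* Forced above when the vcpu_is_preempted pv op is not the native one. */
	if (boot_cpu_has(X86_FEATURE_VCPUPREEMPT))
		pr_info("paravirt: PV vcpu_is_preempted in use\n");
}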