Commit 19f10315 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Stub out enable_evmcs static key for CONFIG_HYPERV=n

Wrap enable_evmcs in a helper and stub it out when CONFIG_HYPERV=n in
order to eliminate the static branch nop placeholders.  clang-14 is clever
enough to elide the nop, but gcc-12 is not.  Stubbing out the key reduces
the size of kvm-intel.ko by ~7.5% (200KiB) when compiled with gcc-12
(there are a _lot_ of VMCS accesses throughout KVM).
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20230211003534.564198-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 68ac4221
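The pattern in this patch is a general kernel idiom: keep the static key behind an __always_inline helper in the CONFIG_FOO=y half of a header, and stub the helper out as a compile-time false when CONFIG_FOO=n, so callers' branches become dead code the compiler drops entirely instead of static-branch NOP placeholders. A minimal sketch of the idea, using hypothetical CONFIG_FOO / foo_enabled names rather than the actual KVM code:

#include <linux/jump_label.h>

#if IS_ENABLED(CONFIG_FOO)
DECLARE_STATIC_KEY_FALSE(foo_enabled);

static __always_inline bool foo_is_enabled(void)
{
	/* Real static branch: patched at runtime, a NOP in the fast path. */
	return static_branch_unlikely(&foo_enabled);
}
#else
/* Constant false: every "if (foo_is_enabled())" block is eliminated. */
static __always_inline bool foo_is_enabled(void) { return false; }
#endif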
arch/x86/kvm/vmx/hyperv.c
@@ -118,8 +118,6 @@
 #define EVMCS1_SUPPORTED_VMFUNC (0)
 
-DEFINE_STATIC_KEY_FALSE(enable_evmcs);
-
 #define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
 #define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \
 	{EVMCS1_OFFSET(name), clean_field}
@@ -611,6 +609,8 @@ int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
 }
 
 #if IS_ENABLED(CONFIG_HYPERV)
+DEFINE_STATIC_KEY_FALSE(enable_evmcs);
+
 /*
  * KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption
  * is: in case a feature has corresponding fields in eVMCS described and it was
arch/x86/kvm/vmx/hyperv.h
@@ -16,8 +16,6 @@
 struct vmcs_config;
 
-DECLARE_STATIC_KEY_FALSE(enable_evmcs);
-
 #define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
 
 #define KVM_EVMCS_VERSION 1
@@ -69,6 +67,13 @@ static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
 
 #if IS_ENABLED(CONFIG_HYPERV)
+DECLARE_STATIC_KEY_FALSE(enable_evmcs);
+
+static __always_inline bool kvm_is_using_evmcs(void)
+{
+	return static_branch_unlikely(&enable_evmcs);
+}
+
 static __always_inline int get_evmcs_offset(unsigned long field,
 					    u16 *clean_field)
 {
@@ -158,6 +163,7 @@ static inline void evmcs_load(u64 phys_addr)
 void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
 #else /* !IS_ENABLED(CONFIG_HYPERV) */
+static __always_inline bool kvm_is_using_evmcs(void) { return false; }
 static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
 static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
 static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
arch/x86/kvm/vmx/vmx.c
@@ -595,7 +595,7 @@ static void hv_reset_evmcs(void)
 {
 	struct hv_vp_assist_page *vp_ap;
 
-	if (!static_branch_unlikely(&enable_evmcs))
+	if (!kvm_is_using_evmcs())
 		return;
 
 	/*
@@ -2816,8 +2816,7 @@ static int vmx_hardware_enable(void)
 	 * This can happen if we hot-added a CPU but failed to allocate
 	 * VP assist page for it.
 	 */
-	if (static_branch_unlikely(&enable_evmcs) &&
-	    !hv_get_vp_assist_page(cpu))
+	if (kvm_is_using_evmcs() && !hv_get_vp_assist_page(cpu))
 		return -EFAULT;
 
 	intel_pt_handle_vmx(1);
@@ -2869,7 +2868,7 @@ struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
 	memset(vmcs, 0, vmcs_config.size);
 
 	/* KVM supports Enlightened VMCS v1 only */
-	if (static_branch_unlikely(&enable_evmcs))
+	if (kvm_is_using_evmcs())
 		vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
 	else
 		vmcs->hdr.revision_id = vmcs_config.revision_id;
@@ -2964,7 +2963,7 @@ static __init int alloc_kvm_area(void)
 		 * still be marked with revision_id reported by
 		 * physical CPU.
 		 */
-		if (static_branch_unlikely(&enable_evmcs))
+		if (kvm_is_using_evmcs())
 			vmcs->hdr.revision_id = vmcs_config.revision_id;
 
 		per_cpu(vmxarea, cpu) = vmcs;
@@ -3931,7 +3930,7 @@ static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
 	 * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR
 	 * bitmap has changed.
 	 */
-	if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs)) {
+	if (kvm_is_using_evmcs()) {
 		struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
 
 		if (evmcs->hv_enlightenments_control.msr_bitmap)
@@ -7310,7 +7309,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx));
 
 	/* All fields are clean at this point */
-	if (static_branch_unlikely(&enable_evmcs)) {
+	if (kvm_is_using_evmcs()) {
 		current_evmcs->hv_clean_fields |=
 			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
@@ -7440,7 +7439,7 @@ static int vmx_vcpu_create(struct kvm_vcpu *vcpu)
 	 * feature only for vmcs01, KVM currently isn't equipped to realize any
 	 * performance benefits from enabling it for vmcs02.
 	 */
-	if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) &&
+	if (kvm_is_using_evmcs() &&
 	    (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
 		struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
arch/x86/kvm/vmx/vmx_ops.h
@@ -147,7 +147,7 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
 static __always_inline u16 vmcs_read16(unsigned long field)
 {
 	vmcs_check16(field);
-	if (static_branch_unlikely(&enable_evmcs))
+	if (kvm_is_using_evmcs())
 		return evmcs_read16(field);
 	return __vmcs_readl(field);
 }
@@ -155,7 +155,7 @@ static __always_inline u16 vmcs_read16(unsigned long field)
 static __always_inline u32 vmcs_read32(unsigned long field)
 {
 	vmcs_check32(field);
-	if (static_branch_unlikely(&enable_evmcs))
+	if (kvm_is_using_evmcs())
 		return evmcs_read32(field);
 	return __vmcs_readl(field);
 }
@@ -163,7 +163,7 @@ static __always_inline u32 vmcs_read32(unsigned long field)
 static __always_inline u64 vmcs_read64(unsigned long field)
 {
 	vmcs_check64(field);
-	if (static_branch_unlikely(&enable_evmcs))
+	if (kvm_is_using_evmcs())
 		return evmcs_read64(field);
 #ifdef CONFIG_X86_64
 	return __vmcs_readl(field);
@@ -175,7 +175,7 @@ static __always_inline u64 vmcs_read64(unsigned long field)
 static __always_inline unsigned long vmcs_readl(unsigned long field)
 {
 	vmcs_checkl(field);
-	if (static_branch_unlikely(&enable_evmcs))
+	if (kvm_is_using_evmcs())
 		return evmcs_read64(field);
 	return __vmcs_readl(field);
 }
@@ -222,7 +222,7 @@ static __always_inline void __vmcs_writel(unsigned long field, unsigned long val
 static __always_inline void vmcs_write16(unsigned long field, u16 value)
 {
 	vmcs_check16(field);
-	if (static_branch_unlikely(&enable_evmcs))
+	if (kvm_is_using_evmcs())
 		return evmcs_write16(field, value);
 	__vmcs_writel(field, value);
@@ -231,7 +231,7 @@ static __always_inline void vmcs_write16(unsigned long field, u16 value)
 static __always_inline void vmcs_write32(unsigned long field, u32 value)
 {
 	vmcs_check32(field);
-	if (static_branch_unlikely(&enable_evmcs))
+	if (kvm_is_using_evmcs())
 		return evmcs_write32(field, value);
 	__vmcs_writel(field, value);
@@ -240,7 +240,7 @@ static __always_inline void vmcs_write32(unsigned long field, u32 value)
 static __always_inline void vmcs_write64(unsigned long field, u64 value)
 {
 	vmcs_check64(field);
-	if (static_branch_unlikely(&enable_evmcs))
+	if (kvm_is_using_evmcs())
 		return evmcs_write64(field, value);
 	__vmcs_writel(field, value);
@@ -252,7 +252,7 @@ static __always_inline void vmcs_write64(unsigned long field, u64 value)
 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
 {
 	vmcs_checkl(field);
-	if (static_branch_unlikely(&enable_evmcs))
+	if (kvm_is_using_evmcs())
 		return evmcs_write64(field, value);
 	__vmcs_writel(field, value);
@@ -262,7 +262,7 @@ static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
 {
 	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
 			 "vmcs_clear_bits does not support 64-bit fields");
-	if (static_branch_unlikely(&enable_evmcs))
+	if (kvm_is_using_evmcs())
 		return evmcs_write32(field, evmcs_read32(field) & ~mask);
 	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
@@ -272,7 +272,7 @@ static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
 {
 	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
 			 "vmcs_set_bits does not support 64-bit fields");
-	if (static_branch_unlikely(&enable_evmcs))
+	if (kvm_is_using_evmcs())
 		return evmcs_write32(field, evmcs_read32(field) | mask);
 	__vmcs_writel(field, __vmcs_readl(field) | mask);
@@ -289,7 +289,7 @@ static inline void vmcs_load(struct vmcs *vmcs)
 {
 	u64 phys_addr = __pa(vmcs);
 
-	if (static_branch_unlikely(&enable_evmcs))
+	if (kvm_is_using_evmcs())
 		return evmcs_load(phys_addr);
 
 	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
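For illustration: with the CONFIG_HYPERV=n stub in place, kvm_is_using_evmcs() is a compile-time false, so an accessor like vmcs_read16() above effectively compiles as if it were written like this (a sketch of the resulting code, not a line from the patch):

static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	/* eVMCS branch, and the NOP gcc-12 emitted for it, are gone. */
	return __vmcs_readl(field);
}

Repeated across KVM's many VMCS accesses, that is what recovers the ~200KiB cited in the commit message.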