Commit 7d6bbebb authored by David Woodhouse

KVM: x86/xen: Add kvm_xen_enabled static key

The code paths for Xen support are all fairly lightweight, but if we hide
them behind this, they're even *more* lightweight for any system which
isn't actually hosting Xen guests.
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
parent 78e9878c
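Background, since the mechanism is easy to miss in the diff below: a deferred static key compiles the guarded branch down to a NOP until someone flips it on, and rate-limits the expensive re-patching when it is flipped off. Here is a minimal sketch of the pattern under those assumptions; the names (example_enabled, example_fast_path and friends) are illustrative, not from this patch:

#include <linux/kernel.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/printk.h>

/* False by default; guarded branches are patched out entirely. */
DEFINE_STATIC_KEY_DEFERRED_FALSE(example_enabled, HZ);

static void example_fast_path(void)
{
	/* Costs a NOP, not a load-and-test, while the key is disabled. */
	if (static_branch_unlikely(&example_enabled.key))
		pr_info("rare path taken\n");
}

static void example_enable(void)
{
	static_branch_inc(&example_enabled.key);	/* refcounted enable */
}

static void example_disable(void)
{
	/* Deferred decrement: patching the branch back to a NOP is
	 * delayed (here by HZ jiffies), so rapid enable/disable cycles
	 * don't thrash the text-patching machinery. */
	static_branch_slow_dec_deferred(&example_enabled);
}

The refcounting matters because the key is global while the configuration is per-VM: each VM that enables a Xen MSR takes a reference, as the hunks below show.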
arch/x86/kvm/x86.c
@@ -7988,6 +7988,7 @@ void kvm_arch_exit(void)
 	kvm_mmu_module_exit();
 	free_percpu(user_return_msrs);
 	kmem_cache_destroy(x86_fpu_cache);
+	WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
 }
 
 static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason)
@@ -10581,6 +10582,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
 	kvm_mmu_uninit_vm(kvm);
 	kvm_page_track_cleanup(kvm);
+	kvm_xen_destroy_vm(kvm);
 	kvm_hv_destroy_vm(kvm);
 }
 
arch/x86/kvm/xen.c
@@ -16,6 +16,8 @@
 
 #include "trace.h"
 
+DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
+
 int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -93,10 +95,25 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
 	    xhc->blob_size_32 || xhc->blob_size_64))
 		return -EINVAL;
 
+	mutex_lock(&kvm->lock);
+
+	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
+		static_branch_inc(&kvm_xen_enabled.key);
+	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
+		static_branch_slow_dec_deferred(&kvm_xen_enabled);
+
 	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
+
+	mutex_unlock(&kvm->lock);
 	return 0;
 }
 
+void kvm_xen_destroy_vm(struct kvm *kvm)
+{
+	if (kvm->arch.xen_hvm_config.msr)
+		static_branch_slow_dec_deferred(&kvm_xen_enabled);
+}
+
 static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
 {
 	kvm_rax_write(vcpu, result);
arch/x86/kvm/xen.h
@@ -9,14 +9,20 @@
 #ifndef __ARCH_X86_KVM_XEN_H__
 #define __ARCH_X86_KVM_XEN_H__
 
+#include <linux/jump_label_ratelimit.h>
+
+extern struct static_key_false_deferred kvm_xen_enabled;
+
 int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
 int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
 int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
+void kvm_xen_destroy_vm(struct kvm *kvm);
 
 static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
 {
-	return kvm->arch.xen_hvm_config.flags &
-		KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL;
+	return static_branch_unlikely(&kvm_xen_enabled.key) &&
+		(kvm->arch.xen_hvm_config.flags &
+		 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
 }
 
 #endif /* __ARCH_X86_KVM_XEN_H__ */
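With the key folded into kvm_xen_hypercall_enabled(), a hot path can now bail out through a patched-out branch before ever loading kvm->arch.xen_hvm_config. A hedged sketch of how such a guard is consumed (the actual call site, in kvm_emulate_hypercall() from an earlier patch in this series, is not part of this commit):

	/* On a host with no Xen guests this whole test is a single NOP. */
	if (kvm_xen_hypercall_enabled(vcpu->kvm))
		return kvm_xen_hypercall(vcpu);

Note the ordering inside the inline: the static-branch test comes first, so the flags word is only read once at least one VM on the host has a Xen MSR configured.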