Commit 562b6b08 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Consolidate VM allocation and free for VMX and SVM

Move the VM allocation and free code to common x86 as the logic is
more or less identical across SVM and VMX.

Note, although hyperv.hv_pa_pg is part of the common kvm->arch, it's
(currently) only allocated by VMX VMs.  But, since kfree() plays nice
when passed a NULL pointer, the superfluous call for SVM is harmless
and avoids future churn if SVM gains support for Hyper-V's direct TLB
flush.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
[Make vm_size a field instead of a function. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1a625056
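
The kfree() note above relies on a documented guarantee: kfree(NULL) is a
no-op. Since hv_pa_pg is only ever allocated for VMX VMs, the consolidated
free path is safe for SVM too. For reference, this is the same code the
final hunk below adds to x86.c, annotated here with comments:

void kvm_arch_free_vm(struct kvm *kvm)
{
        /* NULL for SVM VMs, which never allocate hv_pa_pg: a no-op. */
        kfree(kvm->arch.hyperv.hv_pa_pg);

        /* Pairs with the __vmalloc() in kvm_arch_alloc_vm(). */
        vfree(kvm);
}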
arch/x86/include/asm/kvm_host.h
@@ -1059,8 +1059,7 @@ struct kvm_x86_ops {
 	bool (*has_emulated_msr)(int index);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
-	struct kvm *(*vm_alloc)(void);
-	void (*vm_free)(struct kvm *);
+	unsigned int vm_size;
 	int (*vm_init)(struct kvm *kvm);
 	void (*vm_destroy)(struct kvm *kvm);
@@ -1278,13 +1277,10 @@ extern struct kmem_cache *x86_fpu_cache;
 #define __KVM_HAVE_ARCH_VM_ALLOC
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
-	return kvm_x86_ops->vm_alloc();
-}
-
-static inline void kvm_arch_free_vm(struct kvm *kvm)
-{
-	return kvm_x86_ops->vm_free(kvm);
+	return __vmalloc(kvm_x86_ops->vm_size,
+			 GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
 }
+void kvm_arch_free_vm(struct kvm *kvm);
 
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
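
The reason common code can return a vendor-sized allocation as a plain
struct kvm * is that struct kvm_svm and struct kvm_vmx both embed struct kvm
as their first member, so the two pointers alias; the
BUILD_BUG_ON(offsetof(..., kvm) != 0) checks in the vendor allocators removed
below asserted exactly this invariant. A minimal userspace sketch of the
pattern (struct names mirror the kernel's, fields abbreviated, container_of()
reimplemented locally):

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kvm { int common_state; };	/* common x86 view */

struct kvm_svm {
	struct kvm kvm;			/* must be first: offset 0 */
	int vendor_state;		/* SVM-specific fields, abbreviated */
};

int main(void)
{
	/* Common code allocates vm_size bytes but only sees "struct kvm *"... */
	struct kvm *kvm = calloc(1, sizeof(struct kvm_svm));

	/* ...vendor code recovers its view, as to_kvm_svm() does in svm.c. */
	struct kvm_svm *svm = container_of(kvm, struct kvm_svm, kvm);

	assert((void *)svm == (void *)kvm);	/* holds because the offset is 0 */
	free(kvm);
	return 0;
}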
arch/x86/kvm/svm.c
@@ -1944,19 +1944,6 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
 	kfree(region);
 }
 
-static struct kvm *svm_vm_alloc(void)
-{
-	BUILD_BUG_ON(offsetof(struct kvm_svm, kvm) != 0);
-	return __vmalloc(sizeof(struct kvm_svm),
-			 GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
-}
-
-static void svm_vm_free(struct kvm *kvm)
-{
-	vfree(kvm);
-}
-
 static void sev_vm_destroy(struct kvm *kvm)
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
@@ -7395,8 +7382,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.vcpu_free = svm_free_vcpu,
 	.vcpu_reset = svm_vcpu_reset,
 
-	.vm_alloc = svm_vm_alloc,
-	.vm_free = svm_vm_free,
+	.vm_size = sizeof(struct kvm_svm),
 	.vm_init = svm_vm_init,
 	.vm_destroy = svm_vm_destroy,
arch/x86/kvm/vmx/vmx.c
@@ -6679,20 +6679,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx_complete_interrupts(vmx);
 }
 
-static struct kvm *vmx_vm_alloc(void)
-{
-	BUILD_BUG_ON(offsetof(struct kvm_vmx, kvm) != 0);
-	return __vmalloc(sizeof(struct kvm_vmx),
-			 GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
-}
-
-static void vmx_vm_free(struct kvm *kvm)
-{
-	kfree(kvm->arch.hyperv.hv_pa_pg);
-	vfree(kvm);
-}
-
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7835,9 +7821,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.cpu_has_accelerated_tpr = report_flexpriority,
 	.has_emulated_msr = vmx_has_emulated_msr,
 
+	.vm_size = sizeof(struct kvm_vmx),
 	.vm_init = vmx_vm_init,
-	.vm_alloc = vmx_vm_alloc,
-	.vm_free = vmx_vm_free,
 
 	.vcpu_create = vmx_create_vcpu,
 	.vcpu_free = vmx_free_vcpu,
arch/x86/kvm/x86.c
@@ -9622,6 +9622,13 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 	kvm_x86_ops->sched_in(vcpu, cpu);
 }
 
+void kvm_arch_free_vm(struct kvm *kvm)
+{
+	kfree(kvm->arch.hyperv.hv_pa_pg);
+	vfree(kvm);
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
 	if (type)
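
Taken together, the patch reduces each vendor module's per-VM memory
obligation to a single size report. As an illustration only (a hypothetical
vendor "foo", not part of this patch), the required glue after this change
shrinks to:

/* Hypothetical vendor module "foo"; all names here are made up. */
struct kvm_foo {
	struct kvm kvm;			/* must remain the first member */
	unsigned long foo_state;	/* vendor-private data */
};

static struct kvm_x86_ops foo_x86_ops __ro_after_init = {
	.vm_size = sizeof(struct kvm_foo),	/* common code does the rest */
	/* .vm_init, .vm_destroy, ... as for SVM/VMX above */
};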