Commit c16f862d authored by Rusty Russell, committed by Avi Kivity

KVM: Use kmem cache for allocating vcpus

Avi wants the allocations of vcpus centralized again.  The easiest way
is to add a "size" arg to kvm_init_arch, and expose the thus-prepared
cache to the modules.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent e7d5d76c
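
For context (not part of the commit): both arch modules embed struct kvm_vcpu at the start of their own vcpu structure, so one central cache, sized by whichever arch module registers, can serve either. Below is a minimal sketch of the allocate/free lifecycle this commit sets up, using a hypothetical vcpu_demo structure and assuming the driver's private kvm.h is in scope; it is illustrative only, not code from the commit.

#include <linux/slab.h>
#include "kvm.h"	/* private driver header declaring struct kvm_vcpu */

/* Exported by kvm_main.c in this commit via EXPORT_SYMBOL_GPL. */
extern struct kmem_cache *kvm_vcpu_cache;

/* Hypothetical stand-in for vcpu_svm/vcpu_vmx: struct kvm_vcpu sits
 * first, so container_of-style conversions work the same way. */
struct vcpu_demo {
	struct kvm_vcpu vcpu;
	unsigned long scratch[8];
};

static struct vcpu_demo *demo_create_vcpu(void)
{
	/* Zeroed object from the central cache; kvm_init_arch() created the
	 * cache with the arch-supplied size and __alignof__(struct kvm_vcpu),
	 * which is how the fx_save alignment requirement is met. */
	return kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
}

static void demo_free_vcpu(struct vcpu_demo *v)
{
	kmem_cache_free(kvm_vcpu_cache, v);
}

kmem_cache_free() is the standard counterpart for objects allocated this way; the hunks below show only the allocation side.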
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -141,6 +141,7 @@ struct kvm_mmu_page {
 };
 
 struct kvm_vcpu;
+extern struct kmem_cache *kvm_vcpu_cache;
 
 /*
  * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
@@ -483,7 +484,8 @@ extern struct kvm_arch_ops *kvm_arch_ops;
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
-int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
+int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
+		  struct module *module);
 void kvm_exit_arch(void);
 
 int kvm_mmu_module_init(void);
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -53,6 +53,8 @@ static LIST_HEAD(vm_list);
 static cpumask_t cpus_hardware_enabled;
 
 struct kvm_arch_ops *kvm_arch_ops;
+struct kmem_cache *kvm_vcpu_cache;
+EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
 
 static __read_mostly struct preempt_ops kvm_preempt_ops;
@@ -3104,7 +3106,8 @@ static void kvm_sched_out(struct preempt_notifier *pn,
 	kvm_arch_ops->vcpu_put(vcpu);
 }
 
-int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
+int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
+		  struct module *module)
 {
 	int r;
@@ -3142,6 +3145,14 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
 	if (r)
 		goto out_free_3;
 
+	/* A kmem cache lets us meet the alignment requirements of fx_save. */
+	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
+					   __alignof__(struct kvm_vcpu), 0, 0);
+	if (!kvm_vcpu_cache) {
+		r = -ENOMEM;
+		goto out_free_4;
+	}
+
 	kvm_chardev_ops.owner = module;
 
 	r = misc_register(&kvm_dev);
@@ -3156,6 +3167,8 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
 	return r;
 
 out_free:
+	kmem_cache_destroy(kvm_vcpu_cache);
+out_free_4:
 	sysdev_unregister(&kvm_sysdev);
 out_free_3:
 	sysdev_class_unregister(&kvm_sysdev_class);
@@ -3173,6 +3186,7 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
 void kvm_exit_arch(void)
 {
 	misc_deregister(&kvm_dev);
+	kmem_cache_destroy(kvm_vcpu_cache);
 	sysdev_unregister(&kvm_sysdev);
 	sysdev_class_unregister(&kvm_sysdev_class);
 	unregister_reboot_notifier(&kvm_reboot_notifier);
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -577,7 +577,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	struct page *page;
 	int err;
 
-	svm = kzalloc(sizeof *svm, GFP_KERNEL);
+	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
 	if (!svm) {
 		err = -ENOMEM;
 		goto out;
@@ -1849,7 +1849,8 @@ static struct kvm_arch_ops svm_arch_ops = {
 static int __init svm_init(void)
 {
-	return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
+	return kvm_init_arch(&svm_arch_ops, sizeof(struct vcpu_svm),
+			     THIS_MODULE);
 }
 
 static void __exit svm_exit(void)
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -2365,7 +2365,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 {
 	int err;
-	struct vcpu_vmx *vmx = kzalloc(sizeof(*vmx), GFP_KERNEL);
+	struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
 	int cpu;
 
 	if (!vmx)
@@ -2490,7 +2490,7 @@ static int __init vmx_init(void)
 	memset(iova, 0xff, PAGE_SIZE);
 	kunmap(vmx_io_bitmap_b);
 
-	r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
+	r = kvm_init_arch(&vmx_arch_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
 	if (r)
 		goto out1;