Commit 11ec2804 authored by Shaohua Li, committed by Avi Kivity

KVM: Convert vm lock to a mutex

This allows the kvm mmu to perform operations that may sleep, such as
memory allocation.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 15ad7146
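Before the diff itself, a minimal sketch of the pattern this conversion enables. This is not code from the patch; struct demo_vm and the demo_* names below are made-up stand-ins. The point is that a spinlock holder must never sleep, so a GFP_KERNEL allocation under the old kvm->lock was illegal, whereas a mutex holder may block, so MMU paths running under the new kvm->lock can allocate (or otherwise sleep) directly.

#include <linux/mutex.h>
#include <linux/slab.h>

/* Hypothetical stand-in for struct kvm; only the lock matters here. */
struct demo_vm {
        struct mutex lock;      /* was: spinlock_t lock */
        void *cache;
};

static void demo_vm_init(struct demo_vm *vm)
{
        mutex_init(&vm->lock);  /* was: spin_lock_init(&vm->lock) */
        vm->cache = NULL;
}

/*
 * Under a spinlock this allocation would have to use GFP_ATOMIC/GFP_NOWAIT
 * (or drop the lock first); under a mutex the holder may sleep, so
 * GFP_KERNEL is fine while the lock is held.
 */
static int demo_grow_cache(struct demo_vm *vm, size_t size)
{
        int r = 0;

        mutex_lock(&vm->lock);                          /* was: spin_lock() */
        if (!vm->cache) {
                vm->cache = kzalloc(size, GFP_KERNEL);  /* may sleep */
                if (!vm->cache)
                        r = -ENOMEM;
        }
        mutex_unlock(&vm->lock);                        /* was: spin_unlock() */
        return r;
}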
@@ -393,7 +393,7 @@ struct kvm_memory_slot {
 };

 struct kvm {
-        spinlock_t lock; /* protects everything except vcpus */
+        struct mutex lock; /* protects everything except vcpus */
         int naliases;
         struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
         int nmemslots;
...
@@ -363,7 +363,7 @@ static struct kvm *kvm_create_vm(void)
                 return ERR_PTR(-ENOMEM);

         kvm_io_bus_init(&kvm->pio_bus);
-        spin_lock_init(&kvm->lock);
+        mutex_init(&kvm->lock);
         INIT_LIST_HEAD(&kvm->active_mmu_pages);
         kvm_io_bus_init(&kvm->mmio_bus);
         spin_lock(&kvm_lock);
@@ -489,7 +489,7 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
         struct page *page;
         u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];

-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
         page = gfn_to_page(vcpu->kvm, pdpt_gfn);
         if (!page) {
                 ret = 0;
@@ -510,7 +510,7 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
         memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));

 out:
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);

         return ret;
 }
@@ -570,9 +570,9 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         kvm_arch_ops->set_cr0(vcpu, cr0);
         vcpu->cr0 = cr0;

-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
         kvm_mmu_reset_context(vcpu);
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);
         return;
 }
 EXPORT_SYMBOL_GPL(set_cr0);
@@ -611,9 +611,9 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                 return;
         }
         kvm_arch_ops->set_cr4(vcpu, cr4);
-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
         kvm_mmu_reset_context(vcpu);
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);
 }
 EXPORT_SYMBOL_GPL(set_cr4);
@@ -650,7 +650,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
         }

         vcpu->cr3 = cr3;
-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
         /*
          * Does the new cr3 value map to physical memory? (Note, we
          * catch an invalid cr3 even in real-mode, because it would
@@ -664,7 +664,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                 inject_gp(vcpu);
         else
                 vcpu->mmu.new_cr3(vcpu);
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);
 }
 EXPORT_SYMBOL_GPL(set_cr3);
@@ -741,7 +741,7 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

 raced:
-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);

         memory_config_version = kvm->memory_config_version;
         new = old = *memslot;
@@ -770,7 +770,7 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
          * Do memory allocations outside lock. memory_config_version will
          * detect any races.
          */
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);

         /* Deallocate if slot is being removed */
         if (!npages)
@@ -809,10 +809,10 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                 memset(new.dirty_bitmap, 0, dirty_bytes);
         }

-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);

         if (memory_config_version != kvm->memory_config_version) {
-                spin_unlock(&kvm->lock);
+                mutex_unlock(&kvm->lock);
                 kvm_free_physmem_slot(&new, &old);
                 goto raced;
         }
@@ -830,13 +830,13 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
         kvm_flush_remote_tlbs(kvm);

-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);

         kvm_free_physmem_slot(&old, &new);
         return 0;

 out_unlock:
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);
 out_free:
         kvm_free_physmem_slot(&new, &old);
 out:
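The raced / memory_config_version pattern above is an optimistic retry: record the version, drop the mutex, do the sleeping allocations, retake the mutex, and start over if another thread changed the memory configuration in the meantime. A rough sketch of that shape, continuing the made-up demo_* example from the top (the version field and demo_set_region() are likewise illustrative, not the kernel code):

struct demo_vm_regions {
        struct mutex lock;
        unsigned long config_version;   /* bumped on every config change */
};

static int demo_set_region(struct demo_vm_regions *vm, size_t size)
{
        unsigned long ver;
        void *buf;

retry:
        mutex_lock(&vm->lock);
        ver = vm->config_version;
        mutex_unlock(&vm->lock);

        /* Sleeping allocation done outside the lock. */
        buf = kzalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        mutex_lock(&vm->lock);
        if (ver != vm->config_version) {
                /* Config changed underneath us: throw the work away, retry. */
                mutex_unlock(&vm->lock);
                kfree(buf);
                goto retry;
        }
        /* ... commit the new region using buf ... */
        vm->config_version++;
        mutex_unlock(&vm->lock);
        return 0;
}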
@@ -854,14 +854,14 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         int n;
         unsigned long any = 0;

-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);

         /*
          * Prevent changes to guest memory configuration even while the lock
          * is not taken.
          */
         ++kvm->busy;
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);

         r = -EINVAL;
         if (log->slot >= KVM_MEMORY_SLOTS)
                 goto out;
@@ -880,18 +880,18 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                 goto out;

-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);
         kvm_mmu_slot_remove_write_access(kvm, log->slot);
         kvm_flush_remote_tlbs(kvm);
         memset(memslot->dirty_bitmap, 0, n);
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);

         r = 0;

 out:
-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);
         --kvm->busy;
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);

         return r;
 }
@@ -921,7 +921,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
             < alias->target_phys_addr)
                 goto out;

-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);

         p = &kvm->aliases[alias->slot];
         p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -935,7 +935,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
         kvm_mmu_zap_all(kvm);

-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);

         return 0;
@@ -1900,12 +1900,12 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
         vcpu->pio.cur_count = now;

         for (i = 0; i < nr_pages; ++i) {
-                spin_lock(&vcpu->kvm->lock);
+                mutex_lock(&vcpu->kvm->lock);
                 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
                 if (page)
                         get_page(page);
                 vcpu->pio.guest_pages[i] = page;
-                spin_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->lock);
                 if (!page) {
                         inject_gp(vcpu);
                         free_pio_guest_pages(vcpu);
@@ -2298,13 +2298,13 @@ static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
         gpa_t gpa;

         vcpu_load(vcpu);
-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
         gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
         tr->physical_address = gpa;
         tr->valid = gpa != UNMAPPED_GVA;
         tr->writeable = 1;
         tr->usermode = 0;
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);
         vcpu_put(vcpu);

         return 0;
@@ -2426,14 +2426,14 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
         if (r < 0)
                 goto free_vcpu;

-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);
         if (kvm->vcpus[n]) {
                 r = -EEXIST;
-                spin_unlock(&kvm->lock);
+                mutex_unlock(&kvm->lock);
                 goto mmu_unload;
         }
         kvm->vcpus[n] = vcpu;
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);

         /* Now it's all set up, let userspace reach it */
         r = create_vcpu_fd(vcpu);
@@ -2442,9 +2442,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
         return r;

 unlink:
-        spin_lock(&kvm->lock);
+        mutex_lock(&kvm->lock);
         kvm->vcpus[n] = NULL;
-        spin_unlock(&kvm->lock);
+        mutex_unlock(&kvm->lock);

 mmu_unload:
         vcpu_load(vcpu);
@@ -2945,8 +2945,7 @@ static void decache_vcpus_on_cpu(int cpu)
         int i;

         spin_lock(&kvm_lock);
-        list_for_each_entry(vm, &vm_list, vm_list) {
-                spin_lock(&vm->lock);
+        list_for_each_entry(vm, &vm_list, vm_list)
                 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                         vcpu = vm->vcpus[i];
                         if (!vcpu)
@@ -2967,8 +2966,6 @@ static void decache_vcpus_on_cpu(int cpu)
                                 mutex_unlock(&vcpu->mutex);
                         }
                 }
-                spin_unlock(&vm->lock);
-        }
         spin_unlock(&kvm_lock);
 }
...
@@ -275,10 +275,9 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
         r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
         kvm_mmu_free_some_pages(vcpu);
         if (r < 0) {
-                spin_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->lock);
                 r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
-                spin_lock(&vcpu->kvm->lock);
-                kvm_mmu_free_some_pages(vcpu);
+                mutex_lock(&vcpu->kvm->lock);
         }
         return r;
 }
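This hunk is the main beneficiary of the new mutex: the cache top-up first tries a non-sleeping GFP_NOWAIT allocation under kvm->lock and, only if that fails, drops the lock, retries with GFP_KERNEL (which may sleep), and retakes the lock. A rough sketch of that retry shape, again using the made-up demo_* names rather than the kernel's own helpers (the caller is assumed to already hold vm->lock, as kvm_mmu_load() does below):

static int demo_try_fill(struct demo_vm *vm, gfp_t gfp)
{
        /* Stand-in for __mmu_topup_memory_caches(): allocate with the given flags. */
        void *p = kzalloc(64, gfp);

        if (!p)
                return -ENOMEM;
        kfree(p);       /* a real cache would keep the object */
        return 0;
}

/* Called with vm->lock held. */
static int demo_topup(struct demo_vm *vm)
{
        int r;

        /* Fast path: no sleeping while the lock is held. */
        r = demo_try_fill(vm, GFP_NOWAIT);
        if (r < 0) {
                /* Slow path: a mutex holder may drop the lock and sleep. */
                mutex_unlock(&vm->lock);
                r = demo_try_fill(vm, GFP_KERNEL);
                mutex_lock(&vm->lock);
        }
        return r;
}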
@@ -1069,7 +1068,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
         int r;

-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);
         r = mmu_topup_memory_caches(vcpu);
         if (r)
                 goto out;
@@ -1077,7 +1076,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
         kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
         kvm_mmu_flush_tlb(vcpu);
 out:
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);
         return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_load);
...
@@ -941,21 +941,21 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         if (is_external_interrupt(exit_int_info))
                 push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

-        spin_lock(&vcpu->kvm->lock);
+        mutex_lock(&vcpu->kvm->lock);

         fault_address = svm->vmcb->control.exit_info_2;
         error_code = svm->vmcb->control.exit_info_1;
         r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
         if (r < 0) {
-                spin_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->lock);
                 return r;
         }
         if (!r) {
-                spin_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->lock);
                 return 1;
         }
         er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
-        spin_unlock(&vcpu->kvm->lock);
+        mutex_unlock(&vcpu->kvm->lock);

         switch (er) {
         case EMULATE_DONE:
...
@@ -1711,19 +1711,19 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         if (is_page_fault(intr_info)) {
                 cr2 = vmcs_readl(EXIT_QUALIFICATION);

-                spin_lock(&vcpu->kvm->lock);
+                mutex_lock(&vcpu->kvm->lock);
                 r = kvm_mmu_page_fault(vcpu, cr2, error_code);
                 if (r < 0) {
-                        spin_unlock(&vcpu->kvm->lock);
+                        mutex_unlock(&vcpu->kvm->lock);
                         return r;
                 }
                 if (!r) {
-                        spin_unlock(&vcpu->kvm->lock);
+                        mutex_unlock(&vcpu->kvm->lock);
                         return 1;
                 }

                 er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
-                spin_unlock(&vcpu->kvm->lock);
+                mutex_unlock(&vcpu->kvm->lock);

                 switch (er) {
                 case EMULATE_DONE:
...