Commit 5a4c9288 authored by Marcelo Tosatti, committed by Avi Kivity

KVM: mmu_shrink: kvm_mmu_zap_page requires slots_lock to be held

kvm_mmu_zap_page() needs slots_lock held (rmap_remove() -> gfn_to_memslot(),
for example).

Since the kvm_lock spinlock is held in mmu_shrink(), sleeping is not allowed
there, so take slots_lock with a non-blocking down_read_trylock() and skip
the VM if the lock is contended (see the sketch after the diff below).

Untested.
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 7e37c299
@@ -1987,6 +1987,8 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int npages;
 
+		if (!down_read_trylock(&kvm->slots_lock))
+			continue;
 		spin_lock(&kvm->mmu_lock);
 		npages = kvm->arch.n_alloc_mmu_pages -
 			 kvm->arch.n_free_mmu_pages;
@@ -1999,6 +2001,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 			nr_to_scan--;
 
 		spin_unlock(&kvm->mmu_lock);
+		up_read(&kvm->slots_lock);
 	}
 	if (kvm_freed)
 		list_move_tail(&kvm_freed->vm_list, &vm_list);
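
The locking rule behind the trylock: a spinlock holder must not sleep, and down_read() on an rw_semaphore may sleep, so the shrinker takes slots_lock with down_read_trylock() and simply skips any VM whose lock is contended. Below is a minimal, self-contained sketch of that pattern in kernel-style C. struct vm_entry, shrink_one_pass() and vm_list_lock are hypothetical stand-ins for struct kvm, mmu_shrink() and kvm_lock, kept small for illustration; this is not the actual patched code.

#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for struct kvm: one entry per VM on a global list. */
struct vm_entry {
	struct list_head    vm_list;
	struct rw_semaphore slots_lock;  /* protects the memslot array */
	spinlock_t          mmu_lock;    /* protects shadow page-table state */
};

static DEFINE_SPINLOCK(vm_list_lock);  /* stand-in for kvm_lock */
static LIST_HEAD(vm_list);

static void shrink_one_pass(void)
{
	struct vm_entry *vm;

	spin_lock(&vm_list_lock);            /* spinlock held: no sleeping below */
	list_for_each_entry(vm, &vm_list, vm_list) {
		/*
		 * down_read() may sleep, which is forbidden under a spinlock,
		 * so try to take slots_lock without blocking and skip this VM
		 * if it is currently contended.
		 */
		if (!down_read_trylock(&vm->slots_lock))
			continue;

		spin_lock(&vm->mmu_lock);
		/* ... zap shadow pages here; slots_lock keeps memslots stable ... */
		spin_unlock(&vm->mmu_lock);

		up_read(&vm->slots_lock);
	}
	spin_unlock(&vm_list_lock);
}

Skipping a busy VM is an acceptable trade-off for a best-effort shrinker pass; blocking on slots_lock while holding the global spinlock would not be.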