Commit fcd1ec9c authored by Paolo Bonzini

KVM: x86/mmu: fix KVM_X86_QUIRK_SLOT_ZAP_ALL for shadow MMU

As was tried in commit 4e103134 ("KVM: x86/mmu: Zap only the relevant
pages when removing a memslot"), all shadow pages, i.e. non-leaf SPTEs,
need to be zapped.  All of the accounting for a shadow page is tied to the
memslot, i.e. the shadow page holds a reference to the memslot, for all
intents and purposes.  Deleting the memslot without removing all relevant
shadow pages, as is done when KVM_X86_QUIRK_SLOT_ZAP_ALL is disabled,
results in NULL pointer derefs when tearing down the VM.

Reintroduce from that commit the code that walks the whole memslot when
there are active shadow MMU pages.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 76f972c2
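For context on the NULL pointer dereference described above, a simplified sketch of the accounting teardown path, unaccount_shadowed() in arch/x86/kvm/mmu/mmu.c (abridged for illustration; not part of this patch):

	static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
	{
		struct kvm_memslots *slots;
		struct kvm_memory_slot *slot;
		gfn_t gfn = sp->gfn;

		kvm->arch.indirect_shadow_pages--;
		slots = kvm_memslots_for_spte_role(kvm, sp->role);
		/* Returns NULL once the memslot has been deleted... */
		slot = __gfn_to_memslot(slots, gfn);

		/* ...and both paths below dereference "slot". */
		if (sp->role.level > PG_LEVEL_4K)
			return __kvm_write_track_remove_gfn(kvm, slot, gfn);

		kvm_mmu_gfn_allow_lpage(slot, gfn);
	}

This is why every shadow page whose gfn falls in the memslot must be zapped before the memslot itself goes away.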
arch/x86/kvm/mmu/mmu.c
@@ -1884,10 +1884,14 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp)
 		if (is_obsolete_sp((_kvm), (_sp))) {			\
 		} else
 
-#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)		\
+#define for_each_gfn_valid_sp(_kvm, _sp, _gfn)				\
 	for_each_valid_sp(_kvm, _sp,					\
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
-		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
+		if ((_sp)->gfn != (_gfn)) {} else
+
+#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)		\
+	for_each_gfn_valid_sp(_kvm, _sp, _gfn)				\
+		if (!sp_has_gptes(_sp)) {} else
 
 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -7049,14 +7053,42 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 	kvm_mmu_zap_all(kvm);
 }
 
-/*
- * Zapping leaf SPTEs with memslot range when a memslot is moved/deleted.
- *
- * Zapping non-leaf SPTEs, a.k.a. not-last SPTEs, isn't required, worst
- * case scenario we'll have unused shadow pages lying around until they
- * are recycled due to age or when the VM is destroyed.
- */
-static void kvm_mmu_zap_memslot_leafs(struct kvm *kvm, struct kvm_memory_slot *slot)
+static void kvm_mmu_zap_memslot_pages_and_flush(struct kvm *kvm,
+						struct kvm_memory_slot *slot,
+						bool flush)
+{
+	LIST_HEAD(invalid_list);
+	unsigned long i;
+
+	if (list_empty(&kvm->arch.active_mmu_pages))
+		goto out_flush;
+
+	/*
+	 * Since accounting information is stored in struct kvm_arch_memory_slot,
+	 * shadow pages deletion (e.g. unaccount_shadowed()) requires that all
+	 * gfns with a shadow page have a corresponding memslot.  Do so before
+	 * the memslot goes away.
+	 */
+	for (i = 0; i < slot->npages; i++) {
+		struct kvm_mmu_page *sp;
+		gfn_t gfn = slot->base_gfn + i;
+
+		for_each_gfn_valid_sp(kvm, sp, gfn)
+			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+
+		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
+			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+			flush = false;
+			cond_resched_rwlock_write(&kvm->mmu_lock);
+		}
+	}
+
+out_flush:
+	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+}
+
+static void kvm_mmu_zap_memslot(struct kvm *kvm,
+				struct kvm_memory_slot *slot)
 {
 	struct kvm_gfn_range range = {
 		.slot = slot,
@@ -7064,11 +7096,11 @@ static void kvm_mmu_zap_memslot_leafs(struct kvm *kvm, struct kvm_memory_slot *s
 		.end = slot->base_gfn + slot->npages,
 		.may_block = true,
 	};
+	bool flush;
 
 	write_lock(&kvm->mmu_lock);
-	if (kvm_unmap_gfn_range(kvm, &range))
-		kvm_flush_remote_tlbs_memslot(kvm, slot);
+	flush = kvm_unmap_gfn_range(kvm, &range);
+	kvm_mmu_zap_memslot_pages_and_flush(kvm, slot, flush);
 	write_unlock(&kvm->mmu_lock);
 }
 
@@ -7084,7 +7116,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	if (kvm_memslot_flush_zap_all(kvm))
 		kvm_mmu_zap_all_fast(kvm);
 	else
-		kvm_mmu_zap_memslot_leafs(kvm, slot);
+		kvm_mmu_zap_memslot(kvm, slot);
 }
 
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
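For reference, the kvm_mmu_zap_memslot() path patched above is only reached when userspace has disabled KVM_X86_QUIRK_SLOT_ZAP_ALL. A minimal, untested sketch of how a VMM might do that through KVM_ENABLE_CAP (vm_fd and the helper name are illustrative; the capability and quirk constants come from <linux/kvm.h>):

	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/*
	 * Illustrative sketch: disable KVM_X86_QUIRK_SLOT_ZAP_ALL on a VM fd
	 * so that deleting a memslot no longer zaps all SPTEs.  With the
	 * quirk disabled, kvm_arch_flush_shadow_memslot() takes the
	 * kvm_mmu_zap_memslot() path instead of kvm_mmu_zap_all_fast().
	 * "vm_fd" is assumed to be an open KVM VM file descriptor; real
	 * code must check the ioctl() return value.
	 */
	static int disable_slot_zap_all_quirk(int vm_fd)
	{
		struct kvm_enable_cap cap;

		memset(&cap, 0, sizeof(cap));
		cap.cap = KVM_CAP_DISABLE_QUIRKS2;
		cap.args[0] = KVM_X86_QUIRK_SLOT_ZAP_ALL;

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}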