Commit 21fa3246 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Extract zapping of rmaps for gfn range to separate helper

Extract the zapping of rmaps, a.k.a. legacy MMU, for a gfn range to a
separate helper to clean up the unholy mess that kvm_zap_gfn_range() has
become.  In addition to deep nesting, the rmaps zapping spreads out the
declaration of several variables and is generally a mess.  Clean up the
mess now so that future work to improve the memslots implementation
doesn't need to deal with it.

Cc: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211022010005.1454978-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e8be2a5b
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5710,41 +5710,49 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
 	kvm_mmu_uninit_tdp_mmu(kvm);
 }
 
-/*
- * Invalidate (zap) SPTEs that cover GFNs from gfn_start and up to gfn_end
- * (not including it)
- */
-void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
+	const struct kvm_memory_slot *memslot;
 	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
-	int i;
 	bool flush = false;
+	gfn_t start, end;
+	int i;
 
-	write_lock(&kvm->mmu_lock);
-
-	kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
-
-	if (kvm_memslots_have_rmaps(kvm)) {
-		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-			slots = __kvm_memslots(kvm, i);
-			kvm_for_each_memslot(memslot, slots) {
-				gfn_t start, end;
-
-				start = max(gfn_start, memslot->base_gfn);
-				end = min(gfn_end, memslot->base_gfn + memslot->npages);
-				if (start >= end)
-					continue;
+	if (!kvm_memslots_have_rmaps(kvm))
+		return flush;
+
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
+		kvm_for_each_memslot(memslot, slots) {
+			start = max(gfn_start, memslot->base_gfn);
+			end = min(gfn_end, memslot->base_gfn + memslot->npages);
+			if (start >= end)
+				continue;
 
-				flush = slot_handle_level_range(kvm,
-						(const struct kvm_memory_slot *) memslot,
-						kvm_zap_rmapp, PG_LEVEL_4K,
-						KVM_MAX_HUGEPAGE_LEVEL, start,
-						end - 1, true, flush);
-			}
+			flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
+							PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
+							start, end - 1, true, flush);
 		}
 	}
 
+	return flush;
+}
+
+/*
+ * Invalidate (zap) SPTEs that cover GFNs from gfn_start and up to gfn_end
+ * (not including it)
+ */
+void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+{
+	bool flush;
+	int i;
+
+	write_lock(&kvm->mmu_lock);
+
+	kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
+
+	flush = __kvm_zap_rmaps(kvm, gfn_start, gfn_end);
+
 	if (is_tdp_mmu_enabled(kvm)) {
 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
 			flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
...
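
For readability, here is the new __kvm_zap_rmaps() helper assembled from the plus-lines of the hunk above, with short explanatory comments added (the comments are editorial glosses, not part of the patch):

static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
	const struct kvm_memory_slot *memslot;
	struct kvm_memslots *slots;
	bool flush = false;
	gfn_t start, end;
	int i;

	/* Nothing to do if the VM has no rmaps, e.g. if only the TDP MMU is in use. */
	if (!kvm_memslots_have_rmaps(kvm))
		return flush;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			/* Clamp the requested gfn range to this memslot. */
			start = max(gfn_start, memslot->base_gfn);
			end = min(gfn_end, memslot->base_gfn + memslot->npages);
			if (start >= end)
				continue;

			/*
			 * Zap rmaps at all possible mapping levels.  "end" is
			 * exclusive while slot_handle_level_range() takes an
			 * inclusive last gfn, hence "end - 1".
			 */
			flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
							PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
							start, end - 1, true, flush);
		}
	}

	return flush;
}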