Commit 142ccde1 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Coalesce TLB flushes when zapping collapsible SPTEs

Gather pending TLB flushes across both the legacy and TDP MMUs when
zapping collapsible SPTEs to avoid multiple flushes if both the legacy
MMU (for nested guests) and TDP MMU have mappings for the memslot.

Note, this also optimizes the TDP MMU to flush only the relevant range
when running as L1 with Hyper-V enlightenments.
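
The idea, roughly: both zap paths only accumulate a "flush pending" bool, and the
caller issues at most one remote flush for the memslot. A minimal standalone sketch
of that pattern (zap_legacy_mmu(), zap_tdp_mmu() and flush_memslot_tlbs() are
hypothetical stand-ins for the real KVM helpers, not the kernel code itself):

  #include <stdbool.h>
  #include <stdio.h>

  /* Stand-in for slot_handle_leaf(..., kvm_mmu_zap_collapsible_spte, ...). */
  static bool zap_legacy_mmu(void)
  {
  	return true;			/* pretend a collapsible SPTE was zapped */
  }

  /* Stand-in for kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush). */
  static bool zap_tdp_mmu(bool flush)
  {
  	bool zapped = false;		/* pretend the TDP MMU had nothing to zap */
  	return flush || zapped;	/* still report the legacy MMU's pending flush */
  }

  /* Stand-in for kvm_arch_flush_remote_tlbs_memslot(). */
  static void flush_memslot_tlbs(void)
  {
  	puts("one coalesced TLB flush for the memslot");
  }

  int main(void)
  {
  	bool flush;

  	flush = zap_legacy_mmu();	/* legacy MMU pass */
  	flush = zap_tdp_mmu(flush);	/* TDP MMU pass, accumulates the flag */

  	if (flush)			/* at most one remote flush is issued */
  		flush_memslot_tlbs();
  	return 0;
  }
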
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210326021957.1424875-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 302695a5
@@ -5629,11 +5629,13 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	write_lock(&kvm->mmu_lock);
 	flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+	if (is_tdp_mmu_enabled(kvm))
+		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
+
 	if (flush)
 		kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
-	if (is_tdp_mmu_enabled(kvm))
-		kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
-
 	write_unlock(&kvm->mmu_lock);
 }
@@ -1310,11 +1310,10 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
  * Clear non-leaf entries (and free associated page tables) which could
  * be replaced by large mappings, for GFNs within the slot.
  */
-void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				       struct kvm_memory_slot *slot)
+bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       struct kvm_memory_slot *slot, bool flush)
 {
 	struct kvm_mmu_page *root;
-	bool flush = false;
 	int root_as_id;
 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
@@ -1325,8 +1324,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
 	}
-	if (flush)
-		kvm_flush_remote_tlbs(kvm);
+	return flush;
 }
 /*
@@ -55,8 +55,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				       struct kvm_memory_slot *slot,
 				       gfn_t gfn, unsigned long mask,
 				       bool wrprot);
-void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				       struct kvm_memory_slot *slot);
+bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       struct kvm_memory_slot *slot, bool flush);
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 				   struct kvm_memory_slot *slot, gfn_t gfn);