Commit 302695a5 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Move flushing for "slot" handlers to caller for legacy MMU

Place the onus on the caller of slot_handle_*() to flush the TLB, rather
than handling the flush in the helper, and rename parameters accordingly.
This will allow future patches to coalesce flushes between address spaces
and between the legacy and TDP MMUs.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210326021957.1424875-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent af95b53e
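The change is easier to follow outside of kernel context. Below is a minimal, self-contained C sketch of the before/after pattern; the names (zap_range, flush_tlbs) are illustrative stand-ins, not the kernel's API. The walker may still flush when it has to yield the lock (the renamed "flush_on_yield" case), but the final flush is no longer issued by the helper; instead the helper returns whether a flush is still pending and the caller decides when to perform it.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for kvm_flush_remote_tlbs_with_address(). */
static void flush_tlbs(unsigned long start_gfn, unsigned long npages)
{
        printf("flush: gfn %lu, %lu pages\n", start_gfn, npages);
}

/*
 * Sketch of the new helper shape: flush when yielding the lock if asked to
 * ("flush_on_yield"), but report any flush still pending at the end via the
 * return value instead of performing it here.
 */
static bool zap_range(unsigned long start_gfn, unsigned long end_gfn,
                      bool flush_on_yield)
{
        bool flush = false;

        for (unsigned long gfn = start_gfn; gfn <= end_gfn; gfn++) {
                flush |= true;          /* pretend a mapping was zapped */

                /* Model dropping the lock partway through a long walk. */
                if (flush && flush_on_yield && (gfn + 1) % 512 == 0) {
                        flush_tlbs(start_gfn, gfn - start_gfn + 1);
                        flush = false;
                }
        }

        return flush;   /* the caller now owns the final flush */
}

int main(void)
{
        /* Caller-side pattern after this commit: flush only if told to. */
        bool flush = zap_range(0, 1000, true);

        if (flush)
                flush_tlbs(0, 1001);

        return 0;
}

The same split is what the diff below applies to slot_handle_level_range() and its callers.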
arch/x86/kvm/mmu/mmu.c

@@ -5249,7 +5249,7 @@ typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_
 static __always_inline bool
 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
                        slot_level_handler fn, int start_level, int end_level,
-                       gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
+                       gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield)
 {
        struct slot_rmap_walk_iterator iterator;
        bool flush = false;
@@ -5260,7 +5260,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
                        flush |= fn(kvm, iterator.rmap, memslot);

                if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
-                       if (flush && lock_flush_tlb) {
+                       if (flush && flush_on_yield) {
                                kvm_flush_remote_tlbs_with_address(kvm,
                                                start_gfn,
                                                iterator.gfn - start_gfn + 1);
@@ -5270,32 +5270,26 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
                }
        }

-       if (flush && lock_flush_tlb) {
-               kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
-                                                  end_gfn - start_gfn + 1);
-               flush = false;
-       }
-
        return flush;
 }

 static __always_inline bool
 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
                  slot_level_handler fn, int start_level, int end_level,
-                 bool lock_flush_tlb)
+                 bool flush_on_yield)
 {
        return slot_handle_level_range(kvm, memslot, fn, start_level,
                        end_level, memslot->base_gfn,
                        memslot->base_gfn + memslot->npages - 1,
-                       lock_flush_tlb);
+                       flush_on_yield);
 }

 static __always_inline bool
 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                slot_level_handler fn, bool lock_flush_tlb)
+                slot_level_handler fn, bool flush_on_yield)
 {
        return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
-                                PG_LEVEL_4K, lock_flush_tlb);
+                                PG_LEVEL_4K, flush_on_yield);
 }

 static void free_mmu_pages(struct kvm_mmu *mmu)
@@ -5531,10 +5525,14 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
                        if (start >= end)
                                continue;

-                       slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
+                       flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
                                                PG_LEVEL_4K,
                                                KVM_MAX_HUGEPAGE_LEVEL,
                                                start, end - 1, true);
+
+                       if (flush)
+                               kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+                                                                  gfn_end);
                }
        }

@@ -5627,9 +5625,12 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 {
        /* FIXME: const-ify all uses of struct kvm_memory_slot. */
        struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
+       bool flush;

        write_lock(&kvm->mmu_lock);
-       slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+       flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+       if (flush)
+               kvm_arch_flush_remote_tlbs_memslot(kvm, slot);

        if (is_tdp_mmu_enabled(kvm))
                kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
@@ -5641,7 +5642,7 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 {
        /*
         * All current use cases for flushing the TLBs for a specific memslot
-        * are related to dirty logging, and do the TLB flush out of mmu_lock.
+        * related to dirty logging, and many do the TLB flush out of mmu_lock.
         * The interaction between the various operations on memslot must be
         * serialized by slots_locks to ensure the TLB flush from one operation
         * is observed by any other operation on the same memslot.
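The "coalesce flushes" note in the commit message is the payoff this refactoring aims for: once every walker reports its pending flush instead of flushing internally, a caller that performs several walks (one per address space, or a legacy-MMU walk plus a TDP-MMU walk) can OR the results together and issue a single TLB flush. A hedged, self-contained C sketch of that caller-side shape follows; zap_legacy_mmu, zap_tdp_mmu, and flush_tlbs are hypothetical stand-ins, and this is not the actual follow-up kernel patch.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins; not the kernel's functions. */
static void flush_tlbs(unsigned long start_gfn, unsigned long npages)
{
        printf("flush: gfn %lu, %lu pages\n", start_gfn, npages);
}

/* Each walker only reports whether it zapped anything; it never flushes. */
static bool zap_legacy_mmu(unsigned long start, unsigned long end)
{
        return end > start;
}

static bool zap_tdp_mmu(unsigned long start, unsigned long end)
{
        return end > start;
}

int main(void)
{
        const unsigned long start = 0, end = 8192;
        bool flush = false;

        /*
         * Accumulate pending-flush state across both MMUs (and, in the
         * kernel, across address spaces)...
         */
        flush |= zap_legacy_mmu(start, end);
        flush |= zap_tdp_mmu(start, end);

        /* ...then issue one flush instead of one per walk. */
        if (flush)
                flush_tlbs(start, end - start);

        return 0;
}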