Commit 2b9663d8 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Pass address space ID to __kvm_tdp_mmu_zap_gfn_range()

Pass the address space ID to TDP MMU's primary "zap gfn range" helper to
allow the MMU notifier paths to iterate over memslots exactly once.
Currently, both the legacy MMU and TDP MMU iterate over memslots when
looking for an overlapping hva range, which can be quite costly if there
are a large number of memslots.

Add a "flush" parameter so that iterating over multiple address spaces
in the caller will continue to do the right thing when yielding while a
flush is pending from a previous address space.

Note, this also has a functional change in the form of coalescing TLB
flushes across multiple address spaces in kvm_zap_gfn_range(), and it also
optimizes the TDP MMU to utilize range-based flushing when running as an L1
with Hyper-V enlightenments.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210326021957.1424875-6-seanjc@google.com>
[Keep separate for loops to prepare for other incoming patches. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1a61b7db
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5532,15 +5532,15 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 		}
 	}
 
-	if (flush)
-		kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
-
 	if (is_tdp_mmu_enabled(kvm)) {
-		flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
-		if (flush)
-			kvm_flush_remote_tlbs(kvm);
+		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+			flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
+							  gfn_end, flush);
 	}
 
+	if (flush)
+		kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
+
 	write_unlock(&kvm->mmu_lock);
 }
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -699,14 +699,16 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
  */
-bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
-				 bool can_yield)
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
+				 gfn_t end, bool can_yield, bool flush)
 {
 	struct kvm_mmu_page *root;
-	bool flush = false;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root)
+	for_each_tdp_mmu_root_yield_safe(kvm, root) {
+		if (kvm_mmu_page_as_id(root) != as_id)
+			continue;
+
 		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
+	}
 
 	return flush;
 }
@@ -714,9 +716,12 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 {
 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
-	bool flush;
+	bool flush = false;
+	int i;
+
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn, flush);
 
-	flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
 	if (flush)
 		kvm_flush_remote_tlbs(kvm);
 }
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -8,12 +8,12 @@
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
-bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
-				 bool can_yield);
-static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start,
-					     gfn_t end)
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
+				 gfn_t end, bool can_yield, bool flush);
+static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
+					     gfn_t start, gfn_t end, bool flush)
 {
-	return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true);
+	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
 }
 
 static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
@@ -29,7 +29,8 @@ static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 	 * of the shadow page's gfn range and stop iterating before yielding.
 	 */
 	lockdep_assert_held_write(&kvm->mmu_lock);
-	return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false);
+	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
+					   sp->gfn, end, false, false);
 }
 
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
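
The flush-coalescing pattern used in kvm_zap_gfn_range() above is easiest to see in isolation. Below is a small, standalone C sketch (not kernel code; NUM_ADDRESS_SPACES and zap_address_space() are hypothetical names chosen for the example): each per-address-space pass takes the pending flush flag as input and returns the updated value, so the remote TLB flush is issued at most once, after the loop.

/*
 * Standalone model of coalescing a TLB flush across address spaces.
 * This is an illustration only, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_ADDRESS_SPACES 2    /* stands in for KVM_ADDRESS_SPACE_NUM on x86 */

/*
 * Pretend to zap a GFN range in one address space; return true if a TLB
 * flush is pending, either from this pass or carried in from a prior one.
 */
static bool zap_address_space(int as_id, unsigned long start,
                              unsigned long end, bool flush)
{
        bool zapped_something = (as_id == 0);   /* stand-in for real work */

        return flush || zapped_something;
}

int main(void)
{
        bool flush = false;
        int i;

        /* Accumulate the flush requirement across all address spaces. */
        for (i = 0; i < NUM_ADDRESS_SPACES; i++)
                flush = zap_address_space(i, 0, 1UL << 20, flush);

        /* One flush at the end instead of one per address space. */
        if (flush)
                printf("flush remote TLBs once\n");

        return 0;
}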