Commit 4a42d848 authored by David Stevens, committed by Paolo Bonzini

KVM: x86/mmu: Consider the hva in mmu_notifier retry

Track the range being invalidated by mmu_notifier and skip page fault
retries if the fault address is not affected by the in-progress
invalidation. Handle concurrent invalidations by finding the minimal
range which includes all ranges being invalidated. Although the combined
range may include unrelated addresses and cannot be shrunk as individual
invalidation operations complete, it is unlikely the marginal gains of
proper range tracking are worth the additional complexity.

The primary benefit of this change is the reduction in the likelihood of
extreme latency when handling a page fault due to another thread having
been preempted while modifying host virtual addresses.
Signed-off-by: David Stevens <stevensd@chromium.org>
Message-Id: <20210222024522.1751719-3-stevensd@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5f8a7cf2
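As orientation before the per-file hunks, the sketch below models the mechanism this patch introduces in plain user-space C. It is illustrative only: the struct, field, and function names are hypothetical stand-ins for the kvm fields and helpers added here (mmu_notifier_count, mmu_notifier_seq, mmu_notifier_range_start/end and mmu_notifier_retry_hva()), and locking plus memory barriers are omitted.

#include <assert.h>
#include <stdbool.h>

/* Hypothetical stand-in for the bookkeeping this patch adds to struct kvm. */
struct kvm_model {
        long notifier_count;         /* models mmu_notifier_count       */
        unsigned long notifier_seq;  /* models mmu_notifier_seq         */
        unsigned long range_start;   /* models mmu_notifier_range_start */
        unsigned long range_end;     /* models mmu_notifier_range_end   */
};

/*
 * Models mmu_notifier_retry_hva(): retry when the fault raced with an
 * in-flight invalidation that may cover hva, or when any invalidation
 * completed after mmu_seq was sampled.
 */
static bool retry_fault(const struct kvm_model *kvm,
                        unsigned long mmu_seq, unsigned long hva)
{
        if (kvm->notifier_count &&
            hva >= kvm->range_start && hva < kvm->range_end)
                return true;
        return kvm->notifier_seq != mmu_seq;
}

int main(void)
{
        struct kvm_model kvm = { 0 };
        /* The fault path samples the sequence before resolving hva/pfn. */
        unsigned long mmu_seq = kvm.notifier_seq;

        /* A hypothetical invalidation of [0x10000, 0x20000) starts. */
        kvm.notifier_count = 1;
        kvm.range_start = 0x10000;
        kvm.range_end = 0x20000;

        /* Before this patch every fault retried; now only affected hvas do. */
        assert(retry_fault(&kvm, mmu_seq, 0x18000));
        assert(!retry_fault(&kvm, mmu_seq, 0x30000));

        /* Once the invalidation completes, the sequence check takes over. */
        kvm.notifier_count = 0;
        kvm.notifier_seq++;
        assert(retry_fault(&kvm, mmu_seq, 0x30000));
        return 0;
}

The first pair of asserts captures the behavioural change: with an invalidation in flight, only faults whose host virtual address falls inside the tracked range are forced to retry, instead of every fault.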
@@ -590,7 +590,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
 	} else {
 		/* Call KVM generic code to do the slow-path check */
 		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
-					   writing, &write_ok);
+					   writing, &write_ok, NULL);
 		if (is_error_noslot_pfn(pfn))
 			return -EFAULT;
 		page = NULL;
...
@@ -822,7 +822,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
 
 	/* Call KVM generic code to do the slow-path check */
 	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
-				   writing, upgrade_p);
+				   writing, upgrade_p, NULL);
 	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 	page = NULL;
...
@@ -2734,6 +2734,13 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 	if (sp->role.level > PG_LEVEL_4K)
 		return;
 
+	/*
+	 * If addresses are being invalidated, skip prefetching to avoid
+	 * accidentally prefetching those addresses.
+	 */
+	if (unlikely(vcpu->kvm->mmu_notifier_count))
+		return;
+
 	__direct_pte_prefetch(vcpu, sp, sptep);
 }
@@ -3640,8 +3647,8 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 }
 
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
-			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
-			 bool *writable)
+			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
+			 bool write, bool *writable)
 {
 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	bool async;
@@ -3654,7 +3661,8 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	}
 
 	async = false;
-	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
+	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async,
+				    write, writable, hva);
 	if (!async)
 		return false; /* *pfn has correct page already */
@@ -3668,7 +3676,8 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			return true;
 	}
 
-	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
+	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
+				    write, writable, hva);
 	return false;
 }
@@ -3681,6 +3690,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	unsigned long mmu_seq;
 	kvm_pfn_t pfn;
+	hva_t hva;
 	int r;
 
 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
@@ -3699,7 +3709,8 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
+	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, &hva,
+			 write, &map_writable))
 		return RET_PF_RETRY;
 
 	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
@@ -3712,7 +3723,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	else
 		write_lock(&vcpu->kvm->mmu_lock);
 
-	if (!is_noslot_pfn(pfn) && mmu_notifier_retry(vcpu->kvm, mmu_seq))
+	if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
 		goto out_unlock;
 	r = make_mmu_pages_available(vcpu);
 	if (r)
...
@@ -601,6 +601,13 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 	if (sp->role.level > PG_LEVEL_4K)
 		return;
 
+	/*
+	 * If addresses are being invalidated, skip prefetching to avoid
+	 * accidentally prefetching those addresses.
+	 */
+	if (unlikely(vcpu->kvm->mmu_notifier_count))
+		return;
+
 	if (sp->role.direct)
 		return __direct_pte_prefetch(vcpu, sp, sptep);
@@ -790,6 +797,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	struct guest_walker walker;
 	int r;
 	kvm_pfn_t pfn;
+	hva_t hva;
 	unsigned long mmu_seq;
 	bool map_writable, is_self_change_mapping;
 	int max_level;
@@ -840,8 +848,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
-			 &map_writable))
+	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, &hva,
+			 write_fault, &map_writable))
 		return RET_PF_RETRY;
 
 	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
@@ -869,7 +877,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	r = RET_PF_RETRY;
 	write_lock(&vcpu->kvm->mmu_lock);
 
-	if (!is_noslot_pfn(pfn) && mmu_notifier_retry(vcpu->kvm, mmu_seq))
+	if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
 		goto out_unlock;
 
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
...
@@ -11,6 +11,7 @@
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/bug.h>
+#include <linux/minmax.h>
 #include <linux/mm.h>
 #include <linux/mmu_notifier.h>
 #include <linux/preempt.h>
@@ -506,6 +507,8 @@ struct kvm {
 	struct mmu_notifier mmu_notifier;
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
+	unsigned long mmu_notifier_range_start;
+	unsigned long mmu_notifier_range_end;
 #endif
 	long tlbs_dirty;
 	struct list_head devices;
@@ -733,7 +736,7 @@ kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
 			       bool atomic, bool *async, bool write_fault,
-			       bool *writable);
+			       bool *writable, hva_t *hva);
 
 void kvm_release_pfn_clean(kvm_pfn_t pfn);
 void kvm_release_pfn_dirty(kvm_pfn_t pfn);
@@ -1207,6 +1210,26 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
 		return 1;
 	return 0;
 }
+
+static inline int mmu_notifier_retry_hva(struct kvm *kvm,
+					 unsigned long mmu_seq,
+					 unsigned long hva)
+{
+	lockdep_assert_held(&kvm->mmu_lock);
+	/*
+	 * If mmu_notifier_count is non-zero, then the range maintained by
+	 * kvm_mmu_notifier_invalidate_range_start contains all addresses that
+	 * might be being invalidated. Note that it may include some false
+	 * positives, due to shortcuts when handling concurrent invalidations.
+	 */
+	if (unlikely(kvm->mmu_notifier_count) &&
+	    hva >= kvm->mmu_notifier_range_start &&
+	    hva < kvm->mmu_notifier_range_end)
+		return 1;
+	if (kvm->mmu_notifier_seq != mmu_seq)
+		return 1;
+	return 0;
+}
 #endif
 
 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
...
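One detail of mmu_notifier_retry_hva() above worth calling out: the containment test is half-open (hva >= range_start && hva < range_end), matching the exclusive end of the notifier's range, so an hva equal to mmu_notifier_range_end is not treated as affected. A trivial standalone check of that predicate, with arbitrary example addresses:

#include <assert.h>

int main(void)
{
        /* Arbitrary example of an in-flight invalidation range [start, end). */
        unsigned long start = 0x10000, end = 0x20000;
        unsigned long first = 0x10000, last = 0x1ffff, at_end = 0x20000;

        assert(first >= start && first < end);          /* first byte: affected   */
        assert(last >= start && last < end);            /* last byte: affected    */
        assert(!(at_end >= start && at_end < end));     /* end itself: unaffected */
        return 0;
}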
@@ -486,6 +486,24 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	 * count is also read inside the mmu_lock critical section.
 	 */
 	kvm->mmu_notifier_count++;
+	if (likely(kvm->mmu_notifier_count == 1)) {
+		kvm->mmu_notifier_range_start = range->start;
+		kvm->mmu_notifier_range_end = range->end;
+	} else {
+		/*
+		 * Fully tracking multiple concurrent ranges has diminishing
+		 * returns. Keep things simple and just find the minimal range
+		 * which includes the current and new ranges. As there won't be
+		 * enough information to subtract a range after its invalidate
+		 * completes, any ranges invalidated concurrently will
+		 * accumulate and persist until all outstanding invalidates
+		 * complete.
+		 */
+		kvm->mmu_notifier_range_start =
+			min(kvm->mmu_notifier_range_start, range->start);
+		kvm->mmu_notifier_range_end =
+			max(kvm->mmu_notifier_range_end, range->end);
+	}
 	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
 					     range->flags);
 	/* we've to flush the tlb before the pages can be freed */
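The accumulation behaviour described in the comment above is easiest to see with concrete numbers. The snippet below is a standalone illustration rather than kernel code: the globals and the start_invalidation() helper are hypothetical stand-ins for kvm->mmu_notifier_count and kvm->mmu_notifier_range_start/end, and the addresses are made up.

#include <assert.h>

/* Hypothetical stand-ins for kvm->mmu_notifier_{count,range_start,range_end}. */
static long count;
static unsigned long range_start, range_end;

/* Mirrors the range widening done in kvm_mmu_notifier_invalidate_range_start(). */
static void start_invalidation(unsigned long start, unsigned long end)
{
        if (++count == 1) {
                range_start = start;
                range_end = end;
        } else {
                range_start = range_start < start ? range_start : start;
                range_end = range_end > end ? range_end : end;
        }
}

int main(void)
{
        /* Two concurrent invalidations of unrelated regions... */
        start_invalidation(0x10000, 0x11000);
        start_invalidation(0x80000, 0x81000);

        /* ...are tracked as one combined range covering both... */
        assert(range_start == 0x10000 && range_end == 0x81000);

        /*
         * ...so an hva in the gap between them (e.g. 0x40000) is a false
         * positive: while either invalidation is still outstanding, a fault
         * there is forced to retry. That is the accuracy/complexity trade-off
         * the comment above describes.
         */
        assert(count == 2 && 0x40000 >= range_start && 0x40000 < range_end);
        return 0;
}

The kernel code performs the same comparison with the min()/max() helpers.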
@@ -2023,10 +2041,13 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 
 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
 			       bool atomic, bool *async, bool write_fault,
-			       bool *writable)
+			       bool *writable, hva_t *hva)
 {
 	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
 
+	if (hva)
+		*hva = addr;
+
 	if (addr == KVM_HVA_ERR_RO_BAD) {
 		if (writable)
 			*writable = false;
@@ -2054,19 +2075,19 @@ kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable)
 {
 	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
-				    write_fault, writable);
+				    write_fault, writable, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
 
 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
 {
-	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
+	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
 
 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
 {
-	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
+	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
...