Commit c10743a1 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Zap only the target TDP MMU shadow page in NX recovery

When recovering a potential hugepage that was shattered for the iTLB
multihit workaround, precisely zap only the target page instead of
iterating over the TDP MMU to find the SP that was passed in.  This will
allow future simplification of zap_gfn_range() by having it zap only
leaf SPTEs.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220226001546.360188-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 626808d1
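
The diff below gives each TDP MMU shadow page a back-pointer (sp->ptep) to the parent SPTE that maps it, which is what lets the new kvm_tdp_mmu_zap_sp() clear exactly one entry instead of walking a GFN range to find it. As a rough illustration of that back-pointer idea only, here is a minimal user-space C model; it is not kernel code, and every name in it (fake_sp, link_sp, zap_sp, ENTRIES_PER_TABLE) is invented for this sketch.

/*
 * Minimal user-space model of the idea in this commit (not kernel code):
 * each shadow page keeps a back-pointer to the parent PTE that maps it
 * (analogous to sp->ptep), so "zapping" the page means clearing exactly
 * one entry instead of walking the paging structure to find it.  All
 * names here (fake_sp, link_sp, zap_sp) are invented for illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRIES_PER_TABLE 512

struct fake_sp {
	uint64_t *ptep;			/* back-pointer into the parent table */
	uint64_t spt[ENTRIES_PER_TABLE];
};

/* Install @child at @parent_table[idx] and record the back-pointer. */
static void link_sp(uint64_t *parent_table, int idx, struct fake_sp *child)
{
	parent_table[idx] = (uint64_t)(uintptr_t)child->spt;
	child->ptep = &parent_table[idx];
}

/* Precise zap: clear only the one parent entry that points at @sp. */
static int zap_sp(struct fake_sp *sp)
{
	if (!sp->ptep)			/* e.g. a root page with no parent */
		return 0;
	*sp->ptep = 0;
	return 1;
}

int main(void)
{
	static uint64_t root[ENTRIES_PER_TABLE];
	static struct fake_sp child;

	link_sp(root, 42, &child);
	assert(root[42] != 0);

	/* No loop over root[] is needed; the back-pointer finds the entry. */
	if (!zap_sp(&child))
		return 1;
	assert(root[42] == 0);

	printf("precise zap cleared exactly one parent entry\n");
	return 0;
}

The real helper added in the tdp_mmu.c hunk below additionally reads the old SPTE under RCU, bails out if it is no longer present, and clears it via __tdp_mmu_set_spte() so the change goes through the normal changed-SPTE handling.
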
arch/x86/kvm/mmu/mmu_internal.h
@@ -30,6 +30,8 @@ extern bool dbg;
 #define INVALID_PAE_ROOT 0
 #define IS_VALID_PAE_ROOT(x) (!!(x))
 
+typedef u64 __rcu *tdp_ptep_t;
+
 struct kvm_mmu_page {
 	/*
 	 * Note, "link" through "spt" fit in a single 64 byte cache line on
@@ -59,7 +61,10 @@ struct kvm_mmu_page {
 		refcount_t tdp_mmu_root_count;
 	};
 	unsigned int unsync_children;
-	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
+	union {
+		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
+		tdp_ptep_t ptep;
+	};
 	DECLARE_BITMAP(unsync_child_bitmap, 512);
 
 	struct list_head lpage_disallowed_link;
arch/x86/kvm/mmu/tdp_iter.h
@@ -7,8 +7,6 @@
 #include "mmu.h"
 
-typedef u64 __rcu *tdp_ptep_t;
-
 /*
  * TDP MMU SPTEs are RCU protected to allow paging structures (non-leaf SPTEs)
  * to be zapped while holding mmu_lock for read. Holding RCU isn't required for
arch/x86/kvm/mmu/tdp_mmu.c
@@ -199,13 +199,14 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
 	return sp;
 }
 
-static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, gfn_t gfn,
-			    union kvm_mmu_page_role role)
+static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
+			    gfn_t gfn, union kvm_mmu_page_role role)
 {
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
 	sp->role = role;
 	sp->gfn = gfn;
+	sp->ptep = sptep;
 	sp->tdp_mmu_page = true;
 
 	trace_kvm_mmu_get_page(sp, true);
@@ -222,7 +223,7 @@ static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
 	role = parent_sp->role;
 	role.level--;
 
-	tdp_mmu_init_sp(child_sp, iter->gfn, role);
+	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
 }
 
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
@@ -244,7 +245,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
 	}
 
 	root = tdp_mmu_alloc_sp(vcpu);
-	tdp_mmu_init_sp(root, 0, role);
+	tdp_mmu_init_sp(root, NULL, 0, role);
 
 	refcount_set(&root->tdp_mmu_root_count, 1);
 
@@ -736,6 +737,33 @@ static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
 	return iter->yielded;
 }
 
+bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	u64 old_spte;
+
+	/*
+	 * This helper intentionally doesn't allow zapping a root shadow page,
+	 * which doesn't have a parent page table and thus no associated entry.
+	 */
+	if (WARN_ON_ONCE(!sp->ptep))
+		return false;
+
+	rcu_read_lock();
+
+	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
+	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte))) {
+		rcu_read_unlock();
+		return false;
+	}
+
+	__tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
+			   sp->gfn, sp->role.level + 1, true, true);
+
+	rcu_read_unlock();
+
+	return true;
+}
+
 /*
  * Tears down the mappings for the range of gfns, [start, end), and frees the
  * non-root pages mapping GFNs strictly within that range. Returns true if
arch/x86/kvm/mmu/tdp_mmu.h
@@ -22,24 +22,8 @@ static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
 {
 	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
 }
-static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);
-
-	/*
-	 * Don't allow yielding, as the caller may have a flush pending. Note,
-	 * if mmu_lock is held for write, zapping will never yield in this case,
-	 * but explicitly disallow it for safety. The TDP MMU does not yield
-	 * until it has made forward progress (steps sideways), and when zapping
-	 * a single shadow page that it's guaranteed to see (thus the mmu_lock
-	 * requirement), its "step sideways" will always step beyond the bounds
-	 * of the shadow page's gfn range and stop iterating before yielding.
-	 */
-	lockdep_assert_held_write(&kvm->mmu_lock);
-	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
-					   sp->gfn, end, false, false);
-}
+bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
 
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);