Commit 54eb3ef5 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Move shadow-present check out of spte_has_volatile_bits()

Move the is_shadow_present_pte() check out of spte_has_volatile_bits()
and into its callers.  Well, caller, since only one of its two callers
doesn't already do the shadow-present check.

Opportunistically move the helper to spte.c/h so that it can be used by
the TDP MMU, which is also the primary motivation for the shadow-present
change.  Unlike the legacy MMU, the TDP MMU uses a single path for clearing
leaf and non-leaf SPTEs, and to avoid unnecessary atomic updates, the TDP
MMU will need to check is_last_spte() prior to calling
spte_has_volatile_bits().  Calling is_last_spte() without first calling
is_shadow_present_pte() is at best odd, and at worst a violation of KVM's
loosely defined SPTE rules.
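
For illustration only, a rough sketch (not part of this patch) of the call
pattern the TDP MMU is expected to use once the check lives in the caller;
the helper name and structure below are hypothetical:

/*
 * Illustrative sketch only, not from this patch: a hypothetical TDP MMU
 * path that gates the volatile-bits check on the SPTE being a
 * shadow-present leaf, and falls back to a plain write otherwise.
 */
static u64 tdp_mmu_clear_spte_sketch(u64 *sptep, u64 old_spte, int level)
{
	if (!is_shadow_present_pte(old_spte) ||
	    !is_last_spte(old_spte, level) ||
	    !spte_has_volatile_bits(old_spte)) {
		/* No bits can change underneath us; a plain write suffices. */
		WRITE_ONCE(*sptep, 0ull);
		return old_spte;
	}

	/* Volatile bits may be set concurrently; clear the SPTE atomically. */
	return xchg(sptep, 0ull);
}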

Note, mmu_spte_clear_track_bits() could likely skip the write entirely
for SPTEs that are not shadow-present.  Leave that cleanup for a future
patch to avoid introducing a functional change, and because the
shadow-present check can likely be moved further up the stack, e.g.
drop_large_spte() appears to be the only path that doesn't already
explicitly check for a shadow-present SPTE.
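
As a purely illustrative sketch of that possible future cleanup (again, not
part of this change), the clearing path could skip the write like so:

	u64 old_spte = *sptep;

	/*
	 * Hypothetical future cleanup: a !shadow-present SPTE has no bits
	 * to clear, so the write (fast or slow) could be skipped entirely.
	 */
	if (is_shadow_present_pte(old_spte)) {
		if (!spte_has_volatile_bits(old_spte))
			__update_clear_spte_fast(sptep, 0ull);
		else
			old_spte = __update_clear_spte_slow(sptep, 0ull);
	}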

No functional change intended.

Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220423034752.1161007-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 706c9c55
@@ -473,32 +473,6 @@ static u64 __get_spte_lockless(u64 *sptep)
 }
 #endif
 
-static bool spte_has_volatile_bits(u64 spte)
-{
-	if (!is_shadow_present_pte(spte))
-		return false;
-
-	/*
-	 * Always atomically update spte if it can be updated
-	 * out of mmu-lock, it can ensure dirty bit is not lost,
-	 * also, it can help us to get a stable is_writable_pte()
-	 * to ensure tlb flush is not missed.
-	 */
-	if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
-		return true;
-
-	if (is_access_track_spte(spte))
-		return true;
-
-	if (spte_ad_enabled(spte)) {
-		if (!(spte & shadow_accessed_mask) ||
-		    (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
-			return true;
-	}
-
-	return false;
-}
-
 /* Rules for using mmu_spte_set:
  * Set the sptep from nonpresent to present.
  * Note: the sptep being assigned *must* be either not present
@@ -593,7 +567,8 @@ static int mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
 	u64 old_spte = *sptep;
 	int level = sptep_to_sp(sptep)->role.level;
 
-	if (!spte_has_volatile_bits(old_spte))
+	if (!is_shadow_present_pte(old_spte) ||
+	    !spte_has_volatile_bits(old_spte))
 		__update_clear_spte_fast(sptep, 0ull);
 	else
 		old_spte = __update_clear_spte_slow(sptep, 0ull);
@@ -90,6 +90,34 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 				     E820_TYPE_RAM);
 }
 
+/*
+ * Returns true if the SPTE has bits that may be set without holding mmu_lock.
+ * The caller is responsible for checking if the SPTE is shadow-present, and
+ * for determining whether or not the caller cares about non-leaf SPTEs.
+ */
+bool spte_has_volatile_bits(u64 spte)
+{
+	/*
+	 * Always atomically update spte if it can be updated
+	 * out of mmu-lock, it can ensure dirty bit is not lost,
+	 * also, it can help us to get a stable is_writable_pte()
+	 * to ensure tlb flush is not missed.
+	 */
+	if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
+		return true;
+
+	if (is_access_track_spte(spte))
+		return true;
+
+	if (spte_ad_enabled(spte)) {
+		if (!(spte & shadow_accessed_mask) ||
+		    (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
+			return true;
+	}
+
+	return false;
+}
+
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
@@ -404,6 +404,8 @@ static inline u64 get_mmio_spte_generation(u64 spte)
 	return gen;
 }
 
+bool spte_has_volatile_bits(u64 spte);
+
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,