Commit ce00053b authored by Peter Feiner, committed by Paolo Bonzini

x86: kvm: mmu: dead code thanks to access tracking

The MMU always has hardware A bits or access tracking support, thus
it's unnecessary to handle the scenario where we have neither.
Signed-off-by: Peter Feiner <pfeiner@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 8a53e7e5
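The commit message's invariant is worth spelling out: every configuration the MMU supports provides at least one way to observe guest memory accesses, either a hardware accessed bit in the shadow/EPT PTEs or software access tracking (used for EPT without A/D bits). A minimal userspace sketch of that invariant, with hypothetical mask values standing in for the kernel's shadow_accessed_mask and shadow_acc_track_mask:

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-ins for shadow_accessed_mask / shadow_acc_track_mask;
 * zero means "mechanism not available". */
struct mmu_config {
	uint64_t accessed_mask;  /* hardware A bit in the SPTE */
	uint64_t acc_track_mask; /* software access-tracking marker */
};

int main(void)
{
	struct mmu_config configs[] = {
		{ .accessed_mask = 1ull << 5 },   /* shadow paging, or EPT with A/D bits */
		{ .acc_track_mask = 1ull << 62 }, /* EPT without A/D bits */
	};

	/* The case the removed code handled -- both masks zero -- can no
	 * longer occur, so the fallback paths deleted below are dead code. */
	for (unsigned int i = 0; i < 2; i++)
		assert(configs[i].accessed_mask || configs[i].acc_track_mask);
	return 0;
}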
arch/x86/kvm/mmu.c:

@@ -315,12 +315,21 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
 	return likely(kvm_gen == spte_gen);
 }
 
+/*
+ * Sets the shadow PTE masks used by the MMU.
+ *
+ * Assumptions:
+ *  - Setting either @accessed_mask or @dirty_mask requires setting both
+ *  - At least one of @accessed_mask or @acc_track_mask must be set
+ */
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
 		u64 acc_track_mask)
 {
 	if (acc_track_mask != 0)
 		acc_track_mask |= SPTE_SPECIAL_MASK;
+	BUG_ON(!dirty_mask != !accessed_mask);
+	BUG_ON(!accessed_mask && !acc_track_mask);
 
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
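The first BUG_ON uses the "!a != !b" idiom: logical negation collapses each mask to 0 or 1, so the check fires exactly when one of the two masks is set and the other is not, i.e. the accessed and dirty masks must be provided together or not at all. A minimal userspace sketch of the two assertions, using hypothetical mask values (the real ones come from the architecture setup code):

#include <assert.h>
#include <stdint.h>

/* Mirrors the two BUG_ON conditions added above; values are hypothetical. */
static void check_masks(uint64_t accessed_mask, uint64_t dirty_mask,
			uint64_t acc_track_mask)
{
	assert(!dirty_mask == !accessed_mask);   /* set both or neither */
	assert(accessed_mask || acc_track_mask); /* at least one way to see accesses */
}

int main(void)
{
	check_masks(1ull << 5, 1ull << 6, 0);    /* hardware A/D bits */
	check_masks(0, 0, 1ull << 62);           /* software access tracking only */
	/* check_masks(1ull << 5, 0, 0) would trip the first assertion:
	 * an accessed mask without a dirty mask is not a supported setup. */
	return 0;
}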
@@ -1766,18 +1775,9 @@ static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 	u64 *sptep;
 	struct rmap_iterator iter;
 
-	/*
-	 * If there's no access bit in the secondary pte set by the hardware and
-	 * fast access tracking is also not enabled, it's up to gup-fast/gup to
-	 * set the access bit in the primary pte or in the page structure.
-	 */
-	if (!shadow_accessed_mask && !shadow_acc_track_mask)
-		goto out;
-
 	for_each_rmap_spte(rmap_head, &iter, sptep)
 		if (is_accessed_spte(*sptep))
 			return 1;
-out:
 	return 0;
 }
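Given the invariant, is_accessed_spte() always has a meaningful answer, which is why the "neither mechanism" early-out above could be dropped. A sketch of how an accessed test can cover both configurations; this is a hypothetical helper modeled on the access-tracking design, not a copy of the kernel's implementation:

#include <stdint.h>

/* With a hardware A bit, test it directly.  Under software access
 * tracking, an SPTE that still carries the tracking marker has not been
 * touched since it was marked, so "accessed" means the marker is gone
 * (hypothetical encoding; the kernel's checks are more involved). */
static int sketch_is_accessed(uint64_t spte, uint64_t accessed_mask,
			      uint64_t acc_track_mask)
{
	if (accessed_mask)
		return (spte & accessed_mask) != 0;
	return (spte & acc_track_mask) == 0;
}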
@@ -1798,18 +1798,6 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	/*
-	 * In case of absence of EPT Access and Dirty Bits supports,
-	 * emulate the accessed bit for EPT, by checking if this page has
-	 * an EPT mapping, and clearing it if it does. On the next access,
-	 * a new EPT mapping will be established.
-	 * This has some overhead, but not as much as the cost of swapping
-	 * out actively used pages or breaking up actively used hugepages.
-	 */
-	if (!shadow_accessed_mask && !shadow_acc_track_mask)
-		return kvm_handle_hva_range(kvm, start, end, 0,
-					    kvm_unmap_rmapp);
-
 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
 }