Commit 53597858 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Avoid memslot lookup in make_spte and mmu_try_to_unsync_pages

mmu_try_to_unsync_pages checks if page tracking is active for the given
gfn, which requires knowing the memslot. We can pass down the memslot
via make_spte to avoid this lookup.

The memslot is also handy for make_spte's marking of the gfn as dirty:
we can test whether dirty page tracking is enabled, and if so ensure that
pages are mapped as writable with 4K granularity.  Apart from the warning,
no functional change is intended.
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210813203504.2742757-7-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 8a9f566a
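
For orientation before the diff: a minimal, self-contained C sketch of the pattern this commit applies, i.e. resolve the memslot once per fault and thread it through the helpers instead of re-deriving it from the gfn inside each one. Everything below is an illustrative stand-in written for this note, not KVM code; gfn_to_memslot() merely plays the role of kvm_vcpu_gfn_to_memslot().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct memslot {
	unsigned long base_gfn;
	unsigned long npages;
	bool write_tracked;		/* stand-in for per-slot gfn_track state */
};

/* Stand-in for kvm_vcpu_gfn_to_memslot(): the lookup we want to do only once. */
static struct memslot *gfn_to_memslot(struct memslot *slots, size_t n,
				      unsigned long gfn)
{
	for (size_t i = 0; i < n; i++) {
		if (gfn >= slots[i].base_gfn &&
		    gfn < slots[i].base_gfn + slots[i].npages)
			return &slots[i];
	}
	return NULL;
}

/* After the refactor, helpers take the already-resolved slot... */
static bool slot_is_write_tracked(const struct memslot *slot)
{
	return slot && slot->write_tracked;
}

/* ...so the fault path does a single lookup and passes the result down. */
static void handle_write_fault(struct memslot *slots, size_t n, unsigned long gfn)
{
	struct memslot *slot = gfn_to_memslot(slots, n, gfn);

	printf("gfn %#lx: write-tracked=%d\n", gfn, slot_is_write_tracked(slot));
}

int main(void)
{
	struct memslot slots[] = {
		{ .base_gfn = 0x1000, .npages = 0x200, .write_tracked = true },
	};

	handle_write_fault(slots, 1, 0x1100);
	return 0;
}
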
@@ -59,8 +59,6 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
 void kvm_slot_page_track_remove_page(struct kvm *kvm,
 				     struct kvm_memory_slot *slot, gfn_t gfn,
 				     enum kvm_page_track_mode mode);
-bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
-			      enum kvm_page_track_mode mode);
 bool kvm_slot_page_track_is_active(struct kvm_memory_slot *slot, gfn_t gfn,
 				   enum kvm_page_track_mode mode);
...
@@ -2572,8 +2572,8 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
  * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
  * be write-protected.
  */
-int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync,
-			    bool speculative)
+int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
+			    gfn_t gfn, bool can_unsync, bool speculative)
 {
 	struct kvm_mmu_page *sp;
 	bool locked = false;
@@ -2583,7 +2583,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync,
 	 * track machinery is used to write-protect upper-level shadow pages,
 	 * i.e. this guards the role.level == 4K assertion below!
 	 */
-	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+	if (kvm_slot_page_track_is_active(slot, gfn, KVM_PAGE_TRACK_WRITE))
 		return -EPERM;

 	/*
@@ -2719,7 +2719,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 		was_rmapped = 1;
 	}

-	wrprot = make_spte(vcpu, sp, pte_access, gfn, pfn, *sptep, speculative,
+	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, speculative,
 			   true, host_writable, &spte);

 	if (*sptep == spte) {
...
@@ -118,8 +118,8 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
 		kvm_x86_ops.cpu_dirty_log_size;
 }

-int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync,
-			    bool speculative);
+int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
+			    gfn_t gfn, bool can_unsync, bool speculative);

 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
...
@@ -136,6 +136,9 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);

+/*
+ * check if the corresponding access on the specified guest page is tracked.
+ */
 bool kvm_slot_page_track_is_active(struct kvm_memory_slot *slot, gfn_t gfn,
 				   enum kvm_page_track_mode mode)
 {
@@ -151,17 +154,6 @@ bool kvm_slot_page_track_is_active(struct kvm_memory_slot *slot, gfn_t gfn,
 	return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
 }

-/*
- * check if the corresponding access on the specified guest page is tracked.
- */
-bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
-			      enum kvm_page_track_mode mode)
-{
-	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-
-	return kvm_slot_page_track_is_active(slot, gfn, mode);
-}
-
 void kvm_page_track_cleanup(struct kvm *kvm)
 {
 	struct kvm_page_track_notifier_head *head;
...
@@ -1091,6 +1091,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
 		u64 *sptep, spte;
+		struct kvm_memory_slot *slot;
 		unsigned pte_access;
 		pt_element_t gpte;
 		gpa_t pte_gpa;
@@ -1127,7 +1128,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		sptep = &sp->spt[i];
 		spte = *sptep;
 		host_writable = spte & shadow_host_writable_mask;
-		make_spte(vcpu, sp, pte_access, gfn,
+		slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+		make_spte(vcpu, sp, slot, pte_access, gfn,
 			  spte_to_pfn(spte), spte, true, false,
 			  host_writable, &spte);
...
@@ -90,6 +90,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 }

 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+	       struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
 	       u64 old_spte, bool speculative, bool can_unsync,
 	       bool host_writable, u64 *new_spte)
@@ -160,7 +161,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		 * e.g. it's write-tracked (upper-level SPs) or has one or more
 		 * shadow pages and unsync'ing pages is not allowed.
 		 */
-		if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync, speculative)) {
+		if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, speculative)) {
 			pgprintk("%s: found shadow page for %llx, marking ro\n",
 				 __func__, gfn);
 			wrprot = true;
@@ -180,8 +181,11 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
 		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

-	if (spte & PT_WRITABLE_MASK)
-		kvm_vcpu_mark_page_dirty(vcpu, gfn);
+	if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
+		/* Enforced by kvm_mmu_hugepage_adjust. */
+		WARN_ON(level > PG_LEVEL_4K);
+		mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
+	}

 	*new_spte = spte;
 	return wrprot;
...
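
The last make_spte() hunk above also changes how the gfn is recorded as dirty: the write is only logged when the slot has dirty tracking enabled, it goes through mark_page_dirty_in_slot() with the slot that is already in hand (rather than kvm_vcpu_mark_page_dirty(), which would redo the memslot lookup), and the new WARN_ON documents that dirty-logged writable mappings are expected to be 4K, as arranged by kvm_mmu_hugepage_adjust(). Below is a small, self-contained C sketch of that gate; all names and structures are made-up stand-ins for this note, not the kernel's.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum page_level { LEVEL_4K = 1, LEVEL_2M = 2, LEVEL_1G = 3 };

struct memslot {
	bool dirty_log_enabled;
	unsigned long dirty_bitmap;	/* one bit per 4K page; toy-sized */
};

static void mark_dirty(struct memslot *slot, unsigned int rel_gfn)
{
	slot->dirty_bitmap |= 1UL << rel_gfn;	/* toy dirty-bitmap update */
}

static void finish_writable_mapping(struct memslot *slot, unsigned int rel_gfn,
				    enum page_level level, bool writable)
{
	if (writable && slot->dirty_log_enabled) {
		/* Dirty logging only tracks 4K pages, so a huge map is a bug. */
		assert(level == LEVEL_4K);
		mark_dirty(slot, rel_gfn);
	}
}

int main(void)
{
	struct memslot slot = { .dirty_log_enabled = true };

	finish_writable_mapping(&slot, 3, LEVEL_4K, true);
	printf("dirty bitmap: %#lx\n", slot.dirty_bitmap);	/* prints 0x8 */
	return 0;
}
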
@@ -335,6 +335,7 @@ static inline u64 get_mmio_spte_generation(u64 spte)
 }

 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+	       struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
 	       u64 old_spte, bool speculative, bool can_unsync,
 	       bool host_writable, u64 *new_spte);
...
@@ -906,7 +906,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 	if (unlikely(!fault->slot))
 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
 	else
-		wrprot = make_spte(vcpu, sp, ACC_ALL, iter->gfn,
+		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
 				   fault->pfn, iter->old_spte, fault->prefault, true,
 				   fault->map_writable, &new_spte);
...