Commit 57354682 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Add sptep_to_sp() helper to wrap shadow page lookup

Introduce sptep_to_sp() to reduce the boilerplate code needed to get the
shadow page associated with an spte pointer, and to improve readability,
as it's not immediately obvious that "page_header" is a KVM-specific
accessor for retrieving a shadow page. (A sketch of the before/after
pattern follows the tags below.)
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200622202034.15093-6-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 985ab278
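
The conversion is mechanical: every open-coded page_header(__pa(...))
lookup in the hunks below (MMU core, the MMU audit helpers, and the
paging template) becomes a call to the new helper. A minimal sketch of
the pattern, with sptep standing in for any spte pointer in the touched
functions:

        /* Before: the reader must know that page_header() plus __pa()
         * together resolve an spte pointer to its shadow page. */
        struct kvm_mmu_page *sp = page_header(__pa(sptep));

        /* After: one obviously named helper hides both steps. */
        struct kvm_mmu_page *sp = sptep_to_sp(sptep);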
@@ -677,7 +677,7 @@ union split_spte {
 
 static void count_spte_clear(u64 *sptep, u64 spte)
 {
-        struct kvm_mmu_page *sp = page_header(__pa(sptep));
+        struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 
         if (is_shadow_present_pte(spte))
                 return;
@@ -761,7 +761,7 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
  */
 static u64 __get_spte_lockless(u64 *sptep)
 {
-        struct kvm_mmu_page *sp = page_header(__pa(sptep));
+        struct kvm_mmu_page *sp = sptep_to_sp(sptep);
         union split_spte spte, *orig = (union split_spte *)sptep;
         int count;
 
@@ -1427,7 +1427,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
         struct kvm_mmu_page *sp;
         struct kvm_rmap_head *rmap_head;
 
-        sp = page_header(__pa(spte));
+        sp = sptep_to_sp(spte);
         kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
         rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
         return pte_list_add(vcpu, spte, rmap_head);
@@ -1439,7 +1439,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
         gfn_t gfn;
         struct kvm_rmap_head *rmap_head;
 
-        sp = page_header(__pa(spte));
+        sp = sptep_to_sp(spte);
         gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
         rmap_head = gfn_to_rmap(kvm, gfn, sp);
         __pte_list_remove(spte, rmap_head);
@@ -1531,7 +1531,7 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
 {
         if (is_large_pte(*sptep)) {
-                WARN_ON(page_header(__pa(sptep))->role.level == PG_LEVEL_4K);
+                WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
                 drop_spte(kvm, sptep);
                 --kvm->stat.lpages;
                 return true;
@@ -1543,7 +1543,7 @@ static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
 {
         if (__drop_large_spte(vcpu->kvm, sptep)) {
-                struct kvm_mmu_page *sp = page_header(__pa(sptep));
+                struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 
                 kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
                         KVM_PAGES_PER_HPAGE(sp->role.level));
@@ -2002,7 +2002,7 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
         struct kvm_rmap_head *rmap_head;
         struct kvm_mmu_page *sp;
 
-        sp = page_header(__pa(spte));
+        sp = sptep_to_sp(spte);
 
         rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
 
@@ -2124,7 +2124,7 @@ static void mark_unsync(u64 *spte)
         struct kvm_mmu_page *sp;
         unsigned int index;
 
-        sp = page_header(__pa(spte));
+        sp = sptep_to_sp(spte);
         index = spte - sp->spt;
         if (__test_and_set_bit(index, sp->unsync_child_bitmap))
                 return;
@@ -2449,9 +2449,7 @@ static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
 
 static void clear_sp_write_flooding_count(u64 *spte)
 {
-        struct kvm_mmu_page *sp = page_header(__pa(spte));
-
-        __clear_sp_write_flooding_count(sp);
+        __clear_sp_write_flooding_count(sptep_to_sp(spte));
 }
 
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
@@ -3026,7 +3024,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
         if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
                 return 0;
 
-        sp = page_header(__pa(sptep));
+        sp = sptep_to_sp(sptep);
         if (sp_ad_disabled(sp))
                 spte |= SPTE_AD_DISABLED_MASK;
         else if (kvm_vcpu_ad_need_write_protect(vcpu))
@@ -3239,7 +3237,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 {
         struct kvm_mmu_page *sp;
 
-        sp = page_header(__pa(sptep));
+        sp = sptep_to_sp(sptep);
 
         /*
          * Without accessed bits, there's no way to distinguish between
@@ -3547,7 +3545,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                 if (!is_shadow_present_pte(spte))
                         break;
 
-                sp = page_header(__pa(iterator.sptep));
+                sp = sptep_to_sp(iterator.sptep);
                 if (!is_last_spte(spte, sp->role.level))
                         break;
 
@@ -5926,7 +5924,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 
 restart:
         for_each_rmap_spte(rmap_head, &iter, sptep) {
-                sp = page_header(__pa(sptep));
+                sp = sptep_to_sp(sptep);
                 pfn = spte_to_pfn(*sptep);
 
                 /*
@@ -97,7 +97,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
         kvm_pfn_t pfn;
         hpa_t hpa;
 
-        sp = page_header(__pa(sptep));
+        sp = sptep_to_sp(sptep);
 
         if (sp->unsync) {
                 if (level != PG_LEVEL_4K) {
@@ -132,7 +132,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
         struct kvm_memory_slot *slot;
         gfn_t gfn;
 
-        rev_sp = page_header(__pa(sptep));
+        rev_sp = sptep_to_sp(sptep);
         gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
 
         slots = kvm_memslots_for_spte_role(kvm, rev_sp->role);
@@ -165,7 +165,7 @@ static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 
 static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 {
-        struct kvm_mmu_page *sp = page_header(__pa(sptep));
+        struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 
         if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
                 audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
@@ -50,6 +50,11 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
         return (struct kvm_mmu_page *)page_private(page);
 }
 
+static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
+{
+        return page_header(__pa(sptep));
+}
+
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
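For readers without the tree handy: page_header(), the first context
line of the header hunk above (the new helper lands right next to it in
the MMU-internal header this series introduces), works because KVM
allocates each shadow page table as a full page and stashes a
back-pointer to the owning struct kvm_mmu_page in that page's private
field. A hedged sketch of the whole chain, assuming the standard kernel
helpers __pa(), pfn_to_page() and page_private(), and eliding the
allocation-time set_page_private() call:

        /*
         * Sketch, not verbatim kernel source: resolve any pointer into
         * a shadow page table to the kvm_mmu_page describing that table.
         */
        static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
        {
                /* Physical address of the table -> its struct page. */
                struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

                /* KVM stored this back-pointer when the page was allocated. */
                return (struct kvm_mmu_page *)page_private(page);
        }

        static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
        {
                /* __pa() is valid because sptep points into a kernel-mapped page. */
                return page_header(__pa(sptep));
        }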
@@ -596,7 +596,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
         u64 *spte;
         int i;
 
-        sp = page_header(__pa(sptep));
+        sp = sptep_to_sp(sptep);
         if (sp->role.level > PG_LEVEL_4K)
                 return;
 
@@ -916,7 +916,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
                 level = iterator.level;
                 sptep = iterator.sptep;
 
-                sp = page_header(__pa(sptep));
+                sp = sptep_to_sp(sptep);
                 if (is_last_spte(*sptep, level)) {
                         pt_element_t gpte;
                         gpa_t pte_gpa;