Commit e79234ce authored by Paolo Bonzini, committed by Greg Kroah-Hartman

KVM: x86: make FNAME(fetch) and __direct_map more similar

commit 3fcf2d1b upstream.

These two functions are basically doing the same thing through
kvm_mmu_get_page, link_shadow_page and mmu_set_spte; yet, for historical
reasons, their code looks very different.  This patch tries to take the
best of each and make them very similar, so that it is easy to understand
changes that apply to both of them.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8aaac306
...@@ -3087,40 +3087,39 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) ...@@ -3087,40 +3087,39 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
__direct_pte_prefetch(vcpu, sp, sptep); __direct_pte_prefetch(vcpu, sp, sptep);
} }
static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable, static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
int level, gfn_t gfn, kvm_pfn_t pfn, bool prefault) int map_writable, int level, kvm_pfn_t pfn,
bool prefault)
{ {
struct kvm_shadow_walk_iterator iterator; struct kvm_shadow_walk_iterator it;
struct kvm_mmu_page *sp; struct kvm_mmu_page *sp;
int emulate = 0; int ret;
gfn_t pseudo_gfn; gfn_t gfn = gpa >> PAGE_SHIFT;
gfn_t base_gfn = gfn;
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return 0; return RET_PF_RETRY;
for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { for_each_shadow_entry(vcpu, gpa, it) {
if (iterator.level == level) { base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
emulate = mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, if (it.level == level)
write, level, gfn, pfn, prefault,
map_writable);
direct_pte_prefetch(vcpu, iterator.sptep);
++vcpu->stat.pf_fixed;
break; break;
}
drop_large_spte(vcpu, iterator.sptep); drop_large_spte(vcpu, it.sptep);
if (!is_shadow_present_pte(*iterator.sptep)) { if (!is_shadow_present_pte(*it.sptep)) {
u64 base_addr = iterator.addr; sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
it.level - 1, true, ACC_ALL);
base_addr &= PT64_LVL_ADDR_MASK(iterator.level); link_shadow_page(vcpu, it.sptep, sp);
pseudo_gfn = base_addr >> PAGE_SHIFT;
sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
iterator.level - 1, 1, ACC_ALL);
link_shadow_page(vcpu, iterator.sptep, sp);
} }
} }
return emulate;
ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
write, level, base_gfn, pfn, prefault,
map_writable);
direct_pte_prefetch(vcpu, it.sptep);
++vcpu->stat.pf_fixed;
return ret;
} }
static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
...@@ -3453,8 +3452,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, ...@@ -3453,8 +3452,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
goto out_unlock; goto out_unlock;
if (likely(!force_pt_level)) if (likely(!force_pt_level))
transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault); r = __direct_map(vcpu, v, write, map_writable, level, pfn, prefault);
out_unlock: out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock); spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn); kvm_release_pfn_clean(pfn);
...@@ -4088,8 +4086,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, ...@@ -4088,8 +4086,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
goto out_unlock; goto out_unlock;
if (likely(!force_pt_level)) if (likely(!force_pt_level))
transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault); r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, prefault);
out_unlock: out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock); spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn); kvm_release_pfn_clean(pfn);
......
...@@ -602,6 +602,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, ...@@ -602,6 +602,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct kvm_shadow_walk_iterator it; struct kvm_shadow_walk_iterator it;
unsigned direct_access, access = gw->pt_access; unsigned direct_access, access = gw->pt_access;
int top_level, ret; int top_level, ret;
gfn_t base_gfn;
direct_access = gw->pte_access; direct_access = gw->pte_access;
...@@ -646,31 +647,29 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, ...@@ -646,31 +647,29 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
link_shadow_page(vcpu, it.sptep, sp); link_shadow_page(vcpu, it.sptep, sp);
} }
for (; base_gfn = gw->gfn;
shadow_walk_okay(&it) && it.level > hlevel;
shadow_walk_next(&it)) {
gfn_t direct_gfn;
for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
clear_sp_write_flooding_count(it.sptep); clear_sp_write_flooding_count(it.sptep);
base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
if (it.level == hlevel)
break;
validate_direct_spte(vcpu, it.sptep, direct_access); validate_direct_spte(vcpu, it.sptep, direct_access);
drop_large_spte(vcpu, it.sptep); drop_large_spte(vcpu, it.sptep);
if (is_shadow_present_pte(*it.sptep)) if (!is_shadow_present_pte(*it.sptep)) {
continue; sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
it.level - 1, true, direct_access);
direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); link_shadow_page(vcpu, it.sptep, sp);
}
sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
true, direct_access);
link_shadow_page(vcpu, it.sptep, sp);
} }
clear_sp_write_flooding_count(it.sptep);
ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
it.level, gw->gfn, pfn, prefault, map_writable); it.level, base_gfn, pfn, prefault, map_writable);
FNAME(pte_prefetch)(vcpu, gw, it.sptep); FNAME(pte_prefetch)(vcpu, gw, it.sptep);
++vcpu->stat.pf_fixed;
return ret; return ret;
out_gpte_changed: out_gpte_changed:
...@@ -833,7 +832,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, ...@@ -833,7 +832,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
r = FNAME(fetch)(vcpu, addr, &walker, write_fault, r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
level, pfn, map_writable, prefault); level, pfn, map_writable, prefault);
++vcpu->stat.pf_fixed;
kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
out_unlock: out_unlock:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment