Commit b182093d authored by Paolo Bonzini, committed by Greg Kroah-Hartman

KVM: x86: remove now unneeded hugepage gfn adjustment

commit d679b326 upstream.

After the previous patch, the low bits of the gfn are masked in
both FNAME(fetch) and __direct_map, so we do not need to clear them
in transparent_hugepage_adjust.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e79234ce
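
For orientation before the diff: transparent_hugepage_adjust promotes a 4KB mapping to a huge-page mapping when the host backs the pfn with a transparent huge page, relying on the guest and host frames sharing the same offset within the huge page (the VM_BUG_ON below). The following is a minimal user-space sketch of that mask arithmetic; gfn_t, kvm_pfn_t, and the KVM_PAGES_PER_HPAGE definition here are stand-ins for illustration, not the kernel's own code.

/* Stand-alone sketch of the alignment math in transparent_hugepage_adjust.
 * Types and the macro mimic the kernel's x86 definitions but are local
 * stand-ins for illustration. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t kvm_pfn_t;

/* 512 entries per level on x86: level 2 => 512 4KB pages per 2MB page */
#define KVM_PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

int main(void)
{
	int level = 2;                                /* PMD-level huge page */
	gfn_t mask = KVM_PAGES_PER_HPAGE(level) - 1;  /* 0x1ff */
	gfn_t gfn = 0x12345;                          /* faulting guest frame */
	kvm_pfn_t pfn = 0xabd45;                      /* host frame backing it */

	/* The VM_BUG_ON invariant: guest and host offsets inside the huge
	 * page must agree, or it cannot be mapped large at all. */
	assert((gfn & mask) == (pfn & mask));

	/* After the previous patch the mapping code derives the base gfn
	 * itself, so the helper only needs to align the pfn: */
	pfn &= ~mask;
	gfn_t base_gfn = gfn & ~mask;   /* what __direct_map now computes */

	printf("base_gfn=%#llx base_pfn=%#llx\n",
	       (unsigned long long)base_gfn, (unsigned long long)pfn);
	return 0;
}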
arch/x86/kvm/mmu.c
@@ -3155,11 +3155,10 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 }
 
 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
-					gfn_t *gfnp, kvm_pfn_t *pfnp,
+					gfn_t gfn, kvm_pfn_t *pfnp,
 					int *levelp)
 {
 	kvm_pfn_t pfn = *pfnp;
-	gfn_t gfn = *gfnp;
 	int level = *levelp;
 
 	/*
@@ -3186,8 +3185,6 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 		mask = KVM_PAGES_PER_HPAGE(level) - 1;
 		VM_BUG_ON((gfn & mask) != (pfn & mask));
 		if (pfn & mask) {
-			gfn &= ~mask;
-			*gfnp = gfn;
 			kvm_release_pfn_clean(pfn);
 			pfn &= ~mask;
 			kvm_get_pfn(pfn);
@@ -3451,7 +3448,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
 	if (likely(!force_pt_level))
-		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
+		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
 	r = __direct_map(vcpu, v, write, map_writable, level, pfn, prefault);
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
@@ -4085,7 +4082,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
 	if (likely(!force_pt_level))
-		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
+		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
 	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, prefault);
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);

arch/x86/kvm/paging_tmpl.h
@@ -829,7 +829,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
 	if (!force_pt_level)
-		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
+		transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
 	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
 			 level, pfn, map_writable, prefault);
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
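
The write-back that the diff removes was only needed because callers later used the adjusted gfn to install the mapping. After the previous patch, __direct_map and FNAME(fetch) recompute the base frame per level during the shadow-page walk, which is why an unmasked gfn is now safe to pass by value. Below is a hedged, runnable sketch of that caller-side masking; types and the macro are stand-ins again, and the real kernel does this inside its shadow-entry iterator rather than a plain loop.

/* Sketch of the per-level base-gfn masking done in the mapping walk.
 * This models the arithmetic only; the kernel performs it inside
 * for_each_shadow_entry at the iterator's current level. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define KVM_PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

int main(void)
{
	gfn_t gfn = 0x12345;   /* unmasked faulting gfn, as callers now pass */
	int level = 2;         /* final level chosen by the adjust helper */
	gfn_t final_base = 0;

	/* Walk from the x86-64 root (level 4) down to the mapping level,
	 * recomputing the base gfn at every step. */
	for (int cur = 4; cur >= level; cur--) {
		gfn_t base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(cur) - 1);
		if (cur == level)
			final_base = base_gfn;
		printf("level %d: base_gfn=%#llx\n",
		       cur, (unsigned long long)base_gfn);
	}

	/* The result matches what the removed 'gfn &= ~mask' write-back
	 * used to hand back to the caller. */
	gfn_t mask = KVM_PAGES_PER_HPAGE(level) - 1;
	assert(final_base == (gfn & ~mask));
	return 0;
}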