Commit ded8fef3 authored by Junaid Shahid, committed by Stefan Bader

kvm: x86: Do not release the page inside mmu_set_spte()

Release the page at the call-site where it was originally acquired.
This makes the exit code cleaner for most call sites, since they
do not need to duplicate code between success and the failure
label.
Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

CVE-2018-12207

(backported from commit 43fdcda9)
[tyhicks: Backport to 4.4
 - Adjust for differences in call sites of __direct_map() in mmu.c]
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent 7411be04
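
The refactoring pattern is worth spelling out: the function that acquires the page reference is now the one that releases it, so every exit path after the acquisition can funnel through a single cleanup label. Below is a minimal, self-contained C sketch of that control flow; acquire_page(), release_page() and set_spte_stub() are hypothetical stand-ins for gfn_to_pfn(), kvm_release_pfn_clean() and mmu_set_spte(), not real kernel APIs.

#include <stdio.h>

/* Hypothetical stand-in for gfn_to_pfn(): take a page reference. */
static int acquire_page(void)
{
	puts("page ref acquired");
	return 42;			/* pretend pfn */
}

/* Hypothetical stand-in for kvm_release_pfn_clean(): drop the reference. */
static void release_page(int pfn)
{
	printf("page ref %d released\n", pfn);
}

/* The callee now only consumes the page; it never releases it. */
static int set_spte_stub(int pfn)
{
	return pfn ? 0 : -1;
}

static int map_fault(int mmu_seq_changed)
{
	int r = -1;			/* like r = RET_PF_RETRY up front */
	int pfn = acquire_page();	/* the acquiring call site */

	if (mmu_seq_changed)
		goto out;		/* failure path shares the cleanup */

	r = set_spte_stub(pfn);		/* success path falls through to it */
out:
	release_page(pfn);		/* single release, where acquired */
	return r;
}

int main(void)
{
	printf("success path -> %d\n", map_fault(0));
	printf("retry path   -> %d\n", map_fault(1));
	return 0;
}

Before this change, the release lived inside the callee (mmu_set_spte()), which forced each caller's success path to bypass the shared out_unlock cleanup and duplicate the unlock-and-return sequence.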
arch/x86/kvm/mmu.c
@@ -2644,8 +2644,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
 		}
 	}
 
-	kvm_release_pfn_clean(pfn);
-
 	return ret;
 }
 
@@ -2680,9 +2678,11 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 	if (ret <= 0)
 		return -1;
 
-	for (i = 0; i < ret; i++, gfn++, start++)
+	for (i = 0; i < ret; i++, gfn++, start++) {
 		mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
 			     page_to_pfn(pages[i]), true, true);
+		put_page(pages[i]);
+	}
 
 	return 0;
 }
@@ -3032,6 +3032,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
 		return r;
 
+	r = RET_PF_RETRY;
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
@@ -3040,15 +3041,11 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
 			 prefault);
-	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	return r;
-
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	kvm_release_pfn_clean(pfn);
-	return RET_PF_RETRY;
+	return r;
 }
@@ -3530,6 +3527,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
 		return r;
 
+	r = RET_PF_RETRY;
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
@@ -3538,14 +3536,11 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, gpa, write, map_writable,
 			 level, gfn, pfn, prefault);
-	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	return r;
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	kvm_release_pfn_clean(pfn);
-	return RET_PF_RETRY;
+	return r;
 }
arch/x86/kvm/paging_tmpl.h
@@ -478,6 +478,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn,
 		     true, true);
+	kvm_release_pfn_clean(pfn);
 
 	return true;
 }
@@ -629,7 +630,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	return ret;
 
 out_gpte_changed:
-	kvm_release_pfn_clean(pfn);
 	return RET_PF_RETRY;
 }
@@ -773,6 +773,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		walker.pte_access &= ~ACC_EXEC_MASK;
 	}
 
+	r = RET_PF_RETRY;
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
@@ -785,14 +786,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 			 level, pfn, map_writable, prefault);
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
-	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	return r;
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	kvm_release_pfn_clean(pfn);
-	return RET_PF_RETRY;
+	return r;
 }