Commit a634fbe9 authored by Paolo Bonzini, committed by Stefan Bader

KVM: x86: simplify ept_misconfig

Calling handle_mmio_page_fault() has been unnecessary since commit
e9ee956e ("KVM: x86: MMU: Move handle_mmio_page_fault() call to
kvm_mmu_page_fault()", 2016-02-22).

handle_mmio_page_fault() can now be made static.
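
For reference, a simplified sketch of the dispatch that makes the direct call redundant. This is not the literal 4.4 source: it condenses the post-e9ee956e kvm_mmu_page_fault() (the real function sets an emulation type and retries rather than returning straight from the emulator) and uses the RET_MMIO_PF_* names shown in the diff below:

	/*
	 * Sketch: reserved-bit faults are already routed to
	 * handle_mmio_page_fault() inside kvm_mmu_page_fault(), so callers
	 * only need to pass PFERR_RSVD_MASK in the error code.
	 */
	int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
			       void *insn, int insn_len)
	{
		int r;

		if (unlikely(error_code & PFERR_RSVD_MASK)) {
			r = handle_mmio_page_fault(vcpu, cr2,
						   vcpu->arch.mmu.direct_map);
			if (r == RET_MMIO_PF_EMULATE)
				return x86_emulate_instruction(vcpu, cr2, 0, insn,
							       insn_len) == EMULATE_DONE;
			if (r == RET_MMIO_PF_RETRY)
				return 1;
			if (r < 0)		/* RET_MMIO_PF_BUG */
				return r;
			/* Must be RET_MMIO_PF_INVALID: fix the spte below. */
		}

		return vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
	}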
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>

CVE-2018-12207

(backported from commit e08d26f0)
[tyhicks: Backport to 4.4
 - Minor context change in handle_ept_misconfig() due to missing commit
   db1c056c ("kvm: vmx: Use the hardware provided GPA instead of
   page walk")]
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
arch/x86/kvm/mmu.c
@@ -3362,7 +3362,23 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	return reserved;
 }
 
-int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+/*
+ * Return values of handle_mmio_page_fault:
+ * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
+ *			directly.
+ * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
+ *			fault path update the mmio spte.
+ * RET_MMIO_PF_RETRY: let CPU fault again on the address.
+ * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
+ */
+enum {
+	RET_MMIO_PF_EMULATE = 1,
+	RET_MMIO_PF_INVALID = 2,
+	RET_MMIO_PF_RETRY = 0,
+	RET_MMIO_PF_BUG = -1
+};
+
+static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	u64 spte;
 	bool reserved;
@@ -4387,6 +4403,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 			return 1;
 		if (r < 0)
 			return r;
+		/* Must be RET_MMIO_PF_INVALID.  */
 	}
 
 	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
arch/x86/kvm/mmu.h
@@ -55,23 +55,6 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
-/*
- * Return values of handle_mmio_page_fault:
- * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
- *			directly.
- * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
- *			fault path update the mmio spte.
- * RET_MMIO_PF_RETRY: let CPU fault again on the address.
- * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
- */
-enum {
-	RET_MMIO_PF_EMULATE = 1,
-	RET_MMIO_PF_INVALID = 2,
-	RET_MMIO_PF_RETRY = 0,
-	RET_MMIO_PF_BUG = -1
-};
-
-int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
arch/x86/kvm/vmx.c
@@ -6372,16 +6372,9 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 					      NULL, 0) == EMULATE_DONE;
 	}
 
-	ret = handle_mmio_page_fault(vcpu, gpa, true);
-	if (likely(ret == RET_MMIO_PF_EMULATE))
-		return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
-					      EMULATE_DONE;
-
-	if (unlikely(ret == RET_MMIO_PF_INVALID))
-		return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);
-
-	if (unlikely(ret == RET_MMIO_PF_RETRY))
-		return 1;
+	ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
+	if (ret >= 0)
+		return ret;
 
 	/* It is the real ept misconfig */
 	WARN_ON(1);
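
For clarity, the post-patch control flow of handle_ept_misconfig() as a simplified sketch (the fast-MMIO probe at the top of the function is elided; the tail follows the 4.4 function):

	static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
	{
		gpa_t gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
		int ret;

		/*
		 * PFERR_RSVD_MASK marks the fault as an MMIO-spte fault, so
		 * kvm_mmu_page_fault() runs the whole RET_MMIO_PF_* state
		 * machine internally and hands back a ready-made exit code.
		 */
		ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
		if (ret >= 0)
			return ret;

		/* Negative (RET_MMIO_PF_BUG) means a genuine EPT misconfig. */
		WARN_ON(1);
		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
		vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
		return 0;
	}

Passing PFERR_RSVD_MASK is the key design point: reserved bits set in an spte are exactly how KVM marks MMIO mappings, so the generic page-fault path can distinguish emulatable MMIO from a real misconfiguration without a separate entry point.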