Commit 744e699c authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Move gpa_val and gpa_available into the emulator context

Move the GPA tracking into the emulator context now that the context is
guaranteed to be initialized via __init_emulate_ctxt() prior to
dereferencing gpa_{available,val}, i.e. now that seeing a stale
gpa_available will also trigger a WARN due to an invalid context.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 92daa48b
...@@ -319,6 +319,10 @@ struct x86_emulate_ctxt { ...@@ -319,6 +319,10 @@ struct x86_emulate_ctxt {
bool have_exception; bool have_exception;
struct x86_exception exception; struct x86_exception exception;
/* GPA available */
bool gpa_available;
gpa_t gpa_val;
/* /*
* decode cache * decode cache
*/ */
......
...@@ -808,10 +808,6 @@ struct kvm_vcpu_arch { ...@@ -808,10 +808,6 @@ struct kvm_vcpu_arch {
int pending_ioapic_eoi; int pending_ioapic_eoi;
int pending_external_vector; int pending_external_vector;
/* GPA available */
bool gpa_available;
gpa_t gpa_val;
/* be preempted when it's in kernel-mode(cpl=0) */ /* be preempted when it's in kernel-mode(cpl=0) */
bool preempted_in_kernel; bool preempted_in_kernel;
......
...@@ -5745,10 +5745,9 @@ static int emulator_read_write_onepage(unsigned long addr, void *val, ...@@ -5745,10 +5745,9 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
* operation using rep will only have the initial GPA from the NPF * operation using rep will only have the initial GPA from the NPF
* occurred. * occurred.
*/ */
if (vcpu->arch.gpa_available && if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) &&
emulator_can_use_gpa(ctxt) && (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) {
(addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) { gpa = ctxt->gpa_val;
gpa = vcpu->arch.gpa_val;
ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
} else { } else {
ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
...@@ -6417,6 +6416,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) ...@@ -6417,6 +6416,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
ctxt->gpa_available = false;
ctxt->eflags = kvm_get_rflags(vcpu); ctxt->eflags = kvm_get_rflags(vcpu);
ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
...@@ -6847,8 +6847,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, ...@@ -6847,8 +6847,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
/* With shadow page tables, cr2 contains a GVA or nGPA. */ /* With shadow page tables, cr2 contains a GVA or nGPA. */
if (vcpu->arch.mmu->direct_map) { if (vcpu->arch.mmu->direct_map) {
vcpu->arch.gpa_available = true; ctxt->gpa_available = true;
vcpu->arch.gpa_val = cr2_or_gpa; ctxt->gpa_val = cr2_or_gpa;
} }
} else { } else {
/* Sanitize the address out of an abundance of paranoia. */ /* Sanitize the address out of an abundance of paranoia. */
...@@ -8454,7 +8454,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) ...@@ -8454,7 +8454,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (vcpu->arch.apic_attention) if (vcpu->arch.apic_attention)
kvm_lapic_sync_from_vapic(vcpu); kvm_lapic_sync_from_vapic(vcpu);
vcpu->arch.gpa_available = false;
r = kvm_x86_ops->handle_exit(vcpu, exit_fastpath); r = kvm_x86_ops->handle_exit(vcpu, exit_fastpath);
return r; return r;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.