Commit 84b09f33 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

Revert "KVM: async_pf: Fix #DF due to inject "Page not Present" and "Page...

Revert "KVM: async_pf: Fix #DF due to inject "Page not Present" and "Page Ready" exceptions simultaneously"

Commit 9a6e7c39 (""KVM: async_pf: Fix #DF due to inject "Page not
Present" and "Page Ready" exceptions simultaneously") added a protection
against 'page ready' notification coming before 'page not present' is
delivered. This situation seems to be impossible since commit 2a266f23
("KVM MMU: check pending exception before injecting APF) which added
'vcpu->arch.exception.pending' check to kvm_can_do_async_pf.
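
For context, a minimal sketch of that check (abridged from the
arch/x86/kvm/x86.c code of that era, not the verbatim source):

static bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
        /*
         * The condition added by 2a266f23: never start an async #PF
         * cycle while another exception is already pending for the vCPU.
         */
        if (unlikely(!lapic_in_kernel(vcpu) ||
                     kvm_event_needs_reinjection(vcpu) ||
                     vcpu->arch.exception.pending))
                return false;

        /* ... remaining conditions (HLT-in-guest, interrupt window) ... */
        return kvm_arch_interrupt_allowed(vcpu);
}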

On x86, kvm_arch_async_page_present() has only one call site, the
kvm_check_async_pf_completion() loop, and we only enter the loop when
kvm_arch_can_inject_async_page_present(vcpu) returns true, which, when
the async PF MSR is enabled, translates into kvm_can_do_async_pf().
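
Roughly, that path looks as follows (abridged sketch of
virt/kvm/async_pf.c and arch/x86/kvm/x86.c, not the verbatim source):

void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        while (!list_empty_careful(&vcpu->async_pf.done) &&
               kvm_arch_can_inject_async_page_present(vcpu)) {
                /* ... dequeue 'work' from vcpu->async_pf.done ... */
                kvm_arch_async_page_present(vcpu, work); /* only x86 call site */
                /* ... */
        }
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
        if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
                return true;

        return kvm_can_do_async_pf(vcpu);
}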

There is also a problem with the cancellation mechanism itself: we don't
seem to check that the 'page not present' notification being cancelled
matches the 'page ready' notification, so in theory we may erroneously
drop two valid events.
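
To illustrate, here is the cancellation branch being removed (abridged
from the diff below), with annotations added:

if (val == KVM_PV_REASON_PAGE_NOT_PRESENT &&
    vcpu->arch.exception.pending &&
    vcpu->arch.exception.nr == PF_VECTOR &&
    !apf_put_user(vcpu, 0)) {
        /*
         * Nothing here compares the token of the pending 'page not
         * present' notification with work->arch.token, so a 'page
         * ready' event for one page can cancel the 'page not present'
         * event of a different page, losing both notifications.
         */
        vcpu->arch.exception.pending = false;
        /* ... the rest of the exception state is cleared here ... */
}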

Revert the commit.
Reviewed-by: Gavin Shan <gshan@redhat.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200525144125.143875-2-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f4a9fdd5
arch/x86/kvm/x86.c:

@@ -10427,13 +10427,6 @@ static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
                                       sizeof(val));
 }
 
-static int apf_get_user(struct kvm_vcpu *vcpu, u32 *val)
-{
-
-        return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, val,
-                                      sizeof(u32));
-}
-
 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
 {
         if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
@@ -10498,7 +10491,6 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                  struct kvm_async_pf *work)
 {
         struct x86_exception fault;
-        u32 val;
 
         if (work->wakeup_all)
                 work->arch.token = ~0; /* broadcast wakeup */
@@ -10507,19 +10499,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
         trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
 
         if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
-            !apf_get_user(vcpu, &val)) {
-                if (val == KVM_PV_REASON_PAGE_NOT_PRESENT &&
-                    vcpu->arch.exception.pending &&
-                    vcpu->arch.exception.nr == PF_VECTOR &&
-                    !apf_put_user(vcpu, 0)) {
-                        vcpu->arch.exception.injected = false;
-                        vcpu->arch.exception.pending = false;
-                        vcpu->arch.exception.nr = 0;
-                        vcpu->arch.exception.has_error_code = false;
-                        vcpu->arch.exception.error_code = 0;
-                        vcpu->arch.exception.has_payload = false;
-                        vcpu->arch.exception.payload = 0;
-                } else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+            !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
                         fault.vector = PF_VECTOR;
                         fault.error_code_valid = true;
                         fault.error_code = 0;
@@ -10527,7 +10507,6 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                         fault.nested_page_fault = false;
                         fault.address = work->arch.token;
                         fault.async_page_fault = true;
                         kvm_inject_page_fault(vcpu, &fault);
-                }
         }
         vcpu->arch.apf.halted = false;
         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;