Commit 2a18b7e7 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: async_pf: Inject 'page ready' event only if 'page not present' was previously injected

'Page not present' event may or may not get injected depending on
the guest's state. If the event wasn't injected, there is no need to
inject the corresponding 'page ready' event, as the guest may get
confused. E.g. Linux thinks that the corresponding 'page not present'
event wasn't delivered *yet* and allocates a 'dummy entry' for it.
This entry is never freed.
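
For illustration, a minimal user-space sketch of that guest-side
bookkeeping (not the real arch/x86/kernel/kvm.c code; apf_node,
apf_find() and apf_wake() are invented names, and locking is omitted).
apf_wake() models the guest's 'page ready' handler:

/* Sketch only: models why an unmatched 'page ready' event leaks. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct apf_node {
	uint32_t token;
	bool dummy;		/* parked by wake, waiting for 'not present' */
	struct apf_node *next;
};

static struct apf_node *apf_list;

static struct apf_node *apf_find(uint32_t token)
{
	struct apf_node *n;

	for (n = apf_list; n; n = n->next)
		if (n->token == token)
			return n;
	return NULL;
}

/* A 'page ready' event arrives for @token. */
static void apf_wake(uint32_t token)
{
	struct apf_node *n = apf_find(token);

	if (!n) {
		/*
		 * The guest assumes the matching 'page not present' event
		 * is merely late and parks a dummy entry for it to find.
		 * If that event was in fact never injected, nothing ever
		 * consumes the entry: it is leaked. This commit avoids
		 * injecting the unmatched 'page ready' in the first place.
		 */
		n = calloc(1, sizeof(*n));
		n->token = token;
		n->dummy = true;
		n->next = apf_list;
		apf_list = n;
		printf("token %u: parked dummy entry\n", (unsigned)token);
		return;
	}
	printf("token %u: woke sleeper\n", (unsigned)token);	/* normal case */
}

int main(void)
{
	apf_wake(42);	/* 'page ready' with no prior 'page not present' */
	return 0;
}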

Note, 'wakeup all' events have no corresponding 'page not present'
event and always get injected.

s390 seems to always be able to inject 'page not present', so there
the change is effectively a nop.
Suggested-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200610175532.779793-2-vkuznets@redhat.com>
Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=208081
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7863e346
arch/s390/include/asm/kvm_host.h
@@ -978,7 +978,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
 			       struct kvm_async_pf *work);
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				     struct kvm_async_pf *work);
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
...
arch/s390/kvm/kvm-s390.c
@@ -3923,11 +3923,13 @@ static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
 	}
 }

-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				     struct kvm_async_pf *work)
 {
 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
+
+	return true;
 }

 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
...
arch/x86/include/asm/kvm_host.h
@@ -1670,7 +1670,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm);
 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
 				       unsigned long *vcpu_bitmap);

-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				     struct kvm_async_pf *work);
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 				 struct kvm_async_pf *work);
...
arch/x86/kvm/x86.c
@@ -10511,7 +10511,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 	return kvm_arch_interrupt_allowed(vcpu);
 }

-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				     struct kvm_async_pf *work)
 {
 	struct x86_exception fault;
@@ -10528,6 +10528,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 		fault.address = work->arch.token;
 		fault.async_page_fault = true;
 		kvm_inject_page_fault(vcpu, &fault);
+		return true;
 	} else {
 		/*
 		 * It is not possible to deliver a paravirtualized asynchronous
@@ -10538,6 +10539,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 		 * fault is retried, hopefully the page will be ready in the host.
 		 */
 		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+		return false;
 	}
 }
@@ -10555,7 +10557,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 	kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
 	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
-	if (kvm_pv_async_pf_enabled(vcpu) &&
+	if ((work->wakeup_all || work->notpresent_injected) &&
+	    kvm_pv_async_pf_enabled(vcpu) &&
 	    !apf_put_user_ready(vcpu, work->arch.token)) {
 		vcpu->arch.apf.pageready_pending = true;
 		kvm_apic_set_irq(vcpu, &irq, NULL);
...
include/linux/kvm_host.h
@@ -206,6 +206,7 @@ struct kvm_async_pf {
 	unsigned long addr;
 	struct kvm_arch_async_pf arch;
 	bool wakeup_all;
+	bool notpresent_injected;
 };

 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
...
virt/kvm/async_pf.c
@@ -189,7 +189,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	list_add_tail(&work->queue, &vcpu->async_pf.queue);
 	vcpu->async_pf.queued++;
-	kvm_arch_async_page_not_present(vcpu, work);
+	work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);

 	schedule_work(&work->work);
...