Commit 7863e346 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: async_pf: Cleanup kvm_setup_async_pf()

schedule_work() returns 'false' only when the work is already on the queue,
and this can't happen as kvm_setup_async_pf() always allocates a new one.
Also, to avoid a potential race, it makes sense to call schedule_work() at
the very end, after the work has been added to the queue.
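
As a minimal sketch (assumed names, not kernel source) of why the dropped
return-value check was dead code: schedule_work() returns false only when
the work item is already pending, and a freshly allocated, freshly
INIT_WORK()'d item can never be in that state.

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	/* pf_handler() and queue_fresh_work() are hypothetical examples */
	static void pf_handler(struct work_struct *work)
	{
		/* ... the deferred work would run here ... */
	}

	static int queue_fresh_work(void)
	{
		struct work_struct *work;

		work = kzalloc(sizeof(*work), GFP_NOWAIT);
		if (!work)
			return -ENOMEM;

		INIT_WORK(work, pf_handler);	/* pending bit starts clear */
		/*
		 * schedule_work() returns false only if the work was already
		 * pending; impossible for a brand-new item, so the return
		 * value carries no information here.
		 */
		schedule_work(work);
		return 0;
	}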

While at it, do some minor cleanup: gfn_to_pfn_async(), mentioned in a
comment, does not currently exist and, moreover, we can check
kvm_is_error_hva() at the very beginning, before we try to allocate work,
so the 'retry_sync' label can go away completely.
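
For reference, a condensed view of the tail of kvm_setup_async_pf() as it
reads after this change (assembled from the diff below; the allocation and
setup in between are unchanged and elided, and the comments here are
editorial):

	/* Arch specific code should not do async PF in this case */
	if (unlikely(kvm_is_error_hva(hva)))
		return 0;

	/* ... allocate 'work' and fill it in (unchanged) ... */

	INIT_WORK(&work->work, async_pf_execute);

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	kvm_arch_async_page_not_present(vcpu, work);

	/* queue the work only after all bookkeeping is done */
	schedule_work(&work->work);

	return 1;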
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200610175532.779793-1-vkuznets@redhat.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cd18eaea
@@ -164,7 +164,9 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
 		return 0;
 
-	/* setup delayed work */
+	/* Arch specific code should not do async PF in this case */
+	if (unlikely(kvm_is_error_hva(hva)))
+		return 0;
 
 	/*
 	 * do alloc nowait since if we are going to sleep anyway we
@@ -183,24 +185,15 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	mmget(work->mm);
 	kvm_get_kvm(work->vcpu->kvm);
 
-	/* this can't really happen otherwise gfn_to_pfn_async
-	   would succeed */
-	if (unlikely(kvm_is_error_hva(work->addr)))
-		goto retry_sync;
-
 	INIT_WORK(&work->work, async_pf_execute);
-	if (!schedule_work(&work->work))
-		goto retry_sync;
 
 	list_add_tail(&work->queue, &vcpu->async_pf.queue);
 	vcpu->async_pf.queued++;
 	kvm_arch_async_page_not_present(vcpu, work);
+
+	schedule_work(&work->work);
+
 	return 1;
-retry_sync:
-	kvm_put_kvm(work->vcpu->kvm);
-	mmput(work->mm);
-	kmem_cache_free(async_pf_cache, work);
-	return 0;
 }
 
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
...