Commit 5d9bc648 authored by Paolo Bonzini

KVM: x86: clean up kvm_arch_vcpu_runnable

Split the huge conditional in two functions.

Fixes: 64d60670
Cc: stable@vger.kernel.org
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f0d648bd
arch/x86/kvm/x86.c

@@ -6453,6 +6453,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		!vcpu->arch.apf.halted);
+}
+
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;
@@ -6461,8 +6467,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
 	for (;;) {
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		    !vcpu->arch.apf.halted)
+		if (kvm_vcpu_running(vcpu))
 			r = vcpu_enter_guest(vcpu);
 		else
 			r = vcpu_block(kvm, vcpu);
@@ -7762,19 +7767,33 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return true;
+
+	if (kvm_apic_has_events(vcpu))
+		return true;
+
+	if (vcpu->arch.pv.pv_unhalted)
+		return true;
+
+	if (atomic_read(&vcpu->arch.nmi_queued))
+		return true;
+
+	if (kvm_arch_interrupt_allowed(vcpu) &&
+	    kvm_cpu_has_interrupt(vcpu))
+		return true;
+
+	return false;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
 		kvm_x86_ops->check_nested_events(vcpu, false);
 
-	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		!vcpu->arch.apf.halted)
-		|| !list_empty_careful(&vcpu->async_pf.done)
-		|| kvm_apic_has_events(vcpu)
-		|| vcpu->arch.pv.pv_unhalted
-		|| atomic_read(&vcpu->arch.nmi_queued) ||
-		(kvm_arch_interrupt_allowed(vcpu) &&
-		 kvm_cpu_has_interrupt(vcpu));
+	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
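For readers skimming the diff, the sketch below restates the refactoring pattern the commit applies: one large boolean return expression is split into two named predicates, and the original function becomes an OR of the two. This is an illustrative stand-in only; the struct and helper names are simplified placeholders, not the kernel's struct kvm_vcpu or its accessors.

/*
 * Illustrative sketch of the split; fields stand in for the kernel state
 * that the real helpers inspect.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_state {
	bool runnable;        /* stands in for mp_state == KVM_MP_STATE_RUNNABLE */
	bool apf_halted;      /* stands in for arch.apf.halted */
	bool async_pf_done;   /* stands in for !list_empty_careful(&async_pf.done) */
	bool apic_event;      /* stands in for kvm_apic_has_events() */
	bool pv_unhalted;     /* stands in for arch.pv.pv_unhalted */
	bool nmi_queued;      /* stands in for atomic_read(&arch.nmi_queued) != 0 */
	bool irq_deliverable; /* interrupt allowed and pending */
};

/* First half of the old conditional: the vCPU may enter the guest. */
static bool vcpu_running(const struct vcpu_state *v)
{
	return v->runnable && !v->apf_halted;
}

/* Second half: an event that should wake a blocked vCPU. */
static bool vcpu_has_events(const struct vcpu_state *v)
{
	return v->async_pf_done || v->apic_event || v->pv_unhalted ||
	       v->nmi_queued || v->irq_deliverable;
}

/* The former single large expression reads as an OR of two named checks. */
static bool vcpu_runnable(const struct vcpu_state *v)
{
	return vcpu_running(v) || vcpu_has_events(v);
}

int main(void)
{
	struct vcpu_state v = { .runnable = false, .pv_unhalted = true };

	printf("runnable: %d\n", vcpu_runnable(&v)); /* prints 1 */
	return 0;
}

Naming the two halves also lets vcpu_run() reuse the "running" check directly instead of repeating the mp_state/apf test, which is what the second hunk in the diff does.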