Commit d5fa597e authored by Maxim Levitsky, committed by Paolo Bonzini

KVM: x86: allow per cpu apicv inhibit reasons

Add optional callback .vcpu_get_apicv_inhibit_reasons returning
extra inhibit reasons that prevent APICv from working on this vCPU.
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220322174050.241850-6-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0b349662
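
As an illustration of the new hook, a vendor module could return a per-vCPU inhibit mask along the lines of the sketch below. The names here (example_vcpu_get_apicv_inhibit_reasons, example_x86_ops) and the guest-mode condition are hypothetical and not taken from this patch; only the callback signature and the kvm_apicv_inhibit constants come from the KVM x86 headers.

/*
 * Illustrative sketch only -- not part of this commit.  A vendor module
 * could report a per-vCPU APICv inhibit by returning a mask of
 * kvm_apicv_inhibit bits from the new optional callback.
 */
#include <linux/kvm_host.h>

static unsigned long example_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu)
{
	unsigned long reasons = 0;

	/* Hypothetical condition: inhibit APICv while this vCPU runs a nested guest. */
	if (is_guest_mode(vcpu))
		reasons |= BIT(APICV_INHIBIT_REASON_NESTED);

	return reasons;
}

static struct kvm_x86_ops example_x86_ops __initdata = {
	/* ... other callbacks elided ... */
	.vcpu_get_apicv_inhibit_reasons = example_vcpu_get_apicv_inhibit_reasons,
};

Because the op is declared with KVM_X86_OP_OPTIONAL_RET0, a backend that leaves the hook unset behaves as if it returned 0, i.e. it reports no extra per-vCPU inhibits.
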
@@ -126,6 +126,7 @@ KVM_X86_OP_OPTIONAL(migrate_timers)
 KVM_X86_OP(msr_filter_changed)
 KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
+KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
 
 #undef KVM_X86_OP
 #undef KVM_X86_OP_OPTIONAL
@@ -1507,6 +1507,11 @@ struct kvm_x86_ops {
 	int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
 
 	void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
+
+	/*
+	 * Returns vCPU specific APICv inhibit reasons
+	 */
+	unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_x86_nested_ops {

@@ -1807,6 +1812,7 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 			       struct x86_exception *exception);
 
 bool kvm_apicv_activated(struct kvm *kvm);
+bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu);
 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
 void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
 				      enum kvm_apicv_inhibit reason, bool set);
@@ -9114,6 +9114,14 @@ bool kvm_apicv_activated(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_apicv_activated);
 
+bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
+{
+	ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons);
+	ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu);
+	return (vm_reasons | vcpu_reasons) == 0;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated);
+
 static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
 				       enum kvm_apicv_inhibit reason, bool set)

@@ -9799,7 +9807,8 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
 
 	down_read(&vcpu->kvm->arch.apicv_update_lock);
 
-	activate = kvm_apicv_activated(vcpu->kvm);
+	activate = kvm_vcpu_apicv_activated(vcpu);
 
 	if (vcpu->arch.apicv_active == activate)
 		goto out;

@@ -10200,7 +10209,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	 * per-VM state, and responding vCPUs must wait for the update
 	 * to complete before servicing KVM_REQ_APICV_UPDATE.
 	 */
-	WARN_ON_ONCE(kvm_apicv_activated(vcpu->kvm) != kvm_vcpu_apicv_active(vcpu));
+	WARN_ON_ONCE(kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu));
 
 	exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu);
 
 	if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))