Commit 4e19c36f authored by Suravee Suthikulpanit, committed by Paolo Bonzini

kvm: x86: Introduce APICv inhibit reason bits

There are several cases in which a VM needs to deactivate APICv,
e.g. when APICv is disabled via a module parameter at load time, or
when Hyper-V SynIC support is enabled. Additional inhibit reasons
will be introduced later on when dynamic APICv is supported.

Introduce KVM APICv inhibit reason bits along with a new variable,
apicv_inhibit_reasons, to help keep track of the APICv state for each VM.

Initially, the APICV_INHIBIT_REASON_DISABLE bit is used to indicate
the case where APICv is disabled during KVM module load
(e.g. insmod kvm_amd avic=0 or insmod kvm_intel enable_apicv=0).

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
[Do not use get_enable_apicv; consider irqchip_split in svm.c. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b26a695a
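For context, the scheme this patch introduces is simple: apicv_inhibit_reasons is a bitmask in which each independent inhibit source owns one bit, and APICv counts as active only while the whole mask reads zero. Below is a minimal userspace model of that idea (illustration only, not kernel code; the APICV_INHIBIT_REASON_HYPERV bit and the helper names are hypothetical stand-ins for reasons added by later patches — only APICV_INHIBIT_REASON_DISABLE exists after this patch):

/*
 * Minimal userspace model of the inhibit-reason scheme; not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define APICV_INHIBIT_REASON_DISABLE	0	/* introduced by this patch */
#define APICV_INHIBIT_REASON_HYPERV	1	/* hypothetical later reason */

static unsigned long apicv_inhibit_reasons;

/* APICv is active only while no source holds an inhibit bit. */
static bool apicv_activated(void)
{
	return apicv_inhibit_reasons == 0;
}

/* Each inhibit source sets or clears only its own bit. */
static void apicv_update_inhibit(int reason, bool inhibit)
{
	if (inhibit)
		apicv_inhibit_reasons |= 1UL << reason;
	else
		apicv_inhibit_reasons &= ~(1UL << reason);
}

int main(void)
{
	apicv_update_inhibit(APICV_INHIBIT_REASON_DISABLE, false);	/* avic=1 */
	printf("activated: %d\n", apicv_activated());			/* 1 */

	apicv_update_inhibit(APICV_INHIBIT_REASON_HYPERV, true);	/* SynIC on */
	printf("activated: %d\n", apicv_activated());			/* 0 */
	return 0;
}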
arch/x86/include/asm/kvm_host.h
@@ -873,6 +873,8 @@ enum kvm_irqchip_mode {
 	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
 };
 
+#define APICV_INHIBIT_REASON_DISABLE    0
+
 struct kvm_arch {
 	unsigned long n_used_mmu_pages;
 	unsigned long n_requested_mmu_pages;
@@ -904,6 +906,7 @@ struct kvm_arch {
 	struct kvm_apic_map *apic_map;
 
 	bool apic_access_page_done;
+	unsigned long apicv_inhibit_reasons;
 
 	gpa_t wall_clock;
@@ -1478,6 +1481,8 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 			       struct x86_exception *exception);
 
 void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
+bool kvm_apicv_activated(struct kvm *kvm);
+void kvm_apicv_init(struct kvm *kvm, bool enable);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
arch/x86/kvm/svm.c
@@ -2052,6 +2052,18 @@ static int avic_vm_init(struct kvm *kvm)
 	return err;
 }
 
+static int svm_vm_init(struct kvm *kvm)
+{
+	if (avic) {
+		int ret = avic_vm_init(kvm);
+		if (ret)
+			return ret;
+	}
+
+	kvm_apicv_init(kvm, avic && irqchip_split(kvm));
+	return 0;
+}
+
 static inline int
 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
 {
@@ -7274,7 +7286,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.vm_alloc = svm_vm_alloc,
 	.vm_free = svm_vm_free,
-	.vm_init = avic_vm_init,
+	.vm_init = svm_vm_init,
 	.vm_destroy = svm_vm_destroy,
 
 	.prepare_guest_switch = svm_prepare_guest_switch,
arch/x86/kvm/vmx/vmx.c
@@ -6813,6 +6813,7 @@ static int vmx_vm_init(struct kvm *kvm)
 			break;
 		}
 	}
+	kvm_apicv_init(kvm, enable_apicv);
 	return 0;
 }
arch/x86/kvm/x86.c
@@ -7469,6 +7469,23 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
 }
 
+bool kvm_apicv_activated(struct kvm *kvm)
+{
+	return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
+}
+EXPORT_SYMBOL_GPL(kvm_apicv_activated);
+
+void kvm_apicv_init(struct kvm *kvm, bool enable)
+{
+	if (enable)
+		clear_bit(APICV_INHIBIT_REASON_DISABLE,
+			  &kvm->arch.apicv_inhibit_reasons);
+	else
+		set_bit(APICV_INHIBIT_REASON_DISABLE,
+			&kvm->arch.apicv_inhibit_reasons);
+}
+EXPORT_SYMBOL_GPL(kvm_apicv_init);
+
 static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
 {
 	struct kvm_vcpu *target = NULL;
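A brief note on the two helpers above: kvm_apicv_init() manipulates the mask with the atomic set_bit()/clear_bit() bitops rather than a plain assignment, so the inhibit reasons added by later patches can be flipped by concurrent callers without a lock, while kvm_apicv_activated() wraps its load in READ_ONCE() so the activation check is a single, untorn read of the whole mask.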
@@ -9219,10 +9236,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 		return r;
 
 	if (irqchip_in_kernel(vcpu->kvm)) {
-		vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu->kvm);
 		r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
 		if (r < 0)
 			goto fail_mmu_destroy;
+		if (kvm_apicv_activated(vcpu->kvm))
+			vcpu->arch.apicv_active = true;
 	} else
 		static_key_slow_inc(&kvm_no_apic_vcpu);
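The kvm_arch_vcpu_create() hunk above is where the bracketed note in the commit message applies: instead of querying the per-vendor get_enable_apicv() callback, each new vCPU now derives its initial apicv_active state from the VM-wide kvm_apicv_activated() check.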