Commit 87ee613d authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: x86: hyperv: keep track of mismatched VP indexes

In most common cases VP index of a vcpu matches its vcpu index. Userspace
is, however, free to set any mapping it wishes and we need to account for
that when we need to find a vCPU with a particular VP index. To keep search
algorithms optimal in both cases introduce 'num_mismatched_vp_indexes'
counter showing how many vCPUs with mismatching VP index we have. In case
the counter is zero we can assume vp_index == vcpu_idx.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1779a39f
...@@ -790,6 +790,9 @@ struct kvm_hv { ...@@ -790,6 +790,9 @@ struct kvm_hv {
u64 hv_reenlightenment_control; u64 hv_reenlightenment_control;
u64 hv_tsc_emulation_control; u64 hv_tsc_emulation_control;
u64 hv_tsc_emulation_status; u64 hv_tsc_emulation_status;
/* How many vCPUs have VP index != vCPU index */
atomic_t num_mismatched_vp_indexes;
}; };
enum kvm_irqchip_mode { enum kvm_irqchip_mode {
......
...@@ -1045,11 +1045,31 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) ...@@ -1045,11 +1045,31 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
switch (msr) { switch (msr) {
case HV_X64_MSR_VP_INDEX: case HV_X64_MSR_VP_INDEX: {
if (!host || (u32)data >= KVM_MAX_VCPUS) struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
int vcpu_idx = kvm_vcpu_get_idx(vcpu);
u32 new_vp_index = (u32)data;
if (!host || new_vp_index >= KVM_MAX_VCPUS)
return 1; return 1;
hv_vcpu->vp_index = (u32)data;
if (new_vp_index == hv_vcpu->vp_index)
return 0;
/*
* The VP index is initialized to vcpu_index by
* kvm_hv_vcpu_postcreate so they initially match. Now the
* VP index is changing, adjust num_mismatched_vp_indexes if
* it now matches or no longer matches vcpu_idx.
*/
if (hv_vcpu->vp_index == vcpu_idx)
atomic_inc(&hv->num_mismatched_vp_indexes);
else if (new_vp_index == vcpu_idx)
atomic_dec(&hv->num_mismatched_vp_indexes);
hv_vcpu->vp_index = new_vp_index;
break; break;
}
case HV_X64_MSR_VP_ASSIST_PAGE: { case HV_X64_MSR_VP_ASSIST_PAGE: {
u64 gfn; u64 gfn;
unsigned long addr; unsigned long addr;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment