Commit 6f6a657c authored by Vitaly Kuznetsov's avatar Vitaly Kuznetsov Committed by Paolo Bonzini

KVM/Hyper-V/VMX: Add direct tlb flush support

Hyper-V provides a direct TLB flush function which helps the
L1 hypervisor handle Hyper-V TLB flush requests from
L2 guests. Add support for this function to VMX.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 344c6c80
...@@ -181,6 +181,7 @@ ...@@ -181,6 +181,7 @@
#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14) #define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14)
/* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */ /* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */
#define HV_X64_NESTED_DIRECT_FLUSH BIT(17)
#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18) #define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
#define HV_X64_NESTED_MSR_BITMAP BIT(19) #define HV_X64_NESTED_MSR_BITMAP BIT(19)
...@@ -892,4 +893,7 @@ struct hv_tlb_flush_ex { ...@@ -892,4 +893,7 @@ struct hv_tlb_flush_ex {
u64 gva_list[]; u64 gva_list[];
} __packed; } __packed;
/*
 * Hyper-V partition assist page, shared with the hypervisor and used by
 * the direct TLB flush enlightenment (its physical address is written to
 * the enlightened VMCS's partition_assist_page field).
 */
struct hv_partition_assist_pg {
u32 tlb_lock_count; /* presumably a TLB-flush lock/ticket counter — see Hyper-V TLFS; not used directly by this code */
};
#endif #endif
...@@ -844,6 +844,8 @@ struct kvm_hv { ...@@ -844,6 +844,8 @@ struct kvm_hv {
/* How many vCPUs have VP index != vCPU index */ /* How many vCPUs have VP index != vCPU index */
atomic_t num_mismatched_vp_indexes; atomic_t num_mismatched_vp_indexes;
struct hv_partition_assist_pg *hv_pa_pg;
}; };
enum kvm_irqchip_mode { enum kvm_irqchip_mode {
......
...@@ -178,6 +178,8 @@ static inline void evmcs_load(u64 phys_addr) ...@@ -178,6 +178,8 @@ static inline void evmcs_load(u64 phys_addr)
struct hv_vp_assist_page *vp_ap = struct hv_vp_assist_page *vp_ap =
hv_get_vp_assist_page(smp_processor_id()); hv_get_vp_assist_page(smp_processor_id());
if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
vp_ap->nested_control.features.directhypercall = 1;
vp_ap->current_nested_vmcs = phys_addr; vp_ap->current_nested_vmcs = phys_addr;
vp_ap->enlighten_vmentry = 1; vp_ap->enlighten_vmentry = 1;
} }
......
...@@ -486,6 +486,35 @@ static int hv_remote_flush_tlb(struct kvm *kvm) ...@@ -486,6 +486,35 @@ static int hv_remote_flush_tlb(struct kvm *kvm)
return hv_remote_flush_tlb_with_range(kvm, NULL); return hv_remote_flush_tlb_with_range(kvm, NULL);
} }
/*
 * Enable the Hyper-V "direct" TLB flush enlightenment for @vcpu: L0
 * (Hyper-V) services L2 guests' TLB flush hypercalls on behalf of L1
 * (KVM), avoiding a VM-exit to L1.
 *
 * Allocates the per-VM partition assist page on first use and wires it,
 * together with the VM identifier, into the vCPU's enlightened VMCS.
 *
 * Returns 0 on success, -ENOMEM if the assist page cannot be allocated.
 */
static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
	struct hv_enlightened_vmcs *evmcs;
	struct hv_partition_assist_pg **p_hv_pa_pg =
			&vcpu->kvm->arch.hyperv.hv_pa_pg;
	/*
	 * Synthetic VM-Exit is not enabled in current code and so all
	 * evmcs in a single VM share the same assist page.
	 */
	if (!*p_hv_pa_pg) {
		*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!*p_hv_pa_pg)
			return -ENOMEM;
		/*
		 * Log the VM pointer itself (matches hv_vm_id below), not
		 * the address of the pointer field as the original did.
		 */
		pr_debug("KVM: Hyper-V: allocated PA_PG for %llx\n",
			 (u64)vcpu->kvm);
	}

	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;

	/* Hand the assist page and a VM identifier to the hypervisor. */
	evmcs->partition_assist_page = __pa(*p_hv_pa_pg);
	evmcs->hv_vm_id = (u64)vcpu->kvm;
	evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;

	pr_debug("KVM: Hyper-V: enabled DIRECT flush for %llx\n",
		 (u64)vcpu->kvm);

	return 0;
}
#endif /* IS_ENABLED(CONFIG_HYPERV) */ #endif /* IS_ENABLED(CONFIG_HYPERV) */
/* /*
...@@ -6511,6 +6540,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -6511,6 +6540,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
current_evmcs->hv_clean_fields |= current_evmcs->hv_clean_fields |=
HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
if (static_branch_unlikely(&enable_evmcs))
current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index;
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
if (vmx->host_debugctlmsr) if (vmx->host_debugctlmsr)
update_debugctlmsr(vmx->host_debugctlmsr); update_debugctlmsr(vmx->host_debugctlmsr);
...@@ -6578,6 +6610,7 @@ static struct kvm *vmx_vm_alloc(void) ...@@ -6578,6 +6610,7 @@ static struct kvm *vmx_vm_alloc(void)
static void vmx_vm_free(struct kvm *kvm) static void vmx_vm_free(struct kvm *kvm)
{ {
kfree(kvm->arch.hyperv.hv_pa_pg);
vfree(to_kvm_vmx(kvm)); vfree(to_kvm_vmx(kvm));
} }
...@@ -7837,6 +7870,7 @@ static void vmx_exit(void) ...@@ -7837,6 +7870,7 @@ static void vmx_exit(void)
if (!vp_ap) if (!vp_ap)
continue; continue;
vp_ap->nested_control.features.directhypercall = 0;
vp_ap->current_nested_vmcs = 0; vp_ap->current_nested_vmcs = 0;
vp_ap->enlighten_vmentry = 0; vp_ap->enlighten_vmentry = 0;
} }
...@@ -7876,6 +7910,11 @@ static int __init vmx_init(void) ...@@ -7876,6 +7910,11 @@ static int __init vmx_init(void)
pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n"); pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n");
static_branch_enable(&enable_evmcs); static_branch_enable(&enable_evmcs);
} }
if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH)
vmx_x86_ops.enable_direct_tlbflush
= hv_enable_direct_tlbflush;
} else { } else {
enlightened_vmcs = false; enlightened_vmcs = false;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment