Commit 74c55931 authored by Wanpeng Li, committed by Paolo Bonzini

KVM: VMX: Cache IA32_DEBUGCTL in memory

MSR_IA32_DEBUGCTLMSR is zeroed on VMEXIT, so the host value currently has to
be re-read and restored on every world switch.  This patch caches the host
IA32_DEBUGCTL MSR when the vCPU is loaded and restores it after VMEXIT from
that cache, avoiding the extra MSR read on every world switch.  This saves
about 100 clock cycles per vmexit.
Suggested-by: Jim Mattson <jmattson@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Jim Mattson <jmattson@google.com>
Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent 276c796c
arch/x86/kvm/vmx.c
@@ -650,6 +650,8 @@ struct vcpu_vmx {
 
 	u32 host_pkru;
 
+	unsigned long host_debugctlmsr;
+
 	/*
 	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
 	 * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
@@ -2318,6 +2320,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vmx_vcpu_pi_load(vcpu, cpu);
 
 	vmx->host_pkru = read_pkru();
+	vmx->host_debugctlmsr = get_debugctlmsr();
 }
 
 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
@@ -9261,7 +9264,7 @@ static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long debugctlmsr, cr3, cr4;
+	unsigned long cr3, cr4;
 
 	/* Record the guest's net vcpu time for enforced NMI injections. */
 	if (unlikely(!enable_vnmi &&
@@ -9314,7 +9317,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		__write_pkru(vcpu->arch.pkru);
 
 	atomic_switch_perf_msrs(vmx);
-	debugctlmsr = get_debugctlmsr();
 
 	vmx_arm_hv_timer(vcpu);
 
@@ -9425,8 +9427,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	      );
 
 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
-	if (debugctlmsr)
-		update_debugctlmsr(debugctlmsr);
+	if (vmx->host_debugctlmsr)
+		update_debugctlmsr(vmx->host_debugctlmsr);
 
 #ifndef CONFIG_X86_64
 	/*
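
For context, and not part of the patch itself: get_debugctlmsr() and
update_debugctlmsr() are thin accessors for MSR_IA32_DEBUGCTLMSR provided by
arch/x86/include/asm/debugreg.h. The sketch below approximates what they do in
kernels of this era; it is not a verbatim copy (the real helpers also skip the
MSR on pre-P6 family CPUs that lack it).

/* Sketch only: approximates the debugreg.h helpers used by the patch. */
static inline unsigned long get_debugctlmsr(void)
{
        unsigned long debugctlmsr = 0;

        /* Read the host's current IA32_DEBUGCTL value. */
        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
        return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
        /* Write a value back into IA32_DEBUGCTL, e.g. after VMEXIT zeroed it. */
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

With the host value cached in vmx->host_debugctlmsr at vcpu-load time, the
RDMSR that used to run on every guest entry is replaced by a plain memory
read, and the WRMSR is still issued only when the cached value is non-zero;
that is where the roughly 100 saved cycles per vmexit come from.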