Commit 1c11e713 authored by Avi Kivity

KVM: VMX: Avoid writing HOST_CR0 every entry

cr0.ts may change between entries, so we currently copy cr0 into HOST_CR0
before each entry.  That per-entry vmwrite is slow, so instead set HOST_CR0
with TS set unconditionally (a safe value, since at worst it causes an extra
fpu trap), and issue a clts() just before leaving vcpu context if the task
actually owns the fpu.

Saves ~50 cycles/exit.
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 08acfa18
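
Before the diff, a minimal userspace sketch of the control flow this patch establishes (an illustration, not kernel source: read_cr0(), vmcs_writel(), clts(), and the TS_USEDFPU test are modeled with local stubs). HOST_CR0 is written once at setup with TS forced on, the hot run path no longer touches it, and the override is undone with a clts() only when leaving vcpu context while the task owns the fpu:

#include <stdbool.h>
#include <stdio.h>

#define X86_CR0_TS (1UL << 3)           /* CR0.TS, as in the x86 headers */

static bool task_used_fpu = true;       /* stands in for TS_USEDFPU */

/* Local stubs standing in for the real primitives. */
static unsigned long read_cr0(void) { return 0; }   /* host cr0, TS clear */
static void vmcs_writel(const char *field, unsigned long val)
{
	printf("vmwrite %s = %#lx\n", field, val);
}
static void clts(void) { printf("clts\n"); }

/* One-time setup: force TS on in HOST_CR0, so the stored value stays
 * valid no matter how the host's cr0.ts flips between entries. */
static void vcpu_setup(void)
{
	vmcs_writel("HOST_CR0", read_cr0() | X86_CR0_TS);
}

/* Hot path: the per-entry HOST_CR0 vmwrite is gone. */
static void vcpu_run(void)
{
	/* ... enter/exit guest; host resumes with cr0.ts set ... */
}

/* Leaving vcpu context: undo the forced TS only if this task owns the
 * fpu, so its next fpu access doesn't take a spurious #NM trap. */
static void load_host_state(void)
{
	if (task_used_fpu)
		clts();
}

int main(void)
{
	vcpu_setup();
	for (int i = 0; i < 3; i++)
		vcpu_run();     /* ~50 cycles saved per exit */
	load_host_state();
	return 0;
}

The x86.c hunk reorders kvm_arch_vcpu_put() consistently with this: kvm_x86_ops->vcpu_put() now runs first, so that the clts() issued while unloading host state has already cleared the forced TS by the time kvm_put_guest_fpu() touches fpu state.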
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -812,6 +812,8 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 	}
 #endif
+	if (current_thread_info()->status & TS_USEDFPU)
+		clts();
 }
 
 static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -2507,7 +2509,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
 	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
 
-	vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
+	vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS);  /* 22.2.3 */
 	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
 	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
 
@@ -3859,11 +3861,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vmx_set_interrupt_shadow(vcpu, 0);
 
-	/*
-	 * Loading guest fpu may have cleared host cr0.ts
-	 */
-	vmcs_writel(HOST_CR0, read_cr0());
-
 	asm(
 		/* Store host registers */
 		"push %%"R"dx; push %%"R"bp;"
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1731,8 +1731,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	kvm_put_guest_fpu(vcpu);
 	kvm_x86_ops->vcpu_put(vcpu);
+	kvm_put_guest_fpu(vcpu);
 }
 
 static int is_efer_nx(void)