Commit 02daab21 authored by Avi Kivity, committed by Marcelo Tosatti

KVM: Lazify fpu activation and deactivation

Defer fpu deactivation as much as possible - if the guest fpu is loaded, keep
it loaded until the next heavyweight exit (where we are forced to unload it).
This reduces unnecessary exits.

We also defer fpu activation on clts; while clts signals the intent to use the
fpu, we can't be sure the guest will actually use it.
Signed-off-by: Avi Kivity <avi@redhat.com>
parent e8467fda
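
The flow this patch enables: the #NM intercept activates the guest fpu lazily, kvm_put_guest_fpu() only records that deactivation is due, and the request bit is consumed at the start of the next guest entry. Below is a minimal user-space C sketch of that state machine, not the kernel code itself; the names (struct vcpu, REQ_DEACTIVATE_FPU, enter_guest, and so on) are simplified stand-ins for the real kvm_vcpu fields and kvm_x86_ops hooks touched by the diff.

#include <stdbool.h>
#include <stdio.h>

#define REQ_DEACTIVATE_FPU (1u << 0)

struct vcpu {
	bool fpu_active;        /* guest FPU currently loaded on the cpu */
	unsigned int requests;  /* deferred work, checked before each entry */
};

/* #NM intercept path: the guest really touched the FPU, so load it. */
static void fpu_activate(struct vcpu *v)
{
	v->fpu_active = true;
	printf("activate: guest fpu loaded\n");
}

/*
 * Heavyweight exit (kvm_put_guest_fpu in the patch): the FPU state is
 * saved, but intercepts are not re-armed here; deactivation is only
 * recorded as a request to be handled before the next entry.
 */
static void put_guest_fpu(struct vcpu *v)
{
	v->requests |= REQ_DEACTIVATE_FPU;
	printf("put: deactivation deferred via request bit\n");
}

/* Start of vcpu_enter_guest in the patch: consume deferred requests. */
static void enter_guest(struct vcpu *v)
{
	if (v->requests & REQ_DEACTIVATE_FPU) {
		v->requests &= ~REQ_DEACTIVATE_FPU;
		v->fpu_active = false;  /* re-arm #NM / set CR0.TS */
		printf("entry: fpu deactivated, #NM intercept re-armed\n");
	}
	printf("entry: fpu_active=%d\n", v->fpu_active);
}

int main(void)
{
	struct vcpu v = { .fpu_active = false, .requests = 0 };

	fpu_activate(&v);  /* guest took #NM and used the fpu */
	enter_guest(&v);   /* lightweight exits keep the fpu loaded */
	enter_guest(&v);
	put_guest_fpu(&v); /* heavyweight exit saves the fpu ... */
	enter_guest(&v);   /* ... deactivation happens here, lazily */
	return 0;
}

The point of routing deactivation through the requests word is that the work is batched into the next guest entry instead of being performed eagerly on every exit, which is what keeps lightweight exits cheap.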
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -506,6 +506,7 @@ struct kvm_x86_ops {
 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
 
 	void (*tlb_flush)(struct kvm_vcpu *vcpu);
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -984,17 +984,11 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (npt_enabled)
 		goto set;
 
-	if (kvm_read_cr0_bits(vcpu, X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
-		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-		vcpu->fpu_active = 1;
-	}
-
 	vcpu->arch.cr0 = cr0;
 	cr0 |= X86_CR0_PG | X86_CR0_WP;
-	if (!vcpu->fpu_active) {
-		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+
+	if (!vcpu->fpu_active)
 		cr0 |= X86_CR0_TS;
-	}
 set:
 	/*
 	 * re-enable caching here because the QEMU bios
@@ -1250,6 +1244,8 @@ static int nm_interception(struct vcpu_svm *svm)
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 	if (!kvm_read_cr0_bits(&svm->vcpu, X86_CR0_TS))
 		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
+	else
+		svm->vmcb->save.cr0 |= X86_CR0_TS;
 	svm->vcpu.fpu_active = 1;
 
 	return 1;
@@ -2586,6 +2582,8 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
 {
+	if (npt_enabled)
+		vcpu->fpu_active = 1;
 }
 
 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
@@ -2805,12 +2803,6 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 
 	svm->vmcb->save.cr3 = root;
 	force_new_asid(vcpu);
-
-	if (vcpu->fpu_active) {
-		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
-		svm->vmcb->save.cr0 |= X86_CR0_TS;
-		vcpu->fpu_active = 0;
-	}
 }
 
 static int is_disabled(void)
@@ -2926,6 +2918,20 @@ static bool svm_rdtscp_supported(void)
 	return false;
 }
 
+static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (npt_enabled) {
+		/* hack: npt requires active fpu at this time */
+		vcpu->fpu_active = 1;
+		return;
+	}
+
+	svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
+	svm->vmcb->save.cr0 |= X86_CR0_TS;
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -2967,6 +2973,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.cache_reg = svm_cache_reg,
 	.get_rflags = svm_get_rflags,
 	.set_rflags = svm_set_rflags,
+	.fpu_deactivate = svm_fpu_deactivate,
 
 	.tlb_flush = svm_flush_tlb,
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -66,7 +66,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 #define KVM_GUEST_CR0_MASK						\
 	(KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST				\
-	(X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
+	(X86_CR0_WP | X86_CR0_NE | X86_CR0_MP)
 #define KVM_VM_CR0_ALWAYS_ON						\
 	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
 #define KVM_CR4_GUEST_OWNED_BITS					\
@@ -579,9 +579,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
 	u32 eb;
 
-	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
-	if (!vcpu->fpu_active)
-		eb |= 1u << NM_VECTOR;
+	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR)
+		| (1u << NM_VECTOR);
 	/*
 	 * Unconditionally intercept #DB so we can maintain dr6 without
 	 * reading it every exit.
@@ -595,6 +594,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 		eb = ~0;
 	if (enable_ept)
 		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
+	if (vcpu->fpu_active)
+		eb &= ~(1u << NM_VECTOR);
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -806,9 +807,6 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
 
 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->fpu_active)
-		return;
-	vcpu->fpu_active = 0;
 	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
 }
@@ -1737,8 +1735,6 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	else
 		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
 
-	vmx_fpu_deactivate(vcpu);
-
 	if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
 		enter_pmode(vcpu);
 
@@ -1757,12 +1753,12 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (enable_ept)
 		ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
 
+	if (!vcpu->fpu_active)
+		hw_cr0 |= X86_CR0_TS;
+
 	vmcs_writel(CR0_READ_SHADOW, cr0);
 	vmcs_writel(GUEST_CR0, hw_cr0);
 	vcpu->arch.cr0 = cr0;
-
-	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
-		vmx_fpu_activate(vcpu);
 }
 
 static u64 construct_eptp(unsigned long root_hpa)
@@ -1793,8 +1789,6 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	vmx_flush_tlb(vcpu);
 	vmcs_writel(GUEST_CR3, guest_cr3);
-	if (kvm_read_cr0_bits(vcpu, X86_CR0_PE))
-		vmx_fpu_deactivate(vcpu);
 }
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -3002,11 +2996,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		};
 		break;
 	case 2: /* clts */
-		vmx_fpu_deactivate(vcpu);
 		vcpu->arch.cr0 &= ~X86_CR0_TS;
 		vmcs_writel(CR0_READ_SHADOW, kvm_read_cr0(vcpu));
 		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
-		vmx_fpu_activate(vcpu);
 		skip_emulated_instruction(vcpu);
 		return 1;
 	case 1: /*mov from cr*/
@@ -4127,6 +4119,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.cache_reg = vmx_cache_reg,
 	.get_rflags = vmx_get_rflags,
 	.set_rflags = vmx_set_rflags,
+	.fpu_deactivate = vmx_fpu_deactivate,
 
 	.tlb_flush = vmx_flush_tlb,
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1509,8 +1509,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->vcpu_put(vcpu);
 	kvm_put_guest_fpu(vcpu);
+	kvm_x86_ops->vcpu_put(vcpu);
 }
 
 static int is_efer_nx(void)
@@ -4006,6 +4006,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			r = 0;
 			goto out;
 		}
+		if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
+			vcpu->fpu_active = 0;
+			kvm_x86_ops->fpu_deactivate(vcpu);
+		}
 	}
 
 	preempt_disable();
@@ -5075,6 +5079,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 	kvm_fx_save(&vcpu->arch.guest_fx_image);
 	kvm_fx_restore(&vcpu->arch.host_fx_image);
 	++vcpu->stat.fpu_reload;
+	set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
 }
 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -38,6 +38,7 @@
 #define KVM_REQ_MMU_SYNC           7
 #define KVM_REQ_KVMCLOCK_UPDATE    8
 #define KVM_REQ_KICK               9
+#define KVM_REQ_DEACTIVATE_FPU    10
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID	0