Commit 7cf30855 authored by Sheng Yang, committed by Avi Kivity

KVM: x86: Use unlazy_fpu() for host FPU

We can avoid an unnecessary FPU load when the userspace process
doesn't use the FPU frequently.

Derived from Avi's idea.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 5ee481da
...@@ -301,7 +301,6 @@ struct kvm_vcpu_arch { ...@@ -301,7 +301,6 @@ struct kvm_vcpu_arch {
unsigned long mmu_seq; unsigned long mmu_seq;
} update_pte; } update_pte;
struct i387_fxsave_struct host_fx_image;
struct i387_fxsave_struct guest_fx_image; struct i387_fxsave_struct guest_fx_image;
gva_t mmio_fault_cr2; gva_t mmio_fault_cr2;
......
...@@ -52,6 +52,7 @@ ...@@ -52,6 +52,7 @@
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/mtrr.h> #include <asm/mtrr.h>
#include <asm/mce.h> #include <asm/mce.h>
#include <asm/i387.h>
#define MAX_IO_MSRS 256 #define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \ #define CR0_RESERVED_BITS \
...@@ -5134,21 +5135,10 @@ void fx_init(struct kvm_vcpu *vcpu) ...@@ -5134,21 +5135,10 @@ void fx_init(struct kvm_vcpu *vcpu)
{ {
unsigned after_mxcsr_mask; unsigned after_mxcsr_mask;
/*
* Touch the fpu the first time in non atomic context as if
* this is the first fpu instruction the exception handler
* will fire before the instruction returns and it'll have to
* allocate ram with GFP_KERNEL.
*/
if (!used_math())
kvm_fx_save(&vcpu->arch.host_fx_image);
/* Initialize guest FPU by resetting ours and saving into guest's */ /* Initialize guest FPU by resetting ours and saving into guest's */
preempt_disable(); preempt_disable();
kvm_fx_save(&vcpu->arch.host_fx_image);
kvm_fx_finit(); kvm_fx_finit();
kvm_fx_save(&vcpu->arch.guest_fx_image); kvm_fx_save(&vcpu->arch.guest_fx_image);
kvm_fx_restore(&vcpu->arch.host_fx_image);
preempt_enable(); preempt_enable();
vcpu->arch.cr0 |= X86_CR0_ET; vcpu->arch.cr0 |= X86_CR0_ET;
...@@ -5165,7 +5155,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) ...@@ -5165,7 +5155,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
return; return;
vcpu->guest_fpu_loaded = 1; vcpu->guest_fpu_loaded = 1;
kvm_fx_save(&vcpu->arch.host_fx_image); unlazy_fpu(current);
kvm_fx_restore(&vcpu->arch.guest_fx_image); kvm_fx_restore(&vcpu->arch.guest_fx_image);
trace_kvm_fpu(1); trace_kvm_fpu(1);
} }
...@@ -5177,7 +5167,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) ...@@ -5177,7 +5167,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
vcpu->guest_fpu_loaded = 0; vcpu->guest_fpu_loaded = 0;
kvm_fx_save(&vcpu->arch.guest_fx_image); kvm_fx_save(&vcpu->arch.guest_fx_image);
kvm_fx_restore(&vcpu->arch.host_fx_image);
++vcpu->stat.fpu_reload; ++vcpu->stat.fpu_reload;
set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests); set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
trace_kvm_fpu(0); trace_kvm_fpu(0);
...@@ -5203,9 +5192,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) ...@@ -5203,9 +5192,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{ {
int r; int r;
/* We do fxsave: this must be aligned. */
BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
vcpu->arch.mtrr_state.have_fixed = 1; vcpu->arch.mtrr_state.have_fixed = 1;
vcpu_load(vcpu); vcpu_load(vcpu);
r = kvm_arch_vcpu_reset(vcpu); r = kvm_arch_vcpu_reset(vcpu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment