Commit ed3a0a01 authored by Heiko Carstens

s390/kvm: convert to regular kernel fpu user

KVM modifies the kernel fpu's regs pointer to its own area to implement its
custom version of preemptible kernel fpu context. With general support for
preemptible kernel fpu context there is no need for the extra complexity in
KVM code anymore.

Therefore convert KVM to a regular kernel fpu user. In particular this
means that all TIF_FPU checks can be removed, since the fpu register
context will never be changed by other kernel fpu users, and the fpu
register context will be restored when a thread is preempted.
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent 4eed43de
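
The diff below converts KVM to the pattern sketched here: an on-stack kernel
fpu save area plus a begin/end pair that names the state a section clobbers.
This is a minimal sketch using only the macros and flags that appear in the
diff itself; the function name is hypothetical:

	#include <asm/fpu.h>

	static void example_kernel_fpu_user(void)
	{
		DECLARE_KERNEL_FPU_ONSTACK(fpu);

		/* Save the current FPC and vector registers on the stack. */
		kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
		/*
		 * FP/VX registers may be used freely here. With preemptible
		 * kernel fpu context the registers are restored after
		 * preemption, so no TIF_FPU checks are needed.
		 */
		kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);
	}
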
@@ -743,7 +743,6 @@ struct kvm_vcpu_arch {
 	struct kvm_s390_sie_block *vsie_block;
 	unsigned int host_acrs[NUM_ACRS];
 	struct gs_cb *host_gscb;
-	struct fpu host_fpregs;
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer ckc_timer;
 	struct kvm_s390_pgm_info pgm;
...
@@ -220,8 +220,6 @@ SYM_FUNC_START(__sie64a)
 	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
 	tm	__SIE_PROG20+3(%r14),3		# last exit...
 	jnz	.Lsie_skip
-	TSTMSK	__SF_SIE_FLAGS(%r15),_TIF_FPU
-	jo	.Lsie_skip			# exit if fp/vx regs changed
 	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
 	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
...
@@ -584,7 +584,11 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	mci.val = mchk->mcic;
 	/* take care of lazy register loading */
-	save_user_fpu_regs();
+	fpu_stfpc(&vcpu->run->s.regs.fpc);
+	if (cpu_has_vx())
+		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+	else
+		save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
 	save_access_regs(vcpu->run->s.regs.acrs);
 	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
 		save_gs_cb(current->thread.gs_cb);
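
With lazy loading gone, the machine check path above saves the guest's
floating point state straight from the hardware registers into the kvm_run
area. The same store sequence recurs in store_regs() and
kvm_s390_vcpu_store_status() further down; a sketch of it as a stand-alone
helper (the name save_guest_fpu_regs is hypothetical, the body mirrors the
diff):

	static void save_guest_fpu_regs(struct kvm_vcpu *vcpu)
	{
		/* Store the guest floating point control register first. */
		fpu_stfpc(&vcpu->run->s.regs.fpc);
		if (cpu_has_vx())
			/* 32 vector registers; fprs 0-15 overlay vrs 0-15 */
			save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
		else
			save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
	}
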
@@ -648,7 +652,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	}
 	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
 			     vcpu->run->s.regs.gprs, 128);
-	rc |= put_guest_lc(vcpu, current->thread.ufpu.fpc,
+	rc |= put_guest_lc(vcpu, vcpu->run->s.regs.fpc,
 			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
 	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
 			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
...
@@ -4829,8 +4829,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 				       vcpu->run->s.regs.gprs,
 				       sizeof(sie_page->pv_grregs));
 		}
-		if (test_thread_flag(TIF_FPU))
-			load_user_fpu_regs();
 		exit_reason = sie64a(vcpu->arch.sie_block,
 				     vcpu->run->s.regs.gprs);
 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
@@ -4951,16 +4949,11 @@ static void sync_regs(struct kvm_vcpu *vcpu)
 	}
 	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
-	/* save host (userspace) fprs/vrs */
-	save_user_fpu_regs();
-	vcpu->arch.host_fpregs.fpc = current->thread.ufpu.fpc;
-	vcpu->arch.host_fpregs.regs = current->thread.ufpu.regs;
+	fpu_lfpc_safe(&vcpu->run->s.regs.fpc);
 	if (cpu_has_vx())
-		current->thread.ufpu.regs = vcpu->run->s.regs.vrs;
+		load_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
 	else
-		current->thread.ufpu.regs = vcpu->run->s.regs.fprs;
-	current->thread.ufpu.fpc = vcpu->run->s.regs.fpc;
+		load_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
 	/* Sync fmt2 only data */
 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
 		sync_regs_fmt2(vcpu);
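
sync_regs() now loads the guest floating point state eagerly instead of
repointing current->thread.ufpu.regs at the kvm_run area. The load side of
the pattern as a sketch (load_guest_fpu_regs is a hypothetical name;
fpu_lfpc_safe() is used because the FPC value comes from userspace and may
be invalid):

	static void load_guest_fpu_regs(struct kvm_vcpu *vcpu)
	{
		/* Checked load that tolerates an invalid user-supplied FPC. */
		fpu_lfpc_safe(&vcpu->run->s.regs.fpc);
		if (cpu_has_vx())
			load_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
		else
			load_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
	}
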
@@ -5021,12 +5014,11 @@ static void store_regs(struct kvm_vcpu *vcpu)
 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
 	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_access_regs(vcpu->arch.host_acrs);
-	/* Save guest register state */
-	save_user_fpu_regs();
-	vcpu->run->s.regs.fpc = current->thread.ufpu.fpc;
-	/* Restore will be done lazily at return */
-	current->thread.ufpu.fpc = vcpu->arch.host_fpregs.fpc;
-	current->thread.ufpu.regs = vcpu->arch.host_fpregs.regs;
+	fpu_stfpc(&vcpu->run->s.regs.fpc);
+	if (cpu_has_vx())
+		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+	else
+		save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
 		store_regs_fmt2(vcpu);
 }
@@ -5034,6 +5026,7 @@ static void store_regs(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *kvm_run = vcpu->run;
+	DECLARE_KERNEL_FPU_ONSTACK(fpu);
 	int rc;

 	/*
@@ -5075,6 +5068,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		goto out;
 	}

+	kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
 	sync_regs(vcpu);
 	enable_cpu_timer_accounting(vcpu);
@@ -5098,6 +5092,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	disable_cpu_timer_accounting(vcpu);
 	store_regs(vcpu);
+	kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);

 	kvm_sigset_deactivate(vcpu);
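
Taken together, the three hunks above wrap the whole run loop in a single
kernel fpu section: host state is saved once on entry and restored once on
exit, with guest state loaded and stored in between. A condensed view of the
resulting control flow, with error handling and unrelated bookkeeping
omitted:

	int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
	{
		DECLARE_KERNEL_FPU_ONSTACK(fpu);

		kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR); /* save host */
		sync_regs(vcpu);	/* load guest acrs and fp/vx regs */
		/* ... __vcpu_run(): sie64a() entry/exit loop ... */
		store_regs(vcpu);	/* store guest acrs and fp/vx regs */
		kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR); /* restore host */
		return 0;
	}
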
@@ -5172,8 +5167,11 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 * switch in the run ioctl. Let's update our copies before we save
 	 * it into the save area
 	 */
-	save_user_fpu_regs();
-	vcpu->run->s.regs.fpc = current->thread.ufpu.fpc;
+	fpu_stfpc(&vcpu->run->s.regs.fpc);
+	if (cpu_has_vx())
+		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+	else
+		save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
 	save_access_regs(vcpu->run->s.regs.acrs);
 	return kvm_s390_store_status_unloaded(vcpu, addr);
...
@@ -18,7 +18,6 @@
 #include <asm/sclp.h>
 #include <asm/nmi.h>
 #include <asm/dis.h>
-#include <asm/fpu.h>
 #include <asm/facility.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
@@ -1149,8 +1148,6 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	 */
 	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
 	barrier();
-	if (test_thread_flag(TIF_FPU))
-		load_user_fpu_regs();
 	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
 		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
 	barrier();
...