Commit 4a599328 authored by Janosch Frank, committed by Heiko Carstens

KVM: s390: introduce kvm_s390_fpu_(store|load)

It's a bit nicer than having multiple lines and will help if there's
another rework, since we'll only have to change one location.
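
For example, a call site that previously open-coded the lazy FPU register
save now becomes a single helper call (illustrative sketch, taken from the
hunks below):

	/* before */
	fpu_stfpc(&vcpu->run->s.regs.fpc);
	if (cpu_has_vx())
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	else
		save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);

	/* after */
	kvm_s390_fpu_store(vcpu->run);
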
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent 778666df
@@ -584,11 +584,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	mci.val = mchk->mcic;
 	/* take care of lazy register loading */
-	fpu_stfpc(&vcpu->run->s.regs.fpc);
-	if (cpu_has_vx())
-		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-	else
-		save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+	kvm_s390_fpu_store(vcpu->run);
 	save_access_regs(vcpu->run->s.regs.acrs);
 	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
 		save_gs_cb(current->thread.gs_cb);
@@ -4949,11 +4949,7 @@ static void sync_regs(struct kvm_vcpu *vcpu)
 	}
 	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
-	fpu_lfpc_safe(&vcpu->run->s.regs.fpc);
-	if (cpu_has_vx())
-		load_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-	else
-		load_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+	kvm_s390_fpu_load(vcpu->run);
 	/* Sync fmt2 only data */
 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
 		sync_regs_fmt2(vcpu);
@@ -5014,11 +5010,7 @@ static void store_regs(struct kvm_vcpu *vcpu)
 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
 	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_access_regs(vcpu->arch.host_acrs);
-	fpu_stfpc(&vcpu->run->s.regs.fpc);
-	if (cpu_has_vx())
-		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-	else
-		save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+	kvm_s390_fpu_store(vcpu->run);
 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
 		store_regs_fmt2(vcpu);
 }
@@ -5167,11 +5159,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 * switch in the run ioctl. Let's update our copies before we save
 	 * it into the save area
 	 */
-	fpu_stfpc(&vcpu->run->s.regs.fpc);
-	if (cpu_has_vx())
-		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-	else
-		save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+	kvm_s390_fpu_store(vcpu->run);
 	save_access_regs(vcpu->run->s.regs.acrs);
 	return kvm_s390_store_status_unloaded(vcpu, addr);
@@ -20,6 +20,24 @@
 #include <asm/processor.h>
 #include <asm/sclp.h>
 
+static inline void kvm_s390_fpu_store(struct kvm_run *run)
+{
+	fpu_stfpc(&run->s.regs.fpc);
+	if (cpu_has_vx())
+		save_vx_regs((__vector128 *)&run->s.regs.vrs);
+	else
+		save_fp_regs((freg_t *)&run->s.regs.fprs);
+}
+
+static inline void kvm_s390_fpu_load(struct kvm_run *run)
+{
+	fpu_lfpc_safe(&run->s.regs.fpc);
+	if (cpu_has_vx())
+		load_vx_regs((__vector128 *)&run->s.regs.vrs);
+	else
+		load_fp_regs((freg_t *)&run->s.regs.fprs);
+}
+
 /* Transactional Memory Execution related macros */
 #define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))
 #define TDB_FORMAT1		1