Commit d0164ee2 authored by Hendrik Brueckner, committed by Martin Schwidefsky

s390/kernel: remove save_fpu_regs() parameter and use __LC_CURRENT instead

All calls to save_fpu_regs() pass the FPU structure of the current task as
the parameter.  The pointer to the current task can also be retrieved
directly from the CPU lowcore.  Remove the parameter, load the task pointer
from the __LC_CURRENT lowcore field instead, and rebase the FPU structure
offsets onto the task structure.  Apply the same approach to the
load_fpu_regs() function.
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 2a01bd1b
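The interface change, as a minimal C sketch (illustrative only; the real
save_fpu_regs stays in assembly, and S390_lowcore.current_task is the lowcore
field that the __LC_CURRENT offset addresses):

	/* before: each caller names the current task's FPU area */
	save_fpu_regs(&current->thread.fpu);

	/* after: the callee finds the same area via the CPU lowcore */
	save_fpu_regs();

	/* roughly what the parameterless version does internally: */
	void save_fpu_regs(void)
	{
		struct task_struct *tsk;

		tsk = (struct task_struct *) S390_lowcore.current_task;
		/* ... save FPC and FP/VX registers to tsk->thread.fpu ... */
	}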
...
@@ -28,7 +28,7 @@ struct fpu {
 	};
 };
 
-void save_fpu_regs(struct fpu *fpu);
+void save_fpu_regs(void);
 
 #define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX))
 #define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX))
...
@@ -30,7 +30,7 @@ static inline void restore_access_regs(unsigned int *acrs)
 #define switch_to(prev,next,last) do {					\
 	if (prev->mm) {							\
-		save_fpu_regs(&prev->thread.fpu);			\
+		save_fpu_regs();					\
 		save_access_regs(&prev->thread.acrs[0]);		\
 		save_ri_cb(prev->thread.ri_cb);				\
 	}								\
...
@@ -28,16 +28,14 @@ int main(void)
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
 	DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
-	DEFINE(__THREAD_fpu, offsetof(struct task_struct, thread.fpu));
+	DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc));
+	DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags));
+	DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs));
 	DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
 	DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
 	DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
 	DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
 	BLANK();
-	DEFINE(__FPU_fpc, offsetof(struct fpu, fpc));
-	DEFINE(__FPU_flags, offsetof(struct fpu, flags));
-	DEFINE(__FPU_regs, offsetof(struct fpu, regs));
-	BLANK();
 	DEFINE(__TI_task, offsetof(struct thread_info, task));
 	DEFINE(__TI_flags, offsetof(struct thread_info, flags));
 	DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
...
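The hunk above is the "rebase" from the commit message: the old constants were
relative to struct fpu and forced the assembler code to compute the FPU base in
a separate step, while the new __THREAD_FPU_* constants are relative to struct
thread_struct, so the task pointer from __LC_CURRENT plus __TASK_thread is the
only base needed.  The arithmetic, as a compile-time check (a sketch, assuming
the s390 struct definitions are in scope):

	#include <stddef.h>

	/* the rebased one-step offset equals the old two-step offset */
	_Static_assert(offsetof(struct thread_struct, fpu.fpc) ==
		       offsetof(struct thread_struct, fpu) +
		       offsetof(struct fpu, fpc),
		       "rebased offset matches the old two-step lookup");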
@@ -154,7 +154,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 static void store_sigregs(void)
 {
 	save_access_regs(current->thread.acrs);
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 }
 
 /* Load registers after signal return */
@@ -286,7 +286,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
 		goto badframe;
 	set_current_blocked(&set);
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (restore_sigregs32(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext32(regs, &frame->sregs_ext))
@@ -309,7 +309,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
 	set_current_blocked(&set);
 	if (compat_restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
...
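In both sigreturn paths the ordering is deliberate and preserved by the patch:
save_fpu_regs() flushes the live FP/VX registers into current->thread.fpu and
sets CIF_FPU before restore_sigregs32() overwrites thread.fpu from the signal
frame; the return to user space then reloads the registers lazily from there
(the comment kept in arch_dup_task_struct below states the same rule).  In
outline:

	/* sigreturn, condensed (names as in the hunks above): */
	save_fpu_regs();                       /* flush live regs, set CIF_FPU   */
	restore_sigregs32(regs, &frame->sregs);/* frame -> current->thread.fpu   */
	/* sysc return: CIF_FPU set -> load_fpu_regs() reloads from thread.fpu */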
@@ -183,7 +183,6 @@ ENTRY(sie64a)
 	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
 	tm	__LC_CPU_FLAGS+7,_CIF_FPU	# load guest fp/vx registers ?
 	jno	.Lsie_load_guest_gprs
-	lg	%r12,__LC_THREAD_INFO		# load fp/vx regs save area
 	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
 .Lsie_load_guest_gprs:
 	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
@@ -752,14 +751,16 @@ ENTRY(psw_idle)
  * of the register contents at system call or io return.
  */
 ENTRY(save_fpu_regs)
+	lg	%r2,__LC_CURRENT
+	aghi	%r2,__TASK_thread
 	tm	__LC_CPU_FLAGS+7,_CIF_FPU
 	bor	%r14
-	stfpc	__FPU_fpc(%r2)
+	stfpc	__THREAD_FPU_fpc(%r2)
 .Lsave_fpu_regs_fpc_end:
-	lg	%r3,__FPU_regs(%r2)
+	lg	%r3,__THREAD_FPU_regs(%r2)
 	ltgr	%r3,%r3
 	jz	.Lsave_fpu_regs_done		# no save area -> set CIF_FPU
-	tm	__FPU_flags+3(%r2),FPU_USE_VX
+	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
 	jz	.Lsave_fpu_regs_fp		# no -> store FP regs
 .Lsave_fpu_regs_vx_low:
 	VSTM	%v0,%v15,0,%r3			# vstm 0,15,0(3)
@@ -794,20 +795,19 @@ ENTRY(save_fpu_regs)
  * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared.
  *
  * There are special calling conventions to fit into sysc and io return work:
- *	%r12:	__LC_THREAD_INFO
  *	%r15:	<kernel stack>
  * The function requires:
  *	%r4 and __SF_EMPTY+32(%r15)
  */
 load_fpu_regs:
+	lg	%r4,__LC_CURRENT
+	aghi	%r4,__TASK_thread
 	tm	__LC_CPU_FLAGS+7,_CIF_FPU
 	bnor	%r14
-	lg	%r4,__TI_task(%r12)
-	la	%r4,__THREAD_fpu(%r4)
-	lfpc	__FPU_fpc(%r4)
+	lfpc	__THREAD_FPU_fpc(%r4)
 	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
-	tm	__FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
-	lg	%r4,__FPU_regs(%r4)		# %r4 <- reg save area
+	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
+	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
 	jz	.Lload_fpu_regs_fp_ctl		# -> no VX, load FP regs
 .Lload_fpu_regs_vx_ctl:
 	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
@@ -1190,13 +1190,14 @@ cleanup_critical:
 	jhe	2f
 	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
 	jhe	1f
+	lg	%r2,__LC_CURRENT
 0:	# Store floating-point controls
-	stfpc	__FPU_fpc(%r2)
+	stfpc	__THREAD_FPU_fpc(%r2)
 1:	# Load register save area and check if VX is active
-	lg	%r3,__FPU_regs(%r2)
+	lg	%r3,__THREAD_FPU_regs(%r2)
 	ltgr	%r3,%r3
 	jz	5f				# no save area -> set CIF_FPU
-	tm	__FPU_flags+3(%r2),FPU_USE_VX
+	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
 	jz	4f				# no VX -> store FP regs
 2:	# Store vector registers (V0-V15)
 	VSTM	%v0,%v15,0,%r3			# vstm 0,15,0(3)
@@ -1250,11 +1251,10 @@ cleanup_critical:
 	jhe	5f
 	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
 	jhe	6f
-	lg	%r4,__TI_task(%r12)
-	la	%r4,__THREAD_fpu(%r4)
-	lfpc	__FPU_fpc(%r4)
-	tm	__FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
-	lg	%r4,__FPU_regs(%r4)		# %r4 <- reg save area
+	lg	%r4,__LC_CURRENT
+	lfpc	__THREAD_FPU_fpc(%r4)
+	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
+	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
 	jz	3f				# -> no VX, load FP regs
 6:	# Set VX-enablement control
 	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
...
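Both entry points now start with the same two instructions, replacing the old
%r12-based thread_info lookup: load the task pointer from the lowcore, then
advance it to the embedded thread_struct so the rebased __THREAD_FPU_* offsets
apply.  A rough C equivalent of the rewritten save_fpu_regs (a sketch assuming
test_cpu_flag()/set_cpu_flag() model the _CIF_FPU test and set; the real
routine must stay in assembly for the sysc/io return paths):

	void save_fpu_regs(void)
	{
		struct thread_struct *thread;	/* %r2 after the aghi */

		if (test_cpu_flag(CIF_FPU))	/* tm __LC_CPU_FLAGS+7,_CIF_FPU */
			return;			/* bor %r14 */
		thread = &((struct task_struct *)
			   S390_lowcore.current_task)->thread;
		asm volatile("stfpc %0" : "=Q" (thread->fpu.fpc));
		if (!thread->fpu.regs)		/* no save area */
			goto done;
		/* store VX regs if FPU_USE_VX, else FP regs, into fpu.regs */
	done:
		set_cpu_flag(CIF_FPU);
	}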
@@ -104,7 +104,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	 * The CIF_FPU flag is set in any case to lazy clear or restore a saved
 	 * state when switching to a different task or returning to user space.
 	 */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	dst->thread.fpu.fpc = current->thread.fpu.fpc;
 	if (is_vx_task(current))
 		convert_vx_to_fp(dst->thread.fpu.fprs,
@@ -196,7 +196,7 @@ asmlinkage void execve_tail(void)
  */
 int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
 {
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	fpregs->fpc = current->thread.fpu.fpc;
 	fpregs->pad = 0;
 	if (is_vx_task(current))
...
@@ -943,7 +943,7 @@ static int s390_fpregs_get(struct task_struct *target,
 	_s390_fp_regs fp_regs;
 
 	if (target == current)
-		save_fpu_regs(&target->thread.fpu);
+		save_fpu_regs();
 
 	fp_regs.fpc = target->thread.fpu.fpc;
 	fpregs_store(&fp_regs, &target->thread.fpu);
@@ -961,7 +961,7 @@ static int s390_fpregs_set(struct task_struct *target,
 	freg_t fprs[__NUM_FPRS];
 
 	if (target == current)
-		save_fpu_regs(&target->thread.fpu);
+		save_fpu_regs();
 
 	/* If setting FPC, must validate it first. */
 	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
@@ -1049,7 +1049,7 @@ static int s390_vxrs_low_get(struct task_struct *target,
 		return -ENODEV;
 	if (is_vx_task(target)) {
 		if (target == current)
-			save_fpu_regs(&target->thread.fpu);
+			save_fpu_regs();
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
 			vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
 	} else
@@ -1072,7 +1072,7 @@ static int s390_vxrs_low_set(struct task_struct *target,
 		if (rc)
 			return rc;
 	} else if (target == current)
-		save_fpu_regs(&target->thread.fpu);
+		save_fpu_regs();
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
 	if (rc == 0)
@@ -1093,7 +1093,7 @@ static int s390_vxrs_high_get(struct task_struct *target,
 		return -ENODEV;
 	if (is_vx_task(target)) {
 		if (target == current)
-			save_fpu_regs(&target->thread.fpu);
+			save_fpu_regs();
 		memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
 		       sizeof(vxrs));
 	} else
@@ -1115,7 +1115,7 @@ static int s390_vxrs_high_set(struct task_struct *target,
 		if (rc)
 			return rc;
 	} else if (target == current)
-		save_fpu_regs(&target->thread.fpu);
+		save_fpu_regs();
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
...
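Every regset handler above repeats the same guard, which is why the
parameterless interface is sufficient: only the task currently running on this
CPU can have FPU state that still lives in registers rather than in memory, so
save_fpu_regs() is needed only when target == current; a stopped tracee was
already flushed at its last switch_to().  Condensed (sync_fpu_state is a
hypothetical name for illustration):

	static void sync_fpu_state(struct task_struct *target)
	{
		if (target == current)
			save_fpu_regs();  /* live regs -> current->thread.fpu */
		/* otherwise target->thread.fpu is already up to date */
	}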
@@ -105,7 +105,7 @@ struct rt_sigframe
 static void store_sigregs(void)
 {
 	save_access_regs(current->thread.acrs);
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 }
 
 /* Load registers after signal return */
@@ -222,7 +222,7 @@ SYSCALL_DEFINE0(sigreturn)
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
 		goto badframe;
 	set_current_blocked(&set);
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (restore_sigregs(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->sregs_ext))
@@ -246,7 +246,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	set_current_blocked(&set);
 	if (restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
...
@@ -236,7 +236,7 @@ int alloc_vector_registers(struct task_struct *tsk)
 		return -ENOMEM;
 	preempt_disable();
 	if (tsk == current)
-		save_fpu_regs(&tsk->thread.fpu);
+		save_fpu_regs();
 	/* Copy the 16 floating point registers */
 	convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs);
 	fprs = tsk->thread.fpu.fprs;
@@ -257,7 +257,7 @@ void vector_exception(struct pt_regs *regs)
 	}
 
 	/* get vector interrupt code from fpc */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	vic = (current->thread.fpu.fpc & 0xf00) >> 8;
 	switch (vic) {
 	case 1: /* invalid vector operation */
@@ -295,7 +295,7 @@ void data_exception(struct pt_regs *regs)
 	location = get_trap_ip(regs);
 
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	/* Check for vector register enablement */
 	if (MACHINE_HAS_VX && !is_vx_task(current) &&
 	    (current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) {
...
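The save_fpu_regs() call is what makes the subsequent fpc read valid in
vector_exception(): it flushes the floating-point control word into
current->thread.fpu.fpc before the vector interrupt code is extracted from it.
A worked example of the extraction shown above:

	unsigned int fpc = 0x00000100;		/* example FPC after a vector exception */
	unsigned int vic = (fpc & 0xf00) >> 8;	/* == 1 -> "invalid vector operation" */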
@@ -1224,7 +1224,7 @@ static inline void load_fpu_from(struct fpu *from)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	/* Save host register state */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	save_fpu_to(&vcpu->arch.host_fpregs);
 
 	if (test_kvm_facility(vcpu->kvm, 129)) {
@@ -1256,7 +1256,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 
 	if (test_kvm_facility(vcpu->kvm, 129))
 		/*
@@ -1671,7 +1671,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 		return -EINVAL;
 	memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	load_fpu_from(&vcpu->arch.guest_fpregs);
 	return 0;
 }
@@ -2241,7 +2241,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 * copying in vcpu load/put. Lets update our copies before we save
 	 * it into the save area
 	 */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (test_kvm_facility(vcpu->kvm, 129)) {
 		/*
 		 * If the vector extension is available, the vector registers
@@ -2288,7 +2288,7 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 *
 	 * Let's update our copies before we save it into the save area.
 	 */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
 }
...
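The KVM call sites keep their existing choreography, now without the argument:
flush the live host state into current->thread.fpu, stash a copy, then make
the guest state current.  In outline (a sketch of the non-vector vcpu_load
path, using save_fpu_to()/load_fpu_from() as they appear in the hunks above):

	/* kvm_arch_vcpu_load, condensed: */
	save_fpu_regs();			 /* live regs -> current->thread.fpu */
	save_fpu_to(&vcpu->arch.host_fpregs);	 /* keep host copy for vcpu_put */
	load_fpu_from(&vcpu->arch.guest_fpregs); /* guest FP/VX state becomes active */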