Commit 87c5c700 authored by Heiko Carstens

s390/fpu: rename save_fpu_regs() to save_user_fpu_regs(), etc

Rename save_fpu_regs(), load_fpu_regs(), and struct thread_struct's fpu
member to save_user_fpu_regs(), load_user_fpu_regs(), and ufpu. This way
the function and variable names reflect for which context they are supposed
to be used.

This large but trivial conversion is a prerequisite for making the kernel
fpu usage preemptible.
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent 419abc4d
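
For orientation, here is a condensed sketch of the lazy user-FPU scheme that the
new names make explicit. The wrapper kernel_fpu_demo() is an illustrative name
only; the calls, flags, and comments inside it are taken from the hunks below.

/*
 * Illustrative sketch, not part of the commit: kernel code that wants
 * the FPU first parks the *user* register contents in thread.ufpu.
 */
static void kernel_fpu_demo(struct kernel_fpu *state, u32 flags)
{
	state->mask = S390_lowcore.fpu_flags;
	if (!test_thread_flag(TIF_FPU)) {
		/* Save user space FPU state into current->thread.ufpu */
		save_user_fpu_regs();
	} else if (state->mask & flags) {
		/* Save FPU/vector registers already in use by the kernel */
		__kernel_fpu_begin(state, flags);
	}
	/*
	 * ... kernel FPU usage goes here ...
	 * On return to user space, arch_exit_to_user_mode() lazily
	 * reloads thread.ufpu via __load_user_fpu_regs() if TIF_FPU
	 * is still set.
	 */
}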
@@ -42,7 +42,7 @@ static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
 static __always_inline void arch_exit_to_user_mode(void)
 {
 	if (test_thread_flag(TIF_FPU))
-		__load_fpu_regs();
+		__load_user_fpu_regs();
 	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
 		debug_user_asce(1);
...
@@ -57,9 +57,9 @@ static inline bool cpu_has_vx(void)
 	return likely(test_facility(129));
 }
 
-void save_fpu_regs(void);
-void load_fpu_regs(void);
-void __load_fpu_regs(void);
+void save_user_fpu_regs(void);
+void load_user_fpu_regs(void);
+void __load_user_fpu_regs(void);
 
 enum {
 	KERNEL_FPC_BIT = 0,
@@ -150,7 +150,7 @@ static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
 	state->mask = S390_lowcore.fpu_flags;
 	if (!test_thread_flag(TIF_FPU)) {
 		/* Save user space FPU state and register contents */
-		save_fpu_regs();
+		save_user_fpu_regs();
 	} else if (state->mask & flags) {
 		/* Save FPU/vector register in-use by the kernel */
 		__kernel_fpu_begin(state, flags);
...
@@ -181,7 +181,7 @@ struct thread_struct {
 	struct gs_cb *gs_cb;		/* Current guarded storage cb */
 	struct gs_cb *gs_bc_cb;		/* Broadcast guarded storage cb */
 	struct pgm_tdb trap_tdb;	/* Transaction abort diagnose block */
-	struct fpu fpu;			/* FP and VX register save area */
+	struct fpu ufpu;		/* User FP and VX register save area */
 };
 
 /* Flag to disable transactions. */
@@ -200,7 +200,7 @@ typedef struct thread_struct thread_struct;
 
 #define INIT_THREAD {							\
 	.ksp = sizeof(init_stack) + (unsigned long) &init_stack,	\
-	.fpu.regs = (void *) init_task.thread.fpu.fprs,			\
+	.ufpu.regs = (void *)init_task.thread.ufpu.fprs,		\
 	.last_break = 1,						\
 }
...
@@ -56,7 +56,7 @@ typedef struct
 static void store_sigregs(void)
 {
 	save_access_regs(current->thread.acrs);
-	save_fpu_regs();
+	save_user_fpu_regs();
 }
 
 /* Load registers after signal return */
@@ -79,7 +79,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
 		user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
 	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
 	       sizeof(user_sregs.regs.acrs));
-	fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
+	fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.ufpu);
 	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
 		return -EFAULT;
 	return 0;
@@ -113,7 +113,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 		regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
 	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
 	       sizeof(current->thread.acrs));
-	fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
+	fpregs_load((_s390_fp_regs *)&user_sregs.fpregs, &current->thread.ufpu);
 	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
 	return 0;
@@ -136,11 +136,11 @@ static int save_sigregs_ext32(struct pt_regs *regs,
 	/* Save vector registers to signal stack */
 	if (cpu_has_vx()) {
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			vxrs[i] = current->thread.fpu.vxrs[i].low;
+			vxrs[i] = current->thread.ufpu.vxrs[i].low;
 		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
 				   sizeof(sregs_ext->vxrs_low)) ||
 		    __copy_to_user(&sregs_ext->vxrs_high,
-				   current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+				   current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
 				   sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 	}
@@ -165,12 +165,12 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
 	if (cpu_has_vx()) {
 		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
 				     sizeof(sregs_ext->vxrs_low)) ||
-		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+		    __copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
 				     &sregs_ext->vxrs_high,
 				     sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			current->thread.fpu.vxrs[i].low = vxrs[i];
+			current->thread.ufpu.vxrs[i].low = vxrs[i];
 	}
 	return 0;
 }
@@ -184,7 +184,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
 	if (get_compat_sigset(&set, (compat_sigset_t __user *)frame->sc.oldmask))
 		goto badframe;
 	set_current_blocked(&set);
-	save_fpu_regs();
+	save_user_fpu_regs();
 	if (restore_sigregs32(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext32(regs, &frame->sregs_ext))
@@ -207,7 +207,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
 	set_current_blocked(&set);
 	if (compat_restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
-	save_fpu_regs();
+	save_user_fpu_regs();
 	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
...
@@ -107,10 +107,10 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
 }
 EXPORT_SYMBOL(__kernel_fpu_end);
 
-void __load_fpu_regs(void)
+void __load_user_fpu_regs(void)
 {
-	struct fpu *state = &current->thread.fpu;
-	void *regs = current->thread.fpu.regs;
+	struct fpu *state = &current->thread.ufpu;
+	void *regs = current->thread.ufpu.regs;
 
 	fpu_lfpc_safe(&state->fpc);
 	if (likely(cpu_has_vx()))
@@ -120,15 +120,15 @@ void __load_fpu_regs(void)
 	clear_thread_flag(TIF_FPU);
 }
 
-void load_fpu_regs(void)
+void load_user_fpu_regs(void)
 {
 	raw_local_irq_disable();
-	__load_fpu_regs();
+	__load_user_fpu_regs();
 	raw_local_irq_enable();
 }
-EXPORT_SYMBOL(load_fpu_regs);
+EXPORT_SYMBOL(load_user_fpu_regs);
 
-void save_fpu_regs(void)
+void save_user_fpu_regs(void)
 {
 	unsigned long flags;
 	struct fpu *state;
@@ -139,8 +139,8 @@ void save_fpu_regs(void)
 	if (test_thread_flag(TIF_FPU))
 		goto out;
 
-	state = &current->thread.fpu;
-	regs = current->thread.fpu.regs;
+	state = &current->thread.ufpu;
+	regs = current->thread.ufpu.regs;
 
 	fpu_stfpc(&state->fpc);
 	if (likely(cpu_has_vx()))
@@ -151,4 +151,4 @@ void save_fpu_regs(void)
 out:
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL(save_fpu_regs);
+EXPORT_SYMBOL(save_user_fpu_regs);
...
@@ -20,9 +20,9 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
 
 	idx -= PERF_REG_S390_FP0;
 	if (cpu_has_vx())
-		fp = *(freg_t *)(current->thread.fpu.vxrs + idx);
+		fp = *(freg_t *)(current->thread.ufpu.vxrs + idx);
 	else
-		fp = current->thread.fpu.fprs[idx];
+		fp = current->thread.ufpu.fprs[idx];
 	return fp.ui;
 }
@@ -64,6 +64,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 	 */
 	regs_user->regs = task_pt_regs(current);
 	if (user_mode(regs_user->regs))
-		save_fpu_regs();
+		save_user_fpu_regs();
 	regs_user->abi = perf_reg_abi(current);
 }
...
@@ -91,10 +91,10 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	 * task and set the TIF_FPU flag to lazy restore the FPU register
 	 * state when returning to user space.
 	 */
-	save_fpu_regs();
+	save_user_fpu_regs();
 
 	*dst = *src;
-	dst->thread.fpu.regs = dst->thread.fpu.fprs;
+	dst->thread.ufpu.regs = dst->thread.ufpu.fprs;
 
 	/*
 	 * Don't transfer over the runtime instrumentation or the guarded
@@ -190,13 +190,13 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 
 void execve_tail(void)
 {
-	current->thread.fpu.fpc = 0;
+	current->thread.ufpu.fpc = 0;
 	fpu_sfpc(0);
 }
 
 struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
 {
-	save_fpu_regs();
+	save_user_fpu_regs();
 	save_access_regs(&prev->thread.acrs[0]);
 	save_ri_cb(prev->thread.ri_cb);
 	save_gs_cb(prev->thread.gs_cb);
...
@@ -247,21 +247,21 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
 		/*
 		 * floating point control reg. is in the thread structure
 		 */
-		tmp = child->thread.fpu.fpc;
+		tmp = child->thread.ufpu.fpc;
 		tmp <<= BITS_PER_LONG - 32;
 
 	} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
 		/*
-		 * floating point regs. are either in child->thread.fpu
-		 * or the child->thread.fpu.vxrs array
+		 * floating point regs. are either in child->thread.ufpu
+		 * or the child->thread.ufpu.vxrs array
 		 */
 		offset = addr - offsetof(struct user, regs.fp_regs.fprs);
 		if (cpu_has_vx())
 			tmp = *(addr_t *)
-			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
+			       ((addr_t)child->thread.ufpu.vxrs + 2 * offset);
 		else
 			tmp = *(addr_t *)
-			       ((addr_t) child->thread.fpu.fprs + offset);
+			       ((addr_t)child->thread.ufpu.fprs + offset);
 
 	} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
 		/*
@@ -396,20 +396,20 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
 		 */
 		if ((unsigned int)data != 0)
 			return -EINVAL;
-		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
+		child->thread.ufpu.fpc = data >> (BITS_PER_LONG - 32);
 
 	} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
 		/*
-		 * floating point regs. are either in child->thread.fpu
-		 * or the child->thread.fpu.vxrs array
+		 * floating point regs. are either in child->thread.ufpu
+		 * or the child->thread.ufpu.vxrs array
 		 */
 		offset = addr - offsetof(struct user, regs.fp_regs.fprs);
 		if (cpu_has_vx())
 			*(addr_t *)((addr_t)
-				child->thread.fpu.vxrs + 2*offset) = data;
+				child->thread.ufpu.vxrs + 2 * offset) = data;
 		else
 			*(addr_t *)((addr_t)
-				child->thread.fpu.fprs + offset) = data;
+				child->thread.ufpu.fprs + offset) = data;
 
 	} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
 		/*
@@ -623,20 +623,20 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 		/*
 		 * floating point control reg. is in the thread structure
 		 */
-		tmp = child->thread.fpu.fpc;
+		tmp = child->thread.ufpu.fpc;
 
 	} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
 		/*
-		 * floating point regs. are either in child->thread.fpu
-		 * or the child->thread.fpu.vxrs array
+		 * floating point regs. are either in child->thread.ufpu
+		 * or the child->thread.ufpu.vxrs array
 		 */
 		offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
 		if (cpu_has_vx())
 			tmp = *(__u32 *)
-			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
+			       ((addr_t)child->thread.ufpu.vxrs + 2 * offset);
 		else
 			tmp = *(__u32 *)
-			       ((addr_t) child->thread.fpu.fprs + offset);
+			       ((addr_t)child->thread.ufpu.fprs + offset);
 
 	} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
 		/*
@@ -749,20 +749,20 @@ static int __poke_user_compat(struct task_struct *child,
 		/*
 		 * floating point control reg. is in the thread structure
 		 */
-		child->thread.fpu.fpc = data;
+		child->thread.ufpu.fpc = data;
 
 	} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
 		/*
-		 * floating point regs. are either in child->thread.fpu
-		 * or the child->thread.fpu.vxrs array
+		 * floating point regs. are either in child->thread.ufpu
+		 * or the child->thread.ufpu.vxrs array
 		 */
 		offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
 		if (cpu_has_vx())
 			*(__u32 *)((addr_t)
-				child->thread.fpu.vxrs + 2*offset) = tmp;
+				child->thread.ufpu.vxrs + 2 * offset) = tmp;
 		else
 			*(__u32 *)((addr_t)
-				child->thread.fpu.fprs + offset) = tmp;
+				child->thread.ufpu.fprs + offset) = tmp;
 
 	} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
 		/*
@@ -894,10 +894,10 @@ static int s390_fpregs_get(struct task_struct *target,
 	_s390_fp_regs fp_regs;
 
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 
-	fp_regs.fpc = target->thread.fpu.fpc;
-	fpregs_store(&fp_regs, &target->thread.fpu);
+	fp_regs.fpc = target->thread.ufpu.fpc;
+	fpregs_store(&fp_regs, &target->thread.ufpu);
 
 	return membuf_write(&to, &fp_regs, sizeof(fp_regs));
 }
@@ -911,22 +911,22 @@ static int s390_fpregs_set(struct task_struct *target,
 	freg_t fprs[__NUM_FPRS];
 
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 
 	if (cpu_has_vx())
-		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+		convert_vx_to_fp(fprs, target->thread.ufpu.vxrs);
 	else
-		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+		memcpy(&fprs, target->thread.ufpu.fprs, sizeof(fprs));
 
 	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
-		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
+		u32 ufpc[2] = { target->thread.ufpu.fpc, 0 };
 		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
 					0, offsetof(s390_fp_regs, fprs));
 		if (rc)
 			return rc;
 		if (ufpc[1] != 0)
 			return -EINVAL;
-		target->thread.fpu.fpc = ufpc[0];
+		target->thread.ufpu.fpc = ufpc[0];
 	}
 
 	if (rc == 0 && count > 0)
@@ -936,9 +936,9 @@ static int s390_fpregs_set(struct task_struct *target,
 		return rc;
 
 	if (cpu_has_vx())
-		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
+		convert_fp_to_vx(target->thread.ufpu.vxrs, fprs);
 	else
-		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
+		memcpy(target->thread.ufpu.fprs, &fprs, sizeof(fprs));
 
 	return rc;
 }
@@ -989,9 +989,9 @@ static int s390_vxrs_low_get(struct task_struct *target,
 	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 	for (i = 0; i < __NUM_VXRS_LOW; i++)
-		vxrs[i] = target->thread.fpu.vxrs[i].low;
+		vxrs[i] = target->thread.ufpu.vxrs[i].low;
 	return membuf_write(&to, vxrs, sizeof(vxrs));
 }
@@ -1006,15 +1006,15 @@ static int s390_vxrs_low_set(struct task_struct *target,
 	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 
 	for (i = 0; i < __NUM_VXRS_LOW; i++)
-		vxrs[i] = target->thread.fpu.vxrs[i].low;
+		vxrs[i] = target->thread.ufpu.vxrs[i].low;
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
 	if (rc == 0)
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			target->thread.fpu.vxrs[i].low = vxrs[i];
+			target->thread.ufpu.vxrs[i].low = vxrs[i];
 
 	return rc;
 }
@@ -1026,8 +1026,8 @@ static int s390_vxrs_high_get(struct task_struct *target,
 	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
-		save_fpu_regs();
-	return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
+		save_user_fpu_regs();
+	return membuf_write(&to, target->thread.ufpu.vxrs + __NUM_VXRS_LOW,
 			    __NUM_VXRS_HIGH * sizeof(__vector128));
 }
@@ -1041,10 +1041,10 @@ static int s390_vxrs_high_set(struct task_struct *target,
 	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
+				target->thread.ufpu.vxrs + __NUM_VXRS_LOW, 0, -1);
 	return rc;
 }
...
@@ -109,7 +109,7 @@ struct rt_sigframe
 static void store_sigregs(void)
 {
 	save_access_regs(current->thread.acrs);
-	save_fpu_regs();
+	save_user_fpu_regs();
 }
 
 /* Load registers after signal return */
@@ -131,7 +131,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 	memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
 	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
 	       sizeof(user_sregs.regs.acrs));
-	fpregs_store(&user_sregs.fpregs, &current->thread.fpu);
+	fpregs_store(&user_sregs.fpregs, &current->thread.ufpu);
 	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
 		return -EFAULT;
 	return 0;
@@ -165,7 +165,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
 	       sizeof(current->thread.acrs));
-	fpregs_load(&user_sregs.fpregs, &current->thread.fpu);
+	fpregs_load(&user_sregs.fpregs, &current->thread.ufpu);
 	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
 	return 0;
@@ -181,11 +181,11 @@ static int save_sigregs_ext(struct pt_regs *regs,
 	/* Save vector registers to signal stack */
 	if (cpu_has_vx()) {
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			vxrs[i] = current->thread.fpu.vxrs[i].low;
+			vxrs[i] = current->thread.ufpu.vxrs[i].low;
 		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
 				   sizeof(sregs_ext->vxrs_low)) ||
 		    __copy_to_user(&sregs_ext->vxrs_high,
-				   current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+				   current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
 				   sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 	}
@@ -202,12 +202,12 @@ static int restore_sigregs_ext(struct pt_regs *regs,
 	if (cpu_has_vx()) {
 		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
 				     sizeof(sregs_ext->vxrs_low)) ||
-		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+		    __copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
 				     &sregs_ext->vxrs_high,
 				     sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			current->thread.fpu.vxrs[i].low = vxrs[i];
+			current->thread.ufpu.vxrs[i].low = vxrs[i];
 	}
 	return 0;
 }
@@ -222,7 +222,7 @@ SYSCALL_DEFINE0(sigreturn)
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
 		goto badframe;
 	set_current_blocked(&set);
-	save_fpu_regs();
+	save_user_fpu_regs();
 	if (restore_sigregs(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->sregs_ext))
@@ -246,7 +246,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	set_current_blocked(&set);
 	if (restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
-	save_fpu_regs();
+	save_user_fpu_regs();
 	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
...
@@ -201,8 +201,8 @@ static void vector_exception(struct pt_regs *regs)
 	}
 
 	/* get vector interrupt code from fpc */
-	save_fpu_regs();
-	vic = (current->thread.fpu.fpc & 0xf00) >> 8;
+	save_user_fpu_regs();
+	vic = (current->thread.ufpu.fpc & 0xf00) >> 8;
 	switch (vic) {
 	case 1: /* invalid vector operation */
 		si_code = FPE_FLTINV;
@@ -227,9 +227,9 @@ static void vector_exception(struct pt_regs *regs)
 
 static void data_exception(struct pt_regs *regs)
 {
-	save_fpu_regs();
-	if (current->thread.fpu.fpc & FPC_DXC_MASK)
-		do_fp_trap(regs, current->thread.fpu.fpc);
+	save_user_fpu_regs();
+	if (current->thread.ufpu.fpc & FPC_DXC_MASK)
+		do_fp_trap(regs, current->thread.ufpu.fpc);
 	else
 		do_trap(regs, SIGILL, ILL_ILLOPN, "data exception");
 }
...
@@ -584,7 +584,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	mci.val = mchk->mcic;
 	/* take care of lazy register loading */
-	save_fpu_regs();
+	save_user_fpu_regs();
 	save_access_regs(vcpu->run->s.regs.acrs);
 	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
 		save_gs_cb(current->thread.gs_cb);
@@ -648,7 +648,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	}
 	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
 			     vcpu->run->s.regs.gprs, 128);
-	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
+	rc |= put_guest_lc(vcpu, current->thread.ufpu.fpc,
 			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
 	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
 			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
...
@@ -4830,7 +4830,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 			       sizeof(sie_page->pv_grregs));
 	}
 	if (test_thread_flag(TIF_FPU))
-		load_fpu_regs();
+		load_user_fpu_regs();
 	exit_reason = sie64a(vcpu->arch.sie_block,
 			     vcpu->run->s.regs.gprs);
 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
@@ -4952,14 +4952,14 @@ static void sync_regs(struct kvm_vcpu *vcpu)
 	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	/* save host (userspace) fprs/vrs */
-	save_fpu_regs();
-	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
-	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+	save_user_fpu_regs();
+	vcpu->arch.host_fpregs.fpc = current->thread.ufpu.fpc;
+	vcpu->arch.host_fpregs.regs = current->thread.ufpu.regs;
 	if (cpu_has_vx())
-		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+		current->thread.ufpu.regs = vcpu->run->s.regs.vrs;
 	else
-		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
-	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+		current->thread.ufpu.regs = vcpu->run->s.regs.fprs;
+	current->thread.ufpu.fpc = vcpu->run->s.regs.fpc;
 	/* Sync fmt2 only data */
 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
@@ -5022,11 +5022,11 @@ static void store_regs(struct kvm_vcpu *vcpu)
 	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_access_regs(vcpu->arch.host_acrs);
 	/* Save guest register state */
-	save_fpu_regs();
-	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+	save_user_fpu_regs();
+	vcpu->run->s.regs.fpc = current->thread.ufpu.fpc;
 	/* Restore will be done lazily at return */
-	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
-	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+	current->thread.ufpu.fpc = vcpu->arch.host_fpregs.fpc;
+	current->thread.ufpu.regs = vcpu->arch.host_fpregs.regs;
 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
 		store_regs_fmt2(vcpu);
 }
@@ -5172,8 +5172,8 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 * switch in the run ioctl. Let's update our copies before we save
 	 * it into the save area
 	 */
-	save_fpu_regs();
-	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+	save_user_fpu_regs();
+	vcpu->run->s.regs.fpc = current->thread.ufpu.fpc;
 	save_access_regs(vcpu->run->s.regs.acrs);
 
 	return kvm_s390_store_status_unloaded(vcpu, addr);
...
@@ -1150,7 +1150,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
 	barrier();
 	if (test_thread_flag(TIF_FPU))
-		load_fpu_regs();
+		load_user_fpu_regs();
 	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
 		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
 	barrier();
...