Commit 419abc4d authored by Heiko Carstens

s390/fpu: convert FPU CIF flag to regular TIF flag

The FPU state, as represented by the CIF_FPU flag, is a property of a task, not
of the CPU it happens to be running on. Therefore convert the flag to a
regular TIF flag.

This removes the magic in switch_to(), where a save_fpu_regs() call for the
currently running (previous) task sets the per-CPU CIF_FPU flag, which is then
required to restore the FPU register contents of the next task when it returns
to user space.
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent 918c7cad
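
A minimal, self-contained sketch of the lazy save/restore idea described in the
message above, with a plain boolean standing in for TIF_FPU. This is illustrative
only, not kernel code; all names (struct task, fpu_saved, cpu_fpu_regs,
save_fpu_regs_for, exit_to_user_mode) are made up for the example. The point is
that the "registers live in memory, restore before user mode" mark travels with
the task, so no per-CPU bookkeeping is needed in switch_to():

#include <stdbool.h>
#include <stdio.h>

struct task {
        const char *name;
        bool fpu_saved;          /* plays the role of TIF_FPU */
        unsigned long fpu_state; /* saved register contents */
};

static unsigned long cpu_fpu_regs; /* stand-in for the real FPU registers */

static void save_fpu_regs_for(struct task *t)
{
        if (t->fpu_saved)               /* already saved, nothing to do */
                return;
        t->fpu_state = cpu_fpu_regs;    /* spill the registers to the task */
        t->fpu_saved = true;            /* restore lazily before user mode */
}

static void exit_to_user_mode(struct task *t)
{
        if (t->fpu_saved) {             /* registers are stale for this task */
                cpu_fpu_regs = t->fpu_state;
                t->fpu_saved = false;
        }
        printf("%s returns to user space, fpu = %lu\n", t->name, cpu_fpu_regs);
}

int main(void)
{
        struct task a = { .name = "a", .fpu_saved = false };
        struct task b = { .name = "b", .fpu_saved = true, .fpu_state = 2 };

        cpu_fpu_regs = 1;        /* task a runs and owns the FPU registers */
        save_fpu_regs_for(&a);   /* e.g. switch_to() or kernel_fpu_begin() */
        exit_to_user_mode(&b);   /* b restores its own saved state: prints 2 */
        exit_to_user_mode(&a);   /* a restores 1, on whichever CPU runs it */
        return 0;
}
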
@@ -41,7 +41,7 @@ static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
 static __always_inline void arch_exit_to_user_mode(void)
 {
-        if (test_cpu_flag(CIF_FPU))
+        if (test_thread_flag(TIF_FPU))
                 __load_fpu_regs();
         if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
......
@@ -148,7 +148,7 @@ static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
 {
         preempt_disable();
         state->mask = S390_lowcore.fpu_flags;
-        if (!test_cpu_flag(CIF_FPU)) {
+        if (!test_thread_flag(TIF_FPU)) {
                 /* Save user space FPU state and register contents */
                 save_fpu_regs();
         } else if (state->mask & flags) {
......
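
For context, a hedged usage sketch of how kernel code brackets its own use of
the vector/floating-point registers around this path: kernel_fpu_begin() saves
the user FPU state (via save_fpu_regs(), which now sets TIF_FPU) so the
registers may be clobbered, and the matching end call restores what the caller
saved. kernel_fpu_end() and the KERNEL_VXR flag are assumed here from
asm/fpu/api.h; the exact names should be checked against that header.

#include <asm/fpu/api.h>

static void do_vector_work(void)
{
        struct kernel_fpu vxstate;

        /* user FPU state saved, TIF_FPU set for lazy restore */
        kernel_fpu_begin(&vxstate, KERNEL_VXR);
        /* ... use vector registers V0-V31 here ... */
        /* restore the register contents saved by kernel_fpu_begin() */
        kernel_fpu_end(&vxstate, KERNEL_VXR);
}
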
@@ -15,13 +15,11 @@
 #include <linux/bits.h>
 #define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
-#define CIF_FPU 3 /* restore FPU registers */
 #define CIF_ENABLED_WAIT 5 /* in enabled wait state */
 #define CIF_MCCK_GUEST 6 /* machine check happening in guest */
 #define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */
 #define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
-#define _CIF_FPU BIT(CIF_FPU)
 #define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT)
 #define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
 #define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
......
@@ -69,6 +69,7 @@ void arch_setup_new_exec(void);
 #define TIF_PATCH_PENDING 5 /* pending live patching update */
 #define TIF_PGSTE 6 /* New mm's will use 4K page tables */
 #define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
+#define TIF_FPU 8 /* restore FPU registers on exit to usermode */
 #define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
 #define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */
@@ -92,6 +93,7 @@ void arch_setup_new_exec(void);
 #define _TIF_UPROBE BIT(TIF_UPROBE)
 #define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
 #define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
+#define _TIF_FPU BIT(TIF_FPU)
 #define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
 #define _TIF_PER_TRAP BIT(TIF_PER_TRAP)
......
@@ -220,7 +220,7 @@ SYM_FUNC_START(__sie64a)
         oi      __SIE_PROG0C+3(%r14),1          # we are going into SIE now
         tm      __SIE_PROG20+3(%r14),3          # last exit...
         jnz     .Lsie_skip
-        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
+        TSTMSK  __SF_SIE_FLAGS(%r15),_TIF_FPU
         jo      .Lsie_skip                      # exit if fp/vx regs changed
         lg      %r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr
         BPEXIT  __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
......
@@ -117,7 +117,7 @@ void __load_fpu_regs(void)
                 load_vx_regs(regs);
         else
                 load_fp_regs(regs);
-        clear_cpu_flag(CIF_FPU);
+        clear_thread_flag(TIF_FPU);
 }
 
 void load_fpu_regs(void)
@@ -136,7 +136,7 @@ void save_fpu_regs(void)
         local_irq_save(flags);
-        if (test_cpu_flag(CIF_FPU))
+        if (test_thread_flag(TIF_FPU))
                 goto out;
         state = &current->thread.fpu;
@@ -147,7 +147,7 @@ void save_fpu_regs(void)
                 save_vx_regs(regs);
         else
                 save_fp_regs(regs);
-        set_cpu_flag(CIF_FPU);
+        set_thread_flag(TIF_FPU);
 out:
         local_irq_restore(flags);
 }
......
@@ -88,7 +88,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
         /*
          * Save the floating-point or vector register state of the current
-         * task and set the CIF_FPU flag to lazy restore the FPU register
+         * task and set the TIF_FPU flag to lazy restore the FPU register
          * state when returning to user space.
          */
         save_fpu_regs();
@@ -196,11 +196,6 @@ void execve_tail(void)
 
 struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
 {
-        /*
-         * save_fpu_regs() sets the CIF_FPU flag, which enforces
-         * a restore of the floating point / vector registers as
-         * soon as the next task returns to user space.
-         */
         save_fpu_regs();
         save_access_regs(&prev->thread.acrs[0]);
         save_ri_cb(prev->thread.ri_cb);
......
@@ -4829,7 +4829,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                                            vcpu->run->s.regs.gprs,
                                            sizeof(sie_page->pv_grregs));
                 }
-                if (test_cpu_flag(CIF_FPU))
+                if (test_thread_flag(TIF_FPU))
                         load_fpu_regs();
                 exit_reason = sie64a(vcpu->arch.sie_block,
                                      vcpu->run->s.regs.gprs);
......
@@ -1149,7 +1149,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
          */
         vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
         barrier();
-        if (test_cpu_flag(CIF_FPU))
+        if (test_thread_flag(TIF_FPU))
                 load_fpu_regs();
         if (!kvm_s390_vcpu_sie_inhibited(vcpu))
                 rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
......