Commit 98da581e authored by Anton Blanchard, committed by Michael Ellerman

powerpc: Move part of giveup_fpu,altivec,spe into c

Move the MSR modification into new C functions. Removing it from
the low-level assembly functions will allow us to avoid costly MSR
writes by batching them up.

Move the check_if_tm_restore_required() check into these new functions.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent b51b1153
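
Each new C wrapper follows the same pattern: read the MSR once, OR in the bit for the facility being given up, and only write the MSR back (through the mtmsr_isync() helper) when the value actually changes, so a call made while the facility is already enabled skips the expensive mtmsr/isync sequence. Below is a minimal user-space sketch of that idea, not kernel code: the MSR and mtmsr_isync() are mocked as ordinary C variables and functions, and the bit values and the msr_set_bits() name are illustrative assumptions, not kernel definitions.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative facility bits -- stand-ins for the kernel's MSR_FP/MSR_VEC. */
	#define MSR_FP  (1u << 13)
	#define MSR_VEC (1u << 25)

	static uint64_t msr;       /* mock machine state register */
	static int msr_writes;     /* count the costly writes we actually perform */

	static uint64_t mfmsr(void)
	{
		return msr;
	}

	static void mtmsr_isync(uint64_t val)
	{
		/* In the kernel this is an mtmsr followed by an isync; here we
		 * just record that an expensive write happened. */
		msr = val;
		msr_writes++;
	}

	/* Same shape as the new giveup_fpu()/giveup_altivec()/giveup_spe():
	 * only touch the MSR when the requested bits are not already set. */
	static void msr_set_bits(uint64_t bits)
	{
		uint64_t oldmsr = mfmsr();
		uint64_t newmsr = oldmsr | bits;

		if (oldmsr != newmsr)
			mtmsr_isync(newmsr);
	}

	int main(void)
	{
		msr_set_bits(MSR_FP);   /* first call: one MSR write */
		msr_set_bits(MSR_FP);   /* FP already enabled: write skipped */
		msr_set_bits(MSR_VEC);  /* new bit: one more write */
		printf("MSR writes performed: %d\n", msr_writes);  /* prints 2 */
		return 0;
	}

Keeping the conditional write in C, rather than unconditionally rewriting the MSR in assembly, is what makes it possible for later patches to batch several facility bits into a single MSR update.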
@@ -23,28 +23,27 @@ extern int emulate_altivec(struct pt_regs *);
 extern void __giveup_vsx(struct task_struct *);
 extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
-extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifdef CONFIG_PPC_FPU
 extern void flush_fp_to_thread(struct task_struct *);
 extern void giveup_fpu(struct task_struct *);
+extern void __giveup_fpu(struct task_struct *);
 #else
 static inline void flush_fp_to_thread(struct task_struct *t) { }
 static inline void giveup_fpu(struct task_struct *t) { }
+static inline void __giveup_fpu(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 extern void giveup_altivec(struct task_struct *);
+extern void __giveup_altivec(struct task_struct *);
 #else
-static inline void flush_altivec_to_thread(struct task_struct *t)
-{
-}
-static inline void giveup_altivec(struct task_struct *t)
-{
-}
+static inline void flush_altivec_to_thread(struct task_struct *t) { }
+static inline void giveup_altivec(struct task_struct *t) { }
+static inline void __giveup_altivec(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_VSX
@@ -57,10 +56,12 @@ static inline void flush_vsx_to_thread(struct task_struct *t)
 
 #ifdef CONFIG_SPE
 extern void flush_spe_to_thread(struct task_struct *);
+extern void giveup_spe(struct task_struct *);
+extern void __giveup_spe(struct task_struct *);
 #else
-static inline void flush_spe_to_thread(struct task_struct *t)
-{
-}
+static inline void flush_spe_to_thread(struct task_struct *t) { }
+static inline void giveup_spe(struct task_struct *t) { }
+static inline void __giveup_spe(struct task_struct *t) { }
 #endif
 
 static inline void clear_task_ebb(struct task_struct *t)
...
@@ -155,24 +155,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	blr
 
 /*
- * giveup_fpu(tsk)
+ * __giveup_fpu(tsk)
  * Disable FP for the task given as the argument,
  * and save the floating-point registers in its thread_struct.
  * Enables the FPU for use in the kernel on return.
  */
-_GLOBAL(giveup_fpu)
-	mfmsr	r5
-	ori	r5,r5,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	oris	r5,r5,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-	SYNC_601
-	ISYNC_601
-	MTMSRD(r5)			/* enable use of fpu now */
-	SYNC_601
-	isync
+_GLOBAL(__giveup_fpu)
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	PPC_LL	r6,THREAD_FPSAVEAREA(r3)
 	PPC_LL	r5,PT_REGS(r3)
...
@@ -984,14 +984,10 @@ _GLOBAL(__setup_ehv_ivors)
 
 #ifdef CONFIG_SPE
 /*
- * extern void giveup_spe(struct task_struct *prev)
+ * extern void __giveup_spe(struct task_struct *prev)
  *
  */
-_GLOBAL(giveup_spe)
-	mfmsr	r5
-	oris	r5,r5,MSR_SPE@h
-	mtmsr	r5			/* enable use of SPE now */
-	isync
+_GLOBAL(__giveup_spe)
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	lwz	r5,PT_REGS(r3)
 	cmpi	0,r5,0
...
@@ -19,13 +19,11 @@ EXPORT_SYMBOL(_mcount);
 #endif
 
 #ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL(giveup_fpu);
 EXPORT_SYMBOL(load_fp_state);
 EXPORT_SYMBOL(store_fp_state);
 #endif
 
 #ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(giveup_altivec);
 EXPORT_SYMBOL(load_vr_state);
 EXPORT_SYMBOL(store_vr_state);
 #endif
@@ -34,10 +32,6 @@ EXPORT_SYMBOL(store_vr_state);
 EXPORT_SYMBOL_GPL(__giveup_vsx);
 #endif
 
-#ifdef CONFIG_SPE
-EXPORT_SYMBOL(giveup_spe);
-#endif
-
 #ifdef CONFIG_EPAPR_PARAVIRT
 EXPORT_SYMBOL(epapr_hypercall_start);
 #endif
...
@@ -88,6 +88,25 @@ static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 #ifdef CONFIG_PPC_FPU
+void giveup_fpu(struct task_struct *tsk)
+{
+	u64 oldmsr = mfmsr();
+	u64 newmsr;
+
+	check_if_tm_restore_required(tsk);
+
+	newmsr = oldmsr | MSR_FP;
+#ifdef CONFIG_VSX
+	if (cpu_has_feature(CPU_FTR_VSX))
+		newmsr |= MSR_VSX;
+#endif
+
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
+
+	__giveup_fpu(tsk);
+}
+EXPORT_SYMBOL(giveup_fpu);
+
 /*
  * Make sure the floating-point register state in the
  * the thread_struct is up to date for task tsk.
@@ -113,7 +132,6 @@ void flush_fp_to_thread(struct task_struct *tsk)
 		 * to still have its FP state in the CPU registers.
 		 */
 		BUG_ON(tsk != current);
-		check_if_tm_restore_required(tsk);
 		giveup_fpu(tsk);
 	}
 	preempt_enable();
@@ -127,7 +145,6 @@ void enable_kernel_fp(void)
 	WARN_ON(preemptible());
 
 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
-		check_if_tm_restore_required(current);
 		giveup_fpu(current);
 	} else {
 		u64 oldmsr = mfmsr();
@@ -139,12 +156,26 @@ void enable_kernel_fp(void)
 EXPORT_SYMBOL(enable_kernel_fp);
 
 #ifdef CONFIG_ALTIVEC
+void giveup_altivec(struct task_struct *tsk)
+{
+	u64 oldmsr = mfmsr();
+	u64 newmsr;
+
+	check_if_tm_restore_required(tsk);
+
+	newmsr = oldmsr | MSR_VEC;
+
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
+
+	__giveup_altivec(tsk);
+}
+EXPORT_SYMBOL(giveup_altivec);
+
 void enable_kernel_altivec(void)
 {
 	WARN_ON(preemptible());
 
 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
-		check_if_tm_restore_required(current);
 		giveup_altivec(current);
 	} else {
 		u64 oldmsr = mfmsr();
@@ -165,7 +196,6 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 		preempt_disable();
 		if (tsk->thread.regs->msr & MSR_VEC) {
 			BUG_ON(tsk != current);
-			check_if_tm_restore_required(tsk);
 			giveup_altivec(tsk);
 		}
 		preempt_enable();
@@ -214,6 +244,20 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
+void giveup_spe(struct task_struct *tsk)
+{
+	u64 oldmsr = mfmsr();
+	u64 newmsr;
+
+	check_if_tm_restore_required(tsk);
+
+	newmsr = oldmsr | MSR_SPE;
+
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
+
+	__giveup_spe(tsk);
+}
+EXPORT_SYMBOL(giveup_spe);
+
 void enable_kernel_spe(void)
 {
...
@@ -112,17 +112,11 @@ _GLOBAL(load_up_altivec)
 	blr
 
 /*
- * giveup_altivec(tsk)
+ * __giveup_altivec(tsk)
  * Disable VMX for the task given as the argument,
  * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
  */
-_GLOBAL(giveup_altivec)
-	mfmsr	r5
-	oris	r5,r5,MSR_VEC@h
-	SYNC
-	MTMSRD(r5)			/* enable use of VMX now */
-	isync
+_GLOBAL(__giveup_altivec)
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
 	PPC_LL	r5,PT_REGS(r3)
...