Commit ac3d0853 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/signal32: Remove impossible #ifdef combinations

PPC_TRANSACTIONAL_MEM is only on book3s/64
SPE is only on booke

PPC_TRANSACTIONAL_MEM selects ALTIVEC and VSX

Therefore, within PPC_TRANSACTIONAL_MEM sections,
ALTIVEC and VSX are always defined while SPE never is.

Remove all SPE code, and remove the #ifdef CONFIG_ALTIVEC and
#ifdef CONFIG_VSX guards, from the TM functions.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/a069a348ee3c2fe3123a5a93695c2b35dc42cb40.1623340691.git.christophe.leroy@csgroup.eu
parent baf24d23
...@@ -354,14 +354,8 @@ static void prepare_save_tm_user_regs(void) ...@@ -354,14 +354,8 @@ static void prepare_save_tm_user_regs(void)
{ {
WARN_ON(tm_suspend_disabled); WARN_ON(tm_suspend_disabled);
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC)) if (cpu_has_feature(CPU_FTR_ALTIVEC))
current->thread.ckvrsave = mfspr(SPRN_VRSAVE); current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_SPE
if (current->thread.used_spe)
flush_spe_to_thread(current);
#endif
} }
static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame, static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
...@@ -379,7 +373,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user ...@@ -379,7 +373,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
*/ */
unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed); unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);
#ifdef CONFIG_ALTIVEC
/* save altivec registers */ /* save altivec registers */
if (current->thread.used_vr) { if (current->thread.used_vr) {
unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state, unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
...@@ -412,7 +405,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user ...@@ -412,7 +405,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
else else
unsafe_put_user(current->thread.ckvrsave, unsafe_put_user(current->thread.ckvrsave,
(u32 __user *)&tm_frame->mc_vregs[32], failed); (u32 __user *)&tm_frame->mc_vregs[32], failed);
#endif /* CONFIG_ALTIVEC */
unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed); unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
if (msr & MSR_FP) if (msr & MSR_FP)
...@@ -420,7 +412,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user ...@@ -420,7 +412,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
else else
unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed); unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);
#ifdef CONFIG_VSX
/* /*
* Copy VSR 0-31 upper half from thread_struct to local * Copy VSR 0-31 upper half from thread_struct to local
* buffer, then write that to userspace. Also set MSR_VSX in * buffer, then write that to userspace. Also set MSR_VSX in
...@@ -436,23 +427,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user ...@@ -436,23 +427,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
msr |= MSR_VSX; msr |= MSR_VSX;
} }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/* SPE regs are not checkpointed with TM, so this section is
* simply the same as in __unsafe_save_user_regs().
*/
if (current->thread.used_spe) {
unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
ELF_NEVRREG * sizeof(u32), failed);
/* set MSR_SPE in the saved MSR value to indicate that
* frame->mc_vregs contains valid data */
msr |= MSR_SPE;
}
/* We always copy to/from spefscr */
unsafe_put_user(current->thread.spefscr,
(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */
unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed); unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
...@@ -587,9 +561,7 @@ static long restore_tm_user_regs(struct pt_regs *regs, ...@@ -587,9 +561,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *tm_sr) struct mcontext __user *tm_sr)
{ {
unsigned long msr, msr_hi; unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
int i; int i;
#endif
if (tm_suspend_disabled) if (tm_suspend_disabled)
return 1; return 1;
...@@ -610,7 +582,6 @@ static long restore_tm_user_regs(struct pt_regs *regs, ...@@ -610,7 +582,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
/* Restore the previous little-endian mode */ /* Restore the previous little-endian mode */
regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
#ifdef CONFIG_ALTIVEC
regs->msr &= ~MSR_VEC; regs->msr &= ~MSR_VEC;
if (msr & MSR_VEC) { if (msr & MSR_VEC) {
/* restore altivec registers from the stack */ /* restore altivec registers from the stack */
...@@ -629,13 +600,11 @@ static long restore_tm_user_regs(struct pt_regs *regs, ...@@ -629,13 +600,11 @@ static long restore_tm_user_regs(struct pt_regs *regs,
(u32 __user *)&sr->mc_vregs[32], failed); (u32 __user *)&sr->mc_vregs[32], failed);
if (cpu_has_feature(CPU_FTR_ALTIVEC)) if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, current->thread.ckvrsave); mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */
regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed); unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
#ifdef CONFIG_VSX
regs->msr &= ~MSR_VSX; regs->msr &= ~MSR_VSX;
if (msr & MSR_VSX) { if (msr & MSR_VSX) {
/* /*
...@@ -649,24 +618,6 @@ static long restore_tm_user_regs(struct pt_regs *regs, ...@@ -649,24 +618,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0; current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
} }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/* SPE regs are not checkpointed with TM, so this section is
* simply the same as in restore_user_regs().
*/
regs->msr &= ~MSR_SPE;
if (msr & MSR_SPE) {
unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
ELF_NEVRREG * sizeof(u32), failed);
current->thread.used_spe = true;
} else if (current->thread.used_spe)
memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
/* Always get SPEFSCR back */
unsafe_get_user(current->thread.spefscr,
(u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */
user_read_access_end(); user_read_access_end();
...@@ -675,7 +626,6 @@ static long restore_tm_user_regs(struct pt_regs *regs, ...@@ -675,7 +626,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
unsafe_restore_general_regs(regs, tm_sr, failed); unsafe_restore_general_regs(regs, tm_sr, failed);
#ifdef CONFIG_ALTIVEC
/* restore altivec registers from the stack */ /* restore altivec registers from the stack */
if (msr & MSR_VEC) if (msr & MSR_VEC)
unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs, unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
...@@ -684,11 +634,9 @@ static long restore_tm_user_regs(struct pt_regs *regs, ...@@ -684,11 +634,9 @@ static long restore_tm_user_regs(struct pt_regs *regs,
/* Always get VRSAVE back */ /* Always get VRSAVE back */
unsafe_get_user(current->thread.vrsave, unsafe_get_user(current->thread.vrsave,
(u32 __user *)&tm_sr->mc_vregs[32], failed); (u32 __user *)&tm_sr->mc_vregs[32], failed);
#endif /* CONFIG_ALTIVEC */
unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed); unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);
#ifdef CONFIG_VSX
if (msr & MSR_VSX) { if (msr & MSR_VSX) {
/* /*
* Restore altivec registers from the stack to a local * Restore altivec registers from the stack to a local
...@@ -697,7 +645,6 @@ static long restore_tm_user_regs(struct pt_regs *regs, ...@@ -697,7 +645,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed); unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
current->thread.used_vsr = true; current->thread.used_vsr = true;
} }
#endif /* CONFIG_VSX */
/* Get the top half of the MSR from the user context */ /* Get the top half of the MSR from the user context */
unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed); unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
...@@ -742,12 +689,10 @@ static long restore_tm_user_regs(struct pt_regs *regs, ...@@ -742,12 +689,10 @@ static long restore_tm_user_regs(struct pt_regs *regs,
load_fp_state(&current->thread.fp_state); load_fp_state(&current->thread.fp_state);
regs->msr |= (MSR_FP | current->thread.fpexc_mode); regs->msr |= (MSR_FP | current->thread.fpexc_mode);
} }
#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) { if (msr & MSR_VEC) {
load_vr_state(&current->thread.vr_state); load_vr_state(&current->thread.vr_state);
regs->msr |= MSR_VEC; regs->msr |= MSR_VEC;
} }
#endif
preempt_enable(); preempt_enable();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment