Commit f110c0c1 authored by Michael Neuling, committed by Stephen Rothwell

powerpc: fix compiling CONFIG_PPC_TRANSACTIONAL_MEM when CONFIG_ALTIVEC=n

We can't compile a kernel with CONFIG_ALTIVEC=n when
CONFIG_PPC_TRANSACTIONAL_MEM=y.  We currently get:

arch/powerpc/kernel/tm.S:320: Error: unsupported relocation against THREAD_VSCR
arch/powerpc/kernel/tm.S:323: Error: unsupported relocation against THREAD_VR0
arch/powerpc/kernel/tm.S:323: Error: unsupported relocation against THREAD_VR0
etc.

The below fixes this with a sprinkling of #ifdefs.

This was found by mpe with kisskb:
  http://kisskb.ellerman.id.au/kisskb/buildresult/8539442/

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
parent e8f2b548
@@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
new->thread.regs->msr |= new->thread.regs->msr |=
(MSR_FP | new->thread.fpexc_mode); (MSR_FP | new->thread.fpexc_mode);
} }
#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) { if (msr & MSR_VEC) {
do_load_up_transact_altivec(&new->thread); do_load_up_transact_altivec(&new->thread);
new->thread.regs->msr |= MSR_VEC; new->thread.regs->msr |= MSR_VEC;
} }
#endif
/* We may as well turn on VSX too since all the state is restored now */ /* We may as well turn on VSX too since all the state is restored now */
if (msr & MSR_VSX) if (msr & MSR_VSX)
new->thread.regs->msr |= MSR_VSX; new->thread.regs->msr |= MSR_VSX;
......
@@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
do_load_up_transact_fpu(&current->thread); do_load_up_transact_fpu(&current->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode); regs->msr |= (MSR_FP | current->thread.fpexc_mode);
} }
#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) { if (msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread); do_load_up_transact_altivec(&current->thread);
regs->msr |= MSR_VEC; regs->msr |= MSR_VEC;
} }
#endif
return 0; return 0;
} }
......
@@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
do_load_up_transact_fpu(&current->thread); do_load_up_transact_fpu(&current->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode); regs->msr |= (MSR_FP | current->thread.fpexc_mode);
} }
#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) { if (msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread); do_load_up_transact_altivec(&current->thread);
regs->msr |= MSR_VEC; regs->msr |= MSR_VEC;
} }
#endif
return err; return err;
} }
......
@@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint)
or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */ or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */
mtmsr r5 mtmsr r5
#ifdef CONFIG_ALTIVEC
/* FP and VEC registers: These are recheckpointed from thread.fpr[] /* FP and VEC registers: These are recheckpointed from thread.fpr[]
* and thread.vr[] respectively. The thread.transact_fpr[] version * and thread.vr[] respectively. The thread.transact_fpr[] version
* is more modern, and will be loaded subsequently by any FPUnavailable * is more modern, and will be loaded subsequently by any FPUnavailable
@@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint)
REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
ld r5, THREAD_VRSAVE(r3) ld r5, THREAD_VRSAVE(r3)
mtspr SPRN_VRSAVE, r5 mtspr SPRN_VRSAVE, r5
#endif
dont_restore_vec: dont_restore_vec:
andi. r0, r4, MSR_FP andi. r0, r4, MSR_FP
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment