Commit 04879b04 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64: VMX (Altivec) support & signal32 rework, from Ben Herrenschmidt

From: Anton Blanchard <anton@samba.org>

VMX (Altivec) support & signal32 rework, from Ben Herrenschmidt
parent d4c6e4e1
......@@ -72,6 +72,13 @@ config PPC64
bool
default y
# VMX is pSeries only for now until somebody writes the iSeries
# exception vectors for it
config ALTIVEC
bool "Support for VMX (Altivec) vector unit"
depends on PPC_PSERIES
default y
config POWER4_ONLY
bool "Optimize for POWER4"
default n
......
......@@ -31,4 +31,6 @@ typedef struct {
#define BITS_PER_LONG 32
typedef __vector128 vector128;
#endif /* _PPC64_TYPES_H */
......@@ -56,6 +56,12 @@ int main(void)
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
DEFINE(KSP, offsetof(struct thread_struct, ksp));
#ifdef CONFIG_ALTIVEC
DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
#endif /* CONFIG_ALTIVEC */
DEFINE(MM, offsetof(struct task_struct, mm));
/* naca */
......
......@@ -21,6 +21,13 @@
struct cpu_spec* cur_cpu_spec = NULL;
/* NOTE:
 * Unlike ppc32, ppc64 will only call this once for the boot CPU; it's
 * the responsibility of the appropriate CPU save/restore functions to
 * eventually copy these settings over. Those save/restore functions
 * aren't yet part of the cputable though. That has to be fixed for both
 * ppc32 and ppc64.
*/
extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
......
......@@ -29,6 +29,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/cputable.h>
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
......@@ -211,6 +212,15 @@ _GLOBAL(ret_from_syscall_2)
.align 2,0
#endif
_GLOBAL(ppc32_swapcontext)
bl .sys32_swapcontext
b 80f
_GLOBAL(ppc64_swapcontext)
bl .sys_swapcontext
b 80f
_GLOBAL(ppc32_sigreturn)
bl .sys32_sigreturn
b 80f
......@@ -261,10 +271,17 @@ _GLOBAL(_switch)
SAVE_10GPRS(22, r1)
mflr r20 /* Return to switch caller */
mfmsr r22
andi. r21, r22, MSR_FP
li r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
oris r0,r0,MSR_VEC@h /* Disable altivec */
mfspr r24,SPRN_VRSAVE /* save vrsave register value */
std r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
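	/* r0 now holds MSR_FP (plus MSR_VEC on Altivec-capable CPUs); if
	 * either bit is still set in the MSR, clear it across the switch
	 * so FP/VMX state stays lazily managed */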
and. r0,r0,r22
beq+ 1f
li r6,MSR_FP /* Disable floating-point */
andc r22,r22,r6
andc r22,r22,r0
mtmsrd r22
isync
1: std r20,_NIP(r1)
......@@ -278,6 +295,14 @@ _GLOBAL(_switch)
ld r1,KSP(r4) /* Load new stack pointer */
ld r6,_CCR(r1)
mtcrf 0xFF,r6
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
ld r0,THREAD_VRSAVE(r4)
mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
/* r3-r13 are destroyed -- Cort */
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
......
......@@ -391,9 +391,34 @@ __start_interrupts:
STD_EXCEPTION_PSERIES( 0xc00, SystemCall )
STD_EXCEPTION_PSERIES( 0xd00, SingleStep )
STD_EXCEPTION_PSERIES( 0xe00, Trap_0e )
STD_EXCEPTION_PSERIES( 0xf00, PerformanceMonitor )
/* We need to deal with the Altivec unavailable exception
* here which is at 0xf20, thus in the middle of the
* prolog code of the PerformanceMonitor one. A little
* trickery is thus necessary
*/
. = 0xf00
b .PerformanceMonitor_Pseries
. = 0xf20
b .AltivecUnavailable_Pseries
STD_EXCEPTION_PSERIES( 0x1300, InstructionBreakpoint )
STD_EXCEPTION_PSERIES( 0x1700, AltivecAssist )
/* Here are the "moved" performance monitor and
* altivec unavailable exceptions
*/
. = 0x3000
.globl PerformanceMonitor_Pseries;
.PerformanceMonitor_Pseries:
EXCEPTION_PROLOG_PSERIES(0xf00, PerformanceMonitor_common)
. = 0x3100
.globl AltivecUnavailable_Pseries;
.AltivecUnavailable_Pseries:
EXCEPTION_PROLOG_PSERIES(0xf20, AltivecUnavailable_common)
/* Space for the naca. Architected to be located at real address
* NACA_PHYS_ADDR. Various tools rely on this location being fixed.
* The first dword of the naca is required by iSeries LPAR to
......@@ -580,7 +605,11 @@ __end_stab:
STD_EXCEPTION_COMMON( 0xe00, Trap_0e, .UnknownException )
STD_EXCEPTION_COMMON( 0xf00, PerformanceMonitor, .PerformanceMonitorException )
STD_EXCEPTION_COMMON(0x1300, InstructionBreakpoint, .InstructionBreakpointException )
#ifdef CONFIG_ALTIVEC
STD_EXCEPTION_COMMON(0x1700, AltivecAssist, .AltivecAssistException )
#else
STD_EXCEPTION_COMMON(0x1700, AltivecAssist, .UnknownException )
#endif
/*
* Return from an exception which is handled without calling
* save_remaining_regs. The caller is assumed to have done
......@@ -755,6 +784,23 @@ FPUnavailable_common:
bl .KernelFPUnavailableException
BUG_OPCODE
.globl AltivecUnavailable_common
AltivecUnavailable_common:
EXCEPTION_PROLOG_COMMON
#ifdef CONFIG_ALTIVEC
bne .load_up_altivec /* if from user, just load it up */
#endif
addi r3,r1,STACK_FRAME_OVERHEAD
DO_COPY_EE()
li r6,0xf20
bl .save_remaining_regs
#ifdef CONFIG_ALTIVEC
bl .KernelAltivecUnavailableException
#else
bl .UnknownException
#endif
BUG_OPCODE
.globl SystemCall_common
SystemCall_common:
EXCEPTION_PROLOG_COMMON
......@@ -1483,6 +1529,126 @@ _GLOBAL(giveup_fpu)
#endif /* CONFIG_SMP */
blr
#ifdef CONFIG_ALTIVEC
/*
* load_up_altivec(unused, unused, tsk)
* Disable VMX for the task which had it previously,
* and save its vector registers in its thread_struct.
* Enables the VMX for use in the kernel on return.
* On SMP we know the VMX is free, since we give it up every
* switch (ie, no lazy save of the vector registers).
* On entry: r13 == 'current' && last_task_used_altivec != 'current'
*/
_STATIC(load_up_altivec)
mfmsr r5 /* grab the current MSR */
oris r5,r5,MSR_VEC@h
mtmsrd r5 /* enable use of VMX now */
isync
/*
* For SMP, we don't do lazy VMX switching because it just gets too
* horrendously complex, especially when a task switches from one CPU
 * to another. Instead we call giveup_altivec in switch_to.
* VRSAVE isn't dealt with here, that is done in the normal context
* switch code. Note that we could rely on vrsave value to eventually
* avoid saving all of the VREGs here...
*/
#ifndef CONFIG_SMP
LOADBASE(r3,last_task_used_altivec)
ld r4,last_task_used_altivec@l(r3)
cmpi 0,r4,0
beq 1f
/* Save VMX state to last_task_used_altivec's THREAD struct */
addi r4,r4,THREAD
SAVE_32VRS(0,r5,r4)
mfvscr vr0
li r10,THREAD_VSCR
stvx vr0,r10,r4
/* Disable VMX for last_task_used_altivec */
ld r5,PT_REGS(r4)
ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r20,MSR_VEC@h
andc r4,r4,r20
std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
/* Hack: if we get an altivec unavailable trap with VRSAVE
* set to all zeros, we assume this is a broken application
* that fails to set it properly, and thus we switch it to
* all 1's
*/
mfspr r4,SPRN_VRSAVE
cmpi 0,r4,0
bne+ 1f
li r4,-1
mtspr SPRN_VRSAVE,r4
1:
/* enable use of VMX after return */
ld r4,PACACURRENT(r13)
addi r5,r4,THREAD /* Get THREAD */
oris r23,r23,MSR_VEC@h
li r4,1
li r10,THREAD_VSCR
stw r4,THREAD_USED_VR(r5)
lvx vr0,r10,r5
REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
/* Update last_task_used_altivec to 'current' */
subi r4,r5,THREAD /* Back to 'current' */
std r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
b fast_exception_return
/*
* disable_kernel_altivec()
* Disable the VMX.
*/
_GLOBAL(disable_kernel_altivec)
mfmsr r3
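	/* Clear MSR_VEC with a rotate/mask pair: rotate the VEC bit up to
	 * the MSB, mask it off (MB=1), then rotate everything back */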
rldicl r0,r3,(63-MSR_VEC_LG),1
rldicl r3,r0,(MSR_VEC_LG+1),0
mtmsrd r3 /* disable use of VMX now */
isync
blr
/*
* giveup_altivec(tsk)
* Disable VMX for the task given as the argument,
* and save the vector registers in its thread_struct.
* Enables the VMX for use in the kernel on return.
*/
_GLOBAL(giveup_altivec)
mfmsr r5
oris r5,r5,MSR_VEC@h
mtmsrd r5 /* enable use of VMX now */
isync
cmpi 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
ld r5,PT_REGS(r3)
cmpi 0,r5,0
SAVE_32VRS(0,r4,r3)
mfvscr vr0
li r4,THREAD_VSCR
stvx vr0,r4,r3
beq 1f
ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_VEC@h
andc r4,r4,r3 /* disable VMX for previous task */
std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
li r5,0
LOADBASE(r4,last_task_used_altivec)
std r5,last_task_used_altivec@l(r4)
#endif /* CONFIG_SMP */
blr
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SMP
/*
* This function is called after the master CPU has released the
......@@ -1784,6 +1950,12 @@ _STATIC(start_here_common)
addi r2,r2,0x4000
addi r2,r2,0x4000
/* Apply the CPU-specific fixups (nop out sections not relevant
 * to this CPU)
 */
li r3,0
bl .do_cpu_ftr_fixups
/* setup the systemcfg pointer */
LOADADDR(r9,systemcfg)
SET_REG_TO_CONST(r8, SYSTEMCFG_VIRT_ADDR)
......
......@@ -418,7 +418,7 @@ _GLOBAL(cvt_df)
blr
/*
* identify_cpu,
* identify_cpu and calls setup_cpu
* In: r3 = base of the cpu_specs array
* r4 = address of cur_cpu_spec
* r5 = relocation offset
......@@ -434,9 +434,17 @@ _GLOBAL(identify_cpu)
addi r3,r3,CPU_SPEC_ENTRY_SIZE
b 1b
1:
add r3,r3,r5
std r3,0(r4)
blr
add r0,r3,r5
std r0,0(r4)
ld r4,CPU_SPEC_SETUP(r3)
sub r4,r4,r5
ld r4,0(r4)
sub r4,r4,r5
mtctr r4
/* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
mr r4,r3
mr r3,r5
bctr
/*
* do_cpu_ftr_fixups - goes through the list of CPU feature fixups
......@@ -486,25 +494,6 @@ _GLOBAL(do_cpu_ftr_fixups)
isync
b 1b
/*
* call_setup_cpu - call the setup_cpu function for this cpu
* r3 = data offset
*
* Setup function is called with:
* r3 = data offset
* r4 = ptr to CPU spec (relocated)
*/
_GLOBAL(call_setup_cpu)
LOADADDR(r4, cur_cpu_spec)
sub r4,r4,r3
lwz r4,0(r4) # load pointer to cpu_spec
sub r4,r4,r3 # relocate
lwz r6,CPU_SPEC_SETUP(r4) # load function pointer
sub r6,r6,r3
mtctr r6
bctr
/*
* Create a kernel thread
......@@ -823,7 +812,7 @@ _GLOBAL(sys_call_table32)
.llong .compat_clock_gettime
.llong .compat_clock_getres
.llong .compat_clock_nanosleep
.llong .sys_ni_syscall
.llong .ppc32_swapcontext
.llong .sys32_tgkill /* 250 */
.llong .sys32_utimes
.llong .compat_statfs64
......@@ -1082,7 +1071,7 @@ _GLOBAL(sys_call_table)
.llong .sys_clock_gettime
.llong .sys_clock_getres
.llong .sys_clock_nanosleep
.llong .sys_ni_syscall
.llong .ppc64_swapcontext
.llong .sys_tgkill /* 250 */
.llong .sys_utimes
.llong .sys_statfs64
......
......@@ -164,7 +164,9 @@ EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(flush_instruction_cache);
EXPORT_SYMBOL(_get_PVR);
EXPORT_SYMBOL(giveup_fpu);
EXPORT_SYMBOL(enable_kernel_fp);
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(giveup_altivec);
#endif
EXPORT_SYMBOL(flush_icache_range);
EXPORT_SYMBOL(flush_icache_user_range);
EXPORT_SYMBOL(flush_dcache_page);
......
......@@ -50,7 +50,10 @@
#include <asm/cputable.h>
#include <asm/sections.h>
#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
#endif
struct mm_struct ioremap_mm = { pgd : ioremap_dir
,page_table_lock : SPIN_LOCK_UNLOCKED };
......@@ -58,8 +61,7 @@ struct mm_struct ioremap_mm = { pgd : ioremap_dir
char *sysmap = NULL;
unsigned long sysmap_size = 0;
void
enable_kernel_fp(void)
void enable_kernel_fp(void)
{
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
......@@ -70,6 +72,7 @@ enable_kernel_fp(void)
giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
......@@ -85,6 +88,31 @@ int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
return 1;
}
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
giveup_altivec(current);
else
giveup_altivec(NULL); /* just enables VMX for kernel */
#else
giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
if (regs->msr & MSR_VEC)
giveup_altivec(current);
memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
return 1;
}
#endif /* CONFIG_ALTIVEC */
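An illustrative sketch (not part of the patch) of how kernel code is expected to use the enable/disable helpers added above; the function name and body are hypothetical placeholders:

static void vmx_copy_sketch(void *dst, const void *src, unsigned long len)
{
	/* caller must keep preemption off while kernel VMX use is live */
	enable_kernel_altivec();	/* saves any user VMX state, sets MSR_VEC */
	/* ... issue Altivec loads/stores on dst/src here ... */
	disable_kernel_altivec();	/* turn MSR_VEC back off */
}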
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
{
......@@ -104,8 +132,20 @@ struct task_struct *__switch_to(struct task_struct *prev,
*/
if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#endif /* CONFIG_SMP */
#if defined(CONFIG_ALTIVEC) && !defined(CONFIG_SMP)
/* Avoid the trap. On SMP this never happens since
* we don't set last_task_used_altivec -- Cort
*/
if (new->thread.regs && last_task_used_altivec == new)
new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
new_thread = &new->thread;
old_thread = &current->thread;
......@@ -158,8 +198,14 @@ void show_regs(struct pt_regs * regs)
void exit_thread(void)
{
#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
if (last_task_used_altivec == current)
last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#endif /* CONFIG_SMP */
}
void flush_thread(void)
......@@ -169,8 +215,14 @@ void flush_thread(void)
if (t->flags & _TIF_ABI_PENDING)
t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
if (last_task_used_altivec == current)
last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#endif /* CONFIG_SMP */
}
void
......@@ -178,6 +230,25 @@ release_thread(struct task_struct *t)
{
}
/*
* This gets called before we allocate a new thread and copy
* the current task into it.
*/
void prepare_to_copy(struct task_struct *tsk)
{
struct pt_regs *regs = tsk->thread.regs;
if (regs == NULL)
return;
if (regs->msr & MSR_FP)
giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
if (regs->msr & MSR_VEC)
giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
}
/*
* Copy a thread..
*/
......@@ -268,9 +339,25 @@ void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp)
regs->gpr[1] = sp;
regs->gpr[2] = toc;
regs->msr = MSR_USER64;
#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = 0;
#endif /* CONFIG_SMP */
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
#ifndef CONFIG_SMP
if (last_task_used_altivec == current)
last_task_used_altivec = 0;
#endif /* CONFIG_SMP */
memset(current->thread.vr, 0, sizeof(current->thread.vr));
current->thread.vscr.u[0] = 0;
current->thread.vscr.u[1] = 0;
current->thread.vscr.u[2] = 0;
current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
current->thread.vrsave = 0;
current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
}
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
......@@ -314,9 +401,6 @@ int sys_clone(unsigned long clone_flags, unsigned long p2, unsigned long p3,
}
}
if (regs->msr & MSR_FP)
giveup_fpu(current);
return do_fork(clone_flags & ~CLONE_IDLETASK, p2, regs, 0,
(int *)parent_tidptr, (int *)child_tidptr);
}
......@@ -325,9 +409,6 @@ int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
unsigned long p4, unsigned long p5, unsigned long p6,
struct pt_regs *regs)
{
if (regs->msr & MSR_FP)
giveup_fpu(current);
return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}
......@@ -335,9 +416,6 @@ int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
unsigned long p4, unsigned long p5, unsigned long p6,
struct pt_regs *regs)
{
if (regs->msr & MSR_FP)
giveup_fpu(current);
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0,
NULL, NULL);
}
......@@ -355,7 +433,10 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
goto out;
if (regs->msr & MSR_FP)
giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
if (regs->msr & MSR_VEC)
giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
error = do_execve(filename, (char **) a1, (char **) a2, regs);
if (error == 0)
......
......@@ -312,6 +312,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
else
seq_printf(m, "unknown (%08x)", pvr);
#ifdef CONFIG_ALTIVEC
if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
seq_printf(m, ", altivec supported");
#endif /* CONFIG_ALTIVEC */
seq_printf(m, "\n");
#ifdef CONFIG_PPC_PSERIES
......
......@@ -114,19 +114,49 @@ long sys_sigaltstack(const stack_t *uss, stack_t *uoss, unsigned long r5,
* Set up the sigcontext for the signal frame.
*/
static int
setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
int signr, sigset_t *set, unsigned long handler)
{
/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
* process never used altivec yet (MSR_VEC is zero in pt_regs of
* the context). This is very important because we must ensure we
* don't lose the VRSAVE content that may have been set prior to
 * the process doing its first vector operation.
 * Userland shall check AT_HWCAP to know whether it can rely on the
* v_regs pointer or not
*/
#ifdef CONFIG_ALTIVEC
elf_vrreg_t *v_regs = (elf_vrreg_t *)(((unsigned long)sc->vmx_reserve) & ~0xful);
#endif
int err = 0;
if (regs->msr & MSR_FP)
giveup_fpu(current);
current->thread.saved_msr = regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1);
regs->msr = current->thread.saved_msr | current->thread.fpexc_mode;
current->thread.saved_softe = regs->softe;
/* Make sure signal doesn't get spurious FP exceptions */
current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
err |= __put_user(v_regs, &sc->v_regs);
/* save altivec registers */
if (current->thread.used_vr) {
if (regs->msr & MSR_VEC)
giveup_altivec(current);
/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128));
/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_regs
* contains valid data.
*/
regs->msr |= MSR_VEC;
}
/* We always copy to/from vrsave; it's 0 if we don't have or don't
* use altivec.
*/
err |= __put_user(current->thread.vrsave, (u32 *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
err |= __put_user(&sc->gp_regs, &sc->regs);
err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
err |= __copy_to_user(&sc->fp_regs, &current->thread.fpr, FP_REGS_SIZE);
......@@ -135,9 +165,6 @@ setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
if (set != NULL)
err |= __put_user(set->sig[0], &sc->oldmask);
regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
current->thread.fpscr = 0;
return err;
}
......@@ -145,23 +172,42 @@ setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
* Restore the sigcontext from the signal frame.
*/
static int
restore_sigcontext(struct pt_regs *regs, sigset_t *set, struct sigcontext *sc)
static int restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig, struct sigcontext *sc)
{
#ifdef CONFIG_ALTIVEC
elf_vrreg_t *v_regs;
#endif
unsigned int err = 0;
unsigned long save_r13;
if (regs->msr & MSR_FP)
giveup_fpu(current);
/* If this is not a signal return, we preserve the TLS in r13 */
if (!sig)
save_r13 = regs->gpr[13];
err |= __copy_from_user(regs, &sc->gp_regs, GP_REGS_SIZE);
if (!sig)
regs->gpr[13] = save_r13;
err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
current->thread.fpexc_mode = regs->msr & (MSR_FE0 | MSR_FE1);
if (set != NULL)
err |= __get_user(set->sig[0], &sc->oldmask);
/* Don't allow the signal handler to change these modulo FE{0,1} */
regs->msr = current->thread.saved_msr & ~(MSR_FP | MSR_FE0 | MSR_FE1);
regs->softe = current->thread.saved_softe;
#ifdef CONFIG_ALTIVEC
err |= __get_user(v_regs, &sc->v_regs);
if (err)
return err;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != 0 && (regs->msr & MSR_VEC) != 0)
err |= __copy_from_user(current->thread.vr, v_regs, 33 * sizeof(vector128));
else if (current->thread.used_vr)
memset(&current->thread.vr, 0, 33 * sizeof(vector128));
/* Always get VRSAVE back */
if (v_regs != 0)
err |= __get_user(current->thread.vrsave, (u32 *)&v_regs[33]);
else
current->thread.vrsave = 0;
#endif /* CONFIG_ALTIVEC */
/* Force reload of FP/VEC */
regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC);
return err;
}
......@@ -169,8 +215,8 @@ restore_sigcontext(struct pt_regs *regs, sigset_t *set, struct sigcontext *sc)
/*
* Allocate space for the signal frame
*/
static inline void *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
static inline void * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
size_t frame_size)
{
unsigned long newsp;
......@@ -185,8 +231,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
return (void *)((newsp - frame_size) & -8ul);
}
static int
setup_trampoline(unsigned int syscall, unsigned int *tramp)
/*
* Setup the trampoline code on the stack
*/
static int setup_trampoline(unsigned int syscall, unsigned int *tramp)
{
int i, err = 0;
......@@ -208,6 +256,72 @@ setup_trampoline(unsigned int syscall, unsigned int *tramp)
return err;
}
/*
* Restore the user process's signal mask (also used by signal32.c)
*/
void restore_sigmask(sigset_t *set)
{
sigdelsetmask(set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = *set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
/*
* Handle {get,set,swap}_context operations
*/
int sys_swapcontext(struct ucontext __user *old_ctx,
struct ucontext __user *new_ctx,
long ctx_size, long r6, long r7, long r8, struct pt_regs *regs)
{
unsigned char tmp;
sigset_t set;
/* Context size is for future use. Right now, we only make sure
* we are passed something we understand
*/
if (ctx_size < sizeof(struct ucontext))
return -EINVAL;
if (old_ctx != NULL) {
if (verify_area(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
|| setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0)
|| __copy_to_user(&old_ctx->uc_sigmask,
&current->blocked, sizeof(sigset_t)))
return -EFAULT;
}
if (new_ctx == NULL)
return 0;
if (verify_area(VERIFY_READ, new_ctx, sizeof(*new_ctx))
|| __get_user(tmp, (u8 *) new_ctx)
|| __get_user(tmp, (u8 *) (new_ctx + 1) - 1))
return -EFAULT;
/*
* If we get a fault copying the context into the kernel's
* image of the user's registers, we can't just return -EFAULT
* because the user's registers will be corrupted. For instance
* the NIP value may have been updated but not some of the
* other registers. Given that we have done the verify_area
* and successfully read the first and last bytes of the region
* above, this should only happen in an out-of-memory situation
* or if another thread unmaps the region containing the context.
* We kill the task with a SIGSEGV in this situation.
*/
if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
do_exit(SIGSEGV);
restore_sigmask(&set);
if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
do_exit(SIGSEGV);
/* This returns like rt_sigreturn */
return 0;
}
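For reference, the ppc32/ppc64 swapcontext entry points wired into the syscall tables above are what allow a C library to back the standard <ucontext.h> routines with a single kernel call. A minimal userland sketch of that interface (portable libc API, illustration only, not part of the patch):

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, work_ctx;
static char work_stack[64 * 1024];

static void worker(void)
{
	printf("in worker context\n");
	/* returning here resumes uc_link, i.e. main_ctx */
}

int main(void)
{
	getcontext(&work_ctx);
	work_ctx.uc_stack.ss_sp = work_stack;
	work_ctx.uc_stack.ss_size = sizeof(work_stack);
	work_ctx.uc_link = &main_ctx;
	makecontext(&work_ctx, worker, 0);
	swapcontext(&main_ctx, &work_ctx);	/* save main, run worker */
	printf("back in main context\n");
	return 0;
}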
/*
* Do a signal return; undo the signal stack.
*/
......@@ -218,7 +332,6 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
{
struct ucontext *uc = (struct ucontext *)regs->gpr[1];
sigset_t set;
stack_t st;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
......@@ -228,20 +341,14 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, NULL, &uc->uc_mcontext))
restore_sigmask(&set);
if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
goto badframe;
if (__copy_from_user(&st, &uc->uc_stack, sizeof(st)))
goto badframe;
/* This function sets back the stack flags into
the current task structure. */
sys_sigaltstack(&st, NULL, 0, 0, 0, 0, regs);
/* do_sigaltstack expects a __user pointer and won't modify
* what's in there anyway
*/
do_sigaltstack(&uc->uc_stack, NULL, regs->gpr[1]);
return regs->result;
......@@ -253,8 +360,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
do_exit(SIGSEGV);
}
static void
setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
static void setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
/* Handler is *really* a pointer to the function descriptor for
......@@ -332,9 +438,8 @@ setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
/*
* OK, we're invoking a handler
*/
static void
handle_signal(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
static void handle_signal(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
/* Set up Signal Frame */
setup_rt_frame(sig, ka, info, oldset, regs);
......@@ -352,8 +457,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
return;
}
static inline void
syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
{
switch ((int)regs->result) {
case -ERESTART_RESTARTBLOCK:
......
This diff is collapsed.
......@@ -2106,6 +2106,10 @@ long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a2,
goto out;
if (regs->msr & MSR_FP)
giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
if (regs->msr & MSR_VEC)
giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
error = do_execve32(filename, (u32*) a1, (u32*) a2, regs);
......@@ -2126,9 +2130,25 @@ void start_thread32(struct pt_regs* regs, unsigned long nip, unsigned long sp)
regs->nip = nip;
regs->gpr[1] = sp;
regs->msr = MSR_USER32;
#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = 0;
#endif /* CONFIG_SMP */
current->thread.fpscr = 0;
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
#ifdef CONFIG_ALTIVEC
#ifndef CONFIG_SMP
if (last_task_used_altivec == current)
last_task_used_altivec = 0;
#endif /* CONFIG_SMP */
memset(current->thread.vr, 0, sizeof(current->thread.vr));
current->thread.vscr.u[0] = 0;
current->thread.vscr.u[1] = 0;
current->thread.vscr.u[2] = 0;
current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
current->thread.vrsave = 0;
current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
}
extern asmlinkage int sys_prctl(int option, unsigned long arg2, unsigned long arg3,
......
......@@ -419,6 +419,14 @@ KernelFPUnavailableException(struct pt_regs *regs)
panic("Unrecoverable FP Unavailable Exception in Kernel");
}
void
KernelAltivecUnavailableException(struct pt_regs *regs)
{
printk("Illegal VMX/Altivec used in kernel (task=0x%p, "
"pc=0x%016lx, trap=0x%lx)\n", current, regs->nip, regs->trap);
panic("Unrecoverable VMX/Altivec Unavailable Exception in Kernel");
}
void
SingleStepException(struct pt_regs *regs)
{
......@@ -488,6 +496,17 @@ AlignmentException(struct pt_regs *regs)
_exception(SIGBUS, &info, regs);
}
#ifdef CONFIG_ALTIVEC
void
AltivecAssistException(struct pt_regs *regs)
{
if (regs->msr & MSR_VEC)
giveup_altivec(current);
/* XXX quick hack for now: set the non-Java bit in the VSCR */
current->thread.vscr.u[3] |= 0x10000;
}
#endif /* CONFIG_ALTIVEC */
void __init trap_init(void)
{
}
#ifndef __PPC64_ELF_H
#define __PPC64_ELF_H
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/cputable.h>
/* PowerPC relocations defined by the ABIs */
#define R_PPC_NONE 0
#define R_PPC_ADDR32 1 /* 32bit absolute address */
......@@ -39,8 +43,39 @@
#define R_PPC_SECTOFF_LO 34
#define R_PPC_SECTOFF_HI 35
#define R_PPC_SECTOFF_HA 36
/* PowerPC relocations defined for the TLS access ABI. */
#define R_PPC_TLS 67 /* none (sym+add)@tls */
#define R_PPC_DTPMOD32 68 /* word32 (sym+add)@dtpmod */
#define R_PPC_TPREL16 69 /* half16* (sym+add)@tprel */
#define R_PPC_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
#define R_PPC_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
#define R_PPC_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
#define R_PPC_TPREL32 73 /* word32 (sym+add)@tprel */
#define R_PPC_DTPREL16 74 /* half16* (sym+add)@dtprel */
#define R_PPC_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
#define R_PPC_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
#define R_PPC_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
#define R_PPC_DTPREL32 78 /* word32 (sym+add)@dtprel */
#define R_PPC_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
#define R_PPC_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
#define R_PPC_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
#define R_PPC_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
#define R_PPC_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
#define R_PPC_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
#define R_PPC_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
#define R_PPC_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
#define R_PPC_GOT_TPREL16 87 /* half16* (sym+add)@got@tprel */
#define R_PPC_GOT_TPREL16_LO 88 /* half16 (sym+add)@got@tprel@l */
#define R_PPC_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
#define R_PPC_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
#define R_PPC_GOT_DTPREL16 91 /* half16* (sym+add)@got@dtprel */
#define R_PPC_GOT_DTPREL16_LO 92 /* half16* (sym+add)@got@dtprel@l */
#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */
#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */
/* Keep this the last entry. */
#define R_PPC_NUM 37
#define R_PPC_NUM 95
/*
* ELF register definitions..
......@@ -54,7 +89,8 @@
#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
#define ELF_NFPREG 33 /* includes fpscr */
#define ELF_NVRREG 34 /* includes vscr */
#define ELF_NVRREG32 33 /* includes vscr & vrsave stuffed together */
#define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */
typedef unsigned long elf_greg_t64;
typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
......@@ -95,9 +131,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
* ptrace interface. This allows signal handling and ptrace to use the same
* structures. This also simplifies the implementation of a bi-arch
 * (combined 32- and 64-bit) gdb.
*
 * Note that it's _not_ compatible with the 32-bit ucontext, which stuffs the
* vrsave along with vscr and so only uses 33 vectors for the register set
*/
typedef __vector128 elf_vrreg_t;
typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
/*
* This is used to ensure we don't load something for the wrong architecture.
......@@ -145,13 +185,15 @@ static inline int dump_task_regs(struct task_struct *tsk,
extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
/* XXX Should we define the XFPREGS using altivec ??? */
#endif
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP (0)
#define ELF_HWCAP (cur_cpu_spec->cpu_user_features)
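With ELF_HWCAP now exposing cpu_user_features, userland can test for Altivec before trusting the v_regs pointer in a sigcontext, as the setup_sigcontext comment earlier in this patch requires. A minimal sketch (illustration only; getauxval is a later glibc convenience, and the feature-bit value is assumed to mirror asm/cputable.h):

#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), AT_HWCAP */

#ifndef PPC_FEATURE_HAS_ALTIVEC
#define PPC_FEATURE_HAS_ALTIVEC	0x10000000	/* assumed, per asm/cputable.h */
#endif

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & PPC_FEATURE_HAS_ALTIVEC)
		printf("Altivec present: v_regs in sigcontext is valid\n");
	else
		printf("no Altivec: ignore v_regs\n");
	return 0;
}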
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
......@@ -289,7 +331,50 @@ do { \
#define R_PPC64_TOC16_LO_DS 64 /* half16ds #lo(S + A - .TOC.) >> 2. */
#define R_PPC64_PLTGOT16_DS 65 /* half16ds* (M + A) >> 2. */
#define R_PPC64_PLTGOT16_LO_DS 66 /* half16ds #lo(M + A) >> 2. */
/* PowerPC64 relocations defined for the TLS access ABI. */
#define R_PPC64_TLS 67 /* none (sym+add)@tls */
#define R_PPC64_DTPMOD64 68 /* doubleword64 (sym+add)@dtpmod */
#define R_PPC64_TPREL16 69 /* half16* (sym+add)@tprel */
#define R_PPC64_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
#define R_PPC64_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
#define R_PPC64_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
#define R_PPC64_TPREL64 73 /* doubleword64 (sym+add)@tprel */
#define R_PPC64_DTPREL16 74 /* half16* (sym+add)@dtprel */
#define R_PPC64_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
#define R_PPC64_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
#define R_PPC64_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
#define R_PPC64_DTPREL64 78 /* doubleword64 (sym+add)@dtprel */
#define R_PPC64_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
#define R_PPC64_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
#define R_PPC64_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
#define R_PPC64_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
#define R_PPC64_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
#define R_PPC64_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
#define R_PPC64_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
#define R_PPC64_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
#define R_PPC64_GOT_TPREL16_DS 87 /* half16ds* (sym+add)@got@tprel */
#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */
#define R_PPC64_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
#define R_PPC64_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
#define R_PPC64_GOT_DTPREL16_DS 91 /* half16ds* (sym+add)@got@dtprel */
#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */
#define R_PPC64_GOT_DTPREL16_HI 93 /* half16 (sym+add)@got@dtprel@h */
#define R_PPC64_GOT_DTPREL16_HA 94 /* half16 (sym+add)@got@dtprel@ha */
#define R_PPC64_TPREL16_DS 95 /* half16ds* (sym+add)@tprel */
#define R_PPC64_TPREL16_LO_DS 96 /* half16ds (sym+add)@tprel@l */
#define R_PPC64_TPREL16_HIGHER 97 /* half16 (sym+add)@tprel@higher */
#define R_PPC64_TPREL16_HIGHERA 98 /* half16 (sym+add)@tprel@highera */
#define R_PPC64_TPREL16_HIGHEST 99 /* half16 (sym+add)@tprel@highest */
#define R_PPC64_TPREL16_HIGHESTA 100 /* half16 (sym+add)@tprel@highesta */
#define R_PPC64_DTPREL16_DS 101 /* half16ds* (sym+add)@dtprel */
#define R_PPC64_DTPREL16_LO_DS 102 /* half16ds (sym+add)@dtprel@l */
#define R_PPC64_DTPREL16_HIGHER 103 /* half16 (sym+add)@dtprel@higher */
#define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */
#define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */
#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */
/* Keep this the last entry. */
#define R_PPC64_NUM 67
#define R_PPC64_NUM 107
#endif /* __PPC64_ELF_H */
......@@ -6,6 +6,7 @@
#include <linux/mm.h>
#include <asm/mmu.h>
#include <asm/ppcdebug.h>
#include <asm/cputable.h>
/*
* Copyright (C) 2001 PPC 64 Team, IBM Corp
......@@ -139,10 +140,16 @@ extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);
* switch_mm is the entry point called from the architecture independent
* code in kernel/sched.c
*/
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
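	/* dssall stops any Altivec data-stream prefetches started by the
	 * outgoing context; their translations belong to the old address
	 * space and must not survive the switch */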
#ifdef CONFIG_ALTIVEC
asm volatile (
BEGIN_FTR_SECTION
"dssall;\n"
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
: : );
#endif /* CONFIG_ALTIVEC */
flush_stab(tsk, next);
cpu_set(smp_processor_id(), next->cpu_vm_mask);
}
......
......@@ -121,12 +121,24 @@ struct sigcontext32 {
u32 regs; /* 4 byte pointer to the pt_regs32 structure. */
};
struct mcontext32 {
elf_gregset_t32 mc_gregs;
elf_fpregset_t mc_fregs;
unsigned int mc_pad[2];
elf_vrregset_t32 mc_vregs __attribute__((__aligned__(16)));
};
struct ucontext32 {
unsigned int uc_flags;
unsigned int uc_link;
stack_32_t uc_stack;
struct sigcontext32 uc_mcontext;
sigset_t uc_sigmask; /* mask last for extensibility */
unsigned int uc_flags;
unsigned int uc_link;
stack_32_t uc_stack;
int uc_pad[7];
u32 uc_regs; /* points to uc_mcontext field */
compat_sigset_t uc_sigmask; /* mask last for extensibility */
/* glibc has 1024-bit signal masks, ours are 64-bit */
int uc_maskext[30];
int uc_pad2[3];
struct mcontext32 uc_mcontext;
};
typedef struct compat_sigevent {
......
......@@ -39,6 +39,19 @@
#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
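/* lvx/stvx have no displacement addressing mode, so each VR save/restore
 * first materialises the offset (THREAD_VR0 + 16*n) in scratch register b
 * and then uses register+register addressing against the thread base */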
#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,b,base
#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,b,base
#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
#define CHECKANYINT(ra,rb) \
mfspr rb,SPRG3; /* Get Paca address */\
ld ra,PACALPPACA+LPPACAANYINT(rb); /* Get pending interrupt flags */\
......
......@@ -311,6 +311,7 @@
#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
#define SPRN_XER 0x001 /* Fixed Point Exception Register */
#define SPRN_ZPR 0x3B0 /* Zone Protection Register */
#define SPRN_VRSAVE 0x100 /* Vector save */
/* Short-hand versions for a number of the above SPRNs */
......@@ -464,11 +465,9 @@ void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)
extern void prepare_to_copy(struct task_struct *tsk);
/*
* Create a new kernel thread.
*/
/* Create a new kernel thread. */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/*
......@@ -479,6 +478,7 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/* Lazy FPU handling on uni-processor */
extern struct task_struct *last_task_used_math;
extern struct task_struct *last_task_used_altivec;
#ifdef __KERNEL__
......@@ -518,6 +518,14 @@ struct thread_struct {
unsigned long fpexc_mode; /* Floating-point exception mode */
unsigned long saved_msr; /* Save MSR across signal handlers */
unsigned long saved_softe; /* Ditto for Soft Enable/Disable */
#ifdef CONFIG_ALTIVEC
/* Complete AltiVec register set */
vector128 vr[32] __attribute((aligned(16)));
/* AltiVec status */
vector128 vscr __attribute((aligned(16)));
unsigned long vrsave;
int used_vr; /* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
};
#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
......
......@@ -85,6 +85,9 @@ extern int _get_PVR(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void giveup_altivec(struct task_struct *);
extern void disable_kernel_altivec(void);
extern void enable_kernel_altivec(void);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
extern int abs(int);
......