Commit 23932693 authored by Paul Mackerras, committed by Linus Torvalds

[PATCH] ppc64: Implement CONFIG_PREEMPT

This implements CONFIG_PREEMPT for ppc64.  Aside from the entry.S
changes to check the _TIF_NEED_RESCHED bit when returning from an
exception, there are various changes to make the ppc64-specific code
preempt-safe, mostly adding preempt_enable/disable or get_cpu/put_cpu
calls where needed.  I have been using this on my desktop G5 for the
last week without problems.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent adf791bd
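
The patch applies two recurring idioms: where kernel code temporarily owns a per-CPU resource such as the FPU, the critical region is wrapped in preempt_disable()/preempt_enable(); where code samples the current CPU number to test cpu_vm_mask, smp_processor_id() is replaced by a get_cpu()/put_cpu() pair. The condensed sketch below is illustrative only and is not part of the patch; the helper names kernel_fp_section() and mm_is_local() are hypothetical, while the primitives they call are the real ones used in the diff.

/* Illustrative sketch, not from the patch: the two preempt-safety idioms. */
#include <linux/preempt.h>      /* preempt_disable(), preempt_enable() */
#include <linux/smp.h>          /* get_cpu(), put_cpu() */
#include <linux/cpumask.h>      /* cpumask_t, cpumask_of_cpu(), cpus_equal() */
#include <linux/sched.h>        /* struct mm_struct */
#include <asm/system.h>         /* enable_kernel_fp(), disable_kernel_fp() */

static void kernel_fp_section(void)             /* hypothetical helper */
{
        preempt_disable();      /* must not migrate or lose the FPU here */
        enable_kernel_fp();
        /* ... FP work, e.g. the cvt_df()/cvt_fd() calls in align.c ... */
        disable_kernel_fp();
        preempt_enable();       /* rescheduling is safe again */
}

static int mm_is_local(struct mm_struct *mm)    /* hypothetical helper */
{
        cpumask_t tmp;
        int local = 0;
        int cpu = get_cpu();    /* disables preemption, returns this CPU */

        tmp = cpumask_of_cpu(cpu);
        if (cpus_equal(mm->cpu_vm_mask, tmp))
                local = 1;      /* mm has only ever run on this CPU */
        put_cpu();              /* re-enables preemption */
        return local;
}

get_cpu() keeps the task on its CPU for the duration of the test, so the cpu_vm_mask comparison cannot be invalidated by a preemption-driven migration between reading the CPU number and using it.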
@@ -198,7 +198,6 @@ config SCHED_SMT
 config PREEMPT
 	bool "Preemptible Kernel"
-	depends on BROKEN
 	help
 	  This option reduces the latency of the kernel when reacting to
 	  real-time or interactive events by allowing a low priority process to
...
@@ -22,8 +22,6 @@
 #include <asm/cache.h>
 #include <asm/cputable.h>
 
-void disable_kernel_fp(void);	/* asm function from head.S */
-
 struct aligninfo {
 	unsigned char len;
 	unsigned char flags;
@@ -280,8 +278,11 @@ fix_alignment(struct pt_regs *regs)
 	}
 
 	/* Force the fprs into the save area so we can reference them */
-	if ((flags & F) && (regs->msr & MSR_FP))
-		giveup_fpu(current);
+	if (flags & F) {
+		if (!user_mode(regs))
+			return 0;
+		flush_fp_to_thread(current);
+	}
 
 	/* If we are loading, get the data from user space */
 	if (flags & LD) {
@@ -310,9 +311,11 @@ fix_alignment(struct pt_regs *regs)
 	if (flags & F) {
 		if (nb == 4) {
 			/* Doing stfs, have to convert to single */
+			preempt_disable();
 			enable_kernel_fp();
 			cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread.fpscr);
 			disable_kernel_fp();
+			preempt_enable();
 		}
 		else
 			data.dd = current->thread.fpr[reg];
@@ -344,9 +347,11 @@ fix_alignment(struct pt_regs *regs)
 	if (flags & F) {
 		if (nb == 4) {
 			/* Doing lfs, have to convert to double */
+			preempt_disable();
 			enable_kernel_fp();
 			cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread.fpscr);
 			disable_kernel_fp();
+			preempt_enable();
 		}
 		else
 			current->thread.fpr[reg] = data.dd;
...
@@ -48,6 +48,7 @@ int main(void)
 	DEFINE(THREAD_SHIFT, THREAD_SHIFT);
 	DEFINE(THREAD_SIZE, THREAD_SIZE);
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
 
 	/* task_struct->thread */
 	DEFINE(THREAD, offsetof(struct task_struct, thread));
...
@@ -371,15 +371,27 @@ _GLOBAL(ret_from_except)
 	andc	r9,r10,r4	/* clear MSR_EE */
 	mtmsrd	r9,1		/* Update machine state */
 
+#ifdef CONFIG_PREEMPT
+	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
+	ld	r3,_MSR(r1)
+	ld	r4,TI_FLAGS(r9)
+	andi.	r0,r3,MSR_PR
+	mtcrf	1,r4		/* get bottom 4 thread flags into cr7 */
+	bt	31-TIF_NEED_RESCHED,do_resched
+	beq	restore		/* if returning to kernel */
+	bt	31-TIF_SIGPENDING,do_user_signal
+
+#else /* !CONFIG_PREEMPT */
 	ld	r3,_MSR(r1)	/* Returning to user mode? */
 	andi.	r3,r3,MSR_PR
 	beq	restore		/* if not, just restore regs and return */
 
 	/* Check current_thread_info()->flags */
-	clrrdi	r3,r1,THREAD_SHIFT
-	ld	r3,TI_FLAGS(r3)
-	andi.	r0,r3,_TIF_USER_WORK_MASK
+	clrrdi	r9,r1,THREAD_SHIFT
+	ld	r4,TI_FLAGS(r9)
+	andi.	r0,r4,_TIF_USER_WORK_MASK
 	bne	do_work
+#endif
 
 	addi	r0,r1,INT_FRAME_SIZE	/* size of frame */
 	ld	r4,PACACURRENT(r13)
@@ -452,18 +464,47 @@ restore:
 	rfid
 
+#ifndef CONFIG_PREEMPT
 	/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
 do_work:
-	/* Enable interrupts */
-	mtmsrd	r10,1
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq	do_user_signal
 
-	andi.	r0,r3,_TIF_NEED_RESCHED
-	beq	1f
+#else /* CONFIG_PREEMPT */
+do_resched:
+	bne	do_user_resched	/* if returning to user mode */
+
+	/* Check that preempt_count() == 0 and interrupts are enabled */
+	lwz	r8,TI_PREEMPT(r9)
+	cmpwi	cr1,r8,0
+#ifdef CONFIG_PPC_ISERIES
+	ld	r0,SOFTE(r1)
+	cmpdi	r0,0
+#else
+	andi.	r0,r3,MSR_EE
+#endif
+	crandc	eq,cr1*4+eq,eq
+	bne	restore
+	/* here we are preempting the current task */
+1:	lis	r0,PREEMPT_ACTIVE@h
+	stw	r0,TI_PREEMPT(r9)
+#ifdef CONFIG_PPC_ISERIES
+	li	r0,1
+	stb	r0,PACAPROCENABLED(r13)
+#endif
+#endif /* CONFIG_PREEMPT */
+
+do_user_resched:
+	mtmsrd	r10,1		/* reenable interrupts */
 	bl	.schedule
+#ifdef CONFIG_PREEMPT
+	clrrdi	r9,r1,THREAD_SHIFT
+	li	r0,0
+	stw	r0,TI_PREEMPT(r9)
+#endif
 	b	.ret_from_except
 
-1:	andi.	r0,r3,_TIF_SIGPENDING
-	beq	.ret_from_except
+do_user_signal:
+	mtmsrd	r10,1
 	li	r3,0
 	addi	r4,r1,STACK_FRAME_OVERHEAD
 	bl	.do_signal
...
@@ -65,8 +65,43 @@ struct mm_struct ioremap_mm = {
 	.page_table_lock = SPIN_LOCK_UNLOCKED,
 };
 
+/*
+ * Make sure the floating-point register state in the
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_fp_to_thread(struct task_struct *tsk)
+{
+	if (tsk->thread.regs) {
+		/*
+		 * We need to disable preemption here because if we didn't,
+		 * another process could get scheduled after the regs->msr
+		 * test but before we have finished saving the FP registers
+		 * to the thread_struct.  That process could take over the
+		 * FPU, and then when we get scheduled again we would store
+		 * bogus values for the remaining FP registers.
+		 */
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_FP) {
+#ifdef CONFIG_SMP
+			/*
+			 * This should only ever be called for current or
+			 * for a stopped child process.  Since we save away
+			 * the FP register state on context switch on SMP,
+			 * there is something wrong if a stopped child appears
+			 * to still have its FP state in the CPU registers.
+			 */
+			BUG_ON(tsk != current);
+#endif
+			giveup_fpu(current);
+		}
+		preempt_enable();
+	}
+}
+
 void enable_kernel_fp(void)
 {
+	WARN_ON(preemptible());
+
 #ifdef CONFIG_SMP
 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
 		giveup_fpu(current);
@@ -80,12 +115,9 @@ EXPORT_SYMBOL(enable_kernel_fp);
 
 int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
 {
-	struct pt_regs *regs = tsk->thread.regs;
-
-	if (!regs)
+	if (!tsk->thread.regs)
 		return 0;
-	if (tsk == current && (regs->msr & MSR_FP))
-		giveup_fpu(current);
+
+	flush_fp_to_thread(current);
 
 	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
@@ -96,6 +128,8 @@ int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
 
 void enable_kernel_altivec(void)
 {
+	WARN_ON(preemptible());
+
 #ifdef CONFIG_SMP
 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
 		giveup_altivec(current);
@@ -107,10 +141,29 @@ void enable_kernel_altivec(void)
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
 
+/*
+ * Make sure the VMX/Altivec register state in the
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_altivec_to_thread(struct task_struct *tsk)
+{
+#ifdef CONFIG_ALTIVEC
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_VEC) {
+#ifdef CONFIG_SMP
+			BUG_ON(tsk != current);
+#endif
+			giveup_altivec(current);
+		}
+		preempt_enable();
+	}
+#endif
+}
+
 int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
 {
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
+	flush_altivec_to_thread(current);
 
 	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
 	return 1;
 }
@@ -245,16 +298,8 @@ release_thread(struct task_struct *t)
  */
 void prepare_to_copy(struct task_struct *tsk)
 {
-	struct pt_regs *regs = tsk->thread.regs;
-
-	if (regs == NULL)
-		return;
-
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
+	flush_fp_to_thread(current);
+	flush_altivec_to_thread(current);
 }
 
 /*
@@ -439,12 +484,8 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename))
 		goto out;
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
+	flush_fp_to_thread(current);
+	flush_altivec_to_thread(current);
 	error = do_execve(filename, (char __user * __user *) a1,
 			  (char __user * __user *) a2, regs);
...
@@ -119,8 +119,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
 		if (index < PT_FPR0) {
 			tmp = get_reg(child, (int)index);
 		} else {
-			if (child->thread.regs->msr & MSR_FP)
-				giveup_fpu(child);
+			flush_fp_to_thread(child);
 			tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
 		}
 		ret = put_user(tmp,(unsigned long __user *) data);
@@ -152,8 +151,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
 		if (index < PT_FPR0) {
 			ret = put_reg(child, index, data);
 		} else {
-			if (child->thread.regs->msr & MSR_FP)
-				giveup_fpu(child);
+			flush_fp_to_thread(child);
 			((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
 			ret = 0;
 		}
@@ -245,8 +243,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
 		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
 		unsigned long __user *tmp = (unsigned long __user *)addr;
 
-		if (child->thread.regs->msr & MSR_FP)
-			giveup_fpu(child);
+		flush_fp_to_thread(child);
 
 		for (i = 0; i < 32; i++) {
 			ret = put_user(*reg, tmp);
@@ -263,8 +260,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
 		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
 		unsigned long __user *tmp = (unsigned long __user *)addr;
 
-		if (child->thread.regs->msr & MSR_FP)
-			giveup_fpu(child);
+		flush_fp_to_thread(child);
 
 		for (i = 0; i < 32; i++) {
 			ret = get_user(*reg, tmp);
...
@@ -136,8 +136,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
 		if (index < PT_FPR0) {
 			tmp = get_reg(child, index);
 		} else {
-			if (child->thread.regs->msr & MSR_FP)
-				giveup_fpu(child);
+			flush_fp_to_thread(child);
 			/*
 			 * the user space code considers the floating point
 			 * to be an array of unsigned int (32 bits) - the
@@ -179,8 +178,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
 			break;
 		if (numReg >= PT_FPR0) {
-			if (child->thread.regs->msr & MSR_FP)
-				giveup_fpu(child);
+			flush_fp_to_thread(child);
 			tmp = ((unsigned long int *)child->thread.fpr)[numReg - PT_FPR0];
 		} else { /* register within PT_REGS struct */
 			tmp = get_reg(child, numReg);
@@ -244,8 +242,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
 		if (index < PT_FPR0) {
 			ret = put_reg(child, index, data);
 		} else {
-			if (child->thread.regs->msr & MSR_FP)
-				giveup_fpu(child);
+			flush_fp_to_thread(child);
 			/*
 			 * the user space code considers the floating point
 			 * to be an array of unsigned int (32 bits) - the
@@ -283,8 +280,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
 		    || ((numReg > PT_CCR) && (numReg < PT_FPR0)))
 			break;
 		if (numReg >= PT_FPR0) {
-			if (child->thread.regs->msr & MSR_FP)
-				giveup_fpu(child);
+			flush_fp_to_thread(child);
 		}
 		if (numReg == PT_MSR)
 			data = (data & MSR_DEBUGCHANGE)
@@ -379,8 +375,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
 		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
 		unsigned int __user *tmp = (unsigned int __user *)addr;
 
-		if (child->thread.regs->msr & MSR_FP)
-			giveup_fpu(child);
+		flush_fp_to_thread(child);
 
 		for (i = 0; i < 32; i++) {
 			ret = put_user(*reg, tmp);
@@ -397,8 +392,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
 		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
 		unsigned int __user *tmp = (unsigned int __user *)addr;
 
-		if (child->thread.regs->msr & MSR_FP)
-			giveup_fpu(child);
+		flush_fp_to_thread(child);
 
 		for (i = 0; i < 32; i++) {
 			ret = get_user(*reg, tmp);
...
@@ -68,10 +68,11 @@ char rtas_data_buf[RTAS_DATA_BUF_SIZE]__page_aligned;
 void
 call_rtas_display_status(char c)
 {
-	struct rtas_args *args = &(get_paca()->xRtas);
+	struct rtas_args *args;
 	unsigned long s;
 
 	spin_lock_irqsave(&rtas.lock, s);
+	args = &(get_paca()->xRtas);
 
 	args->token = 10;
 	args->nargs = 1;
@@ -145,7 +146,7 @@ rtas_call(int token, int nargs, int nret,
 	va_list list;
 	int i, logit = 0;
 	unsigned long s;
-	struct rtas_args *rtas_args = &(get_paca()->xRtas);
+	struct rtas_args *rtas_args;
 	long ret;
 
 	PPCDBG(PPCDBG_RTAS, "Entering rtas_call\n");
@@ -158,6 +159,7 @@ rtas_call(int token, int nargs, int nret,
 
 	/* Gotta do something different here, use global lock for now... */
 	spin_lock_irqsave(&rtas.lock, s);
+	rtas_args = &(get_paca()->xRtas);
 
 	rtas_args->token = token;
 	rtas_args->nargs = nargs;
...
@@ -131,8 +131,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 #endif
 	long err = 0;
 
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
+	flush_fp_to_thread(current);
 
 	/* Make sure signal doesn't get spurrious FP exceptions */
 	current->thread.fpscr = 0;
@@ -141,9 +140,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	err |= __put_user(v_regs, &sc->v_regs);
 
 	/* save altivec registers */
 	if (current->thread.used_vr) {
-		if (regs->msr & MSR_VEC)
-			giveup_altivec(current);
+		flush_altivec_to_thread(current);
 		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
 		err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128));
 		/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
...
@@ -130,11 +130,10 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
 {
 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
 	int i, err = 0;
 
 	/* Make sure floating point registers are stored in regs */
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
+	flush_fp_to_thread(current);
 
 	/* save general and floating-point registers */
 	for (i = 0; i <= PT_RESULT; i ++)
 		err |= __put_user((unsigned int)gregs[i], &frame->mc_gregs[i]);
@@ -148,8 +147,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
 #ifdef CONFIG_ALTIVEC
 	/* save altivec registers */
 	if (current->thread.used_vr) {
-		if (regs->msr & MSR_VEC)
-			giveup_altivec(current);
+		flush_altivec_to_thread(current);
 		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
 				   ELF_NVRREG32 * sizeof(vector128)))
 			return 1;
...
@@ -617,12 +617,8 @@ long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a2,
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename))
 		goto out;
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
+	flush_fp_to_thread(current);
+	flush_altivec_to_thread(current);
 	error = compat_do_execve(filename, compat_ptr(a1), compat_ptr(a2), regs);
...
@@ -308,8 +308,7 @@ static void parse_fpe(struct pt_regs *regs)
 	siginfo_t info;
 	unsigned long fpscr;
 
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
+	flush_fp_to_thread(current);
 
 	fpscr = current->thread.fpscr;
@@ -531,8 +530,7 @@ AlignmentException(struct pt_regs *regs)
 void
 AltivecAssistException(struct pt_regs *regs)
 {
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
+	flush_altivec_to_thread(current);
 	/* XXX quick hack for now: set the non-Java bit in the VSCR */
 	current->thread.vscr.u[3] |= 0x10000;
 }
...
@@ -251,6 +251,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	struct mm_struct *mm;
 	pte_t *ptep;
 	int ret;
+	int cpu;
 	int user_region = 0;
 	int local = 0;
 	cpumask_t tmp;
@@ -302,7 +303,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	if (pgdir == NULL)
 		return 1;
 
-	tmp = cpumask_of_cpu(smp_processor_id());
+	cpu = get_cpu();
+	tmp = cpumask_of_cpu(cpu);
 	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
 		local = 1;
 
@@ -311,11 +313,13 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		ret = hash_huge_page(mm, access, ea, vsid, local);
 	else {
 		ptep = find_linux_pte(pgdir, ea);
-		if (ptep == NULL)
+		if (ptep == NULL) {
+			put_cpu();
 			return 1;
+		}
 		ret = __hash_page(ea, access, vsid, ptep, trap, local);
 	}
+	put_cpu();
 
 	return ret;
 }
...
@@ -375,6 +375,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 	unsigned long addr;
 	hugepte_t *ptep;
 	struct page *page;
+	int cpu;
 	int local = 0;
 	cpumask_t tmp;
 
@@ -383,7 +384,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 	BUG_ON((end % HPAGE_SIZE) != 0);
 
 	/* XXX are there races with checking cpu_vm_mask? - Anton */
-	tmp = cpumask_of_cpu(smp_processor_id());
+	cpu = get_cpu();
+	tmp = cpumask_of_cpu(cpu);
 	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
 		local = 1;
 
@@ -406,6 +408,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 		put_page(page);
 	}
+	put_cpu();
 
 	mm->rss -= (end - start) >> PAGE_SHIFT;
 }
...
@@ -764,6 +764,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	void *pgdir;
 	pte_t *ptep;
 	int local = 0;
+	int cpu;
 	cpumask_t tmp;
 
 	/* handle i-cache coherency */
@@ -794,12 +795,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	vsid = get_vsid(vma->vm_mm->context.id, ea);
 
-	tmp = cpumask_of_cpu(smp_processor_id());
+	cpu = get_cpu();
+	tmp = cpumask_of_cpu(cpu);
 	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
 		local = 1;
 
 	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
 		    0x300, local);
+	put_cpu();
 }
 
 void * reserve_phb_iospace(unsigned long size)
...
@@ -91,12 +91,15 @@ void hpte_update(pte_t *ptep, unsigned long pte, int wrprot)
 void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 {
 	int i;
-	cpumask_t tmp = cpumask_of_cpu(smp_processor_id());
+	int cpu;
+	cpumask_t tmp;
 	int local = 0;
 
 	BUG_ON(in_interrupt());
 
+	cpu = get_cpu();
 	i = batch->index;
+	tmp = cpumask_of_cpu(cpu);
 	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
 		local = 1;
 
@@ -106,6 +109,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 	else
 		flush_hash_range(batch->context, i, local);
 	batch->index = 0;
+	put_cpu();
 }
 
 #ifdef CONFIG_SMP
...
@@ -82,9 +82,11 @@ typedef struct {
 #ifdef CONFIG_PREEMPT
 # define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
+# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
 # define in_atomic()	(preempt_count() != 0)
+# define preemptible()	0
 # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 
 #define irq_exit()	\
...
@@ -111,6 +111,8 @@ extern void flush_instruction_cache(void);
 extern int _get_PVR(void);
 extern void giveup_fpu(struct task_struct *);
 extern void disable_kernel_fp(void);
+extern void flush_fp_to_thread(struct task_struct *);
+extern void flush_altivec_to_thread(struct task_struct *);
 extern void enable_kernel_fp(void);
 extern void giveup_altivec(struct task_struct *);
 extern void disable_kernel_altivec(void);
...
@@ -13,6 +13,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/config.h>
 #include <asm/processor.h>
+#include <asm/page.h>
 #include <linux/stringify.h>
 
 /*
@@ -23,7 +24,7 @@ struct thread_info {
 	struct exec_domain *exec_domain;	/* execution domain */
 	unsigned long flags;			/* low level flags */
 	int cpu;				/* cpu we're on */
-	int preempt_count;			/* not used at present */
+	int preempt_count;
 	struct restart_block restart_block;
 };
@@ -73,7 +74,7 @@ struct thread_info {
 static inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
-	__asm__("clrrdi %0,1,14" : "=r"(ti));
+	__asm__("clrrdi %0,1,%1" : "=r"(ti) : "i" (THREAD_SHIFT));
 	return ti;
 }
@@ -83,6 +84,8 @@ static inline struct thread_info *current_thread_info(void)
 
 /*
  * thread information flag bit numbers
+ * N.B. If TIF_SIGPENDING or TIF_NEED_RESCHED are changed
+ * to be >= 4, code in entry.S will need to be changed.
  */
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
...