Commit 23932693 authored by Paul Mackerras, committed by Linus Torvalds

[PATCH] ppc64: Implement CONFIG_PREEMPT

This implements CONFIG_PREEMPT for ppc64.  Aside from the entry.S
changes to check the _TIF_NEED_RESCHED bit when returning from an
exception, there are various changes to make the ppc64-specific code
preempt-safe, mostly adding preempt_enable/disable or get_cpu/put_cpu
calls where needed.  I have been using this on my desktop G5 for the
last week without problems.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent adf791bd
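The recurring change in the C files below is to stop open-coding the "test MSR_FP/MSR_VEC, then giveup_fpu()/giveup_altivec()" sequence in preemptible code and to route it through the new flush_fp_to_thread()/flush_altivec_to_thread() helpers, which disable preemption around the check-and-save. A minimal before/after sketch of that caller-side pattern (illustrative only, not a hunk from this patch):

/* Old pattern: preempt-unsafe.  If the task is preempted between the
 * MSR_FP test and giveup_fpu(), another task can take over the FPU and
 * the state saved into the thread_struct ends up stale. */
if (regs->msr & MSR_FP)
	giveup_fpu(current);

/* New pattern: the helper performs the same test and save with
 * preemption disabled, so the sequence is atomic w.r.t. scheduling. */
flush_fp_to_thread(current);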
......@@ -198,7 +198,6 @@ config SCHED_SMT
config PREEMPT
bool "Preemptible Kernel"
depends on BROKEN
help
This option reduces the latency of the kernel when reacting to
real-time or interactive events by allowing a low priority process to
......
......@@ -22,8 +22,6 @@
#include <asm/cache.h>
#include <asm/cputable.h>
void disable_kernel_fp(void); /* asm function from head.S */
struct aligninfo {
unsigned char len;
unsigned char flags;
......@@ -280,8 +278,11 @@ fix_alignment(struct pt_regs *regs)
}
/* Force the fprs into the save area so we can reference them */
if ((flags & F) && (regs->msr & MSR_FP))
giveup_fpu(current);
if (flags & F) {
if (!user_mode(regs))
return 0;
flush_fp_to_thread(current);
}
/* If we are loading, get the data from user space */
if (flags & LD) {
......@@ -310,9 +311,11 @@ fix_alignment(struct pt_regs *regs)
if (flags & F) {
if (nb == 4) {
/* Doing stfs, have to convert to single */
preempt_disable();
enable_kernel_fp();
cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread.fpscr);
disable_kernel_fp();
preempt_enable();
}
else
data.dd = current->thread.fpr[reg];
......@@ -344,9 +347,11 @@ fix_alignment(struct pt_regs *regs)
if (flags & F) {
if (nb == 4) {
/* Doing lfs, have to convert to double */
preempt_disable();
enable_kernel_fp();
cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread.fpscr);
disable_kernel_fp();
preempt_enable();
}
else
current->thread.fpr[reg] = data.dd;
......
......@@ -48,6 +48,7 @@ int main(void)
DEFINE(THREAD_SHIFT, THREAD_SHIFT);
DEFINE(THREAD_SIZE, THREAD_SIZE);
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
/* task_struct->thread */
DEFINE(THREAD, offsetof(struct task_struct, thread));
......
......@@ -371,15 +371,27 @@ _GLOBAL(ret_from_except)
andc r9,r10,r4 /* clear MSR_EE */
mtmsrd r9,1 /* Update machine state */
#ifdef CONFIG_PREEMPT
clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
ld r3,_MSR(r1)
ld r4,TI_FLAGS(r9)
andi. r0,r3,MSR_PR
mtcrf 1,r4 /* get bottom 4 thread flags into cr7 */
bt 31-TIF_NEED_RESCHED,do_resched
beq restore /* if returning to kernel */
bt 31-TIF_SIGPENDING,do_user_signal
#else /* !CONFIG_PREEMPT */
ld r3,_MSR(r1) /* Returning to user mode? */
andi. r3,r3,MSR_PR
beq restore /* if not, just restore regs and return */
/* Check current_thread_info()->flags */
clrrdi r3,r1,THREAD_SHIFT
ld r3,TI_FLAGS(r3)
andi. r0,r3,_TIF_USER_WORK_MASK
clrrdi r9,r1,THREAD_SHIFT
ld r4,TI_FLAGS(r9)
andi. r0,r4,_TIF_USER_WORK_MASK
bne do_work
#endif
addi r0,r1,INT_FRAME_SIZE /* size of frame */
ld r4,PACACURRENT(r13)
......@@ -452,18 +464,47 @@ restore:
rfid
#ifndef CONFIG_PREEMPT
/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
/* Enable interrupts */
mtmsrd r10,1
andi. r0,r4,_TIF_NEED_RESCHED
beq do_user_signal
#else /* CONFIG_PREEMPT */
do_resched:
bne do_user_resched /* if returning to user mode */
/* Check that preempt_count() == 0 and interrupts are enabled */
lwz r8,TI_PREEMPT(r9)
cmpwi cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
ld r0,SOFTE(r1)
cmpdi r0,0
#else
andi. r0,r3,MSR_EE
#endif
crandc eq,cr1*4+eq,eq
bne restore
/* here we are preempting the current task */
1: lis r0,PREEMPT_ACTIVE@h
stw r0,TI_PREEMPT(r9)
#ifdef CONFIG_PPC_ISERIES
li r0,1
stb r0,PACAPROCENABLED(r13)
#endif
#endif /* CONFIG_PREEMPT */
andi. r0,r3,_TIF_NEED_RESCHED
beq 1f
do_user_resched:
mtmsrd r10,1 /* reenable interrupts */
bl .schedule
#ifdef CONFIG_PREEMPT
clrrdi r9,r1,THREAD_SHIFT
li r0,0
stw r0,TI_PREEMPT(r9)
#endif
b .ret_from_except
1: andi. r0,r3,_TIF_SIGPENDING
beq .ret_from_except
do_user_signal:
mtmsrd r10,1
li r3,0
addi r4,r1,STACK_FRAME_OVERHEAD
bl .do_signal
......
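Read as C, the CONFIG_PREEMPT return path added above behaves roughly as follows. This is a hedged sketch of the assembly's control flow, not code from the patch; irqs_enabled_in_frame() is a stand-in for the MSR_EE test (or the SOFTE word on iSeries):

/* Illustrative sketch of ret_from_except with CONFIG_PREEMPT. */
static void ret_from_except_sketch(struct pt_regs *regs)
{
	if (need_resched()) {
		if (user_mode(regs)) {
			schedule();		/* ordinary user-return reschedule */
		} else if (current_thread_info()->preempt_count == 0 &&
			   irqs_enabled_in_frame(regs)) {
			/* preempt the kernel: mark it so schedule() knows */
			current_thread_info()->preempt_count = PREEMPT_ACTIVE;
			local_irq_enable();	/* asm: mtmsrd r10,1 */
			schedule();
			current_thread_info()->preempt_count = 0;
			/* the asm then branches back to ret_from_except */
		}
	} else if (user_mode(regs) && signal_pending(current)) {
		do_signal(NULL, regs);		/* li r3,0; bl .do_signal */
	}
}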
......@@ -65,8 +65,43 @@ struct mm_struct ioremap_mm = {
.page_table_lock = SPIN_LOCK_UNLOCKED,
};
/*
* Make sure the floating-point register state in
* the thread_struct is up to date for task tsk.
*/
void flush_fp_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
/*
* We need to disable preemption here because if we didn't,
* another process could get scheduled after the regs->msr
* test but before we have finished saving the FP registers
* to the thread_struct. That process could take over the
* FPU, and then when we get scheduled again we would store
* bogus values for the remaining FP registers.
*/
preempt_disable();
if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
/*
* This should only ever be called for current or
* for a stopped child process. Since we save away
* the FP register state on context switch on SMP,
* there is something wrong if a stopped child appears
* to still have its FP state in the CPU registers.
*/
BUG_ON(tsk != current);
#endif
giveup_fpu(current);
}
preempt_enable();
}
}
void enable_kernel_fp(void)
{
WARN_ON(preemptible());
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
giveup_fpu(current);
......@@ -80,12 +115,9 @@ EXPORT_SYMBOL(enable_kernel_fp);
int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
struct pt_regs *regs = tsk->thread.regs;
if (!regs)
if (!tsk->thread.regs)
return 0;
if (tsk == current && (regs->msr & MSR_FP))
giveup_fpu(current);
flush_fp_to_thread(current);
memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
......@@ -96,6 +128,8 @@ int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
void enable_kernel_altivec(void)
{
WARN_ON(preemptible());
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
giveup_altivec(current);
......@@ -107,10 +141,29 @@ void enable_kernel_altivec(void)
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
* Make sure the VMX/Altivec register state in
* the thread_struct is up to date for task tsk.
*/
void flush_altivec_to_thread(struct task_struct *tsk)
{
#ifdef CONFIG_ALTIVEC
if (tsk->thread.regs) {
preempt_disable();
if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
BUG_ON(tsk != current);
#endif
giveup_altivec(current);
}
preempt_enable();
}
#endif
}
int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
if (regs->msr & MSR_VEC)
giveup_altivec(current);
flush_altivec_to_thread(current);
memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
return 1;
}
......@@ -245,16 +298,8 @@ release_thread(struct task_struct *t)
*/
void prepare_to_copy(struct task_struct *tsk)
{
struct pt_regs *regs = tsk->thread.regs;
if (regs == NULL)
return;
if (regs->msr & MSR_FP)
giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
if (regs->msr & MSR_VEC)
giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
flush_fp_to_thread(current);
flush_altivec_to_thread(current);
}
/*
......@@ -439,12 +484,8 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
if (regs->msr & MSR_FP)
giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
if (regs->msr & MSR_VEC)
giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
flush_fp_to_thread(current);
flush_altivec_to_thread(current);
error = do_execve(filename, (char __user * __user *) a1,
(char __user * __user *) a2, regs);
......
......@@ -119,8 +119,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
if (index < PT_FPR0) {
tmp = get_reg(child, (int)index);
} else {
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
flush_fp_to_thread(child);
tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
}
ret = put_user(tmp,(unsigned long __user *) data);
......@@ -152,8 +151,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
if (index < PT_FPR0) {
ret = put_reg(child, index, data);
} else {
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
flush_fp_to_thread(child);
((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
ret = 0;
}
......@@ -245,8 +243,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
unsigned long __user *tmp = (unsigned long __user *)addr;
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
flush_fp_to_thread(child);
for (i = 0; i < 32; i++) {
ret = put_user(*reg, tmp);
......@@ -263,8 +260,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
unsigned long __user *tmp = (unsigned long __user *)addr;
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
flush_fp_to_thread(child);
for (i = 0; i < 32; i++) {
ret = get_user(*reg, tmp);
......
......@@ -136,8 +136,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
if (index < PT_FPR0) {
tmp = get_reg(child, index);
} else {
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
flush_fp_to_thread(child);
/*
* the user space code considers the floating point
* to be an array of unsigned int (32 bits) - the
......@@ -179,8 +178,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
break;
if (numReg >= PT_FPR0) {
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
flush_fp_to_thread(child);
tmp = ((unsigned long int *)child->thread.fpr)[numReg - PT_FPR0];
} else { /* register within PT_REGS struct */
tmp = get_reg(child, numReg);
......@@ -244,8 +242,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
if (index < PT_FPR0) {
ret = put_reg(child, index, data);
} else {
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
flush_fp_to_thread(child);
/*
* the user space code considers the floating point
* to be an array of unsigned int (32 bits) - the
......@@ -283,8 +280,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
|| ((numReg > PT_CCR) && (numReg < PT_FPR0)))
break;
if (numReg >= PT_FPR0) {
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
flush_fp_to_thread(child);
}
if (numReg == PT_MSR)
data = (data & MSR_DEBUGCHANGE)
......@@ -379,8 +375,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
unsigned int __user *tmp = (unsigned int __user *)addr;
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
flush_fp_to_thread(child);
for (i = 0; i < 32; i++) {
ret = put_user(*reg, tmp);
......@@ -397,8 +392,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
unsigned int __user *tmp = (unsigned int __user *)addr;
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
flush_fp_to_thread(child);
for (i = 0; i < 32; i++) {
ret = get_user(*reg, tmp);
......
......@@ -68,10 +68,11 @@ char rtas_data_buf[RTAS_DATA_BUF_SIZE]__page_aligned;
void
call_rtas_display_status(char c)
{
struct rtas_args *args = &(get_paca()->xRtas);
struct rtas_args *args;
unsigned long s;
spin_lock_irqsave(&rtas.lock, s);
args = &(get_paca()->xRtas);
args->token = 10;
args->nargs = 1;
......@@ -145,7 +146,7 @@ rtas_call(int token, int nargs, int nret,
va_list list;
int i, logit = 0;
unsigned long s;
struct rtas_args *rtas_args = &(get_paca()->xRtas);
struct rtas_args *rtas_args;
long ret;
PPCDBG(PPCDBG_RTAS, "Entering rtas_call\n");
......@@ -158,6 +159,7 @@ rtas_call(int token, int nargs, int nret,
/* Gotta do something different here, use global lock for now... */
spin_lock_irqsave(&rtas.lock, s);
rtas_args = &(get_paca()->xRtas);
rtas_args->token = token;
rtas_args->nargs = nargs;
......
......@@ -131,8 +131,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
#endif
long err = 0;
if (regs->msr & MSR_FP)
giveup_fpu(current);
flush_fp_to_thread(current);
/* Make sure signal doesn't get spurious FP exceptions */
current->thread.fpscr = 0;
......@@ -141,9 +140,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
err |= __put_user(v_regs, &sc->v_regs);
/* save altivec registers */
if (current->thread.used_vr) {
if (regs->msr & MSR_VEC)
giveup_altivec(current);
if (current->thread.used_vr) {
flush_altivec_to_thread(current);
/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128));
/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
......
......@@ -130,11 +130,10 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
{
elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
int i, err = 0;
/* Make sure floating point registers are stored in regs */
if (regs->msr & MSR_FP)
giveup_fpu(current);
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
/* save general and floating-point registers */
for (i = 0; i <= PT_RESULT; i ++)
err |= __put_user((unsigned int)gregs[i], &frame->mc_gregs[i]);
......@@ -148,8 +147,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (current->thread.used_vr) {
if (regs->msr & MSR_VEC)
giveup_altivec(current);
flush_altivec_to_thread(current);
if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
ELF_NVRREG32 * sizeof(vector128)))
return 1;
......
......@@ -617,12 +617,8 @@ long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a2,
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
if (regs->msr & MSR_FP)
giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
if (regs->msr & MSR_VEC)
giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
flush_fp_to_thread(current);
flush_altivec_to_thread(current);
error = compat_do_execve(filename, compat_ptr(a1), compat_ptr(a2), regs);
......
......@@ -308,8 +308,7 @@ static void parse_fpe(struct pt_regs *regs)
siginfo_t info;
unsigned long fpscr;
if (regs->msr & MSR_FP)
giveup_fpu(current);
flush_fp_to_thread(current);
fpscr = current->thread.fpscr;
......@@ -531,8 +530,7 @@ AlignmentException(struct pt_regs *regs)
void
AltivecAssistException(struct pt_regs *regs)
{
if (regs->msr & MSR_VEC)
giveup_altivec(current);
flush_altivec_to_thread(current);
/* XXX quick hack for now: set the non-Java bit in the VSCR */
current->thread.vscr.u[3] |= 0x10000;
}
......
......@@ -251,6 +251,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
struct mm_struct *mm;
pte_t *ptep;
int ret;
int cpu;
int user_region = 0;
int local = 0;
cpumask_t tmp;
......@@ -302,7 +303,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
if (pgdir == NULL)
return 1;
tmp = cpumask_of_cpu(smp_processor_id());
cpu = get_cpu();
tmp = cpumask_of_cpu(cpu);
if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
local = 1;
......@@ -311,11 +313,13 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
ret = hash_huge_page(mm, access, ea, vsid, local);
else {
ptep = find_linux_pte(pgdir, ea);
if (ptep == NULL)
if (ptep == NULL) {
put_cpu();
return 1;
}
ret = __hash_page(ea, access, vsid, ptep, trap, local);
}
put_cpu();
return ret;
}
......
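This hunk, and the hugepage, update_mmu_cache() and __flush_tlb_pending() hunks that follow, all make the same substitution: smp_processor_id() is only stable while preemption is off, so the cpu number is now obtained with get_cpu(), which disables preemption until the matching put_cpu(). A sketch of the pattern (illustrative only, with the actual flush call standing in for whatever each site does):

/* get_cpu() disables preemption and returns this cpu's id; the
 * cpu_vm_mask comparison that picks a local vs. global hash/TLB flush
 * is only meaningful while the task cannot migrate. */
int cpu = get_cpu();
cpumask_t tmp = cpumask_of_cpu(cpu);
int local = cpus_equal(mm->cpu_vm_mask, tmp);

ret = __hash_page(ea, access, vsid, ptep, trap, local);
put_cpu();			/* re-enable preemption */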
......@@ -375,6 +375,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
unsigned long addr;
hugepte_t *ptep;
struct page *page;
int cpu;
int local = 0;
cpumask_t tmp;
......@@ -383,7 +384,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
BUG_ON((end % HPAGE_SIZE) != 0);
/* XXX are there races with checking cpu_vm_mask? - Anton */
tmp = cpumask_of_cpu(smp_processor_id());
cpu = get_cpu();
tmp = cpumask_of_cpu(cpu);
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
local = 1;
......@@ -406,6 +408,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
put_page(page);
}
put_cpu();
mm->rss -= (end - start) >> PAGE_SHIFT;
}
......
......@@ -764,6 +764,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
void *pgdir;
pte_t *ptep;
int local = 0;
int cpu;
cpumask_t tmp;
/* handle i-cache coherency */
......@@ -794,12 +795,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
vsid = get_vsid(vma->vm_mm->context.id, ea);
tmp = cpumask_of_cpu(smp_processor_id());
cpu = get_cpu();
tmp = cpumask_of_cpu(cpu);
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
local = 1;
__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
0x300, local);
put_cpu();
}
void * reserve_phb_iospace(unsigned long size)
......
......@@ -91,12 +91,15 @@ void hpte_update(pte_t *ptep, unsigned long pte, int wrprot)
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
int i;
cpumask_t tmp = cpumask_of_cpu(smp_processor_id());
int cpu;
cpumask_t tmp;
int local = 0;
BUG_ON(in_interrupt());
cpu = get_cpu();
i = batch->index;
tmp = cpumask_of_cpu(cpu);
if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
local = 1;
......@@ -106,6 +109,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
else
flush_hash_range(batch->context, i, local);
batch->index = 0;
put_cpu();
}
#ifdef CONFIG_SMP
......
......@@ -82,9 +82,11 @@ typedef struct {
#ifdef CONFIG_PREEMPT
# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
# define preemptible() (preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define in_atomic() (preempt_count() != 0)
# define preemptible() 0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
#define irq_exit() \
......
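The new WARN_ON(preemptible()) checks in enable_kernel_fp()/enable_kernel_altivec() rely on the definition above: with CONFIG_PREEMPT, preemptible() is true only when the preempt count is zero and interrupts are enabled, and without CONFIG_PREEMPT it is a constant 0 so the warning compiles away. A sketch of the calling convention this enforces, mirroring the alignment.c hunks (illustrative only):

preempt_disable();		/* caller pins itself to this cpu's FPU ... */
enable_kernel_fp();		/* ... so WARN_ON(preemptible()) stays quiet */
/* use the FPU here, e.g. cvt_df()/cvt_fd() for the stfs/lfs fixups */
disable_kernel_fp();
preempt_enable();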
......@@ -111,6 +111,8 @@ extern void flush_instruction_cache(void);
extern int _get_PVR(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void flush_altivec_to_thread(struct task_struct *);
extern void enable_kernel_fp(void);
extern void giveup_altivec(struct task_struct *);
extern void disable_kernel_altivec(void);
......
......@@ -13,6 +13,7 @@
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <linux/stringify.h>
/*
......@@ -23,7 +24,7 @@ struct thread_info {
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
int cpu; /* cpu we're on */
int preempt_count; /* not used at present */
int preempt_count;
struct restart_block restart_block;
};
......@@ -73,7 +74,7 @@ struct thread_info {
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
__asm__("clrrdi %0,1,14" : "=r"(ti));
__asm__("clrrdi %0,1,%1" : "=r"(ti) : "i" (THREAD_SHIFT));
return ti;
}
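The inline asm above now derives the mask from THREAD_SHIFT instead of the hard-coded 14, so it stays correct if THREAD_SIZE changes. A rough C equivalent of what clrrdi computes (a sketch, not part of the patch):

/* clrrdi %0,1,THREAD_SHIFT clears the low THREAD_SHIFT bits of r1, the
 * kernel stack pointer; the thread_info sits at the base of the
 * THREAD_SIZE-aligned stack, so the masked value is its address. */
static inline struct thread_info *current_thread_info_sketch(void)
{
	unsigned long sp;

	asm ("mr %0,1" : "=r" (sp));	/* copy r1 (stack pointer) into sp */
	return (struct thread_info *)(sp & ~((1UL << THREAD_SHIFT) - 1));
}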
......@@ -83,6 +84,8 @@ static inline struct thread_info *current_thread_info(void)
/*
* thread information flag bit numbers
* N.B. If TIF_SIGPENDING or TIF_NEED_RESCHED are changed
* to be >= 4, code in entry.S will need to be changed.
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
......