Commit 3ca7a440 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Always provide p->on_cpu

Always provide p->on_cpu so that we can determine if it's on a CPU
without having to lock the rq.
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152728.785452014@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 184748cc
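
With ->on_cpu now defined whenever CONFIG_SMP is set (instead of only under
__ARCH_WANT_UNLOCKED_CTXSW), a caller can tell whether a task is currently
executing on a CPU with a plain load, without taking the remote runqueue lock.
A minimal sketch of that idea, assuming CONFIG_SMP and this patch applied; the
helper name is illustrative and not something the patch adds:

	/* Sketch only: lock-free "is this task on a CPU right now?" query. */
	static inline int task_is_on_cpu(struct task_struct *p)
	{
		/* Set in prepare_lock_switch(), cleared in finish_lock_switch(). */
		return p->on_cpu;
	}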
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1200,9 +1200,7 @@ struct task_struct {
 	int lock_depth;		/* BKL lock depth */
 
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-	int oncpu;
-#endif
+	int on_cpu;
 #endif
 
 	int prio, static_prio, normal_prio;
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -838,18 +838,39 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
 	return rq->curr == p;
 }
 
+static inline int task_running(struct rq *rq, struct task_struct *p)
+{
+#ifdef CONFIG_SMP
+	return p->on_cpu;
+#else
+	return task_current(rq, p);
+#endif
+}
+
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-	return task_current(rq, p);
-}
-
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->on_cpu = 1;
+#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->on_cpu = 0;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
 	rq->lock.owner = current;
@@ -865,15 +886,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
-	return p->oncpu;
-#else
-	return task_current(rq, p);
-#endif
-}
-
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
@@ -882,7 +894,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 	 * SMP rebalancing from interrupt is the only thing that cares
 	 * here.
 	 */
-	next->oncpu = 1;
+	next->on_cpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	raw_spin_unlock_irq(&rq->lock);
@@ -895,12 +907,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
 	/*
-	 * After ->oncpu is cleared, the task can be moved to a different CPU.
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
 	 * We must ensure this doesn't happen until the switch is completely
 	 * finished.
 	 */
 	smp_wmb();
-	prev->oncpu = 0;
+	prev->on_cpu = 0;
 #endif
 #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
@@ -2686,8 +2698,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-	p->oncpu = 0;
+#if defined(CONFIG_SMP)
+	p->on_cpu = 0;
 #endif
 #ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */
@@ -5776,8 +5788,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	rcu_read_unlock();
 
 	rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-	idle->oncpu = 1;
+#if defined(CONFIG_SMP)
+	idle->on_cpu = 1;
 #endif
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
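
The comment added to finish_lock_switch() spells out the ordering requirement:
the smp_wmb() makes every store done during the context switch visible before
->on_cpu is cleared. A waiter on another CPU would pair with that roughly as
follows (illustrative sketch only; the helper is hypothetical and not part of
this patch):

	static inline void wait_task_off_cpu(struct task_struct *p)
	{
		/* Busy-wait until the previous CPU has finished switching @p out. */
		while (p->on_cpu)
			cpu_relax();
		/* Pairs with the smp_wmb() issued before prev->on_cpu = 0. */
		smp_rmb();
	}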