Commit be9af746 authored by Linus Torvalds

Merge master.kernel.org:/home/mingo/BK/linux-2.5-sched

into home.transmeta.com:/home/torvalds/v2.5/linux
parents af0d5292 f85e6275
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -119,7 +119,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
 		 * writers synchronously that there is more
 		 * room.
 		 */
-		wake_up_interruptible(PIPE_WAIT(*inode));
+		wake_up_interruptible_sync(PIPE_WAIT(*inode));
 		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
 		if (!PIPE_EMPTY(*inode))
 			BUG();
@@ -219,7 +219,7 @@ pipe_write(struct file *filp, const char *buf, size_t count, loff_t *ppos)
 		 * is going to give up this CPU, so it doesnt have
 		 * to do idle reschedules.
 		 */
-		wake_up_interruptible(PIPE_WAIT(*inode));
+		wake_up_interruptible_sync(PIPE_WAIT(*inode));
 		kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
 		PIPE_WAITING_WRITERS(*inode)++;
 		pipe_wait(inode);
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -491,6 +491,7 @@ extern unsigned long prof_len;
 extern unsigned long prof_shift;
 
 extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
 extern void FASTCALL(sleep_on(wait_queue_head_t *q));
 extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
 				      signed long timeout));
@@ -507,6 +508,11 @@ extern void FASTCALL(sched_exit(task_t * p));
 #define wake_up_interruptible(x)	__wake_up((x),TASK_INTERRUPTIBLE, 1)
 #define wake_up_interruptible_nr(x, nr)	__wake_up((x),TASK_INTERRUPTIBLE, nr)
 #define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
+#ifdef CONFIG_SMP
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#else
+#define wake_up_interruptible_sync(x)	__wake_up((x),TASK_INTERRUPTIBLE, 1)
+#endif
 asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
 
 extern int in_group_p(gid_t);
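The new wake_up_interruptible_sync() above is intended for callers that are about to give up the CPU themselves, which is exactly how the pipe hunks earlier in this diff use it. A minimal caller-side sketch follows (illustrative only, not code from this commit; both wait-queue parameters are hypothetical, while wake_up_interruptible_sync() and sleep_on() come from the declarations above):

#include <linux/wait.h>
#include <linux/sched.h>

/*
 * Minimal sketch, not part of this commit: a producer that is about to
 * block anyway wakes the consumer with the _sync variant, so the
 * scheduler does not preempt the producer just to run the woken task;
 * the CPU is given up voluntarily a moment later.
 */
static void produce_then_sleep(wait_queue_head_t *consumers,
                               wait_queue_head_t *producers)
{
        /* data has been queued; wake a consumer without forcing a resched */
        wake_up_interruptible_sync(consumers);

        /* ... and immediately give up the CPU ourselves */
        sleep_on(producers);
}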
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -26,6 +26,7 @@
 #define write_lock_bh(lock)	do { local_bh_disable(); write_lock(lock); } while (0)
 
 #define spin_unlock_irqrestore(lock, flags)	do { spin_unlock(lock); local_irq_restore(flags); } while (0)
+#define _raw_spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); } while (0)
 #define spin_unlock_irq(lock)	do { spin_unlock(lock); local_irq_enable(); } while (0)
 #define spin_unlock_bh(lock)	do { spin_unlock(lock); local_bh_enable(); } while (0)
@@ -143,6 +144,12 @@ do { \
 	preempt_schedule(); \
 } while (0)
 
+#define preempt_check_resched() \
+do { \
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+		preempt_schedule(); \
+} while (0)
+
 #define spin_lock(lock)	\
 do { \
 	preempt_disable(); \
@@ -157,6 +164,12 @@ do { \
 	preempt_enable(); \
 } while (0)
 
+#define spin_unlock_no_resched(lock) \
+do { \
+	_raw_spin_unlock(lock); \
+	preempt_enable_no_resched(); \
+} while (0)
+
 #define read_lock(lock)		({preempt_disable(); _raw_read_lock(lock);})
 #define read_unlock(lock)	({_raw_read_unlock(lock); preempt_enable();})
 #define write_lock(lock)	({preempt_disable(); _raw_write_lock(lock);})
@@ -166,20 +179,22 @@ do { \
 #else
 
 #define preempt_get_count()	(0)
 #define preempt_disable()	do { } while (0)
 #define preempt_enable_no_resched()	do {} while(0)
 #define preempt_enable()	do { } while (0)
+#define preempt_check_resched()	do { } while (0)
 
 #define spin_lock(lock)		_raw_spin_lock(lock)
 #define spin_trylock(lock)	_raw_spin_trylock(lock)
 #define spin_unlock(lock)	_raw_spin_unlock(lock)
+#define spin_unlock_no_resched(lock)	_raw_spin_unlock(lock)
 
 #define read_lock(lock)		_raw_read_lock(lock)
 #define read_unlock(lock)	_raw_read_unlock(lock)
 #define write_lock(lock)	_raw_write_lock(lock)
 #define write_unlock(lock)	_raw_write_unlock(lock)
 #define write_trylock(lock)	_raw_write_trylock(lock)
 #endif
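The two spinlock.h additions fit together: spin_unlock_no_resched() drops the lock without the preemption check a plain spin_unlock() performs, which only makes sense when the caller reschedules explicitly right afterwards (sys_sched_yield() further down in this diff is the in-tree user). A minimal sketch of that pattern, assuming a hypothetical example_lock:

#include <linux/spinlock.h>
#include <linux/sched.h>

/* Minimal sketch, not part of this commit; example_lock is hypothetical. */
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

static void requeue_and_yield(void)
{
        spin_lock(&example_lock);
        /* ... requeue the current task under example_lock ... */
        spin_unlock_no_resched(&example_lock);  /* no preempt_check_resched() here */
        schedule();                             /* we reschedule explicitly anyway */
}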
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -463,6 +463,9 @@ EXPORT_SYMBOL(iomem_resource);
 /* process management */
 EXPORT_SYMBOL(complete_and_exit);
 EXPORT_SYMBOL(__wake_up);
+#if CONFIG_SMP
+EXPORT_SYMBOL_GPL(__wake_up_sync); /* internal use only */
+#endif
 EXPORT_SYMBOL(wake_up_process);
 EXPORT_SYMBOL(sleep_on);
 EXPORT_SYMBOL(sleep_on_timeout);
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -135,7 +135,6 @@ struct prio_array {
  */
 struct runqueue {
 	spinlock_t lock;
-	spinlock_t frozen;
 	unsigned long nr_running, nr_switches, expired_timestamp;
 	signed long nr_uninterruptible;
 	task_t *curr, *idle;
@@ -153,17 +152,27 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define rt_task(p)		((p)->prio < MAX_RT_PRIO)
 
+/*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+ * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ *
+ * WARNING: to squeeze out a few more cycles we do not disable preemption
+ * explicitly (or implicitly), we just keep interrupts disabled. This means
+ * that within task_rq_lock/unlock sections you must be careful
+ * about locking/unlocking spinlocks, since they could cause an unexpected
+ * preemption.
+ */
 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 {
 	struct runqueue *rq;
 
 repeat_lock_task:
-	preempt_disable();
+	local_irq_save(*flags);
 	rq = task_rq(p);
-	spin_lock_irqsave(&rq->lock, *flags);
+	_raw_spin_lock(&rq->lock);
 	if (unlikely(rq != task_rq(p))) {
-		spin_unlock_irqrestore(&rq->lock, *flags);
-		preempt_enable();
+		_raw_spin_unlock_irqrestore(&rq->lock, *flags);
 		goto repeat_lock_task;
 	}
 	return rq;
@@ -171,8 +180,25 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 
 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
 {
-	spin_unlock_irqrestore(&rq->lock, *flags);
-	preempt_enable();
+	_raw_spin_unlock_irqrestore(&rq->lock, *flags);
+	preempt_check_resched();
+}
+
+/*
+ * rq_lock - lock a given runqueue and disable interrupts.
+ */
+static inline runqueue_t *rq_lock(runqueue_t *rq)
+{
+	local_irq_disable();
+	rq = this_rq();
+	spin_lock(&rq->lock);
+	return rq;
+}
+
+static inline void rq_unlock(runqueue_t *rq)
+{
+	spin_unlock(&rq->lock);
+	local_irq_enable();
 }
 
 /*
@@ -263,8 +289,15 @@ static inline void resched_task(task_t *p)
 	nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
 
 	if (!need_resched && !nrpolling && (p->thread_info->cpu != smp_processor_id()))
+		/*
+		 * NOTE: smp_send_reschedule() can be called from
+		 * spinlocked sections which do not have an elevated
+		 * preemption count. So the code either has to avoid
+		 * spinlocks, or has to put preempt_disable() and
+		 * preempt_enable_no_resched() around the code.
+		 */
 		smp_send_reschedule(p->thread_info->cpu);
-	preempt_enable();
+	preempt_enable_no_resched();
 #else
 	set_tsk_need_resched(p);
 #endif
@@ -284,9 +317,15 @@ void wait_task_inactive(task_t * p)
 repeat:
 	preempt_disable();
 	rq = task_rq(p);
-	while (unlikely(rq->curr == p)) {
+	if (unlikely(rq->curr == p)) {
 		cpu_relax();
-		barrier();
+		/*
+		 * enable/disable preemption just to make this
+		 * a preemption point - we are busy-waiting
+		 * anyway.
+		 */
+		preempt_enable();
+		goto repeat;
 	}
 	rq = task_rq_lock(p, &flags);
 	if (unlikely(rq->curr == p)) {
@@ -309,8 +348,10 @@ void wait_task_inactive(task_t * p)
  */
 void kick_if_running(task_t * p)
 {
-	if (p == task_rq(p)->curr)
+	if (p == task_rq(p)->curr) {
 		resched_task(p);
+		preempt_check_resched();
+	}
 }
 #endif
@@ -322,40 +363,50 @@ void kick_if_running(task_t * p)
  * "current->state = TASK_RUNNING" to mark yourself runnable
  * without the overhead of this.
  */
-static int try_to_wake_up(task_t * p)
+static int try_to_wake_up(task_t * p, int sync)
 {
 	unsigned long flags;
 	int success = 0;
 	long old_state;
 	runqueue_t *rq;
 
+repeat_lock_task:
 	rq = task_rq_lock(p, &flags);
 	old_state = p->state;
-	p->state = TASK_RUNNING;
 	if (!p->array) {
+		if (unlikely(sync && (rq->curr != p))) {
+			if (p->thread_info->cpu != smp_processor_id()) {
+				p->thread_info->cpu = smp_processor_id();
+				task_rq_unlock(rq, &flags);
+				goto repeat_lock_task;
+			}
+		}
 		if (old_state == TASK_UNINTERRUPTIBLE)
 			rq->nr_uninterruptible--;
 		activate_task(p, rq);
+		/*
+		 * If sync is set, a resched_task() is a NOOP
+		 */
 		if (p->prio < rq->curr->prio)
 			resched_task(rq->curr);
 		success = 1;
 	}
+	p->state = TASK_RUNNING;
 	task_rq_unlock(rq, &flags);
 
 	return success;
 }
 
 int wake_up_process(task_t * p)
 {
-	return try_to_wake_up(p);
+	return try_to_wake_up(p, 0);
 }
 
 void wake_up_forked_process(task_t * p)
 {
 	runqueue_t *rq;
 
-	preempt_disable();
-	rq = this_rq();
-	spin_lock_irq(&rq->lock);
+	rq = rq_lock(rq);
 
 	p->state = TASK_RUNNING;
 	if (!rt_task(p)) {
@@ -371,8 +422,7 @@ void wake_up_forked_process(task_t * p)
 	p->thread_info->cpu = smp_processor_id();
 	activate_task(p, rq);
 
-	spin_unlock_irq(&rq->lock);
-	preempt_enable();
+	rq_unlock(rq);
 }
 
 /*
@@ -403,7 +453,7 @@ void sched_exit(task_t * p)
 #if CONFIG_SMP || CONFIG_PREEMPT
 asmlinkage void schedule_tail(void)
 {
-	spin_unlock_irq(&this_rq()->frozen);
+	spin_unlock_irq(&this_rq()->lock);
 }
 #endif
@@ -828,9 +878,6 @@ asmlinkage void schedule(void)
 	if (likely(prev != next)) {
 		rq->nr_switches++;
 		rq->curr = next;
-		spin_lock(&rq->frozen);
-		spin_unlock(&rq->lock);
-
 		context_switch(prev, next);
 
 		/*
@@ -840,10 +887,8 @@ asmlinkage void schedule(void)
 		 */
 		mb();
 		rq = this_rq();
-		spin_unlock_irq(&rq->frozen);
-	} else {
-		spin_unlock_irq(&rq->lock);
 	}
+	spin_unlock_irq(&rq->lock);
 
 	reacquire_kernel_lock(current);
 	preempt_enable_no_resched();
@@ -880,7 +925,7 @@ asmlinkage void preempt_schedule(void)
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
 {
 	struct list_head *tmp;
 	unsigned int state;
@@ -891,7 +936,7 @@ static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int
 		curr = list_entry(tmp, wait_queue_t, task_list);
 		p = curr->task;
 		state = p->state;
-		if ((state & mode) && try_to_wake_up(p) &&
+		if ((state & mode) && try_to_wake_up(p, sync) &&
 			((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive))
 				break;
 	}
@@ -905,17 +950,36 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 		return;
 
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive);
+	__wake_up_common(q, mode, nr_exclusive, 0);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+#if CONFIG_SMP
+
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+{
+	unsigned long flags;
+
+	if (unlikely(!q))
+		return;
+
+	spin_lock_irqsave(&q->lock, flags);
+	if (likely(nr_exclusive))
+		__wake_up_common(q, mode, nr_exclusive, 1);
+	else
+		__wake_up_common(q, mode, nr_exclusive, 0);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
+#endif
+
 void complete(struct completion *x)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1);
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
@@ -1342,8 +1406,7 @@ asmlinkage long sys_sched_yield(void)
 	runqueue_t *rq;
 	prio_array_t *array;
 
-	preempt_disable();
-	rq = this_rq();
+	rq = rq_lock(rq);
 
 	/*
 	 * Decrease the yielding task's priority by one, to avoid
@@ -1353,7 +1416,6 @@
 	 * If priority is already MAX_PRIO-1 then we still
 	 * roundrobin the task within the runlist.
 	 */
-	spin_lock_irq(&rq->lock);
 	array = current->array;
 	/*
 	 * If the task has reached maximum priority (or is a RT task)
@@ -1370,8 +1432,7 @@
 		list_add_tail(&current->run_list, array->queue + current->prio);
 		__set_bit(current->prio, array->bitmap);
 	}
-	spin_unlock(&rq->lock);
-	preempt_enable_no_resched();
+	spin_unlock_no_resched(&rq->lock);
 
 	schedule();
@@ -1599,7 +1660,6 @@ void __init sched_init(void)
 		rq->active = rq->arrays;
 		rq->expired = rq->arrays + 1;
 		spin_lock_init(&rq->lock);
-		spin_lock_init(&rq->frozen);
 		INIT_LIST_HEAD(&rq->migration_queue);
 
 		for (j = 0; j < 2; j++) {
@@ -1687,7 +1747,15 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
 		task_rq_unlock(rq, &flags);
 		goto out;
 	}
+	/*
+	 * If the task is not on a runqueue (and not running), then
+	 * it is sufficient to simply update the task's cpu field.
+	 */
+	if (!p->array && (p != rq->curr)) {
+		p->thread_info->cpu = __ffs(p->cpus_allowed);
+		task_rq_unlock(rq, &flags);
+		goto out;
+	}
 
 	init_MUTEX_LOCKED(&req.sem);
 	req.task = p;
 	list_add(&req.list, &rq->migration_queue);
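Taken together, the task_rq_lock() WARNING and the resched_task() NOTE in the hunks above describe one rule: in a section where only interrupts (not preemption) are held off, a nested spin_unlock() may trigger a preemption at an unexpected point, so such code should either avoid the plain spinlock wrappers or bracket them with preempt_disable()/preempt_enable_no_resched(). A minimal sketch of that bracketing, assuming a hypothetical nested_lock:

#include <linux/spinlock.h>

/* Minimal sketch, not part of this commit; nested_lock is hypothetical. */
static void touch_nested_lock(spinlock_t *nested_lock)
{
        preempt_disable();
        spin_lock(nested_lock);
        /* ... short critical section ... */
        spin_unlock(nested_lock);       /* preempt count is still elevated here */
        preempt_enable_no_resched();    /* drop the count without a resched check */
}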