Commit 2b75b535 authored by Ingo Molnar

- put the sync wakeup feature back in, based on Mike Kravetz's patch.

parent c700d531
...@@ -119,7 +119,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos) ...@@ -119,7 +119,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
* writers synchronously that there is more * writers synchronously that there is more
* room. * room.
*/ */
wake_up_interruptible(PIPE_WAIT(*inode)); wake_up_interruptible_sync(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT); kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
if (!PIPE_EMPTY(*inode)) if (!PIPE_EMPTY(*inode))
BUG(); BUG();
...@@ -219,7 +219,7 @@ pipe_write(struct file *filp, const char *buf, size_t count, loff_t *ppos) ...@@ -219,7 +219,7 @@ pipe_write(struct file *filp, const char *buf, size_t count, loff_t *ppos)
* is going to give up this CPU, so it doesnt have * is going to give up this CPU, so it doesnt have
* to do idle reschedules. * to do idle reschedules.
*/ */
wake_up_interruptible(PIPE_WAIT(*inode)); wake_up_interruptible_sync(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN); kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
PIPE_WAITING_WRITERS(*inode)++; PIPE_WAITING_WRITERS(*inode)++;
pipe_wait(inode); pipe_wait(inode);
......
...@@ -491,6 +491,7 @@ extern unsigned long prof_len; ...@@ -491,6 +491,7 @@ extern unsigned long prof_len;
extern unsigned long prof_shift; extern unsigned long prof_shift;
extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr)); extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(sleep_on(wait_queue_head_t *q)); extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q, extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
signed long timeout)); signed long timeout));
...@@ -507,6 +508,11 @@ extern void FASTCALL(sched_exit(task_t * p)); ...@@ -507,6 +508,11 @@ extern void FASTCALL(sched_exit(task_t * p));
#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1) #define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr) #define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr)
#define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0) #define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0)
#ifdef CONFIG_SMP
#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
#else
#define wake_up_interruptible_sync(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
#endif
asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru); asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
extern int in_group_p(gid_t); extern int in_group_p(gid_t);
......
...@@ -457,6 +457,9 @@ EXPORT_SYMBOL(iomem_resource); ...@@ -457,6 +457,9 @@ EXPORT_SYMBOL(iomem_resource);
/* process management */ /* process management */
EXPORT_SYMBOL(complete_and_exit); EXPORT_SYMBOL(complete_and_exit);
EXPORT_SYMBOL(__wake_up); EXPORT_SYMBOL(__wake_up);
#if CONFIG_SMP
EXPORT_SYMBOL_GPL(__wake_up_sync); /* internal use only */
#endif
EXPORT_SYMBOL(wake_up_process); EXPORT_SYMBOL(wake_up_process);
EXPORT_SYMBOL(sleep_on); EXPORT_SYMBOL(sleep_on);
EXPORT_SYMBOL(sleep_on_timeout); EXPORT_SYMBOL(sleep_on_timeout);
......
...@@ -321,31 +321,43 @@ void kick_if_running(task_t * p) ...@@ -321,31 +321,43 @@ void kick_if_running(task_t * p)
* "current->state = TASK_RUNNING" to mark yourself runnable * "current->state = TASK_RUNNING" to mark yourself runnable
* without the overhead of this. * without the overhead of this.
*/ */
/*
 * try_to_wake_up - wake up a thread
 * @p:    the thread to be awakened
 * @sync: non-zero when the waker is about to give up this CPU
 *        (synchronous wakeup); the sleeper may then be pulled over
 *        to this CPU instead of preempting a remote one.
 *
 * Puts @p on its runqueue if it is not already queued, and kicks the
 * currently running task if @p has better priority.
 *
 * Returns 1 if the task was actually activated, 0 if it was already
 * runnable (already on a runqueue).
 */
static int try_to_wake_up(task_t * p, int sync)
{
	unsigned long flags;
	int success = 0;
	long old_state;
	runqueue_t *rq;

repeat_lock_task:
	rq = task_rq_lock(p, &flags);
	old_state = p->state;
	if (!p->array) {
		/*
		 * Sync wakeup: migrate the sleeper to this CPU.  Changing
		 * ->cpu changes which runqueue lock protects the task, so
		 * drop the lock and retry from the top.
		 */
		if (unlikely(sync && (rq->curr != p))) {
			if (p->thread_info->cpu != smp_processor_id()) {
				p->thread_info->cpu = smp_processor_id();
				task_rq_unlock(rq, &flags);
				goto repeat_lock_task;
			}
		}
		if (old_state == TASK_UNINTERRUPTIBLE)
			rq->nr_uninterruptible--;
		activate_task(p, rq);
		/*
		 * If sync is set, a resched_task() is a NOOP
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
		success = 1;
	}
	/* Mark runnable only after (possible) migration and activation. */
	p->state = TASK_RUNNING;
	task_rq_unlock(rq, &flags);

	return success;
}
/*
 * wake_up_process - wake up a (possibly sleeping) task.
 * @p: the task to wake.
 *
 * Plain (non-synchronous) wakeup: delegates to try_to_wake_up() with
 * sync == 0.  Returns 1 if the task was actually woken, 0 otherwise.
 */
int wake_up_process(task_t * p)
{
	return try_to_wake_up(p, 0);
}
void wake_up_forked_process(task_t * p) void wake_up_forked_process(task_t * p)
...@@ -874,7 +886,7 @@ asmlinkage void preempt_schedule(void) ...@@ -874,7 +886,7 @@ asmlinkage void preempt_schedule(void)
* started to run but is not in state TASK_RUNNING. try_to_wake_up() returns * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
* zero in this (rare) case, and we handle it by continuing to scan the queue. * zero in this (rare) case, and we handle it by continuing to scan the queue.
*/ */
static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
{ {
struct list_head *tmp; struct list_head *tmp;
unsigned int state; unsigned int state;
...@@ -885,7 +897,7 @@ static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int ...@@ -885,7 +897,7 @@ static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int
curr = list_entry(tmp, wait_queue_t, task_list); curr = list_entry(tmp, wait_queue_t, task_list);
p = curr->task; p = curr->task;
state = p->state; state = p->state;
if ((state & mode) && try_to_wake_up(p) && if ((state & mode) && try_to_wake_up(p, sync) &&
((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)) ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive))
break; break;
} }
...@@ -899,17 +911,36 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) ...@@ -899,17 +911,36 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
return; return;
spin_lock_irqsave(&q->lock, flags); spin_lock_irqsave(&q->lock, flags);
__wake_up_common(q, mode, nr_exclusive); __wake_up_common(q, mode, nr_exclusive, 0);
spin_unlock_irqrestore(&q->lock, flags);
}
#if CONFIG_SMP

/*
 * __wake_up_sync - wake up threads blocked on a waitqueue, synchronously.
 * @q:            the waitqueue
 * @mode:         which task states are eligible to be woken
 * @nr_exclusive: how many WQ_FLAG_EXCLUSIVE waiters to wake
 *
 * Like __wake_up(), but the caller promises it is about to give up
 * this CPU, so the woken task may run here instead of forcing a
 * reschedule elsewhere.  The sync hint is only passed down for an
 * exclusive (bounded) wakeup; a wake-all (nr_exclusive == 0) falls
 * back to the normal path.
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
	unsigned long flags;

	if (unlikely(!q))
		return;

	spin_lock_irqsave(&q->lock, flags);
	if (likely(nr_exclusive))
		__wake_up_common(q, mode, nr_exclusive, 1);
	else
		__wake_up_common(q, mode, nr_exclusive, 0);
	spin_unlock_irqrestore(&q->lock, flags);
}

#endif
void complete(struct completion *x) void complete(struct completion *x)
{ {
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&x->wait.lock, flags); spin_lock_irqsave(&x->wait.lock, flags);
x->done++; x->done++;
__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1); __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
spin_unlock_irqrestore(&x->wait.lock, flags); spin_unlock_irqrestore(&x->wait.lock, flags);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment