Commit 5a8b70fc authored by Andrew Morton, committed by Hideaki Yoshifuji

[NET]: Use fancy wakeups where applicable.

parent cd9a1f5f
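For context, every hunk below applies the same conversion: an open-coded wait loop built from DECLARE_WAITQUEUE(), add_wait_queue(), set_current_state() and remove_wait_queue() is replaced by the prepare_to_wait()/finish_wait() helpers. A minimal sketch of the before/after pattern, assuming a hypothetical wait queue head "waitq" and wakeup condition "condition_met()" (neither appears in this patch):

	/* Assumed to exist and be initialized elsewhere (hypothetical). */
	/* wait_queue_head_t waitq; */

	/* Old style: set the task state, queue the waiter and undo both by hand. */
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&waitq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition_met())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&waitq, &wait);

	/*
	 * New style: prepare_to_wait() sets the task state and queues the
	 * waiter in one call; finish_wait() restores TASK_RUNNING and
	 * dequeues it.  prepare_to_wait_exclusive() is the exclusive-wakeup
	 * variant used in the accept() and socket-lock paths below.
	 */
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&waitq, &wait, TASK_INTERRUPTIBLE);
		if (condition_met())
			break;
		schedule();
	}
	finish_wait(&waitq, &wait);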
@@ -68,11 +68,9 @@ static inline int connection_based(struct sock *sk)
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
int error;
-DECLARE_WAITQUEUE(wait, current);
-__set_current_state(TASK_INTERRUPTIBLE);
-add_wait_queue_exclusive(sk->sleep, &wait);
+DEFINE_WAIT(wait);
+prepare_to_wait_exclusive(sk->sleep, &wait, TASK_INTERRUPTIBLE);
/* Socket errors? */
error = sock_error(sk);
@@ -101,8 +99,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
error = 0;
*timeo_p = schedule_timeout(*timeo_p);
out:
-current->state = TASK_RUNNING;
-remove_wait_queue(sk->sleep, &wait);
+finish_wait(sk->sleep, &wait);
return error;
interrupted:
error = sock_intr_errno(*timeo_p);
@@ -746,17 +746,16 @@ void sock_kfree_s(struct sock *sk, void *mem, int size)
*/
static long sock_wait_for_wmem(struct sock * sk, long timeo)
{
-DECLARE_WAITQUEUE(wait, current);
+DEFINE_WAIT(wait);
clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
-add_wait_queue(sk->sleep, &wait);
for (;;) {
if (!timeo)
break;
if (signal_pending(current))
break;
set_bit(SOCK_NOSPACE, &sk->socket->flags);
-set_current_state(TASK_INTERRUPTIBLE);
+prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
if (atomic_read(&sk->wmem_alloc) < sk->sndbuf)
break;
if (sk->shutdown & SEND_SHUTDOWN)
@@ -765,8 +764,7 @@ static long sock_wait_for_wmem(struct sock * sk, long timeo)
break;
timeo = schedule_timeout(timeo);
}
-__set_current_state(TASK_RUNNING);
-remove_wait_queue(sk->sleep, &wait);
+finish_wait(sk->sleep, &wait);
return timeo;
}
@@ -860,19 +858,18 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
void __lock_sock(struct sock *sk)
{
-DECLARE_WAITQUEUE(wait, current);
+DEFINE_WAIT(wait);
-add_wait_queue_exclusive(&sk->lock.wq, &wait);
for(;;) {
-current->state = TASK_UNINTERRUPTIBLE;
+prepare_to_wait_exclusive(&sk->lock.wq, &wait,
+                          TASK_UNINTERRUPTIBLE);
spin_unlock_bh(&sk->lock.slock);
schedule();
spin_lock_bh(&sk->lock.slock);
if(!sock_owned_by_user(sk))
break;
}
-current->state = TASK_RUNNING;
-remove_wait_queue(&sk->lock.wq, &wait);
+finish_wait(&sk->lock.wq, &wait);
}
void __release_sock(struct sock *sk)
@@ -562,10 +562,9 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
static long inet_wait_for_connect(struct sock *sk, long timeo)
{
-DECLARE_WAITQUEUE(wait, current);
+DEFINE_WAIT(wait);
-__set_current_state(TASK_INTERRUPTIBLE);
-add_wait_queue(sk->sleep, &wait);
+prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
/* Basic assumption: if someone sets sk->err, he _must_
* change state of the socket from TCP_SYN_*.
@@ -578,10 +577,9 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
lock_sock(sk);
if (signal_pending(current) || !timeo)
break;
-set_current_state(TASK_INTERRUPTIBLE);
+prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
}
-__set_current_state(TASK_RUNNING);
-remove_wait_queue(sk->sleep, &wait);
+finish_wait(sk->sleep, &wait);
return timeo;
}
@@ -659,7 +659,7 @@ static int wait_for_tcp_connect(struct sock *sk, int flags, long *timeo_p)
{
struct tcp_opt *tp = tcp_sk(sk);
struct task_struct *tsk = current;
-DECLARE_WAITQUEUE(wait, tsk);
+DEFINE_WAIT(wait);
while ((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
if (sk->err)
@@ -671,16 +671,14 @@ static int wait_for_tcp_connect(struct sock *sk, int flags, long *timeo_p)
if (signal_pending(tsk))
return sock_intr_errno(*timeo_p);
-__set_task_state(tsk, TASK_INTERRUPTIBLE);
-add_wait_queue(sk->sleep, &wait);
+prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
tp->write_pending++;
release_sock(sk);
*timeo_p = schedule_timeout(*timeo_p);
lock_sock(sk);
-__set_task_state(tsk, TASK_RUNNING);
-remove_wait_queue(sk->sleep, &wait);
+finish_wait(sk->sleep, &wait);
tp->write_pending--;
}
return 0;
@@ -700,16 +698,15 @@ static int wait_for_tcp_memory(struct sock *sk, long *timeo)
int err = 0;
long vm_wait = 0;
long current_timeo = *timeo;
-DECLARE_WAITQUEUE(wait, current);
+DEFINE_WAIT(wait);
if (tcp_memory_free(sk))
current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;
-add_wait_queue(sk->sleep, &wait);
for (;;) {
set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
-set_current_state(TASK_INTERRUPTIBLE);
+prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
goto do_error;
@@ -740,8 +737,7 @@ static int wait_for_tcp_memory(struct sock *sk, long *timeo)
*timeo = current_timeo;
}
out:
-current->state = TASK_RUNNING;
-remove_wait_queue(sk->sleep, &wait);
+finish_wait(sk->sleep, &wait);
return err;
do_error:
@@ -1374,11 +1370,9 @@ static void cleanup_rbuf(struct sock *sk, int copied)
static long tcp_data_wait(struct sock *sk, long timeo)
{
-DECLARE_WAITQUEUE(wait, current);
+DEFINE_WAIT(wait);
-add_wait_queue(sk->sleep, &wait);
-__set_current_state(TASK_INTERRUPTIBLE);
+prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
set_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
release_sock(sk);
@@ -1389,8 +1383,7 @@ static long tcp_data_wait(struct sock *sk, long timeo)
lock_sock(sk);
clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
-remove_wait_queue(sk->sleep, &wait);
-__set_current_state(TASK_RUNNING);
+finish_wait(sk->sleep, &wait);
return timeo;
}
@@ -2017,12 +2010,10 @@ void tcp_close(struct sock *sk, long timeout)
if (timeout) {
struct task_struct *tsk = current;
-DECLARE_WAITQUEUE(wait, current);
-add_wait_queue(sk->sleep, &wait);
+DEFINE_WAIT(wait);
do {
-set_current_state(TASK_INTERRUPTIBLE);
+prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
if (!closing(sk))
break;
release_sock(sk);
@@ -2030,8 +2021,7 @@ void tcp_close(struct sock *sk, long timeout)
lock_sock(sk);
} while (!signal_pending(tsk) && timeout);
-tsk->state = TASK_RUNNING;
-remove_wait_queue(sk->sleep, &wait);
+finish_wait(sk->sleep, &wait);
}
adjudge_to_death:
@@ -2191,7 +2181,7 @@ int tcp_disconnect(struct sock *sk, int flags)
static int wait_for_connect(struct sock *sk, long timeo)
{
struct tcp_opt *tp = tcp_sk(sk);
-DECLARE_WAITQUEUE(wait, current);
+DEFINE_WAIT(wait);
int err;
/*
@@ -2208,9 +2198,8 @@ static int wait_for_connect(struct sock *sk, long timeo)
* our exclusiveness temporarily when we get woken up without
* having to remove and re-insert us on the wait queue.
*/
-add_wait_queue_exclusive(sk->sleep, &wait);
for (;;) {
-current->state = TASK_INTERRUPTIBLE;
+prepare_to_wait_exclusive(sk->sleep, &wait, TASK_INTERRUPTIBLE);
release_sock(sk);
if (!tp->accept_queue)
timeo = schedule_timeout(timeo);
@@ -2228,8 +2217,7 @@ static int wait_for_connect(struct sock *sk, long timeo)
if (!timeo)
break;
}
-current->state = TASK_RUNNING;
-remove_wait_queue(sk->sleep, &wait);
+finish_wait(sk->sleep, &wait);
return err;
}
@@ -334,11 +334,11 @@ void tcp_listen_wlock(void)
write_lock(&tcp_lhash_lock);
if (atomic_read(&tcp_lhash_users)) {
-DECLARE_WAITQUEUE(wait, current);
+DEFINE_WAIT(wait);
-add_wait_queue_exclusive(&tcp_lhash_wait, &wait);
for (;;) {
-set_current_state(TASK_UNINTERRUPTIBLE);
+prepare_to_wait_exclusive(&tcp_lhash_wait,
+                          &wait, TASK_UNINTERRUPTIBLE);
if (!atomic_read(&tcp_lhash_users))
break;
write_unlock_bh(&tcp_lhash_lock);
@@ -346,8 +346,7 @@ void tcp_listen_wlock(void)
write_lock_bh(&tcp_lhash_lock);
}
-__set_current_state(TASK_RUNNING);
-remove_wait_queue(&tcp_lhash_wait, &wait);
+finish_wait(&tcp_lhash_wait, &wait);
}
}
@@ -859,10 +859,9 @@ static long unix_wait_for_peer(unix_socket *other, long timeo)
{
struct unix_sock *u = unix_sk(other);
int sched;
-DECLARE_WAITQUEUE(wait, current);
+DEFINE_WAIT(wait);
-__set_current_state(TASK_INTERRUPTIBLE);
-add_wait_queue_exclusive(&u->peer_wait, &wait);
+prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
sched = (!test_bit(SOCK_DEAD, &other->flags) &&
!(other->shutdown&RCV_SHUTDOWN) &&
@@ -873,8 +872,7 @@ static long unix_wait_for_peer(unix_socket *other, long timeo)
if (sched)
timeo = schedule_timeout(timeo);
-__set_current_state(TASK_RUNNING);
-remove_wait_queue(&u->peer_wait, &wait);
+finish_wait(&u->peer_wait, &wait);
return timeo;
}
@@ -1542,14 +1540,12 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
static long unix_stream_data_wait(unix_socket * sk, long timeo)
{
-DECLARE_WAITQUEUE(wait, current);
+DEFINE_WAIT(wait);
unix_state_rlock(sk);
-add_wait_queue(sk->sleep, &wait);
for (;;) {
-set_current_state(TASK_INTERRUPTIBLE);
+prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
if (skb_queue_len(&sk->receive_queue) ||
sk->err ||
@@ -1565,8 +1561,7 @@ static long unix_stream_data_wait(unix_socket * sk, long timeo)
clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
}
-__set_current_state(TASK_RUNNING);
-remove_wait_queue(sk->sleep, &wait);
+finish_wait(sk->sleep, &wait);
unix_state_runlock(sk);
return timeo;
}