Commit fdaea996 authored by Arnaldo Carvalho de Melo, committed by Arnaldo Carvalho de Melo

[NET] introduce sk_wait_event and generalise tcp_data_wait

Later patches will make other protocols use sk_wait_data, and further generalisations
of the TCP code will use sk_wait_event.

This is again to abstract away more stuff from poor network family writers, such as
the SOCK_ASYNC_WAITDATA handling.
Signed-off-by: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
parent b7155403
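
For illustration of how another protocol's receive path could use the new helper, here is a minimal, hypothetical sketch. Only sk_wait_data() itself comes from this patch; the function name, the surrounding loop and the error handling are assumptions made for the example.

#include <net/sock.h>

/* Hypothetical caller, not part of this commit: block until data is queued,
 * the timeout expires, a signal arrives or the socket reports an error. */
static int example_wait_for_data(struct sock *sk, int flags)
{
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sk->sk_err) {
			release_sock(sk);
			return sock_error(sk);
		}
		if (!timeo) {
			release_sock(sk);
			return -EAGAIN;
		}
		if (signal_pending(current)) {
			release_sock(sk);
			return sock_intr_errno(timeo);
		}
		/* Drops the socket lock, sleeps until woken or the timeout
		 * expires, then re-takes the lock; timeo is updated with the
		 * time remaining. */
		sk_wait_data(sk, &timeo);
	}
	release_sock(sk);
	return 0;
}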
include/net/sock.h
@@ -410,6 +410,20 @@ do { if (!(__sk)->sk_backlog.tail) { \
 		(__skb)->next = NULL;				\
 } while(0)
 
+#define sk_wait_event(__sk, __timeo, __condition)		\
+({	int rc;							\
+	release_sock(__sk);					\
+	rc = __condition;					\
+	if (!rc) {						\
+		*(__timeo) = schedule_timeout(*(__timeo));	\
+		rc = __condition;				\
+	}							\
+	lock_sock(__sk);					\
+	rc;							\
+})
+
+extern int sk_wait_data(struct sock *sk, long *timeo);
+
 /* IP protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
  * transport -> network interface is defined by struct inet_proto
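
sk_wait_event() assumes the caller holds the socket lock and is already queued on sk->sk_sleep; the condition it retests is arbitrary, which is what lets later generalisations of the TCP code build on it. As a hedged sketch that is not part of this commit, waiting for a connect to finish might look like the following; the condition and the helper name are assumptions for the example.

#include <linux/tcp.h>	/* TCPF_* state masks; header location varies by kernel version */
#include <net/sock.h>

/* Illustrative only: sleep until the socket leaves SYN_SENT/SYN_RECV.
 * The wait-queue setup mirrors sk_wait_data() below; the caller holds the
 * socket lock, and *timeo is updated with the time remaining. */
static int example_wait_connect(struct sock *sk, long *timeo)
{
	int done;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	done = sk_wait_event(sk, timeo,
			     !((1 << sk->sk_state) &
			       (TCPF_SYN_SENT | TCPF_SYN_RECV)));
	finish_wait(sk->sk_sleep, &wait);
	return done;	/* non-zero once the condition holds */
}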
net/core/sock.c
@@ -917,6 +917,31 @@ void __release_sock(struct sock *sk)
 	} while((skb = sk->sk_backlog.head) != NULL);
 }
 
+/**
+ * sk_wait_data - wait for data to arrive at sk_receive_queue
+ * sk - sock to wait on
+ * timeo - for how long
+ *
+ * Now socket state including sk->sk_err is changed only under lock,
+ * hence we may omit checks after joining wait queue.
+ * We check receive queue before schedule() only as optimization;
+ * it is very likely that release_sock() added new data.
+ */
+int sk_wait_data(struct sock *sk, long *timeo)
+{
+	int rc;
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
+	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	finish_wait(sk->sk_sleep, &wait);
+	return rc;
+}
+
+EXPORT_SYMBOL(sk_wait_data);
+
 /*
  * Set of default routines for initialising struct proto_ops when
  * the protocol does not support a particular function. In certain
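
To make the locking pattern explicit, here is what sk_wait_data() looks like with the sk_wait_event() call expanded by hand. This is an explanatory rewrite of the code added above, not code from the patch.

/* sk_wait_data() with the macro expanded: drop the lock so the softirq
 * and backlog paths can deliver packets, test the queue, sleep if it is
 * still empty, retest, then re-take the lock. */
static int sk_wait_data_expanded(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	release_sock(sk);
	rc = !skb_queue_empty(&sk->sk_receive_queue);
	if (!rc) {
		/* schedule_timeout() returns the time left, written back
		 * through *timeo so the caller can keep looping on it. */
		*timeo = schedule_timeout(*timeo);
		rc = !skb_queue_empty(&sk->sk_receive_queue);
	}
	lock_sock(sk);

	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
	return rc;	/* non-zero if data is now queued */
}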
net/ipv4/tcp.c
@@ -1356,31 +1356,6 @@ static void cleanup_rbuf(struct sock *sk, int copied)
 		tcp_send_ack(sk);
 }
 
-/* Now socket state including sk->sk_err is changed only under lock,
- * hence we may omit checks after joining wait queue.
- * We check receive queue before schedule() only as optimization;
- * it is very likely that release_sock() added new data.
- */
-
-static long tcp_data_wait(struct sock *sk, long timeo)
-{
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
-
-	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-	release_sock(sk);
-
-	if (skb_queue_empty(&sk->sk_receive_queue))
-		timeo = schedule_timeout(timeo);
-
-	lock_sock(sk);
-	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-
-	finish_wait(sk->sk_sleep, &wait);
-	return timeo;
-}
-
 static void tcp_prequeue_process(struct sock *sk)
 {
 	struct sk_buff *skb;
@@ -1660,9 +1635,8 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
-		} else {
-			timeo = tcp_data_wait(sk, timeo);
-		}
+		} else
+			sk_wait_data(sk, &timeo);
 
 		if (user_recv) {
 			int chunk;