Commit b4321277 authored by David S. Miller

Merge bk://kernel.bkbits.net/acme/net-2.6

into nuts.davemloft.net:/disk1/BK/net-2.6
parents bdba3437 a09a8386
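
This pull generalises TCP's stream-socket plumbing so that other stream protocols can reuse it. The fields tp->send_head, tp->write_pending and tp->queue_shrunk move from struct tcp_opt into struct sock (as sk_send_head, sk_write_pending and sk_queue_shrunk), and the TCP-private helpers grow generic sk_stream_* replacements alongside the existing sk_stream_write_space: tcp_free_skb becomes sk_stream_free_skb, tcp_set_owner_r/tcp_rfree become sk_stream_set_owner_r/sk_stream_rfree, tcp_moderate_sndbuf becomes sk_stream_moderate_sndbuf, tcp_error becomes sk_stream_error, closing() becomes sk_stream_closing, for_retrans_queue becomes sk_stream_for_retrans_queue, and wait_for_tcp_connect/wait_for_tcp_memory become sk_stream_wait_connect/sk_stream_wait_memory, plus a new sk_stream_wait_close. For callers the conversion is mechanical; illustratively, with tp = tcp_sk(sk):

        skb = tp->send_head;                                 /* old */
        skb = sk->sk_send_head;                              /* new */

        if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)    /* old */
        if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)  /* new */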
@@ -262,8 +262,8 @@ struct tcp_opt {
 	__u32	frto_highmark;	/* snd_nxt when RTO occurred */
 	__u8	unused_pad;
-	__u8	queue_shrunk;	/* Write queue has been shrunk recently.*/
 	__u8	defer_accept;	/* User waits for some data after accept() */
+	/* one byte hole, try to pack */

 /* RTT measurement */
 	__u8	backoff;	/* backoff */
@@ -297,7 +297,6 @@ struct tcp_opt {
 	struct sk_buff_head	out_of_order_queue; /* Out of order segments go here */
 	struct tcp_func		*af_specific;	/* Operations which are AF_INET{4,6} specific */
-	struct sk_buff		*send_head;	/* Front of stuff to transmit */

 	__u32	rcv_wnd;	/* Current receiver window */
 	__u32	rcv_wup;	/* rcv_nxt on last window update sent */
@@ -371,8 +370,6 @@ struct tcp_opt {
 	struct open_request	*accept_queue;
 	struct open_request	*accept_queue_tail;
-	int			write_pending;	/* A write to socket waits to start. */

 	unsigned int		keepalive_time;	  /* time before keep alive takes place */
 	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
 	int			linger2;
...
@@ -167,6 +167,9 @@ struct sock_common {
  *	@sk_socket - Identd and reporting IO signals
  *	@sk_user_data - RPC layer private data
  *	@sk_owner - module that owns this socket
+ *	@sk_send_head - front of stuff to transmit
+ *	@sk_write_pending - a write to stream socket waits to start
+ *	@sk_queue_shrunk - write queue has been shrunk recently
  *	@sk_state_change - callback to indicate change in the state of the sock
  *	@sk_data_ready - callback to indicate there is data to be processed
  *	@sk_write_space - callback to indicate there is bf sending space available
@@ -246,8 +249,12 @@ struct sock {
 	struct timeval		sk_stamp;
 	struct socket		*sk_socket;
 	void			*sk_user_data;
+	struct sk_buff		*sk_send_head;
 	struct module		*sk_owner;
+	int			sk_write_pending;
 	void			*sk_security;
+	__u8			sk_queue_shrunk;
+	/* three bytes hole, try to pack */
 	void			(*sk_state_change)(struct sock *sk);
 	void			(*sk_data_ready)(struct sock *sk, int bytes);
 	void			(*sk_write_space)(struct sock *sk);
@@ -434,6 +441,24 @@ static inline int sk_stream_memory_free(struct sock *sk)
 	return sk->sk_wmem_queued < sk->sk_sndbuf;
 }

+extern void sk_stream_rfree(struct sk_buff *skb);
+
+static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+	skb->sk = sk;
+	skb->destructor = sk_stream_rfree;
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+	sk->sk_forward_alloc -= skb->truesize;
+}
+
+static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
+{
+	sk->sk_queue_shrunk = 1;
+	sk->sk_wmem_queued -= skb->truesize;
+	sk->sk_forward_alloc += skb->truesize;
+	__kfree_skb(skb);
+}
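
These two inlines keep the per-socket accounting symmetric around skb->truesize: sk_stream_set_owner_r charges a received skb to the socket (sk_rmem_alloc up, sk_forward_alloc quota down) and installs sk_stream_rfree to undo exactly that when the skb is freed, while sk_stream_free_skb refunds a write-queue skb and raises sk_queue_shrunk so the next pass through the input handler can wake blocked writers (see tcp_check_space further down). A minimal sketch of the intended pairing; the queueing calls around the helpers are illustrative, not part of this patch:

	/* receive path: charge the skb; kfree_skb() later runs
	 * sk_stream_rfree, which reverses both adjustments */
	sk_stream_set_owner_r(skb, sk);
	__skb_queue_tail(&sk->sk_receive_queue, skb);

	/* send path: an acked skb leaves the write queue; besides the
	 * refund, sk_queue_shrunk flags that write space reappeared */
	__skb_unlink(skb, &sk->sk_write_queue);
	sk_stream_free_skb(sk, skb);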
 /* The per-socket spinlock must be held here. */
 #define sk_add_backlog(__sk, __skb)				\
 do {	if (!(__sk)->sk_backlog.tail) {				\
@@ -458,6 +483,11 @@ do {	if (!(__sk)->sk_backlog.tail) {		\
 		rc;						\
 })

+extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
+extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
+extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
+extern int sk_stream_error(struct sock *sk, int flags, int err);
+
 extern int sk_wait_data(struct sock *sk, long *timeo);
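
These four declarations form the blocking quartet that the tcp.c hunks below convert to. The calling pattern, reconstructed from those call sites (the labels are as in tcp_sendmsg; the timeo initialisation via sock_sndtimeo() is the usual one, shown here for completeness):

	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* wait to reach ESTABLISHED or CLOSE_WAIT, or fail */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;
	...
	/* write queue full: push what we have, then sleep for space */
	if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
		goto do_error;
	...
out_err:
	return sk_stream_error(sk, flags, err);	/* -EPIPE mapping + SIGPIPE */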
 /* IP protocol blocks we attach to sockets.
@@ -1067,6 +1097,20 @@ static inline void sk_wake_async(struct sock *sk, int how, int band)
 #define SOCK_MIN_SNDBUF 2048
 #define SOCK_MIN_RCVBUF 256

+static inline void sk_stream_moderate_sndbuf(struct sock *sk)
+{
+	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
+		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
+		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
+	}
+}
+
+#define sk_stream_for_retrans_queue(skb, sk)				\
+		for (skb = (sk)->sk_write_queue.next;			\
+		     (skb != (sk)->sk_send_head) &&			\
+		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
+		     skb = skb->next)
+
 /*
  *	Default write policy as shown to user space via poll/select/SIGIO
  */
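
Two notes on this hunk. sk_stream_moderate_sndbuf is the memory-pressure valve: unless the application pinned the buffer with SO_SNDBUF (SOCK_SNDBUF_LOCK), it halves the effective send buffer below what is already queued, clamped at SOCK_MIN_SNDBUF; e.g. sk_sndbuf = 65536 with sk_wmem_queued = 16384 drops sndbuf to 8192, so writers block until the queue drains. And sk_stream_for_retrans_queue walks exactly the transmitted-but-unacked span of the write queue: from the head up to, but not including, sk_send_head (the first never-sent skb), or to the list head when everything has been sent. Typical use, mirroring the tcp_enter_frto conversion below:

	struct sk_buff *skb;

	sk_stream_for_retrans_queue(skb, sk) {
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_RETRANS;
	}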
...
@@ -1186,13 +1186,6 @@ struct tcp_skb_cb {
 #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

-#define for_retrans_queue(skb, sk, tp)	\
-		for (skb = (sk)->sk_write_queue.next;	\
-		     (skb != (tp)->send_head) &&	\
-		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
-		     skb=skb->next)
-
 #include <net/tcp_ecn.h>

 /* This determines how many packets are "in the network" to the best
@@ -1400,7 +1393,7 @@ tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int n
 			     tcp_minshall_check(tp))));
 }

-/* This checks if the data bearing packet SKB (usually tp->send_head)
+/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
  * should be put on the wire right now.
  */
 static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
@@ -1457,7 +1450,7 @@ static __inline__ void __tcp_push_pending_frames(struct sock *sk,
 						 unsigned cur_mss,
 						 int nonagle)
 {
-	struct sk_buff *skb = tp->send_head;
+	struct sk_buff *skb = sk->sk_send_head;

 	if (skb) {
 		if (!tcp_skb_is_last(sk, skb))
@@ -1477,7 +1470,7 @@ static __inline__ void tcp_push_pending_frames(struct sock *sk,
 static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
 {
-	struct sk_buff *skb = tp->send_head;
+	struct sk_buff *skb = sk->sk_send_head;

 	return (skb &&
 		tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
@@ -1876,14 +1869,6 @@ static __inline__ void tcp_openreq_init(struct open_request *req,
 #define TCP_MEM_QUANTUM ((int)PAGE_SIZE)

-static inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
-{
-	tcp_sk(sk)->queue_shrunk = 1;
-	sk->sk_wmem_queued -= skb->truesize;
-	sk->sk_forward_alloc += skb->truesize;
-	__kfree_skb(skb);
-}
-
 extern void __tcp_mem_reclaim(struct sock *sk);
 extern int tcp_mem_schedule(struct sock *sk, int size, int kind);
@@ -1901,14 +1886,6 @@ static inline void tcp_enter_memory_pressure(void)
 	}
 }

-static inline void tcp_moderate_sndbuf(struct sock *sk)
-{
-	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
-		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
-		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
-	}
-}
-
 static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
 {
 	struct sk_buff *skb = alloc_skb(size+MAX_TCP_HEADER, gfp);
@@ -1923,7 +1900,7 @@ static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem,
 			__kfree_skb(skb);
 	} else {
 		tcp_enter_memory_pressure();
-		tcp_moderate_sndbuf(sk);
+		sk_stream_moderate_sndbuf(sk);
 	}
 	return NULL;
 }
@@ -1942,7 +1919,7 @@ static inline struct page * tcp_alloc_page(struct sock *sk)
 		return page;
 	}
 	tcp_enter_memory_pressure();
-	tcp_moderate_sndbuf(sk);
+	sk_stream_moderate_sndbuf(sk);
 	return NULL;
 }
@@ -1951,20 +1928,10 @@ static inline void tcp_writequeue_purge(struct sock *sk)
 	struct sk_buff *skb;

 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
-		tcp_free_skb(sk, skb);
+		sk_stream_free_skb(sk, skb);
 	tcp_mem_reclaim(sk);
 }

-extern void tcp_rfree(struct sk_buff *skb);
-
-static inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
-{
-	skb->sk = sk;
-	skb->destructor = tcp_rfree;
-	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
-	sk->sk_forward_alloc -= skb->truesize;
-}
-
 extern void tcp_listen_wlock(void);

 /* - We may sleep inside this lock.
@@ -2049,8 +2016,8 @@ static inline int tcp_use_frto(const struct sock *sk)
 	 * unsent new data, and the advertised window should allow
 	 * sending it.
	 */
-	return (sysctl_tcp_frto && tp->send_head &&
-		!after(TCP_SKB_CB(tp->send_head)->end_seq,
+	return (sysctl_tcp_frto && sk->sk_send_head &&
+		!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
 		       tp->snd_una + tp->snd_wnd));
 }
...
@@ -1147,6 +1147,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	skb_queue_head_init(&sk->sk_write_queue);
 	skb_queue_head_init(&sk->sk_error_queue);

+	sk->sk_send_head = NULL;
+
 	init_timer(&sk->sk_timer);

 	sk->sk_allocation = GFP_KERNEL;
@@ -1176,6 +1178,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	sk->sk_peercred.pid = 0;
 	sk->sk_peercred.uid = -1;
 	sk->sk_peercred.gid = -1;
+	sk->sk_write_pending = 0;
 	sk->sk_rcvlowat = 1;
 	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
 	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
...
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/net.h>
 #include <linux/signal.h>
+#include <linux/tcp.h>
 #include <linux/wait.h>
 #include <net/sock.h>
@@ -39,3 +40,151 @@ void sk_stream_write_space(struct sock *sk)
 }

 EXPORT_SYMBOL(sk_stream_write_space);

+/**
+ * sk_stream_wait_connect - Wait for a socket to get into the connected state
+ * @sk - sock to wait on
+ * @timeo_p - for how long to wait
+ *
+ * Must be called with the socket locked.
+ */
+int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
+{
+	struct task_struct *tsk = current;
+	DEFINE_WAIT(wait);
+
+	while (1) {
+		if (sk->sk_err)
+			return sock_error(sk);
+		if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
+			return -EPIPE;
+		if (!*timeo_p)
+			return -EAGAIN;
+		if (signal_pending(tsk))
+			return sock_intr_errno(*timeo_p);
+
+		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		sk->sk_write_pending++;
+		if (sk_wait_event(sk, timeo_p,
+				  !((1 << sk->sk_state) &
+				    ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))))
+			break;
+		finish_wait(sk->sk_sleep, &wait);
+		sk->sk_write_pending--;
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(sk_stream_wait_connect);
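
Unlike the open-coded wait_for_tcp_connect it replaces (removed from tcp.c below), this version leans on the sk_wait_event macro already in sock.h for the lock dance. Approximately, and slightly simplified:

	/* sk_wait_event(sk, timeo, condition), in outline: */
	release_sock(sk);
	rc = condition;
	if (!rc) {
		*timeo = schedule_timeout(*timeo);
		rc = condition;
	}
	lock_sock(sk);

so the socket lock is dropped only across the sleep and is held again whenever sk_stream_wait_connect returns, preserving its "must be called with the socket locked" contract.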
+/**
+ * sk_stream_closing - Return 1 if we still have things to send in our buffers.
+ * @sk - socket to verify
+ */
+static inline int sk_stream_closing(struct sock *sk)
+{
+	return (1 << sk->sk_state) &
+	       (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
+}
+
+void sk_stream_wait_close(struct sock *sk, long timeout)
+{
+	if (timeout) {
+		DEFINE_WAIT(wait);
+
+		do {
+			prepare_to_wait(sk->sk_sleep, &wait,
+					TASK_INTERRUPTIBLE);
+			if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk)))
+				break;
+		} while (!signal_pending(current) && timeout);
+
+		finish_wait(sk->sk_sleep, &wait);
+	}
+}
+
+EXPORT_SYMBOL(sk_stream_wait_close);
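
sk_stream_closing relies on the usual state-mask idiom: each TCPF_<state> flag is defined as (1 << TCP_<state>), so a single AND tests membership of sk_state in a set. For example, while the FIN handshake is still draining our buffers:

	sk->sk_state == TCP_FIN_WAIT1
	=> (1 << sk->sk_state) == TCPF_FIN_WAIT1
	=> sk_stream_closing(sk) != 0

so sk_stream_wait_close keeps sleeping until the state leaves that set, a signal arrives, or the (SO_LINGER-derived) timeout expires.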
+/**
+ * sk_stream_wait_memory - Wait for more memory for a socket
+ * @sk - socket to wait for memory
+ * @timeo_p - for how long
+ */
+int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
+{
+	int err = 0;
+	long vm_wait = 0;
+	long current_timeo = *timeo_p;
+	DEFINE_WAIT(wait);
+
+	if (sk_stream_memory_free(sk))
+		current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;
+
+	while (1) {
+		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+
+		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+
+		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+			goto do_error;
+		if (!*timeo_p)
+			goto do_nonblock;
+		if (signal_pending(current))
+			goto do_interrupted;
+		clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+		if (sk_stream_memory_free(sk) && !vm_wait)
+			break;
+
+		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+		sk->sk_write_pending++;
+		sk_wait_event(sk, &current_timeo, sk_stream_memory_free(sk) &&
+						  vm_wait);
+		sk->sk_write_pending--;
+
+		if (vm_wait) {
+			vm_wait -= current_timeo;
+			current_timeo = *timeo_p;
+			if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
+			    (current_timeo -= vm_wait) < 0)
+				current_timeo = 0;
+			vm_wait = 0;
+		}
+		*timeo_p = current_timeo;
+	}
+out:
+	finish_wait(sk->sk_sleep, &wait);
+	return err;
+
+do_error:
+	err = -EPIPE;
+	goto out;
+do_nonblock:
+	err = -EAGAIN;
+	goto out;
+do_interrupted:
+	err = sock_intr_errno(*timeo_p);
+	goto out;
+}
+
+EXPORT_SYMBOL(sk_stream_wait_memory);
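
The vm_wait branch is the subtle part here. If sk_stream_memory_free() is already true on entry, the caller got here because an allocation failed under global memory pressure even though per-socket accounting says there is room; the function then naps a short random interval (2 to HZ/5 + 1 jiffies) rather than waiting for write space. A reading of the bookkeeping after the wakeup, slightly annotated:

	/* after sk_wait_event returns, current_timeo holds the unslept
	 * remainder of the nap, so: */
	vm_wait -= current_timeo;	/* vm_wait = jiffies actually slept */
	current_timeo = *timeo_p;	/* restore the real timeout ... */
	if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
	    (current_timeo -= vm_wait) < 0)	/* ... charging the nap to it */
		current_timeo = 0;
	vm_wait = 0;			/* nap at most once per call */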
+void sk_stream_rfree(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+	sk->sk_forward_alloc += skb->truesize;
+}
+
+EXPORT_SYMBOL(sk_stream_rfree);
+
+int sk_stream_error(struct sock *sk, int flags, int err)
+{
+	if (err == -EPIPE)
+		err = sock_error(sk) ? : -EPIPE;
+	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
+		send_sig(SIGPIPE, current, 0);
+	return err;
+}
+
+EXPORT_SYMBOL(sk_stream_error);
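
sk_stream_error encodes two rules worth remembering: a pending socket error takes precedence over the generic -EPIPE, and SIGPIPE is raised only for a genuine broken pipe and only when the caller did not pass MSG_NOSIGNAL. Illustratively:

	/* peer sent RST earlier: sk->sk_err == ECONNRESET */
	err = sk_stream_error(sk, 0, -EPIPE);		 /* -ECONNRESET, no SIGPIPE */

	/* write after shutdown(SHUT_WR), no pending error */
	err = sk_stream_error(sk, 0, -EPIPE);		 /* -EPIPE, SIGPIPE raised */
	err = sk_stream_error(sk, MSG_NOSIGNAL, -EPIPE); /* -EPIPE, no SIGPIPE */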
@@ -333,7 +333,7 @@ int tcp_mem_schedule(struct sock *sk, int size, int kind)
 suppress_allocation:

 	if (!kind) {
-		tcp_moderate_sndbuf(sk);
+		sk_stream_moderate_sndbuf(sk);

 		/* Fail only if socket is _under_ its sndbuf.
 		 * In this case we cannot block, so that we have to fail.
@@ -360,14 +360,6 @@ void __tcp_mem_reclaim(struct sock *sk)
 	}
 }

-void tcp_rfree(struct sk_buff *skb)
-{
-	struct sock *sk = skb->sk;
-
-	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
-	sk->sk_forward_alloc += skb->truesize;
-}
-
 /*
  * LISTEN is a special case for poll..
  */
@@ -636,102 +628,6 @@ static void tcp_listen_stop (struct sock *sk)
 	BUG_TRAP(!sk->sk_ack_backlog);
 }

-/*
- *	Wait for a socket to get into the connected state
- *
- *	Note: Must be called with the socket locked.
- */
-static int wait_for_tcp_connect(struct sock *sk, int flags, long *timeo_p)
-{
-	struct tcp_opt *tp = tcp_sk(sk);
-	struct task_struct *tsk = current;
-	DEFINE_WAIT(wait);
-
-	while ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
-		if (sk->sk_err)
-			return sock_error(sk);
-		if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
-			return -EPIPE;
-		if (!*timeo_p)
-			return -EAGAIN;
-		if (signal_pending(tsk))
-			return sock_intr_errno(*timeo_p);
-
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
-		tp->write_pending++;
-
-		release_sock(sk);
-		*timeo_p = schedule_timeout(*timeo_p);
-		lock_sock(sk);
-
-		finish_wait(sk->sk_sleep, &wait);
-		tp->write_pending--;
-	}
-	return 0;
-}
-
-/*
- *	Wait for more memory for a socket
- */
-static int wait_for_tcp_memory(struct sock *sk, long *timeo)
-{
-	struct tcp_opt *tp = tcp_sk(sk);
-	int err = 0;
-	long vm_wait = 0;
-	long current_timeo = *timeo;
-	DEFINE_WAIT(wait);
-
-	if (sk_stream_memory_free(sk))
-		current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;
-
-	for (;;) {
-		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
-
-		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
-			goto do_error;
-		if (!*timeo)
-			goto do_nonblock;
-		if (signal_pending(current))
-			goto do_interrupted;
-		clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-		if (sk_stream_memory_free(sk) && !vm_wait)
-			break;
-
-		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-		tp->write_pending++;
-		release_sock(sk);
-		if (!sk_stream_memory_free(sk) || vm_wait)
-			current_timeo = schedule_timeout(current_timeo);
-		lock_sock(sk);
-		tp->write_pending--;
-
-		if (vm_wait) {
-			vm_wait -= current_timeo;
-			current_timeo = *timeo;
-			if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
-			    (current_timeo -= vm_wait) < 0)
-				current_timeo = 0;
-			vm_wait = 0;
-		}
-		*timeo = current_timeo;
-	}
-out:
-	finish_wait(sk->sk_sleep, &wait);
-	return err;
-
-do_error:
-	err = -EPIPE;
-	goto out;
-do_nonblock:
-	err = -EAGAIN;
-	goto out;
-do_interrupted:
-	err = sock_intr_errno(*timeo);
-	goto out;
-}
-
 static inline void fill_page_desc(struct sk_buff *skb, int i,
 				  struct page *page, int off, int size)
 {
@@ -763,8 +659,8 @@ static inline void skb_entail(struct sock *sk, struct tcp_opt *tp,
 	TCP_SKB_CB(skb)->sacked = 0;
 	__skb_queue_tail(&sk->sk_write_queue, skb);
 	sk_charge_skb(sk, skb);
-	if (!tp->send_head)
-		tp->send_head = skb;
+	if (!sk->sk_send_head)
+		sk->sk_send_head = skb;
 	else if (tp->nonagle&TCP_NAGLE_PUSH)
 		tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
@@ -782,7 +678,7 @@ static inline void tcp_mark_urg(struct tcp_opt *tp, int flags,
 static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags,
 			    int mss_now, int nonagle)
 {
-	if (tp->send_head) {
+	if (sk->sk_send_head) {
 		struct sk_buff *skb = sk->sk_write_queue.prev;

 		if (!(flags & MSG_MORE) || forced_push(tp))
 			tcp_mark_push(tp, skb);
@@ -792,15 +688,6 @@ static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags,
 	}
 }

-static int tcp_error(struct sock *sk, int flags, int err)
-{
-	if (err == -EPIPE)
-		err = sock_error(sk) ? : -EPIPE;
-	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
-		send_sig(SIGPIPE, current, 0);
-	return err;
-}
-
 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
 				size_t psize, int flags)
 {
@@ -812,7 +699,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 	/* Wait for a connection to finish. */
 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
-		if ((err = wait_for_tcp_connect(sk, 0, &timeo)) != 0)
+		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
 			goto out_err;

 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -831,7 +718,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 		int offset = poffset % PAGE_SIZE;
 		int size = min_t(size_t, psize, PAGE_SIZE - offset);

-		if (!tp->send_head || (copy = mss_now - skb->len) <= 0) {
+		if (!sk->sk_send_head || (copy = mss_now - skb->len) <= 0) {
 new_segment:
 			if (!sk_stream_memory_free(sk))
 				goto wait_for_sndbuf;
@@ -879,7 +766,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 		if (forced_push(tp)) {
 			tcp_mark_push(tp, skb);
 			__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
-		} else if (skb == tp->send_head)
+		} else if (skb == sk->sk_send_head)
 			tcp_push_one(sk, mss_now);
 		continue;
@@ -889,7 +776,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 		if (copied)
 			tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

-		if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
+		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 			goto do_error;

 		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
@@ -904,7 +791,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 	if (copied)
 		goto out;
 out_err:
-	return tcp_error(sk, flags, err);
+	return sk_stream_error(sk, flags, err);
 }

 ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
@@ -965,7 +852,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	/* Wait for a connection to finish. */
 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
-		if ((err = wait_for_tcp_connect(sk, flags, &timeo)) != 0)
+		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
 			goto out_err;

 	/* This should be in poll */
@@ -993,7 +880,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			skb = sk->sk_write_queue.prev;

-			if (!tp->send_head ||
+			if (!sk->sk_send_head ||
 			    (copy = mss_now - skb->len) <= 0) {

 new_segment:
@@ -1122,7 +1009,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			if (forced_push(tp)) {
 				tcp_mark_push(tp, skb);
 				__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
-			} else if (skb == tp->send_head)
+			} else if (skb == sk->sk_send_head)
 				tcp_push_one(sk, mss_now);
 			continue;
@@ -1132,7 +1019,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			if (copied)
 				tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

-			if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
+			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 				goto do_error;

 			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
@@ -1148,17 +1035,17 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 do_fault:
 	if (!skb->len) {
-		if (tp->send_head == skb)
-			tp->send_head = NULL;
+		if (sk->sk_send_head == skb)
+			sk->sk_send_head = NULL;
 		__skb_unlink(skb, skb->list);
-		tcp_free_skb(sk, skb);
+		sk_stream_free_skb(sk, skb);
 	}

 do_error:
 	if (copied)
 		goto out;
 out_err:
-	err = tcp_error(sk, flags, err);
+	err = sk_stream_error(sk, flags, err);
 	TCP_CHECK_TIMER(sk);
 	release_sock(sk);
 	return err;
@@ -1747,17 +1634,6 @@ void tcp_shutdown(struct sock *sk, int how)
 	}
 }

-/*
- *	Return 1 if we still have things to send in our buffers.
- */
-static inline int closing(struct sock *sk)
-{
-	return (1 << sk->sk_state) &
-	       (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
-}
-
 static __inline__ void tcp_kill_sk_queues(struct sock *sk)
 {
 	/* First the read buffer. */
@@ -1900,22 +1776,7 @@ void tcp_close(struct sock *sk, long timeout)
 		tcp_send_fin(sk);
 	}

-	if (timeout) {
-		struct task_struct *tsk = current;
-		DEFINE_WAIT(wait);
-
-		do {
-			prepare_to_wait(sk->sk_sleep, &wait,
-					TASK_INTERRUPTIBLE);
-			if (!closing(sk))
-				break;
-			release_sock(sk);
-			timeout = schedule_timeout(timeout);
-			lock_sock(sk);
-		} while (!signal_pending(tsk) && timeout);
-
-		finish_wait(sk->sk_sleep, &wait);
-	}
+	sk_stream_wait_close(sk, timeout);

 adjudge_to_death:
 	/* It is the last release_sock in its life. It will remove backlog. */
@@ -2046,7 +1907,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tcp_set_ca_state(tp, TCP_CA_Open);
 	tcp_clear_retrans(tp);
 	tcp_delack_init(tp);
-	tp->send_head = NULL;
+	sk->sk_send_head = NULL;
 	tp->saw_tstamp = 0;
 	tcp_sack_reset(tp);
 	__sk_dst_reset(sk);
...
@@ -1009,7 +1009,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 		if (after(end_seq, tp->high_seq))
 			flag |= FLAG_DATA_LOST;

-		for_retrans_queue(skb, sk, tp) {
+		sk_stream_for_retrans_queue(skb, sk) {
 			u8 sacked = TCP_SKB_CB(skb)->sacked;
 			int in_sack;
@@ -1113,7 +1113,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	if (lost_retrans && tp->ca_state == TCP_CA_Recovery) {
 		struct sk_buff *skb;

-		for_retrans_queue(skb, sk, tp) {
+		sk_stream_for_retrans_queue(skb, sk) {
 			if (after(TCP_SKB_CB(skb)->seq, lost_retrans))
 				break;
 			if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
@@ -1179,7 +1179,7 @@ void tcp_enter_frto(struct sock *sk)
 	tp->undo_marker = tp->snd_una;
 	tp->undo_retrans = 0;

-	for_retrans_queue(skb, sk, tp) {
+	sk_stream_for_retrans_queue(skb, sk) {
 		TCP_SKB_CB(skb)->sacked &= ~TCPCB_RETRANS;
 	}
 	tcp_sync_left_out(tp);
@@ -1202,7 +1202,7 @@ static void tcp_enter_frto_loss(struct sock *sk)
 	tp->lost_out = 0;
 	tp->fackets_out = 0;

-	for_retrans_queue(skb, sk, tp) {
+	sk_stream_for_retrans_queue(skb, sk) {
 		cnt++;
 		TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
 		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
@@ -1275,7 +1275,7 @@ void tcp_enter_loss(struct sock *sk, int how)
 	if (!how)
 		tp->undo_marker = tp->snd_una;

-	for_retrans_queue(skb, sk, tp) {
+	sk_stream_for_retrans_queue(skb, sk) {
 		cnt++;
 		if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
 			tp->undo_marker = 0;
@@ -1518,7 +1518,7 @@ tcp_mark_head_lost(struct sock *sk, struct tcp_opt *tp, int packets, u32 high_se
 	BUG_TRAP(cnt <= tp->packets_out);

-	for_retrans_queue(skb, sk, tp) {
+	sk_stream_for_retrans_queue(skb, sk) {
 		if (--cnt < 0 || after(TCP_SKB_CB(skb)->end_seq, high_seq))
 			break;
 		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
@@ -1550,7 +1550,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_opt *tp)
 	if (tcp_head_timedout(sk, tp)) {
 		struct sk_buff *skb;

-		for_retrans_queue(skb, sk, tp) {
+		sk_stream_for_retrans_queue(skb, sk) {
 			if (tcp_skb_timedout(tp, skb) &&
 			    !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
@@ -1719,7 +1719,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_opt *tp)
 {
 	if (tcp_may_undo(tp)) {
 		struct sk_buff *skb;

-		for_retrans_queue(skb, sk, tp) {
+		sk_stream_for_retrans_queue(skb, sk) {
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
 		}
 		DBGUNDO(sk, tp, "partial loss");
@@ -2328,7 +2328,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 	int acked = 0;
 	__s32 seq_rtt = -1;

-	while ((skb = skb_peek(&sk->sk_write_queue)) && skb != tp->send_head) {
+	while ((skb = skb_peek(&sk->sk_write_queue)) && skb != sk->sk_send_head) {
 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
 		__u8 sacked = scb->sacked;
@@ -2376,7 +2376,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 			tp->fackets_out--;
 		tp->packets_out--;
 		__skb_unlink(skb, skb->list);
-		tcp_free_skb(sk, skb);
+		sk_stream_free_skb(sk, skb);
 	}

 	if (acked&FLAG_ACKED) {
@@ -2416,7 +2416,7 @@ static void tcp_ack_probe(struct sock *sk)

 	/* Was it a usable window open? */

-	if (!after(TCP_SKB_CB(tp->send_head)->end_seq,
+	if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
 		   tp->snd_una + tp->snd_wnd)) {
 		tp->backoff = 0;
 		tcp_clear_xmit_timer(sk, TCP_TIME_PROBE0);
@@ -2857,7 +2857,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		 * being used to time the probes, and is probably far higher than
 		 * it needs to be for normal retransmission.
		 */
-		if (tp->send_head)
+		if (sk->sk_send_head)
 			tcp_ack_probe(sk);
 		return 1;
@@ -3461,7 +3461,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 			if (tcp_prune_queue(sk) < 0 || !tcp_rmem_schedule(sk, skb))
 				goto drop;
 		}
-		tcp_set_owner_r(skb, sk);
+		sk_stream_set_owner_r(skb, sk);
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 	}
 	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
@@ -3542,7 +3542,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
 		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);

-	tcp_set_owner_r(skb, sk);
+	sk_stream_set_owner_r(skb, sk);

 	if (!skb_peek(&tp->out_of_order_queue)) {
 		/* Initial out of order segment, build 1 SACK. */
@@ -3681,7 +3681,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
 		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
 		__skb_insert(nskb, skb->prev, skb, skb->list);
-		tcp_set_owner_r(nskb, sk);
+		sk_stream_set_owner_r(nskb, sk);

 		/* Copy data, releasing collapsed skbs. */
 		while (copy > 0) {
@@ -3837,7 +3837,7 @@ void tcp_cwnd_application_limited(struct sock *sk)

 /* When incoming ACK allowed to free some skb from write_queue,
- * we remember this event in flag tp->queue_shrunk and wake up socket
+ * we remember this event in flag sk->sk_queue_shrunk and wake up socket
  * on the exit from tcp input handler.
  *
  * PROBLEM: sndbuf expansion does not work well with largesend.
@@ -3865,10 +3865,8 @@ static void tcp_new_space(struct sock *sk)

 static inline void tcp_check_space(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
-
-	if (tp->queue_shrunk) {
-		tp->queue_shrunk = 0;
+	if (sk->sk_queue_shrunk) {
+		sk->sk_queue_shrunk = 0;
 		if (sk->sk_socket &&
 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
 			tcp_new_space(sk);
@@ -3887,8 +3885,7 @@ static void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb)

 static __inline__ void tcp_data_snd_check(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
-	struct sk_buff *skb = tp->send_head;
+	struct sk_buff *skb = sk->sk_send_head;

 	if (skb != NULL)
 		__tcp_data_snd_check(sk, skb);
@@ -4242,7 +4239,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			/* Bulk data transfer: receiver */
 			__skb_pull(skb,tcp_header_len);
 			__skb_queue_tail(&sk->sk_receive_queue, skb);
-			tcp_set_owner_r(skb, sk);
+			sk_stream_set_owner_r(skb, sk);
 			tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		}
@@ -4482,7 +4479,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 			sk_wake_async(sk, 0, POLL_OUT);
 		}

-		if (tp->write_pending || tp->defer_accept || tp->ack.pingpong) {
+		if (sk->sk_write_pending || tp->defer_accept || tp->ack.pingpong) {
 			/* Save one ACK. Data will be ready after
 			 * several ticks, if write_pending is set.
			 *
...
@@ -718,6 +718,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
 		sock_reset_flag(newsk, SOCK_DONE);
 		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+		newsk->sk_send_head = NULL;
 		newsk->sk_callback_lock = RW_LOCK_UNLOCKED;
 		skb_queue_head_init(&newsk->sk_error_queue);
 		newsk->sk_write_space = sk_stream_write_space;
@@ -775,7 +776,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
 		tcp_set_ca_state(newtp, TCP_CA_Open);
 		tcp_init_xmit_timers(newsk);
 		skb_queue_head_init(&newtp->out_of_order_queue);
-		newtp->send_head = NULL;
 		newtp->rcv_wup = req->rcv_isn + 1;
 		newtp->write_seq = req->snt_isn + 1;
 		newtp->pushed_seq = newtp->write_seq;
...
@@ -48,9 +48,9 @@ int sysctl_tcp_retrans_collapse = 1;
 static __inline__
 void update_send_head(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
 {
-	tp->send_head = skb->next;
-	if (tp->send_head == (struct sk_buff *)&sk->sk_write_queue)
-		tp->send_head = NULL;
+	sk->sk_send_head = skb->next;
+	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
+		sk->sk_send_head = NULL;
 	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
 	if (tp->packets_out++ == 0)
 		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
@@ -329,8 +329,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 	sk_charge_skb(sk, skb);

 	/* Queue it, remembering where we must start sending. */
-	if (tp->send_head == NULL)
-		tp->send_head = skb;
+	if (sk->sk_send_head == NULL)
+		sk->sk_send_head = skb;
 }

 /* Send _single_ skb sitting at the send head. This function requires
@@ -339,13 +339,13 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 void tcp_push_one(struct sock *sk, unsigned cur_mss)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
-	struct sk_buff *skb = tp->send_head;
+	struct sk_buff *skb = sk->sk_send_head;

 	if (tcp_snd_test(tp, skb, cur_mss, TCP_NAGLE_PUSH)) {
 		/* Send it out now. */
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		if (!tcp_transmit_skb(sk, skb_clone(skb, sk->sk_allocation))) {
-			tp->send_head = NULL;
+			sk->sk_send_head = NULL;
 			tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
 			if (tp->packets_out++ == 0)
 				tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
@@ -572,8 +572,10 @@ int tcp_write_xmit(struct sock *sk, int nonagle)
 		 */
 		mss_now = tcp_current_mss(sk, 1);

-		while((skb = tp->send_head) &&
-		      tcp_snd_test(tp, skb, mss_now, tcp_skb_is_last(sk, skb) ? nonagle : TCP_NAGLE_PUSH)) {
+		while ((skb = sk->sk_send_head) &&
+		       tcp_snd_test(tp, skb, mss_now,
+				    tcp_skb_is_last(sk, skb) ? nonagle :
+							       TCP_NAGLE_PUSH)) {
 			if (skb->len > mss_now) {
 				if (tcp_fragment(sk, skb, mss_now))
 					break;
@@ -593,7 +595,7 @@ int tcp_write_xmit(struct sock *sk, int nonagle)
 			return 0;
 		}

-		return !tp->packets_out && tp->send_head;
+		return !tp->packets_out && sk->sk_send_head;
 	}
 	return 0;
 }
@@ -763,7 +765,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
 		 */
 		if (tp->fackets_out)
 			tp->fackets_out--;
-		tcp_free_skb(sk, next_skb);
+		sk_stream_free_skb(sk, next_skb);
 		tp->packets_out--;
 	}
 }
@@ -779,7 +781,7 @@ void tcp_simple_retransmit(struct sock *sk)
 	unsigned int mss = tcp_current_mss(sk, 0);
 	int lost = 0;

-	for_retrans_queue(skb, sk, tp) {
+	sk_stream_for_retrans_queue(skb, sk) {
 		if (skb->len > mss &&
 		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
 			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
@@ -865,7 +867,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	/* Collapse two adjacent packets if worthwhile and we can. */
 	if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
 	   (skb->len < (cur_mss >> 1)) &&
-	   (skb->next != tp->send_head) &&
+	   (skb->next != sk->sk_send_head) &&
 	   (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
 	   (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
 	   (sysctl_tcp_retrans_collapse != 0))
@@ -940,7 +942,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)

 	/* First pass: retransmit lost packets. */
 	if (packet_cnt) {
-		for_retrans_queue(skb, sk, tp) {
+		sk_stream_for_retrans_queue(skb, sk) {
 			__u8 sacked = TCP_SKB_CB(skb)->sacked;

 			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
@@ -988,7 +990,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)

 		packet_cnt = 0;

-		for_retrans_queue(skb, sk, tp) {
+		sk_stream_for_retrans_queue(skb, sk) {
 			if(++packet_cnt > tp->fackets_out)
 				break;
@@ -1025,7 +1027,7 @@ void tcp_send_fin(struct sock *sk)
 	 */
 	mss_now = tcp_current_mss(sk, 1);

-	if(tp->send_head != NULL) {
+	if (sk->sk_send_head != NULL) {
 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
 		TCP_SKB_CB(skb)->end_seq++;
 		tp->write_seq++;
@@ -1104,7 +1106,7 @@ int tcp_send_synack(struct sock *sk)
 				return -ENOMEM;
 			__skb_unlink(skb, &sk->sk_write_queue);
 			__skb_queue_head(&sk->sk_write_queue, nskb);
-			tcp_free_skb(sk, skb);
+			sk_stream_free_skb(sk, skb);
 			sk_charge_skb(sk, nskb);
 			skb = nskb;
 		}
@@ -1404,7 +1406,7 @@ int tcp_write_wakeup(struct sock *sk)
 		struct tcp_opt *tp = tcp_sk(sk);
 		struct sk_buff *skb;

-		if ((skb = tp->send_head) != NULL &&
+		if ((skb = sk->sk_send_head) != NULL &&
 		    before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
 			int err;
 			int mss = tcp_current_mss(sk, 0);
@@ -1458,7 +1460,7 @@ void tcp_send_probe0(struct sock *sk)

 	err = tcp_write_wakeup(sk);

-	if (tp->packets_out || !tp->send_head) {
+	if (tp->packets_out || !sk->sk_send_head) {
 		/* Cancel probe timer, if it is not required. */
 		tp->probes_out = 0;
 		tp->backoff = 0;
...
@@ -269,7 +269,7 @@ static void tcp_probe_timer(struct sock *sk)
 	struct tcp_opt *tp = tcp_sk(sk);
 	int max_probes;

-	if (tp->packets_out || !tp->send_head) {
+	if (tp->packets_out || !sk->sk_send_head) {
 		tp->probes_out = 0;
 		return;
 	}
@@ -606,7 +606,7 @@ static void tcp_keepalive_timer (unsigned long data)
 	elapsed = keepalive_time_when(tp);

 	/* It is alive without keepalive 8) */
-	if (tp->packets_out || tp->send_head)
+	if (tp->packets_out || sk->sk_send_head)
 		goto resched;

 	elapsed = tcp_time_stamp - tp->rcv_tstamp;
...
@@ -990,7 +990,7 @@ static int sctp_error(struct sock *sk, int flags, int err)
  * Note: This function could use a rewrite especially when explicit
  * connect support comes in.
  */
-/* BUG: We do not implement the equivalent of wait_for_tcp_memory(). */
+/* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */

 SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
...