[NET] move send_head from tcp private area to struct sock

The poor cousins also need this; LLC will be the first to use it.
Signed-off-by: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
parent 6511d74e
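
For readers outside the TCP code, here is a rough, userspace-compilable sketch of what the move enables. Only the walker macro matches the patch; the simplified struct layouts and the demo_update_send_head() helper are illustrative assumptions, not kernel code (the real update_send_head() also updates snd_nxt, packets_out and the retransmit timer).

```c
/*
 * Illustrative sketch only, not part of the patch.  With send_head living
 * in struct sock rather than in TCP-private state, the retransmit-queue
 * walker and the "advance the send head" logic need nothing TCP-specific
 * and can be shared with other stream protocols such as LLC.
 */
#include <stddef.h>

struct sk_buff {
	struct sk_buff *next;
	struct sk_buff *prev;
	/* payload, cb[], etc. omitted */
};

struct sk_buff_head {			/* doubles as the list sentinel */
	struct sk_buff *next;
	struct sk_buff *prev;
};

struct sock {
	struct sk_buff_head sk_write_queue;	/* all queued segments      */
	struct sk_buff *sk_send_head;		/* first not-yet-sent segment */
};

/*
 * Same shape as the new sk_stream_for_retrans_queue(): walk from the front
 * of the write queue up to (but not including) sk_send_head, i.e. over the
 * segments that have been transmitted but not yet acknowledged.
 */
#define sk_stream_for_retrans_queue(skb, sk)				\
	for (skb = (sk)->sk_write_queue.next;				\
	     (skb != (sk)->sk_send_head) &&				\
	     (skb != (struct sk_buff *)&(sk)->sk_write_queue);		\
	     skb = skb->next)

/* Hypothetical helper mirroring update_send_head(): after transmitting the
 * skb at the send head, advance it; falling off the end of the circular
 * queue means nothing is left to send, so the head becomes NULL. */
static void demo_update_send_head(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_send_head = skb->next;
	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
		sk->sk_send_head = NULL;
}
```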
......@@ -297,7 +297,6 @@ struct tcp_opt {
struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
struct tcp_func *af_specific; /* Operations which are AF_INET{4,6} specific */
struct sk_buff *send_head; /* Front of stuff to transmit */
__u32 rcv_wnd; /* Current receiver window */
__u32 rcv_wup; /* rcv_nxt on last window update sent */
......
......@@ -167,6 +167,7 @@ struct sock_common {
* @sk_socket - Identd and reporting IO signals
* @sk_user_data - RPC layer private data
* @sk_owner - module that owns this socket
* @sk_send_head - front of stuff to transmit
* @sk_write_pending - a write to stream socket waits to start
* @sk_queue_shrunk - write queue has been shrunk recently
* @sk_state_change - callback to indicate change in the state of the sock
......@@ -248,6 +249,7 @@ struct sock {
struct timeval sk_stamp;
struct socket *sk_socket;
void *sk_user_data;
struct sk_buff *sk_send_head;
struct module *sk_owner;
int sk_write_pending;
void *sk_security;
......@@ -1103,6 +1105,12 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
}
}
#define sk_stream_for_retrans_queue(skb, sk) \
for (skb = (sk)->sk_write_queue.next; \
(skb != (sk)->sk_send_head) && \
(skb != (struct sk_buff *)&(sk)->sk_write_queue); \
skb = skb->next)
/*
* Default write policy as shown to user space via poll/select/SIGIO
*/
......
......@@ -1186,13 +1186,6 @@ struct tcp_skb_cb {
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
#define for_retrans_queue(skb, sk, tp) \
for (skb = (sk)->sk_write_queue.next; \
(skb != (tp)->send_head) && \
(skb != (struct sk_buff *)&(sk)->sk_write_queue); \
skb=skb->next)
#include <net/tcp_ecn.h>
/* This determines how many packets are "in the network" to the best
......@@ -1400,7 +1393,7 @@ tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int n
tcp_minshall_check(tp))));
}
/* This checks if the data bearing packet SKB (usually tp->send_head)
/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
* should be put on the wire right now.
*/
static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
......@@ -1457,7 +1450,7 @@ static __inline__ void __tcp_push_pending_frames(struct sock *sk,
unsigned cur_mss,
int nonagle)
{
struct sk_buff *skb = tp->send_head;
struct sk_buff *skb = sk->sk_send_head;
if (skb) {
if (!tcp_skb_is_last(sk, skb))
......@@ -1477,7 +1470,7 @@ static __inline__ void tcp_push_pending_frames(struct sock *sk,
static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
{
struct sk_buff *skb = tp->send_head;
struct sk_buff *skb = sk->sk_send_head;
return (skb &&
tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
......@@ -2023,8 +2016,8 @@ static inline int tcp_use_frto(const struct sock *sk)
* unsent new data, and the advertised window should allow
* sending it.
*/
return (sysctl_tcp_frto && tp->send_head &&
!after(TCP_SKB_CB(tp->send_head)->end_seq,
return (sysctl_tcp_frto && sk->sk_send_head &&
!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
tp->snd_una + tp->snd_wnd));
}
......
......@@ -1147,6 +1147,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
skb_queue_head_init(&sk->sk_write_queue);
skb_queue_head_init(&sk->sk_error_queue);
sk->sk_send_head = NULL;
init_timer(&sk->sk_timer);
sk->sk_allocation = GFP_KERNEL;
......
......@@ -659,8 +659,8 @@ static inline void skb_entail(struct sock *sk, struct tcp_opt *tp,
TCP_SKB_CB(skb)->sacked = 0;
__skb_queue_tail(&sk->sk_write_queue, skb);
sk_charge_skb(sk, skb);
if (!tp->send_head)
tp->send_head = skb;
if (!sk->sk_send_head)
sk->sk_send_head = skb;
else if (tp->nonagle&TCP_NAGLE_PUSH)
tp->nonagle &= ~TCP_NAGLE_PUSH;
}
......@@ -678,7 +678,7 @@ static inline void tcp_mark_urg(struct tcp_opt *tp, int flags,
static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags,
int mss_now, int nonagle)
{
if (tp->send_head) {
if (sk->sk_send_head) {
struct sk_buff *skb = sk->sk_write_queue.prev;
if (!(flags & MSG_MORE) || forced_push(tp))
tcp_mark_push(tp, skb);
......@@ -718,7 +718,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
int offset = poffset % PAGE_SIZE;
int size = min_t(size_t, psize, PAGE_SIZE - offset);
if (!tp->send_head || (copy = mss_now - skb->len) <= 0) {
if (!sk->sk_send_head || (copy = mss_now - skb->len) <= 0) {
new_segment:
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
......@@ -766,7 +766,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
} else if (skb == tp->send_head)
} else if (skb == sk->sk_send_head)
tcp_push_one(sk, mss_now);
continue;
......@@ -880,7 +880,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
skb = sk->sk_write_queue.prev;
if (!tp->send_head ||
if (!sk->sk_send_head ||
(copy = mss_now - skb->len) <= 0) {
new_segment:
......@@ -1009,7 +1009,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
} else if (skb == tp->send_head)
} else if (skb == sk->sk_send_head)
tcp_push_one(sk, mss_now);
continue;
......@@ -1035,8 +1035,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
do_fault:
if (!skb->len) {
if (tp->send_head == skb)
tp->send_head = NULL;
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, skb->list);
sk_stream_free_skb(sk, skb);
}
......@@ -1907,7 +1907,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_set_ca_state(tp, TCP_CA_Open);
tcp_clear_retrans(tp);
tcp_delack_init(tp);
tp->send_head = NULL;
sk->sk_send_head = NULL;
tp->saw_tstamp = 0;
tcp_sack_reset(tp);
__sk_dst_reset(sk);
......
......@@ -1001,7 +1001,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if (after(end_seq, tp->high_seq))
flag |= FLAG_DATA_LOST;
for_retrans_queue(skb, sk, tp) {
sk_stream_for_retrans_queue(skb, sk) {
u8 sacked = TCP_SKB_CB(skb)->sacked;
int in_sack;
......@@ -1105,7 +1105,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if (lost_retrans && tp->ca_state == TCP_CA_Recovery) {
struct sk_buff *skb;
for_retrans_queue(skb, sk, tp) {
sk_stream_for_retrans_queue(skb, sk) {
if (after(TCP_SKB_CB(skb)->seq, lost_retrans))
break;
if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
......@@ -1171,7 +1171,7 @@ void tcp_enter_frto(struct sock *sk)
tp->undo_marker = tp->snd_una;
tp->undo_retrans = 0;
for_retrans_queue(skb, sk, tp) {
sk_stream_for_retrans_queue(skb, sk) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_RETRANS;
}
tcp_sync_left_out(tp);
......@@ -1194,7 +1194,7 @@ static void tcp_enter_frto_loss(struct sock *sk)
tp->lost_out = 0;
tp->fackets_out = 0;
for_retrans_queue(skb, sk, tp) {
sk_stream_for_retrans_queue(skb, sk) {
cnt++;
TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
......@@ -1267,7 +1267,7 @@ void tcp_enter_loss(struct sock *sk, int how)
if (!how)
tp->undo_marker = tp->snd_una;
for_retrans_queue(skb, sk, tp) {
sk_stream_for_retrans_queue(skb, sk) {
cnt++;
if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
tp->undo_marker = 0;
......@@ -1510,7 +1510,7 @@ tcp_mark_head_lost(struct sock *sk, struct tcp_opt *tp, int packets, u32 high_se
BUG_TRAP(cnt <= tp->packets_out);
for_retrans_queue(skb, sk, tp) {
sk_stream_for_retrans_queue(skb, sk) {
if (--cnt < 0 || after(TCP_SKB_CB(skb)->end_seq, high_seq))
break;
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
......@@ -1542,7 +1542,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_opt *tp)
if (tcp_head_timedout(sk, tp)) {
struct sk_buff *skb;
for_retrans_queue(skb, sk, tp) {
sk_stream_for_retrans_queue(skb, sk) {
if (tcp_skb_timedout(tp, skb) &&
!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
......@@ -1711,7 +1711,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_opt *tp)
{
if (tcp_may_undo(tp)) {
struct sk_buff *skb;
for_retrans_queue(skb, sk, tp) {
sk_stream_for_retrans_queue(skb, sk) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
}
DBGUNDO(sk, tp, "partial loss");
......@@ -2320,7 +2320,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
int acked = 0;
__s32 seq_rtt = -1;
while ((skb = skb_peek(&sk->sk_write_queue)) && skb != tp->send_head) {
while ((skb = skb_peek(&sk->sk_write_queue)) && skb != sk->sk_send_head) {
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
__u8 sacked = scb->sacked;
......@@ -2408,7 +2408,7 @@ static void tcp_ack_probe(struct sock *sk)
/* Was it a usable window open? */
if (!after(TCP_SKB_CB(tp->send_head)->end_seq,
if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
tp->snd_una + tp->snd_wnd)) {
tp->backoff = 0;
tcp_clear_xmit_timer(sk, TCP_TIME_PROBE0);
......@@ -2849,7 +2849,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
* being used to time the probes, and is probably far higher than
* it needs to be for normal retransmission.
*/
if (tp->send_head)
if (sk->sk_send_head)
tcp_ack_probe(sk);
return 1;
......@@ -3877,8 +3877,7 @@ static void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb)
static __inline__ void tcp_data_snd_check(struct sock *sk)
{
struct tcp_opt *tp = tcp_sk(sk);
struct sk_buff *skb = tp->send_head;
struct sk_buff *skb = sk->sk_send_head;
if (skb != NULL)
__tcp_data_snd_check(sk, skb);
......
......@@ -718,6 +718,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
sock_reset_flag(newsk, SOCK_DONE);
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
newsk->sk_send_head = NULL;
newsk->sk_callback_lock = RW_LOCK_UNLOCKED;
skb_queue_head_init(&newsk->sk_error_queue);
newsk->sk_write_space = sk_stream_write_space;
......@@ -775,7 +776,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
tcp_set_ca_state(newtp, TCP_CA_Open);
tcp_init_xmit_timers(newsk);
skb_queue_head_init(&newtp->out_of_order_queue);
newtp->send_head = NULL;
newtp->rcv_wup = req->rcv_isn + 1;
newtp->write_seq = req->snt_isn + 1;
newtp->pushed_seq = newtp->write_seq;
......
......@@ -48,9 +48,9 @@ int sysctl_tcp_retrans_collapse = 1;
static __inline__
void update_send_head(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
{
tp->send_head = skb->next;
if (tp->send_head == (struct sk_buff *)&sk->sk_write_queue)
tp->send_head = NULL;
sk->sk_send_head = skb->next;
if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
sk->sk_send_head = NULL;
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
if (tp->packets_out++ == 0)
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
......@@ -329,8 +329,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
sk_charge_skb(sk, skb);
/* Queue it, remembering where we must start sending. */
if (tp->send_head == NULL)
tp->send_head = skb;
if (sk->sk_send_head == NULL)
sk->sk_send_head = skb;
}
/* Send _single_ skb sitting at the send head. This function requires
......@@ -339,13 +339,13 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
void tcp_push_one(struct sock *sk, unsigned cur_mss)
{
struct tcp_opt *tp = tcp_sk(sk);
struct sk_buff *skb = tp->send_head;
struct sk_buff *skb = sk->sk_send_head;
if (tcp_snd_test(tp, skb, cur_mss, TCP_NAGLE_PUSH)) {
/* Send it out now. */
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (!tcp_transmit_skb(sk, skb_clone(skb, sk->sk_allocation))) {
tp->send_head = NULL;
sk->sk_send_head = NULL;
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
if (tp->packets_out++ == 0)
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
......@@ -572,8 +572,10 @@ int tcp_write_xmit(struct sock *sk, int nonagle)
*/
mss_now = tcp_current_mss(sk, 1);
while((skb = tp->send_head) &&
tcp_snd_test(tp, skb, mss_now, tcp_skb_is_last(sk, skb) ? nonagle : TCP_NAGLE_PUSH)) {
while ((skb = sk->sk_send_head) &&
tcp_snd_test(tp, skb, mss_now,
tcp_skb_is_last(sk, skb) ? nonagle :
TCP_NAGLE_PUSH)) {
if (skb->len > mss_now) {
if (tcp_fragment(sk, skb, mss_now))
break;
......@@ -593,7 +595,7 @@ int tcp_write_xmit(struct sock *sk, int nonagle)
return 0;
}
return !tp->packets_out && tp->send_head;
return !tp->packets_out && sk->sk_send_head;
}
return 0;
}
......@@ -779,7 +781,7 @@ void tcp_simple_retransmit(struct sock *sk)
unsigned int mss = tcp_current_mss(sk, 0);
int lost = 0;
for_retrans_queue(skb, sk, tp) {
sk_stream_for_retrans_queue(skb, sk) {
if (skb->len > mss &&
!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
......@@ -865,7 +867,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
/* Collapse two adjacent packets if worthwhile and we can. */
if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
(skb->len < (cur_mss >> 1)) &&
(skb->next != tp->send_head) &&
(skb->next != sk->sk_send_head) &&
(skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
(skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
(sysctl_tcp_retrans_collapse != 0))
......@@ -940,7 +942,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
/* First pass: retransmit lost packets. */
if (packet_cnt) {
for_retrans_queue(skb, sk, tp) {
sk_stream_for_retrans_queue(skb, sk) {
__u8 sacked = TCP_SKB_CB(skb)->sacked;
if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
......@@ -988,7 +990,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
packet_cnt = 0;
for_retrans_queue(skb, sk, tp) {
sk_stream_for_retrans_queue(skb, sk) {
if(++packet_cnt > tp->fackets_out)
break;
......@@ -1025,7 +1027,7 @@ void tcp_send_fin(struct sock *sk)
*/
mss_now = tcp_current_mss(sk, 1);
if(tp->send_head != NULL) {
if (sk->sk_send_head != NULL) {
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
TCP_SKB_CB(skb)->end_seq++;
tp->write_seq++;
......@@ -1404,7 +1406,7 @@ int tcp_write_wakeup(struct sock *sk)
struct tcp_opt *tp = tcp_sk(sk);
struct sk_buff *skb;
if ((skb = tp->send_head) != NULL &&
if ((skb = sk->sk_send_head) != NULL &&
before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
int err;
int mss = tcp_current_mss(sk, 0);
......@@ -1458,7 +1460,7 @@ void tcp_send_probe0(struct sock *sk)
err = tcp_write_wakeup(sk);
if (tp->packets_out || !tp->send_head) {
if (tp->packets_out || !sk->sk_send_head) {
/* Cancel probe timer, if it is not required. */
tp->probes_out = 0;
tp->backoff = 0;
......
......@@ -269,7 +269,7 @@ static void tcp_probe_timer(struct sock *sk)
struct tcp_opt *tp = tcp_sk(sk);
int max_probes;
if (tp->packets_out || !tp->send_head) {
if (tp->packets_out || !sk->sk_send_head) {
tp->probes_out = 0;
return;
}
......@@ -606,7 +606,7 @@ static void tcp_keepalive_timer (unsigned long data)
elapsed = keepalive_time_when(tp);
/* It is alive without keepalive 8) */
if (tp->packets_out || tp->send_head)
if (tp->packets_out || sk->sk_send_head)
goto resched;
elapsed = tcp_time_stamp - tp->rcv_tstamp;
......