Commit b08d6cb2 authored by Ilpo Järvinen, committed by David S. Miller

[TCP]: Limit processing lost_retrans loop to work-to-do cases

This addition of lost_retrans_low to tcp_sock might be
unnecessary; it's not clear how often the lost_retrans worker
is executed when there isn't work to do.
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f785a8e2
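
The gate this patch adds can be modeled outside the kernel. Below is a minimal user-space sketch, not kernel code: struct sender and worth_scanning() are made-up names standing in for the tcp_sock fields, while before()/after() mirror the wraparound-safe sequence comparisons in include/net/tcp.h. The point of the patch is that the costly lost_retrans scan can only find work once the highest SACKed sequence passes the lowest snd_nxt recorded for a still-outstanding retransmission.

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe sequence comparison, modeled on the kernel's
 * before()/after() helpers: "a < b" in 32-bit sequence space. */
static int before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* Hypothetical stand-in for the relevant tcp_sock fields. */
struct sender {
	uint32_t retrans_out;      /* retransmitted segments not yet resolved */
	uint32_t lost_retrans_low; /* lowest snd_nxt among those rexmits */
};

/* The gate: scanning can only find something once the highest SACKed
 * sequence has passed the lowest outstanding retransmission's snd_nxt. */
static int worth_scanning(const struct sender *s, uint32_t highest_sack_end_seq)
{
	return s->retrans_out &&
	       after(highest_sack_end_seq, s->lost_retrans_low);
}

int main(void)
{
	struct sender s = { .retrans_out = 2, .lost_retrans_low = 10000 };

	printf("%d\n", worth_scanning(&s, 9000));  /* 0: SACKs below every rexmit */
	printf("%d\n", worth_scanning(&s, 15000)); /* 1: a rexmit may now be lost */
	return 0;
}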
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -348,6 +348,8 @@ struct tcp_sock {
 	int	lost_cnt_hint;
 	int	retransmit_cnt_hint;
 
+	u32	lost_retrans_low;	/* Sent seq after any rxmit (lowest) */
+
 	u16	advmss;		/* Advertised MSS */
 	u16	prior_ssthresh;	/* ssthresh saved at recovery start */
 	u32	lost_out;	/* Lost packets */
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1112,7 +1112,8 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
  *
  * Search retransmitted skbs from write_queue that were sent when snd_nxt was
  * less than what is now known to be received by the other end (derived from
- * SACK blocks by the caller).
+ * SACK blocks by the caller). Also calculate the lowest snd_nxt among the
+ * remaining retransmitted skbs to avoid some costly processing per ACKs.
  */
 static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
 {
@@ -1120,6 +1121,7 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
 	struct sk_buff *skb;
 	int flag = 0;
 	int cnt = 0;
+	u32 new_low_seq = 0;
 
 	tcp_for_write_queue(skb, sk) {
 		u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
@@ -1151,9 +1153,15 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
 				NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
 			}
 		} else {
+			if (!new_low_seq || before(ack_seq, new_low_seq))
+				new_low_seq = ack_seq;
 			cnt += tcp_skb_pcount(skb);
 		}
 	}
+
+	if (tp->retrans_out)
+		tp->lost_retrans_low = new_low_seq;
+
 	return flag;
 }
 
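A note on the new_low_seq bookkeeping above: the minimum is taken with before() rather than a plain "<", so it stays correct across sequence-number wraparound, and 0 doubles as "nothing seen yet". A standalone sketch of the same idiom (made-up numbers, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* before() as in include/net/tcp.h: wraparound-safe "a < b". */
static int before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	/* ack_seq values (snd_nxt at retransmit time) of rexmits that the
	 * scan left outstanding; made-up numbers for illustration. */
	uint32_t ack_seqs[] = { 4000, 1200, 4294967285u, 3000 };
	uint32_t new_low_seq = 0; /* 0 means "none seen yet", as in the patch */

	for (int i = 0; i < 4; i++)
		if (!new_low_seq || before(ack_seqs[i], new_low_seq))
			new_low_seq = ack_seqs[i];

	/* Prints 4294967285: in sequence space it sits a short signed
	 * distance behind the other values, so a plain "<" would get
	 * the wraparound case wrong. */
	printf("new_low_seq = %u\n", new_low_seq);
	return 0;
}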
@@ -1481,8 +1489,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 		}
 	}
 
-	if (tp->retrans_out && highest_sack_end_seq &&
-	    after(highest_sack_end_seq, tp->high_seq) &&
+	if (tp->retrans_out &&
+	    after(highest_sack_end_seq, tp->lost_retrans_low) &&
 	    icsk->icsk_ca_state == TCP_CA_Recovery)
 		flag |= tcp_mark_lost_retrans(sk, highest_sack_end_seq);
 
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1914,6 +1914,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 			printk(KERN_DEBUG "retrans_out leaked.\n");
 	}
 #endif
+	if (!tp->retrans_out)
+		tp->lost_retrans_low = tp->snd_nxt;
 	TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
 	tp->retrans_out += tcp_skb_pcount(skb);
 
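The hunk above re-arms the low mark: once retrans_out has drained to zero, the next retransmission records the current snd_nxt as the new lost_retrans_low. A user-space sketch of that arming step (struct sender is a made-up stand-in, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant tcp_sock fields. */
struct sender {
	uint32_t snd_nxt;
	uint32_t retrans_out;
	uint32_t lost_retrans_low;
};

static void retransmit_one(struct sender *s)
{
	/* With no rexmits outstanding the old low mark is stale, so the
	 * first new retransmission re-arms it at the current snd_nxt. */
	if (!s->retrans_out)
		s->lost_retrans_low = s->snd_nxt;
	s->retrans_out++;
}

int main(void)
{
	struct sender s = { .snd_nxt = 5000 };

	retransmit_one(&s); /* arms lost_retrans_low = 5000 */
	s.snd_nxt = 7000;
	retransmit_one(&s); /* low mark stays at 5000 */
	printf("low=%u out=%u\n", s.lost_retrans_low, s.retrans_out);
	return 0;
}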