Commit 19119f29 authored by Eric Dumazet, committed by David S. Miller

tcp: take care of compressed acks in tcp_add_reno_sack()

Neal pointed out that non-SACK flows might suffer from the ACK compression
added in the following patch ("tcp: implement coalescing on backlog queue").

Instead of tweaking tcp_add_backlog(), we can take into account
how many ACKs were coalesced; this information is available in
skb_shinfo(skb)->gso_segs.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ebeef4bc
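For context, the companion patch coalesces pure ACKs on the socket backlog and leaves the count of merged ACKs in skb_shinfo()->gso_segs of the surviving skb. A minimal sketch of that producer side, using a hypothetical helper name (an illustration of the idea, not the companion patch's code verbatim):

static void backlog_account_coalesced_ack(struct sk_buff *tail,
                                          struct sk_buff *skb)
{
        struct skb_shared_info *shinfo = skb_shinfo(tail);
        /* A pure ACK that never went through GSO has gso_segs == 0,
         * so count it as one ACK.
         */
        u32 segs = max_t(u16, 1, shinfo->gso_segs) +
                   max_t(u16, 1, skb_shinfo(skb)->gso_segs);

        /* gso_segs is a u16: clamp so the running count cannot wrap. */
        shinfo->gso_segs = min_t(u32, segs, 0xFFFF);
}

The consumer side of that contract is what this commit adds to tcp_ack() below.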
net/ipv4/tcp_input.c

@@ -1865,16 +1865,20 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
 
-static void tcp_add_reno_sack(struct sock *sk)
+static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
 {
-        struct tcp_sock *tp = tcp_sk(sk);
-        u32 prior_sacked = tp->sacked_out;
+        if (num_dupack) {
+                struct tcp_sock *tp = tcp_sk(sk);
+                u32 prior_sacked = tp->sacked_out;
+                s32 delivered;
 
-        tp->sacked_out++;
-        tcp_check_reno_reordering(sk, 0);
-        if (tp->sacked_out > prior_sacked)
-                tp->delivered++; /* Some out-of-order packet is delivered */
-        tcp_verify_left_out(tp);
+                tp->sacked_out += num_dupack;
+                tcp_check_reno_reordering(sk, 0);
+                delivered = tp->sacked_out - prior_sacked;
+                if (delivered > 0)
+                        tp->delivered += delivered;
+                tcp_verify_left_out(tp);
+        }
 }
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
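Note the shape of this first hunk: the num_dupack check moved inside tcp_add_reno_sack() itself, so callers may now pass the count unconditionally (the CA_Recovery and Reno default-case hunks below drop their old is_dupack guards). The delivered accounting also stays exact for batches: with num_dupack = 3, sacked_out advances by three in one call, and tp->delivered is credited with whatever delta survives tcp_check_reno_reordering()'s clamping, matching what three separate one-dupack calls would have produced.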
@@ -2636,7 +2640,7 @@ void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
  * recovered or spurious. Otherwise retransmits more on partial ACKs.
  */
-static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
+static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
                              int *rexmit)
 {
         struct tcp_sock *tp = tcp_sk(sk);
@@ -2655,7 +2659,7 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
                         return;
 
                 if (after(tp->snd_nxt, tp->high_seq)) {
-                        if (flag & FLAG_DATA_SACKED || is_dupack)
+                        if (flag & FLAG_DATA_SACKED || num_dupack)
                                 tp->frto = 0; /* Step 3.a. loss was real */
                 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
                         tp->high_seq = tp->snd_nxt;
@@ -2681,8 +2685,8 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
                 /* A Reno DUPACK means new data in F-RTO step 2.b above are
                  * delivered. Lower inflight to clock out (re)tranmissions.
                  */
-                if (after(tp->snd_nxt, tp->high_seq) && is_dupack)
-                        tcp_add_reno_sack(sk);
+                if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
+                        tcp_add_reno_sack(sk, num_dupack);
                 else if (flag & FLAG_SND_UNA_ADVANCED)
                         tcp_reset_reno_sack(tp);
         }
@@ -2759,13 +2763,13 @@ static bool tcp_force_fast_retransmit(struct sock *sk)
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
-                                  bool is_dupack, int *ack_flag, int *rexmit)
+                                  int num_dupack, int *ack_flag, int *rexmit)
 {
         struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
         int fast_rexmit = 0, flag = *ack_flag;
-        bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
-                                     tcp_force_fast_retransmit(sk));
+        bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
+                                      tcp_force_fast_retransmit(sk));
 
         if (!tp->packets_out && tp->sacked_out)
                 tp->sacked_out = 0;
@@ -2812,8 +2816,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
         switch (icsk->icsk_ca_state) {
         case TCP_CA_Recovery:
                 if (!(flag & FLAG_SND_UNA_ADVANCED)) {
-                        if (tcp_is_reno(tp) && is_dupack)
-                                tcp_add_reno_sack(sk);
+                        if (tcp_is_reno(tp))
+                                tcp_add_reno_sack(sk, num_dupack);
                 } else {
                         if (tcp_try_undo_partial(sk, prior_snd_una))
                                 return;
@@ -2828,7 +2832,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
                 tcp_identify_packet_loss(sk, ack_flag);
                 break;
         case TCP_CA_Loss:
-                tcp_process_loss(sk, flag, is_dupack, rexmit);
+                tcp_process_loss(sk, flag, num_dupack, rexmit);
                 tcp_identify_packet_loss(sk, ack_flag);
                 if (!(icsk->icsk_ca_state == TCP_CA_Open ||
                       (*ack_flag & FLAG_LOST_RETRANS)))
@@ -2839,8 +2843,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
                 if (tcp_is_reno(tp)) {
                         if (flag & FLAG_SND_UNA_ADVANCED)
                                 tcp_reset_reno_sack(tp);
-                        if (is_dupack)
-                                tcp_add_reno_sack(sk);
+                        tcp_add_reno_sack(sk, num_dupack);
                 }
 
         if (icsk->icsk_ca_state <= TCP_CA_Disorder)
@@ -3562,7 +3565,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
         bool is_sack_reneg = tp->is_sack_reneg;
         u32 ack_seq = TCP_SKB_CB(skb)->seq;
         u32 ack = TCP_SKB_CB(skb)->ack_seq;
-        bool is_dupack = false;
+        int num_dupack = 0;
         int prior_packets = tp->packets_out;
         u32 delivered = tp->delivered;
         u32 lost = tp->lost;
@@ -3673,8 +3676,13 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                 tcp_set_xmit_timer(sk);
 
         if (tcp_ack_is_dubious(sk, flag)) {
-                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-                tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+                if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
+                        num_dupack = 1;
+                        /* Consider if pure acks were aggregated in tcp_add_backlog() */
+                        if (!(flag & FLAG_DATA))
+                                num_dupack = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+                }
+                tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
                                       &rexmit);
         }
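This hunk is the heart of the fix: only a pure ACK (no FLAG_DATA) may interpret gso_segs as a count of coalesced ACKs, because on a data-bearing skb gso_segs describes GSO/GRO data segments and must not inflate the dupack count. The max_t(u16, 1, ...) floor covers ordinary, never-coalesced ACKs, whose gso_segs is 0.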
@@ -3692,7 +3700,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 no_queue:
         /* If data was DSACKed, see if we can undo a cwnd reduction. */
         if (flag & FLAG_DSACKING_ACK) {
-                tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+                tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
                                       &rexmit);
                 tcp_newly_delivered(sk, delivered, flag);
         }
@@ -3717,7 +3725,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
         if (TCP_SKB_CB(skb)->sacked) {
                 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
                                                 &sack_state);
-                tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+                tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
                                       &rexmit);
                 tcp_newly_delivered(sk, delivered, flag);
                 tcp_xmit_recovery(sk, rexmit);