Commit c634e34f authored by Yousuk Seung, committed by David S. Miller

tcp: add ece_ack flag to reno sack functions

Pass a boolean flag that conveys the ECE state of the current ACK to the
reno sack functions. This is a pure refactor in preparation for future
patches that improve tracking of delivered counts.
Signed-off-by: Yousuk Seung <ysseung@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fdb7eb21
net/ipv4/tcp_input.c

@@ -1893,7 +1893,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 }
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
-static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
+static void tcp_add_reno_sack(struct sock *sk, int num_dupack, bool ece_ack)
 {
 	if (num_dupack) {
 		struct tcp_sock *tp = tcp_sk(sk);
@@ -1911,7 +1911,7 @@ static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
-static void tcp_remove_reno_sacks(struct sock *sk, int acked)
+static void tcp_remove_reno_sacks(struct sock *sk, int acked, bool ece_ack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2697,7 +2697,7 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
 		 * delivered. Lower inflight to clock out (re)tranmissions.
 		 */
 		if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
-			tcp_add_reno_sack(sk, num_dupack);
+			tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE);
 		else if (flag & FLAG_SND_UNA_ADVANCED)
 			tcp_reset_reno_sack(tp);
 	}
@@ -2779,6 +2779,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fast_rexmit = 0, flag = *ack_flag;
+	bool ece_ack = flag & FLAG_ECE;
 	bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
 				      tcp_force_fast_retransmit(sk));
@@ -2787,7 +2788,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	/* Now state machine starts.
 	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
-	if (flag & FLAG_ECE)
+	if (ece_ack)
 		tp->prior_ssthresh = 0;
 
 	/* B. In all the states check for reneging SACKs. */
@@ -2828,7 +2829,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	case TCP_CA_Recovery:
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
 			if (tcp_is_reno(tp))
-				tcp_add_reno_sack(sk, num_dupack);
+				tcp_add_reno_sack(sk, num_dupack, ece_ack);
 		} else {
 			if (tcp_try_undo_partial(sk, prior_snd_una))
 				return;
@@ -2853,7 +2854,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		if (tcp_is_reno(tp)) {
 			if (flag & FLAG_SND_UNA_ADVANCED)
 				tcp_reset_reno_sack(tp);
-			tcp_add_reno_sack(sk, num_dupack);
+			tcp_add_reno_sack(sk, num_dupack, ece_ack);
 		}
 
 		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
@@ -2877,7 +2878,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	}
 
 	/* Otherwise enter Recovery state */
-	tcp_enter_recovery(sk, (flag & FLAG_ECE));
+	tcp_enter_recovery(sk, ece_ack);
 	fast_rexmit = 1;
 }
@@ -3053,7 +3054,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			       u32 prior_snd_una,
-			       struct tcp_sacktag_state *sack)
+			       struct tcp_sacktag_state *sack, bool ece_ack)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u64 first_ackt, last_ackt;
@@ -3191,7 +3192,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 	}
 
 	if (tcp_is_reno(tp)) {
-		tcp_remove_reno_sacks(sk, pkts_acked);
+		tcp_remove_reno_sacks(sk, pkts_acked, ece_ack);
 
 		/* If any of the cumulatively ACKed segments was
 		 * retransmitted, non-SACK case cannot confirm that
@@ -3685,7 +3686,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		goto no_queue;
 
 	/* See if we can take anything off of the retransmit queue. */
-	flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state);
+	flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state,
+				    flag & FLAG_ECE);
 
 	tcp_rack_update_reo_wnd(sk, &rs);
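For readers outside the kernel tree, here is a minimal self-contained C sketch of the pattern this patch applies: compute the ECE bit once from the ACK's flag word and hand it to the helpers as a bool. Only the FLAG_ECE name and the shape of the calls echo tcp_input.c; the stub functions, main(), and the printed output are hypothetical scaffolding, not kernel code.

	#include <stdbool.h>
	#include <stdio.h>

	#define FLAG_ECE 0x40	/* bit name from tcp_input.c; value illustrative here */

	/* Stub in the role of tcp_add_reno_sack(): the ECE state of the
	 * current ACK now rides along with the dupack count, so later
	 * patches can use it when accounting delivered segments.
	 */
	static void add_reno_sack(int num_dupack, bool ece_ack)
	{
		printf("dupacks=%d ece=%d\n", num_dupack, ece_ack);
	}

	/* Stub in the role of tcp_fastretrans_alert(): derive the boolean
	 * once from the flag word instead of re-testing FLAG_ECE at each
	 * call site.
	 */
	static void fastretrans_alert(int flag, int num_dupack)
	{
		bool ece_ack = flag & FLAG_ECE;

		add_reno_sack(num_dupack, ece_ack);
	}

	int main(void)
	{
		fastretrans_alert(FLAG_ECE, 2);	/* ACK carrying ECE, two dupacks */
		fastretrans_alert(0, 1);	/* plain ACK, one dupack */
		return 0;
	}

Caching flag & FLAG_ECE in a local, as the patch does in tcp_fastretrans_alert(), keeps the mask test in one place and lets the reno helpers take the value without needing to know about the flag word at all.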