Commit 91a79387 authored by David S. Miller

[TCP]: Abstract out all settings of tcp_opt->ca_state into a function.

parent 424be00c
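
The change is mechanical: a new inline helper wraps the assignment to tp->ca_state, and every direct write of the field is converted into a call of that helper. As a standalone illustration of the pattern (a simplified sketch, not code from the kernel tree; the struct and function names below are stand-ins), routing a raw field write through an inline setter creates a single choke point where later changes such as validation or tracing can be added without touching every call site:

/* Simplified illustration of the accessor pattern (stand-in names,
 * not the real kernel definitions): all writes of the state field go
 * through one inline setter instead of assigning the field directly.
 */
struct conn {
	unsigned char ca_state;		/* stand-in for tcp_opt->ca_state */
};

static inline void conn_set_ca_state(struct conn *c, unsigned char state)
{
	/* Single place to later add checks, tracing or callbacks. */
	c->ca_state = state;
}

static void caller(struct conn *c)
{
	conn_set_ca_state(c, 0);	/* instead of: c->ca_state = 0; */
}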
@@ -1212,6 +1212,11 @@ static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
 	return max(tp->snd_cwnd >> 1U, 2U);
 }
 
+static inline void tcp_set_ca_state(struct tcp_opt *tp, u8 ca_state)
+{
+	tp->ca_state = ca_state;
+}
+
 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
  * The exception is rate halving phase, when cwnd is decreasing towards
  * ssthresh.
@@ -1271,7 +1276,7 @@ static inline void tcp_enter_cwr(struct tcp_opt *tp)
 	tp->prior_ssthresh = 0;
 	if (tp->ca_state < TCP_CA_CWR) {
 		__tcp_enter_cwr(tp);
-		tp->ca_state = TCP_CA_CWR;
+		tcp_set_ca_state(tp, TCP_CA_CWR);
 	}
 }
@@ -2158,7 +2158,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->packets_out = 0;
 	tp->snd_ssthresh = 0x7fffffff;
 	tp->snd_cwnd_cnt = 0;
-	tp->ca_state = TCP_CA_Open;
+	tcp_set_ca_state(tp, TCP_CA_Open);
 	tcp_clear_retrans(tp);
 	tcp_delack_init(tp);
 	tp->send_head = NULL;
@@ -1003,7 +1003,7 @@ void tcp_enter_frto(struct sock *sk)
 	}
 	tcp_sync_left_out(tp);
-	tp->ca_state = TCP_CA_Open;
+	tcp_set_ca_state(tp, TCP_CA_Open);
 	tp->frto_highmark = tp->snd_nxt;
 }
@@ -1049,7 +1049,7 @@ void tcp_enter_frto_loss(struct sock *sk)
 	tp->reordering = min_t(unsigned int, tp->reordering,
 					     sysctl_tcp_reordering);
-	tp->ca_state = TCP_CA_Loss;
+	tcp_set_ca_state(tp, TCP_CA_Loss);
 	tp->high_seq = tp->frto_highmark;
 	TCP_ECN_queue_cwr(tp);
 }
@@ -1112,7 +1112,7 @@ void tcp_enter_loss(struct sock *sk, int how)
 	tp->reordering = min_t(unsigned int, tp->reordering,
 					     sysctl_tcp_reordering);
-	tp->ca_state = TCP_CA_Loss;
+	tcp_set_ca_state(tp, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
 	TCP_ECN_queue_cwr(tp);
 }
@@ -1489,7 +1489,7 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_opt *tp)
 		tcp_moderate_cwnd(tp);
 		return 1;
 	}
-	tp->ca_state = TCP_CA_Open;
+	tcp_set_ca_state(tp, TCP_CA_Open);
 	return 0;
 }
@@ -1549,7 +1549,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_opt *tp)
 		tp->retransmits = 0;
 		tp->undo_marker = 0;
 		if (!IsReno(tp))
-			tp->ca_state = TCP_CA_Open;
+			tcp_set_ca_state(tp, TCP_CA_Open);
 		return 1;
 	}
 	return 0;
@@ -1583,7 +1583,7 @@ static void tcp_try_to_open(struct sock *sk, struct tcp_opt *tp, int flag)
 			state = TCP_CA_Disorder;
 		if (tp->ca_state != state) {
-			tp->ca_state = state;
+			tcp_set_ca_state(tp, state);
 			tp->high_seq = tp->snd_nxt;
 		}
 		tcp_moderate_cwnd(tp);
@@ -1658,7 +1658,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 			 * is ACKed for CWR bit to reach receiver. */
 			if (tp->snd_una != tp->high_seq) {
 				tcp_complete_cwr(tp);
-				tp->ca_state = TCP_CA_Open;
+				tcp_set_ca_state(tp, TCP_CA_Open);
 			}
 			break;
@@ -1669,7 +1669,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 			 * catching for all duplicate ACKs. */
 			    IsReno(tp) || tp->snd_una != tp->high_seq) {
 				tp->undo_marker = 0;
-				tp->ca_state = TCP_CA_Open;
+				tcp_set_ca_state(tp, TCP_CA_Open);
 			}
 			break;
@@ -1743,7 +1743,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		}
 		tp->snd_cwnd_cnt = 0;
-		tp->ca_state = TCP_CA_Recovery;
+		tcp_set_ca_state(tp, TCP_CA_Recovery);
 	}
 	if (is_dupack || tcp_head_timedout(sk, tp))
@@ -769,7 +769,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
 		newtp->frto_counter = 0;
 		newtp->frto_highmark = 0;
-		newtp->ca_state = TCP_CA_Open;
+		tcp_set_ca_state(newtp, TCP_CA_Open);
 		tcp_init_xmit_timers(newsk);
 		skb_queue_head_init(&newtp->out_of_order_queue);
 		newtp->send_head = NULL;
@@ -869,7 +869,7 @@ void tcp_simple_retransmit(struct sock *sk)
 		tp->snd_ssthresh = tcp_current_ssthresh(tp);
 		tp->prior_ssthresh = 0;
 		tp->undo_marker = 0;
-		tp->ca_state = TCP_CA_Loss;
+		tcp_set_ca_state(tp, TCP_CA_Loss);
 	}
 	tcp_xmit_retransmit_queue(sk);
 }
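
With every transition now funneled through tcp_set_ca_state(), later work only needs to touch the helper to observe or police state changes. Purely as a hypothetical follow-up sketch (this commit adds no such hook, and the check below is invented for illustration), the helper could grow a sanity check in exactly one place:

/* Hypothetical extension, not part of this commit: because all call
 * sites already use the helper, a bounds check on the new state could
 * be added in a single spot instead of at every assignment.
 */
static inline void tcp_set_ca_state(struct tcp_opt *tp, u8 ca_state)
{
	BUG_ON(ca_state > TCP_CA_Loss);	/* illustrative check only */
	tp->ca_state = ca_state;
}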