Commit 46778cd1 authored by Kuniyuki Iwashima, committed by David S. Miller

tcp: Fix data-races around sysctl_tcp_reordering.

While sysctl_tcp_reordering is being read, it can be changed concurrently.
Thus, we need to add READ_ONCE() to its readers.

Fixes: 1da177e4 ("Linux-2.6.12-rc2")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4177f545
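
As background for the diff below: READ_ONCE() pairs a lockless reader with a lockless writer so the compiler cannot tear, refetch, or cache the load. Here is a minimal userspace sketch of that pattern, approximating READ_ONCE()/WRITE_ONCE() with the volatile casts the kernel itself uses for simple scalars; the two threads and the plain global are invented stand-ins for the sysctl, not the kernel's code (build with cc -pthread):

/* Userspace sketch of the READ_ONCE()/WRITE_ONCE() pattern this commit
 * applies. The macros mirror the kernel's volatile-cast definitions for
 * scalar types; everything else is illustrative. */
#include <pthread.h>
#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static int sysctl_tcp_reordering = 3;  /* stand-in for the net.ipv4 sysctl */

/* Reader: load the tunable exactly once and work with the snapshot. */
static void *reader(void *arg)
{
	int reordering = READ_ONCE(sysctl_tcp_reordering);

	(void)arg;
	printf("reader snapshot: %d\n", reordering);
	return NULL;
}

/* Writer: models a concurrent "sysctl -w net.ipv4.tcp_reordering=300". */
static void *writer(void *arg)
{
	(void)arg;
	WRITE_ONCE(sysctl_tcp_reordering, 300);
	return NULL;
}

int main(void)
{
	pthread_t r, w;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(r, NULL);
	pthread_join(w, NULL);
	return 0;
}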
net/ipv4/tcp.c:
@@ -441,7 +441,7 @@ void tcp_init_sock(struct sock *sk)
 	tp->snd_cwnd_clamp = ~0;
 	tp->mss_cache = TCP_MSS_DEFAULT;
 
-	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
+	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
 	tcp_assign_congestion_control(sk);
 
 	tp->tsoffset = 0;
...
net/ipv4/tcp_input.c:
@@ -2139,6 +2139,7 @@ void tcp_enter_loss(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct net *net = sock_net(sk);
 	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
+	u8 reordering;
 
 	tcp_timeout_mark_lost(sk);
 
@@ -2159,10 +2160,12 @@ void tcp_enter_loss(struct sock *sk)
 	/* Timeout in disordered state after receiving substantial DUPACKs
 	 * suggests that the degree of reordering is over-estimated.
 	 */
+	reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
+
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
-	    tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
+	    tp->sacked_out >= reordering)
 		tp->reordering = min_t(unsigned int, tp->reordering,
-				       net->ipv4.sysctl_tcp_reordering);
+				       reordering);
 	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
 	tcp_ecn_queue_cwr(tp);
@@ -3464,7 +3467,8 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 	 * new SACK or ECE mark may first advance cwnd here and later reduce
 	 * cwnd in tcp_fastretrans_alert() based on more states.
 	 */
-	if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
+	if (tcp_sk(sk)->reordering >
+	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering))
 		return flag & FLAG_FORWARD_PROGRESS;
 
 	return flag & FLAG_DATA_ACKED;
...
net/ipv4/tcp_metrics.c:
@@ -428,7 +428,8 @@ void tcp_update_metrics(struct sock *sk)
 	if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
 		val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 		if (val < tp->reordering &&
-		    tp->reordering != net->ipv4.sysctl_tcp_reordering)
+		    tp->reordering !=
+		    READ_ONCE(net->ipv4.sysctl_tcp_reordering))
 			tcp_metric_set(tm, TCP_METRIC_REORDERING,
 				       tp->reordering);
 	}
...
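
Note on the tcp_enter_loss() hunk: the sysctl is deliberately read once into the local variable "reordering", so the comparison against tp->sacked_out and the min_t() clamp are guaranteed to see the same value even if the sysctl is rewritten in between. A hedged distillation of that idiom, reusing the READ_ONCE() macro from the sketch above; the function and parameter names are invented for illustration:

/* Snapshot a racy tunable once, then use the snapshot consistently.
 * "clamp_to_tunable", "value", "tunable", and "limit" are illustrative
 * names, not kernel identifiers. */
static int clamp_to_tunable(int value, const int *tunable)
{
	int limit = READ_ONCE(*tunable);	/* single load */

	if (value >= limit)			/* the test ... */
		value = limit;			/* ... and the clamp agree */
	return value;
}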