Commit ee733cd8 authored by David S. Miller

Merge branch 'tcp-minor-adjustments-for-low-pacing-rates'

Eric Dumazet says:

====================
tcp: minor adjustments for low pacing rates

After the pacing horizon addition, we have to adjust how we arm the RTO
timer, otherwise we might freeze flows with very low pacing rates.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b94c280d 916e6d1a
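For illustration, the pacing-delay computation central to this series can be sketched in standalone userspace C. With earliest-departure-time (EDT) pacing, tcp_wstamp_ns holds the earliest time the flow's next packet may leave; on a very slow flow that can lie seconds in the future, so a timer armed at plain "now + rto" could fire before the packet was even allowed out. This is not kernel code: the struct, HZ value, and helper names are illustrative stand-ins mirroring the fields visible in the diff below.

#include <stdint.h>
#include <stdio.h>

#define HZ 1000ULL                          /* assumed tick rate for the sketch */
#define NSEC_PER_JIFFY (1000000000ULL / HZ)

struct flow {
	int64_t tcp_wstamp_ns;              /* next packet's earliest departure time */
	int64_t tcp_clock_cache;            /* cached "now", in nanoseconds */
};

static uint64_t nsecs_to_jiffies(int64_t ns)
{
	return (uint64_t)ns / NSEC_PER_JIFFY;
}

/* Same shape as the new tcp_pacing_delay(): jiffies until the pacer
 * releases the next packet, or 0 if it may go out immediately. */
static uint64_t pacing_delay(const struct flow *f)
{
	int64_t delay = f->tcp_wstamp_ns - f->tcp_clock_cache;

	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

int main(void)
{
	/* A very slow flow: next departure is 4 seconds in the future. */
	struct flow f = { .tcp_wstamp_ns = 5000000000LL,
			  .tcp_clock_cache = 1000000000LL };

	/* Any RTO/probe timer should be pushed out by this many jiffies,
	 * so it cannot fire before pacing lets the packet leave. */
	printf("push timer out by %llu jiffies\n",
	       (unsigned long long)pacing_delay(&f));
	return 0;
}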
diff --git a/include/net/tcp.h b/include/net/tcp.h
@@ -1289,26 +1289,22 @@ static inline bool tcp_needs_internal_pacing(const struct sock *sk)
 	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
 }
 
-/* Return in jiffies the delay before one skb is sent.
- * If @skb is NULL, we look at EDT for next packet being sent on the socket.
+/* Estimates in how many jiffies next packet for this flow can be sent.
+ * Scheduling a retransmit timer too early would be silly.
  */
-static inline unsigned long tcp_pacing_delay(const struct sock *sk,
-					     const struct sk_buff *skb)
+static inline unsigned long tcp_pacing_delay(const struct sock *sk)
 {
-	s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns;
+	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
 
-	pacing_delay -= tcp_sk(sk)->tcp_clock_cache;
-
-	return pacing_delay > 0 ? nsecs_to_jiffies(pacing_delay) : 0;
+	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
 }
 
 static inline void tcp_reset_xmit_timer(struct sock *sk,
 					const int what,
 					unsigned long when,
-					const unsigned long max_when,
-					const struct sk_buff *skb)
+					const unsigned long max_when)
 {
-	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb),
+	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
 				  max_when);
 }
 
@@ -1336,8 +1332,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
 {
 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-				     tcp_probe0_base(sk), TCP_RTO_MAX,
-				     NULL);
+				     tcp_probe0_base(sk), TCP_RTO_MAX);
 }
 
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
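Worth noting from this hunk: tcp_pacing_delay() no longer takes an skb, so every caller pays the same flow-level delay derived from tcp_wstamp_ns, and the final expiry is still bounded by max_when. Continuing the sketch above, the clamp below is my stand-in for what inet_csk_reset_xmit_timer() does in the kernel; it is not the kernel's code.

/* Mirrors the new tcp_reset_xmit_timer(): push the expiry out by the
 * pacing delay, then bound it by max_when (the kernel's clamp lives in
 * inet_csk_reset_xmit_timer(), not reproduced here). */
static uint64_t reset_xmit_timer(const struct flow *f,
				 uint64_t when, uint64_t max_when)
{
	uint64_t expiry = when + pacing_delay(f);

	return expiry < max_when ? expiry : max_when;
}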
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
@@ -3014,7 +3014,7 @@ void tcp_rearm_rto(struct sock *sk)
 			rto = usecs_to_jiffies(max_t(int, delta_us, 1));
 		}
 		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
-				     TCP_RTO_MAX, tcp_rtx_queue_head(sk));
+				     TCP_RTO_MAX);
 	}
 }
 
@@ -3291,7 +3291,7 @@ static void tcp_ack_probe(struct sock *sk)
 		unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
 
 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-				     when, TCP_RTO_MAX, NULL);
+				     when, TCP_RTO_MAX);
 	}
 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
@@ -2593,8 +2593,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
 	if (rto_delta_us > 0)
 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
-	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
-			     TCP_RTO_MAX, NULL);
+	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
 	return true;
 }
 
@@ -3113,6 +3112,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb, *rtx_head, *hole = NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
+	bool rearm_timer = false;
 	u32 max_segs;
 	int mib_idx;
 
@@ -3135,7 +3135,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
 		if (segs <= 0)
-			return;
+			break;
 		sacked = TCP_SKB_CB(skb)->sacked;
 		/* In case tcp_shift_skb_data() have aggregated large skbs,
 		 * we need to make sure not sending too bigs TSO packets
@@ -3160,10 +3160,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 			continue;
 
 		if (tcp_small_queue_check(sk, skb, 1))
-			return;
+			break;
 
 		if (tcp_retransmit_skb(sk, skb, segs))
-			return;
+			break;
 
 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
@@ -3172,11 +3172,13 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		if (skb == rtx_head &&
 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
-			tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-					     inet_csk(sk)->icsk_rto,
-					     TCP_RTO_MAX,
-					     skb);
+			rearm_timer = true;
 	}
+	if (rearm_timer)
+		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+				     inet_csk(sk)->icsk_rto,
+				     TCP_RTO_MAX);
 }
 
 /* We allow to exceed memory limits for FIN packets to expedite
@@ -3907,7 +3909,7 @@ void tcp_send_probe0(struct sock *sk)
 		 */
 		timeout = TCP_RESOURCE_PROBE_INTERVAL;
 	}
-	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
+	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
 }
 
 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
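The tcp_xmit_retransmit_queue() change above is a control-flow refactor: early returns become breaks, and the rearm decision is latched in a rearm_timer flag so the timer is armed at most once, after the loop. A minimal standalone sketch of that pattern follows; the types and helpers are illustrative stand-ins, not the kernel's.

#include <stdbool.h>
#include <stddef.h>

struct pkt { bool lost; };

/* Stand-ins for tcp_retransmit_skb() and tcp_reset_xmit_timer(). */
static bool retransmit(struct pkt *p) { return p->lost; }
static void rearm_rto(void) { /* arm ICSK_TIME_RETRANS here */ }

static void retransmit_queue(struct pkt *q, size_t n)
{
	bool rearm_timer = false;

	for (size_t i = 0; i < n; i++) {
		if (!retransmit(&q[i]))
			break;          /* was "return": the rearm was skipped */
		if (i == 0)             /* retransmitted the queue head */
			rearm_timer = true;
	}
	if (rearm_timer)
		rearm_rto();            /* single, deferred rearm after the loop */
}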