Commit 6f458dfb authored by Eric Dumazet, committed by David S. Miller

tcp: improve latencies of timer triggered events

The modern TCP stack depends heavily on tcp_write_timer() having low
latency, but the current implementation does not quite meet that
expectation.

When a timer fires but finds the socket owned by the user, it rearms
itself for an additional delay, hoping the next run will be more
successful.

tcp_write_timer(), for example, uses a 50 ms delay for the next try,
which defeats many attempts to get predictable TCP behavior in terms of
latency.

Use the recently introduced tcp_release_cb(), so that the user owning
the socket calls the deferred handlers right before releasing it.

This will permit us to post a follow-up patch addressing the
tcp_tso_should_defer() syndrome (some deferred packets have to wait for
the RTO timer to be transmitted, even though cwnd would allow us to send
them sooner).
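
For orientation, here is a minimal user-space sketch (not the kernel code) of the deferral pattern this patch introduces: a timer handler that finds the socket locked records a "deferred" flag instead of rearming itself, and the lock owner runs the deferred work the moment it releases the socket. All names and types are illustrative stand-ins, and C11 atomics stand in for the kernel's set_bit()/cmpxchg() helpers.

	/*
	 * Stand-alone sketch of the deferral pattern (illustrative names only).
	 * Build with: cc -std=c11 defer_sketch.c
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	enum {
		DEF_WRITE_TIMER  = 1UL << 0,	/* plays the role of TCP_WRITE_TIMER_DEFERRED */
		DEF_DELACK_TIMER = 1UL << 1,	/* plays the role of TCP_DELACK_TIMER_DEFERRED */
	};

	struct fake_sock {
		bool owned_by_user;		/* stand-in for sock_owned_by_user() */
		atomic_ulong deferred;		/* stand-in for tp->tsq_flags */
	};

	static void write_timer_work(void)  { puts("retransmit timer work"); }
	static void delack_timer_work(void) { puts("delayed-ack timer work"); }

	/* Timer callback: do the work now if possible, otherwise just mark it. */
	static void timer_fired(struct fake_sock *sk, unsigned long flag)
	{
		if (!sk->owned_by_user) {
			if (flag & DEF_WRITE_TIMER)
				write_timer_work();
			if (flag & DEF_DELACK_TIMER)
				delack_timer_work();
		} else {
			/* no 50 ms retry: remember the work for release time */
			atomic_fetch_or(&sk->deferred, flag);
		}
	}

	/* Run by the lock owner right before it releases the socket
	 * (the role tcp_release_cb() plays in this patch).  The real code
	 * clears only the deferred bits with cmpxchg(); atomic_exchange()
	 * is a simplification that clears everything.
	 */
	static void release_cb(struct fake_sock *sk)
	{
		unsigned long flags = atomic_exchange(&sk->deferred, 0);

		if (flags & DEF_WRITE_TIMER)
			write_timer_work();
		if (flags & DEF_DELACK_TIMER)
			delack_timer_work();
	}

	int main(void)
	{
		struct fake_sock sk = { .owned_by_user = true };

		timer_fired(&sk, DEF_WRITE_TIMER);	/* deferred: socket is "owned" */
		sk.owned_by_user = false;
		release_cb(&sk);			/* deferred work runs here, not 50 ms later */
		return 0;
	}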
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Nandita Dukkipati <nanditad@google.com>
Cc: H.K. Jerry Chu <hkchu@google.com>
Cc: John Heffner <johnwheffner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9dc27415
include/linux/tcp.h
@@ -515,7 +515,9 @@ struct tcp_sock {
 enum tsq_flags {
 	TSQ_THROTTLED,
 	TSQ_QUEUED,
-	TSQ_OWNED,		   /* tcp_tasklet_func() found socket was locked */
+	TCP_TSQ_DEFERRED,	   /* tcp_tasklet_func() found socket was owned */
+	TCP_WRITE_TIMER_DEFERRED,  /* tcp_write_timer() found socket was owned */
+	TCP_DELACK_TIMER_DEFERRED, /* tcp_delack_timer() found socket was owned */
 };
 
 static inline struct tcp_sock *tcp_sk(const struct sock *sk)
include/net/tcp.h
@@ -350,6 +350,8 @@ extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
 			size_t size, int flags);
 extern void tcp_release_cb(struct sock *sk);
+extern void tcp_write_timer_handler(struct sock *sk);
+extern void tcp_delack_timer_handler(struct sock *sk);
 extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 				 const struct tcphdr *th, unsigned int len);
net/ipv4/tcp_output.c
@@ -837,6 +837,13 @@ struct tsq_tasklet {
 };
 static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
 
+static void tcp_tsq_handler(struct sock *sk)
+{
+	if ((1 << sk->sk_state) &
+	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
+	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK))
+		tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
+}
 /*
  * One tasklest per cpu tries to send more skbs.
  * We run in tasklet context but need to disable irqs when
@@ -864,16 +871,10 @@ static void tcp_tasklet_func(unsigned long data)
 		bh_lock_sock(sk);
 
 		if (!sock_owned_by_user(sk)) {
-			if ((1 << sk->sk_state) &
-			    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 |
-			     TCPF_CLOSING | TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
-				tcp_write_xmit(sk,
-					       tcp_current_mss(sk),
-					       0, 0,
-					       GFP_ATOMIC);
+			tcp_tsq_handler(sk);
 		} else {
 			/* defer the work to tcp_release_cb() */
-			set_bit(TSQ_OWNED, &tp->tsq_flags);
+			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
 		}
 		bh_unlock_sock(sk);
 
@@ -882,6 +883,9 @@ static void tcp_tasklet_func(unsigned long data)
 	}
 }
 
+#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
+			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
+			  (1UL << TCP_DELACK_TIMER_DEFERRED))
 /**
  * tcp_release_cb - tcp release_sock() callback
  * @sk: socket
@@ -892,16 +896,24 @@ static void tcp_tasklet_func(unsigned long data)
 void tcp_release_cb(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	unsigned long flags, nflags;
 
-	if (test_and_clear_bit(TSQ_OWNED, &tp->tsq_flags)) {
-		if ((1 << sk->sk_state) &
-		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 |
-		     TCPF_CLOSING | TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
-			tcp_write_xmit(sk,
-				       tcp_current_mss(sk),
-				       0, 0,
-				       GFP_ATOMIC);
-	}
+	/* perform an atomic operation only if at least one flag is set */
+	do {
+		flags = tp->tsq_flags;
+		if (!(flags & TCP_DEFERRED_ALL))
+			return;
+		nflags = flags & ~TCP_DEFERRED_ALL;
+	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
+
+	if (flags & (1UL << TCP_TSQ_DEFERRED))
+		tcp_tsq_handler(sk);
+	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED))
+		tcp_write_timer_handler(sk);
+	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED))
+		tcp_delack_timer_handler(sk);
 }
 EXPORT_SYMBOL(tcp_release_cb);
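
A note on the tcp_release_cb() loop above: reading tp->tsq_flags first lets the function return without any atomic read-modify-write when nothing was deferred, and cmpxchg() clears only the bits in TCP_DEFERRED_ALL, so unrelated flags such as TSQ_THROTTLED and TSQ_QUEUED survive. A rough equivalent using C11 atomics (illustrative only, not the kernel code):

	#include <stdatomic.h>

	#define DEFERRED_ALL 0x7UL	/* stand-in for TCP_DEFERRED_ALL */

	/* Returns the deferred bits it managed to claim (0 if none were set). */
	static unsigned long claim_deferred(atomic_ulong *tsq_flags)
	{
		unsigned long flags, nflags;

		do {
			flags = atomic_load(tsq_flags);
			if (!(flags & DEFERRED_ALL))
				return 0;		/* fast path: no atomic RMW needed */
			nflags = flags & ~DEFERRED_ALL;	/* keep every other bit intact */
		} while (!atomic_compare_exchange_weak(tsq_flags, &flags, nflags));

		return flags & DEFERRED_ALL;		/* caller runs the matching handlers */
	}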
net/ipv4/tcp_timer.c
@@ -32,17 +32,6 @@ int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
 int sysctl_tcp_orphan_retries __read_mostly;
 int sysctl_tcp_thin_linear_timeouts __read_mostly;
 
-static void tcp_write_timer(unsigned long);
-static void tcp_delack_timer(unsigned long);
-static void tcp_keepalive_timer (unsigned long data);
-
-void tcp_init_xmit_timers(struct sock *sk)
-{
-	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
-				  &tcp_keepalive_timer);
-}
-EXPORT_SYMBOL(tcp_init_xmit_timers);
-
 static void tcp_write_err(struct sock *sk)
 {
 	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
@@ -205,21 +194,11 @@ static int tcp_write_timeout(struct sock *sk)
 	return 0;
 }
 
-static void tcp_delack_timer(unsigned long data)
+void tcp_delack_timer_handler(struct sock *sk)
 {
-	struct sock *sk = (struct sock *)data;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
-	bh_lock_sock(sk);
-	if (sock_owned_by_user(sk)) {
-		/* Try again later. */
-		icsk->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
-		sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
-		goto out_unlock;
-	}
-
 	sk_mem_reclaim_partial(sk);
 
 	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
@@ -260,7 +239,21 @@ static void tcp_delack_timer(unsigned long data)
 out:
 	if (sk_under_memory_pressure(sk))
 		sk_mem_reclaim(sk);
-out_unlock:
+}
+
+static void tcp_delack_timer(unsigned long data)
+{
+	struct sock *sk = (struct sock *)data;
+
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk)) {
+		tcp_delack_timer_handler(sk);
+	} else {
+		inet_csk(sk)->icsk_ack.blocked = 1;
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+		/* deleguate our work to tcp_release_cb() */
+		set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
+	}
 	bh_unlock_sock(sk);
 	sock_put(sk);
 }
@@ -450,19 +443,11 @@ void tcp_retransmit_timer(struct sock *sk)
 out:;
 }
 
-static void tcp_write_timer(unsigned long data)
+void tcp_write_timer_handler(struct sock *sk)
 {
-	struct sock *sk = (struct sock *)data;
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int event;
 
-	bh_lock_sock(sk);
-	if (sock_owned_by_user(sk)) {
-		/* Try again later */
-		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
-		goto out_unlock;
-	}
-
 	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
 		goto out;
 
@@ -485,7 +470,19 @@ static void tcp_write_timer(unsigned long data)
 
 out:
 	sk_mem_reclaim(sk);
-out_unlock:
+}
+
+static void tcp_write_timer(unsigned long data)
+{
+	struct sock *sk = (struct sock *)data;
+
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk)) {
+		tcp_write_timer_handler(sk);
+	} else {
+		/* deleguate our work to tcp_release_cb() */
+		set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
+	}
 	bh_unlock_sock(sk);
 	sock_put(sk);
 }
@@ -602,3 +599,10 @@ static void tcp_keepalive_timer (unsigned long data)
 	bh_unlock_sock(sk);
 	sock_put(sk);
 }
+
+void tcp_init_xmit_timers(struct sock *sk)
+{
+	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
+				  &tcp_keepalive_timer);
+}
+EXPORT_SYMBOL(tcp_init_xmit_timers);