Commit 843f4a55 authored by Yuchung Cheng, committed by David S. Miller

tcp: use tcp_v4_send_synack on first SYN-ACK

To avoid large code duplication in IPv6, we need to first simplify
the complicated SYN-ACK sending code in tcp_v4_conn_request().

To use tcp_v4(6)_send_synack() to send all SYN-ACKs, we need to
initialize the mini socket's receive window before trying to
create the child socket and/or building the SYN-ACK packet. So we
move that initialization out of tcp_make_synack() into a new helper,
tcp_openreq_init_rwin(), called from tcp_v4_conn_request().

After this refactoring, the SYN-ACK sending code is simpler, and it
becomes easier to implement Fast Open for IPv6.
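
For illustration only (this sketch is not part of the patch), the tail of
tcp_v4_conn_request() now reduces to roughly the following shape; every
identifier used here appears in the diff below:

	/* Condensed sketch of the new SYN-ACK path; error handling,
	 * labels, and surrounding code are omitted.
	 */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		goto drop_and_free;

	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;
	tcp_openreq_init_rwin(req, sk, dst);	/* rwin set up front now */
	fastopen = !want_cookie && tcp_try_fastopen(sk, skb, req, &foc, dst);
	err = tcp_v4_send_synack(sk, dst, req, skb_get_queue_mapping(skb), &foc);
	if (!fastopen) {
		if (err || want_cookie)
			goto drop_and_free;
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	}
	return 0;

Note that the receive window is initialized before tcp_try_fastopen() may
create the child socket and before the SYN-ACK is built; that ordering is
the whole point of the tcp_openreq_init_rwin() move.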
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Daniel Lee <longinus00@gmail.com>
Signed-off-by: Jerry Chu <hkchu@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 89278c9d
include/net/tcp.h
@@ -1114,6 +1114,9 @@ static inline void tcp_openreq_init(struct request_sock *req,
 	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
 }
 
+extern void tcp_openreq_init_rwin(struct request_sock *req,
+				  struct sock *sk, struct dst_entry *dst);
+
 void tcp_enter_memory_pressure(struct sock *sk);
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
@@ -1323,13 +1326,10 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
 extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 int tcp_fastopen_reset_cipher(void *key, unsigned int len);
-int tcp_fastopen_create_child(struct sock *sk,
-			      struct sk_buff *skb,
-			      struct sk_buff *skb_synack,
-			      struct request_sock *req);
-bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
-			struct request_sock *req,
-			struct tcp_fastopen_cookie *foc);
+bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+		      struct request_sock *req,
+		      struct tcp_fastopen_cookie *foc,
+		      struct dst_entry *dst);
 void tcp_fastopen_init_key_once(bool publish);
 #define TCP_FASTOPEN_KEY_LENGTH 16
net/ipv4/tcp_fastopen.c
@@ -95,34 +95,22 @@ void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
 	rcu_read_unlock();
 }
 
-int tcp_fastopen_create_child(struct sock *sk,
-			      struct sk_buff *skb,
-			      struct sk_buff *skb_synack,
-			      struct request_sock *req)
+static bool tcp_fastopen_create_child(struct sock *sk,
+				      struct sk_buff *skb,
+				      struct dst_entry *dst,
+				      struct request_sock *req)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
-	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct sock *child;
-	int err;
 
 	req->num_retrans = 0;
 	req->num_timeout = 0;
 	req->sk = NULL;
 
 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-	if (child == NULL) {
-		NET_INC_STATS_BH(sock_net(sk),
-				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
-		kfree_skb(skb_synack);
-		return -1;
-	}
-
-	err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
-				    ireq->ir_rmt_addr, ireq->opt);
-	err = net_xmit_eval(err);
-	if (!err)
-		tcp_rsk(req)->snt_synack = tcp_time_stamp;
-	/* XXX (TFO) - is it ok to ignore error and continue? */
+	if (child == NULL)
+		return false;
 
 	spin_lock(&queue->fastopenq->lock);
 	queue->fastopenq->qlen++;
@@ -167,28 +155,24 @@ int tcp_fastopen_create_child(struct sock *sk,
 	/* Queue the data carried in the SYN packet. We need to first
 	 * bump skb's refcnt because the caller will attempt to free it.
 	 *
-	 * XXX (TFO) - we honor a zero-payload TFO request for now.
-	 * (Any reason not to?)
+	 * XXX (TFO) - we honor a zero-payload TFO request for now,
+	 * (any reason not to?) but no need to queue the skb since
+	 * there is no data. How about SYN+FIN?
 	 */
-	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
-		/* Don't queue the skb if there is no payload in SYN.
-		 * XXX (TFO) - How about SYN+FIN?
-		 */
-		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-	} else {
+	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
 		skb = skb_get(skb);
 		skb_dst_drop(skb);
 		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
 		skb_set_owner_r(skb, child);
 		__skb_queue_tail(&child->sk_receive_queue, skb);
-		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		tp->syn_data_acked = 1;
 	}
+	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 	sk->sk_data_ready(sk);
 	bh_unlock_sock(child);
 	sock_put(child);
 	WARN_ON(req->sk == NULL);
-	return 0;
+	return true;
 }
-EXPORT_SYMBOL(tcp_fastopen_create_child);
@@ -232,9 +216,10 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
  * may be updated and return the client in the SYN-ACK later. E.g., Fast Open
  * cookie request (foc->len == 0).
  */
-bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
-			struct request_sock *req,
-			struct tcp_fastopen_cookie *foc)
+bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+		      struct request_sock *req,
+		      struct tcp_fastopen_cookie *foc,
+		      struct dst_entry *dst)
 {
 	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
 	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
@@ -255,12 +240,22 @@ bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
 	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
 	    foc->len == valid_foc.len &&
 	    !memcmp(foc->val, valid_foc.val, foc->len)) {
+		/* Cookie is valid. Create a (full) child socket to accept
+		 * the data in SYN before returning a SYN-ACK to ack the
+		 * data. If we fail to create the socket, fall back and
+		 * ack the ISN only but includes the same cookie.
+		 *
+		 * Note: Data-less SYN with valid cookie is allowed to send
+		 * data in SYN_RECV state.
+		 */
 fastopen:
-		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-		foc->len = -1;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
-		return true;
+		if (tcp_fastopen_create_child(sk, skb, dst, req)) {
+			foc->len = -1;
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPFASTOPENPASSIVE);
+			return true;
+		}
 	}
 
 	NET_INC_STATS_BH(sock_net(sk), foc->len ?
 			 LINUX_MIB_TCPFASTOPENPASSIVEFAIL :
@@ -268,4 +263,4 @@ bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
 	*foc = valid_foc;
 	return false;
 }
-EXPORT_SYMBOL(tcp_fastopen_check);
+EXPORT_SYMBOL(tcp_try_fastopen);
net/ipv4/tcp_ipv4.c
@@ -822,7 +822,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 			      struct request_sock *req,
-			      u16 queue_mapping)
+			      u16 queue_mapping,
+			      struct tcp_fastopen_cookie *foc)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -833,7 +834,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 		return -1;
 
-	skb = tcp_make_synack(sk, dst, req, NULL);
+	skb = tcp_make_synack(sk, dst, req, foc);
 
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
@@ -852,7 +853,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
 {
-	int res = tcp_v4_send_synack(sk, NULL, req, 0);
+	int res = tcp_v4_send_synack(sk, NULL, req, 0, NULL);
 
 	if (!res) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
@@ -1270,11 +1271,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	__be32 saddr = ip_hdr(skb)->saddr;
 	__be32 daddr = ip_hdr(skb)->daddr;
 	__u32 isn = TCP_SKB_CB(skb)->when;
-	bool want_cookie = false;
+	bool want_cookie = false, fastopen;
 	struct flowi4 fl4;
 	struct tcp_fastopen_cookie foc = { .len = -1 };
-	struct sk_buff *skb_synack;
-	int do_fastopen;
+	int err;
 
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1373,49 +1373,24 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		isn = tcp_v4_init_sequence(skb);
 	}
-	tcp_rsk(req)->snt_isn = isn;
-
-	if (dst == NULL) {
-		dst = inet_csk_route_req(sk, &fl4, req);
-		if (dst == NULL)
-			goto drop_and_free;
-	}
-	do_fastopen = !want_cookie &&
-		      tcp_fastopen_check(sk, skb, req, &foc);
-
-	/* We don't call tcp_v4_send_synack() directly because we need
-	 * to make sure a child socket can be created successfully before
-	 * sending back synack!
-	 *
-	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
-	 * (or better yet, call tcp_send_synack() in the child context
-	 * directly, but will have to fix bunch of other code first)
-	 * after syn_recv_sock() except one will need to first fix the
-	 * latter to remove its dependency on the current implementation
-	 * of tcp_v4_send_synack()->tcp_select_initial_window().
-	 */
-	skb_synack = tcp_make_synack(sk, dst, req, &foc);
-
-	if (skb_synack) {
-		__tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
-		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
-	} else
+	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 		goto drop_and_free;
 
-	if (likely(!do_fastopen)) {
-		int err;
-		err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
-					    ireq->ir_rmt_addr, ireq->opt);
-		err = net_xmit_eval(err);
+	tcp_rsk(req)->snt_isn = isn;
+	tcp_rsk(req)->snt_synack = tcp_time_stamp;
+	tcp_openreq_init_rwin(req, sk, dst);
+	fastopen = !want_cookie &&
+		   tcp_try_fastopen(sk, skb, req, &foc, dst);
+	err = tcp_v4_send_synack(sk, dst, req,
+				 skb_get_queue_mapping(skb), &foc);
+	if (!fastopen) {
 		if (err || want_cookie)
 			goto drop_and_free;
 
 		tcp_rsk(req)->snt_synack = tcp_time_stamp;
 		tcp_rsk(req)->listener = NULL;
-		/* Add the request_sock to the SYN table */
 		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-	} else if (tcp_fastopen_create_child(sk, skb, skb_synack, req))
-		goto drop_and_release;
+	}
 
 	return 0;
net/ipv4/tcp_minisocks.c
@@ -362,6 +362,37 @@ void tcp_twsk_destructor(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
 
+void tcp_openreq_init_rwin(struct request_sock *req,
+			   struct sock *sk, struct dst_entry *dst)
+{
+	struct inet_request_sock *ireq = inet_rsk(req);
+	struct tcp_sock *tp = tcp_sk(sk);
+	__u8 rcv_wscale;
+	int mss = dst_metric_advmss(dst);
+
+	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
+		mss = tp->rx_opt.user_mss;
+
+	/* Set this up on the first call only */
+	req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
+
+	/* limit the window selection if the user enforce a smaller rx buffer */
+	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+	    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
+		req->window_clamp = tcp_full_space(sk);
+
+	/* tcp_full_space because it is guaranteed to be the first packet */
+	tcp_select_initial_window(tcp_full_space(sk),
+		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
+		&req->rcv_wnd,
+		&req->window_clamp,
+		ireq->wscale_ok,
+		&rcv_wscale,
+		dst_metric(dst, RTAX_INITRWND));
+	ireq->rcv_wscale = rcv_wscale;
+}
+EXPORT_SYMBOL(tcp_openreq_init_rwin);
+
 static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
 					 struct request_sock *req)
 {
net/ipv4/tcp_output.c
@@ -2803,27 +2803,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
 		mss = tp->rx_opt.user_mss;
 
-	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
-		__u8 rcv_wscale;
-		/* Set this up on the first call only */
-		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
-
-		/* limit the window selection if the user enforce a smaller rx buffer */
-		if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
-		    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
-			req->window_clamp = tcp_full_space(sk);
-
-		/* tcp_full_space because it is guaranteed to be the first packet */
-		tcp_select_initial_window(tcp_full_space(sk),
-			mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
-			&req->rcv_wnd,
-			&req->window_clamp,
-			ireq->wscale_ok,
-			&rcv_wscale,
-			dst_metric(dst, RTAX_INITRWND));
-		ireq->rcv_wscale = rcv_wscale;
-	}
-
 	memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
 	if (unlikely(req->cookie_ts))