Commit 7233da86 authored by Alexander Ovechkin, committed by David S. Miller

tcp: relookup sock for RST+ACK packets handled by obsolete req sock

Currently tcp_check_req() can be called with an obsolete req socket for which
the full socket has already been created (because of a CPU race, or because
early demux assigned the req socket to multiple packets in a GRO batch).

Commit e0f9759f ("tcp: try to keep packet if SYN_RCV race
is lost") added a retry for the case when tcp_check_req() is called for a
PSH|ACK packet. But if the client sends RST+ACK immediately after the
connection is established (because it is performing a health check, for
example), the retry does not occur. In that case tcp_check_req() tries to
close the req socket, leaving the full socket active.

Fixes: e0f9759f ("tcp: try to keep packet if SYN_RCV race is lost")
Signed-off-by: Alexander Ovechkin <ovov@yandex-team.ru>
Reported-by: Oleg Senin <olegsenin@yandex-team.ru>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0217ed28
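
The fix relies on retry machinery that commit e0f9759f already added to the
receive path: when tcp_check_req() returns NULL and reports that the request
socket was stolen, tcp_v4_rcv() redoes the socket lookup so the packet can be
delivered to the full socket instead of being discarded. The following is a
condensed, non-verbatim sketch of that branch in net/ipv4/tcp_ipv4.c (MD5
checks, filtering, refcounting and error paths elided):

	/* Sketch of the TCP_NEW_SYN_RECV branch in tcp_v4_rcv(),
	 * condensed for illustration; not the verbatim kernel source.
	 */
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen)
				/* Another CPU owns the req and created the
				 * full socket: redo the lookup and feed the
				 * packet to that socket.
				 */
				goto lookup;
			goto discard_and_relse;
		}
	}

With *req_stolen now also set for the RST+ACK case (see the tcp_minisocks.c
hunk below), the same relookup fires and the RST reaches the full socket,
which can then be torn down properly.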
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -282,7 +282,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
 }
 
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
 
 static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -705,12 +705,15 @@ static bool reqsk_queue_unlink(struct request_sock *req)
 	return found;
 }
 
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
 {
-	if (reqsk_queue_unlink(req)) {
+	bool unlinked = reqsk_queue_unlink(req);
+
+	if (unlinked) {
 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
 		reqsk_put(req);
 	}
+	return unlinked;
 }
 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
 
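
Changing the return type from void to bool is backward compatible at the
call sites: callers that do not care whether the req was unlinked simply keep
ignoring the result. For reference, inet_csk_reqsk_queue_drop_and_put() (same
file, unchanged by this patch) is one such caller — shown here as a sketch,
not a verbatim quote:

	void inet_csk_reqsk_queue_drop_and_put(struct sock *sk,
					       struct request_sock *req)
	{
		inet_csk_reqsk_queue_drop(sk, req);	/* result ignored */
		reqsk_put(req);
	}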
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -804,8 +804,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 		tcp_reset(sk, skb);
 	}
 	if (!fastopen) {
-		inet_csk_reqsk_queue_drop(sk, req);
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
+
+		if (unlinked)
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+		*req_stolen = !unlinked;
 	}
 	return NULL;
 }