Commit 356dd413 authored by Eric Dumazet, committed by Khalid Elmously

tcp: increment sk_drops for dropped rx packets

BugLink: https://bugs.launchpad.net/bugs/1801893

[ Upstream commit 532182cd ]

Now that ss can report sk_drops, we can instruct TCP to increment
this per-socket counter when it drops an incoming frame, to refine
monitoring and debugging.

A following patch takes care of listener drops.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Mao Wenan <maowenan@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
parent 851cb7cd
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2164,6 +2164,13 @@ sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
 	SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
 }
 
+static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+{
+	int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+
+	atomic_add(segs, &sk->sk_drops);
+}
+
 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
 			   struct sk_buff *skb);
 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
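
Note: the sk_drops_add() helper added above charges a dropped buffer once per GSO segment (minimum one), so a coalesced skb that carried several segments is accounted as that many dropped packets. A minimal sketch of that accounting rule only, written as standalone userspace C with made-up names (drops_add, sk_drops are illustrative stand-ins, not kernel API):

/* Illustration only: models segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs). */
#include <stdio.h>

static unsigned long sk_drops;	/* stand-in for the per-socket sk->sk_drops counter */

static void drops_add(unsigned short gso_segs)
{
	/* a plain (non-GSO) skb has gso_segs == 0 but still counts as one drop */
	unsigned short segs = gso_segs ? gso_segs : 1;

	sk_drops += segs;
}

int main(void)
{
	drops_add(0);	/* ordinary segment           -> +1  */
	drops_add(10);	/* GRO aggregate of 10 segs   -> +10 */
	printf("sk_drops = %lu\n", sk_drops);	/* prints 11 */
	return 0;
}
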
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4293,6 +4293,12 @@ static bool tcp_try_coalesce(struct sock *sk,
 	return true;
 }
 
+static void tcp_drop(struct sock *sk, struct sk_buff *skb)
+{
+	sk_drops_add(sk, skb);
+	__kfree_skb(skb);
+}
+
 /* This one checks to see if we can put data from the
  * out_of_order queue into the receive_queue.
  */
@@ -4317,7 +4323,7 @@ static void tcp_ofo_queue(struct sock *sk)
 		__skb_unlink(skb, &tp->out_of_order_queue);
 		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 			SOCK_DEBUG(sk, "ofo packet was already received\n");
-			__kfree_skb(skb);
+			tcp_drop(sk, skb);
 			continue;
 		}
 		SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
@@ -4369,7 +4375,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 
 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
-		__kfree_skb(skb);
+		tcp_drop(sk, skb);
 		return;
 	}
 
@@ -4433,7 +4439,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
 			/* All the bits are present. Drop. */
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
-			__kfree_skb(skb);
+			tcp_drop(sk, skb);
 			skb = NULL;
 			tcp_dsack_set(sk, seq, end_seq);
 			goto add_sack;
@@ -4472,7 +4478,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
 				 TCP_SKB_CB(skb1)->end_seq);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
-		__kfree_skb(skb1);
+		tcp_drop(sk, skb1);
 	}
 
 add_sack:
@@ -4555,12 +4561,13 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int eaten = -1;
 	bool fragstolen = false;
+	int eaten = -1;
 
-	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
-		goto drop;
-
+	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
+		__kfree_skb(skb);
+		return;
+	}
 	skb_dst_drop(skb);
 	__skb_pull(skb, tcp_hdr(skb)->doff * 4);
 
@@ -4642,7 +4649,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 		inet_csk_schedule_ack(sk);
 drop:
-		__kfree_skb(skb);
+		tcp_drop(sk, skb);
 		return;
 	}
 
@@ -5233,7 +5240,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 	return true;
 
 discard:
-	__kfree_skb(skb);
+	tcp_drop(sk, skb);
 	return false;
 }
 
@@ -5451,7 +5458,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 
 discard:
-	__kfree_skb(skb);
+	tcp_drop(sk, skb);
 }
 EXPORT_SYMBOL(tcp_rcv_established);
 
@@ -5681,7 +5688,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 						  TCP_DELACK_MAX, TCP_RTO_MAX);
 
 discard:
-			__kfree_skb(skb);
+			tcp_drop(sk, skb);
 			return 0;
 		} else {
 			tcp_send_ack(sk);
@@ -6035,7 +6042,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 
 	if (!queued) {
 discard:
-		__kfree_skb(skb);
+		tcp_drop(sk, skb);
 	}
 	return 0;
 }
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1716,6 +1716,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	return 0;
 
 discard_and_relse:
+	sk_drops_add(sk, skb);
 	sock_put(sk);
 	goto discard_it;
 
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1505,6 +1505,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 	return 0;
 
 discard_and_relse:
+	sk_drops_add(sk, skb);
 	sock_put(sk);
 	goto discard_it;
 
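
Usage note: as the commit message says, ss can already report sk_drops. The counter is exported alongside the socket memory info, and recent iproute2 versions display it as the trailing d<count> field of the skmem:(...) line when sockets are dumped with memory details, for example ss -ntm. The exact field layout depends on the iproute2 version, so treat this as a pointer rather than a guarantee.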