Commit 781dd043 authored by Cong Wang, committed by Daniel Borkmann

skmsg: Increase sk->sk_drops when dropping packets

It is hard to observe packet drops without increasing the relevant
drop counters, so increase sk->sk_drops, which is a protocol-independent
counter, whenever skmsg drops a packet. Fortunately, a psock is always
associated with a struct sock, so we can simply use psock->sk.
Suggested-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Cong Wang <cong.wang@bytedance.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Jakub Sitnicki <jakub@cloudflare.com>
Link: https://lore.kernel.org/bpf/20210615021342.7416-9-xiyou.wangcong@gmail.com
parent 42830571
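
For context, the sock_drop() helper added below relies on sk_drops_add(), which at the time of this commit is defined in include/net/sock.h roughly as follows (a reference sketch, not part of this patch):

static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
{
	/* Charge one drop per GSO segment so an aggregated skb is not
	 * under-counted; sk_drops is a plain atomic counter on struct sock,
	 * independent of the protocol, which is what lets sock_drop() use
	 * it for any psock->sk.
	 */
	int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	atomic_add(segs, &sk->sk_drops);
}

The counter is already visible to userspace, e.g. as the drops column of /proc/net/udp and via sock_diag (SK_MEMINFO_DROPS), so drops accounted here show up with existing tooling such as ss.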
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -578,6 +578,12 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 	return sk_psock_skb_ingress(psock, skb);
 }
 
+static void sock_drop(struct sock *sk, struct sk_buff *skb)
+{
+	sk_drops_add(sk, skb);
+	kfree_skb(skb);
+}
+
 static void sk_psock_backlog(struct work_struct *work)
 {
 	struct sk_psock *psock = container_of(work, struct sk_psock, work);
@@ -617,7 +623,7 @@ static void sk_psock_backlog(struct work_struct *work)
 				/* Hard errors break pipe and stop xmit. */
 				sk_psock_report_error(psock, ret ? -ret : EPIPE);
 				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
-				kfree_skb(skb);
+				sock_drop(psock->sk, skb);
 				goto end;
 			}
 			off += ret;
@@ -708,7 +714,7 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
 
 	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
 		skb_bpf_redirect_clear(skb);
-		kfree_skb(skb);
+		sock_drop(psock->sk, skb);
 	}
 	__sk_psock_purge_ingress_msg(psock);
 }
@@ -834,7 +840,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
 	 * return code, but then didn't set a redirect interface.
 	 */
 	if (unlikely(!sk_other)) {
-		kfree_skb(skb);
+		sock_drop(from->sk, skb);
 		return -EIO;
 	}
 	psock_other = sk_psock(sk_other);
@@ -844,14 +850,14 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
 	 */
 	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
 		skb_bpf_redirect_clear(skb);
-		kfree_skb(skb);
+		sock_drop(from->sk, skb);
 		return -EIO;
 	}
 
 	spin_lock_bh(&psock_other->ingress_lock);
 	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
 		spin_unlock_bh(&psock_other->ingress_lock);
 		skb_bpf_redirect_clear(skb);
-		kfree_skb(skb);
+		sock_drop(from->sk, skb);
 		return -EIO;
 	}
@@ -942,7 +948,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
 	case __SK_DROP:
 	default:
 out_free:
-		kfree_skb(skb);
+		sock_drop(psock->sk, skb);
 	}
 
 	return err;
@@ -977,7 +983,7 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
 	sk = strp->sk;
 	psock = sk_psock(sk);
 	if (unlikely(!psock)) {
-		kfree_skb(skb);
+		sock_drop(sk, skb);
 		goto out;
 	}
 	prog = READ_ONCE(psock->progs.stream_verdict);
@@ -1098,7 +1104,7 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
 	psock = sk_psock(sk);
 	if (unlikely(!psock)) {
 		len = 0;
-		kfree_skb(skb);
+		sock_drop(sk, skb);
 		goto out;
 	}
 	prog = READ_ONCE(psock->progs.stream_verdict);