Commit 10d6393d authored by Julian Wiedmann, committed by David S. Miller

net/af_iucv: support drop monitoring

Change the good paths to use consume_skb() instead of kfree_skb(). This
avoids flooding dropwatch with false-positives.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 00335237
...@@ -1044,7 +1044,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg, ...@@ -1044,7 +1044,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
if (err == 0) { if (err == 0) {
atomic_dec(&iucv->skbs_in_xmit); atomic_dec(&iucv->skbs_in_xmit);
skb_unlink(skb, &iucv->send_skb_q); skb_unlink(skb, &iucv->send_skb_q);
kfree_skb(skb); consume_skb(skb);
} }
/* this error should never happen since the */ /* this error should never happen since the */
...@@ -1293,7 +1293,7 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, ...@@ -1293,7 +1293,7 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
} }
} }
kfree_skb(skb); consume_skb(skb);
if (iucv->transport == AF_IUCV_TRANS_HIPER) { if (iucv->transport == AF_IUCV_TRANS_HIPER) {
atomic_inc(&iucv->msg_recv); atomic_inc(&iucv->msg_recv);
if (atomic_read(&iucv->msg_recv) > iucv->msglimit) { if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
...@@ -1756,7 +1756,7 @@ static void iucv_callback_txdone(struct iucv_path *path, ...@@ -1756,7 +1756,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
spin_unlock_irqrestore(&list->lock, flags); spin_unlock_irqrestore(&list->lock, flags);
if (this) { if (this) {
kfree_skb(this); consume_skb(this);
/* wake up any process waiting for sending */ /* wake up any process waiting for sending */
iucv_sock_wake_msglim(sk); iucv_sock_wake_msglim(sk);
} }
...@@ -1903,17 +1903,17 @@ static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) ...@@ -1903,17 +1903,17 @@ static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{ {
struct iucv_sock *iucv = iucv_sk(sk); struct iucv_sock *iucv = iucv_sk(sk);
if (!iucv) if (!iucv || sk->sk_state != IUCV_BOUND) {
goto out; kfree_skb(skb);
if (sk->sk_state != IUCV_BOUND) return NET_RX_SUCCESS;
goto out; }
bh_lock_sock(sk); bh_lock_sock(sk);
iucv->msglimit_peer = iucv_trans_hdr(skb)->window; iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
sk->sk_state = IUCV_CONNECTED; sk->sk_state = IUCV_CONNECTED;
sk->sk_state_change(sk); sk->sk_state_change(sk);
bh_unlock_sock(sk); bh_unlock_sock(sk);
out: consume_skb(skb);
kfree_skb(skb);
return NET_RX_SUCCESS; return NET_RX_SUCCESS;
} }
...@@ -1924,16 +1924,16 @@ static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) ...@@ -1924,16 +1924,16 @@ static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{ {
struct iucv_sock *iucv = iucv_sk(sk); struct iucv_sock *iucv = iucv_sk(sk);
if (!iucv) if (!iucv || sk->sk_state != IUCV_BOUND) {
goto out; kfree_skb(skb);
if (sk->sk_state != IUCV_BOUND) return NET_RX_SUCCESS;
goto out; }
bh_lock_sock(sk); bh_lock_sock(sk);
sk->sk_state = IUCV_DISCONN; sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk); sk->sk_state_change(sk);
bh_unlock_sock(sk); bh_unlock_sock(sk);
out: consume_skb(skb);
kfree_skb(skb);
return NET_RX_SUCCESS; return NET_RX_SUCCESS;
} }
...@@ -1945,16 +1945,18 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) ...@@ -1945,16 +1945,18 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
struct iucv_sock *iucv = iucv_sk(sk); struct iucv_sock *iucv = iucv_sk(sk);
/* other end of connection closed */ /* other end of connection closed */
if (!iucv) if (!iucv) {
goto out; kfree_skb(skb);
return NET_RX_SUCCESS;
}
bh_lock_sock(sk); bh_lock_sock(sk);
if (sk->sk_state == IUCV_CONNECTED) { if (sk->sk_state == IUCV_CONNECTED) {
sk->sk_state = IUCV_DISCONN; sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk); sk->sk_state_change(sk);
} }
bh_unlock_sock(sk); bh_unlock_sock(sk);
out: consume_skb(skb);
kfree_skb(skb);
return NET_RX_SUCCESS; return NET_RX_SUCCESS;
} }
...@@ -2107,7 +2109,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, ...@@ -2107,7 +2109,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
case (AF_IUCV_FLAG_WIN): case (AF_IUCV_FLAG_WIN):
err = afiucv_hs_callback_win(sk, skb); err = afiucv_hs_callback_win(sk, skb);
if (skb->len == sizeof(struct af_iucv_trans_hdr)) { if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
kfree_skb(skb); consume_skb(skb);
break; break;
} }
fallthrough; /* and receive non-zero length data */ fallthrough; /* and receive non-zero length data */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment