Commit c57943a1 authored by Peter Zijlstra, committed by David S. Miller

net: wrap sk->sk_backlog_rcv()

Wrap calling sk->sk_backlog_rcv() in a function. This will allow extending the
generic sk_backlog_rcv behaviour.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b339a47c
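
As an aside (not part of this commit), the value of routing every caller through the wrapper is that generic behaviour can later be hooked in exactly one place instead of at each call site. A minimal, hypothetical sketch of such an extension follows; skb_needs_special_rx() and __sk_backlog_rcv() are illustrative placeholder names, not existing kernel symbols, and the snippet assumes the surrounding socket header context (struct sock, struct sk_buff):

/*
 * Hypothetical sketch: how the generic wrapper introduced by this commit
 * could later be extended without touching its callers.
 */
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * Divert "special" packets to an out-of-line slow path before
	 * invoking the protocol's per-socket callback.  Both
	 * skb_needs_special_rx() and __sk_backlog_rcv() are illustrative
	 * placeholders, not real kernel symbols.
	 */
	if (unlikely(skb_needs_special_rx(skb)))
		return __sk_backlog_rcv(sk, skb);

	return sk->sk_backlog_rcv(sk, skb);
}
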
@@ -482,6 +482,11 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
+static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	return sk->sk_backlog_rcv(sk, skb);
+}
+
 #define sk_wait_event(__sk, __timeo, __condition)		\
 	({	int __rc;					\
 		release_sock(__sk);				\
...
@@ -896,7 +896,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 			BUG_ON(sock_owned_by_user(sk));
 
 			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-				sk->sk_backlog_rcv(sk, skb1);
+				sk_backlog_rcv(sk, skb1);
 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
 			}
...
@@ -327,7 +327,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 		 */
 		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
 
-		rc = sk->sk_backlog_rcv(sk, skb);
+		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
 	} else
@@ -1374,7 +1374,7 @@ static void __release_sock(struct sock *sk)
 		struct sk_buff *next = skb->next;
 
 		skb->next = NULL;
-		sk->sk_backlog_rcv(sk, skb);
+		sk_backlog_rcv(sk, skb);
 
 		/*
 		 * We are in process context here with softirqs
...
@@ -1161,7 +1161,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	 * necessary */
 	local_bh_disable();
 	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
-		sk->sk_backlog_rcv(sk, skb);
+		sk_backlog_rcv(sk, skb);
 	local_bh_enable();
 
 	/* Clear memory counter. */
...
@@ -201,7 +201,7 @@ static void tcp_delack_timer(unsigned long data)
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
 
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
-			sk->sk_backlog_rcv(sk, skb);
+			sk_backlog_rcv(sk, skb);
 
 		tp->ucopy.memory = 0;
 	}
...