Commit d97106ea authored by Herbert Xu, committed by David S. Miller

udp: Drop socket lock for encapsulated packets

The socket lock is there to protect the normal UDP receive path.
Encapsulation UDP sockets don't need that protection.  In fact
the locking is deadly for them: the decapsulated payload may itself
be a UDP packet, possibly with the same addresses, so delivering it
takes the same socket lock again and deadlocks.
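
To see the deadlock concretely, here is a minimal user-space sketch of
the situation (an illustrative model under assumptions, not the kernel
code: the per-socket lock is stood in for by a pthread spinlock, and the
receive path and the encap_rcv hook by plain functions).  It only runs
to completion because the lock is dropped around the callback; with the
unlock/relock pair removed, the nested delivery spins on the lock the
outer delivery already holds.

/* build: cc -pthread encap_model.c
 * Simplified model: one socket, one level of UDP-in-UDP encapsulation.
 * bh_lock_sock()/bh_unlock_sock() are modelled by a non-recursive
 * pthread spinlock. */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t sk_lock;

static void udp_queue_rcv(int inner);

/* Stand-in for up->encap_rcv(): the decapsulated payload is itself a
 * UDP packet for the same socket, so it goes straight back into the
 * UDP receive path. */
static void encap_rcv(void)
{
	udp_queue_rcv(1);
}

static void udp_queue_rcv(int inner)
{
	pthread_spin_lock(&sk_lock);		/* bh_lock_sock(sk) */
	if (!inner) {
		/* The patch: drop the lock around the callback so the
		 * nested delivery of the inner packet can take it again.
		 * Without this unlock/relock pair the nested call spins
		 * on sk_lock forever -- the deadlock described above. */
		pthread_spin_unlock(&sk_lock);
		encap_rcv();
		pthread_spin_lock(&sk_lock);
	}
	printf("queued %s packet\n", inner ? "inner" : "outer");
	pthread_spin_unlock(&sk_lock);		/* bh_unlock_sock(sk) */
}

int main(void)
{
	pthread_spin_init(&sk_lock, PTHREAD_PROCESS_PRIVATE);
	udp_queue_rcv(0);			/* deliver the outer packet */
	return 0;
}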

Also, the nested locking annotation was copied from TCP.  TCP needs it
because of accept(2) spawning sockets, where lockdep must be told that
taking a second socket lock of the same class is expected.  This simply
doesn't apply to UDP, so I've removed it.
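
For reference, the two locking helpers differ only in their lockdep
annotation; roughly as defined in include/net/sock.h (quoted from
memory, so treat the exact form as approximate):

/* _nested only tells the lock validator that taking a second lock of
 * this class is expected; the locking behaviour is identical. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
						 SINGLE_DEPTH_NESTING)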
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8123b421
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -989,7 +989,9 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 		    up->encap_rcv != NULL) {
 			int ret;
 
+			bh_unlock_sock(sk);
 			ret = (*up->encap_rcv)(sk, skb);
+			bh_lock_sock(sk);
 			if (ret <= 0) {
 				UDP_INC_STATS_BH(sock_net(sk),
 						 UDP_MIB_INDATAGRAMS,
@@ -1092,7 +1094,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		if (skb1) {
 			int ret = 0;
 
-			bh_lock_sock_nested(sk);
+			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				ret = udp_queue_rcv_skb(sk, skb1);
 			else
@@ -1194,7 +1196,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 	if (sk != NULL) {
 		int ret = 0;
 
-		bh_lock_sock_nested(sk);
+		bh_lock_sock(sk);
 		if (!sock_owned_by_user(sk))
 			ret = udp_queue_rcv_skb(sk, skb);
 		else
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -379,7 +379,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 				   uh->source, saddr, dif))) {
 			struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
 			if (buff) {
-				bh_lock_sock_nested(sk2);
+				bh_lock_sock(sk2);
 				if (!sock_owned_by_user(sk2))
 					udpv6_queue_rcv_skb(sk2, buff);
 				else
@@ -387,7 +387,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 				bh_unlock_sock(sk2);
 			}
 		}
-		bh_lock_sock_nested(sk);
+		bh_lock_sock(sk);
 		if (!sock_owned_by_user(sk))
 			udpv6_queue_rcv_skb(sk, skb);
 		else
@@ -508,7 +508,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
 	/* deliver */
 
-	bh_lock_sock_nested(sk);
+	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
 	else