Commit 3106ecb4 authored by Xin Long, committed by David S. Miller

sctp: not disable bh in the whole sctp_get_port_local()

With bh disabled for the whole of sctp_get_port_local(), when
snum == 0 and too many ports are already in use, the do-while
loop can hold the cpu for a long time and trigger a soft lockup:

  [ ] watchdog: BUG: soft lockup - CPU#11 stuck for 22s!
  [ ] RIP: 0010:native_queued_spin_lock_slowpath+0x4de/0x940
  [ ] Call Trace:
  [ ]  _raw_spin_lock+0xc1/0xd0
  [ ]  sctp_get_port_local+0x527/0x650 [sctp]
  [ ]  sctp_do_bind+0x208/0x5e0 [sctp]
  [ ]  sctp_autobind+0x165/0x1e0 [sctp]
  [ ]  sctp_connect_new_asoc+0x355/0x480 [sctp]
  [ ]  __sctp_connect+0x360/0xb10 [sctp]

There is no need to disable bh for the whole of
sctp_get_port_local(). Fix the lockup by removing the
local_bh_disable() call at the beginning of the function and
using spin_lock_bh()/spin_unlock_bh() around the port hash
bucket accesses instead.

The same thing was done for inet_csk_get_port() in
commit ea8add2b ("tcp/dccp: better use of ephemeral
ports in bind()").

Thanks to Marcelo for pointing out the buggy code.

v1->v2:
  - use cond_resched() to yield cpu to other tasks if needed,
    as Eric noticed.

Fixes: 1da177e4 ("Linux-2.6.12-rc2")
Reported-by: Ying Xu <yinxu@redhat.com>
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1838d6c6
@@ -8060,8 +8060,6 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 
 	pr_debug("%s: begins, snum:%d\n", __func__, snum);
 
-	local_bh_disable();
-
 	if (snum == 0) {
 		/* Search for an available port. */
 		int low, high, remaining, index;
@@ -8079,20 +8077,21 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 				continue;
 			index = sctp_phashfn(net, rover);
 			head = &sctp_port_hashtable[index];
-			spin_lock(&head->lock);
+			spin_lock_bh(&head->lock);
 			sctp_for_each_hentry(pp, &head->chain)
 				if ((pp->port == rover) &&
 				    net_eq(net, pp->net))
 					goto next;
 			break;
 		next:
-			spin_unlock(&head->lock);
+			spin_unlock_bh(&head->lock);
+			cond_resched();
 		} while (--remaining > 0);
 
 		/* Exhausted local port range during search? */
 		ret = 1;
 		if (remaining <= 0)
-			goto fail;
+			return ret;
 
 		/* OK, here is the one we will use.  HEAD (the port
 		 * hash table list entry) is non-NULL and we hold it's
@@ -8107,7 +8106,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 	 * port iterator, pp being NULL.
 	 */
 	head = &sctp_port_hashtable[sctp_phashfn(net, snum)];
-	spin_lock(&head->lock);
+	spin_lock_bh(&head->lock);
 	sctp_for_each_hentry(pp, &head->chain) {
 		if ((pp->port == snum) && net_eq(pp->net, net))
 			goto pp_found;
@@ -8207,10 +8206,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 	ret = 0;
 
 fail_unlock:
-	spin_unlock(&head->lock);
-
-fail:
-	local_bh_enable();
+	spin_unlock_bh(&head->lock);
 	return ret;
 }