Commit 50b1a782 authored by Zhu Yi, committed by David S. Miller

sctp: use limited socket backlog

Make sctp adapt to the limited socket backlog change.

Cc: Vlad Yasevich <vladislav.yasevich@hp.com>
Cc: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 79545b68
...@@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association( ...@@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association(
const union sctp_addr *peer, const union sctp_addr *peer,
struct sctp_transport **pt); struct sctp_transport **pt);
static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb); static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
/* Calculate the SCTP checksum of an SCTP packet. */ /* Calculate the SCTP checksum of an SCTP packet. */
...@@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb) ...@@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb)
} }
if (sock_owned_by_user(sk)) { if (sock_owned_by_user(sk)) {
if (sctp_add_backlog(sk, skb)) {
sctp_bh_unlock_sock(sk);
sctp_chunk_free(chunk);
skb = NULL; /* sctp_chunk_free already freed the skb */
goto discard_release;
}
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
sctp_add_backlog(sk, skb);
} else { } else {
SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ); SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
sctp_inq_push(&chunk->rcvr->inqueue, chunk); sctp_inq_push(&chunk->rcvr->inqueue, chunk);
...@@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) ...@@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sctp_bh_lock_sock(sk); sctp_bh_lock_sock(sk);
if (sock_owned_by_user(sk)) { if (sock_owned_by_user(sk)) {
sk_add_backlog(sk, skb); if (sk_add_backlog_limited(sk, skb))
backloged = 1; sctp_chunk_free(chunk);
else
backloged = 1;
} else } else
sctp_inq_push(inqueue, chunk); sctp_inq_push(inqueue, chunk);
...@@ -362,22 +369,27 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) ...@@ -362,22 +369,27 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
return 0; return 0;
} }
static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb) static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{ {
struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
struct sctp_ep_common *rcvr = chunk->rcvr; struct sctp_ep_common *rcvr = chunk->rcvr;
int ret;
/* Hold the assoc/ep while hanging on the backlog queue. ret = sk_add_backlog_limited(sk, skb);
* This way, we know structures we need will not disappear from us if (!ret) {
*/ /* Hold the assoc/ep while hanging on the backlog queue.
if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) * This way, we know structures we need will not disappear
sctp_association_hold(sctp_assoc(rcvr)); * from us
else if (SCTP_EP_TYPE_SOCKET == rcvr->type) */
sctp_endpoint_hold(sctp_ep(rcvr)); if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
else sctp_association_hold(sctp_assoc(rcvr));
BUG(); else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
sctp_endpoint_hold(sctp_ep(rcvr));
else
BUG();
}
return ret;
sk_add_backlog(sk, skb);
} }
/* Handle icmp frag needed error. */ /* Handle icmp frag needed error. */
......
...@@ -3720,6 +3720,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) ...@@ -3720,6 +3720,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
SCTP_DBG_OBJCNT_INC(sock); SCTP_DBG_OBJCNT_INC(sock);
percpu_counter_inc(&sctp_sockets_allocated); percpu_counter_inc(&sctp_sockets_allocated);
/* Set socket backlog limit. */
sk->sk_backlog.limit = sysctl_sctp_rmem[1];
local_bh_disable(); local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
local_bh_enable(); local_bh_enable();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.