Commit bce94327 authored by David S. Miller

Merge branch 'pending' of master.kernel.org:/pub/scm/linux/kernel/git/vxy/lksctp-dev

parents 91cf45f0 9abed245
@@ -103,6 +103,7 @@ typedef enum {
 	SCTP_CMD_ASSOC_CHANGE,	 /* generate and send assoc_change event */
 	SCTP_CMD_ADAPTATION_IND, /* generate and send adaptation event */
 	SCTP_CMD_ASSOC_SHKEY,	 /* generate the association shared keys */
+	SCTP_CMD_T1_RETRAN,	 /* Mark for retransmission after T1 timeout */
 	SCTP_CMD_LAST
 } sctp_verb_t;
@@ -186,6 +186,8 @@ typedef enum {
 	SCTP_IERROR_AUTH_BAD_HMAC,
 	SCTP_IERROR_AUTH_BAD_KEYID,
 	SCTP_IERROR_PROTO_VIOLATION,
+	SCTP_IERROR_ERROR,
+	SCTP_IERROR_ABORT,
 } sctp_ierror_t;
@@ -407,6 +409,7 @@ typedef enum {
 	SCTP_RTXR_T3_RTX,
 	SCTP_RTXR_FAST_RTX,
 	SCTP_RTXR_PMTUD,
+	SCTP_RTXR_T1_RTX,
 } sctp_retransmit_reason_t;

 /* Reasons to lower cwnd. */
@@ -65,7 +65,6 @@
 #ifdef TEST_FRAME
-#undef CONFIG_PROC_FS
 #undef CONFIG_SCTP_DBG_OBJCNT
 #undef CONFIG_SYSCTL
 #endif /* TEST_FRAME */
@@ -267,6 +266,7 @@ enum
 	SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS,
 	SCTP_MIB_DELAY_SACK_EXPIREDS,
 	SCTP_MIB_AUTOCLOSE_EXPIREDS,
+	SCTP_MIB_T1_RETRANSMITS,
 	SCTP_MIB_T3_RETRANSMITS,
 	SCTP_MIB_PMTUD_RETRANSMITS,
 	SCTP_MIB_FAST_RETRANSMITS,
@@ -664,6 +664,9 @@ static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
 	return (h & (sctp_assoc_hashsize-1));
 }

+#define sctp_for_each_hentry(epb, node, head) \
+	hlist_for_each_entry(epb, node, head, node)
+
 /* Is a socket of this style? */
 #define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
 static inline int __sctp_style(const struct sock *sk, sctp_socket_type_t style)
@@ -100,20 +100,19 @@ struct crypto_hash;
 struct sctp_bind_bucket {
 	unsigned short	port;
 	unsigned short	fastreuse;
-	struct sctp_bind_bucket	*next;
-	struct sctp_bind_bucket	**pprev;
+	struct hlist_node	node;
 	struct hlist_head	owner;
 };

 struct sctp_bind_hashbucket {
 	spinlock_t	lock;
-	struct sctp_bind_bucket	*chain;
+	struct hlist_head	chain;
 };

 /* Used for hashing all associations. */
 struct sctp_hashbucket {
 	rwlock_t	lock;
-	struct sctp_ep_common	*chain;
+	struct hlist_head	chain;
 } __attribute__((__aligned__(8)));
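
The hunks above and below replace SCTP's hand-rolled next/pprev bucket chains with the kernel's generic hlist primitives. As a rough kernel-context sketch (not code from this commit), insertion and lookup against the converted buckets reduce to hlist_add_head() plus a walk with the sctp_for_each_hentry() helper added to sctp.h above; locking follows the rwlock already embedded in sctp_hashbucket:

/* Illustrative sketch only: it assumes the sctp_hashbucket and
 * sctp_ep_common layouts shown in this diff and the sctp_for_each_hentry()
 * helper added above; it is not a function from the patch itself.
 */
static void example_hash_endpoint(struct sctp_hashbucket *head,
				  struct sctp_ep_common *epb)
{
	write_lock(&head->lock);
	/* Replaces the manual next/pprev splicing that the patch removes. */
	hlist_add_head(&epb->node, &head->chain);
	write_unlock(&head->lock);
}

static struct sctp_ep_common *example_lookup(struct sctp_hashbucket *head,
					     int wanted_hashent)
{
	struct sctp_ep_common *epb;
	struct hlist_node *node;

	read_lock(&head->lock);
	/* sctp_for_each_hentry() expands to hlist_for_each_entry(). */
	sctp_for_each_hentry(epb, node, &head->chain) {
		if (epb->hashent == wanted_hashent)
			goto found;
	}
	epb = NULL;
found:
	read_unlock(&head->lock);
	return epb;
}

Because hlist_node carries its own pprev pointer, unhashing becomes a single __hlist_del() and an unhashed entry can be detected with hlist_unhashed(), which the input.c hunks further down rely on.
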
@@ -212,6 +211,7 @@ extern struct sctp_globals {
 	/* Flag to indicate if addip is enabled. */
 	int addip_enable;
+	int addip_noauth_enable;

 	/* Flag to indicate if PR-SCTP is enabled. */
 	int prsctp_enable;
@@ -249,6 +249,7 @@ extern struct sctp_globals {
 #define sctp_local_addr_list		(sctp_globals.local_addr_list)
 #define sctp_local_addr_lock		(sctp_globals.addr_list_lock)
 #define sctp_addip_enable		(sctp_globals.addip_enable)
+#define sctp_addip_noauth		(sctp_globals.addip_noauth_enable)
 #define sctp_prsctp_enable		(sctp_globals.prsctp_enable)
 #define sctp_auth_enable		(sctp_globals.auth_enable)
@@ -873,10 +874,11 @@ struct sctp_transport {
 	 * address list derived from the INIT or INIT ACK chunk, a
 	 * number of data elements needs to be maintained including:
 	 */
-	__u32 rtt;		/* This is the most recent RTT. */
 	/* RTO	   : The current retransmission timeout value. */
 	unsigned long rto;
+	unsigned long last_rto;
+	__u32 rtt;		/* This is the most recent RTT. */

 	/* RTTVAR  : The current RTT variation. */
 	__u32 rttvar;
@@ -1184,9 +1186,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
 			int flags);
 int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
 		       __u8 use_as_src, gfp_t gfp);
-int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
-			void fastcall (*rcu_call)(struct rcu_head *,
-			void (*func)(struct rcu_head *)));
+int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
 int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
 			 struct sctp_sock *);
 union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
@@ -1229,8 +1229,7 @@ typedef enum {
 struct sctp_ep_common {
 	/* Fields to help us manage our entries in the hash tables. */
-	struct sctp_ep_common *next;
-	struct sctp_ep_common **pprev;
+	struct hlist_node node;
 	int hashent;

 	/* Runtime type information. What kind of endpoint is this? */
@@ -1541,7 +1540,6 @@ struct sctp_association {
 		__u8	asconf_capable;	/* Does peer support ADDIP? */
 		__u8	prsctp_capable;	/* Can peer do PR-SCTP? */
 		__u8	auth_capable;	/* Is peer doing SCTP-AUTH? */
-		__u8	addip_capable;	/* Can peer do ADD-IP */

 	__u32	adaptation_ind;	/* Adaptation Code point. */
@@ -262,10 +262,14 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 	 */
 	asoc->peer.sack_needed = 1;

-	/* Assume that the peer recongizes ASCONF until reported otherwise
-	 * via an ERROR chunk.
+	/* Assume that the peer will tell us if he recognizes ASCONF
+	 * as part of INIT exchange.
+	 * The sctp_addip_noauth option is there for backward compatibilty
+	 * and will revert old behavior.
 	 */
-	asoc->peer.asconf_capable = 1;
+	asoc->peer.asconf_capable = 0;
+	if (sctp_addip_noauth)
+		asoc->peer.asconf_capable = 1;

 	/* Create an input queue. */
 	sctp_inq_init(&asoc->base.inqueue);
@@ -180,9 +180,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
 /* Delete an address from the bind address list in the SCTP_bind_addr
  * structure.
  */
-int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
-			void fastcall (*rcu_call)(struct rcu_head *head,
-			void (*func)(struct rcu_head *head)))
+int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
 {
 	struct sctp_sockaddr_entry *addr, *temp;
@@ -198,15 +196,10 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
 		}
 	}

-	/* Call the rcu callback provided in the args.  This function is
-	 * called by both BH packet processing and user side socket option
-	 * processing, but it works on different lists in those 2 contexts.
-	 * Each context provides it's own callback, whether call_rcu_bh()
-	 * or call_rcu(), to make sure that we wait for an appropriate time.
-	 */
 	if (addr && !addr->valid) {
-		rcu_call(&addr->rcu, sctp_local_addr_free);
+		call_rcu(&addr->rcu, sctp_local_addr_free);
 		SCTP_DBG_OBJCNT_DEC(addr);
+		return 0;
 	}

 	return -EINVAL;
@@ -328,24 +328,35 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
 	const union sctp_addr *paddr,
 	struct sctp_transport **transport)
 {
+	struct sctp_association *asoc = NULL;
+	struct sctp_transport *t = NULL;
+	struct sctp_hashbucket *head;
+	struct sctp_ep_common *epb;
+	struct hlist_node *node;
+	int hash;
 	int rport;
-	struct sctp_association *asoc;
-	struct list_head *pos;

+	*transport = NULL;
 	rport = ntohs(paddr->v4.sin_port);

-	list_for_each(pos, &ep->asocs) {
-		asoc = list_entry(pos, struct sctp_association, asocs);
-		if (rport == asoc->peer.port) {
-			*transport = sctp_assoc_lookup_paddr(asoc, paddr);
-
-			if (*transport)
-				return asoc;
+	hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
+	head = &sctp_assoc_hashtable[hash];
+	read_lock(&head->lock);
+	sctp_for_each_hentry(epb, node, &head->chain) {
+		asoc = sctp_assoc(epb);
+		if (asoc->ep != ep || rport != asoc->peer.port)
+			goto next;
+
+		t = sctp_assoc_lookup_paddr(asoc, paddr);
+		if (t) {
+			*transport = t;
+			break;
 		}
+next:
+		asoc = NULL;
 	}
+	read_unlock(&head->lock);

-	*transport = NULL;
-	return NULL;
+	return asoc;
 }

 /* Lookup association on an endpoint based on a peer address. BH-safe. */
@@ -656,7 +656,6 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
 /* Insert endpoint into the hash table. */
 static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 {
-	struct sctp_ep_common **epp;
 	struct sctp_ep_common *epb;
 	struct sctp_hashbucket *head;
@@ -666,12 +665,7 @@ static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 	head = &sctp_ep_hashtable[epb->hashent];

 	sctp_write_lock(&head->lock);
-	epp = &head->chain;
-	epb->next = *epp;
-	if (epb->next)
-		(*epp)->pprev = &epb->next;
-	*epp = epb;
-	epb->pprev = epp;
+	hlist_add_head(&epb->node, &head->chain);
 	sctp_write_unlock(&head->lock);
 }
@@ -691,19 +685,15 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 	epb = &ep->base;

+	if (hlist_unhashed(&epb->node))
+		return;
+
 	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);

 	head = &sctp_ep_hashtable[epb->hashent];

 	sctp_write_lock(&head->lock);
-	if (epb->pprev) {
-		if (epb->next)
-			epb->next->pprev = epb->pprev;
-		*epb->pprev = epb->next;
-		epb->pprev = NULL;
-	}
+	__hlist_del(&epb->node);
 	sctp_write_unlock(&head->lock);
 }
@@ -721,12 +711,13 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
 	struct sctp_hashbucket *head;
 	struct sctp_ep_common *epb;
 	struct sctp_endpoint *ep;
+	struct hlist_node *node;
 	int hash;

 	hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
 	head = &sctp_ep_hashtable[hash];
 	read_lock(&head->lock);
-	for (epb = head->chain; epb; epb = epb->next) {
+	sctp_for_each_hentry(epb, node, &head->chain) {
 		ep = sctp_ep(epb);
 		if (sctp_endpoint_is_match(ep, laddr))
 			goto hit;
@@ -744,7 +735,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
 /* Insert association into the hash table. */
 static void __sctp_hash_established(struct sctp_association *asoc)
 {
-	struct sctp_ep_common **epp;
 	struct sctp_ep_common *epb;
 	struct sctp_hashbucket *head;
@@ -756,12 +746,7 @@ static void __sctp_hash_established(struct sctp_association *asoc)
 	head = &sctp_assoc_hashtable[epb->hashent];

 	sctp_write_lock(&head->lock);
-	epp = &head->chain;
-	epb->next = *epp;
-	if (epb->next)
-		(*epp)->pprev = &epb->next;
-	*epp = epb;
-	epb->pprev = epp;
+	hlist_add_head(&epb->node, &head->chain);
 	sctp_write_unlock(&head->lock);
 }
@@ -790,14 +775,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
 	head = &sctp_assoc_hashtable[epb->hashent];

 	sctp_write_lock(&head->lock);
-	if (epb->pprev) {
-		if (epb->next)
-			epb->next->pprev = epb->pprev;
-		*epb->pprev = epb->next;
-		epb->pprev = NULL;
-	}
+	__hlist_del(&epb->node);
 	sctp_write_unlock(&head->lock);
 }
@@ -822,6 +800,7 @@ static struct sctp_association *__sctp_lookup_association(
 	struct sctp_ep_common *epb;
 	struct sctp_association *asoc;
 	struct sctp_transport *transport;
+	struct hlist_node *node;
 	int hash;

 	/* Optimize here for direct hit, only listening connections can
@@ -830,7 +809,7 @@ static struct sctp_association *__sctp_lookup_association(
 	hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
 	head = &sctp_assoc_hashtable[hash];
 	read_lock(&head->lock);
-	for (epb = head->chain; epb; epb = epb->next) {
+	sctp_for_each_hentry(epb, node, &head->chain) {
 		asoc = sctp_assoc(epb);
 		transport = sctp_assoc_is_match(asoc, local, peer);
 		if (transport)
@@ -90,6 +90,10 @@ void sctp_inq_free(struct sctp_inq *queue)
 void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
 {
 	/* Directly call the packet handling routine. */
+	if (chunk->rcvr->dead) {
+		sctp_chunk_free(chunk);
+		return;
+	}

 	/* We are now calling this either from the soft interrupt
 	 * or from the backlog processing.
@@ -382,7 +382,7 @@ static void sctp_insert_list(struct list_head *head, struct list_head *new)
 /* Mark all the eligible packets on a transport for retransmission. */
 void sctp_retransmit_mark(struct sctp_outq *q,
 			  struct sctp_transport *transport,
-			  __u8 fast_retransmit)
+			  __u8 reason)
 {
 	struct list_head *lchunk, *ltemp;
 	struct sctp_chunk *chunk;
@@ -412,20 +412,20 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 			continue;
 		}

-		/* If we are doing retransmission due to a fast retransmit,
-		 * only the chunk's that are marked for fast retransmit
-		 * should be added to the retransmit queue.  If we are doing
-		 * retransmission due to a timeout or pmtu discovery, only the
-		 * chunks that are not yet acked should be added to the
-		 * retransmit queue.
+		/* If we are doing retransmission due to a timeout or pmtu
+		 * discovery, only the chunks that are not yet acked should
+		 * be added to the retransmit queue.
 		 */
-		if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
-		    (!fast_retransmit && !chunk->tsn_gap_acked)) {
+		if ((reason == SCTP_RTXR_FAST_RTX &&
+		     (chunk->fast_retransmit > 0)) ||
+		    (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
 			/* If this chunk was sent less then 1 rto ago, do not
 			 * retransmit this chunk, but give the peer time
-			 * to acknowlege it.
+			 * to acknowlege it.  Do this only when
+			 * retransmitting due to T3 timeout.
 			 */
-			if ((jiffies - chunk->sent_at) < transport->rto)
+			if (reason == SCTP_RTXR_T3_RTX &&
+			    (jiffies - chunk->sent_at) < transport->last_rto)
 				continue;

 			/* RFC 2960 6.2.1 Processing a Received SACK
@@ -467,10 +467,10 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 		}
 	}

-	SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, "
+	SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
 			  "cwnd: %d, ssthresh: %d, flight_size: %d, "
 			  "pba: %d\n", __FUNCTION__,
-			  transport, fast_retransmit,
+			  transport, reason,
 			  transport->cwnd, transport->ssthresh,
 			  transport->flight_size,
 			  transport->partial_bytes_acked);
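
Since sctp_retransmit_mark() now receives the retransmit reason instead of a boolean, the per-chunk eligibility rule is easier to state outside the diff. A condensed restatement (simplified names and types, not the kernel code itself):

/* Sketch of the rule applied per chunk above; the struct and enum here are
 * stand-ins for the real sctp_chunk / sctp_retransmit_reason_t definitions.
 */
enum rtx_reason { RTX_T3, RTX_FAST, RTX_PMTUD, RTX_T1 };

struct chunk_view {
	int fast_retransmit;		/* marked by SACK gap reports */
	int tsn_gap_acked;		/* already covered by a gap block */
	unsigned long sent_at;		/* time the chunk was last sent */
};

static int chunk_needs_retransmit(enum rtx_reason reason,
				  const struct chunk_view *c,
				  unsigned long now, unsigned long last_rto)
{
	if (reason == RTX_FAST)
		return c->fast_retransmit > 0;
	if (c->tsn_gap_acked)
		return 0;
	/* Only a T3 timeout spares chunks sent within the previous RTO. */
	if (reason == RTX_T3 && (now - c->sent_at) < last_rto)
		return 0;
	return 1;
}

Note that the T3 case now compares against last_rto, the value in force before the strike doubled the RTO, so a freshly backed-off RTO does not suppress retransmission of chunks that are genuinely overdue.
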
@@ -484,7 +484,6 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 		     sctp_retransmit_reason_t reason)
 {
 	int error = 0;
-	__u8 fast_retransmit = 0;

 	switch(reason) {
 	case SCTP_RTXR_T3_RTX:
@@ -499,16 +498,18 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 	case SCTP_RTXR_FAST_RTX:
 		SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
-		fast_retransmit = 1;
 		break;
 	case SCTP_RTXR_PMTUD:
 		SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
 		break;
+	case SCTP_RTXR_T1_RTX:
+		SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
+		break;
 	default:
 		BUG();
 	}

-	sctp_retransmit_mark(q, transport, fast_retransmit);
+	sctp_retransmit_mark(q, transport, reason);

 	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
 	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
@@ -641,7 +642,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 	/* If we are here due to a retransmit timeout or a fast
 	 * retransmit and if there are any chunks left in the retransmit
-	 * queue that could not fit in the PMTU sized packet, they need	 * to be marked as ineligible for a subsequent fast retransmit.
+	 * queue that could not fit in the PMTU sized packet, they need
+	 * to be marked as ineligible for a subsequent fast retransmit.
 	 */
 	if (rtx_timeout && !lchunk) {
 		list_for_each(lchunk1, lqueue) {
@@ -660,10 +662,9 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 int sctp_outq_uncork(struct sctp_outq *q)
 {
 	int error = 0;
-	if (q->cork) {
+	if (q->cork)
 		q->cork = 0;
-		error = sctp_outq_flush(q, 0);
-	}
+	error = sctp_outq_flush(q, 0);
 	return error;
 }
@@ -225,6 +225,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
 	struct sctp_ep_common *epb;
 	struct sctp_endpoint *ep;
 	struct sock *sk;
+	struct hlist_node *node;
 	int    hash = *(loff_t *)v;

 	if (hash >= sctp_ep_hashsize)
@@ -233,7 +234,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
 	head = &sctp_ep_hashtable[hash];
 	sctp_local_bh_disable();
 	read_lock(&head->lock);
-	for (epb = head->chain; epb; epb = epb->next) {
+	sctp_for_each_hentry(epb, node, &head->chain) {
 		ep = sctp_ep(epb);
 		sk = epb->sk;
 		seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
@@ -328,6 +329,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 	struct sctp_ep_common *epb;
 	struct sctp_association *assoc;
 	struct sock *sk;
+	struct hlist_node *node;
 	int    hash = *(loff_t *)v;

 	if (hash >= sctp_assoc_hashsize)
@@ -336,7 +338,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 	head = &sctp_assoc_hashtable[hash];
 	sctp_local_bh_disable();
 	read_lock(&head->lock);
-	for (epb = head->chain; epb; epb = epb->next) {
+	sctp_for_each_hentry(epb, node, &head->chain) {
 		assoc = sctp_assoc(epb);
 		sk = epb->sk;
 		seq_printf(seq,
@@ -1137,7 +1137,7 @@ SCTP_STATIC __init int sctp_init(void)
 	}
 	for (i = 0; i < sctp_assoc_hashsize; i++) {
 		rwlock_init(&sctp_assoc_hashtable[i].lock);
-		sctp_assoc_hashtable[i].chain = NULL;
+		INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain);
 	}

 	/* Allocate and initialize the endpoint hash table. */
@@ -1151,7 +1151,7 @@ SCTP_STATIC __init int sctp_init(void)
 	}
 	for (i = 0; i < sctp_ep_hashsize; i++) {
 		rwlock_init(&sctp_ep_hashtable[i].lock);
-		sctp_ep_hashtable[i].chain = NULL;
+		INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
 	}

 	/* Allocate and initialize the SCTP port hash table. */
@@ -1170,7 +1170,7 @@ SCTP_STATIC __init int sctp_init(void)
 	}
 	for (i = 0; i < sctp_port_hashsize; i++) {
 		spin_lock_init(&sctp_port_hashtable[i].lock);
-		sctp_port_hashtable[i].chain = NULL;
+		INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
 	}

 	printk(KERN_INFO "SCTP: Hash tables configured "
@@ -1179,6 +1179,7 @@ SCTP_STATIC __init int sctp_init(void)
 	/* Disable ADDIP by default. */
 	sctp_addip_enable = 0;
+	sctp_addip_noauth = 0;

 	/* Enable PR-SCTP by default. */
 	sctp_prsctp_enable = 1;
@@ -453,6 +453,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
 	 * maximum value discussed in rule C7 above (RTO.max) may be
 	 * used to provide an upper bound to this doubling operation.
 	 */
+	transport->last_rto = transport->rto;
 	transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
 }
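
Taken together with the transport.c hunks near the end of this diff, last_rto simply snapshots the RTO in force before each exponential backoff, so the outqueue code above can keep judging "recently sent" against the pre-backoff timeout. A minimal standalone sketch of the strike path (simplified fields, not the kernel structures):

/* Sketch of the backoff bookkeeping added above. */
struct rto_view {
	unsigned long rto;
	unsigned long last_rto;
};

static void transport_strike(struct rto_view *t, unsigned long rto_max)
{
	t->last_rto = t->rto;	/* remember the pre-backoff value */
	t->rto = t->rto * 2;	/* double the RTO on a strike */
	if (t->rto > rto_max)	/* clamp at RTO.max (rule C7 in the comment) */
		t->rto = rto_max;
}
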
@@ -1267,6 +1268,12 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			sctp_ootb_pkt_free(packet);
 			break;

+		case SCTP_CMD_T1_RETRAN:
+			/* Mark a transport for retransmission. */
+			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
+					SCTP_RTXR_T1_RTX);
+			break;
+
 		case SCTP_CMD_RETRAN:
 			/* Mark a transport for retransmission. */
 			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
@@ -1393,7 +1400,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			list_for_each(pos, &asoc->peer.transport_addr_list) {
 				t = list_entry(pos, struct sctp_transport,
 					       transports);
-				sctp_retransmit_mark(&asoc->outqueue, t, 0);
+				sctp_retransmit_mark(&asoc->outqueue, t,
+						     SCTP_RTXR_T1_RTX);
 			}

 			sctp_add_cmd_sf(commands,
@@ -2305,7 +2305,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
 	/* If we've sent any data bundled with COOKIE-ECHO we will need to
 	 * resend
 	 */
-	sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN,
+	sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN,
 			SCTP_TRANSPORT(asoc->peer.primary_path));

 	/* Cast away the const modifier, as we want to just
@@ -4064,11 +4064,6 @@ static sctp_disposition_t sctp_sf_abort_violation(
 	struct sctp_chunk *chunk = arg;
 	struct sctp_chunk *abort = NULL;

-	/* Make the abort chunk. */
-	abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
-	if (!abort)
-		goto nomem;
-
 	/* SCTP-AUTH, Section 6.3:
 	 *    It should be noted that if the receiver wants to tear
 	 *    down an association in an authenticated way only, the
@@ -4083,6 +4078,11 @@ static sctp_disposition_t sctp_sf_abort_violation(
 	if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
 		goto discard;

+	/* Make the abort chunk. */
+	abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
+	if (!abort)
+		goto nomem;
+
 	if (asoc) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
 		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
@@ -660,7 +660,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
 		 * socket routing and failover schemes. Refer to comments in
 		 * sctp_do_bind(). -daisy
 		 */
-		retval = sctp_del_bind_addr(bp, sa_addr, call_rcu);
+		retval = sctp_del_bind_addr(bp, sa_addr);

 		addr_buf += af->sockaddr_len;
 err_bindx_rem:
@@ -5307,6 +5307,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 {
 	struct sctp_bind_hashbucket *head; /* hash list */
 	struct sctp_bind_bucket *pp; /* hash list port iterator */
+	struct hlist_node *node;
 	unsigned short snum;
 	int ret;
@@ -5331,7 +5332,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 			index = sctp_phashfn(rover);
 			head = &sctp_port_hashtable[index];
 			sctp_spin_lock(&head->lock);
-			for (pp = head->chain; pp; pp = pp->next)
+			sctp_for_each_hentry(pp, node, &head->chain)
 				if (pp->port == rover)
 					goto next;
 			break;
@@ -5358,7 +5359,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 	 */
 	head = &sctp_port_hashtable[sctp_phashfn(snum)];
 	sctp_spin_lock(&head->lock);
-	for (pp = head->chain; pp; pp = pp->next) {
+	sctp_for_each_hentry(pp, node, &head->chain) {
 		if (pp->port == snum)
 			goto pp_found;
 	}
@@ -5702,10 +5703,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
 		pp->port = snum;
 		pp->fastreuse = 0;
 		INIT_HLIST_HEAD(&pp->owner);
-		if ((pp->next = head->chain) != NULL)
-			pp->next->pprev = &pp->next;
-		head->chain = pp;
-		pp->pprev = &head->chain;
+		hlist_add_head(&pp->node, &head->chain);
 	}
 	return pp;
 }
@@ -5714,9 +5712,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
 {
 	if (pp && hlist_empty(&pp->owner)) {
-		if (pp->next)
-			pp->next->pprev = pp->pprev;
-		*(pp->pprev) = pp->next;
+		__hlist_del(&pp->node);
 		kmem_cache_free(sctp_bucket_cachep, pp);
 		SCTP_DBG_OBJCNT_DEC(bind_bucket);
 	}
@@ -263,6 +263,15 @@ static ctl_table sctp_table[] = {
 		.proc_handler	= &proc_dointvec,
 		.strategy	= &sysctl_intvec
 	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "addip_noauth_enable",
+		.data		= &sctp_addip_noauth,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+		.strategy	= &sysctl_intvec
+	},
 	{ .ctl_name = 0 }
 };
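
The new entry sits in the same ctl_table as the existing SCTP knobs, so, assuming that table stays registered under net.sctp as before (not shown in this diff), the flag appears as /proc/sys/net/sctp/addip_noauth_enable. A small userspace check, purely illustrative:

#include <stdio.h>

int main(void)
{
	/* Path assumes the sctp_table above is registered under net.sctp. */
	const char *path = "/proc/sys/net/sctp/addip_noauth_enable";
	FILE *f = fopen(path, "r");
	int val = -1;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &val) != 1)
		fprintf(stderr, "unexpected contents in %s\n", path);
	fclose(f);

	printf("addip_noauth_enable = %d\n", val);
	return 0;
}

Writing 1 to the same file restores the pre-patch behaviour of assuming every peer is ASCONF-capable, per the associola.c hunk earlier in this diff.
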
@@ -74,8 +74,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 	 * given destination transport address, set RTO to the protocol
 	 * parameter 'RTO.Initial'.
 	 */
+	peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial);
 	peer->rtt = 0;
-	peer->rto = msecs_to_jiffies(sctp_rto_initial);
 	peer->rttvar = 0;
 	peer->srtt = 0;
 	peer->rto_pending = 0;
@@ -385,6 +385,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
 		tp->rto = tp->asoc->rto_max;

 	tp->rtt = rtt;
+	tp->last_rto = tp->rto;

 	/* Reset rto_pending so that a new RTT measurement is started when a
 	 * new data chunk is sent.
@@ -578,7 +579,7 @@ void sctp_transport_reset(struct sctp_transport *t)
 	 */
 	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
 	t->ssthresh = asoc->peer.i.a_rwnd;
-	t->rto = asoc->rto_initial;
+	t->last_rto = t->rto = asoc->rto_initial;
 	t->rtt = 0;
 	t->srtt = 0;
 	t->rttvar = 0;
@@ -862,7 +862,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 			continue;

 		/* see if this ssn has been marked by skipping */
-		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+		if (!SSN_lte(cssn, sctp_ssn_peek(in, csid)))
 			break;

 		__skb_unlink(pos, &ulpq->lobby);
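
SSN_lt() and SSN_lte() compare 16-bit stream sequence numbers with wrap-around (serial-number) semantics; the one-character change above matters when the skipped SSN is exactly equal to the one waiting in the lobby, which the strict comparison used to reject. A standalone illustration of this style of comparison (a common implementation pattern, not necessarily the kernel's exact macros):

#include <stdint.h>
#include <stdio.h>

/* Serial-number comparison for 16-bit SSNs: "a precedes b" holds even
 * across a counter wrap, as long as the two values lie within half the
 * number space of each other.
 */
static int ssn_lt(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

static int ssn_lte(uint16_t a, uint16_t b)
{
	return a == b || ssn_lt(a, b);
}

int main(void)
{
	printf("%d\n", ssn_lt(65535, 0));	/* 1: 65535 precedes 0 after wrap */
	printf("%d\n", ssn_lt(7, 7));		/* 0: strict form rejects equality */
	printf("%d\n", ssn_lte(7, 7));		/* 1: the <= form accepts it, as needed above */
	return 0;
}
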