Commit f51b15cd authored by Sridhar Samudrala

Merge us.ibm.com:/home/sridhar/BK/linux-2.5.59

into us.ibm.com:/home/sridhar/BK/lksctp-2.5.59
parents 6a3354a9 ea393c93
@@ -56,8 +56,10 @@
 #include <linux/ipv6.h> /* For ipv6hdr. */
 #include <net/sctp/user.h>

-/* What a hack! Jiminy Cricket! */
-enum { SCTP_MAX_STREAM = 10 };
+/* Value used for stream negotiation. */
+enum { SCTP_MAX_STREAM = 0xffff };
+enum { SCTP_DEFAULT_OUTSTREAMS = 10 };
+enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
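These defaults feed stream negotiation at INIT time. Roughly, a socket that does not supply its own sctp_initmsg could be seeded from them; a sketch only, using the sp->initmsg fields that appear later in this commit:

	/* Sketch: seed per-socket INIT defaults from the new enums. */
	sp->initmsg.sinit_num_ostreams  = SCTP_DEFAULT_OUTSTREAMS;
	sp->initmsg.sinit_max_instreams = SCTP_DEFAULT_INSTREAMS;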
/* Define the amount of space to reserve for SCTP, IP, LL. /* Define the amount of space to reserve for SCTP, IP, LL.
* There is a little bit of waste that we are always allocating * There is a little bit of waste that we are always allocating
......
/* SCTP kernel reference Implementation /* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 1999-2001 Motorola, Inc.
-* Copyright (c) 2001-2002 International Business Machines, Corp.
-* Copyright (c) 2001 Intel Corp.
+* Copyright (c) 2001-2003 International Business Machines, Corp.
+* Copyright (c) 2001-2003 Intel Corp.
* *
* This file is part of the SCTP kernel reference Implementation * This file is part of the SCTP kernel reference Implementation
* *
...@@ -36,7 +36,9 @@ ...@@ -36,7 +36,9 @@
* La Monte H.P. Yarroll <piggy@acm.org> * La Monte H.P. Yarroll <piggy@acm.org>
* Xingang Guo <xingang.guo@intel.com> * Xingang Guo <xingang.guo@intel.com>
* Jon Grimm <jgrimm@us.ibm.com> * Jon Grimm <jgrimm@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* *
* Any bugs reported given to us we will try to fix... any fixes shared will * Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release. * be incorporated into the next SCTP release.
@@ -147,7 +149,9 @@ extern int sctp_primitive_REQUESTHEARTBEAT(sctp_association_t *, void *arg);
/*
 * sctp_crc32c.c
 */
-extern __u32 count_crc(__u8 *ptr, __u16 count);
+extern __u32 sctp_start_cksum(__u8 *ptr, __u16 count);
+extern __u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum);
+extern __u32 sctp_end_cksum(__u32 cksum);
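The checksum is now computed in three phases so that data spread over several buffers (for example an skb frag_list, as sctp_rcv_checksum() does further down in this commit) can be folded in piece by piece. A minimal sketch, with hypothetical buffer names:

	__u32 val;

	val = sctp_start_cksum(frag0, frag0_len);	/* begins at the SCTP header */
	val = sctp_update_cksum(frag1, frag1_len, val);	/* fold in another buffer */
	val = sctp_end_cksum(val);			/* finalize the running value */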
/* /*
* sctp_input.c * sctp_input.c
@@ -266,6 +270,7 @@ extern atomic_t sctp_dbg_objcnt_transport;
extern atomic_t sctp_dbg_objcnt_chunk;
extern atomic_t sctp_dbg_objcnt_bind_addr;
extern atomic_t sctp_dbg_objcnt_addr;
+extern atomic_t sctp_dbg_objcnt_ssnmap;

/* Macros to atomically increment/decrement objcnt counters. */
#define SCTP_DBG_OBJCNT_INC(name) \
@@ -418,6 +423,23 @@ static inline size_t get_user_iov_size(struct iovec *iov, int iovlen)
	return retval;
}
/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */
static inline __s32 sctp_jitter(__u32 rto)
{
static __u32 sctp_rand;
__s32 ret;
sctp_rand += jiffies;
sctp_rand ^= (sctp_rand << 12);
sctp_rand ^= (sctp_rand >> 20);
/* Choose random number from 0 to rto, then move to -50% ~ +50%
* of rto.
*/
ret = sctp_rand % rto - (rto >> 1);
return ret;
}
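The jitter would typically be folded into a retransmission timeout before arming a timer; a rough illustration only (the rto and timer field names are assumptions here):

	/* Illustration: schedule a T3-rtx expiry with +/-50% jitter. */
	unsigned long timeout = jiffies + transport->rto + sctp_jitter(transport->rto);
	mod_timer(&transport->T3_rtx_timer, timeout);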
/* Walk through a list of TLV parameters. Don't trust the /* Walk through a list of TLV parameters. Don't trust the
* individual parameter lengths and instead depend on * individual parameter lengths and instead depend on
* the chunk length to indicate when to stop. Make sure * the chunk length to indicate when to stop. Make sure
......
@@ -269,6 +269,7 @@ sctp_chunk_t *sctp_make_op_error(const sctp_association_t *,
			      const void *payload,
			      size_t paylen);
void sctp_chunk_assign_tsn(sctp_chunk_t *);
+void sctp_chunk_assign_ssn(sctp_chunk_t *);

/* Prototypes for statetable processing. */
......
...@@ -42,7 +42,7 @@ ...@@ -42,7 +42,7 @@
* Sridhar Samudrala <sri@us.ibm.com> * Sridhar Samudrala <sri@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com>
* Dajiang Zhang <dajiang.zhang@nokia.com> * Dajiang Zhang <dajiang.zhang@nokia.com>
* Ardelle Fan <ardelle.fan@intel.com> * Ardelle Fan <ardelle.fan@intel.com>
* *
* Any bugs reported given to us we will try to fix... any fixes shared will * Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release. * be incorporated into the next SCTP release.
@@ -104,27 +104,27 @@ union sctp_addr {
/* Forward declarations for data structures. */
-struct SCTP_protocol;
+struct sctp_protocol;
struct SCTP_endpoint;
struct SCTP_association;
struct SCTP_transport;
struct SCTP_packet;
struct SCTP_chunk;
struct SCTP_inqueue;
-struct SCTP_outqueue;
+struct sctp_outq;
struct SCTP_bind_addr;
+struct sctp_ulpq;
struct sctp_opt;
struct sctp_endpoint_common;
+struct sctp_ssnmap;

-typedef struct SCTP_protocol sctp_protocol_t;
+typedef struct sctp_protocol sctp_protocol_t;
typedef struct SCTP_endpoint sctp_endpoint_t;
typedef struct SCTP_association sctp_association_t;
typedef struct SCTP_transport sctp_transport_t;
typedef struct SCTP_packet sctp_packet_t;
typedef struct SCTP_chunk sctp_chunk_t;
typedef struct SCTP_inqueue sctp_inqueue_t;
-typedef struct SCTP_outqueue sctp_outqueue_t;
typedef struct SCTP_bind_addr sctp_bind_addr_t;
typedef struct sctp_opt sctp_opt_t;
typedef struct sctp_endpoint_common sctp_endpoint_common_t;
...@@ -133,7 +133,6 @@ typedef struct sctp_endpoint_common sctp_endpoint_common_t; ...@@ -133,7 +133,6 @@ typedef struct sctp_endpoint_common sctp_endpoint_common_t;
#include <net/sctp/ulpevent.h> #include <net/sctp/ulpevent.h>
#include <net/sctp/ulpqueue.h> #include <net/sctp/ulpqueue.h>
/* Structures useful for managing bind/connect. */ /* Structures useful for managing bind/connect. */
typedef struct sctp_bind_bucket { typedef struct sctp_bind_bucket {
@@ -157,7 +156,7 @@ typedef struct sctp_hashbucket {
/* The SCTP protocol structure. */
-struct SCTP_protocol {
+struct sctp_protocol {
	/* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
	 *
	 * The following protocol parameters are RECOMMENDED:
...@@ -183,8 +182,8 @@ struct SCTP_protocol { ...@@ -183,8 +182,8 @@ struct SCTP_protocol {
/* Valid.Cookie.Life - 60 seconds */ /* Valid.Cookie.Life - 60 seconds */
int valid_cookie_life; int valid_cookie_life;
/* Whether Cookie Preservative is enabled(1) or not(0) */ /* Whether Cookie Preservative is enabled(1) or not(0) */
int cookie_preserve_enable; int cookie_preserve_enable;
/* Association.Max.Retrans - 10 attempts /* Association.Max.Retrans - 10 attempts
@@ -282,7 +281,7 @@ struct sctp_af *sctp_get_af_specific(sa_family_t);
int sctp_register_af(struct sctp_af *);

/* Protocol family functions. */
-typedef struct sctp_pf {
+struct sctp_pf {
	void (*event_msgname)(sctp_ulpevent_t *, char *, int *);
	void (*skb_msgname) (struct sk_buff *, char *, int *);
	int  (*af_supported) (sa_family_t);
@@ -291,7 +290,7 @@ typedef struct sctp_pf {
			  struct sctp_opt *);
	int (*bind_verify) (struct sctp_opt *, union sctp_addr *);
	struct sctp_af *af;
-} sctp_pf_t;
+};

/* SCTP Socket type: UDP or TCP style. */
typedef enum {
@@ -318,7 +317,7 @@ struct sctp_opt {
	__u32 autoclose;
	__u8 nodelay;
	__u8 disable_fragments;
-	sctp_pf_t *pf;
+	struct sctp_pf *pf;
};
@@ -360,7 +359,8 @@ typedef struct sctp_cookie {
	struct timeval expiration;

	/* Number of inbound/outbound streams which are set
-	 * and negotiated during the INIT process. */
+	 * and negotiated during the INIT process.
+	 */
	__u16 sinit_num_ostreams;
	__u16 sinit_max_instreams;
@@ -426,6 +426,49 @@ typedef struct sctp_sender_hb_info {
	unsigned long sent_at;
} sctp_sender_hb_info_t __attribute__((packed));
/*
* RFC 2960 1.3.2 Sequenced Delivery within Streams
*
* The term "stream" is used in SCTP to refer to a sequence of user
* messages that are to be delivered to the upper-layer protocol in
* order with respect to other messages within the same stream. This is
* in contrast to its usage in TCP, where it refers to a sequence of
* bytes (in this document a byte is assumed to be eight bits).
* ...
*
* This is the structure we use to track both our outbound and inbound
* SSN, or Stream Sequence Numbers.
*/
struct sctp_stream {
__u16 *ssn;
unsigned int len;
};
struct sctp_ssnmap {
struct sctp_stream in;
struct sctp_stream out;
int malloced;
};
struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *, __u16, __u16);
struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, int priority);
void sctp_ssnmap_free(struct sctp_ssnmap *map);
void sctp_ssnmap_clear(struct sctp_ssnmap *map);
/* What is the current SSN number for this stream? */
static inline __u16 sctp_ssn_peek(struct sctp_stream *stream, __u16 id)
{
return stream->ssn[id];
}
/* Return the next SSN number for this stream. */
static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id)
{
return stream->ssn[id]++;
}
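A sketch of how the map is intended to be used once the stream counts are known (variable names are hypothetical; the real call sites are in the association code further down in this commit):

	struct sctp_ssnmap *map;
	__u16 ssn;

	map = sctp_ssnmap_new(inbound, outbound, GFP_KERNEL);	/* one __u16 per stream, in and out */
	if (!map)
		return -ENOMEM;
	ssn = sctp_ssn_next(&map->out, sid);	/* stamp an outbound chunk, then post-increment */
	sctp_ssnmap_free(map);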
/* RFC2960 1.4 Key Terms
 *
 * o Chunk: A unit of information within an SCTP packet, consisting of
@@ -499,6 +542,7 @@ struct SCTP_chunk {
	__u8 rtt_in_progress;	/* Is this chunk used for RTT calculation? */
	__u8 num_times_sent;	/* How many times did we send this? */
	__u8 has_tsn;		/* Does this chunk have a TSN yet? */
+	__u8 has_ssn;		/* Does this chunk have an SSN yet? */
	__u8 singleton;		/* Was this the only chunk in the packet? */
	__u8 end_of_packet;	/* Was this the last chunk in the packet? */
	__u8 ecn_ce_done;	/* Have we processed the ECN CE bit? */
@@ -578,27 +622,27 @@ struct SCTP_packet {
	int malloced;
};
-typedef int (sctp_outqueue_thandler_t)(sctp_outqueue_t *, void *);
-typedef int (sctp_outqueue_ehandler_t)(sctp_outqueue_t *);
-typedef sctp_packet_t *(sctp_outqueue_ohandler_init_t)
+typedef int (sctp_outq_thandler_t)(struct sctp_outq *, void *);
+typedef int (sctp_outq_ehandler_t)(struct sctp_outq *);
+typedef sctp_packet_t *(sctp_outq_ohandler_init_t)
	(sctp_packet_t *,
	 sctp_transport_t *,
	 __u16 sport,
	 __u16 dport);
-typedef sctp_packet_t *(sctp_outqueue_ohandler_config_t)
+typedef sctp_packet_t *(sctp_outq_ohandler_config_t)
	(sctp_packet_t *,
	 __u32 vtag,
	 int ecn_capable,
	 sctp_packet_phandler_t *get_prepend_chunk);
-typedef sctp_xmit_t (sctp_outqueue_ohandler_t)(sctp_packet_t *,
-					       sctp_chunk_t *);
-typedef int (sctp_outqueue_ohandler_force_t)(sctp_packet_t *);
+typedef sctp_xmit_t (sctp_outq_ohandler_t)(sctp_packet_t *,
+					   sctp_chunk_t *);
+typedef int (sctp_outq_ohandler_force_t)(sctp_packet_t *);

-sctp_outqueue_ohandler_init_t sctp_packet_init;
-sctp_outqueue_ohandler_config_t sctp_packet_config;
-sctp_outqueue_ohandler_t sctp_packet_append_chunk;
-sctp_outqueue_ohandler_t sctp_packet_transmit_chunk;
-sctp_outqueue_ohandler_force_t sctp_packet_transmit;
+sctp_outq_ohandler_init_t sctp_packet_init;
+sctp_outq_ohandler_config_t sctp_packet_config;
+sctp_outq_ohandler_t sctp_packet_append_chunk;
+sctp_outq_ohandler_t sctp_packet_transmit_chunk;
+sctp_outq_ohandler_force_t sctp_packet_transmit;
void sctp_packet_free(sctp_packet_t *);
@@ -835,7 +879,7 @@ void sctp_inqueue_set_th_handler(sctp_inqueue_t *,
 *
 * When free()'d, it empties itself out via output_handler().
 */
-struct SCTP_outqueue {
+struct sctp_outq {
	sctp_association_t *asoc;

	/* BUG: This really should be an array of streams.
@@ -861,11 +905,11 @@ struct SCTP_outqueue {
	 * layer.  This is always SCTP_packet, but we separate the two
	 * structures to make testing simpler.
	 */
-	sctp_outqueue_ohandler_init_t *init_output;
-	sctp_outqueue_ohandler_config_t *config_output;
-	sctp_outqueue_ohandler_t *append_output;
-	sctp_outqueue_ohandler_t *build_output;
-	sctp_outqueue_ohandler_force_t *force_output;
+	sctp_outq_ohandler_init_t *init_output;
+	sctp_outq_ohandler_config_t *config_output;
+	sctp_outq_ohandler_t *append_output;
+	sctp_outq_ohandler_t *build_output;
+	sctp_outq_ohandler_force_t *force_output;

	/* How many unackd bytes do we have in-flight? */
	__u32 outstanding_bytes;
@@ -877,24 +921,23 @@ struct SCTP_outqueue {
	int malloced;
};

-sctp_outqueue_t *sctp_outqueue_new(sctp_association_t *);
-void sctp_outqueue_init(sctp_association_t *, sctp_outqueue_t *);
-void sctp_outqueue_teardown(sctp_outqueue_t *);
-void sctp_outqueue_free(sctp_outqueue_t*);
-void sctp_force_outqueue(sctp_outqueue_t *);
-int sctp_push_outqueue(sctp_outqueue_t *, sctp_chunk_t *chunk);
-int sctp_flush_outqueue(sctp_outqueue_t *, int);
-int sctp_sack_outqueue(sctp_outqueue_t *, sctp_sackhdr_t *);
-int sctp_outqueue_is_empty(const sctp_outqueue_t *);
-int sctp_outqueue_set_output_handlers(sctp_outqueue_t *,
-				      sctp_outqueue_ohandler_init_t init,
-				      sctp_outqueue_ohandler_config_t config,
-				      sctp_outqueue_ohandler_t append,
-				      sctp_outqueue_ohandler_t build,
-				      sctp_outqueue_ohandler_force_t force);
-void sctp_outqueue_restart(sctp_outqueue_t *);
-void sctp_retransmit(sctp_outqueue_t *, sctp_transport_t *, __u8);
-void sctp_retransmit_mark(sctp_outqueue_t *, sctp_transport_t *, __u8);
+struct sctp_outq *sctp_outq_new(sctp_association_t *);
+void sctp_outq_init(sctp_association_t *, struct sctp_outq *);
+void sctp_outq_teardown(struct sctp_outq *);
+void sctp_outq_free(struct sctp_outq*);
+int sctp_outq_tail(struct sctp_outq *, sctp_chunk_t *chunk);
+int sctp_outq_flush(struct sctp_outq *, int);
+int sctp_outq_sack(struct sctp_outq *, sctp_sackhdr_t *);
+int sctp_outq_is_empty(const struct sctp_outq *);
+int sctp_outq_set_output_handlers(struct sctp_outq *,
+				  sctp_outq_ohandler_init_t init,
+				  sctp_outq_ohandler_config_t config,
+				  sctp_outq_ohandler_t append,
+				  sctp_outq_ohandler_t build,
+				  sctp_outq_ohandler_force_t force);
+void sctp_outq_restart(struct sctp_outq *);
+void sctp_retransmit(struct sctp_outq *, sctp_transport_t *, __u8);
+void sctp_retransmit_mark(struct sctp_outq *, sctp_transport_t *, __u8);

/* These bind address data fields common between endpoints and associations */
@@ -1027,7 +1070,7 @@ struct SCTP_endpoint {
	/* These are the system-wide defaults and other stuff which is
	 * endpoint-independent.
	 */
-	sctp_protocol_t *proto;
+	struct sctp_protocol *proto;

	/* Associations: A list of current associations and mappings
	 * to the data consumers for each association. This
@@ -1408,18 +1451,15 @@ struct SCTP_association {
	} defaults;

	/* This tracks outbound ssn for a given stream. */
-	__u16 ssn[SCTP_MAX_STREAM];
+	struct sctp_ssnmap *ssnmap;

	/* All outbound chunks go through this structure. */
-	sctp_outqueue_t outqueue;
+	struct sctp_outq outqueue;

	/* A smart pipe that will handle reordering and fragmentation,
	 * as well as handle passing events up to the ULP.
-	 * In the future, we should make this at least dynamic, if
-	 * not also some sparse structure.
	 */
-	sctp_ulpqueue_t ulpq;
-	__u8 _ssnmap[sctp_ulpqueue_storage_size(SCTP_MAX_STREAM)];
+	struct sctp_ulpq ulpq;

	/* Need to send an ECNE Chunk? */
	int need_ecne;
@@ -1505,7 +1545,7 @@ struct SCTP_association {
	 *
	 *
	 * [I really think this is EXACTLY the sort of intelligence
-	 * which already resides in SCTP_outqueue. Please move this
+	 * which already resides in sctp_outq. Please move this
	 * queue and its supporting logic down there. --piggy]
	 */
	struct sk_buff_head addip_chunks;
......
/* SCTP kernel reference Implementation /* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 1999-2001 Motorola, Inc.
-* Copyright (c) 2001 International Business Machines, Corp.
+* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc. * Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll * Copyright (c) 2001 La Monte H.P. Yarroll
* *
-* These are the definitions needed for the sctp_ulpqueue type.  The
-* sctp_ulpqueue is the interface between the Upper Layer Protocol, or ULP,
+* These are the definitions needed for the sctp_ulpq type.  The
+* sctp_ulpq is the interface between the Upper Layer Protocol, or ULP,
* and the core SCTP state machine. This is the component which handles * and the core SCTP state machine. This is the component which handles
* reassembly and ordering. * reassembly and ordering.
* *
* The SCTP reference implementation is free software; * The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of * you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by * the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option) * the Free Software Foundation; either version 2, or (at your option)
* any later version. * any later version.
* *
* the SCTP reference implementation is distributed in the hope that it * the SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied * will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************ * ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details. * See the GNU General Public License for more details.
* *
* You should have received a copy of the GNU General Public License * You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to * along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330, * the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA. * Boston, MA 02111-1307, USA.
* *
-* Please send any bug reports or fixes you make to one of the
-* following email addresses:
-*
-* Jon Grimm <jgrimm@us.ibm.com>
-* La Monte H.P. Yarroll <piggy@acm.org>
-*
+* Please send any bug reports or fixes you make to the
+* email addresses:
+*    lksctp developers <lksctp-developers@lists.sourceforge.net>
+*
+* Or submit a bug report through the following website:
+*    http://www.sf.net/projects/lksctp
+*
+* Written or modified by:
+*    Jon Grimm <jgrimm@us.ibm.com>
+*    La Monte H.P. Yarroll <piggy@acm.org>
*
* Any bugs reported given to us we will try to fix... any fixes shared will * Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release. * be incorporated into the next SCTP release.
*/ */
...@@ -42,46 +47,26 @@ ...@@ -42,46 +47,26 @@
#define __sctp_ulpqueue_h__ #define __sctp_ulpqueue_h__
/* A structure to carry information to the ULP (e.g. Sockets API) */ /* A structure to carry information to the ULP (e.g. Sockets API) */
-typedef struct sctp_ulpqueue {
+struct sctp_ulpq {
	int malloced;
-	spinlock_t lock;
	sctp_association_t *asoc;
	struct sk_buff_head reasm;
	struct sk_buff_head lobby;
-	__u16 ssn[0];
-} sctp_ulpqueue_t;
+};

-/* This macro assists in creation of external storage for variable length
- * internal buffers.
- */
-#define sctp_ulpqueue_storage_size(inbound) (sizeof(__u16) * (inbound))
-
-sctp_ulpqueue_t *sctp_ulpqueue_new(sctp_association_t *asoc,
-				   __u16 inbound,
-				   int priority);
-sctp_ulpqueue_t *sctp_ulpqueue_init(sctp_ulpqueue_t *ulpq,
-				    sctp_association_t *asoc,
-				    __u16 inbound);
-void sctp_ulpqueue_free(sctp_ulpqueue_t *);
+/* Prototypes. */
+struct sctp_ulpq *sctp_ulpq_new(sctp_association_t *asoc, int priority);
+struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *, sctp_association_t *);
+void sctp_ulpq_free(struct sctp_ulpq *);

/* Add a new DATA chunk for processing. */
-int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *,
-			    sctp_chunk_t *chunk,
-			    int priority);
+int sctp_ulpq_tail_data(struct sctp_ulpq *, sctp_chunk_t *chunk, int priority);

/* Add a new event for propagation to the ULP. */
-int sctp_ulpqueue_tail_event(sctp_ulpqueue_t *,
-			     sctp_ulpevent_t *event);
+int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);

/* Is the ulpqueue empty. */
-int sctp_ulpqueue_is_empty(sctp_ulpqueue_t *);
-int sctp_ulpqueue_is_data_empty(sctp_ulpqueue_t *);
+int sctp_ulpqueue_is_empty(struct sctp_ulpq *);

#endif /* __sctp_ulpqueue_h__ */
...@@ -90,4 +75,4 @@ int sctp_ulpqueue_is_data_empty(sctp_ulpqueue_t *); ...@@ -90,4 +75,4 @@ int sctp_ulpqueue_is_data_empty(sctp_ulpqueue_t *);
...@@ -100,6 +100,14 @@ enum sctp_optname { ...@@ -100,6 +100,14 @@ enum sctp_optname {
#define SCTP_SOCKOPT_BINDX_REM SCTP_SOCKOPT_BINDX_REM #define SCTP_SOCKOPT_BINDX_REM SCTP_SOCKOPT_BINDX_REM
SCTP_SOCKOPT_PEELOFF, /* peel off association. */ SCTP_SOCKOPT_PEELOFF, /* peel off association. */
#define SCTP_SOCKOPT_PEELOFF SCTP_SOCKOPT_PEELOFF #define SCTP_SOCKOPT_PEELOFF SCTP_SOCKOPT_PEELOFF
	SCTP_GET_PEER_ADDRS_NUM,	/* Get number of peer addresses. */
#define SCTP_GET_PEER_ADDRS_NUM SCTP_GET_PEER_ADDRS_NUM
	SCTP_GET_PEER_ADDRS,		/* Get all peer addresses. */
#define SCTP_GET_PEER_ADDRS SCTP_GET_PEER_ADDRS
	SCTP_GET_LOCAL_ADDRS_NUM,	/* Get number of local addresses. */
#define SCTP_GET_LOCAL_ADDRS_NUM SCTP_GET_LOCAL_ADDRS_NUM
	SCTP_GET_LOCAL_ADDRS,		/* Get all local addresses. */
#define SCTP_GET_LOCAL_ADDRS SCTP_GET_LOCAL_ADDRS
}; };
...@@ -576,6 +584,15 @@ struct sctp_setstrm_timeout { ...@@ -576,6 +584,15 @@ struct sctp_setstrm_timeout {
__u16 ssto_streamid_end; __u16 ssto_streamid_end;
}; };
/*
 * 8.3, 8.5:  Get all peer/local addresses on a socket.
 * This parameter struct is used with getsockopt().
 */
struct sctp_getaddrs {
sctp_assoc_t assoc_id;
int addr_num;
struct sockaddr_storage *addrs;
};
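A hypothetical user-space sketch of the intended getsockopt() sequence; the option level and the exact meaning of addr_num are assumptions made for illustration:

	struct sctp_getaddrs getaddrs;
	struct sockaddr_storage addrs[8];
	int num;
	socklen_t len = sizeof(num);

	/* First ask how many peer addresses the association has ... */
	getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDRS_NUM, &num, &len);

	/* ... then fetch them into a caller-supplied buffer. */
	getaddrs.assoc_id = assoc_id;
	getaddrs.addr_num = num;
	getaddrs.addrs = addrs;
	len = sizeof(getaddrs);
	getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDRS, &getaddrs, &len);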
/* These are bit fields for msghdr->msg_flags. See section 5.1. */ /* These are bit fields for msghdr->msg_flags. See section 5.1. */
/* On user space Linux, these live in <bits/socket.h> as an enum. */ /* On user space Linux, these live in <bits/socket.h> as an enum. */
......
...@@ -10,7 +10,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ ...@@ -10,7 +10,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
inqueue.o outqueue.o ulpqueue.o command.o \ inqueue.o outqueue.o ulpqueue.o command.o \
tsnmap.o bind_addr.o socket.o primitive.o \ tsnmap.o bind_addr.o socket.o primitive.o \
output.o input.o hashdriver.o sla1.o \ output.o input.o hashdriver.o sla1.o \
-	  debug.o
+	  debug.o ssnmap.o
ifeq ($(CONFIG_SCTP_ADLER32), y) ifeq ($(CONFIG_SCTP_ADLER32), y)
sctp-y += adler32.o sctp-y += adler32.o
......
/* SCTP kernel reference Implementation /* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2003 International Business Machines, Corp.
* *
* This file is part of the SCTP kernel reference Implementation * This file is part of the SCTP kernel reference Implementation
* *
...@@ -36,6 +37,7 @@ ...@@ -36,6 +37,7 @@
* Randall Stewart <rstewar1@email.mot.com> * Randall Stewart <rstewar1@email.mot.com>
* Ken Morneau <kmorneau@cisco.com> * Ken Morneau <kmorneau@cisco.com>
* Qiaobing Xie <qxie1@email.mot.com> * Qiaobing Xie <qxie1@email.mot.com>
* Sridhar Samudrala <sri@us.ibm.com>
* *
* Any bugs reported given to us we will try to fix... any fixes shared will * Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release. * be incorporated into the next SCTP release.
...@@ -122,7 +124,7 @@ unsigned long update_adler32(unsigned long adler, ...@@ -122,7 +124,7 @@ unsigned long update_adler32(unsigned long adler,
return (s2 << 16) + s1; return (s2 << 16) + s1;
} }
-__u32 count_crc(__u8 *ptr, __u16 count)
+__u32 sctp_start_cksum(__u8 *ptr, __u16 count)
{ {
/* /*
* Update a running Adler-32 checksum with the bytes * Update a running Adler-32 checksum with the bytes
...@@ -146,3 +148,15 @@ __u32 count_crc(__u8 *ptr, __u16 count) ...@@ -146,3 +148,15 @@ __u32 count_crc(__u8 *ptr, __u16 count)
return adler; return adler;
} }
__u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 adler)
{
adler = update_adler32(adler, ptr, count);
return adler;
}
__u32 sctp_end_cksum(__u32 adler)
{
return adler;
}
/* SCTP kernel reference Implementation /* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 1999-2001 Motorola, Inc.
-* Copyright (c) 2001-2002 International Business Machines Corp.
+* Copyright (c) 2001-2003 International Business Machines Corp.
* Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 La Monte H.P. Yarroll * Copyright (c) 2001 La Monte H.P. Yarroll
* *
@@ -166,15 +166,10 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
	asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
	asoc->max_init_timeo = sp->initmsg.sinit_max_init_timeo * HZ;

-	/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
-	 *
-	 * The stream sequence number in all the streams shall start
-	 * from 0 when the association is established.  Also, when the
-	 * stream sequence number reaches the value 65535 the next
-	 * stream sequence number shall be set to 0.
+	/* Allocate storage for the ssnmap after the inbound and outbound
+	 * streams have been negotiated during Init.
	 */
-	for (i = 0; i < SCTP_MAX_STREAM; i++)
-		asoc->ssn[i] = 0;
+	asoc->ssnmap = NULL;
/* Set the local window size for receive. /* Set the local window size for receive.
* This is also the rcvbuf space per association. * This is also the rcvbuf space per association.
...@@ -252,15 +247,15 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc, ...@@ -252,15 +247,15 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
				       asoc);

	/* Create an output queue. */
-	sctp_outqueue_init(asoc, &asoc->outqueue);
-	sctp_outqueue_set_output_handlers(&asoc->outqueue,
+	sctp_outq_init(asoc, &asoc->outqueue);
+	sctp_outq_set_output_handlers(&asoc->outqueue,
				      sctp_packet_init,
				      sctp_packet_config,
				      sctp_packet_append_chunk,
				      sctp_packet_transmit_chunk,
				      sctp_packet_transmit);

-	if (NULL == sctp_ulpqueue_init(&asoc->ulpq, asoc, SCTP_MAX_STREAM))
+	if (NULL == sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;
/* Set up the tsn tracking. */ /* Set up the tsn tracking. */
@@ -310,14 +305,17 @@ void sctp_association_free(sctp_association_t *asoc)
	asoc->base.dead = 1;

	/* Dispose of any data lying around in the outqueue. */
-	sctp_outqueue_free(&asoc->outqueue);
+	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
-	sctp_ulpqueue_free(&asoc->ulpq);
+	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inqueue_free(&asoc->base.inqueue);

+	/* Free ssnmap storage. */
+	sctp_ssnmap_free(asoc->ssnmap);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);
@@ -524,7 +522,7 @@ void sctp_assoc_control_transport(sctp_association_t *asoc,
		break;

	default:
-		BUG();
+		return;
	};
/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
...@@ -534,7 +532,7 @@ void sctp_assoc_control_transport(sctp_association_t *asoc, ...@@ -534,7 +532,7 @@ void sctp_assoc_control_transport(sctp_association_t *asoc,
(struct sockaddr_storage *) &transport->ipaddr, (struct sockaddr_storage *) &transport->ipaddr,
0, spc_state, error, GFP_ATOMIC); 0, spc_state, error, GFP_ATOMIC);
if (event) if (event)
-		sctp_ulpqueue_tail_event(&asoc->ulpq, event);
+		sctp_ulpq_tail_event(&asoc->ulpq, event);
/* Select new active and retran paths. */ /* Select new active and retran paths. */
...@@ -634,7 +632,7 @@ __u32 __sctp_association_get_tsn_block(sctp_association_t *asoc, int num) ...@@ -634,7 +632,7 @@ __u32 __sctp_association_get_tsn_block(sctp_association_t *asoc, int num)
/* Fetch the next Stream Sequence Number for stream number 'sid'. */ /* Fetch the next Stream Sequence Number for stream number 'sid'. */
__u16 __sctp_association_get_next_ssn(sctp_association_t *asoc, __u16 sid) __u16 __sctp_association_get_next_ssn(sctp_association_t *asoc, __u16 sid)
{ {
-	return asoc->ssn[sid]++;
+	return sctp_ssn_next(&asoc->ssnmap->out, sid);
} }
/* Compare two addresses to see if they match. Wildcard addresses /* Compare two addresses to see if they match. Wildcard addresses
...@@ -852,8 +850,6 @@ void sctp_assoc_migrate(sctp_association_t *assoc, struct sock *newsk) ...@@ -852,8 +850,6 @@ void sctp_assoc_migrate(sctp_association_t *assoc, struct sock *newsk)
/* Update an association (possibly from unexpected COOKIE-ECHO processing). */ /* Update an association (possibly from unexpected COOKIE-ECHO processing). */
void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new) void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
{ {
int i;
/* Copy in new parameters of peer. */ /* Copy in new parameters of peer. */
asoc->c = new->c; asoc->c = new->c;
asoc->peer.rwnd = new->peer.rwnd; asoc->peer.rwnd = new->peer.rwnd;
...@@ -872,23 +868,28 @@ void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new) ...@@ -872,23 +868,28 @@ void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
/* If the case is A (association restart), use /* If the case is A (association restart), use
* initial_tsn as next_tsn. If the case is B, use * initial_tsn as next_tsn. If the case is B, use
* current next_tsn in case there is data sent to peer * current next_tsn in case data sent to peer
* has been discarded and needs retransmission. * has been discarded and needs retransmission.
*/ */
if (SCTP_STATE_ESTABLISHED == asoc->state) { if (SCTP_STATE_ESTABLISHED == asoc->state) {
asoc->next_tsn = new->next_tsn; asoc->next_tsn = new->next_tsn;
asoc->ctsn_ack_point = new->ctsn_ack_point; asoc->ctsn_ack_point = new->ctsn_ack_point;
/* Reinitialize SSN for both local streams /* Reinitialize SSN for both local streams
* and peer's streams. * and peer's streams.
*/ */
-		for (i = 0; i < SCTP_MAX_STREAM; i++) {
-			asoc->ssn[i] = 0;
-			asoc->ulpq.ssn[i] = 0;
-		}
+		sctp_ssnmap_clear(asoc->ssnmap);
} else { } else {
asoc->ctsn_ack_point = asoc->next_tsn - 1; asoc->ctsn_ack_point = asoc->next_tsn - 1;
if (!asoc->ssnmap) {
/* Move the ssnmap. */
asoc->ssnmap = new->ssnmap;
new->ssnmap = NULL;
}
} }
} }
/* Choose the transport for sending a shutdown packet. /* Choose the transport for sending a shutdown packet.
......
...@@ -47,8 +47,8 @@ sctp_cmd_seq_t *sctp_new_cmd_seq(int priority) ...@@ -47,8 +47,8 @@ sctp_cmd_seq_t *sctp_new_cmd_seq(int priority)
{ {
sctp_cmd_seq_t *retval = t_new(sctp_cmd_seq_t, priority); sctp_cmd_seq_t *retval = t_new(sctp_cmd_seq_t, priority);
-	/* XXX Check for NULL? -DaveM */
-	sctp_init_cmd_seq(retval);
+	if (retval)
+		sctp_init_cmd_seq(retval);
return retval; return retval;
} }
......
/* SCTP kernel reference Implementation /* SCTP kernel reference Implementation
* Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 1999-2001 Motorola, Inc.
-* Copyright (c) 2001 International Business Machines, Corp.
+* Copyright (c) 2001-2003 International Business Machines, Corp.
* *
* This file is part of the SCTP kernel reference Implementation * This file is part of the SCTP kernel reference Implementation
* *
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
* Written or modified by: * Written or modified by:
* Dinakaran Joseph * Dinakaran Joseph
* Jon Grimm <jgrimm@us.ibm.com> * Jon Grimm <jgrimm@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
* *
* Any bugs reported given to us we will try to fix... any fixes shared will * Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release. * be incorporated into the next SCTP release.
...@@ -135,11 +136,10 @@ __u32 crc_c[256] = { ...@@ -135,11 +136,10 @@ __u32 crc_c[256] = {
0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
}; };
-__u32 count_crc(__u8 *buffer, __u16 length)
+__u32 sctp_start_cksum(__u8 *buffer, __u16 length)
{
	__u32 crc32 = ~(__u32) 0;
-	__u32 i, result;
-	__u8 byte0, byte1, byte2, byte3;
+	__u32 i;
/* Optimize this routine to be SCTP specific, knowing how /* Optimize this routine to be SCTP specific, knowing how
* to skip the checksum field of the SCTP header. * to skip the checksum field of the SCTP header.
...@@ -157,6 +157,24 @@ __u32 count_crc(__u8 *buffer, __u16 length) ...@@ -157,6 +157,24 @@ __u32 count_crc(__u8 *buffer, __u16 length)
for (i = sizeof(struct sctphdr); i < length ; i++) for (i = sizeof(struct sctphdr); i < length ; i++)
CRC32C(crc32, buffer[i]); CRC32C(crc32, buffer[i]);
return crc32;
}
__u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32)
{
__u32 i;
for (i = 0; i < length ; i++)
CRC32C(crc32, buffer[i]);
return crc32;
}
__u32 sctp_end_cksum(__u32 crc32)
{
__u32 result;
__u8 byte0, byte1, byte2, byte3;
result = ~crc32; result = ~crc32;
/* result now holds the negated polynomial remainder; /* result now holds the negated polynomial remainder;
...@@ -183,5 +201,3 @@ __u32 count_crc(__u8 *buffer, __u16 length) ...@@ -183,5 +201,3 @@ __u32 count_crc(__u8 *buffer, __u16 length)
byte3); byte3);
return crc32; return crc32;
} }
/* SCTP kernel reference Implementation /* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 1999-2001 Motorola, Inc.
-* Copyright (c) 2001 International Business Machines, Corp.
+* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc. * Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll * Copyright (c) 2001 La Monte H.P. Yarroll
@@ -72,10 +72,19 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
{
	struct sctphdr *sh;
	__u32 cmp, val;
+	struct sk_buff *list = skb_shinfo(skb)->frag_list;

	sh = (struct sctphdr *) skb->h.raw;
	cmp = ntohl(sh->checksum);
-	val = count_crc((__u8 *)sh, skb->len);
+
+	val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
+
+	for (; list; list = list->next)
+		val = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
+					val);
+
+	val = sctp_end_cksum(val);
+
	if (val != cmp) {
		/* CRC failure, dump it. */
		return -1;
......
@@ -113,14 +113,14 @@ static inline int sctp_v6_xmit(struct sk_buff *skb)
	/* FIXME: Currently, ip6_route_output() doesn't fill in the source
	 * address in the returned route entry.  So we call ipv6_get_saddr()
	 * to get an appropriate source address.  It is possible that this
	 * address may not be part of the bind address list of the association.
	 * Once ip6_route_output() is fixed so that it returns a route entry
	 * with an appropriate source address, the following if condition can
	 * be removed.  With ip6_route_output() returning a source address
	 * filled route entry, sctp_transport_route() can do real source
	 * address selection for v6.
	 */
if (ipv6_addr_any(&rt6->rt6i_src.addr)) { if (ipv6_addr_any(&rt6->rt6i_src.addr)) {
err = ipv6_get_saddr(dst, fl.fl6_dst, &saddr); err = ipv6_get_saddr(dst, fl.fl6_dst, &saddr);
...@@ -130,7 +130,6 @@ static inline int sctp_v6_xmit(struct sk_buff *skb) ...@@ -130,7 +130,6 @@ static inline int sctp_v6_xmit(struct sk_buff *skb)
__FUNCTION__, NIP6(fl.fl6_src)); __FUNCTION__, NIP6(fl.fl6_src));
return err; return err;
} }
fl.fl6_src = &saddr; fl.fl6_src = &saddr;
} else { } else {
fl.fl6_src = &rt6->rt6i_src.addr; fl.fl6_src = &rt6->rt6i_src.addr;
...@@ -572,7 +571,7 @@ int sctp_v6_init(void) ...@@ -572,7 +571,7 @@ int sctp_v6_init(void)
	/* Register the SCTP specific AF_INET6 functions. */
sctp_register_af(&sctp_ipv6_specific); sctp_register_af(&sctp_ipv6_specific);
/* Register notifier for inet6 address additions/deletions. */ /* Register notifier for inet6 address additions/deletions. */
register_inet6addr_notifier(&sctp_inetaddr_notifier); register_inet6addr_notifier(&sctp_inetaddr_notifier);
return 0; return 0;
......
...@@ -54,6 +54,7 @@ SCTP_DBG_OBJCNT(assoc); ...@@ -54,6 +54,7 @@ SCTP_DBG_OBJCNT(assoc);
SCTP_DBG_OBJCNT(bind_addr); SCTP_DBG_OBJCNT(bind_addr);
SCTP_DBG_OBJCNT(chunk); SCTP_DBG_OBJCNT(chunk);
SCTP_DBG_OBJCNT(addr); SCTP_DBG_OBJCNT(addr);
SCTP_DBG_OBJCNT(ssnmap);
/* An array to make it easy to pretty print the debug information /* An array to make it easy to pretty print the debug information
* to the proc fs. * to the proc fs.
...@@ -66,6 +67,7 @@ sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = { ...@@ -66,6 +67,7 @@ sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = {
SCTP_DBG_OBJCNT_ENTRY(chunk), SCTP_DBG_OBJCNT_ENTRY(chunk),
SCTP_DBG_OBJCNT_ENTRY(bind_addr), SCTP_DBG_OBJCNT_ENTRY(bind_addr),
SCTP_DBG_OBJCNT_ENTRY(addr), SCTP_DBG_OBJCNT_ENTRY(addr),
SCTP_DBG_OBJCNT_ENTRY(ssnmap),
}; };
/* Callback from procfs to read out objcount information. /* Callback from procfs to read out objcount information.
......
/* SCTP kernel reference Implementation /* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 1999-2001 Motorola, Inc.
-* Copyright (c) 2001 International Business Machines, Corp.
+* Copyright (c) 2001-2003 International Business Machines, Corp.
* *
* This file is part of the SCTP kernel reference Implementation * This file is part of the SCTP kernel reference Implementation
* *
...@@ -62,7 +62,6 @@ ...@@ -62,7 +62,6 @@
#include <net/sctp/sm.h> #include <net/sctp/sm.h>
/* Forward declarations for private helpers. */ /* Forward declarations for private helpers. */
__u32 count_crc(__u8 *ptr, __u16 count);
static void sctp_packet_reset(sctp_packet_t *packet); static void sctp_packet_reset(sctp_packet_t *packet);
static sctp_xmit_t sctp_packet_append_data(sctp_packet_t *packet, static sctp_xmit_t sctp_packet_append_data(sctp_packet_t *packet,
sctp_chunk_t *chunk); sctp_chunk_t *chunk);
...@@ -228,7 +227,7 @@ sctp_xmit_t sctp_packet_append_chunk(sctp_packet_t *packet, sctp_chunk_t *chunk) ...@@ -228,7 +227,7 @@ sctp_xmit_t sctp_packet_append_chunk(sctp_packet_t *packet, sctp_chunk_t *chunk)
} }
/* All packets are sent to the network through this function from /* All packets are sent to the network through this function from
-* sctp_push_outqueue().
+* sctp_outq_tail().
* *
* The return value is a normal kernel error return value. * The return value is a normal kernel error return value.
*/ */
...@@ -358,7 +357,8 @@ int sctp_packet_transmit(sctp_packet_t *packet) ...@@ -358,7 +357,8 @@ int sctp_packet_transmit(sctp_packet_t *packet)
* Note: Adler-32 is no longer applicable, as has been replaced * Note: Adler-32 is no longer applicable, as has been replaced
* by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
*/ */
-	crc32 = count_crc((__u8 *)sh, nskb->len);
+	crc32 = sctp_start_cksum((__u8 *)sh, nskb->len);
crc32 = sctp_end_cksum(crc32);
/* 3) Put the resultant value into the checksum field in the /* 3) Put the resultant value into the checksum field in the
* common header, and leave the rest of the bits unchanged. * common header, and leave the rest of the bits unchanged.
......
...@@ -2,11 +2,11 @@ ...@@ -2,11 +2,11 @@
* Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Intel Corp.
-* Copyright (c) 2001-2002 International Business Machines Corp.
+* Copyright (c) 2001-2003 International Business Machines Corp.
* *
* This file is part of the SCTP kernel reference Implementation * This file is part of the SCTP kernel reference Implementation
* *
-* These functions implement the outqueue class.  The outqueue handles
+* These functions implement the sctp_outq class.  The outqueue handles
* bundling and queueing of outgoing SCTP chunks. * bundling and queueing of outgoing SCTP chunks.
* *
* The SCTP reference implementation is free software; * The SCTP reference implementation is free software;
...@@ -47,39 +47,39 @@ ...@@ -47,39 +47,39 @@
*/ */
#include <linux/types.h> #include <linux/types.h>
#include <linux/list.h> /* For struct list_head */ #include <linux/list.h> /* For struct list_head */
#include <linux/socket.h> #include <linux/socket.h>
#include <linux/ip.h> #include <linux/ip.h>
#include <net/sock.h> /* For skb_set_owner_w */ #include <net/sock.h> /* For skb_set_owner_w */
#include <net/sctp/sctp.h> #include <net/sctp/sctp.h>
/* Declare internal functions here. */ /* Declare internal functions here. */
static int sctp_acked(sctp_sackhdr_t *sack, __u32 tsn); static int sctp_acked(sctp_sackhdr_t *sack, __u32 tsn);
-static void sctp_check_transmitted(sctp_outqueue_t *q,
+static void sctp_check_transmitted(struct sctp_outq *q,
struct list_head *transmitted_queue, struct list_head *transmitted_queue,
sctp_transport_t *transport, sctp_transport_t *transport,
sctp_sackhdr_t *sack, sctp_sackhdr_t *sack,
__u32 highest_new_tsn); __u32 highest_new_tsn);
/* Generate a new outqueue. */
-sctp_outqueue_t *sctp_outqueue_new(sctp_association_t *asoc)
+struct sctp_outq *sctp_outq_new(sctp_association_t *asoc)
{
-	sctp_outqueue_t *q;
+	struct sctp_outq *q;

-	q = t_new(sctp_outqueue_t, GFP_KERNEL);
+	q = t_new(struct sctp_outq, GFP_KERNEL);
	if (q) {
-		sctp_outqueue_init(asoc, q);
+		sctp_outq_init(asoc, q);
		q->malloced = 1;
	}
	return q;
}
-/* Initialize an existing SCTP_outqueue.  This does the boring stuff.
+/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
-void sctp_outqueue_init(sctp_association_t *asoc, sctp_outqueue_t *q)
+void sctp_outq_init(sctp_association_t *asoc, struct sctp_outq *q)
{
	q->asoc = asoc;
	skb_queue_head_init(&q->out);
@@ -102,7 +102,7 @@ void sctp_outqueue_init(sctp_association_t *asoc, sctp_outqueue_t *q)
/* Free the outqueue structure and any related pending chunks.
 * FIXME: Add SEND_FAILED support.
 */
-void sctp_outqueue_teardown(sctp_outqueue_t *q)
+void sctp_outq_teardown(struct sctp_outq *q)
{
	sctp_transport_t *transport;
	struct list_head *lchunk, *pos, *temp;
@@ -131,29 +131,22 @@ void sctp_outqueue_teardown(sctp_outqueue_t *q)
}

/* Free the outqueue structure and any related pending chunks. */
-void sctp_outqueue_free(sctp_outqueue_t *q)
+void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
-	sctp_outqueue_teardown(q);
+	sctp_outq_teardown(q);

	/* If we were kmalloc()'d, free the memory. */
	if (q->malloced)
		kfree(q);
}

-/* Transmit any pending partial chunks. */
-void sctp_force_outqueue(sctp_outqueue_t *q)
-{
-	/* Do we really need this? */
-	/* BUG */
-}
-
-/* Put a new chunk in an SCTP_outqueue. */
-int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
+/* Put a new chunk in an sctp_outq. */
+int sctp_outq_tail(struct sctp_outq *q, sctp_chunk_t *chunk)
{
	int error = 0;

-	SCTP_DEBUG_PRINTK("sctp_push_outqueue(%p, %p[%s])\n",
+	SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");
...@@ -184,8 +177,7 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk) ...@@ -184,8 +177,7 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
default: default:
SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n", SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
q, chunk, q, chunk, chunk && chunk->chunk_hdr ?
chunk && chunk->chunk_hdr ?
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
: "Illegal Chunk"); : "Illegal Chunk");
...@@ -193,13 +185,13 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk) ...@@ -193,13 +185,13 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
q->empty = 0; q->empty = 0;
break; break;
}; };
} else { } else
skb_queue_tail(&q->control, (struct sk_buff *) chunk); skb_queue_tail(&q->control, (struct sk_buff *) chunk);
}
if (error < 0) if (error < 0)
return error; return error;
-	error = sctp_flush_outqueue(q, 0);
+	error = sctp_outq_flush(q, 0);
return error; return error;
} }
...@@ -207,7 +199,7 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk) ...@@ -207,7 +199,7 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
/* Insert a chunk into the retransmit queue. Chunks on the retransmit /* Insert a chunk into the retransmit queue. Chunks on the retransmit
* queue are kept in order, based on the TSNs. * queue are kept in order, based on the TSNs.
*/ */
-void sctp_retransmit_insert(struct list_head *tlchunk, sctp_outqueue_t *q)
+void sctp_retransmit_insert(struct list_head *tlchunk, struct sctp_outq *q)
{ {
struct list_head *rlchunk; struct list_head *rlchunk;
sctp_chunk_t *tchunk, *rchunk; sctp_chunk_t *tchunk, *rchunk;
...@@ -230,9 +222,9 @@ void sctp_retransmit_insert(struct list_head *tlchunk, sctp_outqueue_t *q) ...@@ -230,9 +222,9 @@ void sctp_retransmit_insert(struct list_head *tlchunk, sctp_outqueue_t *q)
list_add_tail(tlchunk, &q->retransmit); list_add_tail(tlchunk, &q->retransmit);
} }
} }
/* Mark all the eligible packets on a transport for retransmission. */ /* Mark all the eligible packets on a transport for retransmission. */
-void sctp_retransmit_mark(sctp_outqueue_t *q, sctp_transport_t *transport,
+void sctp_retransmit_mark(struct sctp_outq *q, sctp_transport_t *transport,
__u8 fast_retransmit) __u8 fast_retransmit)
{ {
struct list_head *lchunk, *ltemp; struct list_head *lchunk, *ltemp;
...@@ -302,7 +294,7 @@ void sctp_retransmit_mark(sctp_outqueue_t *q, sctp_transport_t *transport, ...@@ -302,7 +294,7 @@ void sctp_retransmit_mark(sctp_outqueue_t *q, sctp_transport_t *transport,
/* Mark all the eligible packets on a transport for retransmission and force /* Mark all the eligible packets on a transport for retransmission and force
* one packet out. * one packet out.
*/ */
-void sctp_retransmit(sctp_outqueue_t *q, sctp_transport_t *transport,
+void sctp_retransmit(struct sctp_outq *q, sctp_transport_t *transport,
__u8 fast_retransmit) __u8 fast_retransmit)
{ {
int error = 0; int error = 0;
...@@ -315,7 +307,7 @@ void sctp_retransmit(sctp_outqueue_t *q, sctp_transport_t *transport, ...@@ -315,7 +307,7 @@ void sctp_retransmit(sctp_outqueue_t *q, sctp_transport_t *transport,
sctp_retransmit_mark(q, transport, fast_retransmit); sctp_retransmit_mark(q, transport, fast_retransmit);
-	error = sctp_flush_outqueue(q, /* rtx_timeout */ 1);
+	error = sctp_outq_flush(q, /* rtx_timeout */ 1);
if (error) if (error)
q->asoc->base.sk->err = -error; q->asoc->base.sk->err = -error;
...@@ -323,14 +315,14 @@ void sctp_retransmit(sctp_outqueue_t *q, sctp_transport_t *transport, ...@@ -323,14 +315,14 @@ void sctp_retransmit(sctp_outqueue_t *q, sctp_transport_t *transport,
/* /*
* Transmit DATA chunks on the retransmit queue. Upon return from * Transmit DATA chunks on the retransmit queue. Upon return from
-* sctp_flush_retran_queue() the packet 'pkt' may contain chunks which
+* sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
* need to be transmitted by the caller. * need to be transmitted by the caller.
* We assume that pkt->transport has already been set. * We assume that pkt->transport has already been set.
* *
* The return value is a normal kernel error return value. * The return value is a normal kernel error return value.
*/ */
-static int sctp_flush_retran_queue(sctp_outqueue_t *q, sctp_packet_t *pkt,
-				   int rtx_timeout, int *start_timer)
+static int sctp_outq_flush_rtx(struct sctp_outq *q, sctp_packet_t *pkt,
+			       int rtx_timeout, int *start_timer)
{ {
struct list_head *lqueue; struct list_head *lqueue;
struct list_head *lchunk; struct list_head *lchunk;
...@@ -374,6 +366,18 @@ static int sctp_flush_retran_queue(sctp_outqueue_t *q, sctp_packet_t *pkt, ...@@ -374,6 +366,18 @@ static int sctp_flush_retran_queue(sctp_outqueue_t *q, sctp_packet_t *pkt,
continue; continue;
} }
#endif #endif
/* Make sure that Gap Acked TSNs are not retransmitted. A
* simple approach is just to move such TSNs out of the
* way and into a 'transmitted' queue and skip to the
* next chunk.
*/
if (chunk->tsn_gap_acked) {
list_add_tail(lchunk, &transport->transmitted);
lchunk = sctp_list_dequeue(lqueue);
continue;
}
/* Attempt to append this chunk to the packet. */ /* Attempt to append this chunk to the packet. */
status = (*q->append_output)(pkt, chunk); status = (*q->append_output)(pkt, chunk);
...@@ -427,7 +431,7 @@ static int sctp_flush_retran_queue(sctp_outqueue_t *q, sctp_packet_t *pkt, ...@@ -427,7 +431,7 @@ static int sctp_flush_retran_queue(sctp_outqueue_t *q, sctp_packet_t *pkt,
* queue. 'pos' points to the next chunk in the output queue after the * queue. 'pos' points to the next chunk in the output queue after the
* chunk that is currently in the process of fragmentation. * chunk that is currently in the process of fragmentation.
*/ */
-void sctp_xmit_frag(sctp_outqueue_t *q, struct sk_buff *pos,
+void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
sctp_packet_t *packet, sctp_chunk_t *frag, __u32 tsn) sctp_packet_t *packet, sctp_chunk_t *frag, __u32 tsn)
{ {
sctp_transport_t *transport = packet->transport; sctp_transport_t *transport = packet->transport;
...@@ -503,7 +507,7 @@ void sctp_xmit_frag(sctp_outqueue_t *q, struct sk_buff *pos, ...@@ -503,7 +507,7 @@ void sctp_xmit_frag(sctp_outqueue_t *q, struct sk_buff *pos,
 * The argument 'frag' points to the first fragment and it holds the list
* of all the other fragments in the 'frag_list' field. * of all the other fragments in the 'frag_list' field.
*/ */
void sctp_xmit_fragmented_chunks(sctp_outqueue_t *q, sctp_packet_t *packet, void sctp_xmit_fragmented_chunks(struct sctp_outq *q, sctp_packet_t *packet,
sctp_chunk_t *frag) sctp_chunk_t *frag)
{ {
sctp_association_t *asoc = frag->asoc; sctp_association_t *asoc = frag->asoc;
...@@ -562,7 +566,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk, ...@@ -562,7 +566,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
if (!first_frag) if (!first_frag)
goto err; goto err;
first_frag->has_ssn = 1;
/* All the fragments are added to the frag_list of the first chunk. */ /* All the fragments are added to the frag_list of the first chunk. */
frag_list = &first_frag->frag_list; frag_list = &first_frag->frag_list;
...@@ -576,7 +580,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk, ...@@ -576,7 +580,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
ssn); ssn);
if (!frag) if (!frag)
goto err; goto err;
frag->has_ssn = 1;
/* Add the middle fragment to the first fragment's /* Add the middle fragment to the first fragment's
* frag_list. * frag_list.
*/ */
...@@ -591,6 +595,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk, ...@@ -591,6 +595,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
SCTP_DATA_LAST_FRAG, ssn); SCTP_DATA_LAST_FRAG, ssn);
if (!frag) if (!frag)
goto err; goto err;
frag->has_ssn = 1;
/* Add the last fragment to the first fragment's frag_list. */ /* Add the last fragment to the first fragment's frag_list. */
list_add_tail(&frag->frag_list, frag_list); list_add_tail(&frag->frag_list, frag_list);
...@@ -620,7 +625,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk, ...@@ -620,7 +625,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
} }
/* /*
* sctp_flush_outqueue - Try to flush an outqueue. * sctp_outq_flush - Try to flush an outqueue.
* *
* Description: Send everything in q which we legally can, subject to * Description: Send everything in q which we legally can, subject to
* congestion limitations. * congestion limitations.
...@@ -629,7 +634,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk, ...@@ -629,7 +634,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
* locking concerns must be made. Today we use the sock lock to protect * locking concerns must be made. Today we use the sock lock to protect
* this function. * this function.
*/ */
int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout) int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{ {
sctp_packet_t *packet; sctp_packet_t *packet;
sctp_packet_t singleton; sctp_packet_t singleton;
...@@ -648,7 +653,6 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout) ...@@ -648,7 +653,6 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
sctp_xmit_t status; sctp_xmit_t status;
int error = 0; int error = 0;
int start_timer = 0; int start_timer = 0;
sctp_ulpevent_t *event;
/* These transports have chunks to send. */ /* These transports have chunks to send. */
struct list_head transport_list; struct list_head transport_list;
...@@ -783,10 +787,8 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout) ...@@ -783,10 +787,8 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
(*q->config_output)(packet, vtag, (*q->config_output)(packet, vtag,
ecn_capable, ecne_handler); ecn_capable, ecne_handler);
retran: retran:
error = sctp_flush_retran_queue(q, error = sctp_outq_flush_rtx(q, packet,
packet, rtx_timeout, &start_timer);
rtx_timeout,
&start_timer);
if (start_timer) if (start_timer)
sctp_transport_reset_timers(transport); sctp_transport_reset_timers(transport);
...@@ -813,15 +815,14 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout) ...@@ -813,15 +815,14 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
*/ */
if (chunk->sinfo.sinfo_stream >= if (chunk->sinfo.sinfo_stream >=
asoc->c.sinit_num_ostreams) { asoc->c.sinit_num_ostreams) {
struct sctp_ulpevent *ev;
/* Generate a SEND FAILED event. */ /* Generate a SEND FAILED event. */
event = sctp_ulpevent_make_send_failed(asoc, ev = sctp_ulpevent_make_send_failed(asoc,
chunk, SCTP_DATA_UNSENT, chunk, SCTP_DATA_UNSENT,
SCTP_ERROR_INV_STRM, SCTP_ERROR_INV_STRM, GFP_ATOMIC);
GFP_ATOMIC); if (ev)
if (event) { sctp_ulpq_tail_event(&asoc->ulpq, ev);
sctp_ulpqueue_tail_event(&asoc->ulpq,
event);
}
/* Free the chunk. This chunk is not on any /* Free the chunk. This chunk is not on any
* list yet, just free it. * list yet, just free it.
...@@ -830,6 +831,12 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout) ...@@ -830,6 +831,12 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
continue; continue;
} }
/* Now do delayed assignment of SSN. This will
* probably change again when we start supporting
* large (> approximately 2^16) size messages.
*/
sctp_chunk_assign_ssn(chunk);
/* If there is a specified transport, use it. /* If there is a specified transport, use it.
* Otherwise, we want to use the active path. * Otherwise, we want to use the active path.
*/ */
...@@ -878,7 +885,7 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout) ...@@ -878,7 +885,7 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
/* We could not append this chunk, so put /* We could not append this chunk, so put
* the chunk back on the output queue. * the chunk back on the output queue.
*/ */
SCTP_DEBUG_PRINTK("sctp_flush_outqueue: could " SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
"not transmit TSN: 0x%x, status: %d\n", "not transmit TSN: 0x%x, status: %d\n",
ntohl(chunk->subh.data_hdr->tsn), ntohl(chunk->subh.data_hdr->tsn),
status); status);
...@@ -966,12 +973,12 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout) ...@@ -966,12 +973,12 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
} }
/* Set the various output handling callbacks. */ /* Set the various output handling callbacks. */
int sctp_outqueue_set_output_handlers(sctp_outqueue_t *q, int sctp_outq_set_output_handlers(struct sctp_outq *q,
sctp_outqueue_ohandler_init_t init, sctp_outq_ohandler_init_t init,
sctp_outqueue_ohandler_config_t config, sctp_outq_ohandler_config_t config,
sctp_outqueue_ohandler_t append, sctp_outq_ohandler_t append,
sctp_outqueue_ohandler_t build, sctp_outq_ohandler_t build,
sctp_outqueue_ohandler_force_t force) sctp_outq_ohandler_force_t force)
{ {
q->init_output = init; q->init_output = init;
q->config_output = config; q->config_output = config;
...@@ -1028,14 +1035,14 @@ static __u32 sctp_highest_new_tsn(sctp_sackhdr_t *sack, ...@@ -1028,14 +1035,14 @@ static __u32 sctp_highest_new_tsn(sctp_sackhdr_t *sack,
} }
return highest_new_tsn; return highest_new_tsn;
} }
/* This is where we REALLY process a SACK. /* This is where we REALLY process a SACK.
* *
* Process the sack against the outqueue. Mostly, this just frees * Process the SACK against the outqueue. Mostly, this just frees
* things off the transmitted queue. * things off the transmitted queue.
*/ */
int sctp_sack_outqueue(sctp_outqueue_t *q, sctp_sackhdr_t *sack) int sctp_outq_sack(struct sctp_outq *q, sctp_sackhdr_t *sack)
{ {
sctp_association_t *asoc = q->asoc; sctp_association_t *asoc = q->asoc;
sctp_transport_t *transport; sctp_transport_t *transport;
...@@ -1053,7 +1060,7 @@ int sctp_sack_outqueue(sctp_outqueue_t *q, sctp_sackhdr_t *sack) ...@@ -1053,7 +1060,7 @@ int sctp_sack_outqueue(sctp_outqueue_t *q, sctp_sackhdr_t *sack)
sack_ctsn = ntohl(sack->cum_tsn_ack); sack_ctsn = ntohl(sack->cum_tsn_ack);
/* Get the highest TSN in the sack. */ /* Get the highest TSN in the sack. */
highest_tsn = sack_ctsn + highest_tsn = sack_ctsn +
ntohs(frags[ntohs(sack->num_gap_ack_blocks) - 1].gab.end); ntohs(frags[ntohs(sack->num_gap_ack_blocks) - 1].gab.end);
if (TSN_lt(asoc->highest_sacked, highest_tsn)) { if (TSN_lt(asoc->highest_sacked, highest_tsn)) {
...@@ -1139,7 +1146,7 @@ int sctp_sack_outqueue(sctp_outqueue_t *q, sctp_sackhdr_t *sack) ...@@ -1139,7 +1146,7 @@ int sctp_sack_outqueue(sctp_outqueue_t *q, sctp_sackhdr_t *sack)
} }
/* Is the outqueue empty? */ /* Is the outqueue empty? */
int sctp_outqueue_is_empty(const sctp_outqueue_t *q) int sctp_outq_is_empty(const struct sctp_outq *q)
{ {
return q->empty; return q->empty;
} }
...@@ -1161,7 +1168,7 @@ int sctp_outqueue_is_empty(const sctp_outqueue_t *q) ...@@ -1161,7 +1168,7 @@ int sctp_outqueue_is_empty(const sctp_outqueue_t *q)
* transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5. * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
* KEPT TSN6-TSN7, etc. * KEPT TSN6-TSN7, etc.
*/ */
static void sctp_check_transmitted(sctp_outqueue_t *q, static void sctp_check_transmitted(struct sctp_outq *q,
struct list_head *transmitted_queue, struct list_head *transmitted_queue,
sctp_transport_t *transport, sctp_transport_t *transport,
sctp_sackhdr_t *sack, sctp_sackhdr_t *sack,
......
...@@ -82,7 +82,7 @@ struct sock *sctp_get_ctl_sock(void) ...@@ -82,7 +82,7 @@ struct sock *sctp_get_ctl_sock(void)
} }
/* Set up the proc fs entry for the SCTP protocol. */ /* Set up the proc fs entry for the SCTP protocol. */
void sctp_proc_init(void) __init void sctp_proc_init(void)
{ {
if (!proc_net_sctp) { if (!proc_net_sctp) {
struct proc_dir_entry *ent; struct proc_dir_entry *ent;
...@@ -95,7 +95,7 @@ void sctp_proc_init(void) ...@@ -95,7 +95,7 @@ void sctp_proc_init(void)
} }
/* Clean up the proc fs entry for the SCTP protocol. */ /* Clean up the proc fs entry for the SCTP protocol. */
void sctp_proc_exit(void) __exit void sctp_proc_exit(void)
{ {
if (proc_net_sctp) { if (proc_net_sctp) {
proc_net_sctp = NULL; proc_net_sctp = NULL;
...@@ -688,7 +688,7 @@ static void cleanup_sctp_mibs(void) ...@@ -688,7 +688,7 @@ static void cleanup_sctp_mibs(void)
} }
/* Initialize the universe into something sensible. */ /* Initialize the universe into something sensible. */
int sctp_init(void) __init int sctp_init(void)
{ {
int i; int i;
int status = 0; int status = 0;
...@@ -750,13 +750,9 @@ int sctp_init(void) ...@@ -750,13 +750,9 @@ int sctp_init(void)
/* Implementation specific variables. */ /* Implementation specific variables. */
/* Initialize default stream count setup information. /* Initialize default stream count setup information. */
* Note: today the stream accounting data structures are very sctp_proto.max_instreams = SCTP_DEFAULT_INSTREAMS;
* fixed size, so one really does need to make sure that these have sctp_proto.max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
* upper/lower limits when changing.
*/
sctp_proto.max_instreams = SCTP_MAX_STREAM;
sctp_proto.max_outstreams = SCTP_MAX_STREAM;
/* Allocate and initialize the association hash table. */ /* Allocate and initialize the association hash table. */
sctp_proto.assoc_hashsize = 4096; sctp_proto.assoc_hashsize = 4096;
...@@ -852,7 +848,7 @@ int sctp_init(void) ...@@ -852,7 +848,7 @@ int sctp_init(void)
} }
/* Exit handler for the SCTP protocol. */ /* Exit handler for the SCTP protocol. */
void sctp_exit(void) __exit void sctp_exit(void)
{ {
/* BUG. This should probably do something useful like clean /* BUG. This should probably do something useful like clean
* up all the remaining associations and all that memory. * up all the remaining associations and all that memory.
...@@ -889,4 +885,3 @@ module_exit(sctp_exit); ...@@ -889,4 +885,3 @@ module_exit(sctp_exit);
MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>"); MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>");
MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)"); MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
...@@ -82,12 +82,12 @@ static const sctp_supported_addrs_param_t sat_param = { ...@@ -82,12 +82,12 @@ static const sctp_supported_addrs_param_t sat_param = {
/* gcc 3.2 doesn't allow initialization of zero-length arrays. So the above /* gcc 3.2 doesn't allow initialization of zero-length arrays. So the above
* structure is split and the address types array is initialized using a * structure is split and the address types array is initialized using a
* fixed length array. * fixed length array.
*/ */
static const __u16 sat_addr_types[2] = { static const __u16 sat_addr_types[2] = {
SCTP_PARAM_IPV4_ADDRESS, SCTP_PARAM_IPV4_ADDRESS,
SCTP_V6(SCTP_PARAM_IPV6_ADDRESS,) SCTP_V6(SCTP_PARAM_IPV6_ADDRESS,)
}; };
/* RFC 2960 3.3.2 Initiation (INIT) (1) /* RFC 2960 3.3.2 Initiation (INIT) (1)
* *
...@@ -540,7 +540,7 @@ sctp_chunk_t *sctp_make_datafrag_empty(sctp_association_t *asoc, ...@@ -540,7 +540,7 @@ sctp_chunk_t *sctp_make_datafrag_empty(sctp_association_t *asoc,
dp.stream = htons(sinfo->sinfo_stream); dp.stream = htons(sinfo->sinfo_stream);
dp.ppid = htonl(sinfo->sinfo_ppid); dp.ppid = htonl(sinfo->sinfo_ppid);
dp.ssn = htons(ssn); dp.ssn = htons(ssn);
/* Set the flags for an unordered send. */ /* Set the flags for an unordered send. */
if (sinfo->sinfo_flags & MSG_UNORDERED) if (sinfo->sinfo_flags & MSG_UNORDERED)
flags |= SCTP_DATA_UNORDERED; flags |= SCTP_DATA_UNORDERED;
...@@ -552,7 +552,7 @@ sctp_chunk_t *sctp_make_datafrag_empty(sctp_association_t *asoc, ...@@ -552,7 +552,7 @@ sctp_chunk_t *sctp_make_datafrag_empty(sctp_association_t *asoc,
retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp); retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo)); memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
nodata: nodata:
return retval; return retval;
} }
...@@ -607,12 +607,12 @@ sctp_chunk_t *sctp_make_data_empty(sctp_association_t *asoc, ...@@ -607,12 +607,12 @@ sctp_chunk_t *sctp_make_data_empty(sctp_association_t *asoc,
* ordered send and a new ssn is generated. The flags field is set * ordered send and a new ssn is generated. The flags field is set
* in the inner routine - sctp_make_datafrag_empty(). * in the inner routine - sctp_make_datafrag_empty().
*/ */
if (sinfo->sinfo_flags & MSG_UNORDERED) { // if (sinfo->sinfo_flags & MSG_UNORDERED) {
ssn = 0; ssn = 0;
} else { // } else {
ssn = __sctp_association_get_next_ssn(asoc, // ssn = __sctp_association_get_next_ssn(asoc,
sinfo->sinfo_stream); // sinfo->sinfo_stream);
} // }
return sctp_make_datafrag_empty(asoc, sinfo, data_len, flags, ssn); return sctp_make_datafrag_empty(asoc, sinfo, data_len, flags, ssn);
} }
...@@ -1013,6 +1013,7 @@ sctp_chunk_t *sctp_chunkify(struct sk_buff *skb, const sctp_association_t *asoc, ...@@ -1013,6 +1013,7 @@ sctp_chunk_t *sctp_chunkify(struct sk_buff *skb, const sctp_association_t *asoc,
retval->asoc = (sctp_association_t *) asoc; retval->asoc = (sctp_association_t *) asoc;
retval->num_times_sent = 0; retval->num_times_sent = 0;
retval->has_tsn = 0; retval->has_tsn = 0;
retval->has_ssn = 0;
retval->rtt_in_progress = 0; retval->rtt_in_progress = 0;
retval->sent_at = jiffies; retval->sent_at = jiffies;
retval->singleton = 1; retval->singleton = 1;
...@@ -1214,6 +1215,29 @@ int sctp_user_addto_chunk(sctp_chunk_t *chunk, int len, struct iovec *data) ...@@ -1214,6 +1215,29 @@ int sctp_user_addto_chunk(sctp_chunk_t *chunk, int len, struct iovec *data)
return err; return err;
} }
/* Helper function to assign an SSN if needed. This assumes that both
* the data_hdr and association have already been assigned.
*/
void sctp_chunk_assign_ssn(sctp_chunk_t *chunk)
{
__u16 ssn;
__u16 sid;
if (chunk->has_ssn)
return;
/* This is the last possible instant to assign a SSN. */
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
ssn = 0;
} else {
sid = ntohs(chunk->subh.data_hdr->stream);
ssn = htons(__sctp_association_get_next_ssn(chunk->asoc, sid));
}
chunk->subh.data_hdr->ssn = ssn;
chunk->has_ssn = 1;
}
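/* For orientation, a minimal caller-side sketch of the delayed SSN
 * assignment (the wrapper below is hypothetical; only
 * sctp_chunk_assign_ssn() and the has_ssn flag come from this file):
 * an SSN is consumed only when a DATA chunk is actually committed for
 * transmission, so a chunk dropped before it is sent never burns a
 * sequence number, and calling the helper twice is harmless.
 */
static void sctp_commit_data_chunk(sctp_chunk_t *chunk)
{
	sctp_chunk_assign_ssn(chunk);
	/* ... then append the chunk to a packet, as sctp_outq_flush() does. */
}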
/* Helper function to assign a TSN if needed. This assumes that both /* Helper function to assign a TSN if needed. This assumes that both
* the data_hdr and association have already been assigned. * the data_hdr and association have already been assigned.
*/ */
...@@ -1654,6 +1678,7 @@ int sctp_verify_init(const sctp_association_t *asoc, ...@@ -1654,6 +1678,7 @@ int sctp_verify_init(const sctp_association_t *asoc,
/* Unpack the parameters in an INIT packet into an association. /* Unpack the parameters in an INIT packet into an association.
* Returns 0 on failure, else success. * Returns 0 on failure, else success.
* FIXME: This is an association method.
*/ */
int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid, int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
const union sctp_addr *peer_addr, const union sctp_addr *peer_addr,
...@@ -1710,6 +1735,12 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid, ...@@ -1710,6 +1735,12 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
ntohs(peer_init->init_hdr.num_inbound_streams); ntohs(peer_init->init_hdr.num_inbound_streams);
} }
if (asoc->c.sinit_max_instreams >
ntohs(peer_init->init_hdr.num_outbound_streams)) {
asoc->c.sinit_max_instreams =
ntohs(peer_init->init_hdr.num_outbound_streams);
}
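/* Worked example of the stream negotiation above (numbers illustrative):
 * if we requested 10 outbound streams and allow up to 0xffff inbound,
 * while the peer's INIT advertises 6 inbound / 4 outbound streams, then
 * sinit_num_ostreams ends up clamped to 6 and sinit_max_instreams to 4;
 * the ssnmap allocated below is sized for exactly those negotiated counts.
 */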
/* Copy Initiation tag from INIT to VT_peer in cookie. */ /* Copy Initiation tag from INIT to VT_peer in cookie. */
asoc->c.peer_vtag = asoc->peer.i.init_tag; asoc->c.peer_vtag = asoc->peer.i.init_tag;
...@@ -1738,6 +1769,21 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid, ...@@ -1738,6 +1769,21 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE, sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE,
asoc->peer.i.initial_tsn); asoc->peer.i.initial_tsn);
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
* The stream sequence number in all the streams shall start
* from 0 when the association is established. Also, when the
* stream sequence number reaches the value 65535 the next
* stream sequence number shall be set to 0.
*/
/* Allocate storage for the negotiated streams. */
asoc->ssnmap = sctp_ssnmap_new(asoc->peer.i.num_outbound_streams,
asoc->c.sinit_num_ostreams,
priority);
if (!asoc->ssnmap)
goto nomem_ssnmap;
/* ADDIP Section 4.1 ASCONF Chunk Procedures /* ADDIP Section 4.1 ASCONF Chunk Procedures
* *
* When an endpoint has an ASCONF signaled change to be sent to the * When an endpoint has an ASCONF signaled change to be sent to the
...@@ -1751,6 +1797,7 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid, ...@@ -1751,6 +1797,7 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1; asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1;
return 1; return 1;
nomem_ssnmap:
clean_up: clean_up:
/* Release the transport structures. */ /* Release the transport structures. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
......
...@@ -296,7 +296,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, ...@@ -296,7 +296,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break; break;
case SCTP_CMD_PURGE_OUTQUEUE: case SCTP_CMD_PURGE_OUTQUEUE:
sctp_outqueue_teardown(&asoc->outqueue); sctp_outq_teardown(&asoc->outqueue);
break; break;
case SCTP_CMD_DELETE_TCB: case SCTP_CMD_DELETE_TCB:
...@@ -395,9 +395,9 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, ...@@ -395,9 +395,9 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
command->obj.ptr, command->obj.ptr,
"ulpq:", "ulpq:",
&asoc->ulpq); &asoc->ulpq);
sctp_ulpqueue_tail_data(&asoc->ulpq, sctp_ulpq_tail_data(&asoc->ulpq,
command->obj.ptr, command->obj.ptr,
GFP_ATOMIC); GFP_ATOMIC);
break; break;
case SCTP_CMD_EVENT_ULP: case SCTP_CMD_EVENT_ULP:
...@@ -407,14 +407,14 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, ...@@ -407,14 +407,14 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
command->obj.ptr, command->obj.ptr,
"ulpq:", "ulpq:",
&asoc->ulpq); &asoc->ulpq);
sctp_ulpqueue_tail_event(&asoc->ulpq, sctp_ulpq_tail_event(&asoc->ulpq,
command->obj.ptr); command->obj.ptr);
break; break;
case SCTP_CMD_REPLY: case SCTP_CMD_REPLY:
/* Send a chunk to our peer. */ /* Send a chunk to our peer. */
error = sctp_push_outqueue(&asoc->outqueue, error = sctp_outq_tail(&asoc->outqueue,
command->obj.ptr); command->obj.ptr);
break; break;
case SCTP_CMD_SEND_PKT: case SCTP_CMD_SEND_PKT:
...@@ -432,7 +432,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, ...@@ -432,7 +432,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_TRANSMIT: case SCTP_CMD_TRANSMIT:
/* Kick start transmission. */ /* Kick start transmission. */
error = sctp_flush_outqueue(&asoc->outqueue, 0); error = sctp_outq_flush(&asoc->outqueue, 0);
break; break;
case SCTP_CMD_ECN_CE: case SCTP_CMD_ECN_CE:
...@@ -599,7 +599,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, ...@@ -599,7 +599,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_RTO_PENDING: case SCTP_CMD_RTO_PENDING:
t = command->obj.transport; t = command->obj.transport;
t->rto_pending = 1; t->rto_pending = 1;
break; break;
default: default:
...@@ -743,7 +743,7 @@ int sctp_gen_sack(sctp_association_t *asoc, int force, sctp_cmd_seq_t *commands) ...@@ -743,7 +743,7 @@ int sctp_gen_sack(sctp_association_t *asoc, int force, sctp_cmd_seq_t *commands)
asoc->peer.sack_needed = 0; asoc->peer.sack_needed = 0;
asoc->peer.next_dup_tsn = 0; asoc->peer.next_dup_tsn = 0;
error = sctp_push_outqueue(&asoc->outqueue, sack); error = sctp_outq_tail(&asoc->outqueue, sack);
/* Stop the SACK timer. */ /* Stop the SACK timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
...@@ -1095,7 +1095,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, ...@@ -1095,7 +1095,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT /* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
* inside the cookie. In reality, this is only used for INIT-ACK processing * inside the cookie. In reality, this is only used for INIT-ACK processing
* since all other cases use "temporary" associations and can do all * since all other cases use "temporary" associations and can do all
* their work in statefuns directly. * their work in statefuns directly.
*/ */
static int sctp_cmd_process_init(sctp_cmd_seq_t *commands, static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
sctp_association_t *asoc, sctp_association_t *asoc,
...@@ -1134,8 +1134,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds, ...@@ -1134,8 +1134,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
*/ */
list_for_each(pos, &asoc->peer.transport_addr_list) { list_for_each(pos, &asoc->peer.transport_addr_list) {
t = list_entry(pos, sctp_transport_t, transports); t = list_entry(pos, sctp_transport_t, transports);
if (!mod_timer(&t->hb_timer, if (!mod_timer(&t->hb_timer, t->hb_interval + t->rto +
t->hb_interval + t->rto + jiffies)) { sctp_jitter(t->rto) + jiffies)) {
sctp_transport_hold(t); sctp_transport_hold(t);
} }
} }
...@@ -1147,7 +1147,8 @@ static void sctp_cmd_hb_timers_update(sctp_cmd_seq_t *cmds, ...@@ -1147,7 +1147,8 @@ static void sctp_cmd_hb_timers_update(sctp_cmd_seq_t *cmds,
sctp_transport_t *t) sctp_transport_t *t)
{ {
/* Update the heartbeat timer. */ /* Update the heartbeat timer. */
if (!mod_timer(&t->hb_timer, t->hb_interval + t->rto + jiffies)) if (!mod_timer(&t->hb_timer, t->hb_interval + t->rto +
sctp_jitter(t->rto) + jiffies))
sctp_transport_hold(t); sctp_transport_hold(t);
} }
...@@ -1218,7 +1219,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, ...@@ -1218,7 +1219,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
{ {
int err; int err;
if (sctp_sack_outqueue(&asoc->outqueue, sackh)) { if (sctp_outq_sack(&asoc->outqueue, sackh)) {
/* There are no more TSNs awaiting SACK. */ /* There are no more TSNs awaiting SACK. */
err = sctp_do_sm(SCTP_EVENT_T_OTHER, err = sctp_do_sm(SCTP_EVENT_T_OTHER,
SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
...@@ -1228,7 +1229,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, ...@@ -1228,7 +1229,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
/* Windows may have opened, so we need /* Windows may have opened, so we need
* to check if we have DATA to transmit * to check if we have DATA to transmit
*/ */
err = sctp_flush_outqueue(&asoc->outqueue, 0); err = sctp_outq_flush(&asoc->outqueue, 0);
} }
return err; return err;
......
...@@ -191,7 +191,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep, ...@@ -191,7 +191,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
int len; int len;
/* If the packet is an OOTB packet which is temporarily on the /* If the packet is an OOTB packet which is temporarily on the
* control endpoint, responding with an ABORT. * control endpoint, respond with an ABORT.
*/ */
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_ootb(ep, asoc, type, arg, commands); return sctp_sf_ootb(ep, asoc, type, arg, commands);
...@@ -506,7 +506,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep, ...@@ -506,7 +506,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
sctp_chunk_t *err_chk_p; sctp_chunk_t *err_chk_p;
/* If the packet is an OOTB packet which is temporarily on the /* If the packet is an OOTB packet which is temporarily on the
* control endpoint, responding with an ABORT. * control endpoint, respond with an ABORT.
*/ */
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_ootb(ep, asoc, type, arg, commands); return sctp_sf_ootb(ep, asoc, type, arg, commands);
...@@ -1337,7 +1337,7 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const sctp_endpoint_t *ep, ...@@ -1337,7 +1337,7 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const sctp_endpoint_t *ep,
/* Unexpected COOKIE-ECHO handlerfor peer restart (Table 2, action 'A') /* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
* *
* Section 5.2.4 * Section 5.2.4
* A) In this case, the peer may have restarted. * A) In this case, the peer may have restarted.
...@@ -2030,7 +2030,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const sctp_endpoint_t *ep, ...@@ -2030,7 +2030,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const sctp_endpoint_t *ep,
SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED)); SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED));
disposition = SCTP_DISPOSITION_CONSUME; disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outqueue_is_empty(&asoc->outqueue)) { if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_shutdown_ack(ep, asoc, type, disposition = sctp_sf_do_9_2_shutdown_ack(ep, asoc, type,
arg, commands); arg, commands);
} }
...@@ -3429,7 +3429,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(const sctp_endpoint_t *ep, ...@@ -3429,7 +3429,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(const sctp_endpoint_t *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
disposition = SCTP_DISPOSITION_CONSUME; disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outqueue_is_empty(&asoc->outqueue)) { if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type, disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
arg, commands); arg, commands);
} }
...@@ -4203,7 +4203,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(const sctp_endpoint_t *ep, ...@@ -4203,7 +4203,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
disposition = SCTP_DISPOSITION_CONSUME; disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outqueue_is_empty(&asoc->outqueue)) { if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type, disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
arg, commands); arg, commands);
} }
......
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
#include <net/sctp/sctp.h> #include <net/sctp/sctp.h>
#include <net/sctp/sm.h> #include <net/sctp/sm.h>
sctp_sm_table_entry_t bug = { static sctp_sm_table_entry_t bug = {
.fn = sctp_sf_bug, .fn = sctp_sf_bug,
.name = "sctp_sf_bug" .name = "sctp_sf_bug"
}; };
......
/* Copyright (c) 1999-2000 Cisco, Inc. /* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 International Business Machines, Corp. * Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001-2002 Intel Corp. * Copyright (c) 2001-2003 Intel Corp.
* Copyright (c) 2001-2002 Nokia, Inc. * Copyright (c) 2001-2002 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll * Copyright (c) 2001 La Monte H.P. Yarroll
* *
...@@ -47,6 +47,7 @@ ...@@ -47,6 +47,7 @@
* Daisy Chang <daisyc@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <samudrala@us.ibm.com> * Sridhar Samudrala <samudrala@us.ibm.com>
* Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
* Ardelle Fan <ardelle.fan@intel.com>
* *
* Any bugs reported given to us we will try to fix... any fixes shared will * Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release. * be incorporated into the next SCTP release.
...@@ -131,7 +132,7 @@ int sctp_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) ...@@ -131,7 +132,7 @@ int sctp_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
static long sctp_get_port_local(struct sock *, union sctp_addr *); static long sctp_get_port_local(struct sock *, union sctp_addr *);
/* Verify this is a valid sockaddr. */ /* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_opt *opt, static struct sctp_af *sctp_sockaddr_af(struct sctp_opt *opt,
union sctp_addr *addr, int len) union sctp_addr *addr, int len)
{ {
struct sctp_af *af; struct sctp_af *af;
...@@ -754,8 +755,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, ...@@ -754,8 +755,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
*/ */
if ((SCTP_SOCKET_UDP_HIGH_BANDWIDTH != sp->type) && msg->msg_name) { if ((SCTP_SOCKET_UDP_HIGH_BANDWIDTH != sp->type) && msg->msg_name) {
int msg_namelen = msg->msg_namelen; int msg_namelen = msg->msg_namelen;
err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
msg_namelen); msg_namelen);
if (err) if (err)
return err; return err;
...@@ -806,7 +807,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, ...@@ -806,7 +807,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
if (!asoc) { if (!asoc) {
/* If we could not find a matching association on the /* If we could not find a matching association on the
* endpoint, make sure that there is no peeled-off * endpoint, make sure that there is no peeled-off
* association on another socket. * association on another socket.
*/ */
if (sctp_endpoint_is_peeled_off(ep, &to)) { if (sctp_endpoint_is_peeled_off(ep, &to)) {
...@@ -868,13 +869,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, ...@@ -868,13 +869,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock; goto out_unlock;
} }
} else { } else {
/* Check against the defaults. */
if (sinfo->sinfo_stream >=
sp->initmsg.sinit_num_ostreams) {
err = -EINVAL;
goto out_unlock;
}
/* Check against the requested. */ /* Check against the requested. */
if (sinfo->sinfo_stream >= if (sinfo->sinfo_stream >=
sinit->sinit_num_ostreams) { sinit->sinit_num_ostreams) {
...@@ -915,14 +909,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, ...@@ -915,14 +909,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
sinit->sinit_num_ostreams; sinit->sinit_num_ostreams;
} }
if (sinit->sinit_max_instreams) { if (sinit->sinit_max_instreams) {
if (sinit->sinit_max_instreams <= asoc->c.sinit_max_instreams =
SCTP_MAX_STREAM) { sinit->sinit_max_instreams;
asoc->c.sinit_max_instreams =
sinit->sinit_max_instreams;
} else {
asoc->c.sinit_max_instreams =
SCTP_MAX_STREAM;
}
} }
if (sinit->sinit_max_attempts) { if (sinit->sinit_max_attempts) {
asoc->max_init_attempts asoc->max_init_attempts
...@@ -1086,23 +1074,30 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, ...@@ -1086,23 +1074,30 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
* frag_list. len specifies the total amount of data that needs to be removed. * frag_list. len specifies the total amount of data that needs to be removed.
* when 'len' bytes could be removed from the skb, it returns 0. * when 'len' bytes could be removed from the skb, it returns 0.
* If 'len' exceeds the total skb length, it returns the no. of bytes that * If 'len' exceeds the total skb length, it returns the no. of bytes that
* could not be removed. * could not be removed.
*/ */
static int sctp_skb_pull(struct sk_buff *skb, int len) static int sctp_skb_pull(struct sk_buff *skb, int len)
{ {
struct sk_buff *list; struct sk_buff *list;
int skb_len = skb_headlen(skb);
int rlen;
if (len <= skb->len) { if (len <= skb_len) {
__skb_pull(skb, len); __skb_pull(skb, len);
return 0; return 0;
} }
len -= skb->len; len -= skb_len;
__skb_pull(skb, skb->len); __skb_pull(skb, skb_len);
for (list = skb_shinfo(skb)->frag_list; list; list = list->next) { for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
len = sctp_skb_pull(list, len); rlen = sctp_skb_pull(list, len);
if (!len) skb->len -= (len-rlen);
skb->data_len -= (len-rlen);
if (!rlen)
return 0; return 0;
len = rlen;
} }
return len; return len;
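/* A worked example of the convention described above (sizes illustrative):
 * given an skb with 100 bytes of linear data and 200 bytes chained in its
 * frag_list,
 *   sctp_skb_pull(skb, 150) returns 0   (100 pulled from the linear part,
 *                                        50 from the first fragment);
 *   sctp_skb_pull(skb, 400) returns 100 (only 300 bytes were available).
 */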
...@@ -1130,7 +1125,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr ...@@ -1130,7 +1125,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
{ {
sctp_ulpevent_t *event = NULL; sctp_ulpevent_t *event = NULL;
sctp_opt_t *sp = sctp_sk(sk); sctp_opt_t *sp = sctp_sk(sk);
struct sk_buff *skb, *list; struct sk_buff *skb;
int copied; int copied;
int err = 0; int err = 0;
int skb_len; int skb_len;
...@@ -1152,10 +1147,8 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr ...@@ -1152,10 +1147,8 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
/* Get the total length of the skb including any skb's in the /* Get the total length of the skb including any skb's in the
* frag_list. * frag_list.
*/ */
skb_len = skb->len; skb_len = skb->len;
for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
skb_len += list->len;
copied = skb_len; copied = skb_len;
if (copied > len) if (copied > len)
...@@ -1190,12 +1183,12 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr ...@@ -1190,12 +1183,12 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
/* If skb's length exceeds the user's buffer, update the skb and /* If skb's length exceeds the user's buffer, update the skb and
* push it back to the receive_queue so that the next call to * push it back to the receive_queue so that the next call to
* recvmsg() will return the remaining data. Don't set MSG_EOR. * recvmsg() will return the remaining data. Don't set MSG_EOR.
* Otherwise, set MSG_EOR indicating the end of a message. * Otherwise, set MSG_EOR indicating the end of a message.
*/ */
if (skb_len > copied) { if (skb_len > copied) {
msg->msg_flags &= ~MSG_EOR; msg->msg_flags &= ~MSG_EOR;
if (flags & MSG_PEEK) if (flags & MSG_PEEK)
goto out_free; goto out_free;
sctp_skb_pull(skb, copied); sctp_skb_pull(skb, copied);
skb_queue_head(&sk->receive_queue, skb); skb_queue_head(&sk->receive_queue, skb);
goto out; goto out;
...@@ -1463,7 +1456,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr, ...@@ -1463,7 +1456,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
sp = sctp_sk(sk); sp = sctp_sk(sk);
ep = sp->ep; ep = sp->ep;
/* connect() cannot be done on a peeled-off socket. */ /* connect() cannot be done on a peeled-off socket. */
if (SCTP_SOCKET_UDP_HIGH_BANDWIDTH == sp->type) { if (SCTP_SOCKET_UDP_HIGH_BANDWIDTH == sp->type) {
err = -EISCONN; err = -EISCONN;
goto out_unlock; goto out_unlock;
...@@ -1471,7 +1464,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr, ...@@ -1471,7 +1464,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
err = sctp_verify_addr(sk, (union sctp_addr *)uaddr, addr_len); err = sctp_verify_addr(sk, (union sctp_addr *)uaddr, addr_len);
if (err) if (err)
goto out_unlock; goto out_unlock;
memcpy(&to, uaddr, addr_len); memcpy(&to, uaddr, addr_len);
to.v4.sin_port = ntohs(to.v4.sin_port); to.v4.sin_port = ntohs(to.v4.sin_port);
...@@ -1479,7 +1472,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr, ...@@ -1479,7 +1472,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
if (asoc) { if (asoc) {
if (asoc->state >= SCTP_STATE_ESTABLISHED) if (asoc->state >= SCTP_STATE_ESTABLISHED)
err = -EISCONN; err = -EISCONN;
else else
err = -EALREADY; err = -EALREADY;
goto out_unlock; goto out_unlock;
...@@ -1517,7 +1510,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr, ...@@ -1517,7 +1510,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
err = sctp_primitive_ASSOCIATE(asoc, NULL); err = sctp_primitive_ASSOCIATE(asoc, NULL);
if (err < 0) { if (err < 0) {
sctp_association_free(asoc); sctp_association_free(asoc);
goto out_unlock; goto out_unlock;
} }
...@@ -1915,7 +1908,7 @@ static inline int sctp_getsockopt_get_peer_addr_params(struct sock *sk, ...@@ -1915,7 +1908,7 @@ static inline int sctp_getsockopt_get_peer_addr_params(struct sock *sk,
* before this address shall be considered unreachable. * before this address shall be considered unreachable.
*/ */
params.spp_pathmaxrxt = trans->error_threshold; params.spp_pathmaxrxt = trans->error_threshold;
if (copy_to_user(optval, &params, len)) if (copy_to_user(optval, &params, len))
return -EFAULT; return -EFAULT;
*optlen = len; *optlen = len;
...@@ -1932,6 +1925,166 @@ static inline int sctp_getsockopt_initmsg(struct sock *sk, int len, char *optval ...@@ -1932,6 +1925,166 @@ static inline int sctp_getsockopt_initmsg(struct sock *sk, int len, char *optval
return 0; return 0;
} }
static inline int sctp_getsockopt_get_peer_addrs_num(struct sock *sk, int len,
char *optval, int *optlen)
{
sctp_assoc_t id;
sctp_association_t *asoc;
struct list_head *pos;
int cnt = 0;
if (len != sizeof(sctp_assoc_t))
return -EINVAL;
if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
return -EFAULT;
/*
* For UDP-style sockets, id specifies the association to query.
*/
asoc = sctp_id2assoc(sk, id);
if (!asoc)
return -EINVAL;
list_for_each(pos, &asoc->peer.transport_addr_list) {
cnt++;
}
if (copy_to_user(optval, &cnt, sizeof(int)))
return -EFAULT;
return 0;
}
static inline int sctp_getsockopt_get_peer_addrs(struct sock *sk, int len,
char *optval, int *optlen)
{
sctp_association_t *asoc;
struct list_head *pos;
int cnt = 0;
struct sctp_getaddrs getaddrs;
sctp_transport_t *from;
struct sockaddr_storage *to;
if (len != sizeof(struct sctp_getaddrs))
return -EINVAL;
if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
return -EFAULT;
if (getaddrs.addr_num <= 0)
return -EINVAL;
/*
* For UDP-style sockets, id specifies the association to query.
*/
asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
if (!asoc)
return -EINVAL;
to = getaddrs.addrs;
list_for_each(pos, &asoc->peer.transport_addr_list) {
from = list_entry(pos, sctp_transport_t, transports);
if (copy_to_user(to, &from->ipaddr, sizeof(from->ipaddr)))
return -EFAULT;
to++;
cnt++;
if (cnt >= getaddrs.addr_num)
break;
}
getaddrs.addr_num = cnt;
if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs)))
return -EFAULT;
return 0;
}
static inline int sctp_getsockopt_get_local_addrs_num(struct sock *sk, int len,
char *optval, int *optlen)
{
sctp_assoc_t id;
sctp_bind_addr_t *bp;
sctp_association_t *asoc;
struct list_head *pos;
int cnt = 0;
if (len != sizeof(sctp_assoc_t))
return -EINVAL;
if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
return -EFAULT;
/*
* For UDP-style sockets, id specifies the association to query.
* If the id field is set to the value '0' then the locally bound
* addresses are returned without regard to any particular
* association.
*/
if (0 == id) {
bp = &sctp_sk(sk)->ep->base.bind_addr;
} else {
asoc = sctp_id2assoc(sk, id);
if (!asoc)
return -EINVAL;
bp = &asoc->base.bind_addr;
}
list_for_each(pos, &bp->address_list) {
cnt++;
}
if (copy_to_user(optval, &cnt, sizeof(int)))
return -EFAULT;
return 0;
}
static inline int sctp_getsockopt_get_local_addrs(struct sock *sk, int len,
char *optval, int *optlen)
{
sctp_bind_addr_t *bp;
sctp_association_t *asoc;
struct list_head *pos;
int cnt = 0;
struct sctp_getaddrs getaddrs;
struct sockaddr_storage_list *from;
struct sockaddr_storage *to;
if (len != sizeof(struct sctp_getaddrs))
return -EINVAL;
if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
return -EFAULT;
if (getaddrs.addr_num <= 0)
return -EINVAL;
/*
* For UDP-style sockets, id specifies the association to query.
* If the id field is set to the value '0' then the locally bound
* addresses are returned without regard to any particular
* association.
*/
if (0 == getaddrs.assoc_id) {
bp = &sctp_sk(sk)->ep->base.bind_addr;
} else {
asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
if (!asoc)
return -EINVAL;
bp = &asoc->base.bind_addr;
}
to = getaddrs.addrs;
list_for_each(pos, &bp->address_list) {
from = list_entry(pos,
struct sockaddr_storage_list,
list);
if (copy_to_user(to, &from->a, sizeof(from->a)))
return -EFAULT;
to++;
cnt++;
if (cnt >= getaddrs.addr_num)
break;
}
getaddrs.addr_num = cnt;
if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs)))
return -EFAULT;
return 0;
}
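/* For reference, a rough userspace sketch of how the new options fit
 * together. This is illustrative only: the headers, the IPPROTO_SCTP level
 * and the dump_peer_addrs() helper are assumptions, while the option names
 * and the struct sctp_getaddrs fields are used exactly as the kernel code
 * above expects them. Error handling is minimal.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* assumed userland header for the SCTP types */

static int dump_peer_addrs(int sd, sctp_assoc_t id)
{
	struct sockaddr_storage addrs[8];
	struct sctp_getaddrs ga;
	char buf[sizeof(sctp_assoc_t) > sizeof(int) ?
		 sizeof(sctp_assoc_t) : sizeof(int)];
	socklen_t len;
	int cnt;

	/* The option value carries the association id in and the address
	 * count out, so use a buffer large enough for either.
	 */
	memcpy(buf, &id, sizeof(id));
	len = sizeof(sctp_assoc_t);
	if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDRS_NUM, buf, &len) < 0)
		return -1;
	memcpy(&cnt, buf, sizeof(cnt));

	/* Fetch up to that many transport addresses in one call. */
	memset(&ga, 0, sizeof(ga));
	ga.assoc_id = id;
	ga.addr_num = cnt < 8 ? cnt : 8;
	ga.addrs = addrs;
	len = sizeof(ga);
	if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDRS, &ga, &len) < 0)
		return -1;

	return ga.addr_num;	/* number of addresses actually copied back */
}
/* SCTP_GET_LOCAL_ADDRS_NUM / SCTP_GET_LOCAL_ADDRS work the same way; there
 * an assoc id of 0 means "all locally bound addresses", as handled above.
 */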
SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
char *optval, int *optlen) char *optval, int *optlen)
{ {
...@@ -1989,6 +2142,26 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, ...@@ -1989,6 +2142,26 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
break; break;
case SCTP_GET_PEER_ADDRS_NUM:
retval = sctp_getsockopt_get_peer_addrs_num(sk, len, optval,
optlen);
break;
case SCTP_GET_LOCAL_ADDRS_NUM:
retval = sctp_getsockopt_get_local_addrs_num(sk, len, optval,
optlen);
break;
case SCTP_GET_PEER_ADDRS:
retval = sctp_getsockopt_get_peer_addrs(sk, len, optval,
optlen);
break;
case SCTP_GET_LOCAL_ADDRS:
retval = sctp_getsockopt_get_local_addrs(sk, len, optval,
optlen);
break;
default: default:
retval = -ENOPROTOOPT; retval = -ENOPROTOOPT;
break; break;
...@@ -2029,7 +2202,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) ...@@ -2029,7 +2202,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
sctp_protocol_t *sctp = sctp_get_protocol(); sctp_protocol_t *sctp = sctp_get_protocol();
unsigned short snum; unsigned short snum;
int ret; int ret;
/* NOTE: Remember to put this back to net order. */ /* NOTE: Remember to put this back to net order. */
addr->v4.sin_port = ntohs(addr->v4.sin_port); addr->v4.sin_port = ntohs(addr->v4.sin_port);
snum = addr->v4.sin_port; snum = addr->v4.sin_port;
...@@ -2098,7 +2271,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) ...@@ -2098,7 +2271,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
} }
} }
if (pp != NULL && pp->sk != NULL) { if (pp != NULL && pp->sk != NULL) {
/* We had a port hash table hit - there is an /* We had a port hash table hit - there is an
* available port (pp != NULL) and it is being * available port (pp != NULL) and it is being
...@@ -2129,7 +2302,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) ...@@ -2129,7 +2302,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
if (sk_reuse && sk2->reuse) if (sk_reuse && sk2->reuse)
continue; continue;
if (sctp_bind_addr_match(&ep2->base.bind_addr, addr, if (sctp_bind_addr_match(&ep2->base.bind_addr, addr,
sctp_sk(sk))) sctp_sk(sk)))
goto found; goto found;
...@@ -2187,7 +2360,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) ...@@ -2187,7 +2360,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
} }
/* Assign a 'snum' port to the socket. If snum == 0, an ephemeral /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
* port is requested. * port is requested.
*/ */
static int sctp_get_port(struct sock *sk, unsigned short snum) static int sctp_get_port(struct sock *sk, unsigned short snum)
{ {
...@@ -2657,10 +2830,10 @@ static int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, int len) ...@@ -2657,10 +2830,10 @@ static int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, int len)
return -EINVAL; return -EINVAL;
/* Is this a valid SCTP address? */ /* Is this a valid SCTP address? */
if (!af->addr_valid((union sctp_addr *)addr)) if (!af->addr_valid((union sctp_addr *)addr))
return -EINVAL; return -EINVAL;
return 0; return 0;
} }
/* Get the sndbuf space available at the time on the association. */ /* Get the sndbuf space available at the time on the association. */
......
/* SCTP kernel reference Implementation
* Copyright (c) 2003 International Business Machines, Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
* These functions manipulate the sctp SSN tracker.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#include <linux/types.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Storage size needed for map includes 2 headers and then the
* specific needs of in or out streams.
*/
static inline size_t sctp_ssnmap_size(__u16 in, __u16 out)
{
return sizeof(struct sctp_ssnmap) + (in + out) * sizeof(__u16);
}
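/* For example, sctp_ssnmap_size(4, 2) comes to
 * sizeof(struct sctp_ssnmap) + 6 * sizeof(__u16): one header followed by
 * four inbound and two outbound per-stream SSN counters.
 */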
/* Create a new sctp_ssnmap.
* Allocate room to track SSNs for 'in' inbound and 'out' outbound streams.
*/
struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, int priority)
{
struct sctp_ssnmap *retval;
retval = kmalloc(sctp_ssnmap_size(in, out), priority);
if (!retval)
goto fail;
if (!sctp_ssnmap_init(retval, in, out))
goto fail_map;
retval->malloced = 1;
SCTP_DBG_OBJCNT_INC(ssnmap);
return retval;
fail_map:
kfree(retval);
fail:
return NULL;
}
/* Initialize a block of memory as a ssnmap. */
struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
__u16 out)
{
memset(map, 0x00, sctp_ssnmap_size(in, out));
/* Start 'in' stream just after the map header. */
map->in.ssn = (__u16 *)&map[1];
map->in.len = in;
/* Start 'out' stream just after 'in'. */
map->out.ssn = &map->in.ssn[in];
map->out.len = out;
return map;
}
/* Clear out the ssnmap streams. */
void sctp_ssnmap_clear(struct sctp_ssnmap *map)
{
size_t size;
size = (map->in.len + map->out.len) * sizeof(__u16);
memset(map->in.ssn, 0x00, size);
}
/* Dispose of a ssnmap. */
void sctp_ssnmap_free(struct sctp_ssnmap *map)
{
if (map && map->malloced) {
kfree(map);
SCTP_DBG_OBJCNT_DEC(ssnmap);
}
}
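/* A minimal usage sketch for the map (illustrative only: it touches the
 * per-stream arrays directly, where real callers would go through an
 * accessor, and the function itself is hypothetical):
 */
static void sctp_ssnmap_usage_sketch(void)
{
	struct sctp_ssnmap *map;
	__u16 ssn;

	/* Room for 4 inbound and 2 outbound streams. */
	map = sctp_ssnmap_new(4, 2, GFP_KERNEL);
	if (!map)
		return;

	/* Hand out the next outgoing SSN on stream 1. */
	ssn = map->out.ssn[1]++;

	sctp_ssnmap_clear(map);	/* every stream starts again at SSN 0 */
	sctp_ssnmap_free(map);
}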
...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
* Xingang Guo <xingang.guo@intel.com> * Xingang Guo <xingang.guo@intel.com>
* Hui Huang <hui.huang@nokia.com> * Hui Huang <hui.huang@nokia.com>
* Sridhar Samudrala <sri@us.ibm.com> * Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* *
* Any bugs reported given to us we will try to fix... any fixes shared will * Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release. * be incorporated into the next SCTP release.
...@@ -184,8 +185,9 @@ void sctp_transport_reset_timers(sctp_transport_t *transport) ...@@ -184,8 +185,9 @@ void sctp_transport_reset_timers(sctp_transport_t *transport)
} }
/* When a data chunk is sent, reset the heartbeat interval. */ /* When a data chunk is sent, reset the heartbeat interval. */
if (!mod_timer(&transport->hb_timer, if (!mod_timer(&transport->hb_timer, transport->hb_interval +
transport->hb_interval + transport->rto + jiffies)) transport->rto + sctp_jitter(transport->rto) +
jiffies))
sctp_transport_hold(transport); sctp_transport_hold(transport);
} }
...@@ -202,7 +204,7 @@ void sctp_transport_set_owner(sctp_transport_t *transport, ...@@ -202,7 +204,7 @@ void sctp_transport_set_owner(sctp_transport_t *transport,
/* Caches the dst entry for a transport's destination address and an optional /* Caches the dst entry for a transport's destination address and an optional
* source address. * source address.
*/ */
void sctp_transport_route(sctp_transport_t *transport, union sctp_addr *saddr, void sctp_transport_route(sctp_transport_t *transport, union sctp_addr *saddr,
struct sctp_opt *opt) struct sctp_opt *opt)
{ {
...@@ -245,10 +247,10 @@ void sctp_transport_route(sctp_transport_t *transport, union sctp_addr *saddr, ...@@ -245,10 +247,10 @@ void sctp_transport_route(sctp_transport_t *transport, union sctp_addr *saddr,
goto out_unlock; goto out_unlock;
} }
sctp_read_unlock(addr_lock); sctp_read_unlock(addr_lock);
/* None of the bound addresses match the source address of the /* None of the bound addresses match the source address of the
* dst. So release it. * dst. So release it.
*/ */
dst_release(dst); dst_release(dst);
} }
......
...@@ -606,9 +606,9 @@ sctp_ulpevent_t *sctp_ulpevent_make_shutdown_event( ...@@ -606,9 +606,9 @@ sctp_ulpevent_t *sctp_ulpevent_make_shutdown_event(
sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc, sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
sctp_chunk_t *chunk, int priority) sctp_chunk_t *chunk, int priority)
{ {
sctp_ulpevent_t *event; sctp_ulpevent_t *event, *levent;
struct sctp_sndrcvinfo *info; struct sctp_sndrcvinfo *info;
struct sk_buff *skb; struct sk_buff *skb, *list;
size_t padding, len; size_t padding, len;
/* Clone the original skb, sharing the data. */ /* Clone the original skb, sharing the data. */
...@@ -647,6 +647,16 @@ sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc, ...@@ -647,6 +647,16 @@ sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
event->malloced = 1; event->malloced = 1;
for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
sctp_ulpevent_set_owner_r(list, asoc);
/* Initialize event with flags 0. */
levent = sctp_ulpevent_init(event, skb, 0);
if (!levent)
goto fail_init;
levent->malloced = 1;
}
info = (struct sctp_sndrcvinfo *) &event->sndrcvinfo; info = (struct sctp_sndrcvinfo *) &event->sndrcvinfo;
/* Sockets API Extensions for SCTP /* Sockets API Extensions for SCTP
...@@ -764,6 +774,7 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb) ...@@ -764,6 +774,7 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
sctp_ulpevent_t *event; sctp_ulpevent_t *event;
sctp_chunk_t *sack; sctp_chunk_t *sack;
struct timer_list *timer; struct timer_list *timer;
int skb_len = skb_headlen(skb);
/* Current stack structures assume that the rcv buffer is /* Current stack structures assume that the rcv buffer is
* per socket. For UDP style sockets this is not true as * per socket. For UDP style sockets this is not true as
...@@ -774,23 +785,23 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb) ...@@ -774,23 +785,23 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
event = (sctp_ulpevent_t *) skb->cb; event = (sctp_ulpevent_t *) skb->cb;
asoc = event->asoc; asoc = event->asoc;
if (asoc->rwnd_over) { if (asoc->rwnd_over) {
if (asoc->rwnd_over >= skb->len) { if (asoc->rwnd_over >= skb_len) {
asoc->rwnd_over -= skb->len; asoc->rwnd_over -= skb_len;
} else { } else {
asoc->rwnd += (skb->len - asoc->rwnd_over); asoc->rwnd += (skb_len - asoc->rwnd_over);
asoc->rwnd_over = 0; asoc->rwnd_over = 0;
} }
} else { } else {
asoc->rwnd += skb->len; asoc->rwnd += skb_len;
} }
SCTP_DEBUG_PRINTK("rwnd increased by %d to (%u, %u) - %u\n", SCTP_DEBUG_PRINTK("rwnd increased by %d to (%u, %u) - %u\n",
skb->len, asoc->rwnd, asoc->rwnd_over, asoc->a_rwnd); skb_len, asoc->rwnd, asoc->rwnd_over, asoc->a_rwnd);
/* Send a window update SACK if the rwnd has increased by at least the /* Send a window update SACK if the rwnd has increased by at least the
* minimum of the association's PMTU and half of the receive buffer. * minimum of the association's PMTU and half of the receive buffer.
* The algorithm used is similar to the one described in Section 4.2.3.3 * The algorithm used is similar to the one described in
* of RFC 1122. * Section 4.2.3.3 of RFC 1122.
*/ */
if ((asoc->state == SCTP_STATE_ESTABLISHED) && if ((asoc->state == SCTP_STATE_ESTABLISHED) &&
(asoc->rwnd > asoc->a_rwnd) && (asoc->rwnd > asoc->a_rwnd) &&
...@@ -808,7 +819,7 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb) ...@@ -808,7 +819,7 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
asoc->peer.sack_needed = 0; asoc->peer.sack_needed = 0;
asoc->peer.next_dup_tsn = 0; asoc->peer.next_dup_tsn = 0;
sctp_push_outqueue(&asoc->outqueue, sack); sctp_outq_tail(&asoc->outqueue, sack);
/* Stop the SACK timer. */ /* Stop the SACK timer. */
timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
...@@ -824,6 +835,7 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb) ...@@ -824,6 +835,7 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
static void sctp_ulpevent_set_owner_r(struct sk_buff *skb, sctp_association_t *asoc) static void sctp_ulpevent_set_owner_r(struct sk_buff *skb, sctp_association_t *asoc)
{ {
sctp_ulpevent_t *event; sctp_ulpevent_t *event;
int skb_len = skb_headlen(skb);
/* The current stack structures assume that the rcv buffer is /* The current stack structures assume that the rcv buffer is
* per socket. For UDP-style sockets this is not true as * per socket. For UDP-style sockets this is not true as
...@@ -840,14 +852,14 @@ static void sctp_ulpevent_set_owner_r(struct sk_buff *skb, sctp_association_t *a ...@@ -840,14 +852,14 @@ static void sctp_ulpevent_set_owner_r(struct sk_buff *skb, sctp_association_t *a
SCTP_ASSERT(asoc->rwnd, "rwnd zero", return); SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return); SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);
if (asoc->rwnd >= skb->len) { if (asoc->rwnd >= skb_len) {
asoc->rwnd -= skb->len; asoc->rwnd -= skb_len;
} else { } else {
asoc->rwnd_over = skb->len - asoc->rwnd; asoc->rwnd_over = skb_len - asoc->rwnd;
asoc->rwnd = 0; asoc->rwnd = 0;
} }
SCTP_DEBUG_PRINTK("rwnd decreased by %d to (%u, %u)\n", SCTP_DEBUG_PRINTK("rwnd decreased by %d to (%u, %u)\n",
skb->len, asoc->rwnd, asoc->rwnd_over); skb_len, asoc->rwnd, asoc->rwnd_over);
} }
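/* Note on the accounting above: skb_headlen(skb) == skb->len - skb->data_len,
 * i.e. only the linear part of the buffer. Once a reassembled message
 * carries its fragments in frag_list, each fragment is charged to (and
 * released from) the receive window individually, so counting skb->len on
 * the head skb would account for the frag_list data twice.
 */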
/* A simple destructor to give up the reference to the association. */ /* A simple destructor to give up the reference to the association. */
......
/* SCTP kernel reference Implementation /* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 International Business Machines, Corp. * Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc. * Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll * Copyright (c) 2001 La Monte H.P. Yarroll
...@@ -49,51 +49,39 @@ ...@@ -49,51 +49,39 @@
#include <net/sctp/sm.h> #include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */ /* Forward declarations for internal helpers. */
static inline sctp_ulpevent_t * sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq, static inline struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event); struct sctp_ulpevent *);
static inline sctp_ulpevent_t *sctp_ulpqueue_order(sctp_ulpqueue_t *ulpq, static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
sctp_ulpevent_t *event); struct sctp_ulpevent *);
/* 1st Level Abstractions */ /* 1st Level Abstractions */
/* Create a new ULP queue. */ /* Create a new ULP queue. */
sctp_ulpqueue_t *sctp_ulpqueue_new(sctp_association_t *asoc, struct sctp_ulpq *sctp_ulpq_new(sctp_association_t *asoc, int priority)
__u16 inbound, int priority)
{ {
sctp_ulpqueue_t *ulpq; struct sctp_ulpq *ulpq;
size_t size;
/* Today, there is only a fixed size of storage needed for ulpq = kmalloc(sizeof(struct sctp_ulpq), priority);
* stream support, but make the interfaces acceptable for
* the future.
*/
size = sizeof(sctp_ulpqueue_t)+sctp_ulpqueue_storage_size(inbound);
ulpq = kmalloc(size, priority);
if (!ulpq) if (!ulpq)
goto fail; goto fail;
if (!sctp_ulpqueue_init(ulpq, asoc, inbound)) if (!sctp_ulpq_init(ulpq, asoc))
goto fail_init; goto fail_init;
ulpq->malloced = 1; ulpq->malloced = 1;
return ulpq; return ulpq;
fail_init: fail_init:
kfree(ulpq); kfree(ulpq);
fail: fail:
return NULL; return NULL;
} }
/* Initialize a ULP queue from a block of memory. */ /* Initialize a ULP queue from a block of memory. */
sctp_ulpqueue_t *sctp_ulpqueue_init(sctp_ulpqueue_t *ulpq, struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
sctp_association_t *asoc, sctp_association_t *asoc)
__u16 inbound)
{ {
memset(ulpq, memset(ulpq, 0x00, sizeof(struct sctp_ulpq));
sizeof(sctp_ulpqueue_t) + sctp_ulpqueue_storage_size(inbound),
0x00);
ulpq->asoc = asoc; ulpq->asoc = asoc;
spin_lock_init(&ulpq->lock);
skb_queue_head_init(&ulpq->reasm); skb_queue_head_init(&ulpq->reasm);
skb_queue_head_init(&ulpq->lobby); skb_queue_head_init(&ulpq->lobby);
ulpq->malloced = 0; ulpq->malloced = 0;
...@@ -101,38 +89,39 @@ sctp_ulpqueue_t *sctp_ulpqueue_init(sctp_ulpqueue_t *ulpq, ...@@ -101,38 +89,39 @@ sctp_ulpqueue_t *sctp_ulpqueue_init(sctp_ulpqueue_t *ulpq,
return ulpq; return ulpq;
} }
/* Flush the reassembly and ordering queues. */ /* Flush the reassembly and ordering queues. */
void sctp_ulpqueue_flush(sctp_ulpqueue_t *ulpq) void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{ {
struct sk_buff *skb; struct sk_buff *skb;
sctp_ulpevent_t *event; struct sctp_ulpevent *event;
while ((skb = skb_dequeue(&ulpq->lobby))) { while ((skb = skb_dequeue(&ulpq->lobby))) {
event = (sctp_ulpevent_t *) skb->cb; event = (struct sctp_ulpevent *) skb->cb;
sctp_ulpevent_free(event); sctp_ulpevent_free(event);
} }
while ((skb = skb_dequeue(&ulpq->reasm))) { while ((skb = skb_dequeue(&ulpq->reasm))) {
event = (sctp_ulpevent_t *) skb->cb; event = (struct sctp_ulpevent *) skb->cb;
sctp_ulpevent_free(event); sctp_ulpevent_free(event);
} }
} }
/* Dispose of a ulpqueue. */ /* Dispose of a ulpqueue. */
void sctp_ulpqueue_free(sctp_ulpqueue_t *ulpq) void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{ {
sctp_ulpqueue_flush(ulpq); sctp_ulpq_flush(ulpq);
if (ulpq->malloced) if (ulpq->malloced)
kfree(ulpq); kfree(ulpq);
} }
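For context, a hedged usage sketch of the renamed ulpq lifecycle: a caller allocates with sctp_ulpq_new() (which kmallocs and runs sctp_ulpq_init()), and later calls sctp_ulpq_free(), which flushes both the reasm and lobby queues and kfree()s the structure only when it was malloced. The wrapper functions below are invented examples, not call sites from this commit.

/* Illustrative callers only. */
static struct sctp_ulpq *example_ulpq_setup(sctp_association_t *asoc,
					    int priority)
{
	return sctp_ulpq_new(asoc, priority);	/* NULL on allocation failure */
}

static void example_ulpq_teardown(struct sctp_ulpq *ulpq)
{
	if (ulpq)
		sctp_ulpq_free(ulpq);	/* flush reasm + lobby, then kfree */
}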
/* Process an incoming DATA chunk. */ /* Process an incoming DATA chunk. */
int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *ulpq, sctp_chunk_t *chunk, int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
int priority) int priority)
{ {
struct sk_buff_head temp; struct sk_buff_head temp;
sctp_data_chunk_t *hdr; sctp_data_chunk_t *hdr;
sctp_ulpevent_t *event; struct sctp_ulpevent *event;
hdr = (sctp_data_chunk_t *) chunk->chunk_hdr; hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;
...@@ -147,7 +136,7 @@ int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *ulpq, sctp_chunk_t *chunk, ...@@ -147,7 +136,7 @@ int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *ulpq, sctp_chunk_t *chunk,
return -ENOMEM; return -ENOMEM;
/* Do reassembly if needed. */ /* Do reassembly if needed. */
event = sctp_ulpqueue_reasm(ulpq, event); event = sctp_ulpq_reasm(ulpq, event);
/* Do ordering if needed. */ /* Do ordering if needed. */
if (event) { if (event) {
...@@ -155,18 +144,18 @@ int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *ulpq, sctp_chunk_t *chunk, ...@@ -155,18 +144,18 @@ int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *ulpq, sctp_chunk_t *chunk,
skb_queue_head_init(&temp); skb_queue_head_init(&temp);
skb_queue_tail(&temp, event->parent); skb_queue_tail(&temp, event->parent);
event = sctp_ulpqueue_order(ulpq, event); event = sctp_ulpq_order(ulpq, event);
} }
/* Send event to the ULP. */ /* Send event to the ULP. */
if (event) if (event)
sctp_ulpqueue_tail_event(ulpq, event); sctp_ulpq_tail_event(ulpq, event);
return 0; return 0;
} }
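The function above is essentially a three-stage pipeline in which each stage may return NULL to indicate that the event is being held (on the reasm queue or the lobby) until more chunks arrive. The self-contained sketch below shows only that control shape; the stage functions are stubs, not the kernel helpers.

#include <stddef.h>

struct event;	/* opaque stand-in for struct sctp_ulpevent */

static struct event *stage_reassemble(struct event *e) { return e; }
static struct event *stage_order(struct event *e)      { return e; }
static void          stage_deliver(struct event *e)    { (void)e; }

/* Shape of the DATA receive path: chunk -> event -> reasm -> order -> ULP. */
static int tail_data_shape(struct event *e)
{
	if (!e)
		return -1;		/* event creation failed upstream      */

	e = stage_reassemble(e);	/* NULL until the datagram is whole    */
	if (e)
		e = stage_order(e);	/* NULL until it is next in SSN order  */
	if (e)
		stage_deliver(e);	/* queue it on the socket for the ULP  */
	return 0;
}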
/* Add a new event for propagation to the ULP. */ /* Add a new event for propagation to the ULP. */
int sctp_ulpqueue_tail_event(sctp_ulpqueue_t *ulpq, sctp_ulpevent_t *event) int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{ {
struct sock *sk = ulpq->asoc->base.sk; struct sock *sk = ulpq->asoc->base.sk;
...@@ -202,20 +191,18 @@ int sctp_ulpqueue_tail_event(sctp_ulpqueue_t *ulpq, sctp_ulpevent_t *event) ...@@ -202,20 +191,18 @@ int sctp_ulpqueue_tail_event(sctp_ulpqueue_t *ulpq, sctp_ulpevent_t *event)
/* 2nd Level Abstractions */ /* 2nd Level Abstractions */
/* Helper function to store chunks that need to be reassembled. */ /* Helper function to store chunks that need to be reassembled. */
static inline void sctp_ulpqueue_store_reasm(sctp_ulpqueue_t *ulpq, sctp_ulpevent_t *event) static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{ {
struct sk_buff *pos, *tmp; struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent; struct sctp_ulpevent *cevent;
__u32 tsn, ctsn; __u32 tsn, ctsn;
unsigned long flags __attribute ((unused));
tsn = event->sndrcvinfo.sinfo_tsn; tsn = event->sndrcvinfo.sinfo_tsn;
sctp_spin_lock_irqsave(&ulpq->reasm.lock, flags);
/* Find the right place in this list. We store them by TSN. */ /* Find the right place in this list. We store them by TSN. */
sctp_skb_for_each(pos, &ulpq->reasm, tmp) { sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
cevent = (sctp_ulpevent_t *)pos->cb; cevent = (struct sctp_ulpevent *)pos->cb;
ctsn = cevent->sndrcvinfo.sinfo_tsn; ctsn = cevent->sndrcvinfo.sinfo_tsn;
if (TSN_lt(tsn, ctsn)) if (TSN_lt(tsn, ctsn))
...@@ -227,29 +214,45 @@ static inline void sctp_ulpqueue_store_reasm(sctp_ulpqueue_t *ulpq, sctp_ulpeven ...@@ -227,29 +214,45 @@ static inline void sctp_ulpqueue_store_reasm(sctp_ulpqueue_t *ulpq, sctp_ulpeven
__skb_insert(event->parent, pos->prev, pos, &ulpq->reasm); __skb_insert(event->parent, pos->prev, pos, &ulpq->reasm);
else else
__skb_queue_tail(&ulpq->reasm, event->parent); __skb_queue_tail(&ulpq->reasm, event->parent);
sctp_spin_unlock_irqrestore(&ulpq->reasm.lock, flags);
} }
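The insertion above keeps the reassembly queue sorted by TSN and relies on TSN_lt() being a wraparound-safe ("serial number") comparison, so the ordering survives the 32-bit TSN space wrapping. A compact standalone illustration, using an array instead of the sk_buff list and a comparison written in the same spirit as TSN_lt():

#include <stdint.h>

/* Wraparound-safe "a < b" on 32-bit TSNs. */
static int tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* Insert 'tsn' into the sorted array 'q' of '*n' entries, keeping order. */
static void tsn_sorted_insert(uint32_t *q, int *n, uint32_t tsn)
{
	int i = *n;

	while (i > 0 && tsn_lt(tsn, q[i - 1])) {
		q[i] = q[i - 1];	/* shift larger TSNs one slot right */
		i--;
	}
	q[i] = tsn;
	(*n)++;
}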
/* Helper function to return an event corresponding to the reassembled /* Helper function to return an event corresponding to the reassembled
* datagram. * datagram.
* This routine creates a reassembled skb given the first and last skbs
* as stored in the reassembly queue. The skbs may be non-linear if the SCTP
* payload was fragmented on the way and IP had to reassemble them.
* We add the rest of the skbs to the first skb's frag_list.
*/ */
static inline sctp_ulpevent_t *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag) static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
{ {
struct sk_buff *pos; struct sk_buff *pos;
sctp_ulpevent_t *event; struct sctp_ulpevent *event;
struct sk_buff *pnext; struct sk_buff *pnext, *last;
struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
/* Store the pointer to the 2nd skb */
pos = f_frag->next; pos = f_frag->next;
/* Set the first fragment's frag_list to point to the 2nd fragment. */ /* Get the last skb in the f_frag's frag_list if present. */
skb_shinfo(f_frag)->frag_list = pos; for (last = list; list; last = list, list = list->next);
/* Add the list of remaining fragments to the first fragment's
* frag_list.
*/
if (last)
last->next = pos;
else
skb_shinfo(f_frag)->frag_list = pos;
/* Remove the first fragment from the reassembly queue. */ /* Remove the first fragment from the reassembly queue. */
__skb_unlink(f_frag, f_frag->list); __skb_unlink(f_frag, f_frag->list);
do { do {
pnext = pos->next; pnext = pos->next;
/* Update the len and data_len fields of the first fragment. */
f_frag->len += pos->len;
f_frag->data_len += pos->len;
/* Remove the fragment from the reassembly queue. */ /* Remove the fragment from the reassembly queue. */
__skb_unlink(pos, pos->list); __skb_unlink(pos, pos->list);
...@@ -269,13 +272,12 @@ static inline sctp_ulpevent_t *sctp_make_reassembled_event(struct sk_buff *f_fra ...@@ -269,13 +272,12 @@ static inline sctp_ulpevent_t *sctp_make_reassembled_event(struct sk_buff *f_fra
/* Helper function to check if an incoming chunk has filled up the last /* Helper function to check if an incoming chunk has filled up the last
* missing fragment in a SCTP datagram and return the corresponding event. * missing fragment in a SCTP datagram and return the corresponding event.
*/ */
static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_t *ulpq) static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{ {
struct sk_buff *pos, *tmp; struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent; sctp_ulpevent_t *cevent;
struct sk_buff *first_frag = NULL; struct sk_buff *first_frag = NULL;
__u32 ctsn, next_tsn; __u32 ctsn, next_tsn;
unsigned long flags __attribute ((unused));
sctp_ulpevent_t *retval = NULL; sctp_ulpevent_t *retval = NULL;
/* Initialized to 0 just to avoid compiler warning message. Will /* Initialized to 0 just to avoid compiler warning message. Will
...@@ -284,8 +286,6 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_ ...@@ -284,8 +286,6 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_
*/ */
next_tsn = 0; next_tsn = 0;
sctp_spin_lock_irqsave(&ulpq->reasm.lock, flags);
/* The chunks are held in the reasm queue sorted by TSN. /* The chunks are held in the reasm queue sorted by TSN.
* Walk through the queue sequentially and look for a sequence of * Walk through the queue sequentially and look for a sequence of
* fragmented chunks that complete a datagram. * fragmented chunks that complete a datagram.
...@@ -327,7 +327,6 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_ ...@@ -327,7 +327,6 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_
if (retval) if (retval)
break; break;
} }
sctp_spin_unlock_irqrestore(&ulpq->reasm.lock, flags);
return retval; return retval;
} }
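The two helpers above cooperate: the walk looks for a first fragment followed by consecutive TSNs up to a last fragment, and only then is sctp_make_reassembled_event() called to splice the remaining skbs onto the first skb's frag_list while growing its len and data_len. The standalone model below captures just the completeness check, with an array in place of the skb queue and simplified fragment flags (the real code uses the SCTP_DATA_*_FRAG chunk flags).

#include <stdint.h>

enum frag_pos { FRAG_FIRST, FRAG_MIDDLE, FRAG_LAST };	/* simplified */

struct frag {
	uint32_t tsn;
	enum frag_pos pos;
};

/* Return the index of the first fragment of a complete datagram in 'q'
 * (sorted by TSN, 'n' entries), or -1 if no run is complete yet.
 */
static int find_complete_datagram(const struct frag *q, int n)
{
	int i, first = -1;
	uint32_t next_tsn = 0;

	for (i = 0; i < n; i++) {
		switch (q[i].pos) {
		case FRAG_FIRST:
			first = i;			/* start a new run  */
			next_tsn = q[i].tsn + 1;	/* expect this next */
			break;
		case FRAG_MIDDLE:
			if (first >= 0 && q[i].tsn == next_tsn)
				next_tsn++;		/* run continues    */
			else
				first = -1;		/* gap: abandon run */
			break;
		case FRAG_LAST:
			if (first >= 0 && q[i].tsn == next_tsn)
				return first;		/* run is complete  */
			first = -1;
			break;
		}
	}
	return -1;
}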
...@@ -335,7 +334,7 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_ ...@@ -335,7 +334,7 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_
/* Helper function to reassemble chunks. Hold chunks on the reasm queue that /* Helper function to reassemble chunks. Hold chunks on the reasm queue that
* need reassembling. * need reassembling.
*/ */
static inline sctp_ulpevent_t *sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq, static inline sctp_ulpevent_t *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event) sctp_ulpevent_t *event)
{ {
sctp_ulpevent_t *retval = NULL; sctp_ulpevent_t *retval = NULL;
...@@ -350,8 +349,8 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq, ...@@ -350,8 +349,8 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq,
if (SCTP_DATA_NOT_FRAG == (event->chunk_flags & SCTP_DATA_FRAG_MASK)) if (SCTP_DATA_NOT_FRAG == (event->chunk_flags & SCTP_DATA_FRAG_MASK))
return event; return event;
sctp_ulpqueue_store_reasm(ulpq, event); sctp_ulpq_store_reasm(ulpq, event);
retval = sctp_ulpqueue_retrieve_reassembled(ulpq); retval = sctp_ulpq_retrieve_reassembled(ulpq);
return retval; return retval;
} }
...@@ -359,20 +358,20 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq, ...@@ -359,20 +358,20 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq,
/* Helper function to gather skbs that have possibly become /* Helper function to gather skbs that have possibly become
* ordered by an incoming chunk. * ordered by an incoming chunk.
*/ */
static inline void sctp_ulpqueue_retrieve_ordered(sctp_ulpqueue_t *ulpq, static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event) sctp_ulpevent_t *event)
{ {
struct sk_buff *pos, *tmp; struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent; struct sctp_ulpevent *cevent;
struct sctp_stream *in;
__u16 sid, csid; __u16 sid, csid;
__u16 ssn, cssn; __u16 ssn, cssn;
unsigned long flags __attribute ((unused));
sid = event->sndrcvinfo.sinfo_stream; sid = event->sndrcvinfo.sinfo_stream;
ssn = event->sndrcvinfo.sinfo_ssn; ssn = event->sndrcvinfo.sinfo_ssn;
in = &ulpq->asoc->ssnmap->in;
/* We are holding the chunks by stream, by SSN. */ /* We are holding the chunks by stream, by SSN. */
sctp_spin_lock_irqsave(&ulpq->lobby.lock, flags);
sctp_skb_for_each(pos, &ulpq->lobby, tmp) { sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
cevent = (sctp_ulpevent_t *) pos->cb; cevent = (sctp_ulpevent_t *) pos->cb;
csid = cevent->sndrcvinfo.sinfo_stream; csid = cevent->sndrcvinfo.sinfo_stream;
...@@ -386,32 +385,31 @@ static inline void sctp_ulpqueue_retrieve_ordered(sctp_ulpqueue_t *ulpq, ...@@ -386,32 +385,31 @@ static inline void sctp_ulpqueue_retrieve_ordered(sctp_ulpqueue_t *ulpq,
if (csid < sid) if (csid < sid)
continue; continue;
if (cssn != ulpq->ssn[sid]) if (cssn != sctp_ssn_peek(in, sid))
break; break;
ulpq->ssn[sid]++; /* Found it, so mark in the ssnmap. */
sctp_ssn_next(in, sid);
__skb_unlink(pos, pos->list); __skb_unlink(pos, pos->list);
/* Attach all gathered skbs to the event. */ /* Attach all gathered skbs to the event. */
__skb_queue_tail(event->parent->list, pos); __skb_queue_tail(event->parent->list, pos);
} }
sctp_spin_unlock_irqrestore(&ulpq->lobby.lock, flags);
} }
/* Helper function to store chunks needing ordering. */ /* Helper function to store chunks needing ordering. */
static inline void sctp_ulpqueue_store_ordered(sctp_ulpqueue_t *ulpq, static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event) sctp_ulpevent_t *event)
{ {
struct sk_buff *pos, *tmp; struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent; sctp_ulpevent_t *cevent;
__u16 sid, csid; __u16 sid, csid;
__u16 ssn, cssn; __u16 ssn, cssn;
unsigned long flags __attribute ((unused));
sid = event->sndrcvinfo.sinfo_stream; sid = event->sndrcvinfo.sinfo_stream;
ssn = event->sndrcvinfo.sinfo_ssn; ssn = event->sndrcvinfo.sinfo_ssn;
sctp_spin_lock_irqsave(&ulpq->lobby.lock, flags);
/* Find the right place in this list. We store them by /* Find the right place in this list. We store them by
* stream ID and then by SSN. * stream ID and then by SSN.
...@@ -432,14 +430,13 @@ static inline void sctp_ulpqueue_store_ordered(sctp_ulpqueue_t *ulpq, ...@@ -432,14 +430,13 @@ static inline void sctp_ulpqueue_store_ordered(sctp_ulpqueue_t *ulpq,
__skb_insert(event->parent, pos->prev, pos, &ulpq->lobby); __skb_insert(event->parent, pos->prev, pos, &ulpq->lobby);
else else
__skb_queue_tail(&ulpq->lobby, event->parent); __skb_queue_tail(&ulpq->lobby, event->parent);
sctp_spin_unlock_irqrestore(&ulpq->lobby.lock, flags);
} }
static inline sctp_ulpevent_t *sctp_ulpqueue_order(sctp_ulpqueue_t *ulpq, static inline sctp_ulpevent_t *sctp_ulpq_order(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event) sctp_ulpevent_t *event)
{ {
__u16 sid, ssn; __u16 sid, ssn;
struct sctp_stream *in;
/* FIXME: We should be using some new chunk structure here /* FIXME: We should be using some new chunk structure here
* instead of carrying chunk fields in the event structure. * instead of carrying chunk fields in the event structure.
...@@ -454,23 +451,24 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_order(sctp_ulpqueue_t *ulpq, ...@@ -454,23 +451,24 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_order(sctp_ulpqueue_t *ulpq,
/* Note: The stream ID must be verified before this routine. */ /* Note: The stream ID must be verified before this routine. */
sid = event->sndrcvinfo.sinfo_stream; sid = event->sndrcvinfo.sinfo_stream;
ssn = event->sndrcvinfo.sinfo_ssn; ssn = event->sndrcvinfo.sinfo_ssn;
in = &ulpq->asoc->ssnmap->in;
/* Is this the expected SSN for this stream ID? */ /* Is this the expected SSN for this stream ID? */
if (ssn != ulpq->ssn[sid]) { if (ssn != sctp_ssn_peek(in, sid)) {
/* We've received something out of order, so find where it /* We've received something out of order, so find where it
* needs to be placed. We order by stream and then by SSN. * needs to be placed. We order by stream and then by SSN.
*/ */
sctp_ulpqueue_store_ordered(ulpq, event); sctp_ulpq_store_ordered(ulpq, event);
return NULL; return NULL;
} }
/* Mark that the next chunk has been found. */ /* Mark that the next chunk has been found. */
ulpq->ssn[sid]++; sctp_ssn_next(in, sid);
/* Go find any other chunks that were waiting for /* Go find any other chunks that were waiting for
* ordering. * ordering.
*/ */
sctp_ulpqueue_retrieve_ordered(ulpq, event); sctp_ulpq_retrieve_ordered(ulpq, event);
return event; return event;
} }
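Ordering now consults the association's ssnmap instead of a per-ulpq ssn[] array: sctp_ssn_peek() reports the next expected SSN for a stream and sctp_ssn_next() advances it once the expected chunk arrives. The toy model below shows the intended semantics; the fixed-size array and function bodies are assumptions for illustration, since the real map is sized from the negotiated stream counts.

#include <stdint.h>

#define EXAMPLE_STREAMS 16	/* illustrative bound only */

/* Toy stand-in for the inbound half of the ssnmap. */
struct ssn_in {
	uint16_t ssn[EXAMPLE_STREAMS];	/* next expected SSN per stream */
};

static uint16_t ssn_peek(const struct ssn_in *in, uint16_t sid)
{
	return in->ssn[sid];
}

static void ssn_next(struct ssn_in *in, uint16_t sid)
{
	in->ssn[sid]++;			/* expected chunk consumed */
}

/* Mirror of the decision in sctp_ulpq_order(): deliver now or hold? */
static int deliver_in_order(struct ssn_in *in, uint16_t sid, uint16_t ssn)
{
	if (ssn != ssn_peek(in, sid))
		return 0;		/* out of order: park in the lobby */
	ssn_next(in, sid);		/* in order: advance and deliver   */
	return 1;
}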