Commit f51b15cd authored by Sridhar Samudrala's avatar Sridhar Samudrala

Merge us.ibm.com:/home/sridhar/BK/linux-2.5.59

into us.ibm.com:/home/sridhar/BK/lksctp-2.5.59
parents 6a3354a9 ea393c93
......@@ -56,8 +56,10 @@
#include <linux/ipv6.h> /* For ipv6hdr. */
#include <net/sctp/user.h>
/* What a hack! Jiminy Cricket! */
enum { SCTP_MAX_STREAM = 10 };
/* Value used for stream negotiation. */
enum { SCTP_MAX_STREAM = 0xffff };
enum { SCTP_DEFAULT_OUTSTREAMS = 10 };
enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
/* Define the amount of space to reserve for SCTP, IP, LL.
* There is a little bit of waste that we are always allocating
......
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001-2003 Intel Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
......@@ -36,7 +36,9 @@
* La Monte H.P. Yarroll <piggy@acm.org>
* Xingang Guo <xingang.guo@intel.com>
* Jon Grimm <jgrimm@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -147,7 +149,9 @@ extern int sctp_primitive_REQUESTHEARTBEAT(sctp_association_t *, void *arg);
/*
* sctp_crc32c.c
*/
extern __u32 count_crc(__u8 *ptr, __u16 count);
extern __u32 sctp_start_cksum(__u8 *ptr, __u16 count);
extern __u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum);
extern __u32 sctp_end_cksum(__u32 cksum);
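The one-shot count_crc() is split into a start/update/end triple so a checksum can be accumulated piecewise, e.g. over an skb's linear head and then each skb on its frag_list, as sctp_rcv_checksum() does later in this commit. A minimal sketch of the calling pattern; 'hdr'/'frag' and their lengths are illustrative buffers, not names from this diff:

	__u32 cksum;

	cksum = sctp_start_cksum(hdr, hdr_len);           /* first block seeds the sum */
	cksum = sctp_update_cksum(frag, frag_len, cksum); /* fold in each further block */
	cksum = sctp_end_cksum(cksum);                    /* final fixup (bit reflection for CRC32-C) */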
/*
* sctp_input.c
......@@ -266,6 +270,7 @@ extern atomic_t sctp_dbg_objcnt_transport;
extern atomic_t sctp_dbg_objcnt_chunk;
extern atomic_t sctp_dbg_objcnt_bind_addr;
extern atomic_t sctp_dbg_objcnt_addr;
extern atomic_t sctp_dbg_objcnt_ssnmap;
/* Macros to atomically increment/decrement objcnt counters. */
#define SCTP_DBG_OBJCNT_INC(name) \
......@@ -418,6 +423,23 @@ static inline size_t get_user_iov_size(struct iovec *iov, int iovlen)
return retval;
}
/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */
static inline __s32 sctp_jitter(__u32 rto)
{
static __u32 sctp_rand;
__s32 ret;
sctp_rand += jiffies;
sctp_rand ^= (sctp_rand << 12);
sctp_rand ^= (sctp_rand >> 20);
/* Choose random number from 0 to rto, then move to -50% ~ +50%
* of rto.
*/
ret = sctp_rand % rto - (rto >> 1);
return ret;
}
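Its caller in this commit is the heartbeat timer logic, which uses the jitter to keep the transports of an association from firing their heartbeats in lockstep; from sctp_cmd_hb_timers_update() below:

	if (!mod_timer(&t->hb_timer, t->hb_interval + t->rto +
		       sctp_jitter(t->rto) + jiffies))
		sctp_transport_hold(t);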
/* Walk through a list of TLV parameters. Don't trust the
* individual parameter lengths and instead depend on
* the chunk length to indicate when to stop. Make sure
......
......@@ -269,6 +269,7 @@ sctp_chunk_t *sctp_make_op_error(const sctp_association_t *,
const void *payload,
size_t paylen);
void sctp_chunk_assign_tsn(sctp_chunk_t *);
void sctp_chunk_assign_ssn(sctp_chunk_t *);
/* Prototypes for statetable processing. */
......
......@@ -42,7 +42,7 @@
* Sridhar Samudrala <sri@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Dajiang Zhang <dajiang.zhang@nokia.com>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -104,27 +104,27 @@ union sctp_addr {
/* Forward declarations for data structures. */
struct SCTP_protocol;
struct sctp_protocol;
struct SCTP_endpoint;
struct SCTP_association;
struct SCTP_transport;
struct SCTP_packet;
struct SCTP_chunk;
struct SCTP_inqueue;
struct SCTP_outqueue;
struct sctp_outq;
struct SCTP_bind_addr;
struct sctp_ulpq;
struct sctp_opt;
struct sctp_endpoint_common;
struct sctp_ssnmap;
typedef struct SCTP_protocol sctp_protocol_t;
typedef struct sctp_protocol sctp_protocol_t;
typedef struct SCTP_endpoint sctp_endpoint_t;
typedef struct SCTP_association sctp_association_t;
typedef struct SCTP_transport sctp_transport_t;
typedef struct SCTP_packet sctp_packet_t;
typedef struct SCTP_chunk sctp_chunk_t;
typedef struct SCTP_inqueue sctp_inqueue_t;
typedef struct SCTP_outqueue sctp_outqueue_t;
typedef struct SCTP_bind_addr sctp_bind_addr_t;
typedef struct sctp_opt sctp_opt_t;
typedef struct sctp_endpoint_common sctp_endpoint_common_t;
......@@ -133,7 +133,6 @@ typedef struct sctp_endpoint_common sctp_endpoint_common_t;
#include <net/sctp/ulpevent.h>
#include <net/sctp/ulpqueue.h>
/* Structures useful for managing bind/connect. */
typedef struct sctp_bind_bucket {
......@@ -157,7 +156,7 @@ typedef struct sctp_hashbucket {
/* The SCTP protocol structure. */
struct SCTP_protocol {
struct sctp_protocol {
/* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
*
* The following protocol parameters are RECOMMENDED:
......@@ -183,8 +182,8 @@ struct SCTP_protocol {
/* Valid.Cookie.Life - 60 seconds */
int valid_cookie_life;
/* Whether Cookie Preservative is enabled(1) or not(0) */
int cookie_preserve_enable;
/* Association.Max.Retrans - 10 attempts
......@@ -282,7 +281,7 @@ struct sctp_af *sctp_get_af_specific(sa_family_t);
int sctp_register_af(struct sctp_af *);
/* Protocol family functions. */
typedef struct sctp_pf {
struct sctp_pf {
void (*event_msgname)(sctp_ulpevent_t *, char *, int *);
void (*skb_msgname) (struct sk_buff *, char *, int *);
int (*af_supported) (sa_family_t);
......@@ -291,7 +290,7 @@ typedef struct sctp_pf {
struct sctp_opt *);
int (*bind_verify) (struct sctp_opt *, union sctp_addr *);
struct sctp_af *af;
} sctp_pf_t;
};
/* SCTP Socket type: UDP or TCP style. */
typedef enum {
......@@ -318,7 +317,7 @@ struct sctp_opt {
__u32 autoclose;
__u8 nodelay;
__u8 disable_fragments;
sctp_pf_t *pf;
struct sctp_pf *pf;
};
......@@ -360,7 +359,8 @@ typedef struct sctp_cookie {
struct timeval expiration;
/* Number of inbound/outbound streams which are set
* and negotiated during the INIT process. */
* and negotiated during the INIT process.
*/
__u16 sinit_num_ostreams;
__u16 sinit_max_instreams;
......@@ -426,6 +426,49 @@ typedef struct sctp_sender_hb_info {
unsigned long sent_at;
} sctp_sender_hb_info_t __attribute__((packed));
/*
* RFC 2960 1.3.2 Sequenced Delivery within Streams
*
* The term "stream" is used in SCTP to refer to a sequence of user
* messages that are to be delivered to the upper-layer protocol in
* order with respect to other messages within the same stream. This is
* in contrast to its usage in TCP, where it refers to a sequence of
* bytes (in this document a byte is assumed to be eight bits).
* ...
*
* This is the structure we use to track both our outbound and inbound
* SSN, or Stream Sequence Numbers.
*/
struct sctp_stream {
__u16 *ssn;
unsigned int len;
};
struct sctp_ssnmap {
struct sctp_stream in;
struct sctp_stream out;
int malloced;
};
struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *, __u16, __u16);
struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, int priority);
void sctp_ssnmap_free(struct sctp_ssnmap *map);
void sctp_ssnmap_clear(struct sctp_ssnmap *map);
/* What is the current SSN number for this stream? */
static inline __u16 sctp_ssn_peek(struct sctp_stream *stream, __u16 id)
{
return stream->ssn[id];
}
/* Return the next SSN number for this stream. */
static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id)
{
return stream->ssn[id]++;
}
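Putting the pieces together: a map is allocated once the stream counts are known (sctp_process_init() does this later in the commit), and each outbound ordered DATA chunk then consumes one SSN from its stream. A hypothetical, self-contained sketch; 'inbound', 'outbound' and 'sid' are illustrative values, not taken from this diff:

	struct sctp_ssnmap *map;
	__u16 inbound = 10, outbound = 10, sid = 3, ssn;

	map = sctp_ssnmap_new(inbound, outbound, GFP_KERNEL); /* priority is a gfp mask */
	if (map) {
		ssn = sctp_ssn_next(&map->out, sid); /* returns 0, advances stream 3 to 1 */
		ssn = sctp_ssn_peek(&map->out, sid); /* reads back 1 without advancing */
		sctp_ssnmap_clear(map);              /* restart: all SSNs back to 0 */
		sctp_ssnmap_free(map);
	}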
/* RFC2960 1.4 Key Terms
*
* o Chunk: A unit of information within an SCTP packet, consisting of
......@@ -499,6 +542,7 @@ struct SCTP_chunk {
__u8 rtt_in_progress; /* Is this chunk used for RTT calculation? */
__u8 num_times_sent; /* How many times did we send this? */
__u8 has_tsn; /* Does this chunk have a TSN yet? */
__u8 has_ssn; /* Does this chunk have a SSN yet? */
__u8 singleton; /* Was this the only chunk in the packet? */
__u8 end_of_packet; /* Was this the last chunk in the packet? */
__u8 ecn_ce_done; /* Have we processed the ECN CE bit? */
......@@ -578,27 +622,27 @@ struct SCTP_packet {
int malloced;
};
typedef int (sctp_outqueue_thandler_t)(sctp_outqueue_t *, void *);
typedef int (sctp_outqueue_ehandler_t)(sctp_outqueue_t *);
typedef sctp_packet_t *(sctp_outqueue_ohandler_init_t)
typedef int (sctp_outq_thandler_t)(struct sctp_outq *, void *);
typedef int (sctp_outq_ehandler_t)(struct sctp_outq *);
typedef sctp_packet_t *(sctp_outq_ohandler_init_t)
(sctp_packet_t *,
sctp_transport_t *,
__u16 sport,
__u16 dport);
typedef sctp_packet_t *(sctp_outqueue_ohandler_config_t)
typedef sctp_packet_t *(sctp_outq_ohandler_config_t)
(sctp_packet_t *,
__u32 vtag,
int ecn_capable,
sctp_packet_phandler_t *get_prepend_chunk);
typedef sctp_xmit_t (sctp_outqueue_ohandler_t)(sctp_packet_t *,
typedef sctp_xmit_t (sctp_outq_ohandler_t)(sctp_packet_t *,
sctp_chunk_t *);
typedef int (sctp_outqueue_ohandler_force_t)(sctp_packet_t *);
typedef int (sctp_outq_ohandler_force_t)(sctp_packet_t *);
sctp_outqueue_ohandler_init_t sctp_packet_init;
sctp_outqueue_ohandler_config_t sctp_packet_config;
sctp_outqueue_ohandler_t sctp_packet_append_chunk;
sctp_outqueue_ohandler_t sctp_packet_transmit_chunk;
sctp_outqueue_ohandler_force_t sctp_packet_transmit;
sctp_outq_ohandler_init_t sctp_packet_init;
sctp_outq_ohandler_config_t sctp_packet_config;
sctp_outq_ohandler_t sctp_packet_append_chunk;
sctp_outq_ohandler_t sctp_packet_transmit_chunk;
sctp_outq_ohandler_force_t sctp_packet_transmit;
void sctp_packet_free(sctp_packet_t *);
......@@ -835,7 +879,7 @@ void sctp_inqueue_set_th_handler(sctp_inqueue_t *,
*
* When free()'d, it empties itself out via output_handler().
*/
struct SCTP_outqueue {
struct sctp_outq {
sctp_association_t *asoc;
/* BUG: This really should be an array of streams.
......@@ -861,11 +905,11 @@ struct SCTP_outqueue {
* layer. This is always SCTP_packet, but we separate the two
* structures to make testing simpler.
*/
sctp_outqueue_ohandler_init_t *init_output;
sctp_outqueue_ohandler_config_t *config_output;
sctp_outqueue_ohandler_t *append_output;
sctp_outqueue_ohandler_t *build_output;
sctp_outqueue_ohandler_force_t *force_output;
sctp_outq_ohandler_init_t *init_output;
sctp_outq_ohandler_config_t *config_output;
sctp_outq_ohandler_t *append_output;
sctp_outq_ohandler_t *build_output;
sctp_outq_ohandler_force_t *force_output;
/* How many unackd bytes do we have in-flight? */
__u32 outstanding_bytes;
......@@ -877,24 +921,23 @@ struct SCTP_outqueue {
int malloced;
};
sctp_outqueue_t *sctp_outqueue_new(sctp_association_t *);
void sctp_outqueue_init(sctp_association_t *, sctp_outqueue_t *);
void sctp_outqueue_teardown(sctp_outqueue_t *);
void sctp_outqueue_free(sctp_outqueue_t*);
void sctp_force_outqueue(sctp_outqueue_t *);
int sctp_push_outqueue(sctp_outqueue_t *, sctp_chunk_t *chunk);
int sctp_flush_outqueue(sctp_outqueue_t *, int);
int sctp_sack_outqueue(sctp_outqueue_t *, sctp_sackhdr_t *);
int sctp_outqueue_is_empty(const sctp_outqueue_t *);
int sctp_outqueue_set_output_handlers(sctp_outqueue_t *,
sctp_outqueue_ohandler_init_t init,
sctp_outqueue_ohandler_config_t config,
sctp_outqueue_ohandler_t append,
sctp_outqueue_ohandler_t build,
sctp_outqueue_ohandler_force_t force);
void sctp_outqueue_restart(sctp_outqueue_t *);
void sctp_retransmit(sctp_outqueue_t *, sctp_transport_t *, __u8);
void sctp_retransmit_mark(sctp_outqueue_t *, sctp_transport_t *, __u8);
struct sctp_outq *sctp_outq_new(sctp_association_t *);
void sctp_outq_init(sctp_association_t *, struct sctp_outq *);
void sctp_outq_teardown(struct sctp_outq *);
void sctp_outq_free(struct sctp_outq*);
int sctp_outq_tail(struct sctp_outq *, sctp_chunk_t *chunk);
int sctp_outq_flush(struct sctp_outq *, int);
int sctp_outq_sack(struct sctp_outq *, sctp_sackhdr_t *);
int sctp_outq_is_empty(const struct sctp_outq *);
int sctp_outq_set_output_handlers(struct sctp_outq *,
sctp_outq_ohandler_init_t init,
sctp_outq_ohandler_config_t config,
sctp_outq_ohandler_t append,
sctp_outq_ohandler_t build,
sctp_outq_ohandler_force_t force);
void sctp_outq_restart(struct sctp_outq *);
void sctp_retransmit(struct sctp_outq *, sctp_transport_t *, __u8);
void sctp_retransmit_mark(struct sctp_outq *, sctp_transport_t *, __u8);
/* These bind address data fields common between endpoints and associations */
......@@ -1027,7 +1070,7 @@ struct SCTP_endpoint {
/* These are the system-wide defaults and other stuff which is
* endpoint-independent.
*/
sctp_protocol_t *proto;
struct sctp_protocol *proto;
/* Associations: A list of current associations and mappings
* to the data consumers for each association. This
......@@ -1408,18 +1451,15 @@ struct SCTP_association {
} defaults;
/* This tracks outbound ssn for a given stream. */
__u16 ssn[SCTP_MAX_STREAM];
struct sctp_ssnmap *ssnmap;
/* All outbound chunks go through this structure. */
sctp_outqueue_t outqueue;
struct sctp_outq outqueue;
/* A smart pipe that will handle reordering and fragmentation,
* as well as handle passing events up to the ULP.
* In the future, we should make this at least dynamic, if
* not also some sparse structure.
*/
sctp_ulpqueue_t ulpq;
__u8 _ssnmap[sctp_ulpqueue_storage_size(SCTP_MAX_STREAM)];
struct sctp_ulpq ulpq;
/* Need to send an ECNE Chunk? */
int need_ecne;
......@@ -1505,7 +1545,7 @@ struct SCTP_association {
*
*
* [I really think this is EXACTLY the sort of intelligence
* which already resides in SCTP_outqueue. Please move this
* which already resides in sctp_outq. Please move this
* queue and its supporting logic down there. --piggy]
*/
struct sk_buff_head addip_chunks;
......
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 International Business Machines, Corp.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
* These are the definitions needed for the sctp_ulpqueue type. The
* sctp_ulpqueue is the interface between the Upper Layer Protocol, or ULP,
*
* These are the definitions needed for the sctp_ulpq type. The
* sctp_ulpq is the interface between the Upper Layer Protocol, or ULP,
* and the core SCTP state machine. This is the component which handles
* reassembly and ordering.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* reassembly and ordering.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* the SCTP reference implementation is distributed in the hope that it
*
* the SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to one of the
* following email addresses:
*
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
*
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email addresses:
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
......@@ -42,46 +47,26 @@
#define __sctp_ulpqueue_h__
/* A structure to carry information to the ULP (e.g. Sockets API) */
typedef struct sctp_ulpqueue {
struct sctp_ulpq {
int malloced;
spinlock_t lock;
sctp_association_t *asoc;
struct sk_buff_head reasm;
struct sk_buff_head lobby;
__u16 ssn[0];
} sctp_ulpqueue_t;
/* This macro assists in creation of external storage for variable length
* internal buffers.
*/
#define sctp_ulpqueue_storage_size(inbound) (sizeof(__u16) * (inbound))
sctp_ulpqueue_t *sctp_ulpqueue_new(sctp_association_t *asoc,
__u16 inbound,
int priority);
sctp_ulpqueue_t *sctp_ulpqueue_init(sctp_ulpqueue_t *ulpq,
sctp_association_t *asoc,
__u16 inbound);
void sctp_ulpqueue_free(sctp_ulpqueue_t *);
};
/* Prototypes. */
struct sctp_ulpq *sctp_ulpq_new(sctp_association_t *asoc, int priority);
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *, sctp_association_t *);
void sctp_ulpq_free(struct sctp_ulpq *);
/* Add a new DATA chunk for processing. */
int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *,
sctp_chunk_t *chunk,
int priority);
int sctp_ulpq_tail_data(struct sctp_ulpq *, sctp_chunk_t *chunk, int priority);
/* Add a new event for propagation to the ULP. */
int sctp_ulpqueue_tail_event(sctp_ulpqueue_t *,
sctp_ulpevent_t *event);
int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
/* Is the ulpqueue empty? */
int sctp_ulpqueue_is_empty(sctp_ulpqueue_t *);
int sctp_ulpqueue_is_data_empty(sctp_ulpqueue_t *);
int sctp_ulpqueue_is_empty(struct sctp_ulpq *);
#endif /* __sctp_ulpqueue_h__ */
......@@ -90,4 +75,4 @@ int sctp_ulpqueue_is_data_empty(sctp_ulpqueue_t *);
......@@ -100,6 +100,14 @@ enum sctp_optname {
#define SCTP_SOCKOPT_BINDX_REM SCTP_SOCKOPT_BINDX_REM
SCTP_SOCKOPT_PEELOFF, /* peel off association. */
#define SCTP_SOCKOPT_PEELOFF SCTP_SOCKOPT_PEELOFF
SCTP_GET_PEER_ADDRS_NUM, /* Get number of peer addresses. */
#define SCTP_GET_PEER_ADDRS_NUM SCTP_GET_PEER_ADDRS_NUM
SCTP_GET_PEER_ADDRS, /* Get all peer addresses. */
#define SCTP_GET_PEER_ADDRS SCTP_GET_PEER_ADDRS
SCTP_GET_LOCAL_ADDRS_NUM, /* Get number of local addresses. */
#define SCTP_GET_LOCAL_ADDRS_NUM SCTP_GET_LOCAL_ADDRS_NUM
SCTP_GET_LOCAL_ADDRS, /* Get all local addresses. */
#define SCTP_GET_LOCAL_ADDRS SCTP_GET_LOCAL_ADDRS
};
......@@ -576,6 +584,15 @@ struct sctp_setstrm_timeout {
__u16 ssto_streamid_end;
};
/*
* 8.3, 8.5  Get all peer/local addresses on a socket.
* This parameter struct is used with the getsockopt() options above.
*/
struct sctp_getaddrs {
sctp_assoc_t assoc_id;
int addr_num;
struct sockaddr_storage *addrs;
};
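From user space the apparent calling sequence for the new options is two getsockopt() calls: one to learn the address count, one to fill a caller-supplied buffer. A hypothetical sketch; the in/out semantics of the *_NUM option (here assumed: sctp_assoc_t in, count out through the same buffer) are an assumption, not something this diff spells out:

	struct sctp_getaddrs ga;
	sctp_assoc_t id = assoc_id;              /* association of interest */
	socklen_t len = sizeof(id);
	int n;

	/* Assumed: pass the assoc id in, get the address count back. */
	getsockopt(fd, IPPROTO_SCTP, SCTP_GET_PEER_ADDRS_NUM, &id, &len);
	n = (int) id;

	ga.assoc_id = assoc_id;
	ga.addr_num = n;
	ga.addrs = calloc(n, sizeof(struct sockaddr_storage));
	len = sizeof(ga);
	getsockopt(fd, IPPROTO_SCTP, SCTP_GET_PEER_ADDRS, &ga, &len);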
/* These are bit fields for msghdr->msg_flags. See section 5.1. */
/* On user space Linux, these live in <bits/socket.h> as an enum. */
......
......@@ -10,7 +10,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
inqueue.o outqueue.o ulpqueue.o command.o \
tsnmap.o bind_addr.o socket.o primitive.o \
output.o input.o hashdriver.o sla1.o \
debug.o
debug.o ssnmap.o
ifeq ($(CONFIG_SCTP_ADLER32), y)
sctp-y += adler32.o
......
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2003 International Business Machines, Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
......@@ -36,6 +37,7 @@
* Randall Stewart <rstewar1@email.mot.com>
* Ken Morneau <kmorneau@cisco.com>
* Qiaobing Xie <qxie1@email.mot.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -122,7 +124,7 @@ unsigned long update_adler32(unsigned long adler,
return (s2 << 16) + s1;
}
__u32 count_crc(__u8 *ptr, __u16 count)
__u32 sctp_start_cksum(__u8 *ptr, __u16 count)
{
/*
* Update a running Adler-32 checksum with the bytes
......@@ -146,3 +148,15 @@ __u32 count_crc(__u8 *ptr, __u16 count)
return adler;
}
__u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 adler)
{
adler = update_adler32(adler, ptr, count);
return adler;
}
__u32 sctp_end_cksum(__u32 adler)
{
return adler;
}
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 International Business Machines Corp.
* Copyright (c) 2001-2003 International Business Machines Corp.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
......@@ -166,15 +166,10 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
asoc->max_init_timeo = sp->initmsg.sinit_max_init_timeo * HZ;
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
* The stream sequence number in all the streams shall start
* from 0 when the association is established. Also, when the
* stream sequence number reaches the value 65535 the next
* stream sequence number shall be set to 0.
/* Allocate storage for the ssnmap after the inbound and outbound
* streams have been negotiated during Init.
*/
for (i = 0; i < SCTP_MAX_STREAM; i++)
asoc->ssn[i] = 0;
asoc->ssnmap = NULL;
/* Set the local window size for receive.
* This is also the rcvbuf space per association.
......@@ -252,15 +247,15 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
asoc);
/* Create an output queue. */
sctp_outqueue_init(asoc, &asoc->outqueue);
sctp_outqueue_set_output_handlers(&asoc->outqueue,
sctp_packet_init,
sctp_packet_config,
sctp_packet_append_chunk,
sctp_packet_transmit_chunk,
sctp_packet_transmit);
if (NULL == sctp_ulpqueue_init(&asoc->ulpq, asoc, SCTP_MAX_STREAM))
sctp_outq_init(asoc, &asoc->outqueue);
sctp_outq_set_output_handlers(&asoc->outqueue,
sctp_packet_init,
sctp_packet_config,
sctp_packet_append_chunk,
sctp_packet_transmit_chunk,
sctp_packet_transmit);
if (NULL == sctp_ulpq_init(&asoc->ulpq, asoc))
goto fail_init;
/* Set up the tsn tracking. */
......@@ -310,14 +305,17 @@ void sctp_association_free(sctp_association_t *asoc)
asoc->base.dead = 1;
/* Dispose of any data lying around in the outqueue. */
sctp_outqueue_free(&asoc->outqueue);
sctp_outq_free(&asoc->outqueue);
/* Dispose of any pending messages for the upper layer. */
sctp_ulpqueue_free(&asoc->ulpq);
sctp_ulpq_free(&asoc->ulpq);
/* Dispose of any pending chunks on the inqueue. */
sctp_inqueue_free(&asoc->base.inqueue);
/* Free ssnmap storage. */
sctp_ssnmap_free(asoc->ssnmap);
/* Clean up the bound address list. */
sctp_bind_addr_free(&asoc->base.bind_addr);
......@@ -524,7 +522,7 @@ void sctp_assoc_control_transport(sctp_association_t *asoc,
break;
default:
BUG();
return;
};
/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
......@@ -534,7 +532,7 @@ void sctp_assoc_control_transport(sctp_association_t *asoc,
(struct sockaddr_storage *) &transport->ipaddr,
0, spc_state, error, GFP_ATOMIC);
if (event)
sctp_ulpqueue_tail_event(&asoc->ulpq, event);
sctp_ulpq_tail_event(&asoc->ulpq, event);
/* Select new active and retran paths. */
......@@ -634,7 +632,7 @@ __u32 __sctp_association_get_tsn_block(sctp_association_t *asoc, int num)
/* Fetch the next Stream Sequence Number for stream number 'sid'. */
__u16 __sctp_association_get_next_ssn(sctp_association_t *asoc, __u16 sid)
{
return asoc->ssn[sid]++;
return sctp_ssn_next(&asoc->ssnmap->out, sid);
}
/* Compare two addresses to see if they match. Wildcard addresses
......@@ -852,8 +850,6 @@ void sctp_assoc_migrate(sctp_association_t *assoc, struct sock *newsk)
/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
{
int i;
/* Copy in new parameters of peer. */
asoc->c = new->c;
asoc->peer.rwnd = new->peer.rwnd;
......@@ -872,23 +868,28 @@ void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
/* If the case is A (association restart), use
* initial_tsn as next_tsn. If the case is B, use
* current next_tsn in case there is data sent to peer
* current next_tsn in case data sent to peer
* has been discarded and needs retransmission.
*/
if (SCTP_STATE_ESTABLISHED == asoc->state) {
asoc->next_tsn = new->next_tsn;
asoc->ctsn_ack_point = new->ctsn_ack_point;
/* Reinitialize SSN for both local streams
* and peer's streams.
*/
for (i = 0; i < SCTP_MAX_STREAM; i++) {
asoc->ssn[i] = 0;
asoc->ulpq.ssn[i] = 0;
}
sctp_ssnmap_clear(asoc->ssnmap);
} else {
asoc->ctsn_ack_point = asoc->next_tsn - 1;
if (!asoc->ssnmap) {
/* Move the ssnmap. */
asoc->ssnmap = new->ssnmap;
new->ssnmap = NULL;
}
}
}
/* Choose the transport for sending a shutdown packet.
......
......@@ -47,8 +47,8 @@ sctp_cmd_seq_t *sctp_new_cmd_seq(int priority)
{
sctp_cmd_seq_t *retval = t_new(sctp_cmd_seq_t, priority);
/* XXX Check for NULL? -DaveM */
sctp_init_cmd_seq(retval);
if (retval)
sctp_init_cmd_seq(retval);
return retval;
}
......
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 International Business Machines, Corp.
* Copyright (c) 2001-2003 International Business Machines, Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
......@@ -33,6 +33,7 @@
* Written or modified by:
* Dinakaran Joseph
* Jon Grimm <jgrimm@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -135,11 +136,10 @@ __u32 crc_c[256] = {
0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
};
__u32 count_crc(__u8 *buffer, __u16 length)
__u32 sctp_start_cksum(__u8 *buffer, __u16 length)
{
__u32 crc32 = ~(__u32) 0;
__u32 i, result;
__u8 byte0, byte1, byte2, byte3;
__u32 i;
/* Optimize this routine to be SCTP specific, knowing how
* to skip the checksum field of the SCTP header.
......@@ -157,6 +157,24 @@ __u32 count_crc(__u8 *buffer, __u16 length)
for (i = sizeof(struct sctphdr); i < length ; i++)
CRC32C(crc32, buffer[i]);
return crc32;
}
__u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32)
{
__u32 i;
for (i = 0; i < length ; i++)
CRC32C(crc32, buffer[i]);
return crc32;
}
__u32 sctp_end_cksum(__u32 crc32)
{
__u32 result;
__u8 byte0, byte1, byte2, byte3;
result = ~crc32;
/* result now holds the negated polynomial remainder;
......@@ -183,5 +201,3 @@ __u32 count_crc(__u8 *buffer, __u16 length)
byte3);
return crc32;
}
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 International Business Machines, Corp.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
......@@ -72,10 +72,19 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
{
struct sctphdr *sh;
__u32 cmp, val;
struct sk_buff *list = skb_shinfo(skb)->frag_list;
sh = (struct sctphdr *) skb->h.raw;
cmp = ntohl(sh->checksum);
val = count_crc((__u8 *)sh, skb->len);
val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
for (; list; list = list->next)
val = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
val);
val = sctp_end_cksum(val);
if (val != cmp) {
/* CRC failure, dump it. */
return -1;
......
......@@ -113,14 +113,14 @@ static inline int sctp_v6_xmit(struct sk_buff *skb)
/* FIXME: Currently, ip6_route_output() doesn't fill in the source
* address in the returned route entry. So we call ipv6_get_saddr()
* to get an appropriate source address. It is possible that this address
* may not be part of the bind address list of the association.
* to get an appropriate source address. It is possible that this
* address may not be part of the bind address list of the association.
* Once ip6_route_ouput() is fixed so that it returns a route entry
* with an appropriate source address, the following if condition can
* be removed. With ip6_route_output() returning a source address filled
* route entry, sctp_transport_route() can do real source address
* selection for v6.
*/
* be removed. With ip6_route_output() returning a source address
* filled route entry, sctp_transport_route() can do real source
* address selection for v6.
*/
if (ipv6_addr_any(&rt6->rt6i_src.addr)) {
err = ipv6_get_saddr(dst, fl.fl6_dst, &saddr);
......@@ -130,7 +130,6 @@ static inline int sctp_v6_xmit(struct sk_buff *skb)
__FUNCTION__, NIP6(fl.fl6_src));
return err;
}
fl.fl6_src = &saddr;
} else {
fl.fl6_src = &rt6->rt6i_src.addr;
......@@ -572,7 +571,7 @@ int sctp_v6_init(void)
/* Register the SCTP specfic AF_INET6 functions. */
sctp_register_af(&sctp_ipv6_specific);
/* Register notifier for inet6 address additions/deletions. */
register_inet6addr_notifier(&sctp_inetaddr_notifier);
return 0;
......
......@@ -54,6 +54,7 @@ SCTP_DBG_OBJCNT(assoc);
SCTP_DBG_OBJCNT(bind_addr);
SCTP_DBG_OBJCNT(chunk);
SCTP_DBG_OBJCNT(addr);
SCTP_DBG_OBJCNT(ssnmap);
/* An array to make it easy to pretty print the debug information
* to the proc fs.
......@@ -66,6 +67,7 @@ sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = {
SCTP_DBG_OBJCNT_ENTRY(chunk),
SCTP_DBG_OBJCNT_ENTRY(bind_addr),
SCTP_DBG_OBJCNT_ENTRY(addr),
SCTP_DBG_OBJCNT_ENTRY(ssnmap),
};
/* Callback from procfs to read out objcount information.
......
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 International Business Machines, Corp.
* Copyright (c) 2001-2003 International Business Machines, Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
......@@ -62,7 +62,6 @@
#include <net/sctp/sm.h>
/* Forward declarations for private helpers. */
__u32 count_crc(__u8 *ptr, __u16 count);
static void sctp_packet_reset(sctp_packet_t *packet);
static sctp_xmit_t sctp_packet_append_data(sctp_packet_t *packet,
sctp_chunk_t *chunk);
......@@ -228,7 +227,7 @@ sctp_xmit_t sctp_packet_append_chunk(sctp_packet_t *packet, sctp_chunk_t *chunk)
}
/* All packets are sent to the network through this function from
* sctp_push_outqueue().
* sctp_outq_tail().
*
* The return value is a normal kernel error return value.
*/
......@@ -358,7 +357,8 @@ int sctp_packet_transmit(sctp_packet_t *packet)
* Note: Adler-32 is no longer applicable, as has been replaced
* by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
*/
crc32 = count_crc((__u8 *)sh, nskb->len);
crc32 = sctp_start_cksum((__u8 *)sh, nskb->len);
crc32 = sctp_end_cksum(crc32);
/* 3) Put the resultant value into the checksum field in the
* common header, and leave the rest of the bits unchanged.
......
......@@ -2,11 +2,11 @@
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001-2002 International Business Machines Corp.
* Copyright (c) 2001-2003 International Business Machines Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
* These functions implement the outqueue class. The outqueue handles
* These functions implement the sctp_outq class. The outqueue handles
* bundling and queueing of outgoing SCTP chunks.
*
* The SCTP reference implementation is free software;
......@@ -47,39 +47,39 @@
*/
#include <linux/types.h>
#include <linux/list.h> /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <net/sock.h> /* For skb_set_owner_w */
#include <net/sctp/sctp.h>
/* Declare internal functions here. */
static int sctp_acked(sctp_sackhdr_t *sack, __u32 tsn);
static void sctp_check_transmitted(sctp_outqueue_t *q,
static void sctp_check_transmitted(struct sctp_outq *q,
struct list_head *transmitted_queue,
sctp_transport_t *transport,
sctp_sackhdr_t *sack,
__u32 highest_new_tsn);
/* Generate a new outqueue. */
sctp_outqueue_t *sctp_outqueue_new(sctp_association_t *asoc)
struct sctp_outq *sctp_outq_new(sctp_association_t *asoc)
{
sctp_outqueue_t *q;
struct sctp_outq *q;
q = t_new(sctp_outqueue_t, GFP_KERNEL);
q = t_new(struct sctp_outq, GFP_KERNEL);
if (q) {
sctp_outqueue_init(asoc, q);
sctp_outq_init(asoc, q);
q->malloced = 1;
}
return q;
}
/* Initialize an existing SCTP_outqueue. This does the boring stuff.
/* Initialize an existing sctp_outq. This does the boring stuff.
* You still need to define handlers if you really want to DO
* something with this structure...
*/
void sctp_outqueue_init(sctp_association_t *asoc, sctp_outqueue_t *q)
void sctp_outq_init(sctp_association_t *asoc, struct sctp_outq *q)
{
q->asoc = asoc;
skb_queue_head_init(&q->out);
......@@ -102,7 +102,7 @@ void sctp_outqueue_init(sctp_association_t *asoc, sctp_outqueue_t *q)
/* Free the outqueue structure and any related pending chunks.
* FIXME: Add SEND_FAILED support.
*/
void sctp_outqueue_teardown(sctp_outqueue_t *q)
void sctp_outq_teardown(struct sctp_outq *q)
{
sctp_transport_t *transport;
struct list_head *lchunk, *pos, *temp;
......@@ -131,29 +131,22 @@ void sctp_outqueue_teardown(sctp_outqueue_t *q)
}
/* Free the outqueue structure and any related pending chunks. */
void sctp_outqueue_free(sctp_outqueue_t *q)
void sctp_outq_free(struct sctp_outq *q)
{
/* Throw away leftover chunks. */
sctp_outqueue_teardown(q);
sctp_outq_teardown(q);
/* If we were kmalloc()'d, free the memory. */
if (q->malloced)
kfree(q);
}
/* Transmit any pending partial chunks. */
void sctp_force_outqueue(sctp_outqueue_t *q)
{
/* Do we really need this? */
/* BUG */
}
/* Put a new chunk in an SCTP_outqueue. */
int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
/* Put a new chunk in an sctp_outq. */
int sctp_outq_tail(struct sctp_outq *q, sctp_chunk_t *chunk)
{
int error = 0;
SCTP_DEBUG_PRINTK("sctp_push_outqueue(%p, %p[%s])\n",
SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
q, chunk, chunk && chunk->chunk_hdr ?
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
: "Illegal Chunk");
......@@ -184,8 +177,7 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
default:
SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
q, chunk,
chunk && chunk->chunk_hdr ?
q, chunk, chunk && chunk->chunk_hdr ?
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
: "Illegal Chunk");
......@@ -193,13 +185,13 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
q->empty = 0;
break;
};
} else {
} else
skb_queue_tail(&q->control, (struct sk_buff *) chunk);
}
if (error < 0)
return error;
error = sctp_flush_outqueue(q, 0);
error = sctp_outq_flush(q, 0);
return error;
}
......@@ -207,7 +199,7 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
/* Insert a chunk into the retransmit queue. Chunks on the retransmit
* queue are kept in order, based on the TSNs.
*/
void sctp_retransmit_insert(struct list_head *tlchunk, sctp_outqueue_t *q)
void sctp_retransmit_insert(struct list_head *tlchunk, struct sctp_outq *q)
{
struct list_head *rlchunk;
sctp_chunk_t *tchunk, *rchunk;
......@@ -230,9 +222,9 @@ void sctp_retransmit_insert(struct list_head *tlchunk, sctp_outqueue_t *q)
list_add_tail(tlchunk, &q->retransmit);
}
}
/* Mark all the eligible packets on a transport for retransmission. */
void sctp_retransmit_mark(sctp_outqueue_t *q, sctp_transport_t *transport,
void sctp_retransmit_mark(struct sctp_outq *q, sctp_transport_t *transport,
__u8 fast_retransmit)
{
struct list_head *lchunk, *ltemp;
......@@ -302,7 +294,7 @@ void sctp_retransmit_mark(sctp_outqueue_t *q, sctp_transport_t *transport,
/* Mark all the eligible packets on a transport for retransmission and force
* one packet out.
*/
void sctp_retransmit(sctp_outqueue_t *q, sctp_transport_t *transport,
void sctp_retransmit(struct sctp_outq *q, sctp_transport_t *transport,
__u8 fast_retransmit)
{
int error = 0;
......@@ -315,7 +307,7 @@ void sctp_retransmit(sctp_outqueue_t *q, sctp_transport_t *transport,
sctp_retransmit_mark(q, transport, fast_retransmit);
error = sctp_flush_outqueue(q, /* rtx_timeout */ 1);
error = sctp_outq_flush(q, /* rtx_timeout */ 1);
if (error)
q->asoc->base.sk->err = -error;
......@@ -323,14 +315,14 @@ void sctp_retransmit(sctp_outqueue_t *q, sctp_transport_t *transport,
/*
* Transmit DATA chunks on the retransmit queue. Upon return from
* sctp_flush_retran_queue() the packet 'pkt' may contain chunks which
* sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
* need to be transmitted by the caller.
* We assume that pkt->transport has already been set.
*
* The return value is a normal kernel error return value.
*/
static int sctp_flush_retran_queue(sctp_outqueue_t *q, sctp_packet_t *pkt,
int rtx_timeout, int *start_timer)
static int sctp_outq_flush_rtx(struct sctp_outq *q, sctp_packet_t *pkt,
int rtx_timeout, int *start_timer)
{
struct list_head *lqueue;
struct list_head *lchunk;
......@@ -374,6 +366,18 @@ static int sctp_flush_retran_queue(sctp_outqueue_t *q, sctp_packet_t *pkt,
continue;
}
#endif
/* Make sure that Gap Acked TSNs are not retransmitted. A
* simple approach is just to move such TSNs out of the
* way and into a 'transmitted' queue and skip to the
* next chunk.
*/
if (chunk->tsn_gap_acked) {
list_add_tail(lchunk, &transport->transmitted);
lchunk = sctp_list_dequeue(lqueue);
continue;
}
/* Attempt to append this chunk to the packet. */
status = (*q->append_output)(pkt, chunk);
......@@ -427,7 +431,7 @@ static int sctp_flush_retran_queue(sctp_outqueue_t *q, sctp_packet_t *pkt,
* queue. 'pos' points to the next chunk in the output queue after the
* chunk that is currently in the process of fragmentation.
*/
void sctp_xmit_frag(sctp_outqueue_t *q, struct sk_buff *pos,
void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
sctp_packet_t *packet, sctp_chunk_t *frag, __u32 tsn)
{
sctp_transport_t *transport = packet->transport;
......@@ -503,7 +507,7 @@ void sctp_xmit_frag(sctp_outqueue_t *q, struct sk_buff *pos,
* The argument 'frag' point to the first fragment and it holds the list
* of all the other fragments in the 'frag_list' field.
*/
void sctp_xmit_fragmented_chunks(sctp_outqueue_t *q, sctp_packet_t *packet,
void sctp_xmit_fragmented_chunks(struct sctp_outq *q, sctp_packet_t *packet,
sctp_chunk_t *frag)
{
sctp_association_t *asoc = frag->asoc;
......@@ -562,7 +566,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
if (!first_frag)
goto err;
first_frag->has_ssn = 1;
/* All the fragments are added to the frag_list of the first chunk. */
frag_list = &first_frag->frag_list;
......@@ -576,7 +580,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
ssn);
if (!frag)
goto err;
frag->has_ssn = 1;
/* Add the middle fragment to the first fragment's
* frag_list.
*/
......@@ -591,6 +595,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
SCTP_DATA_LAST_FRAG, ssn);
if (!frag)
goto err;
frag->has_ssn = 1;
/* Add the last fragment to the first fragment's frag_list. */
list_add_tail(&frag->frag_list, frag_list);
......@@ -620,7 +625,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
}
/*
* sctp_flush_outqueue - Try to flush an outqueue.
* sctp_outq_flush - Try to flush an outqueue.
*
* Description: Send everything in q which we legally can, subject to
* congestion limitations.
......@@ -629,7 +634,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
* locking concerns must be made. Today we use the sock lock to protect
* this function.
*/
int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
sctp_packet_t *packet;
sctp_packet_t singleton;
......@@ -648,7 +653,6 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
sctp_xmit_t status;
int error = 0;
int start_timer = 0;
sctp_ulpevent_t *event;
/* These transports have chunks to send. */
struct list_head transport_list;
......@@ -783,10 +787,8 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
(*q->config_output)(packet, vtag,
ecn_capable, ecne_handler);
retran:
error = sctp_flush_retran_queue(q,
packet,
rtx_timeout,
&start_timer);
error = sctp_outq_flush_rtx(q, packet,
rtx_timeout, &start_timer);
if (start_timer)
sctp_transport_reset_timers(transport);
......@@ -813,15 +815,14 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
*/
if (chunk->sinfo.sinfo_stream >=
asoc->c.sinit_num_ostreams) {
struct sctp_ulpevent *ev;
/* Generate a SEND FAILED event. */
event = sctp_ulpevent_make_send_failed(asoc,
chunk, SCTP_DATA_UNSENT,
SCTP_ERROR_INV_STRM,
GFP_ATOMIC);
if (event) {
sctp_ulpqueue_tail_event(&asoc->ulpq,
event);
}
ev = sctp_ulpevent_make_send_failed(asoc,
chunk, SCTP_DATA_UNSENT,
SCTP_ERROR_INV_STRM, GFP_ATOMIC);
if (ev)
sctp_ulpq_tail_event(&asoc->ulpq, ev);
/* Free the chunk. This chunk is not on any
* list yet, just free it.
......@@ -830,6 +831,12 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
continue;
}
/* Now do delayed assignment of SSN. This will
* probably change again when we start supporting
* large (> approximately 2^16) size messages.
*/
sctp_chunk_assign_ssn(chunk);
/* If there is a specified transport, use it.
* Otherwise, we want to use the active path.
*/
......@@ -878,7 +885,7 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
/* We could not append this chunk, so put
* the chunk back on the output queue.
*/
SCTP_DEBUG_PRINTK("sctp_flush_outqueue: could "
SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
"not transmit TSN: 0x%x, status: %d\n",
ntohl(chunk->subh.data_hdr->tsn),
status);
......@@ -966,12 +973,12 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
}
/* Set the various output handling callbacks. */
int sctp_outqueue_set_output_handlers(sctp_outqueue_t *q,
sctp_outqueue_ohandler_init_t init,
sctp_outqueue_ohandler_config_t config,
sctp_outqueue_ohandler_t append,
sctp_outqueue_ohandler_t build,
sctp_outqueue_ohandler_force_t force)
int sctp_outq_set_output_handlers(struct sctp_outq *q,
sctp_outq_ohandler_init_t init,
sctp_outq_ohandler_config_t config,
sctp_outq_ohandler_t append,
sctp_outq_ohandler_t build,
sctp_outq_ohandler_force_t force)
{
q->init_output = init;
q->config_output = config;
......@@ -1028,14 +1035,14 @@ static __u32 sctp_highest_new_tsn(sctp_sackhdr_t *sack,
}
return highest_new_tsn;
}
/* This is where we REALLY process a SACK.
*
* Process the sack against the outqueue. Mostly, this just frees
* Process the SACK against the outqueue. Mostly, this just frees
* things off the transmitted queue.
*/
int sctp_sack_outqueue(sctp_outqueue_t *q, sctp_sackhdr_t *sack)
int sctp_outq_sack(struct sctp_outq *q, sctp_sackhdr_t *sack)
{
sctp_association_t *asoc = q->asoc;
sctp_transport_t *transport;
......@@ -1053,7 +1060,7 @@ int sctp_sack_outqueue(sctp_outqueue_t *q, sctp_sackhdr_t *sack)
sack_ctsn = ntohl(sack->cum_tsn_ack);
/* Get the highest TSN in the sack. */
highest_tsn = sack_ctsn +
ntohs(frags[ntohs(sack->num_gap_ack_blocks) - 1].gab.end);
if (TSN_lt(asoc->highest_sacked, highest_tsn)) {
......@@ -1139,7 +1146,7 @@ int sctp_sack_outqueue(sctp_outqueue_t *q, sctp_sackhdr_t *sack)
}
/* Is the outqueue empty? */
int sctp_outqueue_is_empty(const sctp_outqueue_t *q)
int sctp_outq_is_empty(const struct sctp_outq *q)
{
return q->empty;
}
......@@ -1161,7 +1168,7 @@ int sctp_outqueue_is_empty(const sctp_outqueue_t *q)
* transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
* KEPT TSN6-TSN7, etc.
*/
static void sctp_check_transmitted(sctp_outqueue_t *q,
static void sctp_check_transmitted(struct sctp_outq *q,
struct list_head *transmitted_queue,
sctp_transport_t *transport,
sctp_sackhdr_t *sack,
......
......@@ -82,7 +82,7 @@ struct sock *sctp_get_ctl_sock(void)
}
/* Set up the proc fs entry for the SCTP protocol. */
void sctp_proc_init(void)
__init void sctp_proc_init(void)
{
if (!proc_net_sctp) {
struct proc_dir_entry *ent;
......@@ -95,7 +95,7 @@ void sctp_proc_init(void)
}
/* Clean up the proc fs entry for the SCTP protocol. */
void sctp_proc_exit(void)
__exit void sctp_proc_exit(void)
{
if (proc_net_sctp) {
proc_net_sctp = NULL;
......@@ -688,7 +688,7 @@ static void cleanup_sctp_mibs(void)
}
/* Initialize the universe into something sensible. */
int sctp_init(void)
__init int sctp_init(void)
{
int i;
int status = 0;
......@@ -750,13 +750,9 @@ int sctp_init(void)
/* Implementation specific variables. */
/* Initialize default stream count setup information.
* Note: today the stream accounting data structures are very
* fixed size, so one really does need to make sure that these have
* upper/lower limits when changing.
*/
sctp_proto.max_instreams = SCTP_MAX_STREAM;
sctp_proto.max_outstreams = SCTP_MAX_STREAM;
/* Initialize default stream count setup information. */
sctp_proto.max_instreams = SCTP_DEFAULT_INSTREAMS;
sctp_proto.max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
/* Allocate and initialize the association hash table. */
sctp_proto.assoc_hashsize = 4096;
......@@ -852,7 +848,7 @@ int sctp_init(void)
}
/* Exit handler for the SCTP protocol. */
void sctp_exit(void)
__exit void sctp_exit(void)
{
/* BUG. This should probably do something useful like clean
* up all the remaining associations and all that memory.
......@@ -889,4 +885,3 @@ module_exit(sctp_exit);
MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>");
MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
MODULE_LICENSE("GPL");
......@@ -82,12 +82,12 @@ static const sctp_supported_addrs_param_t sat_param = {
/* gcc 3.2 doesn't allow initialization of zero-length arrays. So the above
* structure is split and the address types array is initialized using a
* fixed length array.
*/
static const __u16 sat_addr_types[2] = {
SCTP_PARAM_IPV4_ADDRESS,
SCTP_V6(SCTP_PARAM_IPV6_ADDRESS,)
};
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
......@@ -540,7 +540,7 @@ sctp_chunk_t *sctp_make_datafrag_empty(sctp_association_t *asoc,
dp.stream = htons(sinfo->sinfo_stream);
dp.ppid = htonl(sinfo->sinfo_ppid);
dp.ssn = htons(ssn);
/* Set the flags for an unordered send. */
if (sinfo->sinfo_flags & MSG_UNORDERED)
flags |= SCTP_DATA_UNORDERED;
......@@ -552,7 +552,7 @@ sctp_chunk_t *sctp_make_datafrag_empty(sctp_association_t *asoc,
retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
nodata:
return retval;
}
......@@ -607,12 +607,12 @@ sctp_chunk_t *sctp_make_data_empty(sctp_association_t *asoc,
* ordered send and a new ssn is generated. The flags field is set
* in the inner routine - sctp_make_datafrag_empty().
*/
if (sinfo->sinfo_flags & MSG_UNORDERED) {
ssn = 0;
} else {
ssn = __sctp_association_get_next_ssn(asoc,
sinfo->sinfo_stream);
}
// if (sinfo->sinfo_flags & MSG_UNORDERED) {
ssn = 0;
// } else {
// ssn = __sctp_association_get_next_ssn(asoc,
// sinfo->sinfo_stream);
// }
return sctp_make_datafrag_empty(asoc, sinfo, data_len, flags, ssn);
}
......@@ -1013,6 +1013,7 @@ sctp_chunk_t *sctp_chunkify(struct sk_buff *skb, const sctp_association_t *asoc,
retval->asoc = (sctp_association_t *) asoc;
retval->num_times_sent = 0;
retval->has_tsn = 0;
retval->has_ssn = 0;
retval->rtt_in_progress = 0;
retval->sent_at = jiffies;
retval->singleton = 1;
......@@ -1214,6 +1215,29 @@ int sctp_user_addto_chunk(sctp_chunk_t *chunk, int len, struct iovec *data)
return err;
}
/* Helper function to assign an SSN if needed. This assumes that both
* the data_hdr and association have already been assigned.
*/
void sctp_chunk_assign_ssn(sctp_chunk_t *chunk)
{
__u16 ssn;
__u16 sid;
if (chunk->has_ssn)
return;
/* This is the last possible instant to assign a SSN. */
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
ssn = 0;
} else {
sid = ntohs(chunk->subh.data_hdr->stream);
ssn = htons(__sctp_association_get_next_ssn(chunk->asoc, sid));
}
chunk->subh.data_hdr->ssn = ssn;
chunk->has_ssn = 1;
}
/* Helper function to assign a TSN if needed. This assumes that both
* the data_hdr and association have already been assigned.
*/
......@@ -1654,6 +1678,7 @@ int sctp_verify_init(const sctp_association_t *asoc,
/* Unpack the parameters in an INIT packet into an association.
* Returns 0 on failure, else success.
* FIXME: This is an association method.
*/
int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
const union sctp_addr *peer_addr,
......@@ -1710,6 +1735,12 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
ntohs(peer_init->init_hdr.num_inbound_streams);
}
if (asoc->c.sinit_max_instreams >
ntohs(peer_init->init_hdr.num_outbound_streams)) {
asoc->c.sinit_max_instreams =
ntohs(peer_init->init_hdr.num_outbound_streams);
}
/* Copy Initiation tag from INIT to VT_peer in cookie. */
asoc->c.peer_vtag = asoc->peer.i.init_tag;
......@@ -1738,6 +1769,21 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE,
asoc->peer.i.initial_tsn);
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
* The stream sequence number in all the streams shall start
* from 0 when the association is established. Also, when the
* stream sequence number reaches the value 65535 the next
* stream sequence number shall be set to 0.
*/
/* Allocate storage for the negotiated streams. */
asoc->ssnmap = sctp_ssnmap_new(asoc->peer.i.num_outbound_streams,
asoc->c.sinit_num_ostreams,
priority);
if (!asoc->ssnmap)
goto nomem_ssnmap;
/* ADDIP Section 4.1 ASCONF Chunk Procedures
*
* When an endpoint has an ASCONF signaled change to be sent to the
......@@ -1751,6 +1797,7 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1;
return 1;
nomem_ssnmap:
clean_up:
/* Release the transport structures. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
......
......@@ -296,7 +296,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_PURGE_OUTQUEUE:
sctp_outqueue_teardown(&asoc->outqueue);
sctp_outq_teardown(&asoc->outqueue);
break;
case SCTP_CMD_DELETE_TCB:
......@@ -395,9 +395,9 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
command->obj.ptr,
"ulpq:",
&asoc->ulpq);
sctp_ulpqueue_tail_data(&asoc->ulpq,
command->obj.ptr,
GFP_ATOMIC);
sctp_ulpq_tail_data(&asoc->ulpq,
command->obj.ptr,
GFP_ATOMIC);
break;
case SCTP_CMD_EVENT_ULP:
......@@ -407,14 +407,14 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
command->obj.ptr,
"ulpq:",
&asoc->ulpq);
sctp_ulpqueue_tail_event(&asoc->ulpq,
command->obj.ptr);
sctp_ulpq_tail_event(&asoc->ulpq,
command->obj.ptr);
break;
case SCTP_CMD_REPLY:
/* Send a chunk to our peer. */
error = sctp_push_outqueue(&asoc->outqueue,
command->obj.ptr);
error = sctp_outq_tail(&asoc->outqueue,
command->obj.ptr);
break;
case SCTP_CMD_SEND_PKT:
......@@ -432,7 +432,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_TRANSMIT:
/* Kick start transmission. */
error = sctp_flush_outqueue(&asoc->outqueue, 0);
error = sctp_outq_flush(&asoc->outqueue, 0);
break;
case SCTP_CMD_ECN_CE:
......@@ -599,7 +599,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_RTO_PENDING:
t = command->obj.transport;
t->rto_pending = 1;
break;
default:
......@@ -743,7 +743,7 @@ int sctp_gen_sack(sctp_association_t *asoc, int force, sctp_cmd_seq_t *commands)
asoc->peer.sack_needed = 0;
asoc->peer.next_dup_tsn = 0;
error = sctp_push_outqueue(&asoc->outqueue, sack);
error = sctp_outq_tail(&asoc->outqueue, sack);
/* Stop the SACK timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
......@@ -1095,7 +1095,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
* inside the cookie. In reality, this is only used for INIT-ACK processing
* since all other cases use "temporary" associations and can do all
* their work in statefuns directly.
*/
static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
sctp_association_t *asoc,
......@@ -1134,8 +1134,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
*/
list_for_each(pos, &asoc->peer.transport_addr_list) {
t = list_entry(pos, sctp_transport_t, transports);
if (!mod_timer(&t->hb_timer,
t->hb_interval + t->rto + jiffies)) {
if (!mod_timer(&t->hb_timer, t->hb_interval + t->rto +
sctp_jitter(t->rto) + jiffies)) {
sctp_transport_hold(t);
}
}
......@@ -1147,7 +1147,8 @@ static void sctp_cmd_hb_timers_update(sctp_cmd_seq_t *cmds,
sctp_transport_t *t)
{
/* Update the heartbeat timer. */
if (!mod_timer(&t->hb_timer, t->hb_interval + t->rto + jiffies))
if (!mod_timer(&t->hb_timer, t->hb_interval + t->rto +
sctp_jitter(t->rto) + jiffies))
sctp_transport_hold(t);
}
......@@ -1218,7 +1219,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
{
int err;
if (sctp_sack_outqueue(&asoc->outqueue, sackh)) {
if (sctp_outq_sack(&asoc->outqueue, sackh)) {
/* There are no more TSNs awaiting SACK. */
err = sctp_do_sm(SCTP_EVENT_T_OTHER,
SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
......@@ -1228,7 +1229,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
/* Windows may have opened, so we need
* to check if we have DATA to transmit
*/
err = sctp_flush_outqueue(&asoc->outqueue, 0);
err = sctp_outq_flush(&asoc->outqueue, 0);
}
return err;
......
......@@ -191,7 +191,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
int len;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, responding with an ABORT.
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_ootb(ep, asoc, type, arg, commands);
......@@ -506,7 +506,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
sctp_chunk_t *err_chk_p;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, responding with an ABORT.
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_ootb(ep, asoc, type, arg, commands);
......@@ -1337,7 +1337,7 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const sctp_endpoint_t *ep,
/* Unexpected COOKIE-ECHO handlerfor peer restart (Table 2, action 'A')
/* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
*
* Section 5.2.4
* A) In this case, the peer may have restarted.
......@@ -2030,7 +2030,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const sctp_endpoint_t *ep,
SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outqueue_is_empty(&asoc->outqueue)) {
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_shutdown_ack(ep, asoc, type,
arg, commands);
}
......@@ -3429,7 +3429,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(const sctp_endpoint_t *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outqueue_is_empty(&asoc->outqueue)) {
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
arg, commands);
}
......@@ -4203,7 +4203,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outqueue_is_empty(&asoc->outqueue)) {
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
arg, commands);
}
......
......@@ -49,7 +49,7 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
sctp_sm_table_entry_t bug = {
static sctp_sm_table_entry_t bug = {
.fn = sctp_sf_bug,
.name = "sctp_sf_bug"
};
......
/* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 International Business Machines, Corp.
* Copyright (c) 2001-2002 Intel Corp.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001-2003 Intel Corp.
* Copyright (c) 2001-2002 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
......@@ -47,6 +47,7 @@
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <samudrala@us.ibm.com>
* Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -131,7 +132,7 @@ int sctp_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
static long sctp_get_port_local(struct sock *, union sctp_addr *);
/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_opt *opt,
union sctp_addr *addr, int len)
{
struct sctp_af *af;
......@@ -754,8 +755,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
*/
if ((SCTP_SOCKET_UDP_HIGH_BANDWIDTH != sp->type) && msg->msg_name) {
int msg_namelen = msg->msg_namelen;
err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
msg_namelen);
if (err)
return err;
......@@ -806,7 +807,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
if (!asoc) {
/* If we could not find a matching association on the
* endpoint, make sure that there is no peeled-off
* endpoint, make sure that there is no peeled-off
* association on another socket.
*/
if (sctp_endpoint_is_peeled_off(ep, &to)) {
......@@ -868,13 +869,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock;
}
} else {
/* Check against the defaults. */
if (sinfo->sinfo_stream >=
sp->initmsg.sinit_num_ostreams) {
err = -EINVAL;
goto out_unlock;
}
/* Check against the requested. */
if (sinfo->sinfo_stream >=
sinit->sinit_num_ostreams) {
......@@ -915,14 +909,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
sinit->sinit_num_ostreams;
}
if (sinit->sinit_max_instreams) {
if (sinit->sinit_max_instreams <=
SCTP_MAX_STREAM) {
asoc->c.sinit_max_instreams =
sinit->sinit_max_instreams;
} else {
asoc->c.sinit_max_instreams =
SCTP_MAX_STREAM;
}
asoc->c.sinit_max_instreams =
sinit->sinit_max_instreams;
}
if (sinit->sinit_max_attempts) {
asoc->max_init_attempts
......@@ -1086,23 +1074,30 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
* frag_list. len specifies the total amount of data that needs to be removed.
 * When 'len' bytes can be removed from the skb, it returns 0.
 * If 'len' exceeds the total skb length, it returns the number of bytes that
* could not be removed.
*/
* could not be removed.
*/
static int sctp_skb_pull(struct sk_buff *skb, int len)
{
struct sk_buff *list;
int skb_len = skb_headlen(skb);
int rlen;
if (len <= skb->len) {
if (len <= skb_len) {
__skb_pull(skb, len);
return 0;
}
len -= skb->len;
__skb_pull(skb, skb->len);
len -= skb_len;
__skb_pull(skb, skb_len);
for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
len = sctp_skb_pull(list, len);
if (!len)
rlen = sctp_skb_pull(list, len);
skb->len -= (len-rlen);
skb->data_len -= (len-rlen);
if (!rlen)
return 0;
len = rlen;
}
return len;
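The routine above recurses into each fragment's own frag_list while the caller adjusts the parent skb's len and data_len by whatever was actually removed. The bookkeeping is easier to see flattened out; below is a minimal user-space sketch of the same pull-from-fragment-chain contract, with a hypothetical struct frag standing in for struct sk_buff (names and layout are illustrative only, not the kernel API):

/* Illustrative sketch only -- not part of the kernel diff. */
#include <stdio.h>

/* Simplified stand-in for an skb: a byte count plus a fragment chain. */
struct frag {
	int len;            /* bytes held in this fragment */
	struct frag *next;  /* next fragment in the frag_list */
};

/* Remove 'len' bytes from the front of the chain. Returns 0 when all
 * bytes were removed, otherwise the number of bytes that could not be
 * removed -- the same contract as sctp_skb_pull().
 */
static int frag_pull(struct frag *f, int len)
{
	struct frag *p;

	for (p = f; p; p = p->next) {
		if (len <= p->len) {
			p->len -= len;
			return 0;
		}
		len -= p->len;
		p->len = 0;
	}
	return len;
}

int main(void)
{
	struct frag c = { 5, NULL }, b = { 10, &c }, a = { 20, &b };

	printf("%d\n", frag_pull(&a, 25));  /* 0: 20 from a, 5 from b */
	printf("%d\n", frag_pull(&a, 25));  /* 15: only 10 bytes remained */
	return 0;
}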
......@@ -1130,7 +1125,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
{
sctp_ulpevent_t *event = NULL;
sctp_opt_t *sp = sctp_sk(sk);
struct sk_buff *skb, *list;
struct sk_buff *skb;
int copied;
int err = 0;
int skb_len;
......@@ -1152,10 +1147,8 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
/* Get the total length of the skb including any skb's in the
* frag_list.
*/
*/
skb_len = skb->len;
for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
skb_len += list->len;
copied = skb_len;
if (copied > len)
......@@ -1190,12 +1183,12 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
/* If skb's length exceeds the user's buffer, update the skb and
* push it back to the receive_queue so that the next call to
* recvmsg() will return the remaining data. Don't set MSG_EOR.
* Otherwise, set MSG_EOR indicating the end of a message.
* Otherwise, set MSG_EOR indicating the end of a message.
*/
if (skb_len > copied) {
msg->msg_flags &= ~MSG_EOR;
if (flags & MSG_PEEK)
goto out_free;
goto out_free;
sctp_skb_pull(skb, copied);
skb_queue_head(&sk->receive_queue, skb);
goto out;
......@@ -1463,7 +1456,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
sp = sctp_sk(sk);
ep = sp->ep;
/* connect() cannot be done on a peeled-off socket. */
/* connect() cannot be done on a peeled-off socket. */
if (SCTP_SOCKET_UDP_HIGH_BANDWIDTH == sp->type) {
err = -EISCONN;
goto out_unlock;
......@@ -1471,7 +1464,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
err = sctp_verify_addr(sk, (union sctp_addr *)uaddr, addr_len);
if (err)
goto out_unlock;
goto out_unlock;
memcpy(&to, uaddr, addr_len);
to.v4.sin_port = ntohs(to.v4.sin_port);
......@@ -1479,7 +1472,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
if (asoc) {
if (asoc->state >= SCTP_STATE_ESTABLISHED)
err = -EISCONN;
err = -EISCONN;
else
err = -EALREADY;
goto out_unlock;
......@@ -1517,7 +1510,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
err = sctp_primitive_ASSOCIATE(asoc, NULL);
if (err < 0) {
sctp_association_free(asoc);
sctp_association_free(asoc);
goto out_unlock;
}
......@@ -1915,7 +1908,7 @@ static inline int sctp_getsockopt_get_peer_addr_params(struct sock *sk,
* before this address shall be considered unreachable.
*/
params.spp_pathmaxrxt = trans->error_threshold;
if (copy_to_user(optval, &params, len))
return -EFAULT;
*optlen = len;
......@@ -1932,6 +1925,166 @@ static inline int sctp_getsockopt_initmsg(struct sock *sk, int len, char *optval
return 0;
}
static inline int sctp_getsockopt_get_peer_addrs_num(struct sock *sk, int len,
char *optval, int *optlen)
{
sctp_assoc_t id;
sctp_association_t *asoc;
struct list_head *pos;
int cnt = 0;
if (len != sizeof(sctp_assoc_t))
return -EINVAL;
if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
return -EFAULT;
/*
* For UDP-style sockets, id specifies the association to query.
*/
asoc = sctp_id2assoc(sk, id);
if (!asoc)
return -EINVAL;
list_for_each(pos, &asoc->peer.transport_addr_list) {
cnt++;
}
if (copy_to_user(optval, &cnt, sizeof(int)))
return -EFAULT;
return 0;
}
static inline int sctp_getsockopt_get_peer_addrs(struct sock *sk, int len,
char *optval, int *optlen)
{
sctp_association_t *asoc;
struct list_head *pos;
int cnt = 0;
struct sctp_getaddrs getaddrs;
sctp_transport_t *from;
struct sockaddr_storage *to;
if (len != sizeof(struct sctp_getaddrs))
return -EINVAL;
if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
return -EFAULT;
if (getaddrs.addr_num <= 0)
	return -EINVAL;
/*
* For UDP-style sockets, id specifies the association to query.
*/
asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
if (!asoc)
return -EINVAL;
to = getaddrs.addrs;
list_for_each(pos, &asoc->peer.transport_addr_list) {
from = list_entry(pos, sctp_transport_t, transports);
if (copy_to_user(to, &from->ipaddr, sizeof(from->ipaddr)))
return -EFAULT;
to++;
cnt++;
if (cnt >= getaddrs.addr_num)
	break;
}
getaddrs.addr_num = cnt;
if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs)))
return -EFAULT;
return 0;
}
static inline int sctp_getsockopt_get_local_addrs_num(struct sock *sk, int len,
char *optval, int *optlen)
{
sctp_assoc_t id;
sctp_bind_addr_t *bp;
sctp_association_t *asoc;
struct list_head *pos;
int cnt = 0;
if (len != sizeof(sctp_assoc_t))
return -EINVAL;
if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
return -EFAULT;
/*
* For UDP-style sockets, id specifies the association to query.
* If the id field is set to the value '0' then the locally bound
* addresses are returned without regard to any particular
* association.
*/
if (0 == id) {
bp = &sctp_sk(sk)->ep->base.bind_addr;
} else {
asoc = sctp_id2assoc(sk, id);
if (!asoc)
return -EINVAL;
bp = &asoc->base.bind_addr;
}
list_for_each(pos, &bp->address_list) {
cnt++;
}
if (copy_to_user(optval, &cnt, sizeof(int)))
return -EFAULT;
return 0;
}
static inline int sctp_getsockopt_get_local_addrs(struct sock *sk, int len,
char *optval, int *optlen)
{
sctp_bind_addr_t *bp;
sctp_association_t *asoc;
struct list_head *pos;
int cnt = 0;
struct sctp_getaddrs getaddrs;
struct sockaddr_storage_list *from;
struct sockaddr_storage *to;
if (len != sizeof(struct sctp_getaddrs))
return -EINVAL;
if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
return -EFAULT;
if (getaddrs.addr_num <= 0)
	return -EINVAL;
/*
* For UDP-style sockets, id specifies the association to query.
* If the id field is set to the value '0' then the locally bound
* addresses are returned without regard to any particular
* association.
*/
if (0 == getaddrs.assoc_id) {
bp = &sctp_sk(sk)->ep->base.bind_addr;
} else {
asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
if (!asoc)
return -EINVAL;
bp = &asoc->base.bind_addr;
}
to = getaddrs.addrs;
list_for_each(pos, &bp->address_list) {
from = list_entry(pos,
struct sockaddr_storage_list,
list);
if (copy_to_user(to, &from->a, sizeof(from->a)))
return -EFAULT;
to++;
cnt++;
if (cnt >= getaddrs.addr_num)
	break;
}
getaddrs.addr_num = cnt;
if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs)))
return -EFAULT;
return 0;
}
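Taken together, the four new handlers suggest a two-step pattern for applications: query the address count first, size a buffer, then fetch the addresses through struct sctp_getaddrs. Here is a hedged user-space sketch of that pattern. It assumes the lksctp user headers export these option names and that SOL_SCTP resolves to IPPROTO_SCTP; both are assumptions about the installed headers, not anything shown in this diff.

/* Illustrative user-space sketch only -- not part of the kernel diff. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <net/sctp/user.h>	/* struct sctp_getaddrs, SCTP_GET_* options */

#ifndef SOL_SCTP
#define SOL_SCTP 132		/* assumption: IPPROTO_SCTP */
#endif

static int dump_peer_addrs(int fd, sctp_assoc_t id)
{
	union {
		sctp_assoc_t id;	/* what the _NUM option reads */
		int cnt;		/* what it writes back */
	} num;
	struct sctp_getaddrs ga;
	struct sockaddr_storage *addrs;
	socklen_t len = sizeof(sctp_assoc_t);

	/* Step 1: how many transport addresses does the peer have? */
	num.id = id;
	if (getsockopt(fd, SOL_SCTP, SCTP_GET_PEER_ADDRS_NUM, &num, &len) < 0)
		return -1;

	/* Step 2: fetch them all in one call. */
	addrs = calloc(num.cnt, sizeof(*addrs));
	if (!addrs)
		return -1;
	ga.assoc_id = id;
	ga.addr_num = num.cnt;
	ga.addrs = addrs;
	len = sizeof(ga);
	if (getsockopt(fd, SOL_SCTP, SCTP_GET_PEER_ADDRS, &ga, &len) < 0) {
		free(addrs);
		return -1;
	}
	printf("peer has %d transport address(es)\n", ga.addr_num);
	free(addrs);
	return 0;
}

The call needs an established association, so there is no self-contained main() here. SCTP_GET_LOCAL_ADDRS_NUM/SCTP_GET_LOCAL_ADDRS follow the same shape, with the extra rule above that an assoc_id of 0 returns the endpoint's bound addresses.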
SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
char *optval, int *optlen)
{
......@@ -1989,6 +2142,26 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
break;
case SCTP_GET_PEER_ADDRS_NUM:
retval = sctp_getsockopt_get_peer_addrs_num(sk, len, optval,
optlen);
break;
case SCTP_GET_LOCAL_ADDRS_NUM:
retval = sctp_getsockopt_get_local_addrs_num(sk, len, optval,
optlen);
break;
case SCTP_GET_PEER_ADDRS:
retval = sctp_getsockopt_get_peer_addrs(sk, len, optval,
optlen);
break;
case SCTP_GET_LOCAL_ADDRS:
retval = sctp_getsockopt_get_local_addrs(sk, len, optval,
optlen);
break;
default:
retval = -ENOPROTOOPT;
break;
......@@ -2029,7 +2202,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
sctp_protocol_t *sctp = sctp_get_protocol();
unsigned short snum;
int ret;
/* NOTE: Remember to put this back to net order. */
addr->v4.sin_port = ntohs(addr->v4.sin_port);
snum = addr->v4.sin_port;
......@@ -2098,7 +2271,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
}
}
if (pp != NULL && pp->sk != NULL) {
/* We had a port hash table hit - there is an
* available port (pp != NULL) and it is being
......@@ -2129,7 +2302,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
if (sk_reuse && sk2->reuse)
continue;
if (sctp_bind_addr_match(&ep2->base.bind_addr, addr,
sctp_sk(sk)))
goto found;
......@@ -2187,7 +2360,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
}
/* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
* port is requested.
* port is requested.
*/
static int sctp_get_port(struct sock *sk, unsigned short snum)
{
......@@ -2657,10 +2830,10 @@ static int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, int len)
return -EINVAL;
/* Is this a valid SCTP address? */
if (!af->addr_valid((union sctp_addr *)addr))
if (!af->addr_valid((union sctp_addr *)addr))
return -EINVAL;
return 0;
return 0;
}
/* Get the sndbuf space available at the time on the association. */
......
/* SCTP kernel reference Implementation
* Copyright (c) 2003 International Business Machines, Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
* These functions manipulate the SCTP SSN tracker.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#include <linux/types.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Storage size needed for the map includes the two stream headers and
 * then one SSN slot per inbound and outbound stream.
*/
static inline size_t sctp_ssnmap_size(__u16 in, __u16 out)
{
return sizeof(struct sctp_ssnmap) + (in + out) * sizeof(__u16);
}
/* Create a new sctp_ssnmap.
* Allocate room to track the SSNs of 'in' inbound and 'out' outbound streams.
*/
struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, int priority)
{
struct sctp_ssnmap *retval;
retval = kmalloc(sctp_ssnmap_size(in, out), priority);
if (!retval)
goto fail;
if (!sctp_ssnmap_init(retval, in, out))
goto fail_map;
retval->malloced = 1;
SCTP_DBG_OBJCNT_INC(ssnmap);
return retval;
fail_map:
kfree(retval);
fail:
return NULL;
}
/* Initialize a block of memory as a ssnmap. */
struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
__u16 out)
{
memset(map, 0x00, sctp_ssnmap_size(in, out));
/* Start 'in' stream just after the map header. */
map->in.ssn = (__u16 *)&map[1];
map->in.len = in;
/* Start 'out' stream just after 'in'. */
map->out.ssn = &map->in.ssn[in];
map->out.len = out;
return map;
}
/* Clear out the ssnmap streams. */
void sctp_ssnmap_clear(struct sctp_ssnmap *map)
{
size_t size;
size = (map->in.len + map->out.len) * sizeof(__u16);
memset(map->in.ssn, 0x00, size);
}
/* Dispose of a ssnmap. */
void sctp_ssnmap_free(struct sctp_ssnmap *map)
{
if (map && map->malloced) {
kfree(map);
SCTP_DBG_OBJCNT_DEC(ssnmap);
}
}
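The layout trick in this file is worth seeing end to end: a single allocation carries the map header, then the 'in' SSN slots, then the 'out' slots, and each slot simply holds the next SSN for that stream. Below is a minimal user-space model of that layout; the ssn_peek/ssn_next helpers are illustrative stand-ins for the kernel's accessors used later in this patch, not the kernel definitions themselves.

/* Illustrative sketch only -- not part of the kernel diff. */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned short u16;

struct stream {
	u16 *ssn;	/* one slot per stream: next SSN for that stream */
	u16 len;	/* number of streams */
};

struct ssnmap {
	struct stream in;
	struct stream out;
	/* (in.len + out.len) u16 slots follow this header */
};

/* One allocation: header plus both slot arrays, mirroring
 * sctp_ssnmap_size()/sctp_ssnmap_init() above.
 */
static struct ssnmap *ssnmap_new(u16 in, u16 out)
{
	struct ssnmap *map;

	map = calloc(1, sizeof(*map) + (in + out) * sizeof(u16));
	if (!map)
		return NULL;
	map->in.ssn = (u16 *)&map[1];	 /* 'in' slots follow the header */
	map->in.len = in;
	map->out.ssn = &map->in.ssn[in]; /* 'out' slots follow 'in' */
	map->out.len = out;
	return map;
}

static u16 ssn_peek(struct stream *s, u16 sid) { return s->ssn[sid]; }
static void ssn_next(struct stream *s, u16 sid) { s->ssn[sid]++; }

int main(void)
{
	struct ssnmap *map = ssnmap_new(4, 4);

	if (!map)
		return 1;
	ssn_next(&map->in, 2);	/* SSN 0 on stream 2 has been consumed */
	printf("stream 2 now expects SSN %u\n", ssn_peek(&map->in, 2));
	free(map);
	return 0;
}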
......@@ -42,6 +42,7 @@
* Xingang Guo <xingang.guo@intel.com>
* Hui Huang <hui.huang@nokia.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -184,8 +185,9 @@ void sctp_transport_reset_timers(sctp_transport_t *transport)
}
/* When a data chunk is sent, reset the heartbeat interval. */
if (!mod_timer(&transport->hb_timer,
transport->hb_interval + transport->rto + jiffies))
if (!mod_timer(&transport->hb_timer, transport->hb_interval +
transport->rto + sctp_jitter(transport->rto) +
jiffies))
sctp_transport_hold(transport);
}
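The point of folding sctp_jitter() into the expiry is to keep many transports from heartbeating in lock-step. A rough user-space model of the rearm arithmetic follows; the jitter() stand-in just picks a value within about half of rto either way, which is an assumption for illustration rather than the kernel helper's definition.

/* Illustrative sketch only -- not part of the kernel diff. */
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for sctp_jitter(): some value in roughly
 * [-rto/2, +rto/2). The exact distribution is beside the point.
 */
static long jitter(unsigned long rto)
{
	return (long)(rand() % rto) - (long)(rto / 2);
}

int main(void)
{
	unsigned long jiffies = 100000;	/* pretend current tick count */
	unsigned long hb_interval = 30000;
	unsigned long rto = 3000;
	int i;

	/* Each rearm lands at a slightly different tick, so transports
	 * started together drift apart instead of heartbeating as one.
	 */
	for (i = 0; i < 3; i++)
		printf("expires at tick %lu\n",
		       hb_interval + rto + jitter(rto) + jiffies);
	return 0;
}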
......@@ -202,7 +204,7 @@ void sctp_transport_set_owner(sctp_transport_t *transport,
/* Caches the dst entry for a transport's destination address and an optional
* source address.
*/
*/
void sctp_transport_route(sctp_transport_t *transport, union sctp_addr *saddr,
struct sctp_opt *opt)
{
......@@ -245,10 +247,10 @@ void sctp_transport_route(sctp_transport_t *transport, union sctp_addr *saddr,
goto out_unlock;
}
sctp_read_unlock(addr_lock);
/* None of the bound addresses match the source address of the
* dst. So release it.
*/
*/
dst_release(dst);
}
......
......@@ -606,9 +606,9 @@ sctp_ulpevent_t *sctp_ulpevent_make_shutdown_event(
sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
sctp_chunk_t *chunk, int priority)
{
sctp_ulpevent_t *event;
sctp_ulpevent_t *event, *levent;
struct sctp_sndrcvinfo *info;
struct sk_buff *skb;
struct sk_buff *skb, *list;
size_t padding, len;
/* Clone the original skb, sharing the data. */
......@@ -647,6 +647,16 @@ sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
event->malloced = 1;
for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
sctp_ulpevent_set_owner_r(list, asoc);
/* Initialize event with flags 0. */
levent = sctp_ulpevent_init(event, skb, 0);
if (!levent)
goto fail_init;
levent->malloced = 1;
}
info = (struct sctp_sndrcvinfo *) &event->sndrcvinfo;
/* Sockets API Extensions for SCTP
......@@ -764,6 +774,7 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
sctp_ulpevent_t *event;
sctp_chunk_t *sack;
struct timer_list *timer;
int skb_len = skb_headlen(skb);
/* Current stack structures assume that the rcv buffer is
* per socket. For UDP style sockets this is not true as
......@@ -774,23 +785,23 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
event = (sctp_ulpevent_t *) skb->cb;
asoc = event->asoc;
if (asoc->rwnd_over) {
if (asoc->rwnd_over >= skb->len) {
asoc->rwnd_over -= skb->len;
if (asoc->rwnd_over >= skb_len) {
asoc->rwnd_over -= skb_len;
} else {
asoc->rwnd += (skb->len - asoc->rwnd_over);
asoc->rwnd += (skb_len - asoc->rwnd_over);
asoc->rwnd_over = 0;
}
} else {
asoc->rwnd += skb->len;
asoc->rwnd += skb_len;
}
SCTP_DEBUG_PRINTK("rwnd increased by %d to (%u, %u) - %u\n",
skb->len, asoc->rwnd, asoc->rwnd_over, asoc->a_rwnd);
skb_len, asoc->rwnd, asoc->rwnd_over, asoc->a_rwnd);
/* Send a window update SACK if the rwnd has increased by at least the
* minimum of the association's PMTU and half of the receive buffer.
* The algorithm used is similar to the one described in Section 4.2.3.3
* of RFC 1122.
* The algorithm used is similar to the one described in
* Section 4.2.3.3 of RFC 1122.
*/
if ((asoc->state == SCTP_STATE_ESTABLISHED) &&
(asoc->rwnd > asoc->a_rwnd) &&
......@@ -808,7 +819,7 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
asoc->peer.sack_needed = 0;
asoc->peer.next_dup_tsn = 0;
sctp_push_outqueue(&asoc->outqueue, sack);
sctp_outq_tail(&asoc->outqueue, sack);
/* Stop the SACK timer. */
timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
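The update rule in the comment above, min(PMTU, half the receive buffer), is small enough to model directly. A hedged sketch of the decision, with illustrative names rather than the kernel's:

/* Illustrative sketch only -- not part of the kernel diff. */
#include <stdio.h>

/* Advertise a window update only when rwnd has grown past the last
 * advertised window (a_rwnd) by at least min(PMTU, rcvbuf/2).
 */
static int needs_window_update(unsigned int rwnd, unsigned int a_rwnd,
			       unsigned int pmtu, unsigned int rcvbuf)
{
	unsigned int threshold = pmtu < rcvbuf / 2 ? pmtu : rcvbuf / 2;

	return rwnd > a_rwnd && rwnd - a_rwnd >= threshold;
}

int main(void)
{
	/* 1500-byte PMTU, 64 KiB receive buffer: update once the
	 * advertised window lags the real one by >= 1500 bytes.
	 */
	printf("%d\n", needs_window_update(40000, 39000, 1500, 65536)); /* 0 */
	printf("%d\n", needs_window_update(40000, 38000, 1500, 65536)); /* 1 */
	return 0;
}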
......@@ -824,6 +835,7 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
static void sctp_ulpevent_set_owner_r(struct sk_buff *skb, sctp_association_t *asoc)
{
sctp_ulpevent_t *event;
int skb_len = skb_headlen(skb);
/* The current stack structures assume that the rcv buffer is
* per socket. For UDP-style sockets this is not true as
......@@ -840,14 +852,14 @@ static void sctp_ulpevent_set_owner_r(struct sk_buff *skb, sctp_association_t *a
SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);
if (asoc->rwnd >= skb->len) {
asoc->rwnd -= skb->len;
if (asoc->rwnd >= skb_len) {
asoc->rwnd -= skb_len;
} else {
asoc->rwnd_over = skb->len - asoc->rwnd;
asoc->rwnd_over = skb_len - asoc->rwnd;
asoc->rwnd = 0;
}
SCTP_DEBUG_PRINTK("rwnd decreased by %d to (%u, %u)\n",
skb->len, asoc->rwnd, asoc->rwnd_over);
skb_len, asoc->rwnd, asoc->rwnd_over);
}
/* A simple destructor to give up the reference to the association. */
......
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 International Business Machines, Corp.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
......@@ -49,51 +49,39 @@
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
static inline sctp_ulpevent_t * sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq,
sctp_ulpevent_t *event);
static inline sctp_ulpevent_t *sctp_ulpqueue_order(sctp_ulpqueue_t *ulpq,
sctp_ulpevent_t *event);
static inline struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *);
static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
struct sctp_ulpevent *);
/* 1st Level Abstractions */
/* Create a new ULP queue. */
sctp_ulpqueue_t *sctp_ulpqueue_new(sctp_association_t *asoc,
__u16 inbound, int priority)
struct sctp_ulpq *sctp_ulpq_new(sctp_association_t *asoc, int priority)
{
sctp_ulpqueue_t *ulpq;
size_t size;
struct sctp_ulpq *ulpq;
/* Today, there is only a fixed size of storage needed for
* stream support, but make the interfaces acceptable for
* the future.
*/
size = sizeof(sctp_ulpqueue_t)+sctp_ulpqueue_storage_size(inbound);
ulpq = kmalloc(size, priority);
ulpq = kmalloc(sizeof(struct sctp_ulpq), priority);
if (!ulpq)
goto fail;
if (!sctp_ulpqueue_init(ulpq, asoc, inbound))
if (!sctp_ulpq_init(ulpq, asoc))
goto fail_init;
ulpq->malloced = 1;
return ulpq;
fail_init:
kfree(ulpq);
fail:
return NULL;
}
/* Initialize a ULP queue from a block of memory. */
sctp_ulpqueue_t *sctp_ulpqueue_init(sctp_ulpqueue_t *ulpq,
sctp_association_t *asoc,
__u16 inbound)
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
sctp_association_t *asoc)
{
memset(ulpq,
sizeof(sctp_ulpqueue_t) + sctp_ulpqueue_storage_size(inbound),
0x00);
memset(ulpq, 0x00, sizeof(struct sctp_ulpq));
ulpq->asoc = asoc;
spin_lock_init(&ulpq->lock);
skb_queue_head_init(&ulpq->reasm);
skb_queue_head_init(&ulpq->lobby);
ulpq->malloced = 0;
......@@ -101,38 +89,39 @@ sctp_ulpqueue_t *sctp_ulpqueue_init(sctp_ulpqueue_t *ulpq,
return ulpq;
}
/* Flush the reassembly and ordering queues. */
void sctp_ulpqueue_flush(sctp_ulpqueue_t *ulpq)
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
struct sk_buff *skb;
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
while ((skb = skb_dequeue(&ulpq->lobby))) {
event = (sctp_ulpevent_t *) skb->cb;
event = (struct sctp_ulpevent *) skb->cb;
sctp_ulpevent_free(event);
}
while ((skb = skb_dequeue(&ulpq->reasm))) {
event = (sctp_ulpevent_t *) skb->cb;
event = (struct sctp_ulpevent *) skb->cb;
sctp_ulpevent_free(event);
}
}
/* Dispose of a ulpqueue. */
void sctp_ulpqueue_free(sctp_ulpqueue_t *ulpq)
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
sctp_ulpqueue_flush(ulpq);
sctp_ulpq_flush(ulpq);
if (ulpq->malloced)
kfree(ulpq);
}
/* Process an incoming DATA chunk. */
int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *ulpq, sctp_chunk_t *chunk,
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
int priority)
{
struct sk_buff_head temp;
sctp_data_chunk_t *hdr;
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;
......@@ -147,7 +136,7 @@ int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *ulpq, sctp_chunk_t *chunk,
return -ENOMEM;
/* Do reassembly if needed. */
event = sctp_ulpqueue_reasm(ulpq, event);
event = sctp_ulpq_reasm(ulpq, event);
/* Do ordering if needed. */
if (event) {
......@@ -155,18 +144,18 @@ int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *ulpq, sctp_chunk_t *chunk,
skb_queue_head_init(&temp);
skb_queue_tail(&temp, event->parent);
event = sctp_ulpqueue_order(ulpq, event);
event = sctp_ulpq_order(ulpq, event);
}
/* Send event to the ULP. */
if (event)
sctp_ulpqueue_tail_event(ulpq, event);
sctp_ulpq_tail_event(ulpq, event);
return 0;
}
/* Add a new event for propagation to the ULP. */
int sctp_ulpqueue_tail_event(sctp_ulpqueue_t *ulpq, sctp_ulpevent_t *event)
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
struct sock *sk = ulpq->asoc->base.sk;
......@@ -202,20 +191,18 @@ int sctp_ulpqueue_tail_event(sctp_ulpqueue_t *ulpq, sctp_ulpevent_t *event)
/* 2nd Level Abstractions */
/* Helper function to store chunks that need to be reassembled. */
static inline void sctp_ulpqueue_store_reasm(sctp_ulpqueue_t *ulpq, sctp_ulpevent_t *event)
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent;
struct sctp_ulpevent *cevent;
__u32 tsn, ctsn;
unsigned long flags __attribute ((unused));
tsn = event->sndrcvinfo.sinfo_tsn;
sctp_spin_lock_irqsave(&ulpq->reasm.lock, flags);
/* Find the right place in this list. We store them by TSN. */
sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
cevent = (sctp_ulpevent_t *)pos->cb;
cevent = (struct sctp_ulpevent *)pos->cb;
ctsn = cevent->sndrcvinfo.sinfo_tsn;
if (TSN_lt(tsn, ctsn))
......@@ -227,29 +214,45 @@ static inline void sctp_ulpqueue_store_reasm(sctp_ulpqueue_t *ulpq, sctp_ulpeven
__skb_insert(event->parent, pos->prev, pos, &ulpq->reasm);
else
__skb_queue_tail(&ulpq->reasm, event->parent);
sctp_spin_unlock_irqrestore(&ulpq->reasm.lock, flags);
}
/* Helper function to return an event corresponding to the reassembled
* datagram.
 * This routine creates a reassembled skb given the first and last skbs
 * as stored in the reassembly queue. The skbs may be non-linear if the SCTP
 * payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
*/
static inline sctp_ulpevent_t *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
{
struct sk_buff *pos;
sctp_ulpevent_t *event;
struct sk_buff *pnext;
struct sctp_ulpevent *event;
struct sk_buff *pnext, *last;
struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
/* Store the pointer to the 2nd skb */
pos = f_frag->next;
/* Set the first fragment's frag_list to point to the 2nd fragment. */
skb_shinfo(f_frag)->frag_list = pos;
/* Get the last skb in the f_frag's frag_list if present. */
for (last = list; list; last = list, list = list->next);
/* Add the list of remaining fragments to the first fragment's
* frag_list.
*/
if (last)
last->next = pos;
else
skb_shinfo(f_frag)->frag_list = pos;
/* Remove the first fragment from the reassembly queue. */
__skb_unlink(f_frag, f_frag->list);
do {
pnext = pos->next;
/* Update the len and data_len fields of the first fragment. */
f_frag->len += pos->len;
f_frag->data_len += pos->len;
/* Remove the fragment from the reassembly queue. */
__skb_unlink(pos, pos->list);
......@@ -269,13 +272,12 @@ static inline sctp_ulpevent_t *sctp_make_reassembled_event(struct sk_buff *f_fra
/* Helper function to check if an incoming chunk has filled up the last
* missing fragment in a SCTP datagram and return the corresponding event.
*/
static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_t *ulpq)
static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent;
struct sk_buff *first_frag = NULL;
__u32 ctsn, next_tsn;
unsigned long flags __attribute ((unused));
sctp_ulpevent_t *retval = NULL;
/* Initialized to 0 just to avoid compiler warning message. Will
......@@ -284,8 +286,6 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_
*/
next_tsn = 0;
sctp_spin_lock_irqsave(&ulpq->reasm.lock, flags);
/* The chunks are held in the reasm queue sorted by TSN.
* Walk through the queue sequentially and look for a sequence of
* fragmented chunks that complete a datagram.
......@@ -327,7 +327,6 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_
if (retval)
break;
}
sctp_spin_unlock_irqrestore(&ulpq->reasm.lock, flags);
return retval;
}
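The scan above relies on the reasm queue being TSN-sorted: a complete datagram is a FIRST fragment followed by consecutive TSNs up to a LAST fragment, and any gap restarts the hunt. A compact user-space model of that walk (flag and function names are illustrative):

/* Illustrative sketch only -- not part of the kernel diff. */
#include <stdio.h>

enum frag_flag { FRAG_FIRST, FRAG_MIDDLE, FRAG_LAST };

struct chunk {
	unsigned int tsn;
	enum frag_flag flag;
};

/* Scan a TSN-sorted array for a run FIRST, FIRST+1, ..., LAST.
 * Returns the index of the FIRST fragment of a complete datagram,
 * or -1 if none is complete yet.
 */
static int find_complete(const struct chunk *q, int n)
{
	int i, first = -1;
	unsigned int next_tsn = 0;

	for (i = 0; i < n; i++) {
		switch (q[i].flag) {
		case FRAG_FIRST:
			first = i;
			next_tsn = q[i].tsn + 1;
			break;
		case FRAG_MIDDLE:
			if (first < 0 || q[i].tsn != next_tsn)
				first = -1;	/* gap: restart the hunt */
			else
				next_tsn++;
			break;
		case FRAG_LAST:
			if (first >= 0 && q[i].tsn == next_tsn)
				return first;	/* datagram complete */
			first = -1;
			break;
		}
	}
	return -1;
}

int main(void)
{
	struct chunk q[] = {
		{ 10, FRAG_FIRST }, { 11, FRAG_MIDDLE }, { 13, FRAG_LAST },
		{ 20, FRAG_FIRST }, { 21, FRAG_LAST },
	};

	printf("%d\n", find_complete(q, 5));	/* 3: 20..21 is complete */
	return 0;
}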
......@@ -335,7 +334,7 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_
/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
* need reassembling.
*/
static inline sctp_ulpevent_t *sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq,
static inline sctp_ulpevent_t *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
{
sctp_ulpevent_t *retval = NULL;
......@@ -350,8 +349,8 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq,
if (SCTP_DATA_NOT_FRAG == (event->chunk_flags & SCTP_DATA_FRAG_MASK))
return event;
sctp_ulpqueue_store_reasm(ulpq, event);
retval = sctp_ulpqueue_retrieve_reassembled(ulpq);
sctp_ulpq_store_reasm(ulpq, event);
retval = sctp_ulpq_retrieve_reassembled(ulpq);
return retval;
}
......@@ -359,20 +358,20 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq,
/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
*/
static inline void sctp_ulpqueue_retrieve_ordered(sctp_ulpqueue_t *ulpq,
sctp_ulpevent_t *event)
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
{
struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent;
struct sctp_ulpevent *cevent;
struct sctp_stream *in;
__u16 sid, csid;
__u16 ssn, cssn;
unsigned long flags __attribute ((unused));
sid = event->sndrcvinfo.sinfo_stream;
ssn = event->sndrcvinfo.sinfo_ssn;
in = &ulpq->asoc->ssnmap->in;
/* We are holding the chunks by stream, by SSN. */
sctp_spin_lock_irqsave(&ulpq->lobby.lock, flags);
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
cevent = (sctp_ulpevent_t *) pos->cb;
csid = cevent->sndrcvinfo.sinfo_stream;
......@@ -386,32 +385,31 @@ static inline void sctp_ulpqueue_retrieve_ordered(sctp_ulpqueue_t *ulpq,
if (csid < sid)
continue;
if (cssn != ulpq->ssn[sid])
if (cssn != sctp_ssn_peek(in, sid))
break;
ulpq->ssn[sid]++;
/* Found it, so mark in the ssnmap. */
sctp_ssn_next(in, sid);
__skb_unlink(pos, pos->list);
/* Attach all gathered skbs to the event. */
__skb_queue_tail(event->parent->list, pos);
}
sctp_spin_unlock_irqrestore(&ulpq->lobby.lock, flags);
}
/* Helper function to store chunks needing ordering. */
static inline void sctp_ulpqueue_store_ordered(sctp_ulpqueue_t *ulpq,
sctp_ulpevent_t *event)
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
{
struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent;
__u16 sid, csid;
__u16 ssn, cssn;
unsigned long flags __attribute ((unused));
sid = event->sndrcvinfo.sinfo_stream;
ssn = event->sndrcvinfo.sinfo_ssn;
sctp_spin_lock_irqsave(&ulpq->lobby.lock, flags);
/* Find the right place in this list. We store them by
* stream ID and then by SSN.
......@@ -432,14 +430,13 @@ static inline void sctp_ulpqueue_store_ordered(sctp_ulpqueue_t *ulpq,
__skb_insert(event->parent, pos->prev, pos, &ulpq->lobby);
else
__skb_queue_tail(&ulpq->lobby, event->parent);
sctp_spin_unlock_irqrestore(&ulpq->lobby.lock, flags);
}
static inline sctp_ulpevent_t *sctp_ulpqueue_order(sctp_ulpqueue_t *ulpq,
sctp_ulpevent_t *event)
static inline sctp_ulpevent_t *sctp_ulpq_order(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
{
__u16 sid, ssn;
struct sctp_stream *in;
/* FIXME: We should be using some new chunk structure here
* instead of carrying chunk fields in the event structure.
......@@ -454,23 +451,24 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_order(sctp_ulpqueue_t *ulpq,
/* Note: The stream ID must be verified before this routine. */
sid = event->sndrcvinfo.sinfo_stream;
ssn = event->sndrcvinfo.sinfo_ssn;
in = &ulpq->asoc->ssnmap->in;
/* Is this the expected SSN for this stream ID? */
if (ssn != ulpq->ssn[sid]) {
if (ssn != sctp_ssn_peek(in, sid)) {
/* We've received something out of order, so find where it
* needs to be placed. We order by stream and then by SSN.
*/
sctp_ulpqueue_store_ordered(ulpq, event);
sctp_ulpq_store_ordered(ulpq, event);
return NULL;
}
/* Mark that the next chunk has been found. */
ulpq->ssn[sid]++;
sctp_ssn_next(in, sid);
/* Go find any other chunks that were waiting for
* ordering.
*/
sctp_ulpqueue_retrieve_ordered(ulpq, event);
sctp_ulpq_retrieve_ordered(ulpq, event);
return event;
}
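The order/store/retrieve trio above reduces to a simple loop: deliver a message when it carries the expected SSN for its stream, park it otherwise, and on each in-order delivery drain whatever the new arrival unblocked. A self-contained user-space model of that flow, with an array playing the lobby and a per-stream counter playing the ssnmap's 'in' side (all names illustrative):

/* Illustrative sketch only -- not part of the kernel diff. */
#include <stdio.h>

#define NSTREAMS 4
#define LOBBY_MAX 16

struct msg {
	unsigned short sid;	/* stream id */
	unsigned short ssn;	/* stream sequence number */
};

static unsigned short next_ssn[NSTREAMS];	/* ssnmap 'in' stand-in */
static struct msg lobby[LOBBY_MAX];
static int lobby_cnt;

static void deliver(struct msg m)
{
	printf("deliver sid=%u ssn=%u\n", m.sid, m.ssn);
}

/* Deliver in-order messages; park out-of-order ones in the lobby,
 * then drain any lobby messages the new arrival unblocked.
 */
static void order(struct msg m)
{
	int i, progress;

	if (m.ssn != next_ssn[m.sid]) {
		lobby[lobby_cnt++] = m;		/* out of order: hold it */
		return;
	}
	next_ssn[m.sid]++;
	deliver(m);

	do {					/* retrieve_ordered step */
		progress = 0;
		for (i = 0; i < lobby_cnt; i++) {
			if (lobby[i].ssn != next_ssn[lobby[i].sid])
				continue;
			next_ssn[lobby[i].sid]++;
			deliver(lobby[i]);
			lobby[i] = lobby[--lobby_cnt];
			progress = 1;
			break;
		}
	} while (progress);
}

int main(void)
{
	struct msg a = { 1, 1 }, b = { 1, 0 }, c = { 1, 2 };

	order(a);	/* parked: stream 1 expects SSN 0 */
	order(b);	/* delivers SSN 0, then drains SSN 1 from the lobby */
	order(c);	/* delivers SSN 2 */
	return 0;
}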