Commit 1c5f1800 authored by Jon Grimm's avatar Jon Grimm Committed by Sridhar Samudrala

[SCTP] Fix hardcoded stream counts.

Code had hardcoded limits on the maximum number of streams that could be used,
and consequently used static data structures.   Now delay allocating
storage for the SSN maps until _after_ the stream counts have been
negotiated, and size it dynamically.  Protocols such as SIP want to use
all possible streams.
parent 4cc42719
......@@ -56,8 +56,10 @@
#include <linux/ipv6.h> /* For ipv6hdr. */
#include <net/sctp/user.h>
/* What a hack! Jiminy Cricket! */
enum { SCTP_MAX_STREAM = 10 };
/* Value used for stream negotiation. */
enum { SCTP_MAX_STREAM = 0xffff };
enum { SCTP_DEFAULT_OUTSTREAMS = 10 };
enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
/* Define the amount of space to reserve for SCTP, IP, LL.
* There is a little bit of waste that we are always allocating
......
......@@ -269,6 +269,7 @@ extern atomic_t sctp_dbg_objcnt_transport;
extern atomic_t sctp_dbg_objcnt_chunk;
extern atomic_t sctp_dbg_objcnt_bind_addr;
extern atomic_t sctp_dbg_objcnt_addr;
extern atomic_t sctp_dbg_objcnt_ssnmap;
/* Macros to atomically increment/decrement objcnt counters. */
#define SCTP_DBG_OBJCNT_INC(name) \
......
......@@ -269,6 +269,7 @@ sctp_chunk_t *sctp_make_op_error(const sctp_association_t *,
const void *payload,
size_t paylen);
void sctp_chunk_assign_tsn(sctp_chunk_t *);
void sctp_chunk_assign_ssn(sctp_chunk_t *);
/* Prototypes for statetable processing. */
......
......@@ -42,7 +42,7 @@
* Sridhar Samudrala <sri@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Dajiang Zhang <dajiang.zhang@nokia.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -104,27 +104,27 @@ union sctp_addr {
/* Forward declarations for data structures. */
struct SCTP_protocol;
struct sctp_protocol;
struct SCTP_endpoint;
struct SCTP_association;
struct SCTP_transport;
struct SCTP_packet;
struct SCTP_chunk;
struct SCTP_inqueue;
struct SCTP_outqueue;
struct sctp_outq;
struct SCTP_bind_addr;
struct sctp_ulpq;
struct sctp_opt;
struct sctp_endpoint_common;
struct sctp_ssnmap;
typedef struct SCTP_protocol sctp_protocol_t;
typedef struct sctp_protocol sctp_protocol_t;
typedef struct SCTP_endpoint sctp_endpoint_t;
typedef struct SCTP_association sctp_association_t;
typedef struct SCTP_transport sctp_transport_t;
typedef struct SCTP_packet sctp_packet_t;
typedef struct SCTP_chunk sctp_chunk_t;
typedef struct SCTP_inqueue sctp_inqueue_t;
typedef struct SCTP_outqueue sctp_outqueue_t;
typedef struct SCTP_bind_addr sctp_bind_addr_t;
typedef struct sctp_opt sctp_opt_t;
typedef struct sctp_endpoint_common sctp_endpoint_common_t;
......@@ -133,7 +133,6 @@ typedef struct sctp_endpoint_common sctp_endpoint_common_t;
#include <net/sctp/ulpevent.h>
#include <net/sctp/ulpqueue.h>
/* Structures useful for managing bind/connect. */
typedef struct sctp_bind_bucket {
......@@ -157,7 +156,7 @@ typedef struct sctp_hashbucket {
/* The SCTP protocol structure. */
struct SCTP_protocol {
struct sctp_protocol {
/* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
*
* The following protocol parameters are RECOMMENDED:
......@@ -183,8 +182,8 @@ struct SCTP_protocol {
/* Valid.Cookie.Life - 60 seconds */
int valid_cookie_life;
/* Whether Cookie Preservative is enabled(1) or not(0) */
/* Whether Cookie Preservative is enabled(1) or not(0) */
int cookie_preserve_enable;
/* Association.Max.Retrans - 10 attempts
......@@ -282,7 +281,7 @@ struct sctp_af *sctp_get_af_specific(sa_family_t);
int sctp_register_af(struct sctp_af *);
/* Protocol family functions. */
typedef struct sctp_pf {
struct sctp_pf {
void (*event_msgname)(sctp_ulpevent_t *, char *, int *);
void (*skb_msgname) (struct sk_buff *, char *, int *);
int (*af_supported) (sa_family_t);
......@@ -291,7 +290,7 @@ typedef struct sctp_pf {
struct sctp_opt *);
int (*bind_verify) (struct sctp_opt *, union sctp_addr *);
struct sctp_af *af;
} sctp_pf_t;
};
/* SCTP Socket type: UDP or TCP style. */
typedef enum {
......@@ -318,7 +317,7 @@ struct sctp_opt {
__u32 autoclose;
__u8 nodelay;
__u8 disable_fragments;
sctp_pf_t *pf;
struct sctp_pf *pf;
};
......@@ -360,7 +359,8 @@ typedef struct sctp_cookie {
struct timeval expiration;
/* Number of inbound/outbound streams which are set
* and negotiated during the INIT process. */
* and negotiated during the INIT process.
*/
__u16 sinit_num_ostreams;
__u16 sinit_max_instreams;
......@@ -426,6 +426,49 @@ typedef struct sctp_sender_hb_info {
unsigned long sent_at;
} sctp_sender_hb_info_t __attribute__((packed));
/*
* RFC 2960 1.3.2 Sequenced Delivery within Streams
*
* The term "stream" is used in SCTP to refer to a sequence of user
* messages that are to be delivered to the upper-layer protocol in
* order with respect to other messages within the same stream. This is
* in contrast to its usage in TCP, where it refers to a sequence of
* bytes (in this document a byte is assumed to be eight bits).
* ...
*
* This is the structure we use to track both our outbound and inbound
* SSN, or Stream Sequence Numbers.
*/
struct sctp_stream {
	__u16 *ssn;		/* Dynamically allocated array of per-stream SSNs,
				 * indexed by stream id. */
	unsigned int len;	/* Number of entries in the ssn array. */
};
struct sctp_ssnmap {
	struct sctp_stream in;	/* SSN state for inbound streams. */
	struct sctp_stream out;	/* SSN state for outbound streams. */
	int malloced;		/* Was this structure kmalloc()'d?  If so,
				 * sctp_ssnmap_free() must kfree() it. */
};
struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *, __u16, __u16);
struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, int priority);
void sctp_ssnmap_free(struct sctp_ssnmap *map);
void sctp_ssnmap_clear(struct sctp_ssnmap *map);
/* What is the current SSN number for this stream? */
/* Report the current SSN for the given stream without advancing it. */
static inline __u16 sctp_ssn_peek(struct sctp_stream *stream, __u16 id)
{
	__u16 *map = stream->ssn;

	return map[id];
}
/* Return the next SSN number for this stream. */
/* Hand out the current SSN for the given stream, then advance it.
 * (Wraps naturally at 65535 since the counter is a __u16.)
 */
static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id)
{
	__u16 ssn = stream->ssn[id];

	stream->ssn[id] = ssn + 1;
	return ssn;
}
/* RFC2960 1.4 Key Terms
*
* o Chunk: A unit of information within an SCTP packet, consisting of
......@@ -499,6 +542,7 @@ struct SCTP_chunk {
__u8 rtt_in_progress; /* Is this chunk used for RTT calculation? */
__u8 num_times_sent; /* How man times did we send this? */
__u8 has_tsn; /* Does this chunk have a TSN yet? */
__u8 has_ssn; /* Does this chunk have a SSN yet? */
__u8 singleton; /* Was this the only chunk in the packet? */
__u8 end_of_packet; /* Was this the last chunk in the packet? */
__u8 ecn_ce_done; /* Have we processed the ECN CE bit? */
......@@ -578,27 +622,27 @@ struct SCTP_packet {
int malloced;
};
typedef int (sctp_outqueue_thandler_t)(sctp_outqueue_t *, void *);
typedef int (sctp_outqueue_ehandler_t)(sctp_outqueue_t *);
typedef sctp_packet_t *(sctp_outqueue_ohandler_init_t)
typedef int (sctp_outq_thandler_t)(struct sctp_outq *, void *);
typedef int (sctp_outq_ehandler_t)(struct sctp_outq *);
typedef sctp_packet_t *(sctp_outq_ohandler_init_t)
(sctp_packet_t *,
sctp_transport_t *,
__u16 sport,
__u16 dport);
typedef sctp_packet_t *(sctp_outqueue_ohandler_config_t)
typedef sctp_packet_t *(sctp_outq_ohandler_config_t)
(sctp_packet_t *,
__u32 vtag,
int ecn_capable,
sctp_packet_phandler_t *get_prepend_chunk);
typedef sctp_xmit_t (sctp_outqueue_ohandler_t)(sctp_packet_t *,
typedef sctp_xmit_t (sctp_outq_ohandler_t)(sctp_packet_t *,
sctp_chunk_t *);
typedef int (sctp_outqueue_ohandler_force_t)(sctp_packet_t *);
typedef int (sctp_outq_ohandler_force_t)(sctp_packet_t *);
sctp_outqueue_ohandler_init_t sctp_packet_init;
sctp_outqueue_ohandler_config_t sctp_packet_config;
sctp_outqueue_ohandler_t sctp_packet_append_chunk;
sctp_outqueue_ohandler_t sctp_packet_transmit_chunk;
sctp_outqueue_ohandler_force_t sctp_packet_transmit;
sctp_outq_ohandler_init_t sctp_packet_init;
sctp_outq_ohandler_config_t sctp_packet_config;
sctp_outq_ohandler_t sctp_packet_append_chunk;
sctp_outq_ohandler_t sctp_packet_transmit_chunk;
sctp_outq_ohandler_force_t sctp_packet_transmit;
void sctp_packet_free(sctp_packet_t *);
......@@ -835,7 +879,7 @@ void sctp_inqueue_set_th_handler(sctp_inqueue_t *,
*
* When free()'d, it empties itself out via output_handler().
*/
struct SCTP_outqueue {
struct sctp_outq {
sctp_association_t *asoc;
/* BUG: This really should be an array of streams.
......@@ -861,11 +905,11 @@ struct SCTP_outqueue {
* layer. This is always SCTP_packet, but we separate the two
* structures to make testing simpler.
*/
sctp_outqueue_ohandler_init_t *init_output;
sctp_outqueue_ohandler_config_t *config_output;
sctp_outqueue_ohandler_t *append_output;
sctp_outqueue_ohandler_t *build_output;
sctp_outqueue_ohandler_force_t *force_output;
sctp_outq_ohandler_init_t *init_output;
sctp_outq_ohandler_config_t *config_output;
sctp_outq_ohandler_t *append_output;
sctp_outq_ohandler_t *build_output;
sctp_outq_ohandler_force_t *force_output;
/* How many unackd bytes do we have in-flight? */
__u32 outstanding_bytes;
......@@ -877,24 +921,23 @@ struct SCTP_outqueue {
int malloced;
};
sctp_outqueue_t *sctp_outqueue_new(sctp_association_t *);
void sctp_outqueue_init(sctp_association_t *, sctp_outqueue_t *);
void sctp_outqueue_teardown(sctp_outqueue_t *);
void sctp_outqueue_free(sctp_outqueue_t*);
void sctp_force_outqueue(sctp_outqueue_t *);
int sctp_push_outqueue(sctp_outqueue_t *, sctp_chunk_t *chunk);
int sctp_flush_outqueue(sctp_outqueue_t *, int);
int sctp_sack_outqueue(sctp_outqueue_t *, sctp_sackhdr_t *);
int sctp_outqueue_is_empty(const sctp_outqueue_t *);
int sctp_outqueue_set_output_handlers(sctp_outqueue_t *,
sctp_outqueue_ohandler_init_t init,
sctp_outqueue_ohandler_config_t config,
sctp_outqueue_ohandler_t append,
sctp_outqueue_ohandler_t build,
sctp_outqueue_ohandler_force_t force);
void sctp_outqueue_restart(sctp_outqueue_t *);
void sctp_retransmit(sctp_outqueue_t *, sctp_transport_t *, __u8);
void sctp_retransmit_mark(sctp_outqueue_t *, sctp_transport_t *, __u8);
struct sctp_outq *sctp_outq_new(sctp_association_t *);
void sctp_outq_init(sctp_association_t *, struct sctp_outq *);
void sctp_outq_teardown(struct sctp_outq *);
void sctp_outq_free(struct sctp_outq*);
int sctp_outq_tail(struct sctp_outq *, sctp_chunk_t *chunk);
int sctp_outq_flush(struct sctp_outq *, int);
int sctp_outq_sack(struct sctp_outq *, sctp_sackhdr_t *);
int sctp_outq_is_empty(const struct sctp_outq *);
int sctp_outq_set_output_handlers(struct sctp_outq *,
sctp_outq_ohandler_init_t init,
sctp_outq_ohandler_config_t config,
sctp_outq_ohandler_t append,
sctp_outq_ohandler_t build,
sctp_outq_ohandler_force_t force);
void sctp_outq_restart(struct sctp_outq *);
void sctp_retransmit(struct sctp_outq *, sctp_transport_t *, __u8);
void sctp_retransmit_mark(struct sctp_outq *, sctp_transport_t *, __u8);
/* These bind address data fields common between endpoints and associations */
......@@ -1027,7 +1070,7 @@ struct SCTP_endpoint {
/* These are the system-wide defaults and other stuff which is
* endpoint-independent.
*/
sctp_protocol_t *proto;
struct sctp_protocol *proto;
/* Associations: A list of current associations and mappings
* to the data consumers for each association. This
......@@ -1408,18 +1451,15 @@ struct SCTP_association {
} defaults;
/* This tracks outbound ssn for a given stream. */
__u16 ssn[SCTP_MAX_STREAM];
struct sctp_ssnmap *ssnmap;
/* All outbound chunks go through this structure. */
sctp_outqueue_t outqueue;
struct sctp_outq outqueue;
/* A smart pipe that will handle reordering and fragmentation,
* as well as handle passing events up to the ULP.
* In the future, we should make this at least dynamic, if
* not also some sparse structure.
*/
sctp_ulpqueue_t ulpq;
__u8 _ssnmap[sctp_ulpqueue_storage_size(SCTP_MAX_STREAM)];
struct sctp_ulpq ulpq;
/* Need to send an ECNE Chunk? */
int need_ecne;
......@@ -1505,7 +1545,7 @@ struct SCTP_association {
*
*
* [I really think this is EXACTLY the sort of intelligence
* which already resides in SCTP_outqueue. Please move this
* which already resides in sctp_outq. Please move this
* queue and its supporting logic down there. --piggy]
*/
struct sk_buff_head addip_chunks;
......
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 International Business Machines, Corp.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
* These are the definitions needed for the sctp_ulpqueue type. The
* sctp_ulpqueue is the interface between the Upper Layer Protocol, or ULP,
*
* These are the definitions needed for the sctp_ulpq type. The
* sctp_ulpq is the interface between the Upper Layer Protocol, or ULP,
* and the core SCTP state machine. This is the component which handles
* reassembly and ordering.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* reassembly and ordering.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* the SCTP reference implementation is distributed in the hope that it
*
* the SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to one of the
* following email addresses:
*
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
*
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email addresses:
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
......@@ -42,46 +47,26 @@
#define __sctp_ulpqueue_h__
/* A structure to carry information to the ULP (e.g. Sockets API) */
typedef struct sctp_ulpqueue {
struct sctp_ulpq {
int malloced;
spinlock_t lock;
sctp_association_t *asoc;
struct sk_buff_head reasm;
struct sk_buff_head lobby;
__u16 ssn[0];
} sctp_ulpqueue_t;
/* This macro assists in creation of external storage for variable length
* internal buffers.
*/
#define sctp_ulpqueue_storage_size(inbound) (sizeof(__u16) * (inbound))
sctp_ulpqueue_t *sctp_ulpqueue_new(sctp_association_t *asoc,
__u16 inbound,
int priority);
sctp_ulpqueue_t *sctp_ulpqueue_init(sctp_ulpqueue_t *ulpq,
sctp_association_t *asoc,
__u16 inbound);
void sctp_ulpqueue_free(sctp_ulpqueue_t *);
};
/* Prototypes. */
struct sctp_ulpq *sctp_ulpq_new(sctp_association_t *asoc, int priority);
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *, sctp_association_t *);
void sctp_ulpq_free(struct sctp_ulpq *);
/* Add a new DATA chunk for processing. */
int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *,
sctp_chunk_t *chunk,
int priority);
int sctp_ulpq_tail_data(struct sctp_ulpq *, sctp_chunk_t *chunk, int priority);
/* Add a new event for propagation to the ULP. */
int sctp_ulpqueue_tail_event(sctp_ulpqueue_t *,
sctp_ulpevent_t *event);
int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
/* Is the ulpqueue empty. */
int sctp_ulpqueue_is_empty(sctp_ulpqueue_t *);
int sctp_ulpqueue_is_data_empty(sctp_ulpqueue_t *);
int sctp_ulpqueue_is_empty(struct sctp_ulpq *);
#endif /* __sctp_ulpqueue_h__ */
......@@ -90,4 +75,4 @@ int sctp_ulpqueue_is_data_empty(sctp_ulpqueue_t *);
......@@ -10,7 +10,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
inqueue.o outqueue.o ulpqueue.o command.o \
tsnmap.o bind_addr.o socket.o primitive.o \
output.o input.o hashdriver.o sla1.o \
debug.o
debug.o ssnmap.o
ifeq ($(CONFIG_SCTP_ADLER32), y)
sctp-y += adler32.o
......
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 International Business Machines Corp.
* Copyright (c) 2001-2003 International Business Machines Corp.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
......@@ -166,15 +166,10 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
asoc->max_init_timeo = sp->initmsg.sinit_max_init_timeo * HZ;
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
* The stream sequence number in all the streams shall start
* from 0 when the association is established. Also, when the
* stream sequence number reaches the value 65535 the next
* stream sequence number shall be set to 0.
/* Allocate storage for the ssnmap after the inbound and outbound
* streams have been negotiated during Init.
*/
for (i = 0; i < SCTP_MAX_STREAM; i++)
asoc->ssn[i] = 0;
asoc->ssnmap = NULL;
/* Set the local window size for receive.
* This is also the rcvbuf space per association.
......@@ -252,15 +247,15 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
asoc);
/* Create an output queue. */
sctp_outqueue_init(asoc, &asoc->outqueue);
sctp_outqueue_set_output_handlers(&asoc->outqueue,
sctp_packet_init,
sctp_packet_config,
sctp_packet_append_chunk,
sctp_packet_transmit_chunk,
sctp_packet_transmit);
if (NULL == sctp_ulpqueue_init(&asoc->ulpq, asoc, SCTP_MAX_STREAM))
sctp_outq_init(asoc, &asoc->outqueue);
sctp_outq_set_output_handlers(&asoc->outqueue,
sctp_packet_init,
sctp_packet_config,
sctp_packet_append_chunk,
sctp_packet_transmit_chunk,
sctp_packet_transmit);
if (NULL == sctp_ulpq_init(&asoc->ulpq, asoc))
goto fail_init;
/* Set up the tsn tracking. */
......@@ -310,14 +305,17 @@ void sctp_association_free(sctp_association_t *asoc)
asoc->base.dead = 1;
/* Dispose of any data lying around in the outqueue. */
sctp_outqueue_free(&asoc->outqueue);
sctp_outq_free(&asoc->outqueue);
/* Dispose of any pending messages for the upper layer. */
sctp_ulpqueue_free(&asoc->ulpq);
sctp_ulpq_free(&asoc->ulpq);
/* Dispose of any pending chunks on the inqueue. */
sctp_inqueue_free(&asoc->base.inqueue);
/* Free ssnmap storage. */
sctp_ssnmap_free(asoc->ssnmap);
/* Clean up the bound address list. */
sctp_bind_addr_free(&asoc->base.bind_addr);
......@@ -524,7 +522,7 @@ void sctp_assoc_control_transport(sctp_association_t *asoc,
break;
default:
BUG();
return;
};
/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
......@@ -534,7 +532,7 @@ void sctp_assoc_control_transport(sctp_association_t *asoc,
(struct sockaddr_storage *) &transport->ipaddr,
0, spc_state, error, GFP_ATOMIC);
if (event)
sctp_ulpqueue_tail_event(&asoc->ulpq, event);
sctp_ulpq_tail_event(&asoc->ulpq, event);
/* Select new active and retran paths. */
......@@ -634,7 +632,7 @@ __u32 __sctp_association_get_tsn_block(sctp_association_t *asoc, int num)
/* Fetch the next Stream Sequence Number for stream number 'sid'. */
__u16 __sctp_association_get_next_ssn(sctp_association_t *asoc, __u16 sid)
{
return asoc->ssn[sid]++;
return sctp_ssn_next(&asoc->ssnmap->out, sid);
}
/* Compare two addresses to see if they match. Wildcard addresses
......@@ -852,8 +850,6 @@ void sctp_assoc_migrate(sctp_association_t *assoc, struct sock *newsk)
/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
{
int i;
/* Copy in new parameters of peer. */
asoc->c = new->c;
asoc->peer.rwnd = new->peer.rwnd;
......@@ -872,23 +868,28 @@ void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
/* If the case is A (association restart), use
* initial_tsn as next_tsn. If the case is B, use
* current next_tsn in case there is data sent to peer
* current next_tsn in case data sent to peer
* has been discarded and needs retransmission.
*/
if (SCTP_STATE_ESTABLISHED == asoc->state) {
asoc->next_tsn = new->next_tsn;
asoc->ctsn_ack_point = new->ctsn_ack_point;
/* Reinitialize SSN for both local streams
* and peer's streams.
*/
for (i = 0; i < SCTP_MAX_STREAM; i++) {
asoc->ssn[i] = 0;
asoc->ulpq.ssn[i] = 0;
}
sctp_ssnmap_clear(asoc->ssnmap);
} else {
asoc->ctsn_ack_point = asoc->next_tsn - 1;
if (!asoc->ssnmap) {
/* Move the ssnmap. */
asoc->ssnmap = new->ssnmap;
new->ssnmap = NULL;
}
}
}
/* Choose the transport for sending a shutdown packet.
......
......@@ -47,8 +47,8 @@ sctp_cmd_seq_t *sctp_new_cmd_seq(int priority)
{
sctp_cmd_seq_t *retval = t_new(sctp_cmd_seq_t, priority);
/* XXX Check for NULL? -DaveM */
sctp_init_cmd_seq(retval);
if (retval)
sctp_init_cmd_seq(retval);
return retval;
}
......
......@@ -113,14 +113,14 @@ static inline int sctp_v6_xmit(struct sk_buff *skb)
/* FIXME: Currently, ip6_route_output() doesn't fill in the source
* address in the returned route entry. So we call ipv6_get_saddr()
* to get an appropriate source address. It is possible that this address
* may not be part of the bind address list of the association.
* to get an appropriate source address. It is possible that this
* address may not be part of the bind address list of the association.
* Once ip6_route_ouput() is fixed so that it returns a route entry
* with an appropriate source address, the following if condition can
* be removed. With ip6_route_output() returning a source address filled
* route entry, sctp_transport_route() can do real source address
* selection for v6.
*/
* be removed. With ip6_route_output() returning a source address
* filled route entry, sctp_transport_route() can do real source
* address selection for v6.
*/
if (ipv6_addr_any(&rt6->rt6i_src.addr)) {
err = ipv6_get_saddr(dst, fl.fl6_dst, &saddr);
......@@ -130,7 +130,6 @@ static inline int sctp_v6_xmit(struct sk_buff *skb)
__FUNCTION__, NIP6(fl.fl6_src));
return err;
}
fl.fl6_src = &saddr;
} else {
fl.fl6_src = &rt6->rt6i_src.addr;
......@@ -572,7 +571,7 @@ int sctp_v6_init(void)
/* Register the SCTP specfic AF_INET6 functions. */
sctp_register_af(&sctp_ipv6_specific);
/* Register notifier for inet6 address additions/deletions. */
/* Register notifier for inet6 address additions/deletions. */
register_inet6addr_notifier(&sctp_inetaddr_notifier);
return 0;
......
......@@ -54,6 +54,7 @@ SCTP_DBG_OBJCNT(assoc);
SCTP_DBG_OBJCNT(bind_addr);
SCTP_DBG_OBJCNT(chunk);
SCTP_DBG_OBJCNT(addr);
SCTP_DBG_OBJCNT(ssnmap);
/* An array to make it easy to pretty print the debug information
* to the proc fs.
......@@ -66,6 +67,7 @@ sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = {
SCTP_DBG_OBJCNT_ENTRY(chunk),
SCTP_DBG_OBJCNT_ENTRY(bind_addr),
SCTP_DBG_OBJCNT_ENTRY(addr),
SCTP_DBG_OBJCNT_ENTRY(ssnmap),
};
/* Callback from procfs to read out objcount information.
......
......@@ -227,7 +227,7 @@ sctp_xmit_t sctp_packet_append_chunk(sctp_packet_t *packet, sctp_chunk_t *chunk)
}
/* All packets are sent to the network through this function from
* sctp_push_outqueue().
* sctp_outq_tail().
*
* The return value is a normal kernel error return value.
*/
......
......@@ -6,7 +6,7 @@
*
* This file is part of the SCTP kernel reference Implementation
*
* These functions implement the outqueue class. The outqueue handles
* These functions implement the sctp_outq class. The outqueue handles
* bundling and queueing of outgoing SCTP chunks.
*
* The SCTP reference implementation is free software;
......@@ -47,39 +47,39 @@
*/
#include <linux/types.h>
#include <linux/list.h> /* For struct list_head */
#include <linux/list.h> /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <net/sock.h> /* For skb_set_owner_w */
#include <net/sock.h> /* For skb_set_owner_w */
#include <net/sctp/sctp.h>
/* Declare internal functions here. */
static int sctp_acked(sctp_sackhdr_t *sack, __u32 tsn);
static void sctp_check_transmitted(sctp_outqueue_t *q,
static void sctp_check_transmitted(struct sctp_outq *q,
struct list_head *transmitted_queue,
sctp_transport_t *transport,
sctp_sackhdr_t *sack,
__u32 highest_new_tsn);
/* Generate a new outqueue. */
sctp_outqueue_t *sctp_outqueue_new(sctp_association_t *asoc)
struct sctp_outq *sctp_outq_new(sctp_association_t *asoc)
{
sctp_outqueue_t *q;
struct sctp_outq *q;
q = t_new(sctp_outqueue_t, GFP_KERNEL);
q = t_new(struct sctp_outq, GFP_KERNEL);
if (q) {
sctp_outqueue_init(asoc, q);
sctp_outq_init(asoc, q);
q->malloced = 1;
}
return q;
}
/* Initialize an existing SCTP_outqueue. This does the boring stuff.
/* Initialize an existing sctp_outq. This does the boring stuff.
* You still need to define handlers if you really want to DO
* something with this structure...
*/
void sctp_outqueue_init(sctp_association_t *asoc, sctp_outqueue_t *q)
void sctp_outq_init(sctp_association_t *asoc, struct sctp_outq *q)
{
q->asoc = asoc;
skb_queue_head_init(&q->out);
......@@ -102,7 +102,7 @@ void sctp_outqueue_init(sctp_association_t *asoc, sctp_outqueue_t *q)
/* Free the outqueue structure and any related pending chunks.
* FIXME: Add SEND_FAILED support.
*/
void sctp_outqueue_teardown(sctp_outqueue_t *q)
void sctp_outq_teardown(struct sctp_outq *q)
{
sctp_transport_t *transport;
struct list_head *lchunk, *pos, *temp;
......@@ -131,29 +131,22 @@ void sctp_outqueue_teardown(sctp_outqueue_t *q)
}
/* Free the outqueue structure and any related pending chunks. */
void sctp_outqueue_free(sctp_outqueue_t *q)
void sctp_outq_free(struct sctp_outq *q)
{
/* Throw away leftover chunks. */
sctp_outqueue_teardown(q);
sctp_outq_teardown(q);
/* If we were kmalloc()'d, free the memory. */
if (q->malloced)
kfree(q);
}
/* Transmit any pending partial chunks. */
void sctp_force_outqueue(sctp_outqueue_t *q)
{
/* Do we really need this? */
/* BUG */
}
/* Put a new chunk in an SCTP_outqueue. */
int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
/* Put a new chunk in an sctp_outq. */
int sctp_outq_tail(struct sctp_outq *q, sctp_chunk_t *chunk)
{
int error = 0;
SCTP_DEBUG_PRINTK("sctp_push_outqueue(%p, %p[%s])\n",
SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
q, chunk, chunk && chunk->chunk_hdr ?
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
: "Illegal Chunk");
......@@ -184,8 +177,7 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
default:
SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
q, chunk,
chunk && chunk->chunk_hdr ?
q, chunk, chunk && chunk->chunk_hdr ?
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
: "Illegal Chunk");
......@@ -193,13 +185,13 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
q->empty = 0;
break;
};
} else {
} else
skb_queue_tail(&q->control, (struct sk_buff *) chunk);
}
if (error < 0)
return error;
error = sctp_flush_outqueue(q, 0);
error = sctp_outq_flush(q, 0);
return error;
}
......@@ -207,7 +199,7 @@ int sctp_push_outqueue(sctp_outqueue_t *q, sctp_chunk_t *chunk)
/* Insert a chunk into the retransmit queue. Chunks on the retransmit
* queue are kept in order, based on the TSNs.
*/
void sctp_retransmit_insert(struct list_head *tlchunk, sctp_outqueue_t *q)
void sctp_retransmit_insert(struct list_head *tlchunk, struct sctp_outq *q)
{
struct list_head *rlchunk;
sctp_chunk_t *tchunk, *rchunk;
......@@ -232,7 +224,7 @@ void sctp_retransmit_insert(struct list_head *tlchunk, sctp_outqueue_t *q)
}
/* Mark all the eligible packets on a transport for retransmission. */
void sctp_retransmit_mark(sctp_outqueue_t *q, sctp_transport_t *transport,
void sctp_retransmit_mark(struct sctp_outq *q, sctp_transport_t *transport,
__u8 fast_retransmit)
{
struct list_head *lchunk, *ltemp;
......@@ -302,7 +294,7 @@ void sctp_retransmit_mark(sctp_outqueue_t *q, sctp_transport_t *transport,
/* Mark all the eligible packets on a transport for retransmission and force
* one packet out.
*/
void sctp_retransmit(sctp_outqueue_t *q, sctp_transport_t *transport,
void sctp_retransmit(struct sctp_outq *q, sctp_transport_t *transport,
__u8 fast_retransmit)
{
int error = 0;
......@@ -315,7 +307,7 @@ void sctp_retransmit(sctp_outqueue_t *q, sctp_transport_t *transport,
sctp_retransmit_mark(q, transport, fast_retransmit);
error = sctp_flush_outqueue(q, /* rtx_timeout */ 1);
error = sctp_outq_flush(q, /* rtx_timeout */ 1);
if (error)
q->asoc->base.sk->err = -error;
......@@ -323,14 +315,14 @@ void sctp_retransmit(sctp_outqueue_t *q, sctp_transport_t *transport,
/*
* Transmit DATA chunks on the retransmit queue. Upon return from
* sctp_flush_retran_queue() the packet 'pkt' may contain chunks which
* sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
* need to be transmitted by the caller.
* We assume that pkt->transport has already been set.
*
* The return value is a normal kernel error return value.
*/
static int sctp_flush_retran_queue(sctp_outqueue_t *q, sctp_packet_t *pkt,
int rtx_timeout, int *start_timer)
static int sctp_outq_flush_rtx(struct sctp_outq *q, sctp_packet_t *pkt,
int rtx_timeout, int *start_timer)
{
struct list_head *lqueue;
struct list_head *lchunk;
......@@ -439,7 +431,7 @@ static int sctp_flush_retran_queue(sctp_outqueue_t *q, sctp_packet_t *pkt,
* queue. 'pos' points to the next chunk in the output queue after the
* chunk that is currently in the process of fragmentation.
*/
void sctp_xmit_frag(sctp_outqueue_t *q, struct sk_buff *pos,
void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
sctp_packet_t *packet, sctp_chunk_t *frag, __u32 tsn)
{
sctp_transport_t *transport = packet->transport;
......@@ -515,7 +507,7 @@ void sctp_xmit_frag(sctp_outqueue_t *q, struct sk_buff *pos,
* The argument 'frag' point to the first fragment and it holds the list
* of all the other fragments in the 'frag_list' field.
*/
void sctp_xmit_fragmented_chunks(sctp_outqueue_t *q, sctp_packet_t *packet,
void sctp_xmit_fragmented_chunks(struct sctp_outq *q, sctp_packet_t *packet,
sctp_chunk_t *frag)
{
sctp_association_t *asoc = frag->asoc;
......@@ -574,7 +566,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
if (!first_frag)
goto err;
first_frag->has_ssn = 1;
/* All the fragments are added to the frag_list of the first chunk. */
frag_list = &first_frag->frag_list;
......@@ -588,7 +580,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
ssn);
if (!frag)
goto err;
frag->has_ssn = 1;
/* Add the middle fragment to the first fragment's
* frag_list.
*/
......@@ -603,6 +595,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
SCTP_DATA_LAST_FRAG, ssn);
if (!frag)
goto err;
frag->has_ssn = 1;
/* Add the last fragment to the first fragment's frag_list. */
list_add_tail(&frag->frag_list, frag_list);
......@@ -632,7 +625,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
}
/*
* sctp_flush_outqueue - Try to flush an outqueue.
* sctp_outq_flush - Try to flush an outqueue.
*
* Description: Send everything in q which we legally can, subject to
* congestion limitations.
......@@ -641,7 +634,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
* locking concerns must be made. Today we use the sock lock to protect
* this function.
*/
int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
sctp_packet_t *packet;
sctp_packet_t singleton;
......@@ -660,7 +653,6 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
sctp_xmit_t status;
int error = 0;
int start_timer = 0;
sctp_ulpevent_t *event;
/* These transports have chunks to send. */
struct list_head transport_list;
......@@ -795,10 +787,8 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
(*q->config_output)(packet, vtag,
ecn_capable, ecne_handler);
retran:
error = sctp_flush_retran_queue(q,
packet,
rtx_timeout,
&start_timer);
error = sctp_outq_flush_rtx(q, packet,
rtx_timeout, &start_timer);
if (start_timer)
sctp_transport_reset_timers(transport);
......@@ -825,15 +815,14 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
*/
if (chunk->sinfo.sinfo_stream >=
asoc->c.sinit_num_ostreams) {
struct sctp_ulpevent *ev;
/* Generate a SEND FAILED event. */
event = sctp_ulpevent_make_send_failed(asoc,
chunk, SCTP_DATA_UNSENT,
SCTP_ERROR_INV_STRM,
GFP_ATOMIC);
if (event) {
sctp_ulpqueue_tail_event(&asoc->ulpq,
event);
}
ev = sctp_ulpevent_make_send_failed(asoc,
chunk, SCTP_DATA_UNSENT,
SCTP_ERROR_INV_STRM, GFP_ATOMIC);
if (ev)
sctp_ulpq_tail_event(&asoc->ulpq, ev);
/* Free the chunk. This chunk is not on any
* list yet, just free it.
......@@ -842,6 +831,12 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
continue;
}
/* Now do delayed assignment of SSN. This will
* probably change again when we start supporting
* large (> approximately 2^16) size messages.
*/
sctp_chunk_assign_ssn(chunk);
/* If there is a specified transport, use it.
* Otherwise, we want to use the active path.
*/
......@@ -890,7 +885,7 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
/* We could not append this chunk, so put
* the chunk back on the output queue.
*/
SCTP_DEBUG_PRINTK("sctp_flush_outqueue: could "
SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
"not transmit TSN: 0x%x, status: %d\n",
ntohl(chunk->subh.data_hdr->tsn),
status);
......@@ -978,12 +973,12 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
}
/* Set the various output handling callbacks. */
int sctp_outqueue_set_output_handlers(sctp_outqueue_t *q,
sctp_outqueue_ohandler_init_t init,
sctp_outqueue_ohandler_config_t config,
sctp_outqueue_ohandler_t append,
sctp_outqueue_ohandler_t build,
sctp_outqueue_ohandler_force_t force)
int sctp_outq_set_output_handlers(struct sctp_outq *q,
sctp_outq_ohandler_init_t init,
sctp_outq_ohandler_config_t config,
sctp_outq_ohandler_t append,
sctp_outq_ohandler_t build,
sctp_outq_ohandler_force_t force)
{
q->init_output = init;
q->config_output = config;
......@@ -1044,10 +1039,10 @@ static __u32 sctp_highest_new_tsn(sctp_sackhdr_t *sack,
/* This is where we REALLY process a SACK.
*
* Process the sack against the outqueue. Mostly, this just frees
* Process the SACK against the outqueue. Mostly, this just frees
* things off the transmitted queue.
*/
int sctp_sack_outqueue(sctp_outqueue_t *q, sctp_sackhdr_t *sack)
int sctp_outq_sack(struct sctp_outq *q, sctp_sackhdr_t *sack)
{
sctp_association_t *asoc = q->asoc;
sctp_transport_t *transport;
......@@ -1151,7 +1146,7 @@ int sctp_sack_outqueue(sctp_outqueue_t *q, sctp_sackhdr_t *sack)
}
/* Is the outqueue empty? */
int sctp_outqueue_is_empty(const sctp_outqueue_t *q)
int sctp_outq_is_empty(const struct sctp_outq *q)
{
return q->empty;
}
......@@ -1173,7 +1168,7 @@ int sctp_outqueue_is_empty(const sctp_outqueue_t *q)
* transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
* KEPT TSN6-TSN7, etc.
*/
static void sctp_check_transmitted(sctp_outqueue_t *q,
static void sctp_check_transmitted(struct sctp_outq *q,
struct list_head *transmitted_queue,
sctp_transport_t *transport,
sctp_sackhdr_t *sack,
......
......@@ -82,7 +82,7 @@ struct sock *sctp_get_ctl_sock(void)
}
/* Set up the proc fs entry for the SCTP protocol. */
void sctp_proc_init(void)
__init void sctp_proc_init(void)
{
if (!proc_net_sctp) {
struct proc_dir_entry *ent;
......@@ -95,7 +95,7 @@ void sctp_proc_init(void)
}
/* Clean up the proc fs entry for the SCTP protocol. */
void sctp_proc_exit(void)
__exit void sctp_proc_exit(void)
{
if (proc_net_sctp) {
proc_net_sctp = NULL;
......@@ -688,7 +688,7 @@ static void cleanup_sctp_mibs(void)
}
/* Initialize the universe into something sensible. */
int sctp_init(void)
__init int sctp_init(void)
{
int i;
int status = 0;
......@@ -750,13 +750,9 @@ int sctp_init(void)
/* Implementation specific variables. */
/* Initialize default stream count setup information.
* Note: today the stream accounting data structures are very
* fixed size, so one really does need to make sure that these have
* upper/lower limits when changing.
*/
sctp_proto.max_instreams = SCTP_MAX_STREAM;
sctp_proto.max_outstreams = SCTP_MAX_STREAM;
/* Initialize default stream count setup information. */
sctp_proto.max_instreams = SCTP_DEFAULT_INSTREAMS;
sctp_proto.max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
/* Allocate and initialize the association hash table. */
sctp_proto.assoc_hashsize = 4096;
......@@ -852,7 +848,7 @@ int sctp_init(void)
}
/* Exit handler for the SCTP protocol. */
void sctp_exit(void)
__exit void sctp_exit(void)
{
/* BUG. This should probably do something useful like clean
* up all the remaining associations and all that memory.
......@@ -889,4 +885,3 @@ module_exit(sctp_exit);
MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>");
MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
MODULE_LICENSE("GPL");
......@@ -82,12 +82,12 @@ static const sctp_supported_addrs_param_t sat_param = {
/* gcc 3.2 doesn't allow initialization of zero-length arrays. So the above
* structure is split and the address types array is initialized using a
* fixed length array.
* fixed length array.
*/
static const __u16 sat_addr_types[2] = {
SCTP_PARAM_IPV4_ADDRESS,
SCTP_V6(SCTP_PARAM_IPV6_ADDRESS,)
};
};
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
......@@ -540,7 +540,7 @@ sctp_chunk_t *sctp_make_datafrag_empty(sctp_association_t *asoc,
dp.stream = htons(sinfo->sinfo_stream);
dp.ppid = htonl(sinfo->sinfo_ppid);
dp.ssn = htons(ssn);
/* Set the flags for an unordered send. */
if (sinfo->sinfo_flags & MSG_UNORDERED)
flags |= SCTP_DATA_UNORDERED;
......@@ -552,7 +552,7 @@ sctp_chunk_t *sctp_make_datafrag_empty(sctp_association_t *asoc,
retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
nodata:
return retval;
}
......@@ -607,12 +607,12 @@ sctp_chunk_t *sctp_make_data_empty(sctp_association_t *asoc,
* ordered send and a new ssn is generated. The flags field is set
* in the inner routine - sctp_make_datafrag_empty().
*/
if (sinfo->sinfo_flags & MSG_UNORDERED) {
ssn = 0;
} else {
ssn = __sctp_association_get_next_ssn(asoc,
sinfo->sinfo_stream);
}
// if (sinfo->sinfo_flags & MSG_UNORDERED) {
ssn = 0;
// } else {
// ssn = __sctp_association_get_next_ssn(asoc,
// sinfo->sinfo_stream);
// }
return sctp_make_datafrag_empty(asoc, sinfo, data_len, flags, ssn);
}
......@@ -1013,6 +1013,7 @@ sctp_chunk_t *sctp_chunkify(struct sk_buff *skb, const sctp_association_t *asoc,
retval->asoc = (sctp_association_t *) asoc;
retval->num_times_sent = 0;
retval->has_tsn = 0;
retval->has_ssn = 0;
retval->rtt_in_progress = 0;
retval->sent_at = jiffies;
retval->singleton = 1;
......@@ -1214,6 +1215,29 @@ int sctp_user_addto_chunk(sctp_chunk_t *chunk, int len, struct iovec *data)
return err;
}
/* Helper function to assign an SSN if needed.  This assumes that both
 * the data_hdr and association have already been assigned.
 */
void sctp_chunk_assign_ssn(sctp_chunk_t *chunk)
{
	__u16 ssn;
	__u16 sid;

	/* Already stamped (e.g. fragments produced by sctp_fragment_chunk
	 * share one SSN) -- nothing to do.
	 */
	if (chunk->has_ssn)
		return;

	/* This is the last possible instant to assign a SSN. */
	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
		/* Unordered DATA carries no meaningful SSN. */
		ssn = 0;
	} else {
		/* The stream id in the DATA header is network byte order;
		 * convert to host order before using it as a stream index.
		 */
		sid = ntohs(chunk->subh.data_hdr->stream);
		ssn = htons(__sctp_association_get_next_ssn(chunk->asoc, sid));
	}

	chunk->subh.data_hdr->ssn = ssn;
	chunk->has_ssn = 1;
}
/* Helper function to assign a TSN if needed. This assumes that both
* the data_hdr and association have already been assigned.
*/
......@@ -1654,6 +1678,7 @@ int sctp_verify_init(const sctp_association_t *asoc,
/* Unpack the parameters in an INIT packet into an association.
* Returns 0 on failure, else success.
* FIXME: This is an association method.
*/
int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
const union sctp_addr *peer_addr,
......@@ -1710,6 +1735,12 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
ntohs(peer_init->init_hdr.num_inbound_streams);
}
if (asoc->c.sinit_max_instreams >
ntohs(peer_init->init_hdr.num_outbound_streams)) {
asoc->c.sinit_max_instreams =
ntohs(peer_init->init_hdr.num_outbound_streams);
}
/* Copy Initiation tag from INIT to VT_peer in cookie. */
asoc->c.peer_vtag = asoc->peer.i.init_tag;
......@@ -1738,6 +1769,21 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE,
asoc->peer.i.initial_tsn);
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
* The stream sequence number in all the streams shall start
* from 0 when the association is established. Also, when the
* stream sequence number reaches the value 65535 the next
* stream sequence number shall be set to 0.
*/
/* Allocate storage for the negotiated streams. */
asoc->ssnmap = sctp_ssnmap_new(asoc->peer.i.num_outbound_streams,
asoc->c.sinit_num_ostreams,
priority);
if (!asoc->ssnmap)
goto nomem_ssnmap;
/* ADDIP Section 4.1 ASCONF Chunk Procedures
*
* When an endpoint has an ASCONF signaled change to be sent to the
......@@ -1751,6 +1797,7 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1;
return 1;
nomem_ssnmap:
clean_up:
/* Release the transport structures. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
......
......@@ -296,7 +296,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_PURGE_OUTQUEUE:
sctp_outqueue_teardown(&asoc->outqueue);
sctp_outq_teardown(&asoc->outqueue);
break;
case SCTP_CMD_DELETE_TCB:
......@@ -395,9 +395,9 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
command->obj.ptr,
"ulpq:",
&asoc->ulpq);
sctp_ulpqueue_tail_data(&asoc->ulpq,
command->obj.ptr,
GFP_ATOMIC);
sctp_ulpq_tail_data(&asoc->ulpq,
command->obj.ptr,
GFP_ATOMIC);
break;
case SCTP_CMD_EVENT_ULP:
......@@ -407,14 +407,14 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
command->obj.ptr,
"ulpq:",
&asoc->ulpq);
sctp_ulpqueue_tail_event(&asoc->ulpq,
command->obj.ptr);
sctp_ulpq_tail_event(&asoc->ulpq,
command->obj.ptr);
break;
case SCTP_CMD_REPLY:
/* Send a chunk to our peer. */
error = sctp_push_outqueue(&asoc->outqueue,
command->obj.ptr);
error = sctp_outq_tail(&asoc->outqueue,
command->obj.ptr);
break;
case SCTP_CMD_SEND_PKT:
......@@ -432,7 +432,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_TRANSMIT:
/* Kick start transmission. */
error = sctp_flush_outqueue(&asoc->outqueue, 0);
error = sctp_outq_flush(&asoc->outqueue, 0);
break;
case SCTP_CMD_ECN_CE:
......@@ -599,7 +599,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_RTO_PENDING:
t = command->obj.transport;
t->rto_pending = 1;
t->rto_pending = 1;
break;
default:
......@@ -743,7 +743,7 @@ int sctp_gen_sack(sctp_association_t *asoc, int force, sctp_cmd_seq_t *commands)
asoc->peer.sack_needed = 0;
asoc->peer.next_dup_tsn = 0;
error = sctp_push_outqueue(&asoc->outqueue, sack);
error = sctp_outq_tail(&asoc->outqueue, sack);
/* Stop the SACK timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
......@@ -1095,7 +1095,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
* inside the cookie. In reality, this is only used for INIT-ACK processing
* since all other cases use "temporary" associations and can do all
* their work in statefuns directly.
* their work in statefuns directly.
*/
static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
sctp_association_t *asoc,
......@@ -1218,7 +1218,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
{
int err;
if (sctp_sack_outqueue(&asoc->outqueue, sackh)) {
if (sctp_outq_sack(&asoc->outqueue, sackh)) {
/* There are no more TSNs awaiting SACK. */
err = sctp_do_sm(SCTP_EVENT_T_OTHER,
SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
......@@ -1228,7 +1228,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
/* Windows may have opened, so we need
* to check if we have DATA to transmit
*/
err = sctp_flush_outqueue(&asoc->outqueue, 0);
err = sctp_outq_flush(&asoc->outqueue, 0);
}
return err;
......
......@@ -191,7 +191,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
int len;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, responding with an ABORT.
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_ootb(ep, asoc, type, arg, commands);
......@@ -506,7 +506,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
sctp_chunk_t *err_chk_p;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, responding with an ABORT.
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_ootb(ep, asoc, type, arg, commands);
......@@ -1337,7 +1337,7 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const sctp_endpoint_t *ep,
/* Unexpected COOKIE-ECHO handlerfor peer restart (Table 2, action 'A')
/* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
*
* Section 5.2.4
* A) In this case, the peer may have restarted.
......@@ -2030,7 +2030,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const sctp_endpoint_t *ep,
SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outqueue_is_empty(&asoc->outqueue)) {
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_shutdown_ack(ep, asoc, type,
arg, commands);
}
......@@ -3429,7 +3429,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(const sctp_endpoint_t *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outqueue_is_empty(&asoc->outqueue)) {
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
arg, commands);
}
......@@ -4203,7 +4203,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outqueue_is_empty(&asoc->outqueue)) {
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
arg, commands);
}
......
......@@ -869,13 +869,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock;
}
} else {
/* Check against the defaults. */
if (sinfo->sinfo_stream >=
sp->initmsg.sinit_num_ostreams) {
err = -EINVAL;
goto out_unlock;
}
/* Check against the requested. */
if (sinfo->sinfo_stream >=
sinit->sinit_num_ostreams) {
......@@ -916,14 +909,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
sinit->sinit_num_ostreams;
}
if (sinit->sinit_max_instreams) {
if (sinit->sinit_max_instreams <=
SCTP_MAX_STREAM) {
asoc->c.sinit_max_instreams =
sinit->sinit_max_instreams;
} else {
asoc->c.sinit_max_instreams =
SCTP_MAX_STREAM;
}
asoc->c.sinit_max_instreams =
sinit->sinit_max_instreams;
}
if (sinit->sinit_max_attempts) {
asoc->max_init_attempts
......
/* SCTP kernel reference Implementation
* Copyright (c) 2003 International Business Machines, Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
* These functions manipulate sctp SSN tracker.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#include <linux/types.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Total allocation needed for a map: the sctp_ssnmap header followed by
 * one __u16 SSN counter per inbound stream and per outbound stream.
 */
static inline size_t sctp_ssnmap_size(__u16 in, __u16 out)
{
	size_t nstreams = (size_t)in + (size_t)out;

	return sizeof(struct sctp_ssnmap) + nstreams * sizeof(__u16);
}
/* Create a new sctp_ssnmap with room for per-stream SSN state covering
 * 'in' inbound and 'out' outbound streams, allocated with 'priority'
 * (a GFP_* allocation flag).  Returns NULL on allocation failure.
 */
struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, int priority)
{
	struct sctp_ssnmap *retval;

	/* Header and both SSN arrays live in one contiguous allocation. */
	retval = kmalloc(sctp_ssnmap_size(in, out), priority);
	if (!retval)
		goto fail;

	if (!sctp_ssnmap_init(retval, in, out))
		goto fail_map;

	/* Mark heap-allocated so sctp_ssnmap_free() knows to kfree(). */
	retval->malloced = 1;
	SCTP_DBG_OBJCNT_INC(ssnmap);

	return retval;

fail_map:
	kfree(retval);
fail:
	return NULL;
}
/* Lay out a caller-provided block of memory as an ssnmap.  The __u16
 * SSN arrays for the inbound and outbound streams sit immediately after
 * the header, in that order.  Returns 'map'.
 */
struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
				     __u16 out)
{
	__u16 *ssns = (__u16 *)&map[1];

	/* Zero the header and both SSN arrays in one pass. */
	memset(map, 0x00, sctp_ssnmap_size(in, out));

	/* Inbound stream state starts directly after the header... */
	map->in.ssn = ssns;
	map->in.len = in;

	/* ...and outbound state follows the inbound array. */
	map->out.ssn = ssns + in;
	map->out.len = out;

	return map;
}
/* Reset every per-stream SSN counter, both directions, back to zero. */
void sctp_ssnmap_clear(struct sctp_ssnmap *map)
{
	size_t nstreams = map->in.len + map->out.len;

	/* The 'in' and 'out' arrays are contiguous (see
	 * sctp_ssnmap_init), so a single memset covers both.
	 */
	memset(map->in.ssn, 0x00, nstreams * sizeof(__u16));
}
/* Release a map, but only if we allocated it ourselves; maps embedded
 * in caller-owned storage are left for the caller to reclaim.
 */
void sctp_ssnmap_free(struct sctp_ssnmap *map)
{
	if (!map || !map->malloced)
		return;

	kfree(map);
	SCTP_DBG_OBJCNT_DEC(ssnmap);
}
......@@ -800,8 +800,8 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
/* Send a window update SACK if the rwnd has increased by at least the
* minimum of the association's PMTU and half of the receive buffer.
* The algorithm used is similar to the one described in Section 4.2.3.3
* of RFC 1122.
* The algorithm used is similar to the one described in
* Section 4.2.3.3 of RFC 1122.
*/
if ((asoc->state == SCTP_STATE_ESTABLISHED) &&
(asoc->rwnd > asoc->a_rwnd) &&
......@@ -819,7 +819,7 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
asoc->peer.sack_needed = 0;
asoc->peer.next_dup_tsn = 0;
sctp_push_outqueue(&asoc->outqueue, sack);
sctp_outq_tail(&asoc->outqueue, sack);
/* Stop the SACK timer. */
timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
......
......@@ -49,51 +49,39 @@
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
static inline sctp_ulpevent_t * sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq,
sctp_ulpevent_t *event);
static inline sctp_ulpevent_t *sctp_ulpqueue_order(sctp_ulpqueue_t *ulpq,
sctp_ulpevent_t *event);
static inline struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *);
static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
struct sctp_ulpevent *);
/* 1st Level Abstractions */
/* Create a new ULP queue. */
sctp_ulpqueue_t *sctp_ulpqueue_new(sctp_association_t *asoc,
__u16 inbound, int priority)
struct sctp_ulpq *sctp_ulpq_new(sctp_association_t *asoc, int priority)
{
sctp_ulpqueue_t *ulpq;
size_t size;
struct sctp_ulpq *ulpq;
/* Today, there is only a fixed size of storage needed for
* stream support, but make the interfaces acceptable for
* the future.
*/
size = sizeof(sctp_ulpqueue_t)+sctp_ulpqueue_storage_size(inbound);
ulpq = kmalloc(size, priority);
ulpq = kmalloc(sizeof(struct sctp_ulpq), priority);
if (!ulpq)
goto fail;
if (!sctp_ulpqueue_init(ulpq, asoc, inbound))
if (!sctp_ulpq_init(ulpq, asoc))
goto fail_init;
ulpq->malloced = 1;
return ulpq;
fail_init:
kfree(ulpq);
fail:
return NULL;
}
/* Initialize a ULP queue from a block of memory. */
sctp_ulpqueue_t *sctp_ulpqueue_init(sctp_ulpqueue_t *ulpq,
sctp_association_t *asoc,
__u16 inbound)
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
sctp_association_t *asoc)
{
memset(ulpq,
sizeof(sctp_ulpqueue_t) + sctp_ulpqueue_storage_size(inbound),
0x00);
memset(ulpq, sizeof(struct sctp_ulpq), 0x00);
ulpq->asoc = asoc;
spin_lock_init(&ulpq->lock);
skb_queue_head_init(&ulpq->reasm);
skb_queue_head_init(&ulpq->lobby);
ulpq->malloced = 0;
......@@ -101,38 +89,39 @@ sctp_ulpqueue_t *sctp_ulpqueue_init(sctp_ulpqueue_t *ulpq,
return ulpq;
}
/* Flush the reassembly and ordering queues. */
void sctp_ulpqueue_flush(sctp_ulpqueue_t *ulpq)
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
struct sk_buff *skb;
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
while ((skb = skb_dequeue(&ulpq->lobby))) {
event = (sctp_ulpevent_t *) skb->cb;
event = (struct sctp_ulpevent *) skb->cb;
sctp_ulpevent_free(event);
}
while ((skb = skb_dequeue(&ulpq->reasm))) {
event = (sctp_ulpevent_t *) skb->cb;
event = (struct sctp_ulpevent *) skb->cb;
sctp_ulpevent_free(event);
}
}
/* Dispose of a ulpqueue. */
void sctp_ulpqueue_free(sctp_ulpqueue_t *ulpq)
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
sctp_ulpqueue_flush(ulpq);
sctp_ulpq_flush(ulpq);
if (ulpq->malloced)
kfree(ulpq);
}
/* Process an incoming DATA chunk. */
int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *ulpq, sctp_chunk_t *chunk,
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
int priority)
{
struct sk_buff_head temp;
sctp_data_chunk_t *hdr;
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;
......@@ -147,7 +136,7 @@ int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *ulpq, sctp_chunk_t *chunk,
return -ENOMEM;
/* Do reassembly if needed. */
event = sctp_ulpqueue_reasm(ulpq, event);
event = sctp_ulpq_reasm(ulpq, event);
/* Do ordering if needed. */
if (event) {
......@@ -155,18 +144,18 @@ int sctp_ulpqueue_tail_data(sctp_ulpqueue_t *ulpq, sctp_chunk_t *chunk,
skb_queue_head_init(&temp);
skb_queue_tail(&temp, event->parent);
event = sctp_ulpqueue_order(ulpq, event);
event = sctp_ulpq_order(ulpq, event);
}
/* Send event to the ULP. */
if (event)
sctp_ulpqueue_tail_event(ulpq, event);
sctp_ulpq_tail_event(ulpq, event);
return 0;
}
/* Add a new event for propogation to the ULP. */
int sctp_ulpqueue_tail_event(sctp_ulpqueue_t *ulpq, sctp_ulpevent_t *event)
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
struct sock *sk = ulpq->asoc->base.sk;
......@@ -202,20 +191,18 @@ int sctp_ulpqueue_tail_event(sctp_ulpqueue_t *ulpq, sctp_ulpevent_t *event)
/* 2nd Level Abstractions */
/* Helper function to store chunks that need to be reassembled. */
static inline void sctp_ulpqueue_store_reasm(sctp_ulpqueue_t *ulpq, sctp_ulpevent_t *event)
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent;
struct sctp_ulpevent *cevent;
__u32 tsn, ctsn;
unsigned long flags __attribute ((unused));
tsn = event->sndrcvinfo.sinfo_tsn;
sctp_spin_lock_irqsave(&ulpq->reasm.lock, flags);
/* Find the right place in this list. We store them by TSN. */
sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
cevent = (sctp_ulpevent_t *)pos->cb;
cevent = (struct sctp_ulpevent *)pos->cb;
ctsn = cevent->sndrcvinfo.sinfo_tsn;
if (TSN_lt(tsn, ctsn))
......@@ -227,8 +214,6 @@ static inline void sctp_ulpqueue_store_reasm(sctp_ulpqueue_t *ulpq, sctp_ulpeven
__skb_insert(event->parent, pos->prev, pos, &ulpq->reasm);
else
__skb_queue_tail(&ulpq->reasm, event->parent);
sctp_spin_unlock_irqrestore(&ulpq->reasm.lock, flags);
}
/* Helper function to return an event corresponding to the reassembled
......@@ -238,10 +223,10 @@ static inline void sctp_ulpqueue_store_reasm(sctp_ulpqueue_t *ulpq, sctp_ulpeven
* payload was fragmented on the way and ip had to reassemble them.
* We add the rest of skb's to the first skb's fraglist.
*/
static inline sctp_ulpevent_t *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
{
struct sk_buff *pos;
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
struct sk_buff *pnext, *last;
struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
......@@ -249,7 +234,7 @@ static inline sctp_ulpevent_t *sctp_make_reassembled_event(struct sk_buff *f_fra
pos = f_frag->next;
/* Get the last skb in the f_frag's frag_list if present. */
for (last = list; list; last = list,list = list->next);
for (last = list; list; last = list, list = list->next);
/* Add the list of remaining fragments to the first fragments
* frag_list.
......@@ -287,13 +272,12 @@ static inline sctp_ulpevent_t *sctp_make_reassembled_event(struct sk_buff *f_fra
/* Helper function to check if an incoming chunk has filled up the last
* missing fragment in a SCTP datagram and return the corresponding event.
*/
static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_t *ulpq)
static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent;
struct sk_buff *first_frag = NULL;
__u32 ctsn, next_tsn;
unsigned long flags __attribute ((unused));
sctp_ulpevent_t *retval = NULL;
/* Initialized to 0 just to avoid compiler warning message. Will
......@@ -302,8 +286,6 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_
*/
next_tsn = 0;
sctp_spin_lock_irqsave(&ulpq->reasm.lock, flags);
/* The chunks are held in the reasm queue sorted by TSN.
* Walk through the queue sequentially and look for a sequence of
* fragmented chunks that complete a datagram.
......@@ -345,7 +327,6 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_
if (retval)
break;
}
sctp_spin_unlock_irqrestore(&ulpq->reasm.lock, flags);
return retval;
}
......@@ -353,7 +334,7 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_retrieve_reassembled(sctp_ulpqueue_
/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
* need reassembling.
*/
static inline sctp_ulpevent_t *sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq,
static inline sctp_ulpevent_t *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
{
sctp_ulpevent_t *retval = NULL;
......@@ -368,8 +349,8 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq,
if (SCTP_DATA_NOT_FRAG == (event->chunk_flags & SCTP_DATA_FRAG_MASK))
return event;
sctp_ulpqueue_store_reasm(ulpq, event);
retval = sctp_ulpqueue_retrieve_reassembled(ulpq);
sctp_ulpq_store_reasm(ulpq, event);
retval = sctp_ulpq_retrieve_reassembled(ulpq);
return retval;
}
......@@ -377,20 +358,20 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_reasm(sctp_ulpqueue_t *ulpq,
/* Helper function to gather skbs that have possibly become
* ordered by an an incoming chunk.
*/
static inline void sctp_ulpqueue_retrieve_ordered(sctp_ulpqueue_t *ulpq,
sctp_ulpevent_t *event)
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
{
struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent;
struct sctp_ulpevent *cevent;
struct sctp_stream *in;
__u16 sid, csid;
__u16 ssn, cssn;
unsigned long flags __attribute ((unused));
sid = event->sndrcvinfo.sinfo_stream;
ssn = event->sndrcvinfo.sinfo_ssn;
in = &ulpq->asoc->ssnmap->in;
/* We are holding the chunks by stream, by SSN. */
sctp_spin_lock_irqsave(&ulpq->lobby.lock, flags);
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
cevent = (sctp_ulpevent_t *) pos->cb;
csid = cevent->sndrcvinfo.sinfo_stream;
......@@ -404,32 +385,31 @@ static inline void sctp_ulpqueue_retrieve_ordered(sctp_ulpqueue_t *ulpq,
if (csid < sid)
continue;
if (cssn != ulpq->ssn[sid])
if (cssn != sctp_ssn_peek(in, sid))
break;
ulpq->ssn[sid]++;
/* Found it, so mark in the ssnmap. */
sctp_ssn_next(in, sid);
__skb_unlink(pos, pos->list);
/* Attach all gathered skbs to the event. */
__skb_queue_tail(event->parent->list, pos);
}
sctp_spin_unlock_irqrestore(&ulpq->lobby.lock, flags);
}
/* Helper function to store chunks needing ordering. */
static inline void sctp_ulpqueue_store_ordered(sctp_ulpqueue_t *ulpq,
sctp_ulpevent_t *event)
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
{
struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent;
__u16 sid, csid;
__u16 ssn, cssn;
unsigned long flags __attribute ((unused));
sid = event->sndrcvinfo.sinfo_stream;
ssn = event->sndrcvinfo.sinfo_ssn;
sctp_spin_lock_irqsave(&ulpq->lobby.lock, flags);
/* Find the right place in this list. We store them by
* stream ID and then by SSN.
......@@ -450,14 +430,13 @@ static inline void sctp_ulpqueue_store_ordered(sctp_ulpqueue_t *ulpq,
__skb_insert(event->parent, pos->prev, pos, &ulpq->lobby);
else
__skb_queue_tail(&ulpq->lobby, event->parent);
sctp_spin_unlock_irqrestore(&ulpq->lobby.lock, flags);
}
static inline sctp_ulpevent_t *sctp_ulpqueue_order(sctp_ulpqueue_t *ulpq,
sctp_ulpevent_t *event)
static inline sctp_ulpevent_t *sctp_ulpq_order(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
{
__u16 sid, ssn;
struct sctp_stream *in;
/* FIXME: We should be using some new chunk structure here
* instead of carrying chunk fields in the event structure.
......@@ -472,23 +451,24 @@ static inline sctp_ulpevent_t *sctp_ulpqueue_order(sctp_ulpqueue_t *ulpq,
/* Note: The stream ID must be verified before this routine. */
sid = event->sndrcvinfo.sinfo_stream;
ssn = event->sndrcvinfo.sinfo_ssn;
in = &ulpq->asoc->ssnmap->in;
/* Is this the expected SSN for this stream ID? */
if (ssn != ulpq->ssn[sid]) {
if (ssn != sctp_ssn_peek(in, sid)) {
/* We've received something out of order, so find where it
* needs to be placed. We order by stream and then by SSN.
*/
sctp_ulpqueue_store_ordered(ulpq, event);
sctp_ulpq_store_ordered(ulpq, event);
return NULL;
}
/* Mark that the next chunk has been found. */
ulpq->ssn[sid]++;
sctp_ssn_next(in, sid);
/* Go find any other chunks that were waiting for
* ordering.
*/
sctp_ulpqueue_retrieve_ordered(ulpq, event);
sctp_ulpq_retrieve_ordered(ulpq, event);
return event;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment