Commit 088e723f authored by Sridhar Samudrala

Merge

parents 9f58fa60 44310455
......@@ -68,7 +68,6 @@ typedef enum {
SCTP_CMD_INIT_RESTART, /* High level, do init timer work. */
SCTP_CMD_INIT_FAILED, /* High level, do init failure work. */
SCTP_CMD_REPORT_DUP, /* Report a duplicate TSN. */
SCTP_CMD_REPORT_BIGGAP, /* Narc on a TSN (it was too high). */
SCTP_CMD_STRIKE, /* Mark a strike against a transport. */
SCTP_CMD_TRANSMIT, /* Transmit the outqueue. */
SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */
......@@ -86,7 +85,8 @@ typedef enum {
SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
SCTP_CMD_SETUP_T2, /* Hi-level, setup T2-shutdown parms. */
SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. */
SCTP_CMD_PART_DELIVER, /* Partial data delivery considerations. */
SCTP_CMD_RENEGE, /* Renege data on an association. */
SCTP_CMD_LAST
} sctp_verb_t;
......@@ -115,7 +115,7 @@ typedef union {
struct sctp_transport *transport;
sctp_bind_addr_t *bp;
sctp_init_chunk_t *init;
sctp_ulpevent_t *ulpevent;
struct sctp_ulpevent *ulpevent;
sctp_packet_t *packet;
sctp_sackhdr_t *sackh;
} sctp_arg_t;
......@@ -163,7 +163,7 @@ SCTP_ARG_CONSTRUCTOR(ASOC, sctp_association_t *, asoc)
SCTP_ARG_CONSTRUCTOR(TRANSPORT, struct sctp_transport *, transport)
SCTP_ARG_CONSTRUCTOR(BA, sctp_bind_addr_t *, bp)
SCTP_ARG_CONSTRUCTOR(PEER_INIT, sctp_init_chunk_t *, init)
SCTP_ARG_CONSTRUCTOR(ULPEVENT, sctp_ulpevent_t *, ulpevent)
SCTP_ARG_CONSTRUCTOR(ULPEVENT, struct sctp_ulpevent *, ulpevent)
SCTP_ARG_CONSTRUCTOR(PACKET, sctp_packet_t *, packet)
SCTP_ARG_CONSTRUCTOR(SACKH, sctp_sackhdr_t *, sackh)
......
......@@ -121,9 +121,10 @@
/*
* sctp_protocol.c
*/
extern sctp_protocol_t sctp_proto;
extern struct sctp_protocol sctp_proto;
extern struct sock *sctp_get_ctl_sock(void);
extern int sctp_copy_local_addr_list(sctp_protocol_t *, sctp_bind_addr_t *,
extern int sctp_copy_local_addr_list(struct sctp_protocol *,
struct sctp_bind_addr *,
sctp_scope_t, int priority, int flags);
extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
......@@ -214,6 +215,7 @@ DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics);
#define SCTP_INC_STATS(field) SNMP_INC_STATS(sctp_statistics, field)
#define SCTP_INC_STATS_BH(field) SNMP_INC_STATS_BH(sctp_statistics, field)
#define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field)
#define SCTP_DEC_STATS(field) SNMP_DEC_STATS(sctp_statistics, field)
/* Determine if this is a valid kernel address. */
static inline int sctp_is_valid_kaddr(unsigned long addr)
......@@ -321,7 +323,8 @@ static inline int sctp_ipv6_addr_type(const struct in6_addr *addr)
return ipv6_addr_type((struct in6_addr*) addr);
}
#define SCTP_SAT_LEN (sizeof(sctp_paramhdr_t) + 2 * sizeof(__u16))
/* Size of Supported Address Parameter for 'x' address types. */
#define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
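The Supported Address Types parameter length is now computed from the number of address types rather than hard-coded for two. A minimal userspace sketch of the arithmetic, using a stand-in parameter header rather than the kernel's struct sctp_paramhdr:

/* Userspace sketch only: mimics the SCTP_SAT_LEN(x) arithmetic with a
 * stand-in parameter header; the kernel struct sctp_paramhdr is not used here.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_paramhdr { uint16_t type; uint16_t length; };	/* stand-in */

#define FAKE_SAT_LEN(x) (sizeof(struct fake_paramhdr) + (x) * sizeof(uint16_t))

int main(void)
{
	/* An IPv4-only socket advertises one type, an IPv6 socket two. */
	printf("SAT length, 1 type:  %zu bytes\n", FAKE_SAT_LEN(1));	/* 6 */
	printf("SAT length, 2 types: %zu bytes\n", FAKE_SAT_LEN(2));	/* 8 */
	return 0;
}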
/* Note: These V6 macros are obsolescent. */
/* Use this macro to enclose code fragments which are V6-dependent. */
......@@ -347,25 +350,10 @@ static inline sctp_assoc_t sctp_assoc2id(const sctp_association_t *asoc)
return (sctp_assoc_t) asoc;
}
/* Look up the association by its id. */
static inline sctp_association_t *sctp_id2assoc(const struct sock *sk, sctp_assoc_t id)
{
sctp_association_t *asoc = NULL;
/* First, verify that this is a kernel address. */
if (sctp_is_valid_kaddr((unsigned long) id)) {
sctp_association_t *temp = (sctp_association_t *) id;
/* Verify that this _is_ an sctp_association_t
* data structure and if so, that the socket matches.
*/
if ((SCTP_ASSOC_EYECATCHER == temp->eyecatcher) &&
(temp->base.sk == sk))
asoc = temp;
}
sctp_association_t *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
return asoc;
}
/* A macro to walk a list of skbs. */
#define sctp_skb_for_each(pos, head, tmp) \
......@@ -493,7 +481,7 @@ extern void sctp_put_port(struct sock *sk);
/* Static inline functions. */
/* Return the SCTP protocol structure. */
static inline sctp_protocol_t *sctp_get_protocol(void)
static inline struct sctp_protocol *sctp_get_protocol(void)
{
return &sctp_proto;
}
......@@ -522,21 +510,21 @@ static inline int ipver2af(__u8 ipver)
/* This is the hash function for the SCTP port hash table. */
static inline int sctp_phashfn(__u16 lport)
{
sctp_protocol_t *sctp_proto = sctp_get_protocol();
struct sctp_protocol *sctp_proto = sctp_get_protocol();
return (lport & (sctp_proto->port_hashsize - 1));
}
/* This is the hash function for the endpoint hash table. */
static inline int sctp_ep_hashfn(__u16 lport)
{
sctp_protocol_t *sctp_proto = sctp_get_protocol();
struct sctp_protocol *sctp_proto = sctp_get_protocol();
return (lport & (sctp_proto->ep_hashsize - 1));
}
/* This is the hash function for the association hash table. */
static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport)
{
sctp_protocol_t *sctp_proto = sctp_get_protocol();
struct sctp_protocol *sctp_proto = sctp_get_protocol();
int h = (lport << 16) + rport;
h ^= h>>8;
return (h & (sctp_proto->assoc_hashsize - 1));
......@@ -548,7 +536,7 @@ static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport)
*/
static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
{
sctp_protocol_t *sctp_proto = sctp_get_protocol();
struct sctp_protocol *sctp_proto = sctp_get_protocol();
int h = (lport << 16) + rport;
h ^= vtag;
return (h & (sctp_proto->assoc_hashsize-1));
......
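The port, endpoint and association lookups above all mask a mixed value into a power-of-two table. A standalone sketch of the association hash, with a made-up table size standing in for sctp_proto->assoc_hashsize:

/* Standalone sketch of the masking hash used above; the table size is a
 * made-up stand-in for sctp_proto->assoc_hashsize (assumed power of two).
 */
#include <stdint.h>
#include <stdio.h>

static int sketch_assoc_hashfn(uint16_t lport, uint16_t rport, int hashsize)
{
	int h = (lport << 16) + rport;

	h ^= h >> 8;
	return h & (hashsize - 1);
}

int main(void)
{
	int assoc_hashsize = 1024;	/* assumption, not the kernel's value */

	printf("bucket = %d\n", sketch_assoc_hashfn(5000, 7000, assoc_hashsize));
	return 0;
}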
......@@ -2,7 +2,7 @@
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001-2002 International Business Machines Corp.
* Copyright (c) 2001-2003 International Business Machines Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
......@@ -73,27 +73,24 @@ union sctp_addr {
/* Forward declarations for data structures. */
struct sctp_protocol;
struct SCTP_endpoint;
struct SCTP_association;
struct sctp_endpoint;
struct sctp_association;
struct sctp_transport;
struct SCTP_packet;
struct SCTP_chunk;
struct SCTP_inqueue;
struct sctp_packet;
struct sctp_chunk;
struct sctp_inq;
struct sctp_outq;
struct SCTP_bind_addr;
struct sctp_bind_addr;
struct sctp_ulpq;
struct sctp_opt;
struct sctp_endpoint_common;
struct sctp_ssnmap;
typedef struct sctp_protocol sctp_protocol_t;
typedef struct SCTP_endpoint sctp_endpoint_t;
typedef struct SCTP_association sctp_association_t;
typedef struct SCTP_packet sctp_packet_t;
typedef struct SCTP_chunk sctp_chunk_t;
typedef struct SCTP_inqueue sctp_inqueue_t;
typedef struct SCTP_bind_addr sctp_bind_addr_t;
typedef struct sctp_opt sctp_opt_t;
typedef struct sctp_endpoint sctp_endpoint_t;
typedef struct sctp_association sctp_association_t;
typedef struct sctp_packet sctp_packet_t;
typedef struct sctp_chunk sctp_chunk_t;
typedef struct sctp_bind_addr sctp_bind_addr_t;
typedef struct sctp_endpoint_common sctp_endpoint_common_t;
#include <net/sctp/tsnmap.h>
......@@ -218,13 +215,13 @@ struct sctp_af {
int optname,
char *optval,
int *optlen);
struct dst_entry *(*get_dst) (sctp_association_t *asoc,
struct dst_entry *(*get_dst) (struct sctp_association *asoc,
union sctp_addr *daddr,
union sctp_addr *saddr);
void (*get_saddr) (sctp_association_t *asoc,
void (*get_saddr) (struct sctp_association *asoc,
struct dst_entry *dst,
union sctp_addr *daddr,
union sctp_addr *saddr);
void (*copy_addrlist) (struct list_head *,
struct net_device *);
void (*dst_saddr) (union sctp_addr *saddr,
......@@ -257,13 +254,14 @@ int sctp_register_af(struct sctp_af *);
/* Protocol family functions. */
struct sctp_pf {
void (*event_msgname)(sctp_ulpevent_t *, char *, int *);
void (*event_msgname)(struct sctp_ulpevent *, char *, int *);
void (*skb_msgname) (struct sk_buff *, char *, int *);
int (*af_supported) (sa_family_t);
int (*cmp_addr) (const union sctp_addr *,
const union sctp_addr *,
struct sctp_opt *);
int (*bind_verify) (struct sctp_opt *, union sctp_addr *);
int (*supported_addrs)(const struct sctp_opt *, __u16 *);
struct sctp_af *af;
};
......@@ -279,6 +277,9 @@ struct sctp_opt {
/* What kind of a socket is this? */
sctp_socket_type_t type;
/* PF_ family specific functions. */
struct sctp_pf *pf;
/* What is our base endpointer? */
sctp_endpoint_t *ep;
......@@ -292,7 +293,10 @@ struct sctp_opt {
__u32 autoclose;
__u8 nodelay;
__u8 disable_fragments;
struct sctp_pf *pf;
__u8 pd_mode;
/* Receive to here while partial delivery is in effect. */
struct sk_buff_head pd_lobby;
};
......@@ -362,8 +366,6 @@ typedef struct sctp_signed_cookie {
sctp_cookie_t c;
} sctp_signed_cookie_t;
/* This is another convenience type to allocate memory for address
* params for the maximum size and pass such structures around
* internally.
......@@ -452,7 +454,7 @@ static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id)
* As a matter of convenience, we remember the SCTP common header for
* each chunk as well as a few other header pointers...
*/
struct SCTP_chunk {
struct sctp_chunk {
/* These first three elements MUST PRECISELY match the first
* three elements of struct sk_buff. This allows us to reuse
* all the skb_* queue management functions.
......@@ -562,7 +564,7 @@ typedef sctp_chunk_t *(sctp_packet_phandler_t)(sctp_association_t *);
/* This structure holds lists of chunks as we are assembling for
* transmission.
*/
struct SCTP_packet {
struct sctp_packet {
/* These are the SCTP header values (host order) for the packet. */
__u16 source_port;
__u16 destination_port;
......@@ -814,8 +816,8 @@ unsigned long sctp_transport_timeout(struct sctp_transport *);
/* This is the structure we use to queue packets as they come into
* SCTP. We write packets to it and read chunks from it.
*/
struct SCTP_inqueue {
/* This is actually a queue of sctp_chunk_t each
struct sctp_inq {
/* This is actually a queue of sctp_chunk each
* containing a partially decoded packet.
*/
struct sk_buff_head in;
......@@ -832,13 +834,12 @@ struct SCTP_inqueue {
int malloced; /* Is this structure kfree()able? */
};
sctp_inqueue_t *sctp_inqueue_new(void);
void sctp_inqueue_init(sctp_inqueue_t *);
void sctp_inqueue_free(sctp_inqueue_t *);
void sctp_push_inqueue(sctp_inqueue_t *, sctp_chunk_t *packet);
sctp_chunk_t *sctp_pop_inqueue(sctp_inqueue_t *);
void sctp_inqueue_set_th_handler(sctp_inqueue_t *,
void (*)(void *), void *);
struct sctp_inq *sctp_inq_new(void);
void sctp_inq_init(struct sctp_inq *);
void sctp_inq_free(struct sctp_inq *);
void sctp_inq_push(struct sctp_inq *, sctp_chunk_t *packet);
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
/* This is the structure we use to hold outbound chunks. You push
* chunks in and they automatically pop out the other end as bundled
......@@ -922,7 +923,7 @@ void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
/* These bind address data fields common between endpoints and associations */
struct SCTP_bind_addr {
struct sctp_bind_addr {
/* RFC 2960 12.1 Parameters necessary for the SCTP instance
*
......@@ -1011,7 +1012,7 @@ struct sctp_endpoint_common {
struct sock *sk;
/* This is where we receive inbound chunks. */
sctp_inqueue_t inqueue;
struct sctp_inq inqueue;
/* This substructure includes the defining parameters of the
* endpoint:
......@@ -1044,7 +1045,7 @@ struct sctp_endpoint_common {
* off one of these.
*/
struct SCTP_endpoint {
struct sctp_endpoint {
/* Common substructure for endpoint and association. */
sctp_endpoint_common_t base;
......@@ -1095,8 +1096,9 @@ static inline sctp_endpoint_t *sctp_ep(sctp_endpoint_common_t *base)
}
/* These are function signatures for manipulating endpoints. */
sctp_endpoint_t *sctp_endpoint_new(sctp_protocol_t *, struct sock *, int);
sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *, sctp_protocol_t *,
sctp_endpoint_t *sctp_endpoint_new(struct sctp_protocol *, struct sock *, int);
sctp_endpoint_t *sctp_endpoint_init(struct sctp_endpoint *,
struct sctp_protocol *,
struct sock *, int priority);
void sctp_endpoint_free(sctp_endpoint_t *);
void sctp_endpoint_put(sctp_endpoint_t *);
......@@ -1108,7 +1110,6 @@ sctp_association_t *sctp_endpoint_lookup_assoc(const sctp_endpoint_t *ep,
int sctp_endpoint_is_peeled_off(sctp_endpoint_t *, const union sctp_addr *);
sctp_endpoint_t *sctp_endpoint_is_match(sctp_endpoint_t *,
const union sctp_addr *);
int sctp_has_association(const union sctp_addr *laddr,
const union sctp_addr *paddr);
......@@ -1140,7 +1141,7 @@ __u32 sctp_generate_tsn(const sctp_endpoint_t *ep);
/* Here we have information about each individual association. */
struct SCTP_association {
struct sctp_association {
/* A base structure common to endpoint and association.
* In this context, it represents the associations's view
......@@ -1256,19 +1257,11 @@ struct SCTP_association {
* used in the bulk of the text. This value is hidden
* in tsn_map--we get it by calling sctp_tsnmap_get_ctsn().
*/
sctp_tsnmap_t tsn_map;
struct sctp_tsnmap tsn_map;
__u8 _map[sctp_tsnmap_storage_size(SCTP_TSN_MAP_SIZE)];
/* We record duplicate TSNs here. We clear this after
* every SACK.
* FIXME: We should move this into the tsnmap? --jgrimm
*/
sctp_dup_tsn_t dup_tsns[SCTP_MAX_DUP_TSNS];
int next_dup_tsn;
/* Do we need to sack the peer? */
uint8_t sack_needed;
__u8 sack_needed;
/* These are capabilities which our peer advertised. */
__u8 ecn_capable; /* Can peer do ECN? */
__u8 ipv4_address; /* Peer understands IPv4 addresses? */
......@@ -1425,7 +1418,10 @@ struct SCTP_association {
struct {
__u16 stream;
__u16 flags;
__u32 ppid;
__u32 context;
__u32 timetolive;
} defaults;
/* This tracks outbound ssn for a given stream. */
......@@ -1583,12 +1579,13 @@ void sctp_association_put(sctp_association_t *);
void sctp_association_hold(sctp_association_t *);
struct sctp_transport *sctp_assoc_choose_shutdown_transport(sctp_association_t *);
void sctp_assoc_update_retran_path(sctp_association_t *);
struct sctp_transport *sctp_assoc_lookup_paddr(const sctp_association_t *,
const union sctp_addr *);
struct sctp_transport *sctp_assoc_add_peer(sctp_association_t *,
const union sctp_addr *address,
const int priority);
void sctp_assoc_control_transport(sctp_association_t *,
void sctp_assoc_control_transport(struct sctp_association *,
struct sctp_transport *,
sctp_transport_cmd_t, sctp_sn_error_t);
struct sctp_transport *sctp_assoc_lookup_tsn(sctp_association_t *, __u32);
......@@ -1598,14 +1595,14 @@ struct sctp_transport *sctp_assoc_is_match(sctp_association_t *,
void sctp_assoc_migrate(sctp_association_t *, struct sock *);
void sctp_assoc_update(sctp_association_t *dst, sctp_association_t *src);
__u32 __sctp_association_get_next_tsn(sctp_association_t *);
__u32 __sctp_association_get_tsn_block(sctp_association_t *, int);
__u16 __sctp_association_get_next_ssn(sctp_association_t *, __u16 sid);
void sctp_assoc_sync_pmtu(sctp_association_t *);
void sctp_assoc_rwnd_increase(sctp_association_t *, int);
void sctp_assoc_rwnd_decrease(sctp_association_t *, int);
__u32 sctp_association_get_next_tsn(struct sctp_association *);
__u32 sctp_association_get_tsn_block(struct sctp_association *, int);
void sctp_assoc_sync_pmtu(struct sctp_association *);
void sctp_assoc_rwnd_increase(struct sctp_association *, int);
void sctp_assoc_rwnd_decrease(struct sctp_association *, int);
void sctp_assoc_set_primary(struct sctp_association *,
struct sctp_transport *);
int sctp_assoc_set_bind_addr_from_ep(sctp_association_t *, int);
int sctp_assoc_set_bind_addr_from_cookie(sctp_association_t *,
sctp_cookie_t *, int);
......
/* SCTP kernel reference Implementation Copyright (C) 1999-2001
* Cisco, Motorola, Intel, and International Business Machines Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
* These are the definitions needed for the tsnmap type. The tsnmap is used
* to track out of order TSNs received.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to one of the
* following email addresses:
*
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
*
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
......@@ -38,8 +43,6 @@
#ifndef __sctp_tsnmap_h__
#define __sctp_tsnmap_h__
/* RFC 2960 12.2 Parameters necessary per association (i.e. the TCB)
* Mapping An array of bits or bytes indicating which out of
* Array order TSN's have been received (relative to the
......@@ -48,9 +51,7 @@
* will be set to all zero. This structure may be
* in the form of a circular buffer or bit array.
*/
typedef struct sctp_tsnmap {
struct sctp_tsnmap {
/* This array counts the number of chunks with each TSN.
* It points at one of the two buffers with which we will
* ping-pong between.
......@@ -93,25 +94,30 @@ typedef struct sctp_tsnmap {
/* This is the highest TSN we've marked. */
__u32 max_tsn_seen;
/* No. of data chunks pending receipt. used by SCTP_STATUS sockopt */
/* Data chunks pending receipt. used by SCTP_STATUS sockopt */
__u16 pending_data;
/* We record duplicate TSNs here. We clear this after
* every SACK. Store up to SCTP_MAX_DUP_TSNS worth of
* information.
*/
__u32 dup_tsns[SCTP_MAX_DUP_TSNS];
__u16 num_dup_tsns;
int malloced;
__u8 raw_map[0];
} sctp_tsnmap_t;
};
typedef struct sctp_tsnmap_iter {
struct sctp_tsnmap_iter {
__u32 start;
} sctp_tsnmap_iter_t;
};
/* Create a new tsnmap. */
sctp_tsnmap_t *sctp_tsnmap_new(__u16 len, __u32 initial_tsn,
int priority);
struct sctp_tsnmap *sctp_tsnmap_new(__u16 len, __u32 init_tsn, int priority);
/* Dispose of a tsnmap. */
void sctp_tsnmap_free(sctp_tsnmap_t *map);
void sctp_tsnmap_free(struct sctp_tsnmap *);
/* This macro assists in creation of external storage for variable length
* internal buffers. We double allocate so the overflow map works.
......@@ -119,9 +125,8 @@ void sctp_tsnmap_free(sctp_tsnmap_t *map);
#define sctp_tsnmap_storage_size(count) (sizeof(__u8) * (count) * 2)
/* Initialize a block of memory as a tsnmap. */
sctp_tsnmap_t *sctp_tsnmap_init(sctp_tsnmap_t *map, __u16 len, __u32 initial_tsn);
struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *, __u16 len,
__u32 initial_tsn);
/* Test the tracking state of this TSN.
* Returns:
......@@ -129,31 +134,53 @@ sctp_tsnmap_t *sctp_tsnmap_init(sctp_tsnmap_t *map, __u16 len, __u32 initial_tsn
* >0 if the TSN has been seen (duplicate)
* <0 if the TSN is invalid (too large to track)
*/
int sctp_tsnmap_check(const sctp_tsnmap_t *map, __u32 tsn);
int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
/* Mark this TSN as seen. */
void sctp_tsnmap_mark(sctp_tsnmap_t *map, __u32 tsn);
void sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
/* Retrieve the Cumulative TSN ACK Point. */
__u32 sctp_tsnmap_get_ctsn(const sctp_tsnmap_t *map);
__u32 sctp_tsnmap_get_ctsn(const struct sctp_tsnmap *);
/* Retrieve the highest TSN we've seen. */
__u32 sctp_tsnmap_get_max_tsn_seen(const sctp_tsnmap_t *map);
__u32 sctp_tsnmap_get_max_tsn_seen(const struct sctp_tsnmap *);
/* How many Duplicate TSNs are stored? */
static inline __u16 sctp_tsnmap_num_dups(struct sctp_tsnmap *map)
{
return map->num_dup_tsns;
}
/* Return pointer to duplicate tsn array as needed by SACK. */
static inline __u32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map)
{
map->num_dup_tsns = 0;
return map->dup_tsns;
}
/* Mark a duplicate TSN. Note: limit the storage of duplicate TSN
* information.
*/
static inline void sctp_tsnmap_mark_dup(struct sctp_tsnmap *map, __u32 tsn)
{
if (map->num_dup_tsns < SCTP_MAX_DUP_TSNS)
map->dup_tsns[map->num_dup_tsns++] = htonl(tsn);
}
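With duplicate-TSN bookkeeping folded into the tsnmap, a SACK builder records duplicates with sctp_tsnmap_mark_dup() and drains them with sctp_tsnmap_get_dups(), which also resets the count. A standalone sketch of that pattern (the struct below only mirrors the relevant fields and is not the kernel type):

/* Standalone sketch of the dup-TSN bookkeeping above; not the kernel
 * struct sctp_tsnmap, just the two fields that matter here.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_DUP_TSNS 16

struct dup_map { uint32_t dup_tsns[MAX_DUP_TSNS]; uint16_t num_dup_tsns; };

static void mark_dup(struct dup_map *m, uint32_t tsn)
{
	if (m->num_dup_tsns < MAX_DUP_TSNS)
		m->dup_tsns[m->num_dup_tsns++] = htonl(tsn);	/* net order */
}

int main(void)
{
	struct dup_map m = { .num_dup_tsns = 0 };

	mark_dup(&m, 1000);
	mark_dup(&m, 1000);
	/* A SACK builder would copy dup_tsns[0..num_dup_tsns) into the chunk
	 * and then zero num_dup_tsns, which is what sctp_tsnmap_get_dups()
	 * does as a side effect of handing out the array.
	 */
	printf("%d duplicate TSNs queued for the next SACK\n",
	       (int)m.num_dup_tsns);
	return 0;
}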
/* Renege a TSN that was seen. */
void sctp_tsnmap_renege(struct sctp_tsnmap *, __u32 tsn);
/* Is there a gap in the TSN map? */
int sctp_tsnmap_has_gap(const sctp_tsnmap_t *map);
int sctp_tsnmap_has_gap(const struct sctp_tsnmap *);
/* Initialize a gap ack block iterator from user-provided memory. */
void sctp_tsnmap_iter_init(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter);
void sctp_tsnmap_iter_init(const struct sctp_tsnmap *,
struct sctp_tsnmap_iter *);
/* Get the next gap ack blocks. We return 0 if there are no more
* gap ack blocks.
*/
int sctp_tsnmap_next_gap_ack(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter,
__u16 *start, __u16 *end);
int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *,
struct sctp_tsnmap_iter *,__u16 *start, __u16 *end);
#endif /* __sctp_tsnmap_h__ */
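The iterator pair above is meant to be called in a loop until it reports no more blocks, yielding (start, end) offsets relative to the cumulative TSN ack point. A standalone illustration of that iteration over a plain seen[] array, not the kernel map:

/* Standalone illustration of gap ack block iteration over a seen[] map;
 * it mirrors what sctp_tsnmap_iter_init()/sctp_tsnmap_next_gap_ack()
 * provide but is not the kernel implementation.
 */
#include <stdio.h>

int main(void)
{
	/* offset 0 corresponds to ctsn + 1; 1 means the TSN was received */
	int seen[10] = { 0, 1, 1, 0, 0, 1, 0, 1, 1, 1 };
	int i = 0, n = 10, start, end;

	while (i < n) {
		while (i < n && !seen[i])
			i++;
		if (i == n)
			break;
		start = i + 1;			/* 1-based, relative to ctsn */
		while (i < n && seen[i])
			i++;
		end = i;			/* last received offset */
		printf("Gap Ack Block: start=%d end=%d\n", start, end);
	}
	return 0;
}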
......@@ -6,34 +6,34 @@
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
* These are the definitions needed for the sctp_ulpevent type. The
* sctp_ulpevent type is used to carry information from the state machine
* upwards to the ULP.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* the SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
*
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to one of the
* following email addresses:
*
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
......@@ -46,85 +46,97 @@
/* Warning: This sits inside an skb.cb[] area. Be very careful of
* growing this structure as it is at the maximum limit now.
*/
typedef struct sctp_ulpevent {
int malloced;
sctp_association_t *asoc;
struct sk_buff *parent;
struct sctp_ulpevent {
struct sctp_association *asoc;
struct sctp_sndrcvinfo sndrcvinfo;
int chunk_flags; /* Temp. until we get a new chunk_t */
int msg_flags;
} sctp_ulpevent_t;
sctp_ulpevent_t *sctp_ulpevent_new(int size, int msg_flags, int priority);
sctp_ulpevent_t *sctp_ulpevent_init(sctp_ulpevent_t *event, struct sk_buff *skb, int msg_flags);
void sctp_ulpevent_free(sctp_ulpevent_t *event);
int sctp_ulpevent_is_notification(const sctp_ulpevent_t *event);
sctp_ulpevent_t *sctp_ulpevent_make_assoc_change(
const struct SCTP_association *asoc,
__u16 flags,
__u16 state,
__u16 error,
__u16 outbound,
__u16 inbound,
int priority);
sctp_ulpevent_t *sctp_ulpevent_make_peer_addr_change(
const struct SCTP_association *asoc,
const struct sockaddr_storage *aaddr,
int flags,
int state,
int error,
int priority);
sctp_ulpevent_t *sctp_ulpevent_make_remote_error(
const struct SCTP_association *asoc,
struct SCTP_chunk *chunk,
__u16 flags,
int priority);
sctp_ulpevent_t *sctp_ulpevent_make_send_failed(
const struct SCTP_association *asoc,
struct SCTP_chunk *chunk,
__u16 flags,
__u32 error,
int priority);
sctp_ulpevent_t *sctp_ulpevent_make_shutdown_event(
const struct SCTP_association *asoc,
__u16 flags,
int priority);
sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(struct SCTP_association *asoc,
struct SCTP_chunk *chunk,
int priority);
void sctp_ulpevent_read_sndrcvinfo(const sctp_ulpevent_t *event,
struct msghdr *msghdr);
__u16 sctp_ulpevent_get_notification_type(const sctp_ulpevent_t *event);
};
/* Retrieve the skb this event sits inside of. */
static inline struct sk_buff *sctp_event2skb(struct sctp_ulpevent *ev)
{
return container_of((void *)ev, struct sk_buff, cb);
}
/* Retrieve & cast the event sitting inside the skb. */
static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
{
return (struct sctp_ulpevent *)skb->cb;
}
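Since the event now lives entirely inside skb->cb[], the two inlines above are just pointer arithmetic in opposite directions. A standalone demo of the same container_of round trip with stand-in structs:

/* Standalone demo of the cb[]/container_of round trip; the structs are
 * stand-ins, not the kernel's sk_buff and sctp_ulpevent.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_skb { int len; char cb[48]; };
struct fake_event { int msg_flags; };

int main(void)
{
	struct fake_skb skb = { .len = 100 };
	struct fake_event *ev = (struct fake_event *)skb.cb;	/* skb2event */
	struct fake_skb *back = container_of((void *)ev, struct fake_skb, cb);

	printf("round trip ok: %d\n", back == &skb);	/* prints 1 */
	return 0;
}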
struct sctp_ulpevent *sctp_ulpevent_new(int size, int flags, int priority);
struct sctp_ulpevent *sctp_ulpevent_init(struct sctp_ulpevent *, int flags);
void sctp_ulpevent_free(struct sctp_ulpevent *);
int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
const struct sctp_association *asoc,
__u16 flags,
__u16 state,
__u16 error,
__u16 outbound,
__u16 inbound,
int priority);
struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
const struct sctp_association *asoc,
const struct sockaddr_storage *aaddr,
int flags,
int state,
int error,
int priority);
struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
__u16 flags,
int priority);
struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
__u16 flags,
__u32 error,
int priority);
struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
const struct sctp_association *asoc,
__u16 flags,
int priority);
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
const struct sctp_association *asoc,
__u32 indication, int priority);
struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
struct sctp_chunk *chunk,
int priority);
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *);
__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
/* Is this event type enabled? */
static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
struct sctp_event_subscribe *mask)
{
char *amask = (char *) mask;
return amask[sn_type - SCTP_SN_TYPE_BASE];
}
/* Given an event subscription, is this event enabled? */
static inline int sctp_ulpevent_is_enabled(const sctp_ulpevent_t *event,
const struct sctp_event_subscribe *mask)
static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
struct sctp_event_subscribe *mask)
{
const char *amask = (const char *) mask;
__u16 sn_type;
int enabled = 1;
if (sctp_ulpevent_is_notification(event)) {
sn_type = sctp_ulpevent_get_notification_type(event);
enabled = amask[sn_type - SCTP_SN_TYPE_BASE];
enabled = sctp_ulpevent_type_enabled(sn_type, mask);
}
return enabled;
}
#endif /* __sctp_ulpevent_h__ */
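The subscription mask is read as a flat byte array indexed by (sn_type - SCTP_SN_TYPE_BASE), so each notification type owns one byte of struct sctp_event_subscribe. A standalone sketch of that lookup with a stand-in struct and sketch-local type numbering, not the real sockets ABI:

/* Standalone sketch of the byte-indexed subscription lookup above. */
#include <stdint.h>
#include <stdio.h>

#define SN_TYPE_BASE (1 << 15)

struct fake_subscribe {			/* stand-in for sctp_event_subscribe */
	uint8_t data_io_event;
	uint8_t association_event;
	uint8_t address_event;
};

static int type_enabled(uint16_t sn_type, struct fake_subscribe *mask)
{
	char *amask = (char *)mask;

	return amask[sn_type - SN_TYPE_BASE];
}

int main(void)
{
	struct fake_subscribe mask = { 1, 1, 0 };

	printf("type base+1 enabled: %d\n", type_enabled(SN_TYPE_BASE + 1, &mask));
	printf("type base+2 enabled: %d\n", type_enabled(SN_TYPE_BASE + 2, &mask));
	return 0;
}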
......
......@@ -48,7 +48,8 @@
/* A structure to carry information to the ULP (e.g. Sockets API) */
struct sctp_ulpq {
int malloced;
char malloced;
char pd_mode;
sctp_association_t *asoc;
struct sk_buff_head reasm;
struct sk_buff_head lobby;
......@@ -60,13 +61,22 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *, sctp_association_t *);
void sctp_ulpq_free(struct sctp_ulpq *);
/* Add a new DATA chunk for processing. */
int sctp_ulpq_tail_data(struct sctp_ulpq *, sctp_chunk_t *chunk, int priority);
int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, int);
/* Add a new event for propagation to the ULP. */
int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
/* Is the ulpqueue empty. */
int sctp_ulpqueue_is_empty(struct sctp_ulpq *);
/* Renege previously received chunks. */
void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, int);
/* Perform partial delivery. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, int);
/* Abort the partial delivery. */
void sctp_ulpq_abort_pd(struct sctp_ulpq *, int);
/* Clear the partial data delivery condition on this socket. */
int sctp_clear_pd(struct sock *sk);
#endif /* __sctp_ulpqueue_h__ */
......
......@@ -166,6 +166,7 @@ struct sctp_sndrcvinfo {
__u32 sinfo_context;
__u32 sinfo_timetolive;
__u32 sinfo_tsn;
__u32 sinfo_cumtsn;
sctp_assoc_t sinfo_assoc_id;
};
......@@ -367,6 +368,7 @@ struct sctp_rcv_pdapi_event {
sctp_assoc_t pdapi_assoc_id;
};
enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
/*
* Described in Section 7.3
......@@ -414,8 +416,8 @@ enum sctp_sn_type {
SCTP_SN_TYPE_BASE = (1<<15),
SCTP_ASSOC_CHANGE,
SCTP_PEER_ADDR_CHANGE,
SCTP_REMOTE_ERROR,
SCTP_SEND_FAILED,
SCTP_REMOTE_ERROR,
SCTP_SHUTDOWN_EVENT,
SCTP_PARTIAL_DELIVERY_EVENT,
SCTP_ADAPTION_INDICATION,
......
......@@ -10,7 +10,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
inqueue.o outqueue.o ulpqueue.o command.o \
tsnmap.o bind_addr.o socket.o primitive.o \
output.o input.o hashdriver.o sla1.o \
debug.o ssnmap.o
debug.o ssnmap.o proc.o
ifeq ($(CONFIG_SCTP_ADLER32), y)
sctp-y += adler32.o
......
......@@ -95,7 +95,7 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
sctp_scope_t scope,
int priority)
{
sctp_opt_t *sp;
struct sctp_opt *sp;
int i;
/* Retrieve the SCTP per socket area. */
......@@ -241,8 +241,8 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
asoc->peer.sack_needed = 1;
/* Create an input queue. */
sctp_inqueue_init(&asoc->base.inqueue);
sctp_inqueue_set_th_handler(&asoc->base.inqueue,
sctp_inq_init(&asoc->base.inqueue);
sctp_inq_set_th_handler(&asoc->base.inqueue,
(void (*)(void *))sctp_assoc_bh_rcv,
asoc);
......@@ -260,7 +260,6 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
/* Set up the tsn tracking. */
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE, 0);
asoc->peer.next_dup_tsn = 0;
skb_queue_head_init(&asoc->addip_chunks);
......@@ -311,7 +310,7 @@ void sctp_association_free(sctp_association_t *asoc)
sctp_ulpq_free(&asoc->ulpq);
/* Dispose of any pending chunks on the inqueue. */
sctp_inqueue_free(&asoc->base.inqueue);
sctp_inq_free(&asoc->base.inqueue);
/* Free ssnmap storage. */
sctp_ssnmap_free(asoc->ssnmap);
......@@ -361,14 +360,30 @@ static void sctp_association_destroy(sctp_association_t *asoc)
}
}
/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
struct sctp_transport *transport)
{
asoc->peer.primary_path = transport;
/* Set a default msg_name for events. */
memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
sizeof(union sctp_addr));
/* If the primary path is changing, assume that the
* user wants to use this new path.
*/
if (transport->active)
asoc->peer.active_path = transport;
}
/* Add a transport address to an association. */
struct sctp_transport *sctp_assoc_add_peer(sctp_association_t *asoc,
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
const union sctp_addr *addr,
int priority)
{
struct sctp_transport *peer;
sctp_opt_t *sp;
struct sctp_opt *sp;
unsigned short port;
/* AF_INET and AF_INET6 share common port field. */
......@@ -461,11 +476,7 @@ struct sctp_transport *sctp_assoc_add_peer(sctp_association_t *asoc,
/* If we do not yet have a primary path, set one. */
if (NULL == asoc->peer.primary_path) {
asoc->peer.primary_path = peer;
/* Set a default msg_name for events. */
memcpy(&asoc->peer.primary_addr, &peer->ipaddr,
sizeof(union sctp_addr));
asoc->peer.active_path = peer;
sctp_assoc_set_primary(asoc, peer);
asoc->peer.retran_path = peer;
}
......@@ -505,7 +516,7 @@ void sctp_assoc_control_transport(sctp_association_t *asoc,
struct sctp_transport *t = NULL;
struct sctp_transport *first;
struct sctp_transport *second;
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
struct list_head *pos;
int spc_state = 0;
......@@ -604,7 +615,7 @@ void sctp_association_put(sctp_association_t *asoc)
/* Allocate the next TSN, Transmission Sequence Number, for the given
* association.
*/
__u32 __sctp_association_get_next_tsn(sctp_association_t *asoc)
__u32 sctp_association_get_next_tsn(sctp_association_t *asoc)
{
/* From Section 1.6 Serial Number Arithmetic:
* Transmission Sequence Numbers wrap around when they reach
......@@ -619,7 +630,7 @@ __u32 __sctp_association_get_next_tsn(sctp_association_t *asoc)
}
/* Allocate 'num' TSNs by incrementing the association's TSN by num. */
__u32 __sctp_association_get_tsn_block(sctp_association_t *asoc, int num)
__u32 sctp_association_get_tsn_block(sctp_association_t *asoc, int num)
{
__u32 retval = asoc->next_tsn;
......@@ -776,7 +787,7 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
sctp_endpoint_t *ep;
sctp_chunk_t *chunk;
struct sock *sk;
sctp_inqueue_t *inqueue;
struct sctp_inq *inqueue;
int state, subtype;
sctp_assoc_t associd = sctp_assoc2id(asoc);
int error = 0;
......@@ -786,7 +797,7 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
sk = asoc->base.sk;
inqueue = &asoc->base.inqueue;
while (NULL != (chunk = sctp_pop_inqueue(inqueue))) {
while (NULL != (chunk = sctp_inq_pop(inqueue))) {
state = asoc->state;
subtype = chunk->chunk_hdr->type;
......@@ -795,6 +806,8 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
*/
if (sctp_chunk_is_data(chunk))
asoc->peer.last_data_from = chunk->transport;
else
SCTP_INC_STATS(SctpInCtrlChunks);
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
......@@ -819,7 +832,7 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(sctp_association_t *assoc, struct sock *newsk)
{
sctp_opt_t *newsp = sctp_sk(newsk);
struct sctp_opt *newsp = sctp_sk(newsk);
/* Delete the association from the old endpoint's list of
* associations.
......@@ -848,7 +861,6 @@ void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
/* Copy in new parameters of peer. */
asoc->c = new->c;
asoc->peer.rwnd = new->peer.rwnd;
asoc->peer.next_dup_tsn = new->peer.next_dup_tsn;
asoc->peer.sack_needed = new->peer.sack_needed;
asoc->peer.i = new->peer.i;
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE,
......@@ -887,26 +899,19 @@ void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
}
/* Choose the transport for sending a shutdown packet.
/* Update the retran path for sending a retransmitted packet.
* Round-robin through the active transports, else round-robin
* through the inactive transports as this is the next best thing
* we can try.
*/
struct sctp_transport *sctp_assoc_choose_shutdown_transport(sctp_association_t *asoc)
void sctp_assoc_update_retran_path(sctp_association_t *asoc)
{
struct sctp_transport *t, *next;
struct list_head *head = &asoc->peer.transport_addr_list;
struct list_head *pos;
/* If this is the first time SHUTDOWN is sent, use the active
* path.
*/
if (!asoc->shutdown_last_sent_to)
return asoc->peer.active_path;
/* Otherwise, find the next transport in a round-robin fashion. */
t = asoc->shutdown_last_sent_to;
/* Find the next transport in a round-robin fashion. */
t = asoc->peer.retran_path;
pos = &t->transports;
next = NULL;
......@@ -935,13 +940,30 @@ struct sctp_transport *sctp_assoc_choose_shutdown_transport(sctp_association_t *
* other active transports. If so, use the next
* transport.
*/
if (t == asoc->shutdown_last_sent_to) {
if (t == asoc->peer.retran_path) {
t = next;
break;
}
}
return t;
asoc->peer.retran_path = t;
}
/* Choose the transport for sending a SHUTDOWN packet. */
struct sctp_transport *sctp_assoc_choose_shutdown_transport(sctp_association_t *asoc)
{
/* If this is the first time SHUTDOWN is sent, use the active path,
* else use the retran path. If the last SHUTDOWN was sent over the
* retran path, update the retran path and use it.
*/
if (!asoc->shutdown_last_sent_to)
return asoc->peer.active_path;
else {
if (asoc->shutdown_last_sent_to == asoc->peer.retran_path)
sctp_assoc_update_retran_path(asoc);
return asoc->peer.retran_path;
}
}
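The retransmit path is now chosen by round-robining the peer transport list and preferring active transports; SHUTDOWN retransmissions reuse it after the first SHUTDOWN on the active path. A standalone sketch of that selection, with an array standing in for the kernel's circular transport list:

/* Standalone sketch of round-robin next-transport selection that prefers
 * active entries; not the kernel list_head walk shown above.
 */
#include <stdio.h>

struct fake_transport { const char *name; int active; };

static int next_retran(const struct fake_transport *t, int n, int cur)
{
	int i;

	/* Walk forward from the current path; the first active one wins. */
	for (i = 1; i <= n; i++) {
		int idx = (cur + i) % n;

		if (t[idx].active)
			return idx;
	}
	return (cur + 1) % n;	/* none active: fall back to the next one */
}

int main(void)
{
	struct fake_transport peers[] = {
		{ "10.0.0.1", 1 }, { "10.0.0.2", 0 }, { "10.0.0.3", 1 },
	};
	int cur = 0;

	cur = next_retran(peers, 3, cur);
	printf("retran path -> %s\n", peers[cur].name);	/* 10.0.0.3 */
	return 0;
}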
/* Update the association's pmtu and frag_point by going through all the
......@@ -990,13 +1012,13 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
asoc->rwnd += len;
}
SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) - %u\n",
__FUNCTION__, asoc, len, asoc->rwnd, asoc->rwnd_over,
asoc->a_rwnd);
SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
"- %u\n", __FUNCTION__, asoc, len, asoc->rwnd,
asoc->rwnd_over, asoc->a_rwnd);
/* Send a window update SACK if the rwnd has increased by at least the
* minimum of the association's PMTU and half of the receive buffer.
* The algorithm used is similar to the one described in
* Section 4.2.3.3 of RFC 1122.
*/
if ((asoc->state == SCTP_STATE_ESTABLISHED) &&
......@@ -1004,17 +1026,16 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
((asoc->rwnd - asoc->a_rwnd) >=
min_t(__u32, (asoc->base.sk->rcvbuf >> 1), asoc->pmtu))) {
SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
"rwnd: %u a_rwnd: %u\n",
__FUNCTION__, asoc, asoc->rwnd, asoc->a_rwnd);
sack = sctp_make_sack(asoc);
"rwnd: %u a_rwnd: %u\n", __FUNCTION__,
asoc, asoc->rwnd, asoc->a_rwnd);
sack = sctp_make_sack(asoc);
if (!sack)
return;
/* Update the last advertised rwnd value. */
asoc->a_rwnd = asoc->rwnd;
asoc->peer.sack_needed = 0;
asoc->peer.next_dup_tsn = 0;
sctp_outq_tail(&asoc->outqueue, sack);
......@@ -1022,7 +1043,7 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
if (timer_pending(timer) && del_timer(timer))
sctp_association_put(asoc);
}
}
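The window-update test above only emits an immediate SACK when rwnd has grown past the last advertised value by at least min(rcvbuf/2, PMTU), in the spirit of RFC 1122's silly-window avoidance. A standalone sketch of the threshold check, with arbitrary example numbers rather than kernel defaults:

/* Standalone sketch of the window update SACK threshold above. */
#include <stdio.h>

static int should_send_window_update(unsigned rwnd, unsigned a_rwnd,
				     unsigned rcvbuf, unsigned pmtu)
{
	unsigned thresh = (rcvbuf >> 1) < pmtu ? (rcvbuf >> 1) : pmtu;

	return rwnd > a_rwnd && (rwnd - a_rwnd) >= thresh;
}

int main(void)
{
	/* a_rwnd was 30000; the reader drained data and rwnd is 32768 again */
	printf("%d\n", should_send_window_update(32768, 30000, 65536, 1500));
	/* grew by 2768 >= min(32768, 1500) = 1500, so this prints 1 */
	return 0;
}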
/* Decrease asoc's rwnd by len. */
......
......@@ -302,7 +302,7 @@ int sctp_bind_addr_match(sctp_bind_addr_t *bp, const union sctp_addr *addr,
static int sctp_copy_one_addr(sctp_bind_addr_t *dest, union sctp_addr *addr,
sctp_scope_t scope, int priority, int flags)
{
sctp_protocol_t *proto = sctp_get_protocol();
struct sctp_protocol *proto = sctp_get_protocol();
int error = 0;
if (sctp_is_any(addr)) {
......
......@@ -65,7 +65,7 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep);
/* Create a sctp_endpoint_t with all that boring stuff initialized.
* Returns NULL if there isn't enough memory.
*/
sctp_endpoint_t *sctp_endpoint_new(sctp_protocol_t *proto,
sctp_endpoint_t *sctp_endpoint_new(struct sctp_protocol *proto,
struct sock *sk, int priority)
{
sctp_endpoint_t *ep;
......@@ -89,10 +89,11 @@ sctp_endpoint_t *sctp_endpoint_new(sctp_protocol_t *proto,
/*
* Initialize the base fields of the endpoint structure.
*/
sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep, sctp_protocol_t *proto,
sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep,
struct sctp_protocol *proto,
struct sock *sk, int priority)
{
sctp_opt_t *sp = sctp_sk(sk);
struct sctp_opt *sp = sctp_sk(sk);
memset(ep, 0, sizeof(sctp_endpoint_t));
/* Initialize the base structure. */
......@@ -105,10 +106,10 @@ sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep, sctp_protocol_t *proto,
ep->base.malloced = 1;
/* Create an input queue. */
sctp_inqueue_init(&ep->base.inqueue);
sctp_inq_init(&ep->base.inqueue);
/* Set its top-half handler */
sctp_inqueue_set_th_handler(&ep->base.inqueue,
sctp_inq_set_th_handler(&ep->base.inqueue,
(void (*)(void *))sctp_endpoint_bh_rcv,
ep);
......@@ -198,7 +199,7 @@ void sctp_endpoint_destroy(sctp_endpoint_t *ep)
sctp_unhash_endpoint(ep);
/* Cleanup the inqueue. */
sctp_inqueue_free(&ep->base.inqueue);
sctp_inq_free(&ep->base.inqueue);
sctp_bind_addr_free(&ep->base.bind_addr);
......@@ -333,7 +334,7 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep)
struct sock *sk;
struct sctp_transport *transport;
sctp_chunk_t *chunk;
sctp_inqueue_t *inqueue;
struct sctp_inq *inqueue;
sctp_subtype_t subtype;
sctp_state_t state;
int error = 0;
......@@ -345,7 +346,7 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep)
inqueue = &ep->base.inqueue;
sk = ep->base.sk;
while (NULL != (chunk = sctp_pop_inqueue(inqueue))) {
while (NULL != (chunk = sctp_inq_pop(inqueue))) {
subtype.chunk = chunk->chunk_hdr->type;
/* We might have grown an association since last we
......@@ -369,6 +370,8 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep)
*/
if (asoc && sctp_chunk_is_data(chunk))
asoc->peer.last_data_from = chunk->transport;
else
SCTP_INC_STATS(SctpInCtrlChunks);
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
......
......@@ -90,6 +90,7 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
if (val != cmp) {
/* CRC failure, dump it. */
SCTP_INC_STATS_BH(SctpChecksumErrors);
return -1;
}
return 0;
......@@ -115,6 +116,8 @@ int sctp_rcv(struct sk_buff *skb)
if (skb->pkt_type!=PACKET_HOST)
goto discard_it;
SCTP_INC_STATS_BH(SctpInSCTPPacks);
sh = (struct sctphdr *) skb->h.raw;
/* Pull up the IP and SCTP headers. */
......@@ -160,8 +163,10 @@ int sctp_rcv(struct sk_buff *skb)
*/
if (!asoc) {
ep = __sctp_rcv_lookup_endpoint(&dest);
if (sctp_rcv_ootb(skb))
if (sctp_rcv_ootb(skb)) {
SCTP_INC_STATS_BH(SctpOutOfBlues);
goto discard_release;
}
}
/* Retrieve the common input handling substructure. */
......@@ -248,7 +253,7 @@ int sctp_rcv(struct sk_buff *skb)
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
sctp_chunk_t *chunk;
sctp_inqueue_t *inqueue;
struct sctp_inq *inqueue;
/* One day chunk will live inside the skb, but for
* now this works.
......@@ -256,7 +261,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
chunk = (sctp_chunk_t *) skb;
inqueue = &chunk->rcvr->inqueue;
sctp_push_inqueue(inqueue, chunk);
sctp_inq_push(inqueue, chunk);
return 0;
}
......
......@@ -47,8 +47,8 @@
#include <net/sctp/sm.h>
#include <linux/interrupt.h>
/* Initialize an SCTP_inqueue. */
void sctp_inqueue_init(sctp_inqueue_t *queue)
/* Initialize an SCTP inqueue. */
void sctp_inq_init(struct sctp_inq *queue)
{
skb_queue_head_init(&queue->in);
queue->in_progress = NULL;
......@@ -59,21 +59,21 @@ void sctp_inqueue_init(sctp_inqueue_t *queue)
queue->malloced = 0;
}
/* Create an initialized SCTP_inqueue. */
sctp_inqueue_t *sctp_inqueue_new(void)
/* Create an initialized sctp_inq. */
struct sctp_inq *sctp_inq_new(void)
{
sctp_inqueue_t *retval;
struct sctp_inq *retval;
retval = t_new(sctp_inqueue_t, GFP_ATOMIC);
retval = t_new(struct sctp_inq, GFP_ATOMIC);
if (retval) {
sctp_inqueue_init(retval);
sctp_inq_init(retval);
retval->malloced = 1;
}
return retval;
}
/* Release the memory associated with an SCTP inqueue. */
void sctp_inqueue_free(sctp_inqueue_t *queue)
void sctp_inq_free(struct sctp_inq *queue)
{
sctp_chunk_t *chunk;
......@@ -96,7 +96,7 @@ void sctp_inqueue_free(sctp_inqueue_t *queue)
/* Put a new packet in an SCTP inqueue.
* We assume that packet->sctp_hdr is set and in host byte order.
*/
void sctp_push_inqueue(sctp_inqueue_t *q, sctp_chunk_t *packet)
void sctp_inq_push(struct sctp_inq *q, sctp_chunk_t *packet)
{
/* Directly call the packet handling routine. */
......@@ -114,7 +114,7 @@ void sctp_push_inqueue(sctp_inqueue_t *q, sctp_chunk_t *packet)
* WARNING: If you need to put the chunk on another queue, you need to
* make a shallow copy (clone) of it.
*/
sctp_chunk_t *sctp_pop_inqueue(sctp_inqueue_t *queue)
sctp_chunk_t *sctp_inq_pop(struct sctp_inq *queue)
{
sctp_chunk_t *chunk;
sctp_chunkhdr_t *ch = NULL;
......@@ -172,7 +172,7 @@ sctp_chunk_t *sctp_pop_inqueue(sctp_inqueue_t *queue)
chunk->end_of_packet = 1;
}
SCTP_DEBUG_PRINTK("+++sctp_pop_inqueue+++ chunk %p[%s],"
SCTP_DEBUG_PRINTK("+++sctp_inq_pop+++ chunk %p[%s],"
" length %d, skb->len %d\n",chunk,
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
ntohs(chunk->chunk_hdr->length), chunk->skb->len);
......@@ -182,12 +182,12 @@ sctp_chunk_t *sctp_pop_inqueue(sctp_inqueue_t *queue)
/* Set a top-half handler.
*
* Originally, the top-half handler was scheduled as a BH. We now
* call the handler directly in sctp_push_inqueue() at a time that
* call the handler directly in sctp_inq_push() at a time that
* we know we are lock safe.
* The intent is that this routine will pull stuff out of the
* inqueue and process it.
*/
void sctp_inqueue_set_th_handler(sctp_inqueue_t *q,
void sctp_inq_set_th_handler(struct sctp_inq *q,
void (*callback)(void *), void *arg)
{
INIT_WORK(&q->immediate, callback, arg);
......
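The renamed inqueue API keeps the same shape: sctp_inq_push() invokes the registered top-half handler directly, and the handler drains chunks with sctp_inq_pop(). A standalone, lock-free sketch of that push/callback/pop flow with stand-in types:

/* Standalone sketch of the inqueue push -> handler -> pop flow; all of
 * the types and helpers below are stand-ins, not the kernel sctp_inq API,
 * and no locking is shown.
 */
#include <stdio.h>

struct fake_inq {
	int queue[8];
	int head, tail;
	void (*handler)(void *arg);
	void *arg;
};

static int inq_pop(struct fake_inq *q)
{
	return q->head == q->tail ? -1 : q->queue[q->head++ % 8];
}

static void inq_push(struct fake_inq *q, int chunk)
{
	q->queue[q->tail++ % 8] = chunk;
	q->handler(q->arg);	/* called directly, as in sctp_inq_push() */
}

static void bh_rcv(void *arg)	/* plays the role of sctp_assoc_bh_rcv() */
{
	struct fake_inq *q = arg;
	int chunk;

	while ((chunk = inq_pop(q)) != -1)
		printf("processing chunk %d\n", chunk);
}

int main(void)
{
	struct fake_inq q = { .handler = bh_rcv };

	q.arg = &q;
	inq_push(&q, 1);
	inq_push(&q, 2);
	return 0;
}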
......@@ -131,6 +131,8 @@ static inline int sctp_v6_xmit(struct sk_buff *skb,
__FUNCTION__, skb, skb->len, NIP6(fl.fl6_src),
NIP6(fl.fl6_dst));
SCTP_INC_STATS(SctpOutSCTPPacks);
return ip6_xmit(sk, skb, &fl, np->opt);
}
......@@ -443,7 +445,7 @@ static void sctp_inet6_msgname(char *msgname, int *addr_len)
}
/* Initialize a PF_INET msgname from a ulpevent. */
static void sctp_inet6_event_msgname(sctp_ulpevent_t *event, char *msgname,
static void sctp_inet6_event_msgname(struct sctp_ulpevent *event, char *msgname,
int *addrlen)
{
struct sockaddr_in6 *sin6, *sin6from;
......@@ -562,6 +564,20 @@ static int sctp_inet6_bind_verify(struct sctp_opt *opt, union sctp_addr *addr)
return af->available(addr);
}
/* Fill in Supported Address Type information for INIT and INIT-ACK
* chunks. Note: In the future, we may want to look at sock options
* to determine whether a PF_INET6 socket really wants to have IPV4
* addresses.
* Returns number of addresses supported.
*/
static int sctp_inet6_supported_addrs(const struct sctp_opt *opt,
__u16 *types)
{
types[0] = SCTP_PARAM_IPV4_ADDRESS;
types[1] = SCTP_PARAM_IPV6_ADDRESS;
return 2;
}
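The new supported_addrs() hook fills the caller's array with the address-type parameter codes a socket can advertise and returns the count, which together with SCTP_SAT_LEN(x) sizes the Supported Address Types parameter in INIT/INIT-ACK. A standalone sketch of a caller consuming such a hook, with placeholder codes rather than the real SCTP_PARAM_* values:

/* Standalone sketch of consuming a supported_addrs()-style hook. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_PARAM_IPV4 5	/* placeholder, not the wire value */
#define FAKE_PARAM_IPV6 6	/* placeholder, not the wire value */

static int fake_supported_addrs(uint16_t *types)
{
	types[0] = FAKE_PARAM_IPV4;
	types[1] = FAKE_PARAM_IPV6;
	return 2;
}

int main(void)
{
	uint16_t types[2];
	int num = fake_supported_addrs(types);
	size_t sat_len = 4 + num * sizeof(uint16_t);	/* header + type list */

	printf("advertising %d address types, parameter length %zu\n",
	       num, sat_len);
	return 0;
}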
static struct proto_ops inet6_seqpacket_ops = {
.family = PF_INET6,
.release = inet6_release,
......@@ -624,6 +640,7 @@ static struct sctp_pf sctp_pf_inet6_specific = {
.af_supported = sctp_inet6_af_supported,
.cmp_addr = sctp_inet6_cmp_addr,
.bind_verify = sctp_inet6_bind_verify,
.supported_addrs = sctp_inet6_supported_addrs,
.af = &sctp_ipv6_specific,
};
......
......@@ -419,6 +419,7 @@ int sctp_packet_transmit(sctp_packet_t *packet)
dst = transport->dst;
/* The 'obsolete' field of dst is set to 2 when a dst is freed. */
if (!dst || (dst->obsolete > 1)) {
dst_release(dst);
sctp_transport_route(transport, NULL, sctp_sk(sk));
sctp_assoc_sync_pmtu(asoc);
}
......
......@@ -125,7 +125,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
sctp_free_chunk(chunk);
}
/* Throw away any chunks in the retransmit queue. */
list_for_each_safe(lchunk, temp, &q->retransmit) {
list_del(lchunk);
chunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);
......@@ -193,11 +193,17 @@ int sctp_outq_tail(struct sctp_outq *q, sctp_chunk_t *chunk)
: "Illegal Chunk");
skb_queue_tail(&q->out, (struct sk_buff *) chunk);
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
SCTP_INC_STATS(SctpOutUnorderChunks);
else
SCTP_INC_STATS(SctpOutOrderChunks);
q->empty = 0;
break;
};
} else
} else {
skb_queue_tail(&q->control, (struct sk_buff *) chunk);
SCTP_INC_STATS(SctpOutCtrlChunks);
}
if (error < 0)
return error;
......@@ -235,7 +241,7 @@ void sctp_retransmit_insert(struct list_head *tlchunk, struct sctp_outq *q)
}
/* Mark all the eligible packets on a transport for retransmission. */
void sctp_retransmit_mark(struct sctp_outq *q,
struct sctp_transport *transport,
__u8 fast_retransmit)
{
......@@ -315,6 +321,11 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
switch(reason) {
case SCTP_RETRANSMIT_T3_RTX:
sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
/* Update the retran path if the T3-rtx timer has expired for
* the current retran path.
*/
if (transport == transport->asoc->peer.retran_path)
sctp_assoc_update_retran_path(transport->asoc);
break;
case SCTP_RETRANSMIT_FAST_RTX:
sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
......@@ -542,7 +553,7 @@ void sctp_xmit_fragmented_chunks(struct sctp_outq *q, sctp_packet_t *packet,
}
/* Get a TSN block of nfrags TSNs. */
tsn = __sctp_association_get_tsn_block(asoc, nfrags);
tsn = sctp_association_get_tsn_block(asoc, nfrags);
pos = skb_peek(&q->out);
/* Transmit the first fragment. */
......@@ -584,7 +595,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
old_flags = chunk->chunk_hdr->flags;
if (old_flags & SCTP_DATA_FIRST_FRAG)
flags = SCTP_DATA_FIRST_FRAG;
else
flags = SCTP_DATA_MIDDLE_FRAG;
/* Make the first fragment. */
......@@ -992,7 +1003,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
*/
while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL ) {
struct sctp_transport *t = list_entry(ltransport,
struct sctp_transport,
send_ready);
if (t != transport)
transport = t;
......@@ -1114,7 +1125,7 @@ int sctp_outq_sack(struct sctp_outq *q, sctp_sackhdr_t *sack)
* This is a MASSIVE candidate for optimization.
*/
list_for_each(pos, transport_list) {
transport = list_entry(pos, struct sctp_transport,
transports);
sctp_check_transmitted(q, &transport->transmitted,
transport, sack, highest_new_tsn);
......@@ -1168,7 +1179,7 @@ int sctp_outq_sack(struct sctp_outq *q, sctp_sackhdr_t *sack)
goto finish;
list_for_each(pos, transport_list) {
transport = list_entry(pos, struct sctp_transport,
transports);
q->empty = q->empty && list_empty(&transport->transmitted);
if (!q->empty)
......
/* SCTP kernel reference Implementation
* Copyright (c) 2003 International Business Machines, Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#include <linux/types.h>
#include <linux/seq_file.h>
#include <net/sctp/sctp.h>
static char *sctp_snmp_list[] = {
#define SCTP_SNMP_ENTRY(x) #x
SCTP_SNMP_ENTRY(SctpCurrEstab),
SCTP_SNMP_ENTRY(SctpActiveEstabs),
SCTP_SNMP_ENTRY(SctpPassiveEstabs),
SCTP_SNMP_ENTRY(SctpAborteds),
SCTP_SNMP_ENTRY(SctpShutdowns),
SCTP_SNMP_ENTRY(SctpOutOfBlues),
SCTP_SNMP_ENTRY(SctpChecksumErrors),
SCTP_SNMP_ENTRY(SctpOutCtrlChunks),
SCTP_SNMP_ENTRY(SctpOutOrderChunks),
SCTP_SNMP_ENTRY(SctpOutUnorderChunks),
SCTP_SNMP_ENTRY(SctpInCtrlChunks),
SCTP_SNMP_ENTRY(SctpInOrderChunks),
SCTP_SNMP_ENTRY(SctpInUnorderChunks),
SCTP_SNMP_ENTRY(SctpFragUsrMsgs),
SCTP_SNMP_ENTRY(SctpReasmUsrMsgs),
SCTP_SNMP_ENTRY(SctpOutSCTPPacks),
SCTP_SNMP_ENTRY(SctpInSCTPPacks),
#undef SCTP_SNMP_ENTRY
};
/* Return the current value of a particular entry in the mib by adding its
* per cpu counters.
*/
static unsigned long
fold_field(void *mib[], int nr)
{
unsigned long res = 0;
int i;
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
res +=
*((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
sizeof (unsigned long) * nr));
res +=
*((unsigned long *) (((void *) per_cpu_ptr(mib[1], i)) +
sizeof (unsigned long) * nr));
}
return res;
}
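fold_field() sums one counter slot over every possible CPU and over both per-cpu MIB copies (the BH and user halves). A standalone sketch of the same folding over plain arrays instead of the kernel's per-cpu allocations:

/* Standalone sketch of per-cpu counter folding as done by fold_field();
 * plain arrays stand in for the two per-cpu MIB allocations.
 */
#include <stdio.h>

#define NCPUS   4
#define NFIELDS 8

/* mib[half][cpu][field]: half 0 is the BH copy, half 1 the user copy */
static unsigned long mib[2][NCPUS][NFIELDS];

static unsigned long fold_field_sketch(int field)
{
	unsigned long res = 0;
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		res += mib[0][cpu][field] + mib[1][cpu][field];
	return res;
}

int main(void)
{
	mib[0][0][3] = 10;	/* e.g. a counter bumped in BH context on CPU 0 */
	mib[1][2][3] = 5;	/* and once in process context on CPU 2 */
	printf("field 3 total: %lu\n", fold_field_sketch(3));	/* 15 */
	return 0;
}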
/* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */
static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
{
int i;
for (i = 0; i < sizeof(sctp_snmp_list) / sizeof(char *); i++)
seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i],
fold_field((void **)sctp_statistics, i));
return 0;
}
/* Initialize the seq file operations for 'snmp' object. */
static int sctp_snmp_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, sctp_snmp_seq_show, NULL);
}
static struct file_operations sctp_snmp_seq_fops = {
.open = sctp_snmp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/* Set up the proc fs entry for 'snmp' object. */
int __init sctp_snmp_proc_init(void)
{
struct proc_dir_entry *p;
p = create_proc_entry("snmp", S_IRUGO, proc_net_sctp);
if (!p)
return -ENOMEM;
p->proc_fops = &sctp_snmp_seq_fops;
return 0;
}
/* Cleanup the proc fs entry for 'snmp' object. */
void sctp_snmp_proc_exit(void)
{
remove_proc_entry("snmp", proc_net_sctp);
}
......@@ -58,7 +58,7 @@
#include <net/inet_common.h>
/* Global data structures. */
sctp_protocol_t sctp_proto;
struct sctp_protocol sctp_proto;
struct proc_dir_entry *proc_net_sctp;
DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);
......@@ -75,6 +75,9 @@ static struct sctp_af *sctp_af_v6_specific;
extern struct net_proto_family inet_family_ops;
extern int sctp_snmp_proc_init(void);
extern int sctp_snmp_proc_exit(void);
/* Return the address of the control sock. */
struct sock *sctp_get_ctl_sock(void)
{
......@@ -82,21 +85,32 @@ struct sock *sctp_get_ctl_sock(void)
}
/* Set up the proc fs entry for the SCTP protocol. */
__init void sctp_proc_init(void)
__init int sctp_proc_init(void)
{
int rc = 0;
if (!proc_net_sctp) {
struct proc_dir_entry *ent;
ent = proc_mkdir("net/sctp", 0);
if (ent) {
ent->owner = THIS_MODULE;
proc_net_sctp = ent;
}
} else
rc = -ENOMEM;
}
if (sctp_snmp_proc_init())
rc = -ENOMEM;
return rc;
}
/* Clean up the proc fs entry for the SCTP protocol. */
void sctp_proc_exit(void)
{
sctp_snmp_proc_exit();
if (proc_net_sctp) {
proc_net_sctp = NULL;
remove_proc_entry("net/sctp", 0);
......@@ -124,7 +138,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
/* Add the address to the local list. */
addr = t_new(struct sockaddr_storage_list, GFP_ATOMIC);
if (addr) {
INIT_LIST_HEAD(&addr->list);
addr->a.v4.sin_family = AF_INET;
addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
......@@ -139,7 +152,7 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
/* Extract our IP addresses from the system and stash them in the
* protocol structure.
*/
static void __sctp_get_local_addr_list(sctp_protocol_t *proto)
static void __sctp_get_local_addr_list(struct sctp_protocol *proto)
{
struct net_device *dev;
struct list_head *pos;
......@@ -155,7 +168,7 @@ static void __sctp_get_local_addr_list(sctp_protocol_t *proto)
read_unlock(&dev_base_lock);
}
static void sctp_get_local_addr_list(sctp_protocol_t *proto)
static void sctp_get_local_addr_list(struct sctp_protocol *proto)
{
long flags __attribute__ ((unused));
......@@ -165,7 +178,7 @@ static void sctp_get_local_addr_list(sctp_protocol_t *proto)
}
/* Free the existing local addresses. */
static void __sctp_free_local_addr_list(sctp_protocol_t *proto)
static void __sctp_free_local_addr_list(struct sctp_protocol *proto)
{
struct sockaddr_storage_list *addr;
struct list_head *pos, *temp;
......@@ -178,7 +191,7 @@ static void __sctp_free_local_addr_list(sctp_protocol_t *proto)
}
/* Free the existing local addresses. */
static void sctp_free_local_addr_list(sctp_protocol_t *proto)
static void sctp_free_local_addr_list(struct sctp_protocol *proto)
{
long flags __attribute__ ((unused));
......@@ -188,8 +201,9 @@ static void sctp_free_local_addr_list(sctp_protocol_t *proto)
}
/* Copy the local addresses which are valid for 'scope' into 'bp'. */
int sctp_copy_local_addr_list(sctp_protocol_t *proto, sctp_bind_addr_t *bp,
sctp_scope_t scope, int priority, int copy_flags)
int sctp_copy_local_addr_list(struct sctp_protocol *proto,
struct sctp_bind_addr *bp, sctp_scope_t scope,
int priority, int copy_flags)
{
struct sockaddr_storage_list *addr;
int error = 0;
......@@ -318,7 +332,7 @@ static int sctp_v4_addr_valid(union sctp_addr *addr)
static int sctp_v4_available(const union sctp_addr *addr)
{
int ret = inet_addr_type(addr->v4.sin_addr.s_addr);
/* FIXME: ip_nonlocal_bind sysctl support. */
if (addr->v4.sin_addr.s_addr != INADDR_ANY && ret != RTN_LOCAL)
......@@ -367,7 +381,7 @@ static sctp_scope_t sctp_v4_scope(union sctp_addr *addr)
/* Returns a valid dst cache entry for the given source and destination ip
* addresses. If an association is passed, tries to get a dst entry with a
* source address that matches an address in the bind address list.
*/
struct dst_entry *sctp_v4_get_dst(sctp_association_t *asoc,
union sctp_addr *daddr,
......@@ -436,7 +450,6 @@ struct dst_entry *sctp_v4_get_dst(sctp_association_t *asoc,
if (AF_INET == laddr->a.sa.sa_family) {
fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
dst = sctp_v4_get_dst(asoc, daddr, &laddr->a);
if (!ip_route_output_key(&rt, &fl)) {
dst = &rt->u.dst;
goto out_unlock;
......@@ -557,7 +570,7 @@ static void sctp_inet_msgname(char *msgname, int *addr_len)
}
/* Copy the peer's primary address as the msg_name. */
static void sctp_inet_event_msgname(sctp_ulpevent_t *event, char *msgname,
static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname,
int *addr_len)
{
struct sockaddr_in *sin, *sinfrom;
......@@ -618,6 +631,16 @@ static int sctp_inet_bind_verify(struct sctp_opt *opt, union sctp_addr *addr)
return sctp_v4_available(addr);
}
/* Fill in Supported Address Type information for INIT and INIT-ACK
* chunks. Returns number of addresses supported.
*/
static int sctp_inet_supported_addrs(const struct sctp_opt *opt,
__u16 *types)
{
types[0] = SCTP_PARAM_IPV4_ADDRESS;
return 1;
}
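For comparison, a hypothetical IPv6-aware counterpart might look like the sketch below (not part of this hunk; the function name and the second address type are assumptions). Reporting two types is what would make SCTP_SAT_LEN(num_types) in sctp_make_init() reserve room for two __u16 entries.
static int sctp_inet6_supported_addrs(const struct sctp_opt *opt,
				      __u16 *types)
{
	/* Hypothetical: an IPv6-capable PF advertises both address types. */
	types[0] = SCTP_PARAM_IPV4_ADDRESS;
	types[1] = SCTP_PARAM_IPV6_ADDRESS;
	return 2;
}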
/* Wrapper routine that calls the ip transmit routine. */
static inline int sctp_v4_xmit(struct sk_buff *skb,
struct sctp_transport *transport, int ipfragok)
......@@ -628,6 +651,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
NIPQUAD(((struct rtable *)skb->dst)->rt_src),
NIPQUAD(((struct rtable *)skb->dst)->rt_dst));
SCTP_INC_STATS(SctpOutSCTPPacks);
return ip_queue_xmit(skb, ipfragok);
}
......@@ -639,6 +663,7 @@ static struct sctp_pf sctp_pf_inet = {
.af_supported = sctp_inet_af_supported,
.cmp_addr = sctp_inet_cmp_addr,
.bind_verify = sctp_inet_bind_verify,
.supported_addrs = sctp_inet_supported_addrs,
.af = &sctp_ipv4_specific,
};
......@@ -743,7 +768,7 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
static int __init init_sctp_mibs(void)
{
int i;
sctp_statistics[0] = kmalloc_percpu(sizeof (struct sctp_mib),
GFP_KERNEL);
if (!sctp_statistics[0])
......@@ -765,7 +790,7 @@ static int __init init_sctp_mibs(void)
}
}
return 0;
}
static void cleanup_sctp_mibs(void)
......@@ -789,9 +814,9 @@ __init int sctp_init(void)
/* Allocate and initialise sctp mibs. */
status = init_sctp_mibs();
if (status)
goto err_init_mibs;
/* Initialize proc fs directory. */
sctp_proc_init();
......@@ -818,7 +843,7 @@ __init int sctp_init(void)
/* Valid.Cookie.Life - 60 seconds */
sctp_proto.valid_cookie_life = 60 * HZ;
/* Whether Cookie Preservative is enabled(1) or not(0) */
sctp_proto.cookie_preserve_enable = 1;
/* Max.Burst - 4 */
......@@ -907,7 +932,7 @@ __init int sctp_init(void)
INIT_LIST_HEAD(&sctp_proto.local_addr_list);
sctp_proto.local_addr_lock = SPIN_LOCK_UNLOCKED;
/* Register notifier for inet address additions/deletions. */
register_inetaddr_notifier(&sctp_inetaddr_notifier);
sctp_get_local_addr_list(&sctp_proto);
......@@ -929,7 +954,7 @@ __init int sctp_init(void)
sctp_dbg_objcnt_exit();
sctp_proc_exit();
cleanup_sctp_mibs();
err_init_mibs:
inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
inet_unregister_protosw(&sctp_protosw);
return status;
......
......@@ -66,29 +66,6 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
* Note 4: This parameter, when present, specifies all the
* address types the sending endpoint can support. The absence
* of this parameter indicates that the sending endpoint can
* support any address type.
*/
static const sctp_supported_addrs_param_t sat_param = {
{
SCTP_PARAM_SUPPORTED_ADDRESS_TYPES,
__constant_htons(SCTP_SAT_LEN),
}
};
/* gcc 3.2 doesn't allow initialization of zero-length arrays. So the above
* structure is split and the address types array is initialized using a
* fixed length array.
*/
static const __u16 sat_addr_types[2] = {
SCTP_PARAM_IPV4_ADDRESS,
SCTP_V6(SCTP_PARAM_IPV6_ADDRESS,)
};
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
* Note 2: The ECN capable field is reserved for future use of
......@@ -174,7 +151,10 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
union sctp_params addrs;
size_t chunksize;
sctp_chunk_t *retval = NULL;
int addrs_len = 0;
int num_types, addrs_len = 0;
struct sctp_opt *sp;
sctp_supported_addrs_param_t sat;
__u16 types[2];
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
......@@ -195,7 +175,11 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
init.num_inbound_streams = htons(asoc->c.sinit_max_instreams);
init.initial_tsn = htonl(asoc->c.initial_tsn);
chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN;
/* How many address types are needed? */
sp = sctp_sk(asoc->base.sk);
num_types = sp->pf->supported_addrs(sp, types);
chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
chunksize += sizeof(ecap_param);
chunksize += vparam_len;
......@@ -220,11 +204,19 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
retval->param_hdr.v =
sctp_addto_chunk(retval, addrs_len, addrs.v);
sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &sat_param);
sctp_addto_chunk(retval, sizeof(sat_addr_types), sat_addr_types);
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
* Note 4: This parameter, when present, specifies all the
* address types the sending endpoint can support. The absence
* of this parameter indicates that the sending endpoint can
* support any address type.
*/
sat.param_hdr.type = SCTP_PARAM_SUPPORTED_ADDRESS_TYPES;
sat.param_hdr.length = htons(SCTP_SAT_LEN(num_types));
sctp_addto_chunk(retval, sizeof(sat), &sat);
sctp_addto_chunk(retval, num_types * sizeof(__u16), &types);
sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);
nodata:
if (addrs.v)
kfree(addrs.v);
......@@ -245,7 +237,8 @@ sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
retval = NULL;
addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, priority);
addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len,
priority);
if (!addrs.v)
goto nomem_rawaddr;
......@@ -586,14 +579,12 @@ sctp_chunk_t *sctp_make_sack(const sctp_association_t *asoc)
sctp_gap_ack_block_t gab;
int length;
__u32 ctsn;
sctp_tsnmap_iter_t iter;
__u16 num_gabs;
__u16 num_dup_tsns = asoc->peer.next_dup_tsn;
const sctp_tsnmap_t *map = &asoc->peer.tsn_map;
struct sctp_tsnmap_iter iter;
__u16 num_gabs, num_dup_tsns;
struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
ctsn = sctp_tsnmap_get_ctsn(map);
SCTP_DEBUG_PRINTK("make_sack: sackCTSNAck sent is 0x%x.\n",
ctsn);
SCTP_DEBUG_PRINTK("sackCTSNAck sent is 0x%x.\n", ctsn);
/* Count the number of Gap Ack Blocks. */
sctp_tsnmap_iter_init(map, &iter);
......@@ -603,15 +594,17 @@ sctp_chunk_t *sctp_make_sack(const sctp_association_t *asoc)
/* Do nothing. */
}
num_dup_tsns = sctp_tsnmap_num_dups(map);
/* Initialize the SACK header. */
sack.cum_tsn_ack = htonl(ctsn);
sack.a_rwnd = htonl(asoc->rwnd);
sack.num_gap_ack_blocks = htons(num_gabs);
sack.num_dup_tsns = htons(num_dup_tsns);
length = sizeof(sack)
+ sizeof(sctp_gap_ack_block_t) * num_gabs
+ sizeof(sctp_dup_tsn_t) * num_dup_tsns;
+ sizeof(__u32) * num_dup_tsns;
/* Create the chunk. */
retval = sctp_make_chunk(asoc, SCTP_CID_SACK, 0, length);
......@@ -658,21 +651,18 @@ sctp_chunk_t *sctp_make_sack(const sctp_association_t *asoc)
while(sctp_tsnmap_next_gap_ack(map, &iter, &gab.start, &gab.end)) {
gab.start = htons(gab.start);
gab.end = htons(gab.end);
sctp_addto_chunk(retval,
sizeof(sctp_gap_ack_block_t),
&gab);
sctp_addto_chunk(retval, sizeof(sctp_gap_ack_block_t), &gab);
}
/* Register the duplicates. */
sctp_addto_chunk(retval,
sizeof(sctp_dup_tsn_t) * num_dup_tsns,
&asoc->peer.dup_tsns);
sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
sctp_tsnmap_get_dups(map));
nodata:
return retval;
}
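As a quick worked example of the length computation above (illustrative values only, not taken from this patch; it relies on the same SCTP headers this file already includes):
/* Illustration only: chunk value length for a SACK carrying 2 Gap Ack
 * Blocks and 3 duplicate TSNs.  Gap Ack Blocks are pairs of 16-bit
 * offsets; each duplicate TSN is a 32-bit value.  sctp_make_chunk()
 * then prepends the common chunk header on top of this.
 */
static inline size_t example_sack_value_len(void)
{
	return sizeof(sctp_sackhdr_t)
	       + 2 * sizeof(sctp_gap_ack_block_t)
	       + 3 * sizeof(__u32);
}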
/* FIXME: Comments. */
/* Make a SHUTDOWN chunk. */
sctp_chunk_t *sctp_make_shutdown(const sctp_association_t *asoc)
{
sctp_chunk_t *retval;
......@@ -689,7 +679,6 @@ sctp_chunk_t *sctp_make_shutdown(const sctp_association_t *asoc)
retval->subh.shutdown_hdr =
sctp_addto_chunk(retval, sizeof(shut), &shut);
nodata:
return retval;
}
......@@ -1180,6 +1169,9 @@ int sctp_datachunks_from_user(sctp_association_t *asoc,
over = msg_len % max;
offset = 0;
if (whole && over)
SCTP_INC_STATS_USER(SctpFragUsrMsgs);
/* Create chunks for all the full sized DATA chunks. */
for (i=0, len=first_len; i < whole; i++) {
frag = SCTP_DATA_MIDDLE_FRAG;
......@@ -1284,7 +1276,7 @@ void sctp_chunk_assign_tsn(sctp_chunk_t *chunk)
* assign a TSN.
*/
chunk->subh.data_hdr->tsn =
htonl(__sctp_association_get_next_tsn(chunk->asoc));
htonl(sctp_association_get_next_tsn(chunk->asoc));
chunk->has_tsn = 1;
}
}
......
......@@ -253,7 +253,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
{
int error = 0;
int force;
sctp_cmd_t *command;
sctp_cmd_t *cmd;
sctp_chunk_t *new_obj;
sctp_chunk_t *chunk = NULL;
sctp_packet_t *packet;
......@@ -273,22 +273,22 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
* cmd->handle(x, y, z)
* --jgrimm
*/
while (NULL != (command = sctp_next_cmd(commands))) {
switch (command->verb) {
while (NULL != (cmd = sctp_next_cmd(commands))) {
switch (cmd->verb) {
case SCTP_CMD_NOP:
/* Do nothing. */
break;
case SCTP_CMD_NEW_ASOC:
/* Register a new association. */
asoc = command->obj.ptr;
asoc = cmd->obj.ptr;
/* Register with the endpoint. */
sctp_endpoint_add_asoc(ep, asoc);
sctp_hash_established(asoc);
break;
case SCTP_CMD_UPDATE_ASSOC:
sctp_assoc_update(asoc, command->obj.ptr);
sctp_assoc_update(asoc, cmd->obj.ptr);
break;
case SCTP_CMD_PURGE_OUTQUEUE:
......@@ -304,13 +304,12 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_NEW_STATE:
/* Enter a new state. */
sctp_cmd_new_state(commands, asoc, command->obj.state);
sctp_cmd_new_state(commands, asoc, cmd->obj.state);
break;
case SCTP_CMD_REPORT_TSN:
/* Record the arrival of a TSN. */
sctp_tsnmap_mark(&asoc->peer.tsn_map,
command->obj.u32);
sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32);
break;
case SCTP_CMD_GEN_SACK:
......@@ -319,14 +318,14 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
* the packet and MAYBE generate a SACK, or
* force a SACK out.
*/
force = command->obj.i32;
force = cmd->obj.i32;
error = sctp_gen_sack(asoc, force, commands);
break;
case SCTP_CMD_PROCESS_SACK:
/* Process an inbound SACK. */
error = sctp_cmd_process_sack(commands, asoc,
command->obj.ptr);
cmd->obj.ptr);
break;
case SCTP_CMD_GEN_INIT_ACK:
......@@ -347,16 +346,15 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
* layer which will bail.
*/
error = sctp_cmd_process_init(commands, asoc, chunk,
command->obj.ptr,
priority);
cmd->obj.ptr, priority);
break;
case SCTP_CMD_GEN_COOKIE_ECHO:
/* Generate a COOKIE ECHO chunk. */
new_obj = sctp_make_cookie_echo(asoc, chunk);
if (!new_obj) {
if (command->obj.ptr)
sctp_free_chunk(command->obj.ptr);
if (cmd->obj.ptr)
sctp_free_chunk(cmd->obj.ptr);
goto nomem;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
......@@ -365,9 +363,9 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
/* If there is an ERROR chunk to be sent along with
* the COOKIE_ECHO, send it, too.
*/
if (command->obj.ptr)
if (cmd->obj.ptr)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(command->obj.ptr));
SCTP_CHUNK(cmd->obj.ptr));
break;
case SCTP_CMD_GEN_SHUTDOWN:
......@@ -387,43 +385,36 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_CHUNK_ULP:
/* Send a chunk to the sockets layer. */
SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
"chunk_up:",
command->obj.ptr,
"ulpq:",
&asoc->ulpq);
sctp_ulpq_tail_data(&asoc->ulpq,
command->obj.ptr,
"chunk_up:", cmd->obj.ptr,
"ulpq:", &asoc->ulpq);
sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr,
GFP_ATOMIC);
break;
case SCTP_CMD_EVENT_ULP:
/* Send a notification to the sockets layer. */
SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
"event_up:",
command->obj.ptr,
"ulpq:",
&asoc->ulpq);
sctp_ulpq_tail_event(&asoc->ulpq,
command->obj.ptr);
"event_up:",cmd->obj.ptr,
"ulpq:",&asoc->ulpq);
sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr);
break;
case SCTP_CMD_REPLY:
/* Send a chunk to our peer. */
error = sctp_outq_tail(&asoc->outqueue,
command->obj.ptr);
cmd->obj.ptr);
break;
case SCTP_CMD_SEND_PKT:
/* Send a full packet to our peer. */
packet = command->obj.ptr;
packet = cmd->obj.ptr;
sctp_packet_transmit(packet);
sctp_ootb_pkt_free(packet);
break;
case SCTP_CMD_RETRAN:
/* Mark a transport for retransmission. */
sctp_retransmit(&asoc->outqueue,
command->obj.transport,
sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
SCTP_RETRANSMIT_T3_RTX);
break;
......@@ -434,32 +425,30 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_ECN_CE:
/* Do delayed CE processing. */
sctp_do_ecn_ce_work(asoc, command->obj.u32);
sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
break;
case SCTP_CMD_ECN_ECNE:
/* Do delayed ECNE processing. */
new_obj = sctp_do_ecn_ecne_work(asoc,
command->obj.u32,
new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
chunk);
if (new_obj) {
if (new_obj)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
}
break;
case SCTP_CMD_ECN_CWR:
/* Do delayed CWR processing. */
sctp_do_ecn_cwr_work(asoc, command->obj.u32);
sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
break;
case SCTP_CMD_SETUP_T2:
sctp_cmd_setup_t2(commands, asoc, command->obj.ptr);
sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
break;
case SCTP_CMD_TIMER_START:
timer = &asoc->timers[command->obj.to];
timeout = asoc->timeouts[command->obj.to];
timer = &asoc->timers[cmd->obj.to];
timeout = asoc->timeouts[cmd->obj.to];
if (!timeout)
BUG();
......@@ -469,29 +458,28 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_TIMER_RESTART:
timer = &asoc->timers[command->obj.to];
timeout = asoc->timeouts[command->obj.to];
timer = &asoc->timers[cmd->obj.to];
timeout = asoc->timeouts[cmd->obj.to];
if (!mod_timer(timer, jiffies + timeout))
sctp_association_hold(asoc);
break;
case SCTP_CMD_TIMER_STOP:
timer = &asoc->timers[command->obj.to];
timer = &asoc->timers[cmd->obj.to];
if (timer_pending(timer) && del_timer(timer))
sctp_association_put(asoc);
break;
case SCTP_CMD_INIT_RESTART:
/* Do the needed accounting and updates
* associated with restarting an initialization
* timer.
*/
asoc->counters[SCTP_COUNTER_INIT_ERROR]++;
asoc->timeouts[command->obj.to] *= 2;
if (asoc->timeouts[command->obj.to] >
asoc->timeouts[cmd->obj.to] *= 2;
if (asoc->timeouts[cmd->obj.to] >
asoc->max_init_timeo) {
asoc->timeouts[command->obj.to] =
asoc->timeouts[cmd->obj.to] =
asoc->max_init_timeo;
}
......@@ -506,7 +494,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
sctp_add_cmd_sf(commands,
SCTP_CMD_TIMER_RESTART,
SCTP_TO(command->obj.to));
SCTP_TO(cmd->obj.to));
break;
case SCTP_CMD_INIT_FAILED:
......@@ -519,25 +507,16 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_COUNTER_INC:
asoc->counters[command->obj.counter]++;
asoc->counters[cmd->obj.counter]++;
break;
case SCTP_CMD_COUNTER_RESET:
asoc->counters[command->obj.counter] = 0;
asoc->counters[cmd->obj.counter] = 0;
break;
case SCTP_CMD_REPORT_DUP:
if (asoc->peer.next_dup_tsn < SCTP_MAX_DUP_TSNS) {
asoc->peer.dup_tsns[asoc->peer.next_dup_tsn++] =
ntohl(command->obj.u32);
}
break;
case SCTP_CMD_REPORT_BIGGAP:
SCTP_DEBUG_PRINTK("Big gap: %x to %x\n",
sctp_tsnmap_get_ctsn(
&asoc->peer.tsn_map),
command->obj.u32);
sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
cmd->obj.u32);
break;
case SCTP_CMD_REPORT_BAD_TAG:
......@@ -546,17 +525,16 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_STRIKE:
/* Mark one strike against a transport. */
sctp_do_8_2_transport_strike(asoc,
command->obj.transport);
sctp_do_8_2_transport_strike(asoc, cmd->obj.transport);
break;
case SCTP_CMD_TRANSPORT_RESET:
t = command->obj.transport;
t = cmd->obj.transport;
sctp_cmd_transport_reset(commands, asoc, t);
break;
case SCTP_CMD_TRANSPORT_ON:
t = command->obj.transport;
t = cmd->obj.transport;
sctp_cmd_transport_on(commands, asoc, t, chunk);
break;
......@@ -565,7 +543,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_HB_TIMER_UPDATE:
t = command->obj.transport;
t = cmd->obj.transport;
sctp_cmd_hb_timer_update(commands, asoc, t);
break;
......@@ -574,17 +552,16 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_REPORT_ERROR:
error = command->obj.error;
error = cmd->obj.error;
break;
case SCTP_CMD_PROCESS_CTSN:
/* Dummy up a SACK for processing. */
sackh.cum_tsn_ack = command->obj.u32;
sackh.cum_tsn_ack = cmd->obj.u32;
sackh.a_rwnd = 0;
sackh.num_gap_ack_blocks = 0;
sackh.num_dup_tsns = 0;
sctp_add_cmd_sf(commands,
SCTP_CMD_PROCESS_SACK,
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
SCTP_SACKH(&sackh));
break;
......@@ -594,13 +571,23 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_RTO_PENDING:
t = command->obj.transport;
t = cmd->obj.transport;
t->rto_pending = 1;
break;
case SCTP_CMD_PART_DELIVER:
sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr,
GFP_ATOMIC);
break;
case SCTP_CMD_RENEGE:
sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr,
GFP_ATOMIC);
break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
command->verb, command->obj.ptr);
cmd->verb, cmd->obj.ptr);
break;
};
if (error)
......@@ -737,7 +724,6 @@ int sctp_gen_sack(sctp_association_t *asoc, int force, sctp_cmd_seq_t *commands)
asoc->a_rwnd = asoc->rwnd;
asoc->peer.sack_needed = 0;
asoc->peer.next_dup_tsn = 0;
error = sctp_outq_tail(&asoc->outqueue, sack);
......@@ -1014,7 +1000,7 @@ static void sctp_do_8_2_transport_strike(sctp_association_t *asoc,
static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
sctp_association_t *asoc)
{
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
event = sctp_ulpevent_make_assoc_change(asoc,
0,
......@@ -1041,7 +1027,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
sctp_subtype_t subtype,
sctp_chunk_t *chunk)
{
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
__u16 error = 0;
switch(event_type) {
......@@ -1061,12 +1047,11 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
break;
}
event = sctp_ulpevent_make_assoc_change(asoc,
0,
SCTP_COMM_LOST,
error, 0, 0,
GFP_ATOMIC);
/* Cancel any partial delivery in progress. */
sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
error, 0, 0, GFP_ATOMIC);
if (event)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(event));
......@@ -1141,7 +1126,7 @@ static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
if (del_timer(&t->hb_timer))
sctp_transport_put(t);
}
}
}
/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
......
......@@ -102,7 +102,7 @@ sctp_disposition_t sctp_sf_do_4_C(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands)
{
sctp_chunk_t *chunk = arg;
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
/* RFC 2960 6.10 Bundling
*
......@@ -146,6 +146,9 @@ sctp_disposition_t sctp_sf_do_4_C(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(SctpShutdowns);
SCTP_DEC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
......@@ -223,6 +226,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SctpOutCtrlChunks);
return SCTP_DISPOSITION_CONSUME;
} else {
return SCTP_DISPOSITION_NOMEM;
......@@ -264,7 +268,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
goto nomem_ack;
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem_ack;
......@@ -379,6 +383,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(SctpAborteds);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
}
......@@ -388,6 +393,9 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
SCTP_INC_STATS(SctpAborteds);
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
*/
......@@ -403,6 +411,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SctpOutCtrlChunks);
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
......@@ -504,7 +513,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
sctp_association_t *new_asoc;
sctp_init_chunk_t *peer_init;
sctp_chunk_t *repl;
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
int error = 0;
sctp_chunk_t *err_chk_p;
......@@ -557,6 +566,8 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(SctpCurrEstab);
SCTP_INC_STATS(SctpPassiveEstabs);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (new_asoc->autoclose)
......@@ -636,7 +647,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const sctp_endpoint_t *ep,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
/* RFC 2960 5.1 Normal Establishment of an Association
*
......@@ -648,6 +659,8 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const sctp_endpoint_t *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(SctpCurrEstab);
SCTP_INC_STATS(SctpActiveEstabs);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (asoc->autoclose)
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
......@@ -669,7 +682,6 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
......@@ -719,6 +731,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const sctp_endpoint_t *ep,
if (asoc->overall_error_count >= asoc->overall_error_threshold) {
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
SCTP_INC_STATS(SctpAborteds);
SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_DELETE_TCB;
}
......@@ -929,6 +943,8 @@ static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
goto out;
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
SCTP_INC_STATS(SctpOutCtrlChunks);
/* Discard the rest of the inbound packet. */
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
......@@ -1125,6 +1141,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SctpOutCtrlChunks);
retval = SCTP_DISPOSITION_CONSUME;
} else {
retval = SCTP_DISPOSITION_NOMEM;
......@@ -1355,7 +1372,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const sctp_endpoint_t *ep,
sctp_association_t *new_asoc)
{
sctp_init_chunk_t *peer_init;
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
sctp_chunk_t *repl;
/* new_asoc is a brand-new association, so these are not yet
......@@ -1421,7 +1438,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const sctp_endpoint_t *ep,
sctp_association_t *new_asoc)
{
sctp_init_chunk_t *peer_init;
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
sctp_chunk_t *repl;
/* new_asoc is a brand-new association, so these are not yet
......@@ -1436,6 +1453,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
......@@ -1503,7 +1521,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands,
sctp_association_t *new_asoc)
{
sctp_ulpevent_t *ev = NULL;
struct sctp_ulpevent *ev = NULL;
sctp_chunk_t *repl;
/* Clarification from Implementor's Guide:
......@@ -1519,6 +1537,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const sctp_endpoint_t *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
SCTP_NULL());
......@@ -1540,11 +1559,11 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const sctp_endpoint_t *ep,
SCTP_ULPEVENT(ev));
}
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
......@@ -1925,6 +1944,8 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const sctp_endpoint_t *ep,
/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
SCTP_INC_STATS(SctpAborteds);
SCTP_DEC_STATS(SctpCurrEstab);
/* BUG? This does not look complete... */
return SCTP_DISPOSITION_ABORT;
......@@ -1948,6 +1969,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(SctpAborteds);
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
......@@ -2241,6 +2263,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
sctp_datahdr_t *data_hdr;
sctp_chunk_t *err;
size_t datalen;
sctp_verb_t deliver;
int tmp;
__u32 tsn;
......@@ -2250,7 +2273,6 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
* that the value in the Verification Tag field of the
* received SCTP packet matches its own Tag.
*/
if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
......@@ -2307,10 +2329,40 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
datalen = ntohs(chunk->chunk_hdr->length);
datalen -= sizeof(sctp_data_chunk_t);
deliver = SCTP_CMD_CHUNK_ULP;
/* Think about partial delivery. */
if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
/* Even if we don't accept this chunk there is
* memory pressure.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
}
/* Spill over rwnd a little bit. Note: While allowed, this spill over
* seems a bit troublesome in that frag_point varies based on
* PMTU. In some cases, such as loopback, this might be a rather
* large spill over.
*/
if (asoc->rwnd_over || (datalen > asoc->rwnd + asoc->frag_point)) {
SCTP_DEBUG_PRINTK("Discarding tsn: %u datalen: %Zd, "
"rwnd: %d\n", tsn, datalen, asoc->rwnd);
goto discard_force;
/* If this is the next TSN, consider reneging to make
* room. Note: Playing nice with a confused sender. A
* malicious sender can still eat up all our buffer
* space and in the future we may want to detect and
* do more drastic reneging.
*/
if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
(sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
deliver = SCTP_CMD_RENEGE;
} else {
SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
"rwnd: %d\n", tsn, datalen,
asoc->rwnd);
goto discard_force;
}
}
/*
......@@ -2332,13 +2384,24 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
SCTP_INC_STATS(SctpAborteds);
SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_CONSUME;
}
/* We are accepting this DATA chunk. */
/* If definitely accepting the DATA chunk, record its TSN, otherwise
* wait for renege processing.
*/
if (SCTP_CMD_CHUNK_ULP == deliver)
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
/* Record the fact that we have received this TSN. */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
/* Note: Some chunks may get overcounted, either because we drop them
* after counting or because we renege and the chunk arrives again.
*/
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
SCTP_INC_STATS(SctpInUnorderChunks);
else
SCTP_INC_STATS(SctpInOrderChunks);
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
......@@ -2352,10 +2415,9 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
&data_hdr->stream,
sizeof(data_hdr->stream));
if (err) {
if (err)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
}
goto discard_noforce;
}
......@@ -2363,7 +2425,8 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
* SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
* chunk needs the updated rwnd.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_CHUNK_ULP, SCTP_CHUNK(chunk));
sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
......@@ -2536,6 +2599,8 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
SCTP_INC_STATS(SctpAborteds);
SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_CONSUME;
}
......@@ -2544,6 +2609,11 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
/* Record the fact that we have received this TSN. */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
SCTP_INC_STATS(SctpInUnorderChunks);
else
SCTP_INC_STATS(SctpInOrderChunks);
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
* If an endpoint receive a DATA chunk with an invalid stream
......@@ -2705,6 +2775,8 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SctpOutCtrlChunks);
return SCTP_DISPOSITION_CONSUME;
}
......@@ -2726,7 +2798,7 @@ sctp_disposition_t sctp_sf_operr_notify(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands)
{
sctp_chunk_t *chunk = arg;
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
while (chunk->chunk_end > chunk->skb->data) {
ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
......@@ -2764,7 +2836,7 @@ sctp_disposition_t sctp_sf_do_9_2_final(const sctp_endpoint_t *ep,
{
sctp_chunk_t *chunk = arg;
sctp_chunk_t *reply;
sctp_ulpevent_t *ev;
struct sctp_ulpevent *ev;
/* 10.2 H) SHUTDOWN COMPLETE notification
*
......@@ -2794,6 +2866,8 @@ sctp_disposition_t sctp_sf_do_9_2_final(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(SctpShutdowns);
SCTP_DEC_STATS(SctpCurrEstab);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
/* ...and remove all record of the association. */
......@@ -2834,6 +2908,8 @@ sctp_disposition_t sctp_sf_ootb(const sctp_endpoint_t *ep,
__u8 *ch_end;
int ootb_shut_ack = 0;
SCTP_INC_STATS(SctpOutOfBlues);
ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
do {
ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
......@@ -2901,6 +2977,8 @@ sctp_disposition_t sctp_sf_shut_8_4_5(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SctpOutCtrlChunks);
return SCTP_DISPOSITION_CONSUME;
}
......@@ -3472,6 +3550,10 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(const sctp_endpoint_t *ep,
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
SCTP_INC_STATS(SctpAborteds);
SCTP_DEC_STATS(SctpCurrEstab);
return retval;
}
......@@ -3527,6 +3609,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(SctpShutdowns);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
......@@ -3597,6 +3681,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(const sctp_endpoint_t *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(SctpAborteds);
/* Even if we can't send the ABORT due to low memory delete the
* TCB. This is a departure from our typical NOMEM handling.
*/
......@@ -3929,6 +4015,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const sctp_endpoint_t *ep,
if (asoc->overall_error_count >= asoc->overall_error_threshold) {
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
SCTP_INC_STATS(SctpAborteds);
SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_DELETE_TCB;
}
......@@ -4096,6 +4184,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const sctp_endpoint_t *ep,
if (asoc->overall_error_count >= asoc->overall_error_threshold) {
/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
SCTP_INC_STATS(SctpAborteds);
SCTP_DEC_STATS(SctpCurrEstab);
return SCTP_DISPOSITION_DELETE_TCB;
}
......@@ -4271,6 +4361,9 @@ sctp_sackhdr_t *sctp_sm_pull_sack(sctp_chunk_t *chunk)
__u16 num_blocks;
__u16 num_dup_tsns;
/* FIXME: Protect ourselves from reading too far into
* the skb from a bogus sender.
*/
sack = (sctp_sackhdr_t *) chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_sackhdr_t));
......@@ -4401,6 +4494,7 @@ void sctp_send_stale_cookie_err(const sctp_endpoint_t *ep,
sctp_packet_append_chunk(packet, err_chunk);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SctpOutCtrlChunks);
} else
sctp_free_chunk (err_chunk);
}
......
......@@ -81,19 +81,50 @@
/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static inline int sctp_wspace(sctp_association_t *asoc);
static inline int sctp_wspace(struct sctp_association *asoc);
static inline void sctp_set_owner_w(sctp_chunk_t *chunk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(sctp_association_t *asoc, long *timeo_p,
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
int msg_len);
static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(sctp_association_t *asoc, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static inline int sctp_verify_addr(struct sock *, union sctp_addr *, int);
static int sctp_bindx_add(struct sock *, struct sockaddr_storage *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr_storage *, int);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
/* Look up the association by its id. If this is not a UDP-style
* socket, the ID field is always ignored.
*/
sctp_association_t *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
sctp_association_t *asoc = NULL;
/* If this is not a UDP-style socket, assoc id should be
* ignored.
*/
if (SCTP_SOCKET_UDP != sctp_sk(sk)->type) {
if (!list_empty(&sctp_sk(sk)->ep->asocs))
asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
sctp_association_t, asocs);
return asoc;
}
/* First, verify that this is a kernel address. */
if (sctp_is_valid_kaddr((unsigned long) id)) {
sctp_association_t *temp = (sctp_association_t *) id;
/* Verify that this _is_ an sctp_association_t
* data structure and if so, that the socket matches.
*/
if ((SCTP_ASSOC_EYECATCHER == temp->eyecatcher) &&
(temp->base.sk == sk))
asoc = temp;
}
return asoc;
}
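A minimal usage sketch (hypothetical caller, not part of this patch): option handlers can hand the user-supplied id straight to sctp_id2assoc() and treat a NULL return as -EINVAL; for a TCP-style socket the id is simply ignored.
static int example_assoc_lookup(struct sock *sk, sctp_assoc_t id)
{
	sctp_association_t *asoc;

	asoc = sctp_id2assoc(sk, id);	/* NULL if no matching association */
	if (!asoc)
		return -EINVAL;
	return 0;
}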
/* API 3.1.2 bind() - UDP Style Syntax
* The syntax of bind() is,
......@@ -158,7 +189,7 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_opt *opt,
/* Bind a local address either to an endpoint or to an association. */
SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
sctp_opt_t *sp = sctp_sk(sk);
struct sctp_opt *sp = sctp_sk(sk);
sctp_endpoint_t *ep = sp->ep;
sctp_bind_addr_t *bp = &ep->base.bind_addr;
struct sctp_af *af;
......@@ -454,7 +485,7 @@ int sctp_bindx_add(struct sock *sk, struct sockaddr_storage *addrs, int addrcnt)
*/
int sctp_bindx_rem(struct sock *sk, struct sockaddr_storage *addrs, int addrcnt)
{
sctp_opt_t *sp = sctp_sk(sk);
struct sctp_opt *sp = sctp_sk(sk);
sctp_endpoint_t *ep = sp->ep;
int cnt;
sctp_bind_addr_t *bp = &ep->base.bind_addr;
......@@ -662,6 +693,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
/* Clean up any skbs sitting on the receive queue. */
skb_queue_purge(&sk->receive_queue);
skb_queue_purge(&sctp_sk(sk)->pd_lobby);
/* This will run the backlog queue. */
sctp_release_sock(sk);
......@@ -714,7 +746,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, int msg_len)
{
sctp_opt_t *sp;
struct sctp_opt *sp;
sctp_endpoint_t *ep;
sctp_association_t *new_asoc=NULL, *asoc=NULL;
struct sctp_transport *transport;
......@@ -817,19 +849,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
}
}
} else {
/* For a peeled-off socket, ignore any associd specified by
* the user with SNDRCVINFO.
*/
if (SCTP_SOCKET_UDP_HIGH_BANDWIDTH == sp->type) {
if (list_empty(&ep->asocs)) {
err = -EINVAL;
goto out_unlock;
}
asoc = list_entry(ep->asocs.next, sctp_association_t,
asocs);
} else if (associd) {
asoc = sctp_id2assoc(sk, associd);
}
asoc = sctp_id2assoc(sk, associd);
if (!asoc) {
err = -EINVAL;
goto out_unlock;
......@@ -939,6 +959,19 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
/* ASSERT: we have a valid association at this point. */
SCTP_DEBUG_PRINTK("We have a valid association.\n");
if (!sinfo) {
/* If the user didn't specify SNDRCVINFO, make up one with
* some defaults.
*/
default_sinfo.sinfo_stream = asoc->defaults.stream;
default_sinfo.sinfo_flags = asoc->defaults.flags;
default_sinfo.sinfo_ppid = asoc->defaults.ppid;
default_sinfo.sinfo_context = asoc->defaults.context;
default_sinfo.sinfo_timetolive = asoc->defaults.timetolive;
default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
sinfo = &default_sinfo;
}
/* API 7.1.7, the sndbuf size per association bounds the
* maximum size of data that can be sent in a single send call.
*/
......@@ -963,13 +996,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
err = -EINVAL;
goto out_free;
}
} else {
/* If the user didn't specify SNDRCVINFO, make up one with
* some defaults.
*/
default_sinfo.sinfo_stream = asoc->defaults.stream;
default_sinfo.sinfo_ppid = asoc->defaults.ppid;
sinfo = &default_sinfo;
}
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
......@@ -979,21 +1005,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_free;
}
#if 0
/* FIXME: This looks wrong so I'll comment out.
* We should be able to use this same technique for
* primary address override! --jgrimm
*/
/* If the user gave us an address, copy it in. */
if (msg->msg_name) {
chunk->transport = sctp_assoc_lookup_paddr(asoc, &to);
if (!chunk->transport) {
err = -EINVAL;
goto out_free;
}
}
#endif /* 0 */
/* Break the message into multiple chunks of maximum size. */
skb_queue_head_init(&chunks);
err = sctp_datachunks_from_user(asoc, sinfo, msg, msg_len, &chunks);
......@@ -1013,6 +1024,23 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
/* Do accounting for the write space. */
sctp_set_owner_w(chunk);
/* This flag, in the UDP model, requests the SCTP stack to
* override the primary destination address with the
* address found with the sendto/sendmsg call.
*/
if (sinfo_flags & MSG_ADDR_OVER) {
if (!msg->msg_name) {
err = -EINVAL;
goto out_free;
}
chunk->transport = sctp_assoc_lookup_paddr(asoc, &to);
if (!chunk->transport) {
err = -EINVAL;
goto out_free;
}
}
/* Send it to the lower layers. */
sctp_primitive_SEND(asoc, chunk);
SCTP_DEBUG_PRINTK("We sent primitively.\n");
......@@ -1110,22 +1138,17 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
int len, int noblock, int flags, int *addr_len)
{
sctp_ulpevent_t *event = NULL;
sctp_opt_t *sp = sctp_sk(sk);
struct sctp_ulpevent *event = NULL;
struct sctp_opt *sp = sctp_sk(sk);
struct sk_buff *skb;
int copied;
int err = 0;
int skb_len;
SCTP_DEBUG_PRINTK("sctp_recvmsg("
"%s: %p, %s: %p, %s: %d, %s: %d, %s: "
"0x%x, %s: %p)\n",
"sk", sk,
"msghdr", msg,
"len", len,
"knoblauch", noblock,
"flags", flags,
"addr_len", addr_len);
SCTP_DEBUG_PRINTK("sctp_recvmsg(%s: %p, %s: %p, %s: %d, %s: %d, %s: "
"0x%x, %s: %p)\n", "sk", sk, "msghdr", msg,
"len", len, "knoblauch", noblock,
"flags", flags, "addr_len", addr_len);
sctp_lock_sock(sk);
skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
......@@ -1143,7 +1166,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
event = (sctp_ulpevent_t *) skb->cb;
event = sctp_skb2event(skb);
if (err)
goto out_free;
......@@ -1170,7 +1193,6 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
/* If skb's length exceeds the user's buffer, update the skb and
* push it back to the receive_queue so that the next call to
* recvmsg() will return the remaining data. Don't set MSG_EOR.
* Otherwise, set MSG_EOR indicating the end of a message.
*/
if (skb_len > copied) {
msg->msg_flags &= ~MSG_EOR;
......@@ -1178,6 +1200,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
goto out_free;
sctp_skb_pull(skb, copied);
skb_queue_head(&sk->receive_queue, skb);
/* When only partial message is copied to the user, increase
* rwnd by that amount. If all the data in the skb is read,
* rwnd is updated when the skb's destructor is called via
......@@ -1185,9 +1208,11 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
*/
sctp_assoc_rwnd_increase(event->asoc, copied);
goto out;
} else {
msg->msg_flags |= MSG_EOR;
}
} else if ((event->msg_flags & MSG_NOTIFICATION) ||
(event->msg_flags & MSG_EOR))
msg->msg_flags |= MSG_EOR;
else
msg->msg_flags &= ~MSG_EOR;
out_free:
sctp_ulpevent_free(event); /* Free the skb. */
......@@ -1212,8 +1237,8 @@ static inline int sctp_setsockopt_disable_fragments(struct sock *sk,
return 0;
}
static inline int sctp_setsockopt_set_events(struct sock *sk, char *optval,
int optlen)
static inline int sctp_setsockopt_events(struct sock *sk, char *optval,
int optlen)
{
if (optlen != sizeof(struct sctp_event_subscribe))
return -EINVAL;
......@@ -1225,7 +1250,7 @@ static inline int sctp_setsockopt_set_events(struct sock *sk, char *optval,
static inline int sctp_setsockopt_autoclose(struct sock *sk, char *optval,
int optlen)
{
sctp_opt_t *sp = sctp_sk(sk);
struct sctp_opt *sp = sctp_sk(sk);
/* Applicable to UDP-style socket only */
if (SCTP_SOCKET_TCP == sp->type)
......@@ -1239,9 +1264,8 @@ static inline int sctp_setsockopt_autoclose(struct sock *sk, char *optval,
return 0;
}
static inline int sctp_setsockopt_set_peer_addr_params(struct sock *sk,
char *optval,
int optlen)
static inline int sctp_setsockopt_peer_addr_params(struct sock *sk,
char *optval, int optlen)
{
struct sctp_paddrparams params;
sctp_association_t *asoc;
......@@ -1279,8 +1303,7 @@ static inline int sctp_setsockopt_set_peer_addr_params(struct sock *sk,
error = sctp_primitive_REQUESTHEARTBEAT (asoc, trans);
if (error)
return error;
}
else {
} else {
/* The value of the heartbeat interval, in milliseconds. A value of 0,
* when modifying the parameter, specifies that the heartbeat on this
* address should be disabled.
......@@ -1310,6 +1333,79 @@ static inline int sctp_setsockopt_initmsg(struct sock *sk, char *optval,
return 0;
}
/*
*
* 7.1.15 Set default send parameters (SET_DEFAULT_SEND_PARAM)
*
* Applications that wish to use the sendto() system call may wish to
* specify a default set of parameters that would normally be supplied
* through the inclusion of ancillary data. This socket option allows
* such an application to set the default sctp_sndrcvinfo structure.
* The application that wishes to use this socket option simply passes
* in to this call the sctp_sndrcvinfo structure defined in Section
* 5.2.2) The input parameters accepted by this call include
* sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
* sinfo_timetolive. The user must provide the sinfo_assoc_id field in
* to this call if the caller is using the UDP model.
*/
static inline int sctp_setsockopt_default_send_param(struct sock *sk,
char *optval, int optlen)
{
struct sctp_sndrcvinfo info;
sctp_association_t *asoc;
if (optlen != sizeof(struct sctp_sndrcvinfo))
return -EINVAL;
if (copy_from_user(&info, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
if (!asoc)
return -EINVAL;
asoc->defaults.stream = info.sinfo_stream;
asoc->defaults.flags = info.sinfo_flags;
asoc->defaults.ppid = info.sinfo_ppid;
asoc->defaults.context = info.sinfo_context;
asoc->defaults.timetolive = info.sinfo_timetolive;
return 0;
}
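From the application side, exercising this option could look like the following userspace sketch (not part of this patch; the option name, level and sctp_sndrcvinfo layout are assumed to match the kernel structures above):
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Install default send parameters so later sendto()/send() calls on this
 * association inherit them without ancillary data.
 */
int set_default_send_param(int sd, sctp_assoc_t assoc_id,
			   uint16_t stream, uint32_t ppid)
{
	struct sctp_sndrcvinfo info;

	memset(&info, 0, sizeof(info));
	info.sinfo_assoc_id = assoc_id;	/* required in the UDP model */
	info.sinfo_stream = stream;	/* default outbound stream */
	info.sinfo_ppid = ppid;		/* default payload protocol id */

	return setsockopt(sd, IPPROTO_SCTP, SCTP_SET_DEFAULT_SEND_PARAM,
			  &info, sizeof(info));
}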
/* 7.1.10 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
*
* Requests that the local SCTP stack use the enclosed peer address as
* the association primary. The enclosed address must be one of the
* association peer's addresses.
*/
static int sctp_setsockopt_peer_prim(struct sock *sk, char *optval, int optlen)
{
struct sctp_setpeerprim prim;
struct sctp_association *asoc;
union sctp_addr *addr;
struct sctp_transport *trans;
if (optlen != sizeof(struct sctp_setpeerprim))
return -EINVAL;
if (copy_from_user(&prim, optval, sizeof(struct sctp_setpeerprim)))
return -EFAULT;
asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
if (!asoc)
return -EINVAL;
/* Find the requested address. */
addr = (union sctp_addr *) &(prim.sspp_addr);
trans = sctp_assoc_lookup_paddr(asoc, addr);
if (!trans)
return -ENOENT;
sctp_assoc_set_primary(asoc, trans);
return 0;
}
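A corresponding userspace sketch (again not part of this patch; the sctp_setpeerprim layout and option name are assumed to match the kernel side above):
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Ask the local stack to use 'peer' as the association's primary path.
 * The address must already be one of the peer's known addresses.
 */
int set_peer_primary(int sd, sctp_assoc_t assoc_id,
		     const struct sockaddr_in *peer)
{
	struct sctp_setpeerprim prim;

	memset(&prim, 0, sizeof(prim));
	prim.sspp_assoc_id = assoc_id;
	memcpy(&prim.sspp_addr, peer, sizeof(*peer));

	return setsockopt(sd, IPPROTO_SCTP, SCTP_SET_PEER_PRIMARY_ADDR,
			  &prim, sizeof(prim));
}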
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
......@@ -1385,7 +1481,7 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
break;
case SCTP_SET_EVENTS:
retval = sctp_setsockopt_set_events(sk, optval, optlen);
retval = sctp_setsockopt_events(sk, optval, optlen);
break;
case SCTP_AUTOCLOSE:
......@@ -1393,14 +1489,22 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
break;
case SCTP_SET_PEER_ADDR_PARAMS:
retval = sctp_setsockopt_set_peer_addr_params(sk, optval,
optlen);
retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
break;
case SCTP_INITMSG:
retval = sctp_setsockopt_initmsg(sk, optval, optlen);
break;
case SCTP_SET_DEFAULT_SEND_PARAM:
retval = sctp_setsockopt_default_send_param(sk, optval,
optlen);
break;
case SCTP_SET_PEER_PRIMARY_ADDR:
retval = sctp_setsockopt_peer_prim(sk, optval, optlen);
break;
default:
retval = -ENOPROTOOPT;
break;
......@@ -1432,7 +1536,7 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
sctp_opt_t *sp;
struct sctp_opt *sp;
sctp_endpoint_t *ep;
sctp_association_t *asoc;
struct sctp_transport *transport;
......@@ -1553,8 +1657,8 @@ SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
SCTP_STATIC int sctp_init_sock(struct sock *sk)
{
sctp_endpoint_t *ep;
sctp_protocol_t *proto;
sctp_opt_t *sp;
struct sctp_protocol *proto;
struct sctp_opt *sp;
SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk);
......@@ -1583,7 +1687,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
/* Initialize default RTO related parameters. These parameters can
* be modified for with the SCTP_RTOINFO socket option.
* FIXME: This are not used yet.
* FIXME: These are not used yet.
*/
sp->rtoinfo.srto_initial = proto->rto_initial;
sp->rtoinfo.srto_max = proto->rto_max;
......@@ -1620,6 +1724,11 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
*/
sp->autoclose = 0;
sp->pf = sctp_get_pf_specific(sk->family);
/* Control variables for partial data delivery. */
sp->pd_mode = 0;
skb_queue_head_init(&sp->pd_lobby);
/* Create a per socket endpoint structure. Even if we
* change the data structure relationships, this may still
* be useful for storing pre-connect address information.
......@@ -1655,6 +1764,13 @@ SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
/* STUB */
}
/* 7.2.1 Association Status (SCTP_STATUS)
* Applications can retrieve current status information about an
* association, including association state, peer receiver window size,
* number of unacked data chunks, and number of data chunks pending
* receipt. This information is read-only.
*/
static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char *optval,
int *optlen)
{
......@@ -1676,20 +1792,10 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char *optval,
}
associd = status.sstat_assoc_id;
if ((SCTP_SOCKET_UDP_HIGH_BANDWIDTH != sctp_sk(sk)->type) && associd) {
assoc = sctp_id2assoc(sk, associd);
if (!assoc) {
retval = -EINVAL;
goto out;
}
} else {
ep = sctp_sk(sk)->ep;
if (list_empty(&ep->asocs)) {
retval = -EINVAL;
goto out;
}
assoc = list_entry(ep->asocs.next, sctp_association_t, asocs);
assoc = sctp_id2assoc(sk, associd);
if (!assoc) {
retval = -EINVAL;
goto out;
}
transport = assoc->peer.primary_path;
......@@ -1774,10 +1880,10 @@ SCTP_STATIC int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newso
struct sock *newsk;
struct socket *tmpsock;
sctp_endpoint_t *newep;
sctp_opt_t *oldsp = sctp_sk(oldsk);
sctp_opt_t *newsp;
struct sctp_opt *oldsp = sctp_sk(oldsk);
struct sctp_opt *newsp;
struct sk_buff *skb, *tmp;
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
int err = 0;
/* An association cannot be branched off from an already peeled-off
......@@ -1811,13 +1917,50 @@ SCTP_STATIC int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newso
* peeled off association to the new socket's receive queue.
*/
sctp_skb_for_each(skb, &oldsk->receive_queue, tmp) {
event = (sctp_ulpevent_t *)skb->cb;
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
__skb_unlink(skb, skb->list);
__skb_queue_tail(&newsk->receive_queue, skb);
}
}
/* Clean up any messages pending delivery due to partial
* delivery. Three cases:
* 1) No partial delivery; no work.
* 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
* 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
*/
skb_queue_head_init(&newsp->pd_lobby);
sctp_sk(newsk)->pd_mode = assoc->ulpq.pd_mode;
if (sctp_sk(oldsk)->pd_mode) {
struct sk_buff_head *queue;
/* Decide which queue to move pd_lobby skbs to. */
if (assoc->ulpq.pd_mode) {
queue = &newsp->pd_lobby;
} else
queue = &newsk->receive_queue;
/* Walk through the pd_lobby, looking for skbs that
* need to be moved to the new socket.
*/
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
__skb_unlink(skb, skb->list);
__skb_queue_tail(queue, skb);
}
}
/* Clear up any skbs waiting for the partial
* delivery to finish.
*/
if (assoc->ulpq.pd_mode)
sctp_clear_pd(oldsk);
}
/* Set the type of socket to indicate that it is peeled off from the
* original socket.
*/
......@@ -1874,8 +2017,8 @@ static inline int sctp_getsockopt_peeloff(struct sock *sk, int len, char *optval
return retval;
}
static inline int sctp_getsockopt_get_peer_addr_params(struct sock *sk,
int len, char *optval, int *optlen)
static inline int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
char *optval, int *optlen)
{
struct sctp_paddrparams params;
sctp_association_t *asoc;
......@@ -1927,8 +2070,8 @@ static inline int sctp_getsockopt_initmsg(struct sock *sk, int len, char *optval
return 0;
}
static inline int sctp_getsockopt_get_peer_addrs_num(struct sock *sk, int len,
char *optval, int *optlen)
static int sctp_getsockopt_peer_addrs_num(struct sock *sk, int len,
char *optval, int *optlen)
{
sctp_assoc_t id;
sctp_association_t *asoc;
......@@ -1957,7 +2100,7 @@ static inline int sctp_getsockopt_get_peer_addrs_num(struct sock *sk, int len,
return 0;
}
static inline int sctp_getsockopt_get_peer_addrs(struct sock *sk, int len,
static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
char *optval, int *optlen)
{
sctp_association_t *asoc;
......@@ -1997,8 +2140,8 @@ static inline int sctp_getsockopt_get_peer_addrs(struct sock *sk, int len,
return 0;
}
static inline int sctp_getsockopt_get_local_addrs_num(struct sock *sk, int len,
char *optval, int *optlen)
static inline int sctp_getsockopt_local_addrs_num(struct sock *sk, int len,
char *optval, int *optlen)
{
sctp_assoc_t id;
sctp_bind_addr_t *bp;
......@@ -2036,8 +2179,8 @@ static inline int sctp_getsockopt_get_local_addrs_num(struct sock *sk, int len,
return 0;
}
static inline int sctp_getsockopt_get_local_addrs(struct sock *sk, int len,
char *optval, int *optlen)
static inline int sctp_getsockopt_local_addrs(struct sock *sk, int len,
char *optval, int *optlen)
{
sctp_bind_addr_t *bp;
sctp_association_t *asoc;
......@@ -2087,6 +2230,84 @@ static inline int sctp_getsockopt_get_local_addrs(struct sock *sk, int len,
return 0;
}
/* 7.1.10 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
*
* Requests that the local SCTP stack use the enclosed peer address as
* the association primary. The enclosed address must be one of the
* association peer's addresses.
*/
static int sctp_getsockopt_peer_prim(struct sock *sk, int len,
char *optval, int *optlen)
{
struct sctp_setpeerprim prim;
struct sctp_association *asoc;
if (len != sizeof(struct sctp_setpeerprim))
return -EINVAL;
if (copy_from_user(&prim, optval, sizeof(struct sctp_setpeerprim)))
return -EFAULT;
asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
if (!asoc)
return -EINVAL;
if (!asoc->peer.primary_path)
return -ENOTCONN;
memcpy(&prim.sspp_addr, &asoc->peer.primary_path->ipaddr,
sizeof(union sctp_addr));
if (copy_to_user(optval, &prim, sizeof(struct sctp_setpeerprim)))
return -EFAULT;
return 0;
}
/*
*
* 7.1.15 Set default send parameters (SET_DEFAULT_SEND_PARAM)
*
* Applications that wish to use the sendto() system call may wish to
* specify a default set of parameters that would normally be supplied
* through the inclusion of ancillary data. This socket option allows
* such an application to set the default sctp_sndrcvinfo structure.
* The application that wishes to use this socket option simply passes
* in to this call the sctp_sndrcvinfo structure defined in Section
* 5.2.2) The input parameters accepted by this call include
* sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
* sinfo_timetolive. The user must provide the sinfo_assoc_id field in
* to this call if the caller is using the UDP model.
*
* For getsockopt, it gets the default sctp_sndrcvinfo structure.
*/
static inline int sctp_getsockopt_default_send_param(struct sock *sk,
int len, char *optval, int *optlen)
{
struct sctp_sndrcvinfo info;
sctp_association_t *asoc;
if (len != sizeof(struct sctp_sndrcvinfo))
return -EINVAL;
if (copy_from_user(&info, optval, sizeof(struct sctp_sndrcvinfo)))
return -EFAULT;
asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
if (!asoc)
return -EINVAL;
info.sinfo_stream = asoc->defaults.stream;
info.sinfo_flags = asoc->defaults.flags;
info.sinfo_ppid = asoc->defaults.ppid;
info.sinfo_context = asoc->defaults.context;
info.sinfo_timetolive = asoc->defaults.timetolive;
if (copy_to_user(optval, &info, sizeof(struct sctp_sndrcvinfo)))
return -EFAULT;
return 0;
}
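A corresponding userspace sketch (illustrative only; headers and socket setup as in the previous sketch, with 'fd' and 'assoc_id' assumed):
/* Hypothetical fragment: read the association's default send parameters. */
struct sctp_sndrcvinfo info;
socklen_t len = sizeof(info);
memset(&info, 0, sizeof(info));
info.sinfo_assoc_id = assoc_id;		/* required for the UDP-style model */
if (getsockopt(fd, SOL_SCTP, SCTP_SET_DEFAULT_SEND_PARAM, &info, &len) == 0)
	printf("default stream %u, ppid %u, ttl %u\n",
	       (unsigned) info.sinfo_stream,
	       (unsigned) info.sinfo_ppid,
	       (unsigned) info.sinfo_timetolive);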
SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
char *optval, int *optlen)
{
......@@ -2117,53 +2338,49 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
case SCTP_STATUS:
retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
break;
case SCTP_DISABLE_FRAGMENTS:
retval = sctp_getsockopt_disable_fragments(sk, len, optval,
optlen);
break;
case SCTP_SET_EVENTS:
retval = sctp_getsockopt_set_events(sk, len, optval, optlen);
break;
case SCTP_AUTOCLOSE:
retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
break;
case SCTP_SOCKOPT_PEELOFF:
retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
break;
case SCTP_GET_PEER_ADDR_PARAMS:
retval = sctp_getsockopt_get_peer_addr_params(sk, len, optval,
optlen);
retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
optlen);
break;
case SCTP_INITMSG:
retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
break;
case SCTP_GET_PEER_ADDRS_NUM:
retval = sctp_getsockopt_get_peer_addrs_num(sk, len, optval,
optlen);
retval = sctp_getsockopt_peer_addrs_num(sk, len, optval,
optlen);
break;
case SCTP_GET_LOCAL_ADDRS_NUM:
retval = sctp_getsockopt_get_local_addrs_num(sk, len, optval,
optlen);
retval = sctp_getsockopt_local_addrs_num(sk, len, optval,
optlen);
break;
case SCTP_GET_PEER_ADDRS:
retval = sctp_getsockopt_get_peer_addrs(sk, len, optval,
optlen);
retval = sctp_getsockopt_peer_addrs(sk, len, optval,
optlen);
break;
case SCTP_GET_LOCAL_ADDRS:
retval = sctp_getsockopt_get_local_addrs(sk, len, optval,
optlen);
retval = sctp_getsockopt_local_addrs(sk, len, optval,
optlen);
break;
case SCTP_SET_DEFAULT_SEND_PARAM:
retval = sctp_getsockopt_default_send_param(sk, len,
optval, optlen);
break;
case SCTP_SET_PEER_PRIMARY_ADDR:
retval = sctp_getsockopt_peer_prim(sk, len, optval, optlen);
break;
default:
retval = -ENOPROTOOPT;
break;
......@@ -2186,7 +2403,7 @@ static void sctp_unhash(struct sock *sk)
/* Check if port is acceptable. Possibly find first available port.
*
* The port hash table (contained in the 'global' SCTP protocol storage
* returned by sctp_protocol_t * sctp_get_protocol()). The hash
* returned by struct sctp_protocol *sctp_get_protocol()). The hash
* table is an array of 4096 lists (sctp_bind_hashbucket_t). Each
* list (the list number is the port number hashed out, so as you
* would expect from a hash function, all the ports in a given list have
......@@ -2201,7 +2418,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
sctp_bind_hashbucket_t *head; /* hash list */
sctp_bind_bucket_t *pp; /* hash list port iterator */
sctp_protocol_t *sctp = sctp_get_protocol();
struct sctp_protocol *sctp = sctp_get_protocol();
unsigned short snum;
int ret;
......@@ -2389,7 +2606,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
*/
SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
{
sctp_opt_t *sp = sctp_sk(sk);
struct sctp_opt *sp = sctp_sk(sk);
sctp_endpoint_t *ep = sp->ep;
/* Only UDP style sockets that are not peeled off are allowed to
......@@ -2539,7 +2756,7 @@ static sctp_bind_bucket_t *sctp_bucket_create(sctp_bind_hashbucket_t *head, unsi
/* FIXME: Comments! */
static __inline__ void __sctp_put_port(struct sock *sk)
{
sctp_protocol_t *sctp_proto = sctp_get_protocol();
struct sctp_protocol *sctp_proto = sctp_get_protocol();
sctp_bind_hashbucket_t *head =
&sctp_proto->port_hashtable[sctp_phashfn(inet_sk(sk)->num)];
sctp_bind_bucket_t *pp;
......
......@@ -42,7 +42,7 @@
#include <net/sctp/structs.h>
#include <linux/sysctl.h>
extern sctp_protocol_t sctp_proto;
extern struct sctp_protocol sctp_proto;
static ctl_table sctp_table[] = {
{
......
......@@ -83,7 +83,7 @@ struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
const union sctp_addr *addr,
int priority)
{
sctp_protocol_t *proto = sctp_get_protocol();
struct sctp_protocol *proto = sctp_get_protocol();
/* Copy in the address. */
peer->ipaddr = *addr;
......@@ -262,7 +262,7 @@ void sctp_transport_put(struct sctp_transport *transport)
/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
sctp_protocol_t *proto = sctp_get_protocol();
struct sctp_protocol *proto = sctp_get_protocol();
/* Check for valid transport. */
SCTP_ASSERT(tp, "NULL transport", return);
......
......@@ -3,40 +3,40 @@
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
*
*
* This file is part of the SCTP kernel reference Implementation
*
*
* These functions manipulate sctp tsn mapping array.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP reference implementation is distributed in the hope that it
*
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Jon Grimm <jgrimm@us.ibm.com>
* Karl Knutson <karl@athena.chicago.il.us>
*
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
......@@ -45,21 +45,21 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
static void _sctp_tsnmap_update(sctp_tsnmap_t *map);
static void _sctp_tsnmap_update_pending_data(sctp_tsnmap_t *map);
static void _sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
__u16 len, __u16 base,
int *started, __u16 *start,
int *ended, __u16 *end);
static void sctp_tsnmap_update(struct sctp_tsnmap *map);
static void sctp_tsnmap_update_pending_data(struct sctp_tsnmap *map);
static void sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
__u16 len, __u16 base,
int *started, __u16 *start,
int *ended, __u16 *end);
/* Create a new sctp_tsnmap.
* Allocate room to store at least 'len' contiguous TSNs.
*/
sctp_tsnmap_t *sctp_tsnmap_new(__u16 len, __u32 initial_tsn, int priority)
struct sctp_tsnmap *sctp_tsnmap_new(__u16 len, __u32 initial_tsn, int priority)
{
sctp_tsnmap_t *retval;
struct sctp_tsnmap *retval;
retval = kmalloc(sizeof(sctp_tsnmap_t) +
retval = kmalloc(sizeof(struct sctp_tsnmap) +
sctp_tsnmap_storage_size(len),
priority);
if (!retval)
......@@ -72,13 +72,13 @@ sctp_tsnmap_t *sctp_tsnmap_new(__u16 len, __u32 initial_tsn, int priority)
fail_map:
kfree(retval);
fail:
return NULL;
}
/* Initialize a block of memory as a tsnmap. */
sctp_tsnmap_t *sctp_tsnmap_init(sctp_tsnmap_t *map, __u16 len, __u32 initial_tsn)
struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
__u32 initial_tsn)
{
map->tsn_map = map->raw_map;
map->overflow_map = map->tsn_map + len;
......@@ -94,6 +94,7 @@ sctp_tsnmap_t *sctp_tsnmap_init(sctp_tsnmap_t *map, __u16 len, __u32 initial_tsn
map->max_tsn_seen = map->cumulative_tsn_ack_point;
map->malloced = 0;
map->pending_data = 0;
map->num_dup_tsns = 0;
return map;
}
......@@ -104,7 +105,7 @@ sctp_tsnmap_t *sctp_tsnmap_init(sctp_tsnmap_t *map, __u16 len, __u32 initial_tsn
* >0 if the TSN has been seen (duplicate)
* <0 if the TSN is invalid (too large to track)
*/
int sctp_tsnmap_check(const sctp_tsnmap_t *map, __u32 tsn)
int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
{
__s32 gap;
int dup;
......@@ -136,7 +137,7 @@ int sctp_tsnmap_check(const sctp_tsnmap_t *map, __u32 tsn)
}
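The three return classes above map directly onto the receive path. A hedged sketch of how a caller might combine sctp_tsnmap_check() with the mark and report_dup operations defined in this file (illustrative only; 'example_track_tsn' is not a real kernel function):
/* Illustrative sketch: classify an arriving TSN against the map. */
static int example_track_tsn(struct sctp_tsnmap *map, __u32 tsn)
{
	int rc = sctp_tsnmap_check(map, tsn);
	if (rc < 0)
		return -1;			/* too far ahead to track */
	if (rc > 0) {
		sctp_tsnmap_report_dup(map, tsn); /* already seen: note the dup */
		return 0;
	}
	sctp_tsnmap_mark(map, tsn);		/* new TSN: record it */
	return 1;
}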
/* Is there a gap in the TSN map? */
int sctp_tsnmap_has_gap(const sctp_tsnmap_t *map)
int sctp_tsnmap_has_gap(const struct sctp_tsnmap *map)
{
int has_gap;
......@@ -145,7 +146,7 @@ int sctp_tsnmap_has_gap(const sctp_tsnmap_t *map)
}
/* Mark this TSN as seen. */
void sctp_tsnmap_mark(sctp_tsnmap_t *map, __u32 tsn)
void sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
{
__s32 gap;
......@@ -173,40 +174,45 @@ void sctp_tsnmap_mark(sctp_tsnmap_t *map, __u32 tsn)
/* Go fixup any internal TSN mapping variables including
* cumulative_tsn_ack_point.
*/
_sctp_tsnmap_update(map);
sctp_tsnmap_update(map);
}
void sctp_tsnmap_report_dup(struct sctp_tsnmap *map, __u32 tsn)
{
}
/* Retrieve the Cumulative TSN Ack Point. */
__u32 sctp_tsnmap_get_ctsn(const sctp_tsnmap_t *map)
__u32 sctp_tsnmap_get_ctsn(const struct sctp_tsnmap *map)
{
return map->cumulative_tsn_ack_point;
}
/* Retrieve the highest TSN we've seen. */
__u32 sctp_tsnmap_get_max_tsn_seen(const sctp_tsnmap_t *map)
__u32 sctp_tsnmap_get_max_tsn_seen(const struct sctp_tsnmap *map)
{
return map->max_tsn_seen;
}
/* Dispose of a tsnmap. */
void sctp_tsnmap_free(sctp_tsnmap_t *map)
void sctp_tsnmap_free(struct sctp_tsnmap *map)
{
if (map->malloced)
kfree(map);
}
/* Initialize a Gap Ack Block iterator from memory being provided. */
void sctp_tsnmap_iter_init(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter)
void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map,
struct sctp_tsnmap_iter *iter)
{
/* Only start looking one past the Cumulative TSN Ack Point. */
iter->start = map->cumulative_tsn_ack_point + 1;
}
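A hedged sketch of how this iterator is meant to be driven together with sctp_tsnmap_next_gap_ack() below, e.g. when filling in the Gap Ack Blocks of a SACK (illustrative only; 'example_walk_gap_acks' is not a real kernel function):
/* Illustrative sketch: visit every Gap Ack Block recorded in the map. */
static void example_walk_gap_acks(const struct sctp_tsnmap *map)
{
	struct sctp_tsnmap_iter iter;
	__u16 start, end;
	sctp_tsnmap_iter_init(map, &iter);
	while (sctp_tsnmap_next_gap_ack(map, &iter, &start, &end)) {
		/* 'start' and 'end' are offsets relative to the
		 * Cumulative TSN Ack Point, as carried in a SACK.
		 */
	}
}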
/* Get the next Gap Ack Blocks. Returns 0 if there was not
* another block to get.
/* Get the next Gap Ack Blocks. Returns 0 if there was not another block
* to get.
*/
int sctp_tsnmap_next_gap_ack(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter,
__u16 *start, __u16 *end)
int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
struct sctp_tsnmap_iter *iter, __u16 *start, __u16 *end)
{
int started, ended;
__u16 _start, _end, offset;
......@@ -216,12 +222,10 @@ int sctp_tsnmap_next_gap_ack(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter,
/* Search the first mapping array. */
if (iter->start - map->base_tsn < map->len) {
offset = iter->start - map->base_tsn;
_sctp_tsnmap_find_gap_ack(map->tsn_map,
offset,
map->len, 0,
&started, &_start,
&ended, &_end);
sctp_tsnmap_find_gap_ack(map->tsn_map, offset, map->len, 0,
&started, &_start, &ended, &_end);
}
/* Do we need to check the overflow map? */
......@@ -235,12 +239,12 @@ int sctp_tsnmap_next_gap_ack(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter,
offset = iter->start - map->base_tsn - map->len;
/* Search the overflow map. */
_sctp_tsnmap_find_gap_ack(map->overflow_map,
offset,
map->len,
map->len,
&started, &_start,
&ended, &_end);
sctp_tsnmap_find_gap_ack(map->overflow_map,
offset,
map->len,
map->len,
&started, &_start,
&ended, &_end);
}
/* The Gap Ack Block happens to end at the end of the
......@@ -278,7 +282,7 @@ int sctp_tsnmap_next_gap_ack(const sctp_tsnmap_t *map, sctp_tsnmap_iter_t *iter,
/* This private helper function updates the tsnmap buffers and
* the Cumulative TSN Ack Point.
*/
static void _sctp_tsnmap_update(sctp_tsnmap_t *map)
static void sctp_tsnmap_update(struct sctp_tsnmap *map)
{
__u32 ctsn;
......@@ -301,10 +305,10 @@ static void _sctp_tsnmap_update(sctp_tsnmap_t *map)
} while (map->tsn_map[ctsn - map->base_tsn]);
map->cumulative_tsn_ack_point = ctsn - 1; /* Back up one. */
_sctp_tsnmap_update_pending_data(map);
sctp_tsnmap_update_pending_data(map);
}
static void _sctp_tsnmap_update_pending_data(sctp_tsnmap_t *map)
static void sctp_tsnmap_update_pending_data(struct sctp_tsnmap *map)
{
__u32 cum_tsn = map->cumulative_tsn_ack_point;
__u32 max_tsn = map->max_tsn_seen;
......@@ -324,7 +328,7 @@ static void _sctp_tsnmap_update_pending_data(sctp_tsnmap_t *map)
for (i = start; i < end; i++) {
if (map->tsn_map[i])
pending_data--;
}
}
if (gap >= map->len) {
start = 0;
......@@ -345,14 +349,14 @@ static void _sctp_tsnmap_update_pending_data(sctp_tsnmap_t *map)
* The flags "started" and "ended" tell us if we found the beginning
* or (respectively) the end of a Gap Ack Block.
*/
static void _sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
__u16 len, __u16 base,
int *started, __u16 *start,
int *ended, __u16 *end)
static void sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
__u16 len, __u16 base,
int *started, __u16 *start,
int *ended, __u16 *end)
{
int i = off;
/* Let's look through the entire array, but break out
/* Look through the entire array, but break out
* early if we have found the end of the Gap Ack Block.
*/
......@@ -381,3 +385,23 @@ static void _sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
}
}
}
/* Renege that we have seen a TSN. */
void sctp_tsnmap_renege(struct sctp_tsnmap *map, __u32 tsn)
{
__s32 gap;
if (TSN_lt(tsn, map->base_tsn))
return;
if (!TSN_lt(tsn, map->base_tsn + map->len + map->len))
return;
/* Assert: TSN is in range. */
gap = tsn - map->base_tsn;
/* Pretend we never saw the TSN. */
if (gap < map->len)
map->tsn_map[gap] = 0;
else
map->overflow_map[gap - map->len] = 0;
}
......@@ -5,37 +5,37 @@
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
* These functions manipulate an sctp event. The sctp_ulpevent_t is used
* to carry notifications and data to the ULP (sockets).
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
*
* These functions manipulate an sctp event. The struct ulpevent is used
* to carry notifications and data to the ULP (sockets).
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP reference implementation is distributed in the hope that it
*
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
*
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
......@@ -47,58 +47,51 @@
#include <net/sctp/sm.h>
static void sctp_ulpevent_set_owner_r(struct sk_buff *skb,
sctp_association_t *asoc);
static void
sctp_ulpevent_set_owner(struct sk_buff *skb, const sctp_association_t *asoc);
struct sctp_association *asoc);
static void sctp_ulpevent_set_owner(struct sk_buff *skb,
const struct sctp_association *asoc);
/* Create a new sctp_ulpevent. */
sctp_ulpevent_t *sctp_ulpevent_new(int size, int msg_flags, int priority)
struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, int priority)
{
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
struct sk_buff *skb;
skb = alloc_skb(size, priority);
if (!skb)
goto fail;
event = (sctp_ulpevent_t *) skb->cb;
event = sctp_ulpevent_init(event, skb, msg_flags);
event = sctp_skb2event(skb);
event = sctp_ulpevent_init(event, msg_flags);
if (!event)
goto fail_init;
event->malloced = 1;
return event;
fail_init:
kfree_skb(event->parent);
kfree_skb(skb);
fail:
return NULL;
}
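Throughout this patch the open-coded `(sctp_ulpevent_t *) skb->cb` casts are replaced with sctp_skb2event()/sctp_event2skb(). Their definitions are not part of this hunk; the following is only a plausible sketch, assuming the event continues to live at the start of the skb's control buffer:
/* Assumed definitions, for illustration only; the real ones live in the
 * SCTP headers.  offsetof() comes from <linux/stddef.h>.
 */
#define sctp_skb2event(skb)	((struct sctp_ulpevent *) (skb)->cb)
#define sctp_event2skb(ev) \
	((struct sk_buff *) ((char *) (ev) - offsetof(struct sk_buff, cb)))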
/* Initialize an ULP event from an given skb. */
sctp_ulpevent_t *sctp_ulpevent_init(sctp_ulpevent_t *event,
struct sk_buff *parent,
int msg_flags)
struct sctp_ulpevent *sctp_ulpevent_init(struct sctp_ulpevent *event,
int msg_flags)
{
memset(event, sizeof(sctp_ulpevent_t), 0x00);
memset(event, 0, sizeof(struct sctp_ulpevent));
event->msg_flags = msg_flags;
event->parent = parent;
event->malloced = 0;
return event;
}
/* Dispose of an event. */
void sctp_ulpevent_free(sctp_ulpevent_t *event)
void sctp_ulpevent_free(struct sctp_ulpevent *event)
{
if (event->malloced)
kfree_skb(event->parent);
kfree_skb(sctp_event2skb(event));
}
/* Is this a MSG_NOTIFICATION? */
int sctp_ulpevent_is_notification(const sctp_ulpevent_t *event)
int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event)
{
return event->msg_flags & MSG_NOTIFICATION;
return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION);
}
/* Create and initialize an SCTP_ASSOC_CHANGE event.
......@@ -112,24 +105,22 @@ int sctp_ulpevent_is_notification(const sctp_ulpevent_t *event)
* Note: There is no field checking here. If a field is unused it will be
* zero'd out.
*/
sctp_ulpevent_t *sctp_ulpevent_make_assoc_change(const sctp_association_t *asoc,
__u16 flags,
__u16 state,
__u16 error,
__u16 outbound,
__u16 inbound,
int priority)
struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
const sctp_association_t *asoc,
__u16 flags, __u16 state, __u16 error, __u16 outbound,
__u16 inbound, int priority)
{
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
struct sctp_assoc_change *sac;
struct sk_buff *skb;
event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
MSG_NOTIFICATION, priority);
if (!event)
goto fail;
skb = sctp_event2skb(event);
sac = (struct sctp_assoc_change *)
skb_put(event->parent, sizeof(struct sctp_assoc_change));
skb_put(skb, sizeof(struct sctp_assoc_change));
/* Socket Extensions for SCTP
* 5.3.1.1 SCTP_ASSOC_CHANGE
......@@ -198,13 +189,13 @@ sctp_ulpevent_t *sctp_ulpevent_make_assoc_change(const sctp_association_t *asoc,
* All notifications for a given association have the same association
* identifier. For TCP style socket, this field is ignored.
*/
sctp_ulpevent_set_owner(event->parent, asoc);
sctp_ulpevent_set_owner(skb, asoc);
sac->sac_assoc_id = sctp_assoc2id(asoc);
return event;
fail:
return NULL;
return NULL;
}
/* Create and initialize an SCTP_PEER_ADDR_CHANGE event.
......@@ -215,24 +206,22 @@ sctp_ulpevent_t *sctp_ulpevent_make_assoc_change(const sctp_association_t *asoc,
* When a destination address on a multi-homed peer encounters a change
* an interface details event is sent.
*/
sctp_ulpevent_t *sctp_ulpevent_make_peer_addr_change(
const sctp_association_t *asoc,
const struct sockaddr_storage *aaddr,
int flags,
int state,
int error,
int priority)
struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
const sctp_association_t *asoc, const struct sockaddr_storage *aaddr,
int flags, int state, int error, int priority)
{
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
struct sctp_paddr_change *spc;
struct sk_buff *skb;
event = sctp_ulpevent_new(sizeof(struct sctp_paddr_change),
MSG_NOTIFICATION, priority);
if (!event)
goto fail;
skb = sctp_event2skb(event);
spc = (struct sctp_paddr_change *)
skb_put(event->parent, sizeof(struct sctp_paddr_change));
skb_put(skb, sizeof(struct sctp_paddr_change));
/* Sockets API Extensions for SCTP
* Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
......@@ -265,7 +254,7 @@ sctp_ulpevent_t *sctp_ulpevent_make_peer_addr_change(
* Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
*
* spc_state: 32 bits (signed integer)
*
*
* This field holds one of a number of values that communicate the
* event that happened to the address.
*/
......@@ -291,7 +280,7 @@ sctp_ulpevent_t *sctp_ulpevent_make_peer_addr_change(
* All notifications for a given association have the same association
* identifier. For TCP style socket, this field is ignored.
*/
sctp_ulpevent_set_owner(event->parent, asoc);
sctp_ulpevent_set_owner(skb, asoc);
spc->spc_assoc_id = sctp_assoc2id(asoc);
/* Sockets API Extensions for SCTP
......@@ -325,12 +314,11 @@ sctp_ulpevent_t *sctp_ulpevent_make_peer_addr_change(
* specification [SCTP] and any extensions for a list of possible
* error formats.
*/
sctp_ulpevent_t *sctp_ulpevent_make_remote_error(const sctp_association_t *asoc,
sctp_chunk_t *chunk,
__u16 flags,
int priority)
struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
const sctp_association_t *asoc, sctp_chunk_t *chunk,
__u16 flags, int priority)
{
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
struct sctp_remote_error *sre;
struct sk_buff *skb;
sctp_errhdr_t *ch;
......@@ -358,13 +346,12 @@ sctp_ulpevent_t *sctp_ulpevent_make_remote_error(const sctp_association_t *asoc,
goto fail;
/* Embed the event fields inside the cloned skb. */
event = (sctp_ulpevent_t *) skb->cb;
event = sctp_ulpevent_init(event, skb, MSG_NOTIFICATION);
event = sctp_skb2event(skb);
event = sctp_ulpevent_init(event, MSG_NOTIFICATION);
if (!event)
goto fail;
event->malloced = 1;
sre = (struct sctp_remote_error *)
skb_push(skb, sizeof(struct sctp_remote_error));
......@@ -416,7 +403,8 @@ sctp_ulpevent_t *sctp_ulpevent_make_remote_error(const sctp_association_t *asoc,
* All notifications for a given association have the same association
* identifier. For TCP style socket, this field is ignored.
*/
sctp_ulpevent_set_owner(event->parent, asoc);
skb = sctp_event2skb(event);
sctp_ulpevent_set_owner(skb, asoc);
sre->sre_assoc_id = sctp_assoc2id(asoc);
return event;
......@@ -430,13 +418,11 @@ sctp_ulpevent_t *sctp_ulpevent_make_remote_error(const sctp_association_t *asoc,
* Socket Extensions for SCTP - draft-01
* 5.3.1.4 SCTP_SEND_FAILED
*/
sctp_ulpevent_t *sctp_ulpevent_make_send_failed(const sctp_association_t *asoc,
sctp_chunk_t *chunk,
__u16 flags,
__u32 error,
int priority)
struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
const sctp_association_t *asoc, sctp_chunk_t *chunk,
__u16 flags, __u32 error, int priority)
{
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
struct sctp_send_failed *ssf;
struct sk_buff *skb;
......@@ -452,16 +438,11 @@ sctp_ulpevent_t *sctp_ulpevent_make_send_failed(const sctp_association_t *asoc,
skb_pull(skb, sizeof(sctp_data_chunk_t));
/* Embed the event fields inside the cloned skb. */
event = (sctp_ulpevent_t *) skb->cb;
event = sctp_ulpevent_init(event, skb, MSG_NOTIFICATION);
event = sctp_skb2event(skb);
event = sctp_ulpevent_init(event, MSG_NOTIFICATION);
if (!event)
goto fail;
/* Mark as malloced, even though the constructor was not
* called.
*/
event->malloced = 1;
ssf = (struct sctp_send_failed *)
skb_push(skb, sizeof(struct sctp_send_failed));
......@@ -525,7 +506,8 @@ sctp_ulpevent_t *sctp_ulpevent_make_send_failed(const sctp_association_t *asoc,
* same association identifier. For TCP style socket, this field is
* ignored.
*/
sctp_ulpevent_set_owner(event->parent, asoc);
skb = sctp_event2skb(event);
sctp_ulpevent_set_owner(skb, asoc);
ssf->ssf_assoc_id = sctp_assoc2id(asoc);
return event;
......@@ -538,21 +520,22 @@ sctp_ulpevent_t *sctp_ulpevent_make_send_failed(const sctp_association_t *asoc,
* Socket Extensions for SCTP - draft-01
* 5.3.1.5 SCTP_SHUTDOWN_EVENT
*/
sctp_ulpevent_t *sctp_ulpevent_make_shutdown_event(
struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
const sctp_association_t *asoc,
__u16 flags,
int priority)
__u16 flags, int priority)
{
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
struct sctp_shutdown_event *sse;
struct sk_buff *skb;
event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
MSG_NOTIFICATION, priority);
if (!event)
goto fail;
skb = sctp_event2skb(event);
sse = (struct sctp_shutdown_event *)
skb_put(event->parent, sizeof(struct sctp_shutdown_event));
skb_put(skb, sizeof(struct sctp_shutdown_event));
/* Socket Extensions for SCTP
* 5.3.1.5 SCTP_SHUTDOWN_EVENT
......@@ -587,7 +570,7 @@ sctp_ulpevent_t *sctp_ulpevent_make_shutdown_event(
* All notifications for a given association have the same association
* identifier. For TCP style socket, this field is ignored.
*/
sctp_ulpevent_set_owner(event->parent, asoc);
sctp_ulpevent_set_owner(skb, asoc);
sse->sse_assoc_id = sctp_assoc2id(asoc);
return event;
......@@ -600,13 +583,13 @@ sctp_ulpevent_t *sctp_ulpevent_make_shutdown_event(
* to pass it to the upper layers. Go ahead and calculate the sndrcvinfo
* even if filtered out later.
*
* Socket Extensions for SCTP - draft-01
* Socket Extensions for SCTP
* 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
*/
sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
sctp_chunk_t *chunk, int priority)
struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
sctp_chunk_t *chunk, int priority)
{
sctp_ulpevent_t *event, *levent;
struct sctp_ulpevent *event;
struct sctp_sndrcvinfo *info;
struct sk_buff *skb, *list;
size_t padding, len;
......@@ -638,24 +621,19 @@ sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
sctp_ulpevent_set_owner_r(skb, asoc);
/* Embed the event fields inside the cloned skb. */
event = (sctp_ulpevent_t *) skb->cb;
event = sctp_skb2event(skb);
/* Initialize event with flags 0. */
event = sctp_ulpevent_init(event, skb, 0);
event = sctp_ulpevent_init(event, 0);
if (!event)
goto fail_init;
event->malloced = 1;
for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
/* Note: Not clearing the entire event struct as
* this is just a fragment of the real event. However,
* we still need to do rwnd accounting.
*/
for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
sctp_ulpevent_set_owner_r(list, asoc);
/* Initialize event with flags 0. */
levent = sctp_ulpevent_init(event, skb, 0);
if (!levent)
goto fail_init;
levent->malloced = 1;
}
info = (struct sctp_sndrcvinfo *) &event->sndrcvinfo;
......@@ -707,18 +685,26 @@ sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
* MSG_UNORDERED - This flag is present when the message was sent
* non-ordered.
*/
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
info->sinfo_flags |= MSG_UNORDERED;
/* FIXME: For reassembly, we need to have the fragmentation bits.
* This really does not belong in the event structure, but
* its difficult to fix everything at the same time. Eventually,
* we should create and skb based chunk structure. This structure
* storage can be converted to an event. --jgrimm
/* sinfo_cumtsn: 32 bit (unsigned integer)
*
* This field will hold the current cumulative TSN as
* known by the underlying SCTP layer. Note this field is
* ignored when sending and only valid for a receive
* operation when sinfo_flags are set to MSG_UNORDERED.
*/
info->sinfo_cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
}
/* Note: For reassembly, we need to have the fragmentation bits.
* For now, merge these into the msg_flags, since those bit
* positions are not used.
*/
event->chunk_flags = chunk->chunk_hdr->flags;
event->msg_flags |= chunk->chunk_hdr->flags;
/* With -04 draft, tsn moves into sndrcvinfo. */
/* With 04 draft, tsn moves into sndrcvinfo. */
info->sinfo_tsn = ntohl(chunk->subh.data_hdr->tsn);
/* Context is not used on receive. */
......@@ -745,19 +731,79 @@ sctp_ulpevent_t *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
return NULL;
}
/* Create a partial delivery related event.
*
* 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT
*
* When a receiver is engaged in a partial delivery of a
* message, this notification will be used to indicate
* various events.
*/
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
const sctp_association_t *asoc, __u32 indication, int priority)
{
struct sctp_ulpevent *event;
struct sctp_rcv_pdapi_event *pd;
struct sk_buff *skb;
event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
MSG_NOTIFICATION, priority);
if (!event)
goto fail;
skb = sctp_event2skb(event);
pd = (struct sctp_rcv_pdapi_event *)
skb_put(skb, sizeof(struct sctp_rcv_pdapi_event));
/* pdapi_type
* It should be SCTP_PARTIAL_DELIVERY_EVENT
*
* pdapi_flags: 16 bits (unsigned integer)
* Currently unused.
*/
pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
pd->pdapi_flags = 0;
/* pdapi_length: 32 bits (unsigned integer)
*
* This field is the total length of the notification data, including
* the notification header. It will generally be sizeof (struct
* sctp_rcv_pdapi_event).
*/
pd->pdapi_length = sizeof(struct sctp_rcv_pdapi_event);
/* pdapi_indication: 32 bits (unsigned integer)
*
* This field holds the indication being sent to the application.
*/
pd->pdapi_indication = indication;
/* pdapi_assoc_id: sizeof (sctp_assoc_t)
*
* The association id field, holds the identifier for the association.
*/
pd->pdapi_assoc_id = sctp_assoc2id(asoc);
return event;
fail:
return NULL;
}
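On the application side, a subscriber to this event would see it arrive as a notification on the socket. A hedged userspace sketch (illustrative only; 'buf' is assumed to hold a message returned by recvmsg() whose msg_flags contained MSG_NOTIFICATION):
union sctp_notification *sn = (union sctp_notification *) buf;
if (SCTP_PARTIAL_DELIVERY_EVENT == sn->h.sn_type) {
	/* The notification body is the sctp_rcv_pdapi_event laid out above. */
	struct sctp_rcv_pdapi_event *pd = (struct sctp_rcv_pdapi_event *) buf;
	if (SCTP_PARTIAL_DELIVERY_ABORTED == pd->pdapi_indication)
		fprintf(stderr, "partial delivery aborted on assoc %ld\n",
			(long) pd->pdapi_assoc_id);
}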
/* Return the notification type, assuming this is a notification
* event.
*/
__u16 sctp_ulpevent_get_notification_type(const sctp_ulpevent_t *event)
__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
{
union sctp_notification *notification;
struct sk_buff *skb;
notification = (union sctp_notification *) event->parent->data;
skb = sctp_event2skb((struct sctp_ulpevent *)event);
notification = (union sctp_notification *) skb->data;
return notification->h.sn_type;
}
/* Copy out the sndrcvinfo into a msghdr. */
void sctp_ulpevent_read_sndrcvinfo(const sctp_ulpevent_t *event,
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *msghdr)
{
if (!sctp_ulpevent_is_notification(event)) {
......@@ -771,7 +817,7 @@ void sctp_ulpevent_read_sndrcvinfo(const sctp_ulpevent_t *event,
static void sctp_rcvmsg_rfree(struct sk_buff *skb)
{
sctp_association_t *asoc;
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
/* Current stack structures assume that the rcv buffer is
* per socket. For UDP style sockets this is not true as
......@@ -779,16 +825,17 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
* Use the local private area of the skb to track the owning
* association.
*/
event = (sctp_ulpevent_t *) skb->cb;
event = sctp_skb2event(skb);
asoc = event->asoc;
sctp_assoc_rwnd_increase(asoc, skb_headlen(skb));
sctp_association_put(asoc);
}
/* Charge receive window for bytes received. */
static void sctp_ulpevent_set_owner_r(struct sk_buff *skb, sctp_association_t *asoc)
static void sctp_ulpevent_set_owner_r(struct sk_buff *skb,
sctp_association_t *asoc)
{
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
/* The current stack structures assume that the rcv buffer is
* per socket. For UDP-style sockets this is not true as
......@@ -798,7 +845,7 @@ static void sctp_ulpevent_set_owner_r(struct sk_buff *skb, sctp_association_t *a
*/
sctp_association_hold(asoc);
skb->sk = asoc->base.sk;
event = (sctp_ulpevent_t *) skb->cb;
event = sctp_skb2event(skb);
event->asoc = asoc;
skb->destructor = sctp_rcvmsg_rfree;
......@@ -809,26 +856,26 @@ static void sctp_ulpevent_set_owner_r(struct sk_buff *skb, sctp_association_t *a
/* A simple destructor to give up the reference to the association. */
static void sctp_ulpevent_rfree(struct sk_buff *skb)
{
sctp_ulpevent_t *event;
event = (sctp_ulpevent_t *)skb->cb;
struct sctp_ulpevent *event;
event = sctp_skb2event(skb);
sctp_association_put(event->asoc);
}
/* Hold the association in case the msg_name needs read out of
* the association.
/* Hold the association in case the msg_name needs read out of
* the association.
*/
static void sctp_ulpevent_set_owner(struct sk_buff *skb,
const sctp_association_t *asoc)
const struct sctp_association *asoc)
{
sctp_ulpevent_t *event;
struct sctp_ulpevent *event;
/* Cast away the const, as we are just wanting to
* bump the reference count.
*/
sctp_association_hold((sctp_association_t *)asoc);
sctp_association_hold((struct sctp_association *)asoc);
skb->sk = asoc->base.sk;
event = (sctp_ulpevent_t *)skb->cb;
event->asoc = (sctp_association_t *)asoc;
event = sctp_skb2event(skb);
event->asoc = (struct sctp_association *)asoc;
skb->destructor = sctp_ulpevent_rfree;
}
......@@ -84,6 +84,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
ulpq->asoc = asoc;
skb_queue_head_init(&ulpq->reasm);
skb_queue_head_init(&ulpq->lobby);
ulpq->pd_mode = 0;
ulpq->malloced = 0;
return ulpq;
......@@ -96,15 +97,16 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
struct sk_buff *skb;
struct sctp_ulpevent *event;
while ((skb = skb_dequeue(&ulpq->lobby))) {
event = (struct sctp_ulpevent *) skb->cb;
while ((skb = __skb_dequeue(&ulpq->lobby))) {
event = sctp_skb2event(skb);
sctp_ulpevent_free(event);
}
while ((skb = skb_dequeue(&ulpq->reasm))) {
event = (struct sctp_ulpevent *) skb->cb;
while ((skb = __skb_dequeue(&ulpq->reasm))) {
event = sctp_skb2event(skb);
sctp_ulpevent_free(event);
}
}
/* Dispose of a ulpqueue. */
......@@ -117,7 +119,7 @@ void sctp_ulpq_free(struct sctp_ulpq *ulpq)
/* Process an incoming DATA chunk. */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
int priority)
int priority)
{
struct sk_buff_head temp;
sctp_data_chunk_t *hdr;
......@@ -125,12 +127,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;
/* FIXME: Instead of event being the skb clone, we really should
* have a new skb based chunk structure that we can convert to
* an event. Temporarily, I'm carrying a few chunk fields in
* the event to allow reassembly. Its too painful to change
* everything at once. --jgrimm
*/
/* Create an event from the incoming chunk. */
event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, priority);
if (!event)
return -ENOMEM;
......@@ -139,10 +136,10 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
event = sctp_ulpq_reasm(ulpq, event);
/* Do ordering if needed. */
if (event) {
if ((event) && (event->msg_flags & MSG_EOR)) {
/* Create a temporary list to collect chunks on. */
skb_queue_head_init(&temp);
skb_queue_tail(&temp, event->parent);
__skb_queue_tail(&temp, sctp_event2skb(event));
event = sctp_ulpq_order(ulpq, event);
}
......@@ -154,10 +151,40 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
return 0;
}
/* Clear the partial delivery mode for this socket. Note: This
* assumes that no association is currently in partial delivery mode.
*/
int sctp_clear_pd(struct sock *sk)
{
struct sctp_opt *sp;
sp = sctp_sk(sk);
sp->pd_mode = 0;
if (!skb_queue_empty(&sp->pd_lobby)) {
struct list_head *list;
sctp_skb_list_tail(&sp->pd_lobby, &sk->receive_queue);
list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
INIT_LIST_HEAD(list);
return 1;
}
return 0;
}
/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
ulpq->pd_mode = 0;
return sctp_clear_pd(ulpq->asoc->base.sk);
}
/* Add a new event for propagation to the ULP. */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
struct sock *sk = ulpq->asoc->base.sk;
struct sk_buff_head *queue;
int clear_pd = 0;
/* If the socket is just going to throw this away, do not
* even try to deliver it.
......@@ -169,29 +196,55 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
goto out_free;
/* If we are in partial delivery mode, post to the lobby until
* partial delivery is cleared, unless, of course, _this_ association
* is the cause of the partial delivery.
*/
if (!sctp_sk(sk)->pd_mode) {
queue = &sk->receive_queue;
} else if (ulpq->pd_mode) {
if (event->msg_flags & MSG_NOTIFICATION)
queue = &sctp_sk(sk)->pd_lobby;
else {
clear_pd = event->msg_flags & MSG_EOR;
queue = &sk->receive_queue;
}
} else
queue = &sctp_sk(sk)->pd_lobby;
/* If we are harvesting multiple skbs they will be
* collected on a list.
*/
if (event->parent->list)
sctp_skb_list_tail(event->parent->list, &sk->receive_queue);
if (sctp_event2skb(event)->list)
sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
else
skb_queue_tail(&sk->receive_queue, event->parent);
skb_queue_tail(queue, sctp_event2skb(event));
wake_up_interruptible(sk->sleep);
/* Did we just complete partial delivery and need to get
* rolling again? Move pending data to the receive
* queue.
*/
if (clear_pd)
sctp_ulpq_clear_pd(ulpq);
if (queue == &sk->receive_queue)
wake_up_interruptible(sk->sleep);
return 1;
out_free:
if (event->parent->list)
skb_queue_purge(event->parent->list);
if (sctp_event2skb(event)->list)
skb_queue_purge(sctp_event2skb(event)->list);
else
kfree_skb(event->parent);
kfree_skb(sctp_event2skb(event));
return 0;
}
/* 2nd Level Abstractions */
/* Helper function to store chunks that need to be reassembled. */
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sk_buff *pos, *tmp;
......@@ -202,7 +255,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
/* Find the right place in this list. We store them by TSN. */
sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
cevent = (struct sctp_ulpevent *)pos->cb;
cevent = sctp_skb2event(pos);
ctsn = cevent->sndrcvinfo.sinfo_tsn;
if (TSN_lt(tsn, ctsn))
......@@ -211,9 +264,10 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
/* If the queue is empty, we have a different function to call. */
if (skb_peek(&ulpq->reasm))
__skb_insert(event->parent, pos->prev, pos, &ulpq->reasm);
__skb_insert(sctp_event2skb(event), pos->prev, pos,
&ulpq->reasm);
else
__skb_queue_tail(&ulpq->reasm, event->parent);
__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
}
/* Helper function to return an event corresponding to the reassembled
......@@ -231,7 +285,10 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
/* Store the pointer to the 2nd skb */
pos = f_frag->next;
if (f_frag == l_frag)
pos = NULL;
else
pos = f_frag->next;
/* Get the last skb in the f_frag's frag_list if present. */
for (last = list; list; last = list, list = list->next);
......@@ -246,7 +303,8 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
/* Remove the first fragment from the reassembly queue. */
__skb_unlink(f_frag, f_frag->list);
do {
while (pos) {
pnext = pos->next;
/* Update the len and data_len fields of the first fragment. */
......@@ -262,25 +320,27 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
pos->next = pnext;
pos = pnext;
} while (1);
};
event = (sctp_ulpevent_t *) f_frag->cb;
event = sctp_skb2event(f_frag);
SCTP_INC_STATS(SctpReasmUsrMsgs);
return event;
}
/* Helper function to check if an incoming chunk has filled up the last
* missing fragment in a SCTP datagram and return the corresponding event.
*/
static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent;
struct sctp_ulpevent *cevent;
struct sk_buff *first_frag = NULL;
__u32 ctsn, next_tsn;
sctp_ulpevent_t *retval = NULL;
struct sctp_ulpevent *retval = NULL;
/* Initialized to 0 just to avoid compiler warning message. Will
/* Initialized to 0 just to avoid compiler warning message. Will
* never be used with this value. It is referenced only after it
* is set when we find the first fragment of a message.
*/
......@@ -296,10 +356,10 @@ static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *
* start the next pass when we find another first fragment.
*/
sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
cevent = (sctp_ulpevent_t *) pos->cb;
cevent = sctp_skb2event(pos);
ctsn = cevent->sndrcvinfo.sinfo_tsn;
switch (cevent->chunk_flags & SCTP_DATA_FRAG_MASK) {
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_FIRST_FRAG:
first_frag = pos;
next_tsn = ctsn + 1;
......@@ -313,7 +373,7 @@ static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *
break;
case SCTP_DATA_LAST_FRAG:
if ((first_frag) && (ctsn == next_tsn))
if (first_frag && (ctsn == next_tsn))
retval = sctp_make_reassembled_event(
first_frag, pos);
else
......@@ -324,34 +384,162 @@ static inline sctp_ulpevent_t *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *
/* We have the reassembled event. There is no need to look
* further.
*/
if (retval)
if (retval) {
retval->msg_flags |= MSG_EOR;
break;
}
}
return retval;
}
/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
* need reassembling.
*/
static inline sctp_ulpevent_t *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
/* Retrieve the next set of fragments of a partial message. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
sctp_ulpevent_t *retval = NULL;
struct sk_buff *pos, *tmp, *last_frag, *first_frag;
struct sctp_ulpevent *cevent;
__u32 ctsn, next_tsn;
int is_last;
struct sctp_ulpevent *retval;
/* FIXME: We should be using some new chunk structure here
* instead of carrying chunk fields in the event structure.
* This is temporary as it is too painful to change everything
* at once.
/* The chunks are held in the reasm queue sorted by TSN.
* Walk through the queue sequentially and look for the first
* sequence of fragmented chunks.
*/
if (skb_queue_empty(&ulpq->reasm))
return NULL;
last_frag = first_frag = NULL;
retval = NULL;
next_tsn = 0;
is_last = 0;
sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
cevent = sctp_skb2event(pos);
ctsn = cevent->sndrcvinfo.sinfo_tsn;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_MIDDLE_FRAG:
if (!first_frag) {
first_frag = pos;
next_tsn = ctsn + 1;
last_frag = pos;
} else if (next_tsn == ctsn)
next_tsn++;
else
goto done;
break;
case SCTP_DATA_LAST_FRAG:
if (!first_frag)
first_frag = pos;
else if (ctsn != next_tsn)
goto done;
last_frag = pos;
is_last = 1;
goto done;
default:
return NULL;
};
}
/* We have the reassembled event. There is no need to look
* further.
*/
done:
retval = sctp_make_reassembled_event(first_frag, last_frag);
if (is_last)
retval->msg_flags |= MSG_EOR;
return retval;
}
/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
* need reassembling.
*/
static inline struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_ulpevent *retval = NULL;
/* Check if this is part of a fragmented message. */
if (SCTP_DATA_NOT_FRAG == (event->chunk_flags & SCTP_DATA_FRAG_MASK))
if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
event->msg_flags |= MSG_EOR;
return event;
}
sctp_ulpq_store_reasm(ulpq, event);
retval = sctp_ulpq_retrieve_reassembled(ulpq);
if (!ulpq->pd_mode)
retval = sctp_ulpq_retrieve_reassembled(ulpq);
else {
__u32 ctsn, ctsnap;
/* Do not even bother unless this is the next tsn to
* be delivered.
*/
ctsn = event->sndrcvinfo.sinfo_tsn;
ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
if (TSN_lte(ctsn, ctsnap))
retval = sctp_ulpq_retrieve_partial(ulpq);
}
return retval;
}
/* Retrieve the first part (sequential fragments) for partial delivery. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
struct sk_buff *pos, *tmp, *last_frag, *first_frag;
struct sctp_ulpevent *cevent;
__u32 ctsn, next_tsn;
struct sctp_ulpevent *retval;
/* The chunks are held in the reasm queue sorted by TSN.
* Walk through the queue sequentially and look for a sequence of
* fragmented chunks that start a datagram.
*/
if (skb_queue_empty(&ulpq->reasm))
return NULL;
last_frag = first_frag = NULL;
retval = NULL;
next_tsn = 0;
sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
cevent = sctp_skb2event(pos);
ctsn = cevent->sndrcvinfo.sinfo_tsn;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_FIRST_FRAG:
if (!first_frag) {
first_frag = pos;
next_tsn = ctsn + 1;
last_frag = pos;
} else
goto done;
break;
case SCTP_DATA_MIDDLE_FRAG:
if (!first_frag)
return NULL;
if (ctsn == next_tsn) {
next_tsn++;
last_frag = pos;
} else
goto done;
break;
default:
return NULL;
};
}
/* We have the reassembled event. There is no need to look
* further.
*/
done:
retval = sctp_make_reassembled_event(first_frag, last_frag);
return retval;
}
......@@ -359,7 +547,7 @@ static inline sctp_ulpevent_t *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
* ordered by an incoming chunk.
*/
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
struct sctp_ulpevent *event)
{
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *cevent;
......@@ -373,7 +561,7 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
/* We are holding the chunks by stream, by SSN. */
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
cevent = (sctp_ulpevent_t *) pos->cb;
cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->sndrcvinfo.sinfo_stream;
cssn = cevent->sndrcvinfo.sinfo_ssn;
......@@ -390,32 +578,31 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
/* Found it, so mark in the ssnmap. */
sctp_ssn_next(in, sid);
__skb_unlink(pos, pos->list);
/* Attach all gathered skbs to the event. */
__skb_queue_tail(event->parent->list, pos);
__skb_queue_tail(sctp_event2skb(event)->list, pos);
}
}
/* Helper function to store chunks needing ordering. */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
struct sctp_ulpevent *event)
{
struct sk_buff *pos, *tmp;
sctp_ulpevent_t *cevent;
struct sctp_ulpevent *cevent;
__u16 sid, csid;
__u16 ssn, cssn;
sid = event->sndrcvinfo.sinfo_stream;
ssn = event->sndrcvinfo.sinfo_ssn;
/* Find the right place in this list. We store them by
* stream ID and then by SSN.
*/
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
cevent = (sctp_ulpevent_t *) pos->cb;
cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->sndrcvinfo.sinfo_stream;
cssn = cevent->sndrcvinfo.sinfo_ssn;
......@@ -427,25 +614,20 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
/* If the queue is empty, we have a different function to call. */
if (skb_peek(&ulpq->lobby))
__skb_insert(event->parent, pos->prev, pos, &ulpq->lobby);
__skb_insert(sctp_event2skb(event), pos->prev, pos,
&ulpq->lobby);
else
__skb_queue_tail(&ulpq->lobby, event->parent);
__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
}
static inline sctp_ulpevent_t *sctp_ulpq_order(struct sctp_ulpq *ulpq,
sctp_ulpevent_t *event)
static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
__u16 sid, ssn;
struct sctp_stream *in;
/* FIXME: We should be using some new chunk structure here
* instead of carrying chunk fields in the event structure.
* This is temporary as it is too painful to change everything
* at once.
*/
/* Check if this message needs ordering. */
if (SCTP_DATA_UNORDERED & event->chunk_flags)
if (SCTP_DATA_UNORDERED & event->msg_flags)
return event;
/* Note: The stream ID must be verified before this routine. */
......@@ -472,3 +654,141 @@ static inline sctp_ulpevent_t *sctp_ulpq_order(struct sctp_ulpq *ulpq,
return event;
}
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
__u16 freed = 0;
__u32 tsn;
struct sk_buff *skb;
struct sctp_ulpevent *event;
struct sctp_tsnmap *tsnmap;
tsnmap = &ulpq->asoc->peer.tsn_map;
while ((skb = __skb_dequeue_tail(&ulpq->lobby))) {
freed += skb_headlen(skb);
event = sctp_skb2event(skb);
tsn = event->sndrcvinfo.sinfo_tsn;
sctp_ulpevent_free(event);
sctp_tsnmap_renege(tsnmap, tsn);
if (freed >= needed)
return freed;
}
return freed;
}
/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
__u16 freed = 0;
__u32 tsn;
struct sk_buff *skb;
struct sctp_ulpevent *event;
struct sctp_tsnmap *tsnmap;
tsnmap = &ulpq->asoc->peer.tsn_map;
/* Walk backwards through the list, reneging the newest TSNs. */
while ((skb = __skb_dequeue_tail(&ulpq->reasm))) {
freed += skb_headlen(skb);
event = sctp_skb2event(skb);
tsn = event->sndrcvinfo.sinfo_tsn;
sctp_ulpevent_free(event);
sctp_tsnmap_renege(tsnmap, tsn);
if (freed >= needed)
return freed;
}
return freed;
}
/* Partially deliver the first message when there is pressure on the rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk, int priority)
{
struct sctp_ulpevent *event;
struct sctp_association *asoc;
asoc = ulpq->asoc;
/* Are we already in partial delivery mode? */
if (!sctp_sk(asoc->base.sk)->pd_mode) {
/* Is partial delivery possible? */
event = sctp_ulpq_retrieve_first(ulpq);
/* Send event to the ULP. */
if (event) {
sctp_ulpq_tail_event(ulpq, event);
sctp_sk(asoc->base.sk)->pd_mode = 1;
ulpq->pd_mode = 1;
return;
}
}
}
/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
int priority)
{
struct sctp_association *asoc;
__u16 needed, freed;
asoc = ulpq->asoc;
if (chunk) {
needed = ntohs(chunk->chunk_hdr->length);
needed -= sizeof(sctp_data_chunk_t);
} else
needed = SCTP_DEFAULT_MAXWINDOW;
freed = 0;
if (skb_queue_empty(&asoc->base.sk->receive_queue)) {
freed = sctp_ulpq_renege_order(ulpq, needed);
if (freed < needed) {
freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
}
}
/* If able to free enough room, accept this chunk. */
if (chunk && (freed >= needed)) {
__u32 tsn;
tsn = ntohl(chunk->subh.data_hdr->tsn);
sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
sctp_ulpq_tail_data(ulpq, chunk, priority);
sctp_ulpq_partial_delivery(ulpq, chunk, priority);
}
return;
}
/* Notify the application if an association is aborted and in
* partial delivery mode. Send up any pending received messages.
*/
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int priority)
{
struct sctp_ulpevent *ev = NULL;
struct sock *sk;
if (!ulpq->pd_mode)
return;
sk = ulpq->asoc->base.sk;
if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
&sctp_sk(sk)->subscribe))
ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
SCTP_PARTIAL_DELIVERY_ABORTED,
priority);
if (ev)
skb_queue_tail(&sk->receive_queue, sctp_event2skb(ev));
/* If there is data waiting, send it up the socket now. */
if (sctp_ulpq_clear_pd(ulpq) || ev)
wake_up_interruptible(sk->sleep);
}