Commit febb7e5d authored by Sridhar Samudrala

[SCTP] Partial Reliability Extension support.

parent 982f0efd
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2003
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
......@@ -93,6 +93,9 @@ typedef enum {
SCTP_CID_ECN_CWR = 13,
SCTP_CID_SHUTDOWN_COMPLETE = 14,
/* PR-SCTP Sec 3.2 */
SCTP_CID_FWD_TSN = 0xC0,
/* Use hex, as defined in ADDIP sec. 3.1 */
SCTP_CID_ASCONF = 0xC1,
SCTP_CID_ASCONF_ACK = 0x80,
......@@ -168,6 +171,9 @@ typedef enum {
SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = __constant_htons(12),
SCTP_PARAM_ECN_CAPABLE = __constant_htons(0x8000),
/* PR-SCTP Sec 3.1 */
SCTP_PARAM_FWD_TSN_SUPPORT = __constant_htons(0xc000),
/* Add-IP Extension. Section 3.2 */
SCTP_PARAM_ADD_IP = __constant_htons(0xc001),
SCTP_PARAM_DEL_IP = __constant_htons(0xc002),
......@@ -472,9 +478,67 @@ typedef struct sctp_cwr_chunk {
sctp_cwrhdr_t cwr_hdr;
} __attribute__((packed)) sctp_cwr_chunk_t;
/*
* ADDIP Section 3.1 New Chunk Types
/* PR-SCTP
* 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN)
*
* Forward Cumulative TSN chunk has the following format:
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Type = 192 | Flags = 0x00 | Length = Variable |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | New Cumulative TSN |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Stream-1 | Stream Sequence-1 |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* \ /
* / \
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Stream-N | Stream Sequence-N |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* Chunk Flags:
*
* Set to all zeros on transmit and ignored on receipt.
*
* New Cumulative TSN: 32 bit u_int
*
* This indicates the new cumulative TSN to the data receiver. Upon
* the reception of this value, the data receiver MUST consider
* any missing TSNs earlier than or equal to this value as received
* and stop reporting them as gaps in any subsequent SACKs.
*
* Stream-N: 16 bit u_int
*
* This field holds a stream number that was skipped by this
* FWD-TSN.
*
* Stream Sequence-N: 16 bit u_int
* This field holds the sequence number associated with the stream
* that was skipped. The stream sequence field holds the largest stream
* sequence number in this stream being skipped. The receiver of
* the FWD-TSN's can use the Stream-N and Stream Sequence-N fields
* to enable delivery of any stranded TSN's that remain on the stream
* re-ordering queues. This field MUST NOT report TSN's corresponding
* to DATA chunks that are marked as unordered. For ordered DATA
* chunks this field MUST be filled in.
*/
struct sctp_fwdtsn_skip {
__u16 stream;
__u16 ssn;
} __attribute__((packed));
struct sctp_fwdtsn_hdr {
__u32 new_cum_tsn;
struct sctp_fwdtsn_skip skip[0];
} __attribute((packed));
struct sctp_fwdtsn_chunk {
struct sctp_chunkhdr chunk_hdr;
struct sctp_fwdtsn_hdr fwdtsn_hdr;
} __attribute((packed));
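As a reading aid only, here is a small editorial sketch (not part of the patch) of how a FORWARD TSN chunk with the layout described above could be packed into a buffer from userspace. The struct fwdtsn_skip here mirrors the kernel structure, while pack_fwdtsn() and its fixed offsets are illustrative assumptions.

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

struct fwdtsn_skip {
	uint16_t stream;	/* stream number skipped over */
	uint16_t ssn;		/* highest skipped stream sequence number */
};

/* Pack type 0xC0, flags 0, length, new cumulative TSN and the skip list.
 * The caller must supply a buffer of at least 8 + 4 * nskips bytes.
 */
static size_t pack_fwdtsn(uint8_t *buf, uint32_t new_cum_tsn,
			  const struct fwdtsn_skip *skips, size_t nskips)
{
	size_t len = 4 + 4 + nskips * 4;	/* chunk header + TSN + skips */
	uint32_t tsn = htonl(new_cum_tsn);
	uint16_t hlen = htons((uint16_t)len);
	size_t i;

	buf[0] = 0xC0;				/* Type = 192 (FORWARD TSN) */
	buf[1] = 0x00;				/* Flags, zero on transmit */
	memcpy(buf + 2, &hlen, 2);		/* Length, network order */
	memcpy(buf + 4, &tsn, 4);		/* New Cumulative TSN */
	for (i = 0; i < nskips; i++) {
		uint16_t s = htons(skips[i].stream);
		uint16_t n = htons(skips[i].ssn);

		memcpy(buf + 8 + i * 4, &s, 2);
		memcpy(buf + 10 + i * 4, &n, 2);
	}
	return len;
}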
/* ADDIP
* Section 3.1.1 Address Configuration Change Chunk (ASCONF)
......
......@@ -599,8 +599,7 @@ enum {
NET_SCTP_PRESERVE_ENABLE = 11,
NET_SCTP_MAX_BURST = 12,
NET_SCTP_ADDIP_ENABLE = 13,
NET_SCTP_RMEM = 14,
NET_SCTP_WMEM = 15,
NET_SCTP_PRSCTP_ENABLE = 14,
};
/* /proc/sys/net/bridge */
......
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2003
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (C) 1999-2001 Cisco, Motorola
*
* This file is part of the SCTP kernel reference Implementation
......@@ -29,6 +29,7 @@
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Ardelle Fan <ardelle.fan@intel.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -90,6 +91,8 @@ typedef enum {
SCTP_CMD_RENEGE, /* Renege data on an association. */
SCTP_CMD_SETUP_T4, /* ADDIP, setup T4 RTO timer parms. */
SCTP_CMD_PROCESS_OPERR, /* Process an ERROR chunk. */
SCTP_CMD_REPORT_FWDTSN, /* Report new cumulative TSN Ack. */
SCTP_CMD_PROCESS_FWDTSN, /* Skips were reported, so process further. */
SCTP_CMD_LAST
} sctp_verb_t;
......
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2003
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
......@@ -68,6 +68,8 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
#define SCTP_NUM_ADDIP_CHUNK_TYPES 2
#define SCTP_NUM_PRSCTP_CHUNK_TYPES 1
/* These are the different flavours of event. */
typedef enum {
......
......@@ -475,6 +475,14 @@ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
err = (sctp_errhdr_t *)((void *)err + \
WORD_ROUND(ntohs(err->length))))
#define sctp_walk_fwdtsn(pos, chunk)\
_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
#define _sctp_walk_fwdtsn(pos, chunk, end)\
for (pos = chunk->subh.fwdtsn_hdr->skip;\
(void *)pos <= (void *)chunk->subh.fwdtsn_hdr->skip + end - sizeof(struct sctp_fwdtsn_skip);\
pos++)
/* Round an int up to the next multiple of 4. */
#define WORD_ROUND(s) (((s)+3)&~3)
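The bounded walk that _sctp_walk_fwdtsn() performs above can be sketched as a plain loop over the skip list. This is an editorial illustration, not kernel code: walk_skips() and body_len are made-up names, with body_len standing for the chunk length minus the fixed FWD TSN header, as in the macro.

#include <arpa/inet.h>
#include <stdio.h>
#include <stdint.h>

struct fwdtsn_skip { uint16_t stream; uint16_t ssn; };

/* Visit each complete 4-byte skip entry, mirroring the macro's bound:
 * stop once the next entry would run past 'body_len' bytes.
 */
static void walk_skips(const struct fwdtsn_skip *skip, size_t body_len)
{
	const struct fwdtsn_skip *pos;

	for (pos = skip;
	     (const uint8_t *)pos + sizeof(*pos) <=
				(const uint8_t *)skip + body_len;
	     pos++)
		printf("stream %u ssn %u\n",
		       ntohs(pos->stream), ntohs(pos->ssn));
}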
......
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2003
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
......@@ -141,6 +141,9 @@ sctp_state_fn_t sctp_sf_cookie_echoed_err;
sctp_state_fn_t sctp_sf_do_5_2_6_stale;
sctp_state_fn_t sctp_sf_do_asconf;
sctp_state_fn_t sctp_sf_do_asconf_ack;
sctp_state_fn_t sctp_sf_do_9_2_reshutack;
sctp_state_fn_t sctp_sf_eat_fwd_tsn;
sctp_state_fn_t sctp_sf_eat_fwd_tsn_fast;
/* Prototypes for primitive event state functions. */
sctp_state_fn_t sctp_sf_do_prm_asoc;
......@@ -170,25 +173,6 @@ sctp_state_fn_t sctp_sf_do_6_3_3_rtx;
sctp_state_fn_t sctp_sf_do_6_2_sack;
sctp_state_fn_t sctp_sf_autoclose_timer_expire;
/* These are state functions which are either obsolete or not in use yet.
* If any of these functions needs to be revived, it should be renamed with
* the "sctp_sf_xxx" prefix, and be moved to the above prototype groups.
*/
/* Prototypes for chunk state functions. Not in use. */
sctp_state_fn_t sctp_sf_do_9_2_reshutack;
sctp_state_fn_t sctp_sf_do_9_2_reshut;
sctp_state_fn_t sctp_sf_do_9_2_shutack;
/* Prototypes for timeout event state functions. Not in use. */
sctp_state_fn_t sctp_do_4_2_reinit;
sctp_state_fn_t sctp_do_4_3_reecho;
sctp_state_fn_t sctp_do_9_2_reshut;
sctp_state_fn_t sctp_do_9_2_reshutack;
sctp_state_fn_t sctp_do_8_3_hb_err;
sctp_state_fn_t sctp_heartoff;
/* Prototypes for utility support functions. */
__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t,
......@@ -277,6 +261,9 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf);
int sctp_process_asconf_ack(struct sctp_association *asoc,
struct sctp_chunk *asconf_ack);
struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
__u32 new_cum_tsn, size_t nstreams,
struct sctp_fwdtsn_skip *skiplist);
void sctp_chunk_assign_tsn(struct sctp_chunk *);
void sctp_chunk_assign_ssn(struct sctp_chunk *);
......
......@@ -193,6 +193,9 @@ extern struct sctp_globals {
/* Flag to indicate if addip is enabled. */
int addip_enable;
/* Flag to indicate if PR-SCTP is enabled. */
int prsctp_enable;
} sctp_globals;
#define sctp_rto_initial (sctp_globals.rto_initial)
......@@ -221,6 +224,7 @@ extern struct sctp_globals {
#define sctp_local_addr_list (sctp_globals.local_addr_list)
#define sctp_local_addr_lock (sctp_globals.local_addr_lock)
#define sctp_addip_enable (sctp_globals.addip_enable)
#define sctp_prsctp_enable (sctp_globals.prsctp_enable)
/* SCTP Socket type: UDP or TCP style. */
typedef enum {
......@@ -317,6 +321,8 @@ struct sctp_cookie {
/* This holds the originating address of the INIT packet. */
union sctp_addr peer_addr;
__u8 prsctp_capable;
/* This is a shim for my peer's INIT packet, followed by
* a copy of the raw address list of the association.
* The length of the raw address list is saved in the
......@@ -413,6 +419,13 @@ static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id)
return stream->ssn[id]++;
}
/* Skip over this ssn and all below. */
static inline void sctp_ssn_skip(struct sctp_stream *stream, __u16 id,
__u16 ssn)
{
stream->ssn[id] = ssn+1;
}
/*
* Pointers to address related SCTP functions.
* (i.e. things that depend on the address family.)
......@@ -514,8 +527,8 @@ struct sctp_datamsg {
/* Did the messenge fail to send? */
int send_error;
char send_failed;
/* Control whether fragments from this message can expire. */
char can_expire;
/* Control whether chunks from this message can be abandoned. */
char can_abandon;
};
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
......@@ -527,8 +540,8 @@ void sctp_datamsg_hold(struct sctp_datamsg *);
void sctp_datamsg_free(struct sctp_datamsg *);
void sctp_datamsg_track(struct sctp_chunk *);
void sctp_datamsg_assign(struct sctp_datamsg *, struct sctp_chunk *);
void sctp_datamsg_fail(struct sctp_chunk *, int error);
int sctp_datamsg_expires(struct sctp_chunk *);
void sctp_chunk_fail(struct sctp_chunk *, int error);
int sctp_chunk_abandoned(struct sctp_chunk *);
/* RFC2960 1.4 Key Terms
......@@ -583,6 +596,7 @@ struct sctp_chunk {
struct sctp_cwrhdr *ecn_cwr_hdr;
struct sctp_errhdr *err_hdr;
struct sctp_addiphdr *addip_hdr;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
} subh;
__u8 *chunk_end;
......@@ -890,7 +904,7 @@ struct sctp_transport {
/* A flag which indicates the occurrence of a changeover */
char changeover_active;
/* A glag which indicates whether the change of primary is
/* A flag which indicates whether the change of primary is
* the first switch to this destination address during an
* active switch.
*/
......@@ -993,6 +1007,11 @@ struct sctp_outq {
*/
struct list_head retransmit;
/* Put chunks on this list to save them for FWD TSN processing as
* they were abandoned.
*/
struct list_head abandoned;
/* How many unackd bytes do we have in-flight? */
__u32 outstanding_bytes;
......@@ -1356,16 +1375,25 @@ struct sctp_association {
struct sctp_tsnmap tsn_map;
__u8 _map[sctp_tsnmap_storage_size(SCTP_TSN_MAP_SIZE)];
/* Do we need to sack the peer? */
__u8 sack_needed;
/* Ack State : This flag indicates if the next received
* : packet is to be responded to with a
* : SACK. This is initializedto 0. When a packet
* : is received it is incremented. If this value
* : reaches 2 or more, a SACK is sent and the
* : value is reset to 0. Note: This is used only
* : when no DATA chunks are received out of
* : order. When DATA chunks are out of order,
* : SACK's are not delayed (see Section 6).
*/
__u8 sack_needed; /* Do we need to sack the peer? */
/* These are capabilities which our peer advertised. */
__u8 ecn_capable; /* Can peer do ECN? */
__u8 ipv4_address; /* Peer understands IPv4 addresses? */
__u8 ipv6_address; /* Peer understands IPv6 addresses? */
__u8 hostname_address;/* Peer understands DNS addresses? */
/* Does peer support ADDIP? */
__u8 asconf_capable;
__u8 asconf_capable; /* Does peer support ADDIP? */
__u8 prsctp_capable; /* Can peer do PR-SCTP? */
/* This mask is used to disable sending the ASCONF chunk
* with specified parameter to peer.
......@@ -1458,6 +1486,9 @@ struct sctp_association {
__u32 ctsn_ack_point;
/* PR-SCTP Advanced.Peer.Ack.Point */
__u32 adv_peer_ack_point;
/* Highest TSN that is acknowledged by incoming SACKs. */
__u32 highest_sacked;
......@@ -1498,19 +1529,7 @@ struct sctp_association {
/* The message size at which SCTP fragmentation will occur. */
__u32 frag_point;
/* Ack State : This flag indicates if the next received
* : packet is to be responded to with a
* : SACK. This is initializedto 0. When a packet
* : is received it is incremented. If this value
* : reaches 2 or more, a SACK is sent and the
* : value is reset to 0. Note: This is used only
* : when no DATA chunks are received out of
* : order. When DATA chunks are out of order,
* : SACK's are not delayed (see Section 6).
*/
/* Do we need to send an ack?
* When counters[SctpCounterAckState] is above 1 we do!
*/
/* Currently only one counter is used to count INIT errors. */
int counters[SCTP_NUMBER_COUNTERS];
/* Default send parameters. */
......
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
*
* This file is part of the SCTP kernel reference Implementation
......@@ -37,6 +37,7 @@
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -145,6 +146,9 @@ int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
/* Mark this TSN as seen. */
void sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
/* Mark this TSN and all lower as seen. */
void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
/* Retrieve the Cumulative TSN ACK Point. */
static inline __u32 sctp_tsnmap_get_ctsn(const struct sctp_tsnmap *map)
{
......
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
......@@ -38,6 +38,7 @@
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -79,6 +80,9 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *, int);
/* Clear the partial data delivery condition on this socket. */
int sctp_clear_pd(struct sock *sk);
/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
#endif /* __sctp_ulpqueue_h__ */
......
......@@ -210,6 +210,7 @@ struct sctp_association *sctp_association_init(struct sctp_association *asoc,
asoc->next_tsn = asoc->c.initial_tsn;
asoc->ctsn_ack_point = asoc->next_tsn - 1;
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
asoc->highest_sacked = asoc->ctsn_ack_point;
asoc->last_cwr_tsn = asoc->ctsn_ack_point;
asoc->unack_data = 0;
......@@ -959,6 +960,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
asoc->next_tsn = new->next_tsn;
asoc->ctsn_ack_point = new->ctsn_ack_point;
asoc->adv_peer_ack_point = new->adv_peer_ack_point;
/* Reinitialize SSN for both local streams
* and peer's streams.
......@@ -967,6 +969,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
} else {
asoc->ctsn_ack_point = asoc->next_tsn - 1;
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
if (!asoc->ssnmap) {
/* Move the ssnmap. */
asoc->ssnmap = new->ssnmap;
......
/* SCTP kernel reference Implementation
* Copyright (c) 2003 International Business Machines Corp.
* (C) Copyright IBM Corp. 2003, 2004
*
* This file is part of the SCTP kernel reference Implementation
*
......@@ -31,6 +31,7 @@
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -55,7 +56,8 @@ void sctp_datamsg_init(struct sctp_datamsg *msg)
atomic_set(&msg->refcnt, 1);
msg->send_failed = 0;
msg->send_error = 0;
msg->can_expire = 0;
msg->can_abandon = 0;
msg->expires_at = 0;
INIT_LIST_HEAD(&msg->chunks);
}
......@@ -182,14 +184,12 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
* have the same expiration.
*/
if (sinfo->sinfo_timetolive) {
struct timeval tv;
__u32 ttl = sinfo->sinfo_timetolive;
/* sinfo_timetolive is in milliseconds */
tv.tv_sec = ttl / 1000;
tv.tv_usec = ttl % 1000 * 1000;
msg->expires_at = jiffies + timeval_to_jiffies(&tv);
msg->can_expire = 1;
msg->expires_at = jiffies +
MSECS_TO_JIFFIES(sinfo->sinfo_timetolive);
msg->can_abandon = 1;
SCTP_DEBUG_PRINTK("%s: msg:%p expires_at: %ld jiffies:%ld\n",
__FUNCTION__, msg, msg->expires_at, jiffies);
}
max = asoc->frag_point;
......@@ -288,14 +288,11 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
}
/* Check whether this message has expired. */
int sctp_datamsg_expires(struct sctp_chunk *chunk)
int sctp_chunk_abandoned(struct sctp_chunk *chunk)
{
struct sctp_datamsg *msg = chunk->msg;
/* FIXME: When PR-SCTP is supported we can make this
* check more lenient.
*/
if (!msg->can_expire)
if (!msg->can_abandon)
return 0;
if (time_after(jiffies, msg->expires_at))
......@@ -305,7 +302,7 @@ int sctp_datamsg_expires(struct sctp_chunk *chunk)
}
/* This chunk (and consequently entire message) has failed in its sending. */
void sctp_datamsg_fail(struct sctp_chunk *chunk, int error)
void sctp_chunk_fail(struct sctp_chunk *chunk, int error)
{
chunk->msg->send_failed = 1;
chunk->msg->send_error = error;
......
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 International Business Machines Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
......@@ -43,6 +43,7 @@
* Xingang Guo <xingang.guo@intel.com>
* Jon Grimm <jgrimm@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -88,6 +89,9 @@ const char *sctp_cname(const sctp_subtype_t cid)
case SCTP_CID_ASCONF_ACK:
return "ASCONF_ACK";
case SCTP_CID_FWD_TSN:
return "FWD_TSN";
default:
return "unknown chunk";
};
......
......@@ -278,6 +278,7 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
/* It is OK to send this chunk. */
__skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
packet->size += chunk_len;
chunk->transport = packet->transport;
finish:
return retval;
}
......@@ -637,7 +638,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
asoc->peer.rwnd = rwnd;
/* Has been accepted for transmission. */
chunk->msg->can_expire = 0;
if (!asoc->peer.prsctp_capable)
chunk->msg->can_abandon = 0;
finish:
return retval;
......
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 Intel Corp.
* Copyright (c) 2001-2003 International Business Machines Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
......@@ -69,6 +69,8 @@ static void sctp_mark_missing(struct sctp_outq *q,
__u32 highest_new_tsn,
int count_of_newacks);
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
struct sctp_chunk *ch)
......@@ -222,6 +224,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
skb_queue_head_init(&q->control);
INIT_LIST_HEAD(&q->retransmit);
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
q->outstanding_bytes = 0;
q->empty = 1;
......@@ -246,7 +249,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
chunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
/* Mark as part of a failed message. */
sctp_datamsg_fail(chunk, q->error);
sctp_chunk_fail(chunk, q->error);
sctp_chunk_free(chunk);
}
}
......@@ -256,7 +259,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
list_del_init(lchunk);
chunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
sctp_datamsg_fail(chunk, q->error);
sctp_chunk_fail(chunk, q->error);
sctp_chunk_free(chunk);
}
......@@ -265,7 +268,16 @@ void sctp_outq_teardown(struct sctp_outq *q)
list_del_init(lchunk);
chunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
sctp_datamsg_fail(chunk, q->error);
sctp_chunk_fail(chunk, q->error);
sctp_chunk_free(chunk);
}
/* Throw away any chunks that are in the abandoned queue. */
list_for_each_safe(lchunk, temp, &q->abandoned) {
list_del_init(lchunk);
chunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
sctp_chunk_fail(chunk, q->error);
sctp_chunk_free(chunk);
}
......@@ -273,7 +285,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
while ((chunk = sctp_outq_dequeue_data(q))) {
/* Mark as send failure. */
sctp_datamsg_fail(chunk, q->error);
sctp_chunk_fail(chunk, q->error);
sctp_chunk_free(chunk);
}
......@@ -357,32 +369,30 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
return error;
}
/* Insert a chunk into the retransmit queue. Chunks on the retransmit
* queue are kept in order, based on the TSNs.
/* Insert a chunk into the sorted list based on the TSNs. The retransmit list
* and the abandoned list are in ascending order.
*/
void sctp_retransmit_insert(struct list_head *tlchunk, struct sctp_outq *q)
void sctp_insert_list(struct list_head *head, struct list_head *new)
{
struct list_head *rlchunk;
struct sctp_chunk *tchunk, *rchunk;
__u32 ttsn, rtsn;
struct list_head *pos;
struct sctp_chunk *nchunk, *lchunk;
__u32 ntsn, ltsn;
int done = 0;
tchunk = list_entry(tlchunk, struct sctp_chunk, transmitted_list);
ttsn = ntohl(tchunk->subh.data_hdr->tsn);
nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
ntsn = ntohl(nchunk->subh.data_hdr->tsn);
list_for_each(rlchunk, &q->retransmit) {
rchunk = list_entry(rlchunk, struct sctp_chunk,
transmitted_list);
rtsn = ntohl(rchunk->subh.data_hdr->tsn);
if (TSN_lt(ttsn, rtsn)) {
list_add(tlchunk, rlchunk->prev);
list_for_each(pos, head) {
lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
ltsn = ntohl(lchunk->subh.data_hdr->tsn);
if (TSN_lt(ntsn, ltsn)) {
list_add(new, pos->prev);
done = 1;
break;
}
}
if (!done) {
list_add_tail(tlchunk, &q->retransmit);
}
if (!done)
list_add_tail(new, head);
}
/* Mark all the eligible packets on a transport for retransmission. */
......@@ -398,6 +408,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
chunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
/* If the chunk is abandoned, move it to abandoned list. */
if (sctp_chunk_abandoned(chunk)) {
list_del_init(lchunk);
sctp_insert_list(&q->abandoned, lchunk);
continue;
}
/* If we are doing retransmission due to a fast retransmit,
* only the chunk's that are marked for fast retransmit
* should be added to the retransmit queue. If we are doing
......@@ -438,10 +455,10 @@ void sctp_retransmit_mark(struct sctp_outq *q,
}
/* Move the chunk to the retransmit queue. The chunks
* on the retransmit queue is always kept in order.
* on the retransmit queue are always kept in order.
*/
list_del_init(lchunk);
sctp_retransmit_insert(lchunk, q);
sctp_insert_list(&q->retransmit, lchunk);
}
}
......@@ -484,6 +501,12 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
sctp_retransmit_mark(q, transport, fast_retransmit);
/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
* the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
* following the procedures outlined in C1 - C5.
*/
sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
error = sctp_outq_flush(q, /* rtx_timeout */ 1);
if (error)
......@@ -717,6 +740,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
case SCTP_CID_ECN_CWR:
case SCTP_CID_ASCONF:
case SCTP_CID_ASCONF_ACK:
case SCTP_CID_FWD_TSN:
sctp_packet_transmit_chunk(packet, chunk);
break;
......@@ -795,15 +819,15 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
if (chunk->sinfo.sinfo_stream >=
asoc->c.sinit_num_ostreams) {
/* Mark as s failed send. */
sctp_datamsg_fail(chunk, SCTP_ERROR_INV_STRM);
/* Mark as failed send. */
sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
sctp_chunk_free(chunk);
continue;
}
/* Has this chunk expired? */
if (sctp_datamsg_expires(chunk)) {
sctp_datamsg_fail(chunk, 0);
if (sctp_chunk_abandoned(chunk)) {
sctp_chunk_fail(chunk, 0);
sctp_chunk_free(chunk);
continue;
}
......@@ -980,7 +1004,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
{
struct sctp_association *asoc = q->asoc;
struct sctp_transport *transport;
struct sctp_chunk *tchunk;
struct sctp_chunk *tchunk = NULL;
struct list_head *lchunk, *transport_list, *pos, *temp;
sctp_sack_variable_t *frags = sack->variable;
__u32 sack_ctsn, ctsn, tsn;
......@@ -1083,11 +1107,6 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
ctsn = asoc->ctsn_ack_point;
SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
__FUNCTION__, sack_ctsn);
SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association "
"%p is 0x%x.\n", __FUNCTION__, asoc, ctsn);
/* Throw away stuff rotting on the sack queue. */
list_for_each_safe(lchunk, temp, &q->sacked) {
tchunk = list_entry(lchunk, struct sctp_chunk,
......@@ -1112,10 +1131,19 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
asoc->peer.rwnd = sack_a_rwnd;
sctp_generate_fwdtsn(q, sack_ctsn);
SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
__FUNCTION__, sack_ctsn);
SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
"%p is 0x%x. Adv peer ack point: 0x%x\n",
__FUNCTION__, asoc, ctsn, asoc->adv_peer_ack_point);
/* See if all chunks are acked.
* Make sure the empty queue handler will get run later.
*/
q->empty = skb_queue_empty(&q->out) && list_empty(&q->retransmit);
q->empty = skb_queue_empty(&q->out) && skb_queue_empty(&q->control) &&
list_empty(&q->retransmit);
if (!q->empty)
goto finish;
......@@ -1191,6 +1219,12 @@ static void sctp_check_transmitted(struct sctp_outq *q,
tchunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
if (sctp_chunk_abandoned(tchunk)) {
/* Move the chunk to abandoned list. */
sctp_insert_list(&q->abandoned, lchunk);
continue;
}
tsn = ntohl(tchunk->subh.data_hdr->tsn);
if (sctp_acked(sack, tsn)) {
/* If this queue is the retransmit queue, the
......@@ -1572,3 +1606,123 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
pass:
return 1;
}
static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
int nskips, __u16 stream)
{
int i;
for (i = 0; i < nskips; i++) {
if (skiplist[i].stream == stream)
return i;
}
return i;
}
/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
struct sctp_association *asoc = q->asoc;
struct sctp_chunk *ftsn_chunk = NULL;
struct sctp_fwdtsn_skip ftsn_skip_arr[10];
int nskips = 0;
int skip_pos = 0;
__u32 tsn;
struct sctp_chunk *chunk;
struct list_head *lchunk, *temp;
/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
* received SACK.
*
* If (Advanced.Peer.Ack.Point < SackCumAck), then update
* Advanced.Peer.Ack.Point to be equal to SackCumAck.
*/
if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
asoc->adv_peer_ack_point = ctsn;
/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
* locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
* the chunk next in the out-queue space is marked as "abandoned" as
* shown in the following example:
*
* Assuming that a SACK arrived with the Cumulative TSN ACK 102
* and the Advanced.Peer.Ack.Point is updated to this value:
*
* out-queue at the end of ==> out-queue after Adv.Ack.Point
* normal SACK processing local advancement
* ... ...
* Adv.Ack.Pt-> 102 acked 102 acked
* 103 abandoned 103 abandoned
* 104 abandoned Adv.Ack.P-> 104 abandoned
* 105 105
* 106 acked 106 acked
* ... ...
*
* In this example, the data sender successfully advanced the
* "Advanced.Peer.Ack.Point" from 102 to 104 locally.
*/
list_for_each_safe(lchunk, temp, &q->abandoned) {
chunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
tsn = ntohl(chunk->subh.data_hdr->tsn);
/* Remove any chunks in the abandoned queue that are acked by
* the ctsn.
*/
if (TSN_lte(tsn, ctsn)) {
list_del_init(lchunk);
if (!chunk->tsn_gap_acked) {
chunk->transport->flight_size -=
sctp_data_size(chunk);
q->outstanding_bytes -= sctp_data_size(chunk);
}
sctp_chunk_free(chunk);
} else {
if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
asoc->adv_peer_ack_point = tsn;
if (chunk->chunk_hdr->flags &
SCTP_DATA_UNORDERED)
continue;
skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
nskips,
chunk->subh.data_hdr->stream);
ftsn_skip_arr[skip_pos].stream =
chunk->subh.data_hdr->stream;
ftsn_skip_arr[skip_pos].ssn =
chunk->subh.data_hdr->ssn;
if (skip_pos == nskips)
nskips++;
if (nskips == 10)
break;
} else
break;
}
}
/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
* is greater than the Cumulative TSN ACK carried in the received
* SACK, the data sender MUST send the data receiver a FORWARD TSN
* chunk containing the latest value of the
* "Advanced.Peer.Ack.Point".
*
* C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
* list each stream and sequence number in the forwarded TSN. This
* information will enable the receiver to easily find any
* stranded TSN's waiting on stream reorder queues. Each stream
* SHOULD only be reported once; this means that if multiple
* abandoned messages occur in the same stream then only the
* highest abandoned stream sequence number is reported. If the
* total size of the FORWARD TSN does NOT fit in a single MTU then
* the sender of the FORWARD TSN SHOULD lower the
* Advanced.Peer.Ack.Point to the last TSN that will fit in a
* single MTU.
*/
if (asoc->adv_peer_ack_point > ctsn)
ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
nskips, &ftsn_skip_arr[0]);
if (ftsn_chunk) {
__skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
SCTP_INC_STATS(SctpOutCtrlChunks);
}
}
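To make the C1/C2 rules above concrete, here is an editorial sketch (not part of the patch) that replays the 102/103/104 example from the comment with plain integers. advance_peer_ack_point() and struct outq_entry are invented names, and real code would use the TSN_lt()/TSN_lte() serial arithmetic rather than ordinary comparisons.

#include <stdio.h>
#include <stdint.h>

struct outq_entry { uint32_t tsn; int abandoned; };

static uint32_t advance_peer_ack_point(uint32_t adv_ack_pt, uint32_t sack_ctsn,
				       const struct outq_entry *q, size_t n)
{
	size_t i;

	/* C1: never fall behind the cumulative TSN ack from the SACK. */
	if (adv_ack_pt < sack_ctsn)
		adv_ack_pt = sack_ctsn;

	/* C2: advance only across consecutive abandoned TSNs. */
	for (i = 0; i < n; i++) {
		if (q[i].tsn != adv_ack_pt + 1 || !q[i].abandoned)
			break;
		adv_ack_pt = q[i].tsn;
	}
	return adv_ack_pt;
}

int main(void)
{
	struct outq_entry q[] = { {103, 1}, {104, 1}, {105, 0}, {106, 0} };

	/* SACK acked 102; 103 and 104 were abandoned, 105 was not. */
	printf("%u\n", advance_peer_ack_point(102, 102, q, 4)); /* prints 104 */
	return 0;
}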
......@@ -1127,6 +1127,9 @@ __init int sctp_init(void)
/* Disable ADDIP by default. */
sctp_addip_enable = 0;
/* Enable PR-SCTP by default. */
sctp_prsctp_enable = 1;
sctp_sysctl_register();
INIT_LIST_HEAD(&sctp_address_families);
......
......@@ -6,10 +6,6 @@
*
* This file is part of the SCTP kernel reference Implementation
*
* This file includes part of the implementation of the add-IP extension,
* based on <draft-ietf-tsvwg-addip-sctp-02.txt> June 29, 2001,
* for the SCTP kernel reference Implementation.
*
* These functions work with the state functions in sctp_sm_statefuns.c
* to implement the state operations. These functions implement the
* steps which require modifying existing data structures.
......@@ -89,11 +85,13 @@ int sctp_chunk_iif(const struct sctp_chunk *chunk)
* Note 2: The ECN capable field is reserved for future use of
* Explicit Congestion Notification.
*/
static const sctp_ecn_capable_param_t ecap_param = {
{
SCTP_PARAM_ECN_CAPABLE,
__constant_htons(sizeof(sctp_ecn_capable_param_t)),
}
static const struct sctp_paramhdr ecap_param = {
SCTP_PARAM_ECN_CAPABLE,
__constant_htons(sizeof(struct sctp_paramhdr)),
};
static const struct sctp_paramhdr prsctp_param = {
SCTP_PARAM_FWD_TSN_SUPPORT,
__constant_htons(sizeof(struct sctp_paramhdr)),
};
/* A helper to initialize an op error inside a
......@@ -196,6 +194,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
chunksize += sizeof(ecap_param);
if (sctp_prsctp_enable)
chunksize += sizeof(prsctp_param);
chunksize += vparam_len;
/* RFC 2960 3.3.2 Initiation (INIT) (1)
......@@ -232,6 +232,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
sctp_addto_chunk(retval, num_types * sizeof(__u16), &types);
sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);
if (sctp_prsctp_enable)
sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
nodata:
if (addrs.v)
kfree(addrs.v);
......@@ -278,6 +280,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
if (asoc->peer.ecn_capable)
chunksize += sizeof(ecap_param);
/* Tell peer that we'll do PR-SCTP only if peer advertised. */
if (asoc->peer.prsctp_capable)
chunksize += sizeof(prsctp_param);
/* Now allocate and fill out the chunk. */
retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
if (!retval)
......@@ -293,6 +299,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
sctp_addto_chunk(retval, cookie_len, cookie);
if (asoc->peer.ecn_capable)
sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);
if (asoc->peer.prsctp_capable)
sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
/* We need to remove the const qualifier at this point. */
retval->asoc = (struct sctp_association *) asoc;
......@@ -1286,6 +1294,9 @@ sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
/* Save the raw address list length in the cookie. */
cookie->c.raw_addr_list_len = addrs_len;
/* Remember PR-SCTP capability. */
cookie->c.prsctp_capable = asoc->peer.prsctp_capable;
/* Set an expiration time for the cookie. */
do_gettimeofday(&cookie->c.expiration);
TIMEVAL_ADD(asoc->cookie_life, cookie->c.expiration);
......@@ -1442,6 +1453,8 @@ struct sctp_association *sctp_unpack_cookie(
retval->next_tsn = retval->c.initial_tsn;
retval->ctsn_ack_point = retval->next_tsn - 1;
retval->addip_serial = retval->c.initial_tsn;
retval->adv_peer_ack_point = retval->ctsn_ack_point;
retval->peer.prsctp_capable = retval->c.prsctp_capable;
/* The INIT stuff will be done by the side effects. */
return retval;
......@@ -1653,6 +1666,10 @@ static int sctp_verify_param(const struct sctp_association *asoc,
case SCTP_PARAM_HOST_NAME_ADDRESS:
/* Tell the peer, we won't support this param. */
return sctp_process_hn_param(asoc, param, chunk, err_chunk);
case SCTP_PARAM_FWD_TSN_SUPPORT:
if (sctp_prsctp_enable)
break;
/* Fall Through */
default:
SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
ntohs(param.p->type), cid);
......@@ -1968,6 +1985,12 @@ int sctp_process_param(struct sctp_association *asoc, union sctp_params param,
asoc->peer.ecn_capable = 1;
break;
case SCTP_PARAM_FWD_TSN_SUPPORT:
if (sctp_prsctp_enable) {
asoc->peer.prsctp_capable = 1;
break;
}
/* Fall Through */
default:
/* Any unrecognized parameters should have been caught
* and handled by sctp_verify_param() which should be
......@@ -2622,3 +2645,38 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
return retval;
}
/* Make a FWD TSN chunk. */
struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
__u32 new_cum_tsn, size_t nstreams,
struct sctp_fwdtsn_skip *skiplist)
{
struct sctp_chunk *retval = NULL;
struct sctp_fwdtsn_chunk *ftsn_chunk;
struct sctp_fwdtsn_hdr ftsn_hdr;
struct sctp_fwdtsn_skip skip;
size_t hint;
int i;
hint = (nstreams + 1) * sizeof(__u32);
/* Maybe set the T-bit if we have no association. */
retval = sctp_make_chunk(asoc, SCTP_CID_FWD_TSN, 0, hint);
if (!retval)
return NULL;
ftsn_chunk = (struct sctp_fwdtsn_chunk *)retval->subh.fwdtsn_hdr;
ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
retval->subh.fwdtsn_hdr =
sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr);
for (i = 0; i < nstreams; i++) {
skip.stream = skiplist[i].stream;
skip.ssn = skiplist[i].ssn;
sctp_addto_chunk(retval, sizeof(skip), &skip);
}
return retval;
}
......@@ -579,7 +579,7 @@ static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
/* Helper function to process the process SACK command. */
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
sctp_sackhdr_t *sackh)
struct sctp_sackhdr *sackh)
{
int err;
......@@ -729,6 +729,19 @@ static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
}
}
/* Process variable FWDTSN chunk information. */
static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk)
{
struct sctp_fwdtsn_skip *skip;
/* Walk through all the skipped SSNs */
sctp_walk_fwdtsn(skip, chunk) {
sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}
return;
}
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
......@@ -903,7 +916,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
struct timer_list *timer;
unsigned long timeout;
struct sctp_transport *t;
sctp_sackhdr_t sackh;
struct sctp_sackhdr sackh;
int local_cork = 0;
if (SCTP_EVENT_T_TIMEOUT != event_type)
......@@ -962,6 +975,18 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32);
break;
case SCTP_CMD_REPORT_FWDTSN:
/* Move the Cumulative TSN Ack ahead. */
sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
/* Abort any in progress partial delivery. */
sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
break;
case SCTP_CMD_PROCESS_FWDTSN:
sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr);
break;
case SCTP_CMD_GEN_SACK:
/* Generate a Selective ACK.
* The argument tells us whether to just count
......
......@@ -3204,6 +3204,143 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
return SCTP_DISPOSITION_DISCARD;
}
/*
* PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP
*
* When a FORWARD TSN chunk arrives, the data receiver MUST first update
* its cumulative TSN point to the value carried in the FORWARD TSN
* chunk, and then MUST further advance its cumulative TSN point locally
* if possible.
* After the above processing, the data receiver MUST stop reporting any
* missing TSNs earlier than or equal to the new cumulative TSN point.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
__u16 len;
__u32 tsn;
/* RFC 2960 8.5 Verification Tag
*
* When receiving an SCTP packet, the endpoint MUST ensure
* that the value in the Verification Tag field of the
* received SCTP packet matches its own Tag.
*/
if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __FUNCTION__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Count this as receiving DATA. */
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* FIXME: For now send a SACK, but DATA processing may
* send another.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
/* Start the SACK timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
return SCTP_DISPOSITION_CONSUME;
discard_noforce:
return SCTP_DISPOSITION_DISCARD;
}
sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
__u16 len;
__u32 tsn;
/* RFC 2960 8.5 Verification Tag
*
* When receiving an SCTP packet, the endpoint MUST ensure
* that the value in the Verification Tag field of the
* received SCTP packet matches its own Tag.
*/
if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __FUNCTION__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto gen_shutdown;
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Go ahead and force a SACK, since we are shutting down. */
gen_shutdown:
/* Implementor's Guide.
*
* While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
* respond to each received packet containing one or more DATA chunk(s)
* with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process an unknown chunk.
*
......
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2003
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
......@@ -40,6 +40,7 @@
* Hui Huang <hui.huang@nokia.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -50,7 +51,7 @@
#include <net/sctp/sm.h>
static const sctp_sm_table_entry_t bug = {
.fn = sctp_sf_bug,
.fn = sctp_sf_bug,
.name = "sctp_sf_bug"
};
......@@ -73,7 +74,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
return sctp_chunk_event_lookup(event_subtype.chunk, state);
break;
case SCTP_EVENT_T_TIMEOUT:
DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
timeout_event_table);
break;
......@@ -486,6 +487,34 @@ const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_TYPES][
TYPE_SCTP_ASCONF_ACK,
}; /*state_fn_t addip_chunk_event_table[][] */
#define TYPE_SCTP_FWD_TSN { \
/* SCTP_STATE_EMPTY */ \
{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
/* SCTP_STATE_CLOSED */ \
{.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \
/* SCTP_STATE_COOKIE_WAIT */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_COOKIE_ECHOED */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_ESTABLISHED */ \
{.fn = sctp_sf_eat_fwd_tsn, .name = "sctp_sf_eat_fwd_tsn"}, \
/* SCTP_STATE_SHUTDOWN_PENDING */ \
{.fn = sctp_sf_eat_fwd_tsn, .name = "sctp_sf_eat_fwd_tsn"}, \
/* SCTP_STATE_SHUTDOWN_SENT */ \
{.fn = sctp_sf_eat_fwd_tsn_fast, .name = "sctp_sf_eat_fwd_tsn_fast"}, \
/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
} /* TYPE_SCTP_FWD_TSN */
/* The primary index for this table is the chunk type.
* The secondary index for this table is the state.
*/
const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = {
TYPE_SCTP_FWD_TSN,
}; /*state_fn_t prsctp_chunk_event_table[][] */
static const sctp_sm_table_entry_t
chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
/* SCTP_STATE_EMPTY */
......@@ -924,6 +953,11 @@ const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
if (cid >= 0 && cid <= SCTP_CID_BASE_MAX)
return &chunk_event_table[cid][state];
if (sctp_prsctp_enable) {
if (cid == SCTP_CID_FWD_TSN)
return &prsctp_chunk_event_table[0][state];
}
if (sctp_addip_enable) {
if (cid == SCTP_CID_ASCONF)
return &addip_chunk_event_table[0][state];
......
/* SCTP kernel reference Implementation
* Copyright (c) 2002 International Business Machines Corp.
* (C) Copyright IBM Corp. 2002, 2004
* Copyright (c) 2002 Intel Corp.
*
* This file is part of the SCTP kernel reference Implementation
......@@ -35,6 +35,7 @@
* Jon Grimm <jgrimm@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -170,6 +171,14 @@ static ctl_table sctp_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec
},
{
.ctl_name = NET_SCTP_PRSCTP_ENABLE,
.procname = "prsctp_enable",
.data = &sctp_prsctp_enable,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec
},
{ .ctl_name = 0 }
};
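For context, once this ctl_table entry is registered the new flag should be tunable from userspace at runtime. The sketch below is editorial and assumes the table is exposed under /proc/sys/net/sctp/, so the exact path is an assumption rather than something stated in the patch.

#include <stdio.h>

int main(void)
{
	/* Assumed path: /proc/sys/net/sctp/prsctp_enable (not in the patch). */
	FILE *f = fopen("/proc/sys/net/sctp/prsctp_enable", "w");

	if (!f)
		return 1;
	fputs("0\n", f);	/* write 0 to disable PR-SCTP negotiation */
	fclose(f);
	return 0;
}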
......
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
*
* This file is part of the SCTP kernel reference Implementation
......@@ -36,6 +36,7 @@
* La Monte H.P. Yarroll <piggy@acm.org>
* Jon Grimm <jgrimm@us.ibm.com>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -253,6 +254,40 @@ int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
return ended;
}
/* Mark this and any lower TSN as seen. */
void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn)
{
__s32 gap;
/* Vacuously mark any TSN which precedes the map base or
* exceeds the end of the map.
*/
if (TSN_lt(tsn, map->base_tsn))
return;
if (!TSN_lt(tsn, map->base_tsn + map->len + map->len))
return;
/* Bump the max. */
if (TSN_lt(map->max_tsn_seen, tsn))
map->max_tsn_seen = tsn;
/* Assert: TSN is in range. */
gap = tsn - map->base_tsn + 1;
/* Mark the TSNs as received. */
if (gap <= map->len)
memset(map->tsn_map, 0x01, gap);
else {
memset(map->tsn_map, 0x01, map->len);
memset(map->overflow_map, 0x01, (gap - map->len));
}
/* Go fixup any internal TSN mapping variables including
* cumulative_tsn_ack_point.
*/
sctp_tsnmap_update(map);
}
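The guards above rely on wraparound-safe TSN comparison. As a reading aid, a common serial-arithmetic form is sketched below; the kernel's actual TSN_lt()/TSN_lte() helpers may be written differently.

#include <stdint.h>

/* True when a precedes b modulo 2^32, so comparisons survive TSN wrap. */
static int tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

static int tsn_lte(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

/* Example: tsn_lt(0xfffffffeu, 1u) is true across the wrap boundary. */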
/********************************************************************
* 2nd Level Abstractions
********************************************************************/
......
......@@ -680,6 +680,71 @@ static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
return event;
}
/* Helper function to gather skbs that have possibly become
* ordered by forward tsn skipping their dependencies.
*/
static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
{
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *cevent;
struct sctp_ulpevent *event = NULL;
struct sctp_stream *in;
struct sk_buff_head temp;
__u16 csid, cssn;
in = &ulpq->asoc->ssnmap->in;
/* We are holding the chunks by stream, by SSN. */
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->stream;
cssn = cevent->ssn;
if (cssn != sctp_ssn_peek(in, csid))
break;
/* Found it, so mark in the ssnmap. */
sctp_ssn_next(in, csid);
__skb_unlink(pos, pos->list);
if (!event) {
/* Create a temporary list to collect chunks on. */
event = sctp_skb2event(pos);
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
} else {
/* Attach all gathered skbs to the event. */
__skb_queue_tail(sctp_event2skb(event)->list, pos);
}
}
/* Send event to the ULP. */
if (event)
sctp_ulpq_tail_event(ulpq, event);
}
/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
struct sctp_stream *in;
/* Note: The stream ID must be verified before this routine. */
in = &ulpq->asoc->ssnmap->in;
/* Is this an old SSN? If so ignore. */
if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
return;
/* Mark that we are no longer expecting this SSN or lower. */
sctp_ssn_skip(in, sid, ssn);
/* Go find any other chunks that were waiting for
* ordering and deliver them if needed.
*/
sctp_ulpq_reap_ordered(ulpq);
return;
}
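As an editorial sketch only, the effect of sctp_ulpq_skip() followed by the reap step can be pictured on a single stream whose ordering queue is just a sorted array of SSNs. next_ssn, ssn_skip() and reap_ordered() are illustrative names, and the kernel's SSN_lt() is approximated here with 16-bit serial arithmetic.

#include <stdio.h>
#include <stdint.h>

static uint16_t next_ssn;	/* next SSN expected for delivery */

/* Ignore stale skips, otherwise stop expecting 'ssn' and everything below. */
static void ssn_skip(uint16_t ssn)
{
	if ((int16_t)(ssn - next_ssn) < 0)
		return;
	next_ssn = (uint16_t)(ssn + 1);
}

/* Deliver queued ordered events that the skip has just unblocked. */
static void reap_ordered(const uint16_t *lobby, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (lobby[i] != next_ssn)
			break;
		printf("deliver ssn %u\n", lobby[i]);
		next_ssn++;
	}
}

int main(void)
{
	const uint16_t lobby[] = { 5, 6, 9 };

	next_ssn = 2;
	ssn_skip(4);			/* FWD TSN skipped SSNs 2..4 */
	reap_ordered(lobby, 3);		/* delivers 5 and 6; 9 still waits */
	return 0;
}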
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
......