Commit 9b10f4ba
authored Apr 19, 2004 by David S. Miller
Merge nuts.davemloft.net:/disk1/BK/network-2.6
into nuts.davemloft.net:/disk1/BK/net-2.6
parents 28f95425 9e130292
Showing 25 changed files with 862 additions and 324 deletions (+862 -324)
include/linux/sctp.h             +67   -3
include/linux/sysctl.h            +1   -2
include/net/sctp/command.h        +4   -1
include/net/sctp/constants.h      +3  -11
include/net/sctp/sctp.h          +13   -2
include/net/sctp/sm.h             +7  -20
include/net/sctp/structs.h       +56  -68
include/net/sctp/tsnmap.h         +5   -1
include/net/sctp/ulpqueue.h       +5   -1
net/sctp/associola.c              +5  -10
net/sctp/chunk.c                 +13  -31
net/sctp/debug.c                  +5   -1
net/sctp/output.c                +49  -47
net/sctp/outqueue.c             +219  -92
net/sctp/protocol.c               +7   -1
net/sctp/sm_make_chunk.c         +67   -9
net/sctp/sm_sideeffect.c         +27   -2
net/sctp/sm_statefuns.c         +139  -11
net/sctp/sm_statetable.c         +37   -3
net/sctp/socket.c                +12   -5
net/sctp/sysctl.c                +10   -1
net/sctp/transport.c              +2   -1
net/sctp/tsnmap.c                +36   -1
net/sctp/ulpevent.c               +8   -0
net/sctp/ulpqueue.c              +65   -0
include/linux/sctp.h    View file @ 9b10f4ba

/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2003
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
...
...
@@ -93,6 +93,9 @@ typedef enum {
    SCTP_CID_ECN_CWR            = 13,
    SCTP_CID_SHUTDOWN_COMPLETE  = 14,

    /* PR-SCTP Sec 3.2 */
    SCTP_CID_FWD_TSN            = 0xC0,

    /* Use hex, as defined in ADDIP sec. 3.1 */
    SCTP_CID_ASCONF             = 0xC1,
    SCTP_CID_ASCONF_ACK         = 0x80,
...
...
@@ -168,6 +171,9 @@ typedef enum {
    SCTP_PARAM_SUPPORTED_ADDRESS_TYPES  = __constant_htons(12),
    SCTP_PARAM_ECN_CAPABLE              = __constant_htons(0x8000),

    /* PR-SCTP Sec 3.1 */
    SCTP_PARAM_FWD_TSN_SUPPORT          = __constant_htons(0xc000),

    /* Add-IP Extension. Section 3.2 */
    SCTP_PARAM_ADD_IP           = __constant_htons(0xc001),
    SCTP_PARAM_DEL_IP           = __constant_htons(0xc002),
...
...
@@ -472,9 +478,67 @@ typedef struct sctp_cwr_chunk {
    sctp_cwrhdr_t cwr_hdr;
} __attribute__((packed)) sctp_cwr_chunk_t;

/*
 * ADDIP Section 3.1 New Chunk Types
/* PR-SCTP
* 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN)
*
* Forward Cumulative TSN chunk has the following format:
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Type = 192 | Flags = 0x00 | Length = Variable |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | New Cumulative TSN |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Stream-1 | Stream Sequence-1 |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* \ /
* / \
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Stream-N | Stream Sequence-N |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* Chunk Flags:
*
* Set to all zeros on transmit and ignored on receipt.
*
* New Cumulative TSN: 32 bit u_int
*
* This indicates the new cumulative TSN to the data receiver. Upon
* the reception of this value, the data receiver MUST consider
* any missing TSNs earlier than or equal to this value as received
* and stop reporting them as gaps in any subsequent SACKs.
*
* Stream-N: 16 bit u_int
*
* This field holds a stream number that was skipped by this
* FWD-TSN.
*
* Stream Sequence-N: 16 bit u_int
* This field holds the sequence number associated with the stream
* that was skipped. The stream sequence field holds the largest stream
* sequence number in this stream being skipped. The receiver of
* the FWD-TSN's can use the Stream-N and Stream Sequence-N fields
* to enable delivery of any stranded TSN's that remain on the stream
* re-ordering queues. This field MUST NOT report TSN's corresponding
* to DATA chunk that are marked as unordered. For ordered DATA
* chunks this field MUST be filled in.
*/
struct sctp_fwdtsn_skip {
    __u16 stream;
    __u16 ssn;
} __attribute__((packed));

struct sctp_fwdtsn_hdr {
    __u32 new_cum_tsn;
    struct sctp_fwdtsn_skip skip[0];
} __attribute((packed));

struct sctp_fwdtsn_chunk {
    struct sctp_chunkhdr chunk_hdr;
    struct sctp_fwdtsn_hdr fwdtsn_hdr;
} __attribute((packed));
/* ADDIP
* Section 3.1.1 Address Configuration Change Chunk (ASCONF)
...
...
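Note: in the FORWARD TSN layout shown above, the chunk Length covers the chunk header, the New Cumulative TSN word, and one 4-byte stream/SSN pair per skip entry. A minimal sketch of that arithmetic using the structs added in this hunk (the helper name fwdtsn_chunk_len is hypothetical, not part of the patch):

static inline size_t fwdtsn_chunk_len(size_t nskips)
{
    /* chunk header + new_cum_tsn + 4 bytes per skipped stream */
    return sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_fwdtsn_hdr) +
           nskips * sizeof(struct sctp_fwdtsn_skip);
}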
include/linux/sysctl.h    View file @ 9b10f4ba
...
...
@@ -602,8 +602,7 @@ enum {
    NET_SCTP_PRESERVE_ENABLE = 11,
    NET_SCTP_MAX_BURST       = 12,
    NET_SCTP_ADDIP_ENABLE    = 13,
    NET_SCTP_RMEM            = 14,
    NET_SCTP_WMEM            = 15,
    NET_SCTP_PRSCTP_ENABLE   = 14,
};
/* /proc/sys/net/bridge */
...
...
include/net/sctp/command.h    View file @ 9b10f4ba

/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2003
 * (C) Copyright IBM Corp. 2001, 2004
* Copyright (C) 1999-2001 Cisco, Motorola
*
* This file is part of the SCTP kernel reference Implementation
...
...
@@ -29,6 +29,7 @@
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Ardelle Fan <ardelle.fan@intel.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
...
...
@@ -90,6 +91,8 @@ typedef enum {
    SCTP_CMD_RENEGE,         /* Renege data on an association. */
    SCTP_CMD_SETUP_T4,       /* ADDIP, setup T4 RTO timer parms. */
    SCTP_CMD_PROCESS_OPERR,  /* Process an ERROR chunk. */
    SCTP_CMD_REPORT_FWDTSN,  /* Report new cumulative TSN Ack. */
    SCTP_CMD_PROCESS_FWDTSN, /* Skips were reported, so process further. */
    SCTP_CMD_LAST
} sctp_verb_t;
...
...
include/net/sctp/constants.h    View file @ 9b10f4ba

/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2003
 * (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
...
...
@@ -57,15 +57,6 @@ enum { SCTP_MAX_STREAM = 0xffff };
enum { SCTP_DEFAULT_OUTSTREAMS = 10 };
enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
/* Define the amount of space to reserve for SCTP, IP, LL.
* There is a little bit of waste that we are always allocating
* for ipv6 headers, but this seems worth the simplicity.
*/
#define SCTP_IP_OVERHEAD ((sizeof(struct sctphdr)\
+ sizeof(struct ipv6hdr)\
+ MAX_HEADER))
/* Since CIDs are sparse, we need all four of the following
* symbols. CIDs are dense through SCTP_CID_BASE_MAX.
*/
...
...
@@ -77,6 +68,8 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
#define SCTP_NUM_ADDIP_CHUNK_TYPES 2
#define SCTP_NUM_PRSCTP_CHUNK_TYPES 1
/* These are the different flavours of event. */
typedef enum {
...
...
@@ -355,7 +348,6 @@ typedef enum {
    SCTP_XMIT_OK,
    SCTP_XMIT_PMTU_FULL,
    SCTP_XMIT_RWND_FULL,
    SCTP_XMIT_MUST_FRAG,
    SCTP_XMIT_NAGLE_DELAY,
} sctp_xmit_t;
...
...
include/net/sctp/sctp.h    View file @ 9b10f4ba
...
...
@@ -437,12 +437,15 @@ static inline __s32 sctp_jitter(__u32 rto)
static inline int sctp_frag_point(const struct sctp_opt *sp, int pmtu)
{
    int frag = pmtu;

    frag -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);
    frag -= sizeof(struct sctp_sack_chunk);
    frag -= sp->pf->af->net_header_len;
    frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk);

    if (sp->user_frag)
        frag = min_t(int, frag, sp->user_frag);

    frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN);

    return frag;
}
...
...
@@ -472,6 +475,14 @@ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
err = (sctp_errhdr_t *)((void *)err + \
WORD_ROUND(ntohs(err->length))))
#define sctp_walk_fwdtsn(pos, chunk)\
_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
#define _sctp_walk_fwdtsn(pos, chunk, end)\
for (pos = chunk->subh.fwdtsn_hdr->skip;\
(void *)pos <= (void *)chunk->subh.fwdtsn_hdr->skip + end - sizeof(struct sctp_fwdtsn_skip);\
pos++)
/* Round an int up to the next multiple of 4. */
#define WORD_ROUND(s) (((s)+3)&~3)
...
...
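Note: the sctp_walk_fwdtsn macro added above iterates the stream/SSN skip entries of a received FORWARD TSN chunk. A minimal usage sketch, mirroring how the side-effect code later in this merge consumes it (the printk is purely illustrative):

static void dump_fwdtsn_skips(struct sctp_chunk *chunk)
{
    struct sctp_fwdtsn_skip *skip;

    /* Skip fields are carried in network byte order on the wire. */
    sctp_walk_fwdtsn(skip, chunk) {
        printk("FWD TSN skip: stream %u ssn %u\n",
               ntohs(skip->stream), ntohs(skip->ssn));
    }
}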
include/net/sctp/sm.h    View file @ 9b10f4ba

/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2003
 * (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
...
...
@@ -141,6 +141,9 @@ sctp_state_fn_t sctp_sf_cookie_echoed_err;
sctp_state_fn_t sctp_sf_do_5_2_6_stale;
sctp_state_fn_t sctp_sf_do_asconf;
sctp_state_fn_t sctp_sf_do_asconf_ack;
sctp_state_fn_t sctp_sf_do_9_2_reshutack;
sctp_state_fn_t sctp_sf_eat_fwd_tsn;
sctp_state_fn_t sctp_sf_eat_fwd_tsn_fast;

/* Prototypes for primitive event state functions. */
sctp_state_fn_t sctp_sf_do_prm_asoc;
...
...
@@ -170,25 +173,6 @@ sctp_state_fn_t sctp_sf_do_6_3_3_rtx;
sctp_state_fn_t sctp_sf_do_6_2_sack;
sctp_state_fn_t sctp_sf_autoclose_timer_expire;

/* These are state functions which are either obsolete or not in use yet.
 * If any of these functions needs to be revived, it should be renamed with
 * the "sctp_sf_xxx" prefix, and be moved to the above prototype groups.
 */

/* Prototypes for chunk state functions. Not in use. */
sctp_state_fn_t sctp_sf_do_9_2_reshutack;
sctp_state_fn_t sctp_sf_do_9_2_reshut;
sctp_state_fn_t sctp_sf_do_9_2_shutack;

/* Prototypes for timeout event state functions. Not in use. */
sctp_state_fn_t sctp_do_4_2_reinit;
sctp_state_fn_t sctp_do_4_3_reecho;
sctp_state_fn_t sctp_do_9_2_reshut;
sctp_state_fn_t sctp_do_9_2_reshutack;
sctp_state_fn_t sctp_do_8_3_hb_err;
sctp_state_fn_t sctp_heartoff;

/* Prototypes for utility support functions. */
__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t,
...
@@ -277,6 +261,9 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                                       struct sctp_chunk *asconf);
int sctp_process_asconf_ack(struct sctp_association *asoc,
                            struct sctp_chunk *asconf_ack);
struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
                                    __u32 new_cum_tsn, size_t nstreams,
                                    struct sctp_fwdtsn_skip *skiplist);

void sctp_chunk_assign_tsn(struct sctp_chunk *);
void sctp_chunk_assign_ssn(struct sctp_chunk *);
...
...
include/net/sctp/structs.h    View file @ 9b10f4ba
...
...
@@ -193,6 +193,9 @@ extern struct sctp_globals {
    /* Flag to indicate if addip is enabled. */
    int addip_enable;

    /* Flag to indicate if PR-SCTP is enabled. */
    int prsctp_enable;
} sctp_globals;
#define sctp_rto_initial (sctp_globals.rto_initial)
...
...
@@ -221,6 +224,7 @@ extern struct sctp_globals {
#define sctp_local_addr_list (sctp_globals.local_addr_list)
#define sctp_local_addr_lock (sctp_globals.local_addr_lock)
#define sctp_addip_enable (sctp_globals.addip_enable)
#define sctp_prsctp_enable (sctp_globals.prsctp_enable)
/* SCTP Socket type: UDP or TCP style. */
typedef enum {
...
...
@@ -317,6 +321,8 @@ struct sctp_cookie {
    /* This holds the originating address of the INIT packet. */
    union sctp_addr peer_addr;

    __u8 prsctp_capable;
/* This is a shim for my peer's INIT packet, followed by
* a copy of the raw address list of the association.
* The length of the raw address list is saved in the
...
...
@@ -413,6 +419,13 @@ static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id)
    return stream->ssn[id]++;
}

/* Skip over this ssn and all below. */
static inline void sctp_ssn_skip(struct sctp_stream *stream, __u16 id,
                                 __u16 ssn)
{
    stream->ssn[id] = ssn+1;
}
/*
* Pointers to address related SCTP functions.
* (i.e. things that depend on the address family.)
...
...
@@ -514,8 +527,8 @@ struct sctp_datamsg {
    /* Did the messenge fail to send? */
    int send_error;
    char send_failed;

    /* Control whether fragments from this message can expire. */
    char can_expire;
    /* Control whether chunks from this message can be abandoned. */
    char can_abandon;
};
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
...
@@ -527,8 +540,8 @@ void sctp_datamsg_hold(struct sctp_datamsg *);
void sctp_datamsg_free(struct sctp_datamsg *);
void sctp_datamsg_track(struct sctp_chunk *);
void sctp_datamsg_assign(struct sctp_datamsg *, struct sctp_chunk *);
void sctp_datamsg_fail(struct sctp_chunk *, int error);
int sctp_datamsg_expires(struct sctp_chunk *);
void sctp_chunk_fail(struct sctp_chunk *, int error);
int sctp_chunk_abandoned(struct sctp_chunk *);
/* RFC2960 1.4 Key Terms
...
...
@@ -583,6 +596,7 @@ struct sctp_chunk {
        struct sctp_cwrhdr *ecn_cwr_hdr;
        struct sctp_errhdr *err_hdr;
        struct sctp_addiphdr *addip_hdr;
        struct sctp_fwdtsn_hdr *fwdtsn_hdr;
    } subh;

    __u8 *chunk_end;
...
...
@@ -667,6 +681,9 @@ struct sctp_packet {
    /* This contains the payload chunks. */
    struct sk_buff_head chunks;

    /* This is the overhead of the sctp and ip headers. */
    size_t overhead;
    /* This is the total size of all chunks INCLUDING padding. */
    size_t size;
...
...
@@ -676,16 +693,6 @@ struct sctp_packet {
     */
    struct sctp_transport *transport;

    /* Allow a callback for getting a high priority chunk
     * bundled early into the packet (This is used for ECNE).
     */
    sctp_packet_phandler_t *get_prepend_chunk;

    /* This packet should advertise ECN capability to the network
     * via the ECT bit.
     */
    char ecn_capable;

    /* This packet contains a COOKIE-ECHO chunk. */
    char has_cookie_echo;
...
...
@@ -698,29 +705,21 @@ struct sctp_packet {
    int malloced;
};

typedef int (sctp_outq_thandler_t)(struct sctp_outq *, void *);
typedef int (sctp_outq_ehandler_t)(struct sctp_outq *);
typedef struct sctp_packet *(sctp_outq_ohandler_init_t)
    (struct sctp_packet *,
     struct sctp_transport *,
     __u16 sport,
     __u16 dport);
typedef struct sctp_packet *(sctp_outq_ohandler_config_t)
    (struct sctp_packet *,
     __u32 vtag,
     int ecn_capable,
     sctp_packet_phandler_t *get_prepend_chunk);
typedef sctp_xmit_t (sctp_outq_ohandler_t)(struct sctp_packet *,
                                           struct sctp_chunk *);
typedef int (sctp_outq_ohandler_force_t)(struct sctp_packet *);

sctp_outq_ohandler_init_t    sctp_packet_init;
sctp_outq_ohandler_config_t  sctp_packet_config;
sctp_outq_ohandler_t         sctp_packet_append_chunk;
sctp_outq_ohandler_t         sctp_packet_transmit_chunk;
sctp_outq_ohandler_force_t   sctp_packet_transmit;

struct sctp_packet *sctp_packet_init(struct sctp_packet *,
                                     struct sctp_transport *,
                                     __u16 sport, __u16 dport);
struct sctp_packet *sctp_packet_config(struct sctp_packet *, __u32 vtag, int);
sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *,
                                       struct sctp_chunk *);
sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *,
                                     struct sctp_chunk *);
int sctp_packet_transmit(struct sctp_packet *);
void sctp_packet_free(struct sctp_packet *);

static inline int sctp_packet_empty(struct sctp_packet *packet)
{
    return (packet->size == packet->overhead);
}
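Note: the net effect of this hunk is that callers invoke the sctp_packet routines directly instead of going through the old per-outqueue handler pointers. A rough caller-side sketch of the new sequence (the function name, ports and flags below are placeholders, not taken from the patch):

static int xmit_one_chunk(struct sctp_packet *pkt, struct sctp_transport *tp,
                          struct sctp_chunk *chunk, __u32 vtag)
{
    /* replaces the old init_output/config_output/build_output/force_output chain */
    sctp_packet_init(pkt, tp, /* sport */ 0, /* dport */ 0);
    sctp_packet_config(pkt, vtag, /* ecn_capable */ 0);
    sctp_packet_append_chunk(pkt, chunk);
    return sctp_packet_transmit(pkt);
}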
/* This represents a remote transport address.
* For local transport addresses, we just use union sctp_addr.
...
...
@@ -905,7 +904,7 @@ struct sctp_transport {
    /* A flag which indicates the occurrence of a changeover */
    char changeover_active;

    /* A glag which indicates whether the change of primary is
    /* A flag which indicates whether the change of primary is
* the first switch to this destination address during an
* active switch.
*/
...
...
@@ -1008,15 +1007,10 @@ struct sctp_outq {
     */
    struct list_head retransmit;

    /* Call these functions to send chunks down to the next lower
     * layer.  This is always sctp_packet, but we separate the two
     * structures to make testing simpler.
    /* Put chunks on this list to save them for FWD TSN processing as
     * they were abandoned.
     */
    sctp_outq_ohandler_init_t   *init_output;
    sctp_outq_ohandler_config_t *config_output;
    sctp_outq_ohandler_t        *append_output;
    sctp_outq_ohandler_t        *build_output;
    sctp_outq_ohandler_force_t  *force_output;
    struct list_head abandoned;

    /* How many unackd bytes do we have in-flight? */
    __u32 outstanding_bytes;
...
...
@@ -1039,12 +1033,6 @@ int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk);
int sctp_outq_flush(struct sctp_outq *, int);
int sctp_outq_sack(struct sctp_outq *, struct sctp_sackhdr *);
int sctp_outq_is_empty(const struct sctp_outq *);
int sctp_outq_set_output_handlers(struct sctp_outq *,
                                  sctp_outq_ohandler_init_t init,
                                  sctp_outq_ohandler_config_t config,
                                  sctp_outq_ohandler_t append,
                                  sctp_outq_ohandler_t build,
                                  sctp_outq_ohandler_force_t force);
void sctp_outq_restart(struct sctp_outq *);
void sctp_retransmit(struct sctp_outq *, struct sctp_transport *,
...
...
@@ -1387,16 +1375,25 @@ struct sctp_association {
    struct sctp_tsnmap tsn_map;
    __u8 _map[sctp_tsnmap_storage_size(SCTP_TSN_MAP_SIZE)];

    /* Do we need to sack the peer? */
    __u8 sack_needed;
    /* Ack State   : This flag indicates if the next received
     *             : packet is to be responded to with a
     *             : SACK. This is initializedto 0.  When a packet
     *             : is received it is incremented. If this value
     *             : reaches 2 or more, a SACK is sent and the
     *             : value is reset to 0. Note: This is used only
     *             : when no DATA chunks are received out of
     *             : order.  When DATA chunks are out of order,
     *             : SACK's are not delayed (see Section 6).
     */
    __u8    sack_needed;     /* Do we need to sack the peer? */

    /* These are capabilities which our peer advertised. */
    __u8    ecn_capable;     /* Can peer do ECN? */
    __u8    ipv4_address;    /* Peer understands IPv4 addresses? */
    __u8    ipv6_address;    /* Peer understands IPv6 addresses? */
    __u8    hostname_address;/* Peer understands DNS addresses? */

    /* Does peer support ADDIP? */
    __u8    asconf_capable;
    __u8    asconf_capable;  /* Does peer support ADDIP? */
    __u8    prsctp_capable;  /* Can peer do PR-SCTP? */
/* This mask is used to disable sending the ASCONF chunk
* with specified parameter to peer.
...
...
@@ -1489,6 +1486,9 @@ struct sctp_association {
    __u32 ctsn_ack_point;

    /* PR-SCTP Advanced.Peer.Ack.Point */
    __u32 adv_peer_ack_point;

    /* Highest TSN that is acknowledged by incoming SACKs. */
    __u32 highest_sacked;
...
...
@@ -1529,19 +1529,7 @@ struct sctp_association {
/* The message size at which SCTP fragmentation will occur. */
    __u32 frag_point;
/* Ack State : This flag indicates if the next received
* : packet is to be responded to with a
* : SACK. This is initializedto 0. When a packet
* : is received it is incremented. If this value
* : reaches 2 or more, a SACK is sent and the
* : value is reset to 0. Note: This is used only
* : when no DATA chunks are received out of
* : order. When DATA chunks are out of order,
* : SACK's are not delayed (see Section 6).
*/
/* Do we need to send an ack?
* When counters[SctpCounterAckState] is above 1 we do!
*/
    /* Currently only one counter is used to count INIT errors. */
    int counters[SCTP_NUMBER_COUNTERS];
/* Default send parameters. */
...
...
include/net/sctp/tsnmap.h    View file @ 9b10f4ba
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
*
* This file is part of the SCTP kernel reference Implementation
...
...
@@ -37,6 +37,7 @@
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
...
...
@@ -145,6 +146,9 @@ int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
/* Mark this TSN as seen. */
void sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);

/* Mark this TSN and all lower as seen. */
void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);

/* Retrieve the Cumulative TSN ACK Point. */
static inline __u32 sctp_tsnmap_get_ctsn(const struct sctp_tsnmap *map)
{
...
...
include/net/sctp/ulpqueue.h    View file @ 9b10f4ba
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
...
...
@@ -38,6 +38,7 @@
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
...
...
@@ -79,6 +80,9 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *, int);
/* Clear the partial data delivery condition on this socket. */
int sctp_clear_pd(struct sock *sk);

/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);

#endif /* __sctp_ulpqueue_h__ */
...
...
net/sctp/associola.c    View file @ 9b10f4ba
...
...
@@ -210,6 +210,7 @@ struct sctp_association *sctp_association_init(struct sctp_association *asoc,
    asoc->next_tsn = asoc->c.initial_tsn;

    asoc->ctsn_ack_point = asoc->next_tsn - 1;
    asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
    asoc->highest_sacked = asoc->ctsn_ack_point;
    asoc->last_cwr_tsn = asoc->ctsn_ack_point;
    asoc->unack_data = 0;
...
...
@@ -261,12 +262,6 @@ struct sctp_association *sctp_association_init(struct sctp_association *asoc,
    /* Create an output queue. */
    sctp_outq_init(asoc, &asoc->outqueue);

    sctp_outq_set_output_handlers(&asoc->outqueue,
                                  sctp_packet_init,
                                  sctp_packet_config,
                                  sctp_packet_append_chunk,
                                  sctp_packet_transmit_chunk,
                                  sctp_packet_transmit);

    if (!sctp_ulpq_init(&asoc->ulpq, asoc))
        goto fail_init;
...
...
@@ -482,10 +477,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
/* The asoc->peer.port might not be meaningful yet, but
* initialize the packet structure anyway.
*/
    (asoc->outqueue.init_output)(&peer->packet, peer,
                                 asoc->base.bind_addr.port,
                                 asoc->peer.port);
    sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
                     asoc->peer.port);
/* 7.2.1 Slow-Start
*
...
...
@@ -967,6 +960,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
    asoc->next_tsn = new->next_tsn;
    asoc->ctsn_ack_point = new->ctsn_ack_point;
    asoc->adv_peer_ack_point = new->adv_peer_ack_point;
/* Reinitialize SSN for both local streams
* and peer's streams.
...
...
@@ -975,6 +969,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
    } else {
        asoc->ctsn_ack_point = asoc->next_tsn - 1;
        asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
        if (!asoc->ssnmap) {
            /* Move the ssnmap. */
            asoc->ssnmap = new->ssnmap;
...
...
net/sctp/chunk.c    View file @ 9b10f4ba

/* SCTP kernel reference Implementation
 * Copyright (c) 2003 International Business Machines Corp.
 * (C) Copyright IBM Corp. 2003, 2004
*
* This file is part of the SCTP kernel reference Implementation
*
...
...
@@ -31,6 +31,7 @@
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
...
...
@@ -55,7 +56,8 @@ void sctp_datamsg_init(struct sctp_datamsg *msg)
    atomic_set(&msg->refcnt, 1);
    msg->send_failed = 0;
    msg->send_error = 0;
    msg->can_expire = 0;
    msg->can_abandon = 0;
    msg->expires_at = 0;
    INIT_LIST_HEAD(&msg->chunks);
}
...
...
@@ -182,34 +184,17 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
* have the same expiration.
*/
    if (sinfo->sinfo_timetolive) {
        struct timeval tv;
        __u32 ttl = sinfo->sinfo_timetolive;

        /* sinfo_timetolive is in milliseconds */
        tv.tv_sec = ttl / 1000;
        tv.tv_usec = ttl % 1000 * 1000;
        msg->expires_at = jiffies + timeval_to_jiffies(&tv);
        msg->can_expire = 1;
        msg->expires_at = jiffies +
                          MSECS_TO_JIFFIES(sinfo->sinfo_timetolive);
        msg->can_abandon = 1;
        SCTP_DEBUG_PRINTK("%s: msg:%p expires_at: %ld jiffies:%ld\n",
                          __FUNCTION__, msg, msg->expires_at, jiffies);
    }

    /* What is a reasonable fragmentation point right now? */
    max = asoc->pmtu;
    if (max < SCTP_MIN_PMTU)
        max = SCTP_MIN_PMTU;
    max -= SCTP_IP_OVERHEAD;

    /* Make sure not beyond maximum chunk size. */
    if (max > SCTP_MAX_CHUNK_LEN)
        max = SCTP_MAX_CHUNK_LEN;
    max = asoc->frag_point;

    /* Subtract out the overhead of a data chunk header. */
    max -= sizeof(struct sctp_data_chunk);
    whole = 0;

    /* If user has specified smaller fragmentation, make it so. */
    if (sctp_sk(asoc->base.sk)->user_frag)
        max = min_t(int, max, sctp_sk(asoc->base.sk)->user_frag);

    first_len = max;
/* Encourage Cookie-ECHO bundling. */
...
...
@@ -303,14 +288,11 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
}

/* Check whether this message has expired. */
int sctp_datamsg_expires(struct sctp_chunk *chunk)
int sctp_chunk_abandoned(struct sctp_chunk *chunk)
{
    struct sctp_datamsg *msg = chunk->msg;

    /* FIXME: When PR-SCTP is supported we can make this
     * check more lenient.
     */
    if (!msg->can_expire)
    if (!msg->can_abandon)
        return 0;

    if (time_after(jiffies, msg->expires_at))
...
...
@@ -320,7 +302,7 @@ int sctp_datamsg_expires(struct sctp_chunk *chunk)
}

/* This chunk (and consequently entire message) has failed in its sending. */
void sctp_datamsg_fail(struct sctp_chunk *chunk, int error)
void sctp_chunk_fail(struct sctp_chunk *chunk, int error)
{
    chunk->msg->send_failed = 1;
    chunk->msg->send_error = error;
...
...
net/sctp/debug.c    View file @ 9b10f4ba
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 International Business Machines Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
...
...
@@ -43,6 +43,7 @@
* Xingang Guo <xingang.guo@intel.com>
* Jon Grimm <jgrimm@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
...
...
@@ -88,6 +89,9 @@ const char *sctp_cname(const sctp_subtype_t cid)
    case SCTP_CID_ASCONF_ACK:
        return "ASCONF_ACK";

    case SCTP_CID_FWD_TSN:
        return "FWD_TSN";

    default:
        return "unknown chunk";
    };
...
...
net/sctp/output.c    View file @ 9b10f4ba
...
...
@@ -62,29 +62,35 @@
#include <net/sctp/sm.h>

/* Forward declarations for private helpers. */
static void sctp_packet_reset(struct sctp_packet *packet);
static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
                                           struct sctp_chunk *chunk);

/* Config a packet.
 * This appears to be a followup set of initializations.)
 * This appears to be a followup set of initializations.
 */
struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
                                       __u32 vtag, int ecn_capable,
                                       sctp_packet_phandler_t *prepend_handler)
                                       __u32 vtag, int ecn_capable)
{
    int packet_empty = (packet->size == SCTP_IP_OVERHEAD);
    struct sctp_chunk *chunk = NULL;

    SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __FUNCTION__,
                      packet, vtag);

    packet->vtag = vtag;
    packet->ecn_capable = ecn_capable;
    packet->get_prepend_chunk = prepend_handler;
    packet->has_cookie_echo = 0;
    packet->has_sack = 0;
    packet->ipfragok = 0;

    /* We might need to call the prepend_handler right away. */
    if (packet_empty)
        sctp_packet_reset(packet);
    if (ecn_capable && sctp_packet_empty(packet)) {
        chunk = sctp_get_ecne_prepend(packet->transport->asoc);

        /* If there a is a prepend chunk stick it on the list before
         * any other chunks get appended.
         */
        if (chunk)
            sctp_packet_append_chunk(packet, chunk);
    }

    return packet;
}
...
...
@@ -93,19 +99,30 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
                                     struct sctp_transport *transport,
                                     __u16 sport, __u16 dport)
{
    struct sctp_association *asoc = transport->asoc;
    size_t overhead;

    SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __FUNCTION__,
                      packet, transport);

    packet->transport = transport;
    packet->source_port = sport;
    packet->destination_port = dport;
    skb_queue_head_init(&packet->chunks);
    packet->size = SCTP_IP_OVERHEAD;
    if (asoc) {
        struct sctp_opt *sp = sctp_sk(asoc->base.sk);
        overhead = sp->pf->af->net_header_len;
    } else {
        overhead = sizeof(struct ipv6hdr);
    }
    overhead += sizeof(struct sctphdr);
    packet->overhead = overhead;
    packet->size = overhead;
    packet->vtag = 0;
    packet->ecn_capable = 0;
    packet->get_prepend_chunk = NULL;
    packet->has_cookie_echo = 0;
    packet->has_sack = 0;
    packet->ipfragok = 0;
    packet->malloced = 0;
    sctp_packet_reset(packet);
    return packet;
}
...
...
@@ -114,6 +131,8 @@ void sctp_packet_free(struct sctp_packet *packet)
{
    struct sctp_chunk *chunk;

    SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);

    while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)))
        sctp_chunk_free(chunk);
...
...
@@ -134,6 +153,9 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
    sctp_xmit_t retval;
    int error = 0;

    SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__,
                      packet, chunk);

    switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
    case SCTP_XMIT_PMTU_FULL:
        if (!packet->has_cookie_echo) {
...
@@ -148,7 +170,6 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
        }
        break;

    case SCTP_XMIT_MUST_FRAG:
    case SCTP_XMIT_RWND_FULL:
    case SCTP_XMIT_OK:
    case SCTP_XMIT_NAGLE_DELAY:
...
@@ -201,6 +222,9 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
    size_t pmtu;
    int too_big;

    SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, packet,
                      chunk);

    retval = sctp_packet_bundle_sack(packet, chunk);
    psize = packet->size;
...
...
@@ -215,17 +239,14 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
    /* Decide if we need to fragment or resubmit later. */
    if (too_big) {
        int packet_empty = (packet->size == SCTP_IP_OVERHEAD);

        /* Both control chunks and data chunks with TSNs are
         * non-fragmentable.
         */
        if (packet_empty || !sctp_chunk_is_data(chunk)) {
        if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk)) {
            /* We no longer do re-fragmentation.
             * Just fragment at the IP layer, if we
             * actually hit this condition
             */
            packet->ipfragok = 1;
            goto append;
...
...
@@ -233,9 +254,6 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
            retval = SCTP_XMIT_PMTU_FULL;
            goto finish;
        }
    } else {
        /* The chunk fits in the packet. */
        goto append;
    }
append:
...
...
@@ -260,6 +278,7 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
    /* It is OK to send this chunk. */
    __skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
    packet->size += chunk_len;
    chunk->transport = packet->transport;
finish:
    return retval;
}
...
...
@@ -283,6 +302,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
    __u8 has_data = 0;
    struct dst_entry *dst;

    SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);

    /* Do NOT generate a chunkless packet. */
    chunk = (struct sctp_chunk *)skb_peek(&packet->chunks);
    if (unlikely(!chunk))
...
...
@@ -297,7 +318,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
        goto nomem;

    /* Make sure the outbound skb has enough header room reserved. */
    skb_reserve(nskb, SCTP_IP_OVERHEAD);
    skb_reserve(nskb, packet->overhead);

    /* Set the owning socket so that we know where to get the
     * destination IP address.
...
...
@@ -471,7 +492,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
    (*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok);

out:
    packet->size = SCTP_IP_OVERHEAD;
    packet->size = packet->overhead;
    return err;
no_route:
    kfree_skb(nskb);
...
...
@@ -497,7 +518,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
    goto out;
nomem:
    err = -ENOMEM;
    printk("%s alloc_skb failed.\n", __FUNCTION__);
    goto err;
}
...
...
@@ -505,25 +525,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 * 2nd Level Abstractions
 ********************************************************************/

/*
 * This private function resets the packet to a fresh state.
 */
static void sctp_packet_reset(struct sctp_packet *packet)
{
    struct sctp_chunk *chunk = NULL;

    packet->size = SCTP_IP_OVERHEAD;

    if (packet->get_prepend_chunk)
        chunk = packet->get_prepend_chunk(packet->transport->asoc);

    /* If there a is a prepend chunk stick it on the list before
     * any other chunks get appended.
     */
    if (chunk)
        sctp_packet_append_chunk(packet, chunk);
}

/* This private function handles the specifics of appending DATA chunks. */
static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
                                           struct sctp_chunk *chunk)
...
...
@@ -609,7 +610,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
* if any previously transmitted data on the connection remains
* unacknowledged.
*/
    if (!sp->nodelay && SCTP_IP_OVERHEAD == packet->size &&
    if (!sp->nodelay && sctp_packet_empty(packet) &&
        q->outstanding_bytes && sctp_state(asoc, ESTABLISHED)) {
        unsigned len = datasize + q->out_qlen;
...
...
@@ -617,7 +618,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
* data will fit or delay in hopes of bundling a full
* sized packet.
*/
        if (len < asoc->pmtu - SCTP_IP_OVERHEAD) {
        if (len < asoc->pmtu - packet->overhead) {
            retval = SCTP_XMIT_NAGLE_DELAY;
            goto finish;
        }
...
...
@@ -637,7 +638,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
    asoc->peer.rwnd = rwnd;
    /* Has been accepted for transmission. */
    chunk->msg->can_expire = 0;
    if (!asoc->peer.prsctp_capable)
        chunk->msg->can_abandon = 0;

finish:
    return retval;
...
...
net/sctp/outqueue.c    View file @ 9b10f4ba
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 Intel Corp.
* Copyright (c) 2001-2003 International Business Machines Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
...
...
@@ -69,6 +69,8 @@ static void sctp_mark_missing(struct sctp_outq *q,
                              __u32 highest_new_tsn,
                              int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
                                       struct sctp_chunk *ch)
...
...
@@ -222,12 +224,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
    skb_queue_head_init(&q->control);
    INIT_LIST_HEAD(&q->retransmit);
    INIT_LIST_HEAD(&q->sacked);

    q->init_output = NULL;
    q->config_output = NULL;
    q->append_output = NULL;
    q->build_output = NULL;
    q->force_output = NULL;
    INIT_LIST_HEAD(&q->abandoned);

    q->outstanding_bytes = 0;
    q->empty = 1;
...
...
@@ -252,7 +249,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
            chunk = list_entry(lchunk, struct sctp_chunk,
                               transmitted_list);
            /* Mark as part of a failed message. */
            sctp_datamsg_fail(chunk, q->error);
            sctp_chunk_fail(chunk, q->error);
            sctp_chunk_free(chunk);
        }
    }
...
...
@@ -262,7 +259,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
        list_del_init(lchunk);
        chunk = list_entry(lchunk, struct sctp_chunk,
                           transmitted_list);
        sctp_datamsg_fail(chunk, q->error);
        sctp_chunk_fail(chunk, q->error);
        sctp_chunk_free(chunk);
    }
...
@@ -271,7 +268,16 @@ void sctp_outq_teardown(struct sctp_outq *q)
        list_del_init(lchunk);
        chunk = list_entry(lchunk, struct sctp_chunk,
                           transmitted_list);
        sctp_datamsg_fail(chunk, q->error);
        sctp_chunk_fail(chunk, q->error);
        sctp_chunk_free(chunk);
    }

    /* Throw away any chunks that are in the abandoned queue. */
    list_for_each_safe(lchunk, temp, &q->abandoned) {
        list_del_init(lchunk);
        chunk = list_entry(lchunk, struct sctp_chunk,
                           transmitted_list);
        sctp_chunk_fail(chunk, q->error);
        sctp_chunk_free(chunk);
    }
...
@@ -279,7 +285,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
    while ((chunk = sctp_outq_dequeue_data(q))) {
        /* Mark as send failure. */
        sctp_datamsg_fail(chunk, q->error);
        sctp_chunk_fail(chunk, q->error);
        sctp_chunk_free(chunk);
    }
...
...
@@ -363,32 +369,30 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
    return error;
}

/* Insert a chunk into the retransmit queue.  Chunks on the retransmit
 * queue are kept in order, based on the TSNs.
/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * and the abandoned list are in ascending order.
 */
void sctp_retransmit_insert(struct list_head *tlchunk, struct sctp_outq *q)
void sctp_insert_list(struct list_head *head, struct list_head *new)
{
    struct list_head *rlchunk;
    struct sctp_chunk *tchunk, *rchunk;
    __u32 ttsn, rtsn;
    struct list_head *pos;
    struct sctp_chunk *nchunk, *lchunk;
    __u32 ntsn, ltsn;
    int done = 0;

    tchunk = list_entry(tlchunk, struct sctp_chunk, transmitted_list);
    ttsn = ntohl(tchunk->subh.data_hdr->tsn);
    nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
    ntsn = ntohl(nchunk->subh.data_hdr->tsn);

    list_for_each(rlchunk, &q->retransmit) {
        rchunk = list_entry(rlchunk, struct sctp_chunk,
                            transmitted_list);
        rtsn = ntohl(rchunk->subh.data_hdr->tsn);
        if (TSN_lt(ttsn, rtsn)) {
            list_add(tlchunk, rlchunk->prev);
    list_for_each(pos, head) {
        lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
        ltsn = ntohl(lchunk->subh.data_hdr->tsn);
        if (TSN_lt(ntsn, ltsn)) {
            list_add(new, pos->prev);
            done = 1;
            break;
        }
    }
    if (!done) {
        list_add_tail(tlchunk, &q->retransmit);
    }
    if (!done)
        list_add_tail(new, head);
}
/* Mark all the eligible packets on a transport for retransmission. */
...
...
@@ -404,6 +408,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
        chunk = list_entry(lchunk, struct sctp_chunk,
                           transmitted_list);

        /* If the chunk is abandoned, move it to abandoned list. */
        if (sctp_chunk_abandoned(chunk)) {
            list_del_init(lchunk);
            sctp_insert_list(&q->abandoned, lchunk);
            continue;
        }
/* If we are doing retransmission due to a fast retransmit,
* only the chunk's that are marked for fast retransmit
* should be added to the retransmit queue. If we are doing
...
...
@@ -444,10 +455,10 @@ void sctp_retransmit_mark(struct sctp_outq *q,
        }

        /* Move the chunk to the retransmit queue. The chunks
         * on the retransmit queue is always kept in order.
         * on the retransmit queue are always kept in order.
         */
        list_del_init(lchunk);
        sctp_retransmit_insert(lchunk, q);
        sctp_insert_list(&q->retransmit, lchunk);
    }
}
...
...
@@ -490,6 +501,12 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
    sctp_retransmit_mark(q, transport, fast_retransmit);

    /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
     * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
     * following the procedures outlined in C1 - C5.
     */
    sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

    error = sctp_outq_flush(q, /* rtx_timeout */ 1);

    if (error)
...
@@ -552,12 +569,12 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
        }

        /* Attempt to append this chunk to the packet. */
        status = (*q->append_output)(pkt, chunk);
        status = sctp_packet_append_chunk(pkt, chunk);

        switch (status) {
        case SCTP_XMIT_PMTU_FULL:
            /* Send this packet. */
            if ((error = (*q->force_output)(pkt)) == 0)
            if ((error = sctp_packet_transmit(pkt)) == 0)
                *start_timer = 1;

            /* If we are retransmitting, we should only
...
@@ -573,7 +590,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
        case SCTP_XMIT_RWND_FULL:
            /* Send this packet. */
            if ((error = (*q->force_output)(pkt)) == 0)
            if ((error = sctp_packet_transmit(pkt)) == 0)
                *start_timer = 1;

            /* Stop sending DATA as there is no more room
...
@@ -583,6 +600,16 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
            lchunk = NULL;
            break;

        case SCTP_XMIT_NAGLE_DELAY:
            /* Send this packet. */
            if ((error = sctp_packet_transmit(pkt)) == 0)
                *start_timer = 1;

            /* Stop sending DATA because of nagle delay. */
            list_add(lchunk, lqueue);
            lchunk = NULL;
            break;
default:
/* The append was successful, so add this chunk to
* the transmitted list.
...
...
@@ -625,13 +652,9 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
    struct sctp_packet *packet;
    struct sctp_packet singleton;
    struct sctp_association *asoc = q->asoc;
    int ecn_capable = asoc->peer.ecn_capable;
    __u16 sport = asoc->base.bind_addr.port;
    __u16 dport = asoc->peer.port;
    __u32 vtag = asoc->peer.i.init_tag;
    /* This is the ECNE handler for singleton packets. */
    sctp_packet_phandler_t *s_ecne_handler = NULL;
    sctp_packet_phandler_t *ecne_handler = NULL;
    struct sk_buff_head *queue;
    struct sctp_transport *transport = NULL;
    struct sctp_transport *new_transport;
...
...
@@ -656,10 +679,6 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
* within a SCTP packet in increasing order of TSN.
* ...
*/
    if (ecn_capable) {
        s_ecne_handler = &sctp_get_no_prepend;
        ecne_handler = &sctp_get_ecne_prepend;
    }

    queue = &q->control;
    while ((chunk = (struct sctp_chunk *)skb_dequeue(queue))) {
...
@@ -686,8 +705,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                                              &transport_list);
        }

        packet = &transport->packet;
        (*q->config_output)(packet, vtag, ecn_capable, ecne_handler);
        sctp_packet_config(packet, vtag, asoc->peer.ecn_capable);
    }

    switch (chunk->chunk_hdr->type) {
...
@@ -700,11 +719,10 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
    case SCTP_CID_INIT:
    case SCTP_CID_INIT_ACK:
    case SCTP_CID_SHUTDOWN_COMPLETE:
        (*q->init_output)(&singleton, transport, sport, dport);
        (*q->config_output)(&singleton, vtag, ecn_capable,
                            s_ecne_handler);
        (void) (*q->build_output)(&singleton, chunk);
        error = (*q->force_output)(&singleton);
        sctp_packet_init(&singleton, transport, sport, dport);
        sctp_packet_config(&singleton, vtag, 0);
        sctp_packet_append_chunk(&singleton, chunk);
        error = sctp_packet_transmit(&singleton);
        if (error < 0)
            return error;
        break;
...
...
@@ -720,12 +738,10 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
    case SCTP_CID_COOKIE_ACK:
    case SCTP_CID_ECN_ECNE:
    case SCTP_CID_ECN_CWR:
        (void) (*q->build_output)(packet, chunk);
        break;

    case SCTP_CID_ASCONF:
    case SCTP_CID_ASCONF_ACK:
        (void) (*q->build_output)(packet, chunk);
    case SCTP_CID_FWD_TSN:
        sctp_packet_transmit_chunk(packet, chunk);
        break;

    default:
...
...
@@ -770,8 +786,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
    }

    packet = &transport->packet;
    (*q->config_output)(packet, vtag, ecn_capable, ecne_handler);
    sctp_packet_config(packet, vtag, asoc->peer.ecn_capable);

retran:
    error = sctp_outq_flush_rtx(q, packet, rtx_timeout, &start_timer);
...
...
@@ -803,15 +819,15 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
            if (chunk->sinfo.sinfo_stream >=
                asoc->c.sinit_num_ostreams) {

                /* Mark as s failed send. */
                sctp_datamsg_fail(chunk, SCTP_ERROR_INV_STRM);
                /* Mark as failed send. */
                sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
                sctp_chunk_free(chunk);
                continue;
            }

            /* Has this chunk expired? */
            if (sctp_datamsg_expires(chunk)) {
                sctp_datamsg_fail(chunk, 0);
            if (sctp_chunk_abandoned(chunk)) {
                sctp_chunk_fail(chunk, 0);
                sctp_chunk_free(chunk);
                continue;
            }
...
...
@@ -836,11 +852,11 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
            }

            packet = &transport->packet;
            (*q->config_output)(packet, vtag, ecn_capable,
                                ecne_handler);
            sctp_packet_config(packet, vtag,
                               asoc->peer.ecn_capable);
        }

        SCTP_DEBUG_PRINTK("sctp_transmit_packet(%p, %p[%s]), ",
        SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
                          q, chunk,
                          chunk && chunk->chunk_hdr ?
                          sctp_cname(SCTP_ST_CHUNK(
...
...
@@ -855,7 +871,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                          atomic_read(&chunk->skb->users) : -1);

            /* Add the chunk to the packet. */
            status = (*q->build_output)(packet, chunk);
            status = sctp_packet_transmit_chunk(packet, chunk);

            switch (status) {
            case SCTP_XMIT_PMTU_FULL:
...
...
@@ -879,7 +895,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                BUG();
            }

            /* BUG: We assume that the (*q->force_output())
            /* BUG: We assume that the sctp_packet_transmit()
* call below will succeed all the time and add the
* chunk to the transmitted list and restart the
* timers.
...
...
@@ -922,33 +938,14 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
        struct sctp_transport *t = list_entry(ltransport,
                                              struct sctp_transport,
                                              send_ready);
        if (t != transport)
            transport = t;

        packet = &transport->packet;
        if (packet->size != SCTP_IP_OVERHEAD)
            error = (*q->force_output)(packet);
        packet = &t->packet;
        if (!sctp_packet_empty(packet))
            error = sctp_packet_transmit(packet);
    }

    return error;
}

/* Set the various output handling callbacks. */
int sctp_outq_set_output_handlers(struct sctp_outq *q,
                                  sctp_outq_ohandler_init_t init,
                                  sctp_outq_ohandler_config_t config,
                                  sctp_outq_ohandler_t append,
                                  sctp_outq_ohandler_t build,
                                  sctp_outq_ohandler_force_t force)
{
    q->init_output = init;
    q->config_output = config;
    q->append_output = append;
    q->build_output = build;
    q->force_output = force;
    return 0;
}

/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
                                        struct sctp_sackhdr *sack)
/* Update unack_data based on the incoming SACK chunk */
static
void
sctp_sack_update_unack_data
(
struct
sctp_association
*
assoc
,
struct
sctp_sackhdr
*
sack
)
...
...
@@ -1007,7 +1004,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
{
    struct sctp_association *asoc = q->asoc;
    struct sctp_transport *transport;
    struct sctp_chunk *tchunk;
    struct sctp_chunk *tchunk = NULL;
    struct list_head *lchunk, *transport_list, *pos, *temp;
    sctp_sack_variable_t *frags = sack->variable;
    __u32 sack_ctsn, ctsn, tsn;
...
...
@@ -1110,11 +1107,6 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
    ctsn = asoc->ctsn_ack_point;

    SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
                      __FUNCTION__, sack_ctsn);
    SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association "
                      "%p is 0x%x.\n", __FUNCTION__, asoc, ctsn);

    /* Throw away stuff rotting on the sack queue. */
    list_for_each_safe(lchunk, temp, &q->sacked) {
        tchunk = list_entry(lchunk, struct sctp_chunk,
...
...
@@ -1139,10 +1131,19 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
    asoc->peer.rwnd = sack_a_rwnd;

    sctp_generate_fwdtsn(q, sack_ctsn);

    SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
                      __FUNCTION__, sack_ctsn);
    SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
                      "%p is 0x%x. Adv peer ack point: 0x%x\n",
                      __FUNCTION__, asoc, ctsn, asoc->adv_peer_ack_point);

    /* See if all chunks are acked.
     * Make sure the empty queue handler will get run later.
     */
    q->empty = skb_queue_empty(&q->out) && list_empty(&q->retransmit);
    q->empty = skb_queue_empty(&q->out) && skb_queue_empty(&q->control) &&
            list_empty(&q->retransmit);

    if (!q->empty)
        goto finish;
...
...
@@ -1218,6 +1219,12 @@ static void sctp_check_transmitted(struct sctp_outq *q,
        tchunk = list_entry(lchunk, struct sctp_chunk,
                            transmitted_list);

        if (sctp_chunk_abandoned(tchunk)) {
            /* Move the chunk to abandoned list. */
            sctp_insert_list(&q->abandoned, lchunk);
            continue;
        }

        tsn = ntohl(tchunk->subh.data_hdr->tsn);
        if (sctp_acked(sack, tsn)) {
/* If this queue is the retransmit queue, the
...
...
@@ -1599,3 +1606,123 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
pass:
    return 1;
}

static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
                                    int nskips, __u16 stream)
{
    int i;

    for (i = 0; i < nskips; i++) {
        if (skiplist[i].stream == stream)
            return i;
    }
    return i;
}

/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
    struct sctp_association *asoc = q->asoc;
    struct sctp_chunk *ftsn_chunk = NULL;
    struct sctp_fwdtsn_skip ftsn_skip_arr[10];
    int nskips = 0;
    int skip_pos = 0;
    __u32 tsn;
    struct sctp_chunk *chunk;
    struct list_head *lchunk, *temp;
/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
* received SACK.
*
* If (Advanced.Peer.Ack.Point < SackCumAck), then update
* Advanced.Peer.Ack.Point to be equal to SackCumAck.
*/
    if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
        asoc->adv_peer_ack_point = ctsn;
/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
* locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
* the chunk next in the out-queue space is marked as "abandoned" as
* shown in the following example:
*
* Assuming that a SACK arrived with the Cumulative TSN ACK 102
* and the Advanced.Peer.Ack.Point is updated to this value:
*
* out-queue at the end of ==> out-queue after Adv.Ack.Point
* normal SACK processing local advancement
* ... ...
* Adv.Ack.Pt-> 102 acked 102 acked
* 103 abandoned 103 abandoned
* 104 abandoned Adv.Ack.P-> 104 abandoned
* 105 105
* 106 acked 106 acked
* ... ...
*
* In this example, the data sender successfully advanced the
* "Advanced.Peer.Ack.Point" from 102 to 104 locally.
*/
    list_for_each_safe(lchunk, temp, &q->abandoned) {
        chunk = list_entry(lchunk, struct sctp_chunk,
                           transmitted_list);
        tsn = ntohl(chunk->subh.data_hdr->tsn);

        /* Remove any chunks in the abandoned queue that are acked by
         * the ctsn.
         */
        if (TSN_lte(tsn, ctsn)) {
            list_del_init(lchunk);
            if (!chunk->tsn_gap_acked) {
                chunk->transport->flight_size -=
                    sctp_data_size(chunk);
                q->outstanding_bytes -= sctp_data_size(chunk);
            }
            sctp_chunk_free(chunk);
        } else {
            if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
                asoc->adv_peer_ack_point = tsn;
                if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
                    continue;
                skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
                        nskips,
                        chunk->subh.data_hdr->stream);
                ftsn_skip_arr[skip_pos].stream =
                    chunk->subh.data_hdr->stream;
                ftsn_skip_arr[skip_pos].ssn =
                    chunk->subh.data_hdr->ssn;
                if (skip_pos == nskips)
                    nskips++;
                if (nskips == 10)
                    break;
            } else
                break;
        }
    }
/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
* is greater than the Cumulative TSN ACK carried in the received
* SACK, the data sender MUST send the data receiver a FORWARD TSN
* chunk containing the latest value of the
* "Advanced.Peer.Ack.Point".
*
* C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
* list each stream and sequence number in the forwarded TSN. This
* information will enable the receiver to easily find any
* stranded TSN's waiting on stream reorder queues. Each stream
* SHOULD only be reported once; this means that if multiple
* abandoned messages occur in the same stream then only the
* highest abandoned stream sequence number is reported. If the
* total size of the FORWARD TSN does NOT fit in a single MTU then
* the sender of the FORWARD TSN SHOULD lower the
* Advanced.Peer.Ack.Point to the last TSN that will fit in a
* single MTU.
*/
    if (asoc->adv_peer_ack_point > ctsn)
        ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
                                      nskips, &ftsn_skip_arr[0]);

    if (ftsn_chunk) {
        __skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
        SCTP_INC_STATS(SctpOutCtrlChunks);
    }
}
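Note: for readers following rules C1/C2 in the comments above, advancement stops at the first abandoned TSN that is neither covered by the cumulative ack nor contiguous with the current Advanced.Peer.Ack.Point. A simplified, standalone sketch of that rule over a sorted array of abandoned TSNs (hypothetical helper, serial-number wraparound and SACK gaps ignored for brevity):

static __u32 advance_peer_ack_point(__u32 adv, __u32 ctsn,
                                    const __u32 *abandoned, int n)
{
    int i;

    if (adv < ctsn)                 /* C1: catch up to the cumulative ack */
        adv = ctsn;
    for (i = 0; i < n; i++) {       /* C2: extend over contiguous abandoned TSNs */
        if (abandoned[i] <= ctsn)
            continue;               /* already covered by the cumulative ack */
        if (abandoned[i] == adv + 1)
            adv = abandoned[i];
        else
            break;
    }
    return adv;
}

With the example from the comment (ctsn 102, abandoned TSNs 103 and 104), this advances the ack point to 104.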
net/sctp/protocol.c    View file @ 9b10f4ba
...
...
@@ -1004,7 +1004,9 @@ __init int sctp_init(void)
        goto err_init_mibs;

    /* Initialize proc fs directory. */
    sctp_proc_init();
    status = sctp_proc_init();
    if (status)
        goto err_init_proc;

    /* Initialize object count debugging. */
    sctp_dbg_objcnt_init();
...
...
@@ -1127,6 +1129,9 @@ __init int sctp_init(void)
    /* Disable ADDIP by default. */
    sctp_addip_enable = 0;

    /* Enable PR-SCTP by default. */
    sctp_prsctp_enable = 1;

    sctp_sysctl_register();

    INIT_LIST_HEAD(&sctp_address_families);
...
...
@@ -1171,6 +1176,7 @@ __init int sctp_init(void)
                 sizeof(struct sctp_hashbucket)));
err_ahash_alloc:
    sctp_dbg_objcnt_exit();
err_init_proc:
    sctp_proc_exit();
    cleanup_sctp_mibs();
err_init_mibs:
...
net/sctp/sm_make_chunk.c    View file @ 9b10f4ba
...
...
@@ -6,10 +6,6 @@
*
* This file is part of the SCTP kernel reference Implementation
*
* This file includes part of the implementation of the add-IP extension,
* based on <draft-ietf-tsvwg-addip-sctp-02.txt> June 29, 2001,
* for the SCTP kernel reference Implementation.
*
* These functions work with the state functions in sctp_sm_statefuns.c
* to implement the state operations. These functions implement the
* steps which require modifying existing data structures.
...
...
@@ -89,11 +85,13 @@ int sctp_chunk_iif(const struct sctp_chunk *chunk)
* Note 2: The ECN capable field is reserved for future use of
* Explicit Congestion Notification.
*/
static const sctp_ecn_capable_param_t ecap_param = {
    {
        SCTP_PARAM_ECN_CAPABLE,
        __constant_htons(sizeof(sctp_ecn_capable_param_t)),
    }
static const struct sctp_paramhdr ecap_param = {
    SCTP_PARAM_ECN_CAPABLE,
    __constant_htons(sizeof(struct sctp_paramhdr)),
};
static const struct sctp_paramhdr prsctp_param = {
    SCTP_PARAM_FWD_TSN_SUPPORT,
    __constant_htons(sizeof(struct sctp_paramhdr)),
};
/* A helper to initialize to initialize an op error inside a
...
...
@@ -196,6 +194,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
    chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
    chunksize += sizeof(ecap_param);
    if (sctp_prsctp_enable)
        chunksize += sizeof(prsctp_param);
    chunksize += vparam_len;
/* RFC 2960 3.3.2 Initiation (INIT) (1)
...
...
@@ -232,6 +232,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
    sctp_addto_chunk(retval, num_types * sizeof(__u16), &types);
    sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);
    if (sctp_prsctp_enable)
        sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
nodata:
    if (addrs.v)
        kfree(addrs.v);
...
...
@@ -278,6 +280,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
    if (asoc->peer.ecn_capable)
        chunksize += sizeof(ecap_param);

    /* Tell peer that we'll do PR-SCTP only if peer advertised. */
    if (asoc->peer.prsctp_capable)
        chunksize += sizeof(prsctp_param);

    /* Now allocate and fill out the chunk. */
    retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
    if (!retval)
...
@@ -293,6 +299,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
    sctp_addto_chunk(retval, cookie_len, cookie);
    if (asoc->peer.ecn_capable)
        sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);
    if (asoc->peer.prsctp_capable)
        sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);

    /* We need to remove the const qualifier at this point. */
    retval->asoc = (struct sctp_association *) asoc;
...
@@ -1286,6 +1294,9 @@ sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
	/* Save the raw address list length in the cookie. */
	cookie->c.raw_addr_list_len = addrs_len;

+	/* Remember PR-SCTP capability. */
+	cookie->c.prsctp_capable = asoc->peer.prsctp_capable;
+
	/* Set an expiration time for the cookie.  */
	do_gettimeofday(&cookie->c.expiration);
	TIMEVAL_ADD(asoc->cookie_life, cookie->c.expiration);
...
...
@@ -1442,6 +1453,8 @@ struct sctp_association *sctp_unpack_cookie(
	retval->next_tsn = retval->c.initial_tsn;
	retval->ctsn_ack_point = retval->next_tsn - 1;
	retval->addip_serial = retval->c.initial_tsn;
+	retval->adv_peer_ack_point = retval->ctsn_ack_point;
+	retval->peer.prsctp_capable = retval->c.prsctp_capable;

	/* The INIT stuff will be done by the side effects.  */
	return retval;
...
...
@@ -1653,6 +1666,10 @@ static int sctp_verify_param(const struct sctp_association *asoc,
	case SCTP_PARAM_HOST_NAME_ADDRESS:
		/* Tell the peer, we won't support this param.  */
		return sctp_process_hn_param(asoc, param, chunk, err_chunk);
+	case SCTP_PARAM_FWD_TSN_SUPPORT:
+		if (sctp_prsctp_enable)
+			break;
+		/* Fall Through */
	default:
		SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
				  ntohs(param.p->type), cid);
...
...
@@ -1968,6 +1985,12 @@ int sctp_process_param(struct sctp_association *asoc, union sctp_params param,
		asoc->peer.ecn_capable = 1;
		break;

+	case SCTP_PARAM_FWD_TSN_SUPPORT:
+		if (sctp_prsctp_enable) {
+			asoc->peer.prsctp_capable = 1;
+			break;
+		}
+		/* Fall Through */
	default:
/* Any unrecognized parameters should have been caught
* and handled by sctp_verify_param() which should be
...
...
@@ -2622,3 +2645,38 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
	return retval;
}

+/* Make a FWD TSN chunk. */
+struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
+				    __u32 new_cum_tsn, size_t nstreams,
+				    struct sctp_fwdtsn_skip *skiplist)
+{
+	struct sctp_chunk *retval = NULL;
+	struct sctp_fwdtsn_chunk *ftsn_chunk;
+	struct sctp_fwdtsn_hdr ftsn_hdr;
+	struct sctp_fwdtsn_skip skip;
+	size_t hint;
+	int i;
+
+	hint = (nstreams + 1) * sizeof(__u32);
+
+	/* Maybe set the T-bit if we have no association. */
+	retval = sctp_make_chunk(asoc, SCTP_CID_FWD_TSN, 0, hint);
+
+	if (!retval)
+		return NULL;
+
+	ftsn_chunk = (struct sctp_fwdtsn_chunk *)retval->subh.fwdtsn_hdr;
+
+	ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
+	retval->subh.fwdtsn_hdr = sctp_addto_chunk(retval, sizeof(ftsn_hdr),
+						   &ftsn_hdr);
+
+	for (i = 0; i < nstreams; i++) {
+		skip.stream = skiplist[i].stream;
+		skip.ssn = skiplist[i].ssn;
+		sctp_addto_chunk(retval, sizeof(skip), &skip);
+	}
+
+	return retval;
+}
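To make the helper's calling convention concrete, here is a small illustrative sketch. This is not code from this commit: the variable names and values are assumptions, and the real PR-SCTP sender logic that decides when to emit the chunk lives in net/sctp/outqueue.c. Note that the skip entries are copied verbatim into the chunk, so the caller supplies them already in network byte order.

/* Illustrative only: suppose stream 1 has abandoned messages up to
 * SSN 7 and stream 3 up to SSN 2, and new_cum_tsn is the highest
 * abandoned TSN.
 */
struct sctp_fwdtsn_skip skiplist[2];
struct sctp_chunk *ftsn;

skiplist[0].stream = htons(1);
skiplist[0].ssn    = htons(7);
skiplist[1].stream = htons(3);
skiplist[1].ssn    = htons(2);

ftsn = sctp_make_fwdtsn(asoc, new_cum_tsn, 2, skiplist);
/* On success the chunk is ready to be queued on the association's
 * output path; on failure NULL is returned and nothing is sent.
 */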
net/sctp/sm_sideeffect.c
View file @
9b10f4ba
...
...
@@ -579,7 +579,7 @@ static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
/* Helper function to process the process SACK command.  */
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
				 struct sctp_association *asoc,
-				 sctp_sackhdr_t *sackh)
+				 struct sctp_sackhdr *sackh)
{
	int err;
...
...
@@ -729,6 +729,19 @@ static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
	}
}

+/* Process variable FWDTSN chunk information. */
+static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
+				    struct sctp_chunk *chunk)
+{
+	struct sctp_fwdtsn_skip *skip;
+
+	/* Walk through all the skipped SSNs */
+	sctp_walk_fwdtsn(skip, chunk) {
+		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
+	}
+
+	return;
+}
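sctp_walk_fwdtsn() is a small iteration macro provided alongside this code in the SCTP headers. Conceptually it visits each 4-byte skip entry that follows the FORWARD TSN header, roughly like the hand-written loop below; this is only a sketch under the assumption that body_len is the chunk length minus the chunk header, and the real macro may differ in detail.

struct sctp_fwdtsn_skip *pos =
	(struct sctp_fwdtsn_skip *)(chunk->subh.fwdtsn_hdr + 1);

while ((char *)(pos + 1) <= (char *)chunk->subh.fwdtsn_hdr + body_len) {
	sctp_ulpq_skip(ulpq, ntohs(pos->stream), ntohs(pos->ssn));
	pos++;
}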
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
...
...
@@ -903,7 +916,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
	struct timer_list *timer;
	unsigned long timeout;
	struct sctp_transport *t;
-	sctp_sackhdr_t sackh;
+	struct sctp_sackhdr sackh;
	int local_cork = 0;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
...
...
@@ -962,6 +975,18 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
			sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32);
			break;

+		case SCTP_CMD_REPORT_FWDTSN:
+			/* Move the Cumulattive TSN Ack ahead. */
+			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
+
+			/* Abort any in progress partial delivery. */
+			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+			break;
+
+		case SCTP_CMD_PROCESS_FWDTSN:
+			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr);
+			break;
+
		case SCTP_CMD_GEN_SACK:
/* Generate a Selective ACK.
* The argument tells us whether to just count
...
...
net/sctp/sm_statefuns.c
View file @
9b10f4ba
...
...
@@ -3204,6 +3204,143 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
	return SCTP_DISPOSITION_DISCARD;
}

+/*
+ * PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP
+ *
+ * When a FORWARD TSN chunk arrives, the data receiver MUST first update
+ * its cumulative TSN point to the value carried in the FORWARD TSN
+ * chunk, and then MUST further advance its cumulative TSN point locally
+ * if possible.
+ * After the above processing, the data receiver MUST stop reporting any
+ * missing TSNs earlier than or equal to the new cumulative TSN point.
+ *
+ * Verification Tag:  8.5 Verification Tag [Normal verification]
+ *
+ * The return value is the disposition of the chunk.
+ */
+sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
+				       const struct sctp_association *asoc,
+				       const sctp_subtype_t type,
+				       void *arg,
+				       sctp_cmd_seq_t *commands)
+{
+	struct sctp_chunk *chunk = arg;
+	struct sctp_fwdtsn_hdr *fwdtsn_hdr;
+	__u16 len;
+	__u32 tsn;
+
+	/* RFC 2960 8.5 Verification Tag
+	 *
+	 * When receiving an SCTP packet, the endpoint MUST ensure
+	 * that the value in the Verification Tag field of the
+	 * received SCTP packet matches its own Tag.
+	 */
+	if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL());
+		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+	}
+
+	fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
+	chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
+	len = ntohs(chunk->chunk_hdr->length);
+	len -= sizeof(struct sctp_chunkhdr);
+	skb_pull(chunk->skb, len);
+
+	tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
+	SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __FUNCTION__, tsn);
+
+	/* The TSN is too high--silently discard the chunk and count on it
+	 * getting retransmitted later.
+	 */
+	if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
+		goto discard_noforce;
+
+	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
+	if (len > sizeof(struct sctp_fwdtsn_hdr))
+		sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
+				SCTP_CHUNK(chunk));
+
+	/* Count this as receiving DATA. */
+	if (asoc->autoclose) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+				SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
+	}
+
+	/* FIXME: For now send a SACK, but DATA processing may
+	 * send another.
+	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
+	/* Start the SACK timer. */
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+			SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
+
+	return SCTP_DISPOSITION_CONSUME;
+
+discard_noforce:
+	return SCTP_DISPOSITION_DISCARD;
+}
+
+sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(const struct sctp_endpoint *ep,
+					    const struct sctp_association *asoc,
+					    const sctp_subtype_t type,
+					    void *arg,
+					    sctp_cmd_seq_t *commands)
+{
+	struct sctp_chunk *chunk = arg;
+	struct sctp_fwdtsn_hdr *fwdtsn_hdr;
+	__u16 len;
+	__u32 tsn;
+
+	/* RFC 2960 8.5 Verification Tag
+	 *
+	 * When receiving an SCTP packet, the endpoint MUST ensure
+	 * that the value in the Verification Tag field of the
+	 * received SCTP packet matches its own Tag.
+	 */
+	if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL());
+		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+	}
+
+	fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
+	chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
+	len = ntohs(chunk->chunk_hdr->length);
+	len -= sizeof(struct sctp_chunkhdr);
+	skb_pull(chunk->skb, len);
+
+	tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
+	SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __FUNCTION__, tsn);
+
+	/* The TSN is too high--silently discard the chunk and count on it
+	 * getting retransmitted later.
+	 */
+	if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
+		goto gen_shutdown;
+
+	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
+	if (len > sizeof(struct sctp_fwdtsn_hdr))
+		sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
+				SCTP_CHUNK(chunk));
+
+	/* Go a head and force a SACK, since we are shutting down. */
+gen_shutdown:
+	/* Implementor's Guide.
+	 *
+	 * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
+	 * respond to each received packet containing one or more DATA chunk(s)
+	 * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
+	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
+	sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+
+	return SCTP_DISPOSITION_CONSUME;
+}
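As a quick sanity check on the length handling shared by both handlers above: the chunk header is 4 bytes, the FORWARD TSN header is a single 4-byte new_cum_tsn, and each skip entry is 4 bytes (stream plus SSN). So once len has had sizeof(struct sctp_chunkhdr) subtracted, skip entries are present exactly when len exceeds sizeof(struct sctp_fwdtsn_hdr), and their count could be recovered as in this illustrative line (not code from this commit):

size_t nskips = (len - sizeof(struct sctp_fwdtsn_hdr)) /
		sizeof(struct sctp_fwdtsn_skip);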
/*
* Process an unknown chunk.
*
...
...
@@ -4671,28 +4808,20 @@ struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
	/* Make a transport for the bucket, Eliza... */
	transport = sctp_transport_new(sctp_source(chunk), GFP_ATOMIC);
	if (!transport)
		goto nomem;

-	/* Allocate a new packet for sending the response. */
-	packet = t_new(struct sctp_packet, GFP_ATOMIC);
-	if (!packet)
-		goto nomem_packet;
-
	/* Cache a route for the transport with the chunk's destination as
	 * the source address.
	 */
	sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
			     sctp_sk(sctp_get_ctl_sock()));

-	packet = sctp_packet_init(packet, transport, sport, dport);
-	packet = sctp_packet_config(packet, vtag, 0, NULL);
+	packet = sctp_packet_init(&transport->packet, transport, sport, dport);
+	packet = sctp_packet_config(packet, vtag, 0);

	return packet;

-nomem_packet:
-	sctp_transport_free(transport);
nomem:
	return NULL;
}
...
...
@@ -4701,7 +4830,6 @@ struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
void sctp_ootb_pkt_free(struct sctp_packet *packet)
{
	sctp_transport_free(packet->transport);
-	sctp_packet_free(packet);
}
/* Send a stale cookie error when a invalid COOKIE ECHO chunk is found */
...
...
net/sctp/sm_statetable.c
View file @
9b10f4ba
/* SCTP kernel reference Implementation
- * (C) Copyright IBM Corp. 2001, 2003
+ * (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
...
...
@@ -40,6 +40,7 @@
* Hui Huang <hui.huang@nokia.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
...
...
@@ -50,7 +51,7 @@
#include <net/sctp/sm.h>

static const sctp_sm_table_entry_t bug = {
-	.fn = sctp_sf_bug,
+	.fn = sctp_sf_bug,
	.name = "sctp_sf_bug"
};
...
...
@@ -73,7 +74,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
		return sctp_chunk_event_lookup(event_subtype.chunk, state);
		break;
	case SCTP_EVENT_T_TIMEOUT:
-		DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
+		DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
			  timeout_event_table);
		break;
...
...
@@ -486,6 +487,34 @@ const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_TYPES][
	TYPE_SCTP_ASCONF_ACK,
}; /*state_fn_t addip_chunk_event_table[][] */

+#define TYPE_SCTP_FWD_TSN { \
+	/* SCTP_STATE_EMPTY */ \
+	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	/* SCTP_STATE_CLOSED */ \
+	{.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \
+	/* SCTP_STATE_COOKIE_WAIT */ \
+	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	/* SCTP_STATE_COOKIE_ECHOED */ \
+	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	/* SCTP_STATE_ESTABLISHED */ \
+	{.fn = sctp_sf_eat_fwd_tsn, .name = "sctp_sf_eat_fwd_tsn"}, \
+	/* SCTP_STATE_SHUTDOWN_PENDING */ \
+	{.fn = sctp_sf_eat_fwd_tsn, .name = "sctp_sf_eat_fwd_tsn"}, \
+	/* SCTP_STATE_SHUTDOWN_SENT */ \
+	{.fn = sctp_sf_eat_fwd_tsn_fast, .name = "sctp_sf_eat_fwd_tsn_fast"}, \
+	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+} /* TYPE_SCTP_FWD_TSN */
+
+/* The primary index for this table is the chunk type.
+ * The secondary index for this table is the state.
+ */
+const sctp_sm_table_entry_t
+prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = {
+	TYPE_SCTP_FWD_TSN,
+}; /*state_fn_t prsctp_chunk_event_table[][] */
+
static const sctp_sm_table_entry_t chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
	/* SCTP_STATE_EMPTY */
...
...
@@ -924,6 +953,11 @@ const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
	if (cid >= 0 && cid <= SCTP_CID_BASE_MAX)
		return &chunk_event_table[cid][state];

+	if (sctp_prsctp_enable) {
+		if (cid == SCTP_CID_FWD_TSN)
+			return &prsctp_chunk_event_table[0][state];
+	}
+
	if (sctp_addip_enable) {
		if (cid == SCTP_CID_ASCONF)
			return &addip_chunk_event_table[0][state];
...
...
net/sctp/socket.c
View file @
9b10f4ba
...
...
@@ -1948,6 +1948,9 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, char *optval, int optlen)
 */
static int sctp_setsockopt_maxseg(struct sock *sk, char *optval, int optlen)
{
+	struct sctp_association *asoc;
+	struct list_head *pos;
+	struct sctp_opt *sp = sctp_sk(sk);
	int val;

	if (optlen < sizeof(int))
...
...
@@ -1956,7 +1959,15 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char *optval, int optlen)
		return -EFAULT;
	if ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))
		return -EINVAL;
-	sctp_sk(sk)->user_frag = val;
+	sp->user_frag = val;
+
+	if (val) {
+		/* Update the frag_point of the existing associations. */
+		list_for_each(pos, &(sp->ep->asocs)) {
+			asoc = list_entry(pos, struct sctp_association, asocs);
+			asoc->frag_point = sctp_frag_point(sp, asoc->pmtu);
+		}
+	}

	return 0;
}
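From user space this path is exercised through the SCTP_MAXSEG socket option. A minimal sketch, assuming the usual SCTP sockets-API headers and constants (error handling omitted); with the change above, every association already hanging off the socket has its frag_point recomputed immediately rather than lazily:

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <sys/socket.h>

/* Ask the kernel to cap outgoing DATA chunks at 'bytes'. */
static int set_sctp_maxseg(int fd, int bytes)
{
	return setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG,
			  &bytes, sizeof(bytes));
}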
...
...
@@ -2531,10 +2542,6 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char *optval,
	status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
	status.sstat_instrms = asoc->c.sinit_max_instreams;
	status.sstat_outstrms = asoc->c.sinit_num_ostreams;
-	/* Just in time frag_point update. */
-	if (sctp_sk(sk)->user_frag)
-		asoc->frag_point = min_t(int, asoc->frag_point,
-					 sctp_sk(sk)->user_frag);
	status.sstat_fragmentation_point = asoc->frag_point;
	status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
	memcpy(&status.sstat_primary.spinfo_address,
...
...
net/sctp/sysctl.c
View file @
9b10f4ba
/* SCTP kernel reference Implementation
- * Copyright (c) 2002 International Business Machines Corp.
+ * (C) Copyright IBM Corp. 2002, 2004
* Copyright (c) 2002 Intel Corp.
*
* This file is part of the SCTP kernel reference Implementation
...
...
@@ -35,6 +35,7 @@
* Jon Grimm <jgrimm@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
...
...
@@ -170,6 +171,14 @@ static ctl_table sctp_table[] = {
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
+	{
+		.ctl_name	= NET_SCTP_PRSCTP_ENABLE,
+		.procname	= "prsctp_enable",
+		.data		= &sctp_prsctp_enable,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec
+	},
	{ .ctl_name = 0 }
};
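The new entry surfaces as /proc/sys/net/sctp/prsctp_enable, mirroring the existing knobs in this table. A minimal user-space sketch for toggling it, purely illustrative:

#include <stdio.h>

/* Write 1 to enable PR-SCTP negotiation, 0 to disable it. */
static int set_prsctp_enable(int on)
{
	FILE *f = fopen("/proc/sys/net/sctp/prsctp_enable", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", on ? 1 : 0);
	return fclose(f);
}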
...
...
net/sctp/transport.c
View file @
9b10f4ba
...
...
@@ -118,7 +118,6 @@ struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);
-	sctp_packet_init(&peer->packet, peer, 0, 0);

	/* Set up the retransmission timer. */
	init_timer(&peer->T3_rtx_timer);
...
...
@@ -169,6 +168,8 @@ void sctp_transport_destroy(struct sctp_transport *transport)
	if (transport->asoc)
		sctp_association_put(transport->asoc);

+	sctp_packet_free(&transport->packet);
+
	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
...
...
net/sctp/tsnmap.c
View file @
9b10f4ba
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
*
* This file is part of the SCTP kernel reference Implementation
...
...
@@ -36,6 +36,7 @@
* La Monte H.P. Yarroll <piggy@acm.org>
* Jon Grimm <jgrimm@us.ibm.com>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
...
...
@@ -253,6 +254,40 @@ int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
	return ended;
}

+/* Mark this and any lower TSN as seen.  */
+void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn)
+{
+	__s32 gap;
+
+	/* Vacuously mark any TSN which precedes the map base or
+	 * exceeds the end of the map.
+	 */
+	if (TSN_lt(tsn, map->base_tsn))
+		return;
+	if (!TSN_lt(tsn, map->base_tsn + map->len + map->len))
+		return;
+
+	/* Bump the max.  */
+	if (TSN_lt(map->max_tsn_seen, tsn))
+		map->max_tsn_seen = tsn;
+
+	/* Assert: TSN is in range.  */
+	gap = tsn - map->base_tsn + 1;
+
+	/* Mark the TSNs as received.  */
+	if (gap <= map->len)
+		memset(map->tsn_map, 0x01, gap);
+	else {
+		memset(map->tsn_map, 0x01, map->len);
+		memset(map->overflow_map, 0x01, (gap - map->len));
+	}
+
+	/* Go fixup any internal TSN mapping variables including
+	 * cumulative_tsn_ack_point.
+	 */
+	sctp_tsnmap_update(map);
+}
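A worked example of the marking above, with illustrative numbers:

/* With map->base_tsn = 100 and map->len = 128, skipping to tsn = 105
 * gives gap = 105 - 100 + 1 = 6, so the first 6 bytes of tsn_map are
 * set and sctp_tsnmap_update() can advance the cumulative TSN ack
 * point to 105.  Skipping to tsn = 300 (still inside base_tsn + 2*len
 * = 356) gives gap = 201 > len, so all of tsn_map plus the first
 * 201 - 128 = 73 bytes of overflow_map are marked instead.
 */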
/********************************************************************
* 2nd Level Abstractions
********************************************************************/
...
...
net/sctp/ulpevent.c
View file @
9b10f4ba
...
...
@@ -839,6 +839,9 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
	sctp_ulpevent_set_owner(event, asoc);
	sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));

+	if (!skb->data_len)
+		return;
+
/* Note: Not clearing the entire event struct as this is just a
* fragment of the real event. However, we still need to do rwnd
* accounting.
...
...
@@ -867,6 +870,9 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
	skb = sctp_event2skb(event);
	sctp_assoc_rwnd_increase(event->asoc, skb_headlen(skb));

+	if (!skb->data_len)
+		goto done;
+
	/* Don't forget the fragments. */
	for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
/* NOTE: skb_shinfos are recursive. Although IP returns
...
...
@@ -875,6 +881,8 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
		 */
		sctp_ulpevent_release_data(sctp_skb2event(frag));
	}

+done:
	sctp_ulpevent_release_owner(event);
}
...
...
net/sctp/ulpqueue.c
View file @
9b10f4ba
...
...
@@ -680,6 +680,71 @@ static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
	return event;
}

+/* Helper function to gather skbs that have possibly become
+ * ordered by forward tsn skipping their dependencies.
+ */
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+{
+	struct sk_buff *pos, *tmp;
+	struct sctp_ulpevent *cevent;
+	struct sctp_ulpevent *event = NULL;
+	struct sctp_stream *in;
+	struct sk_buff_head temp;
+	__u16 csid, cssn;
+
+	in = &ulpq->asoc->ssnmap->in;
+
+	/* We are holding the chunks by stream, by SSN.  */
+	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
+		cevent = (struct sctp_ulpevent *) pos->cb;
+		csid = cevent->stream;
+		cssn = cevent->ssn;
+		if (cssn != sctp_ssn_peek(in, csid))
+			break;
+
+		/* Found it, so mark in the ssnmap. */
+		sctp_ssn_next(in, csid);
+
+		__skb_unlink(pos, pos->list);
+		if (!event) {
+			/* Create a temporary list to collect chunks on.  */
+			event = sctp_skb2event(pos);
+			skb_queue_head_init(&temp);
+			__skb_queue_tail(&temp, sctp_event2skb(event));
+		} else {
+			/* Attach all gathered skbs to the event.  */
+			__skb_queue_tail(sctp_event2skb(event)->list, pos);
+		}
+	}
+
+	/* Send event to the ULP.  */
+	if (event)
+		sctp_ulpq_tail_event(ulpq, event);
+}
+
+/* Skip over an SSN. */
+void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
+{
+	struct sctp_stream *in;
+
+	/* Note: The stream ID must be verified before this routine.  */
+	in = &ulpq->asoc->ssnmap->in;
+
+	/* Is this an old SSN?  If so ignore. */
+	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
+		return;
+
+	/* Mark that we are no longer expecting this SSN or lower. */
+	sctp_ssn_skip(in, sid, ssn);
+
+	/* Go find any other chunks that were waiting for
+	 * ordering and deliver them if needed.
+	 */
+	sctp_ulpq_reap_ordered(ulpq);
+
+	return;
+}
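To see how the two helpers cooperate, an illustrative scenario (numbers invented for the example):

/* Stream 2 is waiting for SSN 5 while events with SSNs 6 and 7 already
 * sit in ulpq->lobby.  A FORWARD TSN whose skip list names
 * (stream 2, SSN 5) makes sctp_ssn_skip() move the expected SSN past
 * 5; sctp_ulpq_reap_ordered() then finds 6 and 7 in order, unlinks
 * them from the lobby and hands them to the ULP as one event chain.
 */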
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
...
...