Commit 15931833
authored Mar 19, 2003 by David S. Miller
Merge http://linux-lksctp.bkbits.net/lksctp-2.5
into nuts.ninka.net:/home/davem/src/BK/net-2.5

parents 9ff01f5f 74f53f9a
Showing 21 changed files with 1058 additions and 529 deletions
include/net/sctp/command.h     +8    -8
include/net/sctp/constants.h   +14   -8
include/net/sctp/sctp.h        +13   -36
include/net/sctp/sm.h          +23   -23
include/net/sctp/structs.h     +27   -29
include/net/sctp/user.h        +2    -0
net/sctp/associola.c           +48   -23
net/sctp/bind_addr.c           +1    -1
net/sctp/endpointola.c         +5    -2
net/sctp/ipv6.c                +87   -4
net/sctp/output.c              +60   -43
net/sctp/outqueue.c            +80   -55
net/sctp/protocol.c            +107  -24
net/sctp/sm_make_chunk.c       +24   -30
net/sctp/sm_sideeffect.c       +31   -10
net/sctp/sm_statefuns.c        +16   -18
net/sctp/socket.c              +485  -185
net/sctp/sysctl.c              +1    -1
net/sctp/transport.c           +2    -2
net/sctp/tsnmap.c              +2    -2
net/sctp/ulpqueue.c            +22   -25
include/net/sctp/command.h

@@ -110,13 +110,13 @@ typedef union {
 	sctp_event_timeout_t to;
 	sctp_counter_t counter;
 	void *ptr;
-	sctp_chunk_t *chunk;
-	sctp_association_t *asoc;
+	struct sctp_chunk *chunk;
+	struct sctp_association *asoc;
 	struct sctp_transport *transport;
-	sctp_bind_addr_t *bp;
+	struct sctp_bind_addr *bp;
 	sctp_init_chunk_t *init;
 	struct sctp_ulpevent *ulpevent;
-	sctp_packet_t *packet;
+	struct sctp_packet *packet;
 	sctp_sackhdr_t *sackh;
 } sctp_arg_t;

@@ -158,13 +158,13 @@ SCTP_ARG_CONSTRUCTOR(STATE, sctp_state_t, state)
 SCTP_ARG_CONSTRUCTOR(COUNTER, sctp_counter_t, counter)
 SCTP_ARG_CONSTRUCTOR(TO, sctp_event_timeout_t, to)
 SCTP_ARG_CONSTRUCTOR(PTR, void *, ptr)
-SCTP_ARG_CONSTRUCTOR(CHUNK, sctp_chunk_t *, chunk)
-SCTP_ARG_CONSTRUCTOR(ASOC, sctp_association_t *, asoc)
+SCTP_ARG_CONSTRUCTOR(CHUNK, struct sctp_chunk *, chunk)
+SCTP_ARG_CONSTRUCTOR(ASOC, struct sctp_association *, asoc)
 SCTP_ARG_CONSTRUCTOR(TRANSPORT, struct sctp_transport *, transport)
-SCTP_ARG_CONSTRUCTOR(BA, sctp_bind_addr_t *, bp)
+SCTP_ARG_CONSTRUCTOR(BA, struct sctp_bind_addr *, bp)
 SCTP_ARG_CONSTRUCTOR(PEER_INIT, sctp_init_chunk_t *, init)
 SCTP_ARG_CONSTRUCTOR(ULPEVENT, struct sctp_ulpevent *, ulpevent)
-SCTP_ARG_CONSTRUCTOR(PACKET, sctp_packet_t *, packet)
+SCTP_ARG_CONSTRUCTOR(PACKET, struct sctp_packet *, packet)
 SCTP_ARG_CONSTRUCTOR(SACKH, sctp_sackhdr_t *, sackh)

 typedef struct {
include/net/sctp/constants.h

@@ -210,14 +210,19 @@ typedef enum {
 /* These are values for sk->state.
  * For a UDP-style SCTP socket, the states are defined as follows
  * (at this point of time, may change later after more discussions: FIXME)
- * A socket in SCTP_SS_UNCONNECTED state indicates that it is not willing
- * to accept new associations, but it can initiate the creation of new
- * ones.
- * A socket in SCTP_SS_LISTENING state indicates that it is willing to
+ * - A socket in SCTP_SS_CLOSED state indicates that it is not willing to
+ *   accept new associations, but it can initiate the creation of new ones.
+ * - A socket in SCTP_SS_LISTENING state indicates that it is willing to
  *   accept new associations and can initiate the creation of new ones.
- * A socket in SCTP_SS_ESTABLISHED state indicates that it is a peeled off
+ * - A socket in SCTP_SS_ESTABLISHED state indicates that it is a peeled off
  *   socket with one association.
+ * For a TCP-style SCTP socket, the states are defined as follows
+ * - A socket in SCTP_SS_CLOSED state indicates that it is not willing to
+ *   accept new associations, but it can initiate the creation of new ones.
+ * - A socket in SCTP_SS_LISTENING state indicates that it is willing to
+ *   accept new associations, but cannot initiate the creation of new ones.
+ * - A socket in SCTP_SS_ESTABLISHED state indicates that it has a single
+ *   association in ESTABLISHED state.
  */
 typedef enum {
 	SCTP_SS_CLOSED = TCP_CLOSE,

@@ -345,6 +350,7 @@ typedef enum {
 	SCTP_XMIT_PMTU_FULL,
 	SCTP_XMIT_RWND_FULL,
 	SCTP_XMIT_MUST_FRAG,
+	SCTP_XMIT_NAGLE_DELAY,
 } sctp_xmit_t;

 /* These are the commands for manipulating transports. */
include/net/sctp/sctp.h

@@ -121,9 +121,10 @@
 /*
  * sctp_protocol.c
  */
-extern sctp_protocol_t sctp_proto;
+extern struct sctp_protocol sctp_proto;
 extern struct sock *sctp_get_ctl_sock(void);
-extern int sctp_copy_local_addr_list(sctp_protocol_t *, sctp_bind_addr_t *,
+extern int sctp_copy_local_addr_list(struct sctp_protocol *,
+				     struct sctp_bind_addr *,
 				     sctp_scope_t, int priority, int flags);
 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);

@@ -312,30 +313,21 @@ static inline void sctp_sysctl_unregister(void) { return; }
 #endif

+/* Size of Supported Address Parameter for 'x' address types. */
+#define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
+
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)

 extern int sctp_v6_init(void);
 extern void sctp_v6_exit(void);
-static inline int sctp_ipv6_addr_type(const struct in6_addr *addr)
-{
-	return ipv6_addr_type((struct in6_addr *)addr);
-}
-
-#define SCTP_SAT_LEN (sizeof(sctp_paramhdr_t) + 2 * sizeof(__u16))

 /* Note: These V6 macros are obsolescent. */
 /* Use this macro to enclose code fragments which are V6-dependent. */
 #define SCTP_V6(m...)	m
 #define SCTP_V6_SUPPORT 1

 #else /* #ifdef defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */

-#define sctp_ipv6_addr_type(a) 0
-#define SCTP_SAT_LEN (sizeof(sctp_paramhdr_t) + 1 * sizeof(__u16))
 #define SCTP_V6(m...) /* Do nothing. */
 #undef SCTP_V6_SUPPORT

 static inline int sctp_v6_init(void) { return 0; }
 static inline void sctp_v6_exit(void) { return; }

@@ -348,25 +340,10 @@ static inline sctp_assoc_t sctp_assoc2id(const sctp_association_t *asoc)
 	return (sctp_assoc_t) asoc;
 }

 /* Look up the association by its id.  */
-static inline sctp_association_t *sctp_id2assoc(const struct sock *sk,
-						sctp_assoc_t id)
-{
-	sctp_association_t *asoc = NULL;
-
-	/* First, verify that this is a kernel address. */
-	if (sctp_is_valid_kaddr((unsigned long) id)) {
-		sctp_association_t *temp = (sctp_association_t *) id;
-
-		/* Verify that this _is_ an sctp_association_t
-		 * data structure and if so, that the socket matches.
-		 */
-		if ((SCTP_ASSOC_EYECATCHER == temp->eyecatcher) &&
-		    (temp->base.sk == sk))
-			asoc = temp;
-	}
-
-	return asoc;
-}
+sctp_association_t *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);

 /* A macro to walk a list of skbs. */
 #define sctp_skb_for_each(pos, head, tmp) \

@@ -494,7 +471,7 @@ extern void sctp_put_port(struct sock *sk);
 /* Static inline functions. */

 /* Return the SCTP protocol structure. */
-static inline sctp_protocol_t *sctp_get_protocol(void)
+static inline struct sctp_protocol *sctp_get_protocol(void)
 {
 	return &sctp_proto;
 }

@@ -523,21 +500,21 @@ static inline int ipver2af(__u8 ipver)
 /* This is the hash function for the SCTP port hash table. */
 static inline int sctp_phashfn(__u16 lport)
 {
-	sctp_protocol_t *sctp_proto = sctp_get_protocol();
+	struct sctp_protocol *sctp_proto = sctp_get_protocol();
 	return (lport & (sctp_proto->port_hashsize - 1));
 }

 /* This is the hash function for the endpoint hash table. */
 static inline int sctp_ep_hashfn(__u16 lport)
 {
-	sctp_protocol_t *sctp_proto = sctp_get_protocol();
+	struct sctp_protocol *sctp_proto = sctp_get_protocol();
 	return (lport & (sctp_proto->ep_hashsize - 1));
 }

 /* This is the hash function for the association hash table. */
 static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport)
 {
-	sctp_protocol_t *sctp_proto = sctp_get_protocol();
+	struct sctp_protocol *sctp_proto = sctp_get_protocol();
 	int h = (lport << 16) + rport;
 	h ^= h >> 8;
 	return (h & (sctp_proto->assoc_hashsize - 1));

@@ -549,7 +526,7 @@ static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport)
  */
 static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
 {
-	sctp_protocol_t *sctp_proto = sctp_get_protocol();
+	struct sctp_protocol *sctp_proto = sctp_get_protocol();
 	int h = (lport << 16) + rport;
 	h ^= vtag;
 	return (h & (sctp_proto->assoc_hashsize - 1));
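Aside, not part of the commit: the port, endpoint and association hash helpers above all follow the same pattern of mixing the key and masking it with a power-of-two table size taken from struct sctp_protocol. A minimal user-space sketch of the vtag variant; the bucket count here is an arbitrary stand-in for assoc_hashsize.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the sctp_vtag_hashfn() mixing step: combine local port,
 * remote port and verification tag, then mask with a power-of-two
 * bucket count (assoc_hashsize in the kernel; 1024 is made up here).
 */
static int vtag_hashfn(uint16_t lport, uint16_t rport, uint32_t vtag,
                       unsigned int hashsize)
{
        unsigned int h = ((unsigned int)lport << 16) + rport;

        h ^= vtag;
        return (int)(h & (hashsize - 1));  /* hashsize must be a power of two */
}

int main(void)
{
        printf("bucket = %d\n", vtag_hashfn(8080, 5000, 0xdeadbeefu, 1024));
        return 0;
}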
include/net/sctp/sm.h

@@ -313,18 +313,18 @@ void sctp_generate_t3_rtx_event(unsigned long peer);
 void sctp_generate_heartbeat_event(unsigned long peer);

 sctp_sackhdr_t *sctp_sm_pull_sack(sctp_chunk_t *);
-sctp_packet_t *sctp_abort_pkt_new(const sctp_endpoint_t *ep,
-				  const sctp_association_t *asoc,
-				  sctp_chunk_t *chunk,
+struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *,
+				  const struct sctp_association *,
+				  struct sctp_chunk *chunk,
 				  const void *payload,
 				  size_t paylen);
-sctp_packet_t *sctp_ootb_pkt_new(const sctp_association_t *asoc,
-				 const sctp_chunk_t *chunk);
-void sctp_ootb_pkt_free(sctp_packet_t *packet);
+struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *,
+				 const struct sctp_chunk *);
+void sctp_ootb_pkt_free(struct sctp_packet *);
 sctp_cookie_param_t *
-sctp_pack_cookie(const sctp_endpoint_t *, const sctp_association_t *,
-		 const sctp_chunk_t *, int *cookie_len,
+sctp_pack_cookie(const struct sctp_endpoint *, const struct sctp_association *,
+		 const struct sctp_chunk *, int *cookie_len,
 		 const __u8 *, int addrs_len);
 sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *,
 				       const sctp_association_t *,
include/net/sctp/structs.h

@@ -86,10 +86,8 @@ struct sctp_opt;
 struct sctp_endpoint_common;
 struct sctp_ssnmap;

-typedef struct sctp_protocol sctp_protocol_t;
 typedef struct sctp_endpoint sctp_endpoint_t;
 typedef struct sctp_association sctp_association_t;
-typedef struct sctp_packet sctp_packet_t;
 typedef struct sctp_chunk sctp_chunk_t;
 typedef struct sctp_bind_addr sctp_bind_addr_t;
 typedef struct sctp_endpoint_common sctp_endpoint_common_t;

@@ -262,6 +260,9 @@ struct sctp_pf {
 		     const union sctp_addr *,
 		     struct sctp_opt *);
 	int  (*bind_verify) (struct sctp_opt *, union sctp_addr *);
+	int  (*supported_addrs)(const struct sctp_opt *, __u16 *);
+	struct sock *(*create_accept_sk) (struct sock *sk,
+					  struct sctp_association *asoc);
 	struct sctp_af *af;
 };

@@ -366,8 +367,6 @@ typedef struct sctp_signed_cookie {
 	sctp_cookie_t c;
 } sctp_signed_cookie_t;

-
-
 /* This is another convenience type to allocate memory for address
  * params for the maximum size and pass such structures around
  * internally.

@@ -604,26 +603,26 @@ struct sctp_packet {
 typedef int (sctp_outq_thandler_t)(struct sctp_outq *, void *);
 typedef int (sctp_outq_ehandler_t)(struct sctp_outq *);
-typedef sctp_packet_t *(sctp_outq_ohandler_init_t)
-	(sctp_packet_t *,
+typedef struct sctp_packet *(sctp_outq_ohandler_init_t)
+	(struct sctp_packet *,
 	 struct sctp_transport *,
 	 __u16 sport,
 	 __u16 dport);
-typedef sctp_packet_t *(sctp_outq_ohandler_config_t)
-	(sctp_packet_t *,
+typedef struct sctp_packet *(sctp_outq_ohandler_config_t)
+	(struct sctp_packet *,
 	 __u32 vtag,
 	 int ecn_capable,
 	 sctp_packet_phandler_t *get_prepend_chunk);
-typedef sctp_xmit_t (sctp_outq_ohandler_t)(sctp_packet_t *,
+typedef sctp_xmit_t (sctp_outq_ohandler_t)(struct sctp_packet *,
 					   sctp_chunk_t *);
-typedef int (sctp_outq_ohandler_force_t)(sctp_packet_t *);
+typedef int (sctp_outq_ohandler_force_t)(struct sctp_packet *);

 sctp_outq_ohandler_init_t    sctp_packet_init;
 sctp_outq_ohandler_config_t  sctp_packet_config;
 sctp_outq_ohandler_t         sctp_packet_append_chunk;
 sctp_outq_ohandler_t         sctp_packet_transmit_chunk;
 sctp_outq_ohandler_force_t   sctp_packet_transmit;
-void sctp_packet_free(sctp_packet_t *);
+void sctp_packet_free(struct sctp_packet *);

 /* This represents a remote transport address.

@@ -789,7 +788,7 @@ struct sctp_transport {
 	struct list_head transmitted;

 	/* We build bundle-able packets for this transport here. */
-	sctp_packet_t packet;
+	struct sctp_packet packet;

 	/* This is the list of transports that have chunks to send. */
 	struct list_head send_ready;

@@ -865,12 +864,11 @@ void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
 struct sctp_outq {
 	sctp_association_t *asoc;

-	/* BUG: This really should be an array of streams.
-	 * This really holds a list of chunks (one stream).
-	 * FIXME: If true, why so?
-	 */
+	/* Data pending that has never been transmitted. */
 	struct sk_buff_head out;

+	unsigned out_qlen;	/* Total length of queued data chunks. */
+
 	/* These are control chunks we want to send. */
 	struct sk_buff_head control;

@@ -885,7 +883,7 @@ struct sctp_outq {
 	struct list_head retransmit;

 	/* Call these functions to send chunks down to the next lower
-	 * layer.  This is always SCTP_packet, but we separate the two
+	 * layer.  This is always sctp_packet, but we separate the two
 	 * structures to make testing simpler.
 	 */
 	sctp_outq_ohandler_init_t	*init_output;

@@ -1098,8 +1096,9 @@ static inline sctp_endpoint_t *sctp_ep(sctp_endpoint_common_t *base)
 }

 /* These are function signatures for manipulating endpoints.  */
-sctp_endpoint_t *sctp_endpoint_new(sctp_protocol_t *, struct sock *, int);
-sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *, sctp_protocol_t *,
+sctp_endpoint_t *sctp_endpoint_new(struct sctp_protocol *, struct sock *, int);
+sctp_endpoint_t *sctp_endpoint_init(struct sctp_endpoint *,
+				    struct sctp_protocol *,
 				    struct sock *, int priority);
 void sctp_endpoint_free(sctp_endpoint_t *);
 void sctp_endpoint_put(sctp_endpoint_t *);

@@ -1111,7 +1110,6 @@ sctp_association_t *sctp_endpoint_lookup_assoc(const sctp_endpoint_t *ep,
 int sctp_endpoint_is_peeled_off(sctp_endpoint_t *,
				const union sctp_addr *);
 sctp_endpoint_t *sctp_endpoint_is_match(sctp_endpoint_t *,
					const union sctp_addr *);
-
 int sctp_has_association(const union sctp_addr *laddr,
			 const union sctp_addr *paddr);

@@ -1587,7 +1585,7 @@ struct sctp_transport *sctp_assoc_lookup_paddr(const sctp_association_t *,
 struct sctp_transport *sctp_assoc_add_peer(sctp_association_t *,
				     const union sctp_addr *address,
				     const int priority);
-void sctp_assoc_control_transport(sctp_association_t *,
+void sctp_assoc_control_transport(struct sctp_association *,
				  struct sctp_transport *,
				  sctp_transport_cmd_t, sctp_sn_error_t);
 struct sctp_transport *sctp_assoc_lookup_tsn(sctp_association_t *, __u32);

@@ -1597,14 +1595,14 @@ struct sctp_transport *sctp_assoc_is_match(sctp_association_t *,
 void sctp_assoc_migrate(sctp_association_t *, struct sock *);
 void sctp_assoc_update(sctp_association_t *dst, sctp_association_t *src);
-__u32 __sctp_association_get_next_tsn(sctp_association_t *);
-__u32 __sctp_association_get_tsn_block(sctp_association_t *, int);
 __u16 __sctp_association_get_next_ssn(sctp_association_t *, __u16 sid);
-void sctp_assoc_sync_pmtu(sctp_association_t *);
-void sctp_assoc_rwnd_increase(sctp_association_t *, int);
-void sctp_assoc_rwnd_decrease(sctp_association_t *, int);
+__u32 sctp_association_get_next_tsn(struct sctp_association *);
+__u32 sctp_association_get_tsn_block(struct sctp_association *, int);
+void sctp_assoc_sync_pmtu(struct sctp_association *);
+void sctp_assoc_rwnd_increase(struct sctp_association *, int);
+void sctp_assoc_rwnd_decrease(struct sctp_association *, int);
+void sctp_assoc_set_primary(struct sctp_association *,
+			    struct sctp_transport *);
 int sctp_assoc_set_bind_addr_from_ep(sctp_association_t *, int);
 int sctp_assoc_set_bind_addr_from_cookie(sctp_association_t *, sctp_cookie_t *,
					 int);
include/net/sctp/user.h

@@ -108,6 +108,8 @@ enum sctp_optname {
 #define SCTP_GET_LOCAL_ADDRS_NUM SCTP_GET_LOCAL_ADDRS_NUM
 	SCTP_GET_LOCAL_ADDRS,		/* Get all local addresss. */
 #define SCTP_GET_LOCAL_ADDRS SCTP_GET_LOCAL_ADDRS
+	SCTP_NODELAY,	/* Get/set nodelay option. */
+#define SCTP_NODELAY SCTP_NODELAY
 };
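Aside, not part of the commit: the new SCTP_NODELAY option is toggled like any other socket option. A hedged user-space sketch, assuming the usual setsockopt() interface and that <netinet/sctp.h> exposes the constant added above.

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>       /* assumed to expose SCTP_NODELAY */

/* Disable the Nagle-style bundling delay on an SCTP socket. */
static int sctp_set_nodelay(int fd, int on)
{
        return setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
}

int main(void)
{
        int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);

        if (fd >= 0)
                sctp_set_nodelay(fd, 1);
        return 0;
}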
net/sctp/associola.c

@@ -181,7 +181,7 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
 	else
 		asoc->rwnd = sk->rcvbuf;

-	asoc->a_rwnd = 0;
+	asoc->a_rwnd = asoc->rwnd;

 	asoc->rwnd_over = 0;

@@ -360,9 +360,25 @@ static void sctp_association_destroy(sctp_association_t *asoc)
 	}
 }

+/* Change the primary destination address for the peer. */
+void sctp_assoc_set_primary(struct sctp_association *asoc,
+			    struct sctp_transport *transport)
+{
+	asoc->peer.primary_path = transport;
+
+	/* Set a default msg_name for events. */
+	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
+	       sizeof(union sctp_addr));
+
+	/* If the primary path is changing, assume that the
+	 * user wants to use this new path.
+	 */
+	if (transport->active)
+		asoc->peer.active_path = transport;
+}
+
 /* Add a transport address to an association. */
-struct sctp_transport *sctp_assoc_add_peer(sctp_association_t *asoc,
+struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   int priority)
 {

@@ -397,17 +413,16 @@ struct sctp_transport *sctp_assoc_add_peer(sctp_association_t *asoc,
 	 * If not and the current association PMTU is higher than the new
 	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
 	 */
-	if (asoc->pmtu) {
+	if (asoc->pmtu)
 		asoc->pmtu = min_t(int, peer->pmtu, asoc->pmtu);
-	} else {
+	else
 		asoc->pmtu = peer->pmtu;
-	}

 	SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
			  "%d\n", asoc, asoc->pmtu);

-	asoc->frag_point = asoc->pmtu -
-		(SCTP_IP_OVERHEAD + sizeof(sctp_data_chunk_t));
+	asoc->frag_point = asoc->pmtu;
+	asoc->frag_point -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);

 	/* The asoc->peer.port might not be meaningful yet, but
 	 * initialize the packet structure anyway.

@@ -460,11 +475,7 @@ struct sctp_transport *sctp_assoc_add_peer(sctp_association_t *asoc,
 	/* If we do not yet have a primary path, set one.  */
 	if (NULL == asoc->peer.primary_path) {
-		asoc->peer.primary_path = peer;
-		/* Set a default msg_name for events. */
-		memcpy(&asoc->peer.primary_addr, &peer->ipaddr,
-		       sizeof(union sctp_addr));
-		asoc->peer.active_path = peer;
+		sctp_assoc_set_primary(asoc, peer);
 		asoc->peer.retran_path = peer;
 	}

@@ -603,7 +614,7 @@ void sctp_association_put(sctp_association_t *asoc)
 /* Allocate the next TSN, Transmission Sequence Number, for the given
  * association.
  */
-__u32 __sctp_association_get_next_tsn(sctp_association_t *asoc)
+__u32 sctp_association_get_next_tsn(sctp_association_t *asoc)
 {
 	/* From Section 1.6 Serial Number Arithmetic:
 	 * Transmission Sequence Numbers wrap around when they reach

@@ -618,7 +629,7 @@ __u32 __sctp_association_get_next_tsn(sctp_association_t *asoc)
 }

 /* Allocate 'num' TSNs by incrementing the association's TSN by num. */
-__u32 __sctp_association_get_tsn_block(sctp_association_t *asoc, int num)
+__u32 sctp_association_get_tsn_block(sctp_association_t *asoc, int num)
 {
 	__u32 retval = asoc->next_tsn;

@@ -983,6 +994,24 @@ void sctp_assoc_sync_pmtu(sctp_association_t *asoc)
			  __FUNCTION__, asoc, asoc->pmtu, asoc->frag_point);
 }

+/* Should we send a SACK to update our peer? */
+static inline int sctp_peer_needs_update(struct sctp_association *asoc)
+{
+	switch (asoc->state) {
+	case SCTP_STATE_ESTABLISHED:
+	case SCTP_STATE_SHUTDOWN_PENDING:
+	case SCTP_STATE_SHUTDOWN_RECEIVED:
+		if ((asoc->rwnd > asoc->a_rwnd) &&
+		    ((asoc->rwnd - asoc->a_rwnd) >=
+		     min_t(__u32, (asoc->base.sk->rcvbuf >> 1), asoc->pmtu)))
+			return 1;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
 /* Increase asoc's rwnd by len and send any window update SACK if needed. */
 void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
 {

@@ -1009,10 +1038,8 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
 	 * The algorithm used is similar to the one described in
 	 * Section 4.2.3.3 of RFC 1122.
 	 */
-	if ((asoc->state == SCTP_STATE_ESTABLISHED) &&
-	    (asoc->rwnd > asoc->a_rwnd) &&
-	    ((asoc->rwnd - asoc->a_rwnd) >=
-	     min_t(__u32, (asoc->base.sk->rcvbuf >> 1), asoc->pmtu))) {
+	if (sctp_peer_needs_update(asoc)) {
+		asoc->a_rwnd = asoc->rwnd;
 		SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
				  "rwnd: %u a_rwnd: %u\n", __FUNCTION__,
				  asoc, asoc->rwnd, asoc->a_rwnd);

@@ -1020,9 +1047,6 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
 		if (!sack)
 			return;

-		/* Update the last advertised rwnd value. */
-		asoc->a_rwnd = asoc->rwnd;
-
 		asoc->peer.sack_needed = 0;

 		sctp_outq_tail(&asoc->outqueue, sack);

@@ -1046,7 +1070,8 @@ void sctp_assoc_rwnd_decrease(sctp_association_t *asoc, int len)
 		asoc->rwnd = 0;
 	}
 	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n",
-			  __FUNCTION__, asoc, len, asoc->rwnd, asoc->rwnd_over);
+			  __FUNCTION__, asoc, len, asoc->rwnd,
+			  asoc->rwnd_over);
 }

 /* Build the bind address list for the association based on info from the
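Aside, not part of the commit: the new sctp_peer_needs_update() helper only triggers a window-update SACK once the real receive window has run ahead of the advertised one by at least min(rcvbuf/2, PMTU), in the spirit of RFC 1122 section 4.2.3.3. A standalone sketch of just that threshold test, with invented numbers.

#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Nonzero when the advertised window (a_rwnd) lags the current window
 * (rwnd) by at least min(rcvbuf/2, pmtu), i.e. when a window-update
 * SACK is worth sending.
 */
static int peer_needs_update(uint32_t rwnd, uint32_t a_rwnd,
                             uint32_t rcvbuf, uint32_t pmtu)
{
        return (rwnd > a_rwnd) &&
               ((rwnd - a_rwnd) >= min_u32(rcvbuf >> 1, pmtu));
}

int main(void)
{
        /* 64 KB socket buffer, 1500-byte PMTU: update once at least
         * 1500 bytes of window have opened since the last advertisement.
         */
        printf("needs update: %d\n",
               peer_needs_update(32768, 30000, 65536, 1500));
        return 0;
}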
net/sctp/bind_addr.c

@@ -302,7 +302,7 @@ int sctp_bind_addr_match(sctp_bind_addr_t *bp, const union sctp_addr *addr,
 static int sctp_copy_one_addr(sctp_bind_addr_t *dest, union sctp_addr *addr,
			      sctp_scope_t scope, int priority, int flags)
 {
-	sctp_protocol_t *proto = sctp_get_protocol();
+	struct sctp_protocol *proto = sctp_get_protocol();
 	int error = 0;

 	if (sctp_is_any(addr)) {
net/sctp/endpointola.c

@@ -65,7 +65,7 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep);
 /* Create a sctp_endpoint_t with all that boring stuff initialized.
  * Returns NULL if there isn't enough memory.
  */
-sctp_endpoint_t *sctp_endpoint_new(sctp_protocol_t *proto,
+sctp_endpoint_t *sctp_endpoint_new(struct sctp_protocol *proto,
				   struct sock *sk, int priority)
 {
 	sctp_endpoint_t *ep;

@@ -89,7 +89,8 @@ sctp_endpoint_t *sctp_endpoint_new(sctp_protocol_t *proto,
 /*
  * Initialize the base fields of the endpoint structure.
  */
-sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep, sctp_protocol_t *proto,
+sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep,
+				    struct sctp_protocol *proto,
				    struct sock *sk, int priority)
 {
 	struct sctp_opt *sp = sctp_sk(sk);

@@ -194,6 +195,8 @@ void sctp_endpoint_destroy(sctp_endpoint_t *ep)
 {
 	SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);

+	ep->base.sk->state = SCTP_SS_CLOSED;
+
 	/* Unlink this endpoint, so we can't find it again! */
 	sctp_unhash_endpoint(ep);
net/sctp/ipv6.c

@@ -432,6 +432,62 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
 	return retval;
 }

+/* Create and initialize a new sk for the socket to be returned by accept(). */
+struct sock *sctp_v6_create_accept_sk(struct sock *sk,
+				      struct sctp_association *asoc)
+{
+	struct inet_opt *inet = inet_sk(sk);
+	struct sock *newsk;
+	struct inet_opt *newinet;
+	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+	struct sctp6_sock *newsctp6sk;
+
+	newsk = sk_alloc(PF_INET6, GFP_KERNEL, sizeof(struct sctp6_sock),
+			 sk->slab);
+	if (!newsk)
+		goto out;
+
+	sock_init_data(NULL, newsk);
+
+	newsk->type = SOCK_STREAM;
+
+	newsk->prot = sk->prot;
+	newsk->no_check = sk->no_check;
+	newsk->reuse = sk->reuse;
+
+	newsk->destruct = inet_sock_destruct;
+	newsk->zapped = 0;
+	newsk->family = PF_INET6;
+	newsk->protocol = IPPROTO_SCTP;
+	newsk->backlog_rcv = sk->prot->backlog_rcv;
+
+	newsctp6sk = (struct sctp6_sock *)newsk;
+	newsctp6sk->pinet6 = &newsctp6sk->inet6;
+
+	newinet = inet_sk(newsk);
+	newnp = inet6_sk(newsk);
+
+	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+	ipv6_addr_copy(&newnp->daddr, &asoc->peer.primary_addr.v6.sin6_addr);
+
+	newinet->sport = inet->sport;
+	newinet->dport = asoc->peer.port;
+
+#ifdef INET_REFCNT_DEBUG
+	atomic_inc(&inet6_sock_nr);
+	atomic_inc(&inet_sock_nr);
+#endif
+
+	if (0 != newsk->prot->init(newsk)) {
+		inet_sock_release(newsk);
+		newsk = NULL;
+	}
+
+out:
+	return newsk;
+}
+
 /* Initialize a PF_INET6 socket msg_name. */
 static void sctp_inet6_msgname(char *msgname, int *addr_len)
 {

@@ -564,6 +620,20 @@ static int sctp_inet6_bind_verify(struct sctp_opt *opt, union sctp_addr *addr)
 	return af->available(addr);
 }

+/* Fill in Supported Address Type information for INIT and INIT-ACK
+ * chunks.  Note: In the future, we may want to look at sock options
+ * to determine whether a PF_INET6 socket really wants to have IPV4
+ * addresses.
+ * Returns number of addresses supported.
+ */
+static int sctp_inet6_supported_addrs(const struct sctp_opt *opt,
+				      __u16 *types)
+{
+	types[0] = SCTP_PARAM_IPV4_ADDRESS;
+	types[1] = SCTP_PARAM_IPV6_ADDRESS;
+	return 2;
+}
+
 static struct proto_ops inet6_seqpacket_ops = {
 	.family     = PF_INET6,
 	.release    = inet6_release,

@@ -583,7 +653,7 @@ static struct proto_ops inet6_seqpacket_ops = {
 	.mmap       = sock_no_mmap,
 };

-static struct inet_protosw sctpv6_protosw = {
+static struct inet_protosw sctpv6_seqpacket_protosw = {
 	.type          = SOCK_SEQPACKET,
 	.protocol      = IPPROTO_SCTP,
 	.prot          = &sctp_prot,

@@ -592,6 +662,15 @@ static struct inet_protosw sctpv6_protosw = {
 	.no_check      = 0,
 	.flags         = SCTP_PROTOSW_FLAG
 };
+static struct inet_protosw sctpv6_stream_protosw = {
+	.type          = SOCK_STREAM,
+	.protocol      = IPPROTO_SCTP,
+	.prot          = &sctp_prot,
+	.ops           = &inet6_seqpacket_ops,
+	.capability    = -1,
+	.no_check      = 0,
+	.flags         = SCTP_PROTOSW_FLAG
+};

 static struct inet6_protocol sctpv6_protocol = {
 	.handler      = sctp_rcv,

@@ -626,6 +705,8 @@ static struct sctp_pf sctp_pf_inet6_specific = {
 	.af_supported  = sctp_inet6_af_supported,
 	.cmp_addr      = sctp_inet6_cmp_addr,
 	.bind_verify   = sctp_inet6_bind_verify,
+	.supported_addrs = sctp_inet6_supported_addrs,
+	.create_accept_sk = sctp_v6_create_accept_sk,
 	.af            = &sctp_ipv6_specific,
 };

@@ -636,8 +717,9 @@ int sctp_v6_init(void)
 	if (inet6_add_protocol(&sctpv6_protocol, IPPROTO_SCTP) < 0)
 		return -EAGAIN;

-	/* Add SCTPv6 to inetsw6 linked list. */
-	inet6_register_protosw(&sctpv6_protosw);
+	/* Add SCTPv6(UDP and TCP style) to inetsw6 linked list. */
+	inet6_register_protosw(&sctpv6_seqpacket_protosw);
+	inet6_register_protosw(&sctpv6_stream_protosw);

 	/* Register the SCTP specfic PF_INET6 functions. */
 	sctp_register_pf(&sctp_pf_inet6_specific, PF_INET6);

@@ -656,6 +738,7 @@ void sctp_v6_exit(void)
 {
 	list_del(&sctp_ipv6_specific.list);
 	inet6_del_protocol(&sctpv6_protocol, IPPROTO_SCTP);
-	inet6_unregister_protosw(&sctpv6_protosw);
+	inet6_unregister_protosw(&sctpv6_seqpacket_protosw);
+	inet6_unregister_protosw(&sctpv6_stream_protosw);
 	unregister_inet6addr_notifier(&sctp_inetaddr_notifier);
 }
net/sctp/output.c

@@ -62,16 +62,15 @@
 #include <net/sctp/sm.h>

 /* Forward declarations for private helpers. */
-static void sctp_packet_reset(sctp_packet_t *packet);
-static sctp_xmit_t sctp_packet_append_data(sctp_packet_t *packet,
-					   sctp_chunk_t *chunk);
+static void sctp_packet_reset(struct sctp_packet *packet);
+static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
+					   struct sctp_chunk *chunk);

 /* Config a packet.
  * This appears to be a followup set of initializations.)
  */
-sctp_packet_t *sctp_packet_config(sctp_packet_t *packet,
-				  __u32 vtag, int ecn_capable,
+struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
+				       __u32 vtag, int ecn_capable,
				  sctp_packet_phandler_t *prepend_handler)
 {
 	int packet_empty = (packet->size == SCTP_IP_OVERHEAD);

@@ -89,10 +88,9 @@ sctp_packet_t *sctp_packet_config(sctp_packet_t *packet,
 }

 /* Initialize the packet structure. */
-sctp_packet_t *sctp_packet_init(sctp_packet_t *packet,
-				struct sctp_transport *transport,
-				__u16 sport,
-				__u16 dport)
+struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
+				     struct sctp_transport *transport,
+				     __u16 sport, __u16 dport)
 {
 	packet->transport = transport;
 	packet->source_port = sport;

@@ -109,14 +107,12 @@ sctp_packet_t *sctp_packet_init(sctp_packet_t *packet,
 }

 /* Free a packet. */
-void sctp_packet_free(sctp_packet_t *packet)
+void sctp_packet_free(struct sctp_packet *packet)
 {
-	sctp_chunk_t *chunk;
+	struct sctp_chunk *chunk;

-	while (NULL != (chunk = (sctp_chunk_t *)
-			skb_dequeue(&packet->chunks))) {
+	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)))
 		sctp_free_chunk(chunk);
-	}

 	if (packet->malloced)
 		kfree(packet);

@@ -129,8 +125,8 @@ void sctp_packet_free(sctp_packet_t *packet)
  * as it can fit in the packet, but any more data that does not fit in this
  * packet can be sent only after receiving the COOKIE_ACK.
  */
-sctp_xmit_t sctp_packet_transmit_chunk(sctp_packet_t *packet,
-				       sctp_chunk_t *chunk)
+sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
+				       struct sctp_chunk *chunk)
 {
 	sctp_xmit_t retval;
 	int error = 0;

@@ -152,6 +148,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(sctp_packet_t *packet,
 	case SCTP_XMIT_MUST_FRAG:
 	case SCTP_XMIT_RWND_FULL:
 	case SCTP_XMIT_OK:
+	case SCTP_XMIT_NAGLE_DELAY:
 		break;
 	};

@@ -161,7 +158,8 @@ sctp_xmit_t sctp_packet_transmit_chunk(sctp_packet_t *packet,
 /* Append a chunk to the offered packet reporting back any inability to do
  * so.
  */
-sctp_xmit_t sctp_packet_append_chunk(sctp_packet_t *packet, sctp_chunk_t *chunk)
+sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
+				     struct sctp_chunk *chunk)
 {
 	sctp_xmit_t retval = SCTP_XMIT_OK;
 	__u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));

@@ -223,7 +221,7 @@ sctp_xmit_t sctp_packet_append_chunk(sctp_packet_t *packet, sctp_chunk_t *chunk)
 	}

 	/* It is OK to send this chunk. */
-	skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
+	__skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
 	packet->size += chunk_len;
finish:
 	return retval;

@@ -234,18 +232,18 @@ sctp_xmit_t sctp_packet_append_chunk(sctp_packet_t *packet, sctp_chunk_t *chunk)
 *
 * The return value is a normal kernel error return value.
 */
-int sctp_packet_transmit(sctp_packet_t *packet)
+int sctp_packet_transmit(struct sctp_packet *packet)
 {
 	struct sctp_transport *transport = packet->transport;
-	sctp_association_t *asoc = transport->asoc;
+	struct sctp_association *asoc = transport->asoc;
 	struct sctphdr *sh;
 	__u32 crc32;
 	struct sk_buff *nskb;
-	sctp_chunk_t *chunk;
+	struct sctp_chunk *chunk;
 	struct sock *sk;
 	int err = 0;
 	int padding;		/* How much padding do we need? */
-	__u8 packet_has_data = 0;
+	__u8 has_data = 0;
 	struct dst_entry *dst;

 	/* Do NOT generate a chunkless packet... */

@@ -253,7 +251,7 @@ int sctp_packet_transmit(sctp_packet_t *packet)
 		return err;

 	/* Set up convenience variables... */
-	chunk = (sctp_chunk_t *) (packet->chunks.next);
+	chunk = (struct sctp_chunk *)(packet->chunks.next);
 	sk = chunk->skb->sk;

 	/* Allocate the new skb.  */

@@ -291,8 +289,7 @@ int sctp_packet_transmit(sctp_packet_t *packet)
 	 * [This whole comment explains WORD_ROUND() below.]
 	 */
 	SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");
-	while (NULL != (chunk = (sctp_chunk_t *)
-			skb_dequeue(&packet->chunks))) {
+	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) {
 		chunk->num_times_sent++;
 		chunk->sent_at = jiffies;
 		if (sctp_chunk_is_data(chunk)) {

@@ -309,7 +306,7 @@ int sctp_packet_transmit(sctp_packet_t *packet)
				chunk->rtt_in_progress = 1;
				transport->rto_pending = 1;
			}
-			packet_has_data = 1;
+			has_data = 1;
		}
		memcpy(skb_put(nskb, chunk->skb->len),
		       chunk->skb->data, chunk->skb->len);

@@ -399,7 +396,7 @@ int sctp_packet_transmit(sctp_packet_t *packet)
		asoc->peer.last_sent_to = transport;
	}

-	if (packet_has_data) {
+	if (has_data) {
		struct timer_list *timer;
		unsigned long timeout;

@@ -456,9 +453,9 @@ int sctp_packet_transmit(sctp_packet_t *packet)
 /*
  * This private function resets the packet to a fresh state.
  */
-static void sctp_packet_reset(sctp_packet_t *packet)
+static void sctp_packet_reset(struct sctp_packet *packet)
 {
-	sctp_chunk_t *chunk = NULL;
+	struct sctp_chunk *chunk = NULL;

 	packet->size = SCTP_IP_OVERHEAD;

@@ -473,13 +470,16 @@ static void sctp_packet_reset(sctp_packet_t *packet)
 }

 /* This private function handles the specifics of appending DATA chunks. */
-static sctp_xmit_t sctp_packet_append_data(sctp_packet_t *packet,
-					   sctp_chunk_t *chunk)
+static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
+					   struct sctp_chunk *chunk)
 {
 	sctp_xmit_t retval = SCTP_XMIT_OK;
 	size_t datasize, rwnd, inflight;
 	struct sctp_transport *transport = packet->transport;
 	__u32 max_burst_bytes;
+	struct sctp_association *asoc = transport->asoc;
+	struct sctp_opt *sp = sctp_sk(asoc->base.sk);
+	struct sctp_outq *q = &asoc->outqueue;

 	/* RFC 2960 6.1 Transmission of DATA Chunks
 	 *

@@ -494,8 +494,8 @@ static sctp_xmit_t sctp_packet_append_data(sctp_packet_t *packet,
 	 * receiver to the data sender.
 	 */

-	rwnd = transport->asoc->peer.rwnd;
-	inflight = transport->asoc->outqueue.outstanding_bytes;
+	rwnd = asoc->peer.rwnd;
+	inflight = asoc->outqueue.outstanding_bytes;

 	datasize = sctp_data_size(chunk);

@@ -517,7 +517,7 @@ static sctp_xmit_t sctp_packet_append_data(sctp_packet_t *packet,
 	 * if ((flightsize + Max.Burst * MTU) < cwnd)
 	 *    cwnd = flightsize + Max.Burst * MTU
 	 */
-	max_burst_bytes = transport->asoc->max_burst * transport->asoc->pmtu;
+	max_burst_bytes = asoc->max_burst * asoc->pmtu;
 	if ((transport->flight_size + max_burst_bytes) < transport->cwnd) {
 		transport->cwnd = transport->flight_size + max_burst_bytes;
 		SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: "

@@ -543,27 +543,44 @@ static sctp_xmit_t sctp_packet_append_data(sctp_packet_t *packet,
 	 * When a Fast Retransmit is being performed the sender SHOULD
 	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
 	 */
-	if (!chunk->fast_retransmit) {
+	if (!chunk->fast_retransmit)
 		if (transport->flight_size >= transport->cwnd) {
 			retval = SCTP_XMIT_RWND_FULL;
 			goto finish;
 		}
-	}
+
+	/* Nagle's algorithm to solve small-packet problem:
+	 * Inhibit the sending of new chunks when new outgoing data arrives
+	 * if any previously transmitted data on the connection remains
+	 * unacknowledged.
+	 */
+	if (!sp->nodelay && SCTP_IP_OVERHEAD == packet->size &&
+	    q->outstanding_bytes && SCTP_STATE_ESTABLISHED == asoc->state) {
+		unsigned len = datasize + q->out_qlen;
+
+		/* Check whether this chunk and all the rest of pending
+		 * data will fit or delay in hopes of bundling a full
+		 * sized packet.
+		 */
+		if (len < asoc->pmtu - SCTP_IP_OVERHEAD) {
+			retval = SCTP_XMIT_NAGLE_DELAY;
+			goto finish;
+		}
+	}

 	/* Keep track of how many bytes are in flight over this transport. */
 	transport->flight_size += datasize;

 	/* Keep track of how many bytes are in flight to the receiver. */
-	transport->asoc->outqueue.outstanding_bytes += datasize;
+	asoc->outqueue.outstanding_bytes += datasize;

 	/* Update our view of the receiver's rwnd. */
-	if (datasize < rwnd) {
+	if (datasize < rwnd)
 		rwnd -= datasize;
-	} else {
+	else
 		rwnd = 0;
-	}

-	transport->asoc->peer.rwnd = rwnd;
+	asoc->peer.rwnd = rwnd;
finish:
 	return retval;
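Aside, not part of the commit: the Nagle test added to sctp_packet_append_data() only delays a DATA chunk when the packet being built is still empty, earlier data is unacknowledged, the association is established, nodelay is off, and the pending data would not fill a PMTU-sized packet. A compact sketch of that decision using plain stand-in parameters rather than the kernel structures; the 32-byte overhead figure is illustrative.

#include <stddef.h>

enum xmit_verdict { XMIT_OK, XMIT_NAGLE_DELAY };

/* Hold back a small write while earlier data is still in flight,
 * unless nodelay was requested or the queued data already fills a
 * packet (pmtu minus header overhead).
 */
static enum xmit_verdict nagle_check(int nodelay, size_t packet_payload,
                                     size_t outstanding_bytes,
                                     int established, size_t datasize,
                                     size_t queued_len, size_t pmtu,
                                     size_t overhead)
{
        if (!nodelay && packet_payload == 0 && outstanding_bytes &&
            established && (datasize + queued_len) < (pmtu - overhead))
                return XMIT_NAGLE_DELAY;
        return XMIT_OK;
}

int main(void)
{
        /* A 100-byte write with 1 KB already in flight on a 1500-byte
         * path gets delayed in hope of bundling a fuller packet.
         */
        return nagle_check(0, 0, 1024, 1, 100, 0, 1500, 32)
                == XMIT_NAGLE_DELAY ? 0 : 1;
}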
net/sctp/outqueue.c

 /* SCTP kernel reference Implementation
  * Copyright (c) 1999-2000 Cisco, Inc.
  * Copyright (c) 1999-2001 Motorola, Inc.
- * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001-2003 Intel Corp.
  * Copyright (c) 2001-2003 International Business Machines Corp.
  *
  * This file is part of the SCTP kernel reference Implementation

@@ -62,6 +62,43 @@ static void sctp_check_transmitted(struct sctp_outq *q,
				   sctp_sackhdr_t *sack,
				   __u32 highest_new_tsn);

+/* Add data to the front of the queue. */
+static inline void sctp_outq_head_data(struct sctp_outq *q,
+				       struct sctp_chunk *ch)
+{
+	__skb_queue_head(&q->out, (struct sk_buff *)ch);
+	q->out_qlen += ch->skb->len;
+	return;
+}
+
+/* Take data from the front of the queue. */
+static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
+{
+	struct sctp_chunk *ch;
+	ch = (struct sctp_chunk *)__skb_dequeue(&q->out);
+	if (ch)
+		q->out_qlen -= ch->skb->len;
+	return ch;
+}
+
+/* Add data chunk to the end of the queue. */
+static inline void sctp_outq_tail_data(struct sctp_outq *q,
+				       struct sctp_chunk *ch)
+{
+	__skb_queue_tail(&q->out, (struct sk_buff *)ch);
+	q->out_qlen += ch->skb->len;
+	return;
+}
+
+/* Insert a chunk behind chunk 'pos'. */
+static inline void sctp_outq_insert_data(struct sctp_outq *q,
+					 struct sctp_chunk *ch,
+					 struct sctp_chunk *pos)
+{
+	__skb_insert((struct sk_buff *)ch, (struct sk_buff *)pos->prev,
+		     (struct sk_buff *)pos, pos->list);
+	q->out_qlen += ch->skb->len;
+}
+
 /* Generate a new outqueue. */
 struct sctp_outq *sctp_outq_new(sctp_association_t *asoc)
 {

@@ -97,6 +134,7 @@ void sctp_outq_init(sctp_association_t *asoc, struct sctp_outq *q)
 	q->empty = 1;
 	q->malloced = 0;
+	q->out_qlen = 0;
 }

 /* Free the outqueue structure and any related pending chunks.

@@ -133,7 +171,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
 	}

 	/* Throw away any leftover data chunks. */
-	while ((chunk = (sctp_chunk_t *)skb_dequeue(&q->out)))
+	while ((chunk = sctp_outq_dequeue_data(q)))
 		sctp_free_chunk(chunk);

 	/* Throw away any leftover control chunks. */

@@ -192,7 +230,7 @@ int sctp_outq_tail(struct sctp_outq *q, sctp_chunk_t *chunk)
				  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
				  : "Illegal Chunk");

-			skb_queue_tail(&q->out, (struct sk_buff *)chunk);
+			sctp_outq_tail_data(q, chunk);
			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				SCTP_INC_STATS(SctpOutUnorderChunks);
			else

@@ -201,7 +239,7 @@ int sctp_outq_tail(struct sctp_outq *q, sctp_chunk_t *chunk)
			break;
		};
	} else {
-		skb_queue_tail(&q->control, (struct sk_buff *)chunk);
+		__skb_queue_tail(&q->control, (struct sk_buff *)chunk);
		SCTP_INC_STATS(SctpOutCtrlChunks);
	}

@@ -351,7 +389,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 *
 * The return value is a normal kernel error return value.
 */
-static int sctp_outq_flush_rtx(struct sctp_outq *q, sctp_packet_t *pkt,
+static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
 {
 	struct list_head *lqueue;

@@ -385,17 +423,6 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, sctp_packet_t *pkt,
	while (lchunk) {
		chunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);

-#if 0
-		/* If a chunk has been tried for more than SCTP_DEF_MAX_SEND
-		 * times, discard it, and check the empty flag of the outqueue.
-		 *
-		 *	--xguo
-		 */
-		if (chunk->snd_count > SCTP_DEF_MAX_SEND) {
-			sctp_free_chunk(chunk);
-			continue;
-		}
-#endif
-
		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the

@@ -461,8 +488,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, sctp_packet_t *pkt,
 * queue.  'pos' points to the next chunk in the output queue after the
 * chunk that is currently in the process of fragmentation.
 */
-void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
-		    sctp_packet_t *packet, sctp_chunk_t *frag, __u32 tsn)
+void sctp_xmit_frag(struct sctp_outq *q, struct sctp_chunk *pos,
+		    struct sctp_packet *packet, struct sctp_chunk *frag, __u32 tsn)
 {
 	struct sctp_transport *transport = packet->transport;
 	struct sk_buff_head *queue = &q->out;

@@ -480,11 +507,10 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
		SCTP_DEBUG_PRINTK("sctp_xmit_frag: q not empty. "
				  "adding 0x%x to outqueue\n",
				  ntohl(frag->subh.data_hdr->tsn));
-		if (pos) {
-			skb_insert(pos, (struct sk_buff *) frag);
-		} else {
-			skb_queue_tail(queue, (struct sk_buff *) frag);
-		}
+		if (pos)
+			sctp_outq_insert_data(q, frag, pos);
+		else
+			sctp_outq_tail_data(q, frag);
		return;
	}

@@ -496,11 +522,10 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
		SCTP_DEBUG_PRINTK("sctp_xmit_frag: rwnd full. "
				  "adding 0x%x to outqueue\n",
				  ntohl(frag->subh.data_hdr->tsn));
-		if (pos) {
-			skb_insert(pos, (struct sk_buff *) frag);
-		} else {
-			skb_queue_tail(queue, (struct sk_buff *) frag);
-		}
+		if (pos)
+			sctp_outq_insert_data(q, frag, pos);
+		else
+			sctp_outq_tail_data(q, frag);
		break;

	case SCTP_XMIT_OK:

@@ -512,11 +537,10 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
			SCTP_DEBUG_PRINTK("sctp_xmit_frag: force output "
					  "failed. adding 0x%x to outqueue\n",
					  ntohl(frag->subh.data_hdr->tsn));
-			if (pos) {
-				skb_insert(pos, (struct sk_buff *) frag);
-			} else {
-				skb_queue_tail(queue, (struct sk_buff *) frag);
-			}
+			if (pos)
+				sctp_outq_insert_data(q, frag, pos);
+			else
+				sctp_outq_tail_data(q, frag);
		} else {
			SCTP_DEBUG_PRINTK("sctp_xmit_frag: force output "
					  "success. 0x%x sent\n",

@@ -537,14 +561,14 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
 * The argument 'frag' point to the first fragment and it holds the list
 * of all the other fragments in the 'frag_list' field.
 */
-void sctp_xmit_fragmented_chunks(struct sctp_outq *q, sctp_packet_t *packet,
+void sctp_xmit_fragmented_chunks(struct sctp_outq *q, struct sctp_packet *pkt,
				 sctp_chunk_t *frag)
 {
 	sctp_association_t *asoc = frag->asoc;
 	struct list_head *lfrag, *frag_list;
 	__u32 tsn;
 	int nfrags = 1;
-	struct sk_buff *pos;
+	struct sctp_chunk *pos;

 	/* Count the number of fragments. */
 	frag_list = &frag->frag_list;

@@ -553,17 +577,17 @@ void sctp_xmit_fragmented_chunks(struct sctp_outq *q, sctp_packet_t *packet,
	}

	/* Get a TSN block of nfrags TSNs. */
-	tsn = __sctp_association_get_tsn_block(asoc, nfrags);
+	tsn = sctp_association_get_tsn_block(asoc, nfrags);

-	pos = skb_peek(&q->out);
+	pos = (struct sctp_chunk *)skb_peek(&q->out);
	/* Transmit the first fragment. */
-	sctp_xmit_frag(q, pos, packet, frag, tsn++);
+	sctp_xmit_frag(q, pos, pkt, frag, tsn++);

	/* Transmit the rest of fragments. */
	frag_list = &frag->frag_list;
	list_for_each(lfrag, frag_list) {
		frag = list_entry(lfrag, sctp_chunk_t, frag_list);
-		sctp_xmit_frag(q, pos, packet, frag, tsn++);
+		sctp_xmit_frag(q, pos, pkt, frag, tsn++);
	}
 }

@@ -672,15 +696,14 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
- *
- * Note: This function can be called from multiple contexts so appropriate
+ * * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.  Today we use the sock lock to protect
 * this function.
 */
 int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 {
-	sctp_packet_t *packet;
-	sctp_packet_t singleton;
+	struct sctp_packet *packet;
+	struct sctp_packet singleton;
 	sctp_association_t *asoc = q->asoc;
 	int ecn_capable = asoc->peer.ecn_capable;
 	__u16 sport = asoc->base.bind_addr.port;

@@ -719,7 +742,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
	}

	queue = &q->control;
-	while (NULL != (chunk = (sctp_chunk_t *)skb_dequeue(queue))) {
+	while ((chunk = (sctp_chunk_t *)skb_dequeue(queue))) {
		/* Pick the right transport to use. */
		new_transport = chunk->transport;

@@ -852,7 +875,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
	/* Finally, transmit new packets.  */
	start_timer = 0;
	queue = &q->out;
-	while (NULL != (chunk = (sctp_chunk_t *)skb_dequeue(queue))) {
+
+	while (NULL != (chunk = sctp_outq_dequeue_data(q))) {
		/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
		 * stream identifier.
		 */

@@ -925,6 +949,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
		case SCTP_XMIT_RWND_FULL:
+		case SCTP_XMIT_NAGLE_DELAY:
			/* We could not append this chunk, so put
			 * the chunk back on the output queue.
			 */

@@ -932,7 +957,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
					  "not transmit TSN: 0x%x, status: %d\n",
					  ntohl(chunk->subh.data_hdr->tsn),
					  status);
-			skb_queue_head(queue, (struct sk_buff *)chunk);
+			sctp_outq_head_data(q, chunk);
			goto sctp_flush_out;
			break;

@@ -994,6 +1019,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
	}

sctp_flush_out:
+
	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.

@@ -1163,11 +1189,10 @@ int sctp_outq_sack(struct sctp_outq *q, sctp_sackhdr_t *sack)
	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

-	if (outstanding < sack_a_rwnd) {
+	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
-	} else {
+	else
		sack_a_rwnd = 0;
-	}

	asoc->peer.rwnd = sack_a_rwnd;
net/sctp/protocol.c

@@ -58,7 +58,7 @@
 #include <net/inet_common.h>

 /* Global data structures. */
-sctp_protocol_t sctp_proto;
+struct sctp_protocol sctp_proto;
 struct proc_dir_entry	*proc_net_sctp;
 DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);

@@ -152,7 +152,7 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
 /* Extract our IP addresses from the system and stash them in the
  * protocol structure.
  */
-static void __sctp_get_local_addr_list(sctp_protocol_t *proto)
+static void __sctp_get_local_addr_list(struct sctp_protocol *proto)
 {
 	struct net_device *dev;
 	struct list_head *pos;

@@ -168,7 +168,7 @@ static void __sctp_get_local_addr_list(sctp_protocol_t *proto)
 	read_unlock(&dev_base_lock);
 }

-static void sctp_get_local_addr_list(sctp_protocol_t *proto)
+static void sctp_get_local_addr_list(struct sctp_protocol *proto)
 {
 	long flags __attribute__ ((unused));

@@ -178,7 +178,7 @@ static void sctp_get_local_addr_list(sctp_protocol_t *proto)
 }

 /* Free the existing local addresses.  */
-static void __sctp_free_local_addr_list(sctp_protocol_t *proto)
+static void __sctp_free_local_addr_list(struct sctp_protocol *proto)
 {
 	struct sockaddr_storage_list *addr;
 	struct list_head *pos, *temp;

@@ -191,7 +191,7 @@ static void __sctp_free_local_addr_list(sctp_protocol_t *proto)
 }

 /* Free the existing local addresses.  */
-static void sctp_free_local_addr_list(sctp_protocol_t *proto)
+static void sctp_free_local_addr_list(struct sctp_protocol *proto)
 {
 	long flags __attribute__ ((unused));

@@ -201,8 +201,9 @@ static void sctp_free_local_addr_list(sctp_protocol_t *proto)
 }

 /* Copy the local addresses which are valid for 'scope' into 'bp'.  */
-int sctp_copy_local_addr_list(sctp_protocol_t *proto, sctp_bind_addr_t *bp,
-			      sctp_scope_t scope, int priority, int copy_flags)
+int sctp_copy_local_addr_list(struct sctp_protocol *proto,
+			      struct sctp_bind_addr *bp, sctp_scope_t scope,
+			      int priority, int copy_flags)
 {
 	struct sockaddr_storage_list *addr;
 	int error = 0;

@@ -479,6 +480,61 @@ void sctp_v4_get_saddr(sctp_association_t *asoc,
 }

+/* Create and initialize a new sk for the socket returned by accept(). */
+struct sock *sctp_v4_create_accept_sk(struct sock *sk,
+				      struct sctp_association *asoc)
+{
+	struct sock *newsk;
+	struct inet_opt *inet = inet_sk(sk);
+	struct inet_opt *newinet;
+
+	newsk = sk_alloc(PF_INET, GFP_KERNEL, sizeof(struct sctp_sock),
+			 sk->slab);
+	if (!newsk)
+		goto out;
+
+	sock_init_data(NULL, newsk);
+
+	newsk->type = SOCK_STREAM;
+
+	newsk->prot = sk->prot;
+	newsk->no_check = sk->no_check;
+	newsk->reuse = sk->reuse;
+
+	newsk->destruct = inet_sock_destruct;
+	newsk->zapped = 0;
+	newsk->family = PF_INET;
+	newsk->protocol = IPPROTO_SCTP;
+	newsk->backlog_rcv = sk->prot->backlog_rcv;
+
+	newinet = inet_sk(newsk);
+
+	newinet->sport = inet->sport;
+	newinet->saddr = inet->saddr;
+	newinet->rcv_saddr = inet->saddr;
+	newinet->dport = asoc->peer.port;
+	newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
+	newinet->pmtudisc = inet->pmtudisc;
+	newinet->id = 0;
+
+	newinet->ttl = sysctl_ip_default_ttl;
+	newinet->mc_loop = 1;
+	newinet->mc_ttl = 1;
+	newinet->mc_index = 0;
+	newinet->mc_list = NULL;
+
+#ifdef INET_REFCNT_DEBUG
+	atomic_inc(&inet_sock_nr);
+#endif
+
+	if (0 != newsk->prot->init(newsk)) {
+		inet_sock_release(newsk);
+		newsk = NULL;
+	}
+
+out:
+	return newsk;
+}
+
 /* Event handler for inet address addition/deletion events.
  * Basically, whenever there is an event, we re-build our local address list.
  */

@@ -501,10 +557,13 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long event,
 */
 int sctp_ctl_sock_init(void)
 {
-	int err = 0;
-	int family = PF_INET;
+	int err;
+	sa_family_t family;

-	SCTP_V6(family = PF_INET6;)
+	if (sctp_get_pf_specific(PF_INET6))
+		family = PF_INET6;
+	else
+		family = PF_INET;

 	err = sock_create(family, SOCK_SEQPACKET, IPPROTO_SCTP,
			  &sctp_ctl_socket);

@@ -630,6 +689,16 @@ static int sctp_inet_bind_verify(struct sctp_opt *opt, union sctp_addr *addr)
 	return sctp_v4_available(addr);
 }

+/* Fill in Supported Address Type information for INIT and INIT-ACK
+ * chunks.  Returns number of addresses supported.
+ */
+static int sctp_inet_supported_addrs(const struct sctp_opt *opt,
+				     __u16 *types)
+{
+	types[0] = SCTP_PARAM_IPV4_ADDRESS;
+	return 1;
+}
+
 /* Wrapper routine that calls the ip transmit routine. */
 static inline int sctp_v4_xmit(struct sk_buff *skb,
			       struct sctp_transport *transport, int ipfragok)

@@ -652,6 +721,8 @@ static struct sctp_pf sctp_pf_inet = {
 	.af_supported  = sctp_inet_af_supported,
 	.cmp_addr      = sctp_inet_cmp_addr,
 	.bind_verify   = sctp_inet_bind_verify,
+	.supported_addrs = sctp_inet_supported_addrs,
+	.create_accept_sk = sctp_v4_create_accept_sk,
 	.af            = &sctp_ipv4_specific,
 };

@@ -682,7 +753,7 @@ struct proto_ops inet_seqpacket_ops = {
 };

 /* Registration with AF_INET family.  */
-struct inet_protosw sctp_protosw = {
+static struct inet_protosw sctp_seqpacket_protosw = {
 	.type       = SOCK_SEQPACKET,
 	.protocol   = IPPROTO_SCTP,
 	.prot       = &sctp_prot,

@@ -691,6 +762,15 @@ struct inet_protosw sctp_protosw = {
 	.no_check   = 0,
 	.flags      = SCTP_PROTOSW_FLAG
 };
+static struct inet_protosw sctp_stream_protosw = {
+	.type       = SOCK_STREAM,
+	.protocol   = IPPROTO_SCTP,
+	.prot       = &sctp_prot,
+	.ops        = &inet_seqpacket_ops,
+	.capability = -1,
+	.no_check   = 0,
+	.flags      = SCTP_PROTOSW_FLAG
+};

 /* Register with IP layer.  */
 static struct inet_protocol sctp_protocol = {

@@ -797,8 +877,9 @@ __init int sctp_init(void)
 	if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0)
 		return -EAGAIN;

-	/* Add SCTP to inetsw linked list. */
-	inet_register_protosw(&sctp_protosw);
+	/* Add SCTP(TCP and UDP style) to inetsw linked list. */
+	inet_register_protosw(&sctp_seqpacket_protosw);
+	inet_register_protosw(&sctp_stream_protosw);

 	/* Allocate and initialise sctp mibs.  */
 	status = init_sctp_mibs();

@@ -944,7 +1025,8 @@ __init int sctp_init(void)
 	cleanup_sctp_mibs();
err_init_mibs:
 	inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
-	inet_unregister_protosw(&sctp_protosw);
+	inet_unregister_protosw(&sctp_seqpacket_protosw);
+	inet_unregister_protosw(&sctp_stream_protosw);
 	return status;
 }

@@ -977,7 +1059,8 @@ __exit void sctp_exit(void)
 	cleanup_sctp_mibs();

 	inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
-	inet_unregister_protosw(&sctp_protosw);
+	inet_unregister_protosw(&sctp_seqpacket_protosw);
+	inet_unregister_protosw(&sctp_stream_protosw);
 }

 module_init(sctp_init);
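Aside, not part of the commit: with both a SOCK_SEQPACKET and a SOCK_STREAM inet_protosw now registered, an application picks the UDP-style or TCP-style API at socket() time. A small user-space sketch of that choice; nothing SCTP-specific is assumed beyond the protocol number.

#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
        /* UDP-style (one-to-many) SCTP socket, via the seqpacket entry. */
        int udp_style = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);

        /* TCP-style (one-to-one) SCTP socket, served by the newly
         * registered stream protosw entry.
         */
        int tcp_style = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

        return (udp_style < 0 || tcp_style < 0) ? 1 : 0;
}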
net/sctp/sm_make_chunk.c
View file @
15931833
...
...
@@ -66,29 +66,6 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
* Note 4: This parameter, when present, specifies all the
* address types the sending endpoint can support. The absence
* of this parameter indicates that the sending endpoint can
* support any address type.
*/
static
const
sctp_supported_addrs_param_t
sat_param
=
{
{
SCTP_PARAM_SUPPORTED_ADDRESS_TYPES
,
__constant_htons
(
SCTP_SAT_LEN
),
}
};
/* gcc 3.2 doesn't allow initialization of zero-length arrays. So the above
* structure is split and the address types array is initialized using a
* fixed length array.
*/
static
const
__u16
sat_addr_types
[
2
]
=
{
SCTP_PARAM_IPV4_ADDRESS
,
SCTP_V6
(
SCTP_PARAM_IPV6_ADDRESS
,)
};
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
* Note 2: The ECN capable field is reserved for future use of
...
...
@@ -174,7 +151,10 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
union
sctp_params
addrs
;
size_t
chunksize
;
sctp_chunk_t
*
retval
=
NULL
;
int
addrs_len
=
0
;
int
num_types
,
addrs_len
=
0
;
struct
sctp_opt
*
sp
;
sctp_supported_addrs_param_t
sat
;
__u16
types
[
2
];
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
...
...
@@ -195,7 +175,11 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
init
.
num_inbound_streams
=
htons
(
asoc
->
c
.
sinit_max_instreams
);
init
.
initial_tsn
=
htonl
(
asoc
->
c
.
initial_tsn
);
chunksize
=
sizeof
(
init
)
+
addrs_len
+
SCTP_SAT_LEN
;
/* How many address types are needed? */
sp
=
sctp_sk
(
asoc
->
base
.
sk
);
num_types
=
sp
->
pf
->
supported_addrs
(
sp
,
types
);
chunksize
=
sizeof
(
init
)
+
addrs_len
+
SCTP_SAT_LEN
(
num_types
);
chunksize
+=
sizeof
(
ecap_param
);
chunksize
+=
vparam_len
;
...
...
@@ -220,8 +204,18 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
retval
->
param_hdr
.
v
=
sctp_addto_chunk
(
retval
,
addrs_len
,
addrs
.
v
);
sctp_addto_chunk
(
retval
,
sizeof
(
sctp_paramhdr_t
),
&
sat_param
);
sctp_addto_chunk
(
retval
,
sizeof
(
sat_addr_types
),
sat_addr_types
);
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
* Note 4: This parameter, when present, specifies all the
* address types the sending endpoint can support. The absence
* of this parameter indicates that the sending endpoint can
* support any address type.
*/
sat
.
param_hdr
.
type
=
SCTP_PARAM_SUPPORTED_ADDRESS_TYPES
;
sat
.
param_hdr
.
length
=
htons
(
SCTP_SAT_LEN
(
num_types
));
sctp_addto_chunk
(
retval
,
sizeof
(
sat
),
&
sat
);
sctp_addto_chunk
(
retval
,
num_types
*
sizeof
(
__u16
),
&
types
);
sctp_addto_chunk
(
retval
,
sizeof
(
ecap_param
),
&
ecap_param
);
nodata:
if
(
addrs
.
v
)
...
...
@@ -604,7 +598,7 @@ sctp_chunk_t *sctp_make_sack(const sctp_association_t *asoc)
/* Initialize the SACK header. */
sack
.
cum_tsn_ack
=
htonl
(
ctsn
);
sack
.
a_rwnd
=
htonl
(
asoc
->
rwnd
);
sack
.
a_rwnd
=
htonl
(
asoc
->
a_
rwnd
);
sack
.
num_gap_ack_blocks
=
htons
(
num_gabs
);
sack
.
num_dup_tsns
=
htons
(
num_dup_tsns
);
...
...
@@ -1159,7 +1153,7 @@ int sctp_datachunks_from_user(sctp_association_t *asoc,
 		first_len = max;

 	/* Encourage Cookie-ECHO bundling. */
-	if (asoc->state < SCTP_STATE_ESTABLISHED) {
+	if (asoc->state < SCTP_STATE_COOKIE_ECHOED) {
 		whole = msg_len / (max - SCTP_ARBITRARY_COOKIE_ECHO_LEN);

 		/* Account for the DATA to be bundled with the COOKIE-ECHO. */
...
...
@@ -1282,7 +1276,7 @@ void sctp_chunk_assign_tsn(sctp_chunk_t *chunk)
 	 * assign a TSN.
 	 */
 	chunk->subh.data_hdr->tsn =
-		htonl(__sctp_association_get_next_tsn(chunk->asoc));
+		htonl(sctp_association_get_next_tsn(chunk->asoc));
 		chunk->has_tsn = 1;
 	}
 }
...
...
net/sctp/sm_sideeffect.c
...
...
@@ -105,8 +105,8 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *, sctp_association_t *,
 #define DEBUG_POST_SFX \
 	SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \
 			  error, asoc, \
-			  sctp_state_tbl[sctp_id2assoc(ep->base.sk, \
-			  sctp_assoc2id(asoc))?asoc->state:SCTP_STATE_CLOSED])
+			  sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
+			  sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED])
/*
* This is the master state machine processing function.
...
...
@@ -256,7 +256,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
 	sctp_cmd_t *cmd;
 	sctp_chunk_t *new_obj;
 	sctp_chunk_t *chunk = NULL;
-	sctp_packet_t *packet;
+	struct sctp_packet *packet;
 	struct list_head *pos;
 	struct timer_list *timer;
 	unsigned long timeout;
...
...
@@ -716,13 +716,12 @@ int sctp_gen_sack(sctp_association_t *asoc, int force, sctp_cmd_seq_t *commands)
 		asoc->peer.sack_needed = 1;
 		goto out;
 	} else {
-		if (asoc->a_rwnd > asoc->rwnd)
-			asoc->a_rwnd = asoc->rwnd;
 		sack = sctp_make_sack(asoc);
 		if (!sack)
 			goto nomem;

+		/* Update the last advertised rwnd value. */
+		asoc->a_rwnd = asoc->rwnd;
+
 		asoc->peer.sack_needed = 0;

 		error = sctp_outq_tail(&asoc->outqueue, sack);
...
...
@@ -1223,13 +1222,35 @@ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
 static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
 			       sctp_state_t state)
 {
+	struct sock *sk = asoc->base.sk;
+	struct sctp_opt *sp = sctp_sk(sk);
+
 	asoc->state = state;
 	asoc->state_timestamp = jiffies;

-	/* Wake up any process waiting for the association to
-	 * get established.
-	 */
-	if ((SCTP_STATE_ESTABLISHED == asoc->state) &&
-	    (waitqueue_active(&asoc->wait)))
-		wake_up_interruptible(&asoc->wait);
+	if ((SCTP_STATE_ESTABLISHED == asoc->state) ||
+	    (SCTP_STATE_CLOSED == asoc->state)) {
+		/* Wake up any processes waiting in the asoc's wait queue in
+		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
+		 */
+		if (waitqueue_active(&asoc->wait))
+			wake_up_interruptible(&asoc->wait);
+
+		/* Wake up any processes waiting in the sk's sleep queue of
+		 * a TCP-style or UDP-style peeled-off socket in
+		 * sctp_wait_for_accept() or sctp_wait_for_packet().
+		 * For a UDP-style socket, the waiters are woken up by the
+		 * notifications.
+		 */
+		if (SCTP_SOCKET_UDP != sp->type)
+			sk->state_change(sk);
+	}
+
+	/* Change the sk->state of a TCP-style socket that has sucessfully
+	 * completed a connect() call.
+	 */
+	if ((SCTP_STATE_ESTABLISHED == asoc->state) &&
+	    (SCTP_SOCKET_TCP == sp->type) && (SCTP_SS_CLOSED == sk->state))
+		sk->state = SCTP_SS_ESTABLISHED;
 }
net/sctp/sm_statefuns.c
...
...
@@ -189,7 +189,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
 	sctp_chunk_t *repl;
 	sctp_association_t *new_asoc;
 	sctp_chunk_t *err_chunk;
-	sctp_packet_t *packet;
+	struct sctp_packet *packet;
 	sctp_unrecognized_param_t *unk_param;
 	int len;
...
...
@@ -354,10 +354,9 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
 	sctp_init_chunk_t *initchunk;
 	__u32 init_tag;
 	sctp_chunk_t *err_chunk;
-	sctp_packet_t *packet;
+	struct sctp_packet *packet;
 	sctp_disposition_t ret;

 	/* 6.10 Bundling
 	 * An endpoint MUST NOT bundle INIT, INIT ACK or
 	 * SHUTDOWN COMPLETE with any other chunks.
...
...
@@ -912,14 +911,14 @@ static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
 				      sctp_cmd_seq_t *commands)
 {
 	int len;
-	sctp_packet_t *pkt;
+	struct sctp_packet *pkt;
 	sctp_addr_param_t *addrparm;
 	sctp_errhdr_t *errhdr;
 	sctp_endpoint_t *ep;
 	char buffer[sizeof(sctp_errhdr_t) + sizeof(sctp_addr_param_t)];

-	/* Build the error on the stack.  We are way to malloc
-	 * malloc crazy throughout the code today.
+	/* Build the error on the stack.  We are way to malloc crazy
+	 * throughout the code today.
 	 */
 	errhdr = (sctp_errhdr_t *)buffer;
 	addrparm = (sctp_addr_param_t *)errhdr->variable;
...
...
@@ -1105,11 +1104,10 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
 	sctp_chunk_t *repl;
 	sctp_association_t *new_asoc;
 	sctp_chunk_t *err_chunk;
-	sctp_packet_t *packet;
+	struct sctp_packet *packet;
 	sctp_unrecognized_param_t *unk_param;
 	int len;

 	/* 6.10 Bundling
 	 * An endpoint MUST NOT bundle INIT, INIT ACK or
 	 * SHUTDOWN COMPLETE with any other chunks.
...
...
@@ -2751,7 +2749,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const sctp_endpoint_t *ep,
 					void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	sctp_packet_t *packet = NULL;
+	struct sctp_packet *packet = NULL;
 	sctp_chunk_t *chunk = arg;
 	sctp_chunk_t *abort;
...
...
@@ -2953,7 +2951,7 @@ sctp_disposition_t sctp_sf_shut_8_4_5(const sctp_endpoint_t *ep,
 				      void *arg,
 				      sctp_cmd_seq_t *commands)
 {
-	sctp_packet_t *packet = NULL;
+	struct sctp_packet *packet = NULL;
 	sctp_chunk_t *chunk = arg;
 	sctp_chunk_t *shut;
...
...
@@ -4377,13 +4375,13 @@ sctp_sackhdr_t *sctp_sm_pull_sack(sctp_chunk_t *chunk)
 /* Create an ABORT packet to be sent as a response, with the specified
  * error causes.
  */
-sctp_packet_t *sctp_abort_pkt_new(const sctp_endpoint_t *ep,
+struct sctp_packet *sctp_abort_pkt_new(const sctp_endpoint_t *ep,
 				  const sctp_association_t *asoc,
 				  sctp_chunk_t *chunk,
 				  const void *payload,
 				  size_t paylen)
 {
-	sctp_packet_t *packet;
+	struct sctp_packet *packet;
 	sctp_chunk_t *abort;

 	packet = sctp_ootb_pkt_new(asoc, chunk);
...
...
@@ -4413,10 +4411,10 @@ sctp_packet_t *sctp_abort_pkt_new(const sctp_endpoint_t *ep,
 }

 /* Allocate a packet for responding in the OOTB conditions. */
-sctp_packet_t *sctp_ootb_pkt_new(const sctp_association_t *asoc,
+struct sctp_packet *sctp_ootb_pkt_new(const sctp_association_t *asoc,
 				 const sctp_chunk_t *chunk)
 {
-	sctp_packet_t *packet;
+	struct sctp_packet *packet;
 	struct sctp_transport *transport;
 	__u16 sport;
 	__u16 dport;
...
...
@@ -4449,7 +4447,7 @@ sctp_packet_t *sctp_ootb_pkt_new(const sctp_association_t *asoc,
 		goto nomem;

 	/* Allocate a new packet for sending the response. */
-	packet = t_new(sctp_packet_t, GFP_ATOMIC);
+	packet = t_new(struct sctp_packet, GFP_ATOMIC);
 	if (!packet)
 		goto nomem_packet;
...
...
@@ -4471,7 +4469,7 @@ sctp_packet_t *sctp_ootb_pkt_new(const sctp_association_t *asoc,
 }

 /* Free the packet allocated earlier for responding in the OOTB condition. */
-void sctp_ootb_pkt_free(sctp_packet_t *packet)
+void sctp_ootb_pkt_free(struct sctp_packet *packet)
 {
 	sctp_transport_free(packet->transport);
 	sctp_packet_free(packet);
...
...
@@ -4484,7 +4482,7 @@ void sctp_send_stale_cookie_err(const sctp_endpoint_t *ep,
 				sctp_cmd_seq_t *commands,
 				sctp_chunk_t *err_chunk)
 {
-	sctp_packet_t *packet;
+	struct sctp_packet *packet;

 	if (err_chunk) {
 		packet = sctp_ootb_pkt_new(asoc, chunk);
...
...
net/sctp/socket.c
...
...
@@ -88,12 +88,46 @@ static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
 			    int msg_len);
 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
+static int sctp_wait_for_accept(struct sock *sk, long timeo);
 static inline int sctp_verify_addr(struct sock *, union sctp_addr *, int);
 static int sctp_bindx_add(struct sock *, struct sockaddr_storage *, int);
 static int sctp_bindx_rem(struct sock *, struct sockaddr_storage *, int);
 static int sctp_do_bind(struct sock *, union sctp_addr *, int);
 static int sctp_autobind(struct sock *sk);
+static void sctp_sock_migrate(struct sock *, struct sock *,
+			      struct sctp_association *, sctp_socket_type_t);
+
+/* Look up the association by its id.  If this is not a UDP-style
+ * socket, the ID field is always ignored.
+ */
+sctp_association_t *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
+{
+	sctp_association_t *asoc = NULL;
+
+	/* If this is not a UDP-style socket, assoc id should be
+	 * ignored.
+	 */
+	if (SCTP_SOCKET_UDP != sctp_sk(sk)->type) {
+		if (!list_empty(&sctp_sk(sk)->ep->asocs))
+			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
+					  sctp_association_t, asocs);
+		return asoc;
+	}
+
+	/* First, verify that this is a kernel address. */
+	if (sctp_is_valid_kaddr((unsigned long)id)) {
+		sctp_association_t *temp = (sctp_association_t *)id;
+
+		/* Verify that this _is_ an sctp_association_t
+		 * data structure and if so, that the socket matches.
+		 */
+		if ((SCTP_ASSOC_EYECATCHER == temp->eyecatcher) &&
+		    (temp->base.sk == sk))
+			asoc = temp;
+	}
+
+	return asoc;
+}
/* API 3.1.2 bind() - UDP Style Syntax
* The syntax of bind() is,
...
...
@@ -818,19 +852,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 			}
 		}
 	}
-	else {
-		/* For a peeled-off socket, ignore any associd specified by
-		 * the user with SNDRCVINFO.
-		 */
-		if (SCTP_SOCKET_UDP_HIGH_BANDWIDTH == sp->type) {
-			if (list_empty(&ep->asocs)) {
-				err = -EINVAL;
-				goto out_unlock;
-			}
-			asoc = list_entry(ep->asocs.next, sctp_association_t,
-					  asocs);
-		} else if (associd) {
-			asoc = sctp_id2assoc(sk, associd);
-		}
 		if (!asoc) {
 			err = -EINVAL;
 			goto out_unlock;
...
...
@@ -1126,17 +1148,19 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
 	int err = 0;
 	int skb_len;

-	SCTP_DEBUG_PRINTK("sctp_recvmsg("
-			  "%s: %p, %s: %p, %s: %d, %s: %d, %s: "
-			  "0x%x, %s: %p)\n", "sk", sk, "msghdr", msg,
-			  "len", len, "knoblauch", noblock, "flags", flags,
-			  "addr_len", addr_len);
+	SCTP_DEBUG_PRINTK("sctp_recvmsg(%s: %p, %s: %p, %s: %d, %s: %d, %s: "
+			  "0x%x, %s: %p)\n", "sk", sk, "msghdr", msg,
+			  "len", len, "knoblauch", noblock, "flags", flags,
+			  "addr_len", addr_len);

 	sctp_lock_sock(sk);

+	if ((SCTP_SOCKET_TCP == sp->type) &&
+	    (SCTP_SS_ESTABLISHED != sk->state)) {
+		err = -ENOTCONN;
+		goto out;
+	}
+
 	skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
 	if (!skb)
 		goto out;
...
...
@@ -1207,7 +1231,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
 	return err;
 }

-static inline int sctp_setsockopt_disable_fragments(struct sock *sk,
+static int sctp_setsockopt_disable_fragments(struct sock *sk,
 						    char *optval, int optlen)
 {
 	int val;
...
...
@@ -1223,7 +1247,7 @@ static inline int sctp_setsockopt_disable_fragments(struct sock *sk,
 	return 0;
 }

-static inline int sctp_setsockopt_set_events(struct sock *sk, char *optval,
+static int sctp_setsockopt_events(struct sock *sk, char *optval,
 					     int optlen)
 {
 	if (optlen != sizeof(struct sctp_event_subscribe))
...
...
@@ -1233,7 +1257,7 @@ static inline int sctp_setsockopt_set_events(struct sock *sk, char *optval,
 	return 0;
 }

-static inline int sctp_setsockopt_autoclose(struct sock *sk, char *optval,
+static int sctp_setsockopt_autoclose(struct sock *sk, char *optval,
 					    int optlen)
 {
 	struct sctp_opt *sp = sctp_sk(sk);
...
...
@@ -1250,9 +1274,8 @@ static inline int sctp_setsockopt_autoclose(struct sock *sk, char *optval,
 	return 0;
 }

-static inline int sctp_setsockopt_set_peer_addr_params(struct sock *sk,
-						       char *optval, int optlen)
+static int sctp_setsockopt_peer_addr_params(struct sock *sk,
+					    char *optval, int optlen)
 {
 	struct sctp_paddrparams params;
 	sctp_association_t *asoc;
...
...
@@ -1290,8 +1313,7 @@ static inline int sctp_setsockopt_set_peer_addr_params(struct sock *sk,
 		error = sctp_primitive_REQUESTHEARTBEAT(asoc, trans);
 		if (error)
 			return error;
-	}
-	else {
+	} else {
 		/* The value of the heartbeat interval, in milliseconds. A value of 0,
 		 * when modifying the parameter, specifies that the heartbeat on this
 		 * address should be disabled.
...
...
@@ -1311,7 +1333,7 @@ static inline int sctp_setsockopt_set_peer_addr_params(struct sock *sk,
 	return 0;
 }

-static inline int sctp_setsockopt_initmsg(struct sock *sk, char *optval,
+static int sctp_setsockopt_initmsg(struct sock *sk, char *optval,
 					  int optlen)
 {
 	if (optlen != sizeof(struct sctp_initmsg))
...
...
@@ -1336,7 +1358,7 @@ static inline int sctp_setsockopt_initmsg(struct sock *sk, char *optval,
  * sinfo_timetolive.  The user must provide the sinfo_assoc_id field in
  * to this call if the caller is using the UDP model.
  */
-static inline int sctp_setsockopt_set_default_send_param(struct sock *sk,
+static int sctp_setsockopt_default_send_param(struct sock *sk,
 					      char *optval, int optlen)
 {
 	struct sctp_sndrcvinfo info;
...
...
@@ -1359,6 +1381,66 @@ static inline int sctp_setsockopt_set_default_send_param(struct sock *sk,
 	return 0;
 }

+/* 7.1.10 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
+ *
+ * Requests that the local SCTP stack use the enclosed peer address as
+ * the association primary. The enclosed address must be one of the
+ * association peer's addresses.
+ */
+static int sctp_setsockopt_peer_prim(struct sock *sk, char *optval, int optlen)
+{
+	struct sctp_setpeerprim prim;
+	struct sctp_association *asoc;
+	union sctp_addr *addr;
+	struct sctp_transport *trans;
+
+	if (optlen != sizeof(struct sctp_setpeerprim))
+		return -EINVAL;
+
+	if (copy_from_user(&prim, optval, sizeof(struct sctp_setpeerprim)))
+		return -EFAULT;
+
+	asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
+	if (!asoc)
+		return -EINVAL;
+
+	/* Find the requested address. */
+	addr = (union sctp_addr *)&(prim.sspp_addr);
+
+	trans = sctp_assoc_lookup_paddr(asoc, addr);
+	if (!trans)
+		return -ENOENT;
+
+	sctp_assoc_set_primary(asoc, trans);
+
+	return 0;
+}
+
+/*
+ *
+ * 7.1.5 SCTP_NODELAY
+ *
+ * Turn on/off any Nagle-like algorithm. This means that packets are
+ * generally sent as soon as possible and no unnecessary delays are
+ * introduced, at the cost of more packets in the network. Expects an
+ * integer boolean flag.
+ */
+static int sctp_setsockopt_nodelay(struct sock *sk, char *optval, int optlen)
+{
+	__u8 val;
+
+	if (optlen < sizeof(__u8))
+		return -EINVAL;
+	if (get_user(val, (__u8 *)optval))
+		return -EFAULT;
+
+	sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
+	return 0;
+}
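
Editor's note (not part of the commit): the SCTP_NODELAY handler added above behaves like TCP_NODELAY and reads a single-byte boolean from user space. The following is a hedged sketch of toggling it from an application; it assumes the SCTP option constants are exported to user space (e.g. via lksctp-tools' <netinet/sctp.h>) and that IPPROTO_SCTP is accepted as the option level, as the setsockopt switch below expects.

    /* Sketch only: disable Nagle-like bundling on an SCTP socket.
     * SCTP_NODELAY and <netinet/sctp.h> are assumed to come from
     * lksctp-tools; the kernel handler reads a __u8, so a one-byte
     * flag is passed.
     */
    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
            unsigned char on = 1;

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            if (setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on)) < 0)
                    perror("setsockopt(SCTP_NODELAY)");
            return 0;
    }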
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
...
...
@@ -1434,7 +1516,7 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
 		break;

 	case SCTP_SET_EVENTS:
-		retval = sctp_setsockopt_set_events(sk, optval, optlen);
+		retval = sctp_setsockopt_events(sk, optval, optlen);
 		break;

 	case SCTP_AUTOCLOSE:
...
...
@@ -1442,8 +1524,7 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
 		break;

 	case SCTP_SET_PEER_ADDR_PARAMS:
-		retval = sctp_setsockopt_set_peer_addr_params(sk, optval, optlen);
+		retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
 		break;

 	case SCTP_INITMSG:
...
...
@@ -1451,8 +1532,16 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
 		break;

 	case SCTP_SET_DEFAULT_SEND_PARAM:
-		retval = sctp_setsockopt_set_default_send_param(sk, optval, optlen);
+		retval = sctp_setsockopt_default_send_param(sk, optval, optlen);
+		break;
+
+	case SCTP_SET_PEER_PRIMARY_ADDR:
+		retval = sctp_setsockopt_peer_prim(sk, optval, optlen);
+		break;
+
+	case SCTP_NODELAY:
+		retval = sctp_setsockopt_nodelay(sk, optval, optlen);
 		break;

 	default:
...
...
@@ -1503,8 +1592,14 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
 	sp = sctp_sk(sk);
 	ep = sp->ep;

-	/* connect() cannot be done on a peeled-off socket. */
-	if (SCTP_SOCKET_UDP_HIGH_BANDWIDTH == sp->type) {
+	/* connect() cannot be done on a socket that is already in ESTABLISHED
+	 * state - UDP-style peeled off socket or a TCP-style socket that
+	 * is already connected.
+	 * It cannot be done even on a TCP-style listening socket.
+	 */
+	if ((SCTP_SS_ESTABLISHED == sk->state) ||
+	    ((SCTP_SOCKET_TCP == sp->type) &&
+	     (SCTP_SS_LISTENING == sk->state))) {
 		err = -EISCONN;
 		goto out_unlock;
 	}
...
...
@@ -1513,6 +1608,8 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
 	if (err)
 		goto out_unlock;

+	if (addr_len > sizeof(to))
+		addr_len = sizeof(to);
 	memcpy(&to, uaddr, addr_len);
 	to.v4.sin_port = ntohs(to.v4.sin_port);
...
...
@@ -1585,13 +1682,63 @@ SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags)
 	return -EOPNOTSUPP;
 	/* STUB */
 }

-/* FIXME: Write comments. */
+/* 4.1.4 accept() - TCP Style Syntax
+ *
+ * Applications use accept() call to remove an established SCTP
+ * association from the accept queue of the endpoint.  A new socket
+ * descriptor will be returned from accept() to represent the newly
+ * formed association.
+ */
 SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
 {
-	int error = -EOPNOTSUPP;
+	struct sctp_opt *sp;
+	struct sctp_endpoint *ep;
+	struct sock *newsk = NULL;
+	struct sctp_association *assoc;
+	long timeo;
+	int error = 0;
+
+	sctp_lock_sock(sk);
+
+	sp = sctp_sk(sk);
+	ep = sp->ep;
+
+	if (SCTP_SOCKET_TCP != sp->type) {
+		error = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (SCTP_SS_LISTENING != sk->state) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	timeo = sock_rcvtimeo(sk, sk->socket->file->f_flags & O_NONBLOCK);
+
+	error = sctp_wait_for_accept(sk, timeo);
+	if (error)
+		goto out;
+
+	/* We treat the list of associations on the endpoint as the accept
+	 * queue and pick the first association on the list.
+	 */
+	assoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
+
+	newsk = sp->pf->create_accept_sk(sk, assoc);
+	if (!newsk) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	/* Populate the fields of the newsk from the oldsk and migrate the
+	 * assoc to the newsk.
+	 */
+	sctp_sock_migrate(sk, newsk, assoc, SCTP_SOCKET_TCP);
+
+out:
+	sctp_release_sock(sk);
 	*err = error;
-	return NULL;
+	return newsk;
 }
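
Editor's note (not part of the commit): with sctp_stream_listen() and sctp_accept() in place, a one-to-one (TCP-style) SCTP server can be driven through the ordinary BSD calls. The following is a hedged sketch only; it assumes this kernel accepts SOCK_STREAM/IPPROTO_SCTP sockets and that IPPROTO_SCTP is defined by the system or lksctp-tools headers. The port number is an arbitrary example.

    /* Sketch only: exercise the new listen()/accept() paths on a
     * one-to-one (TCP-style) SCTP socket.  Error handling is minimal.
     */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
            struct sockaddr_in addr;
            int lfd, cfd;

            lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
            if (lfd < 0) {
                    perror("socket");
                    return 1;
            }
            memset(&addr, 0, sizeof(addr));
            addr.sin_family = AF_INET;
            addr.sin_addr.s_addr = htonl(INADDR_ANY);
            addr.sin_port = htons(5000);        /* example port */

            if (bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
                listen(lfd, 5) < 0) {           /* reaches sctp_stream_listen() */
                    perror("bind/listen");
                    close(lfd);
                    return 1;
            }
            cfd = accept(lfd, NULL, NULL);      /* serviced by sctp_accept() */
            if (cfd < 0)
                    perror("accept");
            else
                    close(cfd);
            close(lfd);
            return 0;
    }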
/* FIXME: Write Comments. */
...
...
@@ -1607,7 +1754,7 @@ SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 SCTP_STATIC int sctp_init_sock(struct sock *sk)
 {
 	sctp_endpoint_t *ep;
-	sctp_protocol_t *proto;
+	struct sctp_protocol *proto;
 	struct sctp_opt *sp;

 	SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk);
...
...
@@ -1617,7 +1764,16 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 	sp = sctp_sk(sk);

 	/* Initialize the SCTP per socket area. */
+	switch (sk->type) {
+	case SOCK_SEQPACKET:
 		sp->type = SCTP_SOCKET_UDP;
+		break;
+	case SOCK_STREAM:
+		sp->type = SCTP_SOCKET_TCP;
+		break;
+	default:
+		return -ESOCKTNOSUPPORT;
+	}
/* FIXME: The next draft (04) of the SCTP Sockets Extensions
* should include a socket option for manipulating these
...
...
@@ -1665,7 +1821,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 	sp->disable_fragments = 0;

 	/* Turn on/off any Nagle-like algorithm. */
-	sp->nodelay = 0;
+	sp->nodelay = 1;

 	/* Auto-close idle associations after the configured
 	 * number of seconds.  A value of 0 disables this
...
...
@@ -1714,11 +1870,17 @@ SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
 	/* STUB */
 }

+/* 7.2.1 Association Status (SCTP_STATUS)
+ * Applications can retrieve current status information about an
+ * association, including association state, peer receiver window size,
+ * number of unacked data chunks, and number of data chunks pending
+ * receipt.  This information is read-only.
+ */
 static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char *optval,
 				       int *optlen)
 {
 	struct sctp_status status;
 	sctp_endpoint_t *ep;
 	sctp_association_t *assoc = NULL;
 	struct sctp_transport *transport;
 	sctp_assoc_t associd;
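
Editor's note (not part of the commit): the comment above documents the read-only SCTP_STATUS option. A hedged user-space sketch of querying it follows; struct sctp_status, sctp_assoc_t and the SCTP_STATUS constant are assumed to be visible through lksctp-tools' <netinet/sctp.h>, with field names following the sockets-extensions draft.

    /* Sketch only: fetch association status from a connected SCTP socket. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    int show_status(int fd, sctp_assoc_t assoc_id)
    {
            struct sctp_status status;
            socklen_t len = sizeof(status);

            memset(&status, 0, sizeof(status));
            status.sstat_assoc_id = assoc_id;   /* ignored on TCP-style sockets */

            if (getsockopt(fd, IPPROTO_SCTP, SCTP_STATUS, &status, &len) < 0) {
                    perror("getsockopt(SCTP_STATUS)");
                    return -1;
            }
            printf("state=%d rwnd=%u unacked=%u pending=%u\n",
                   status.sstat_state, status.sstat_rwnd,
                   status.sstat_unackdata, status.sstat_penddata);
            return 0;
    }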
...
...
@@ -1735,21 +1897,11 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char *optval,
 	}

 	associd = status.sstat_assoc_id;
-	if ((SCTP_SOCKET_UDP_HIGH_BANDWIDTH != sctp_sk(sk)->type) && associd) {
 	assoc = sctp_id2assoc(sk, associd);
 	if (!assoc) {
 		retval = -EINVAL;
 		goto out;
 	}
-	} else {
-		ep = sctp_sk(sk)->ep;
-		if (list_empty(&ep->asocs)) {
-			retval = -EINVAL;
-			goto out;
-		}
-		assoc = list_entry(ep->asocs.next, sctp_association_t, asocs);
-	}

 	transport = assoc->peer.primary_path;
...
...
@@ -1788,7 +1940,7 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char *optval,
 	return (retval);
 }

-static inline int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
+static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
 					     char *optval, int *optlen)
 {
 	int val;
...
...
@@ -1805,7 +1957,7 @@ static inline int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
 	return 0;
 }

-static inline int sctp_getsockopt_set_events(struct sock *sk, int len, char *optval,
-					     int *optlen)
+static int sctp_getsockopt_set_events(struct sock *sk, int len, char *optval,
+				      int *optlen)
 {
 	if (len != sizeof(struct sctp_event_subscribe))
 		return -EINVAL;
...
...
@@ -1814,7 +1966,7 @@ static inline int sctp_getsockopt_set_events(struct sock *sk, int len, char *opt
 	return 0;
 }

-static inline int sctp_getsockopt_autoclose(struct sock *sk, int len, char *optval,
-					    int *optlen)
+static int sctp_getsockopt_autoclose(struct sock *sk, int len, char *optval,
+				     int *optlen)
 {
 	/* Applicable to UDP-style socket only */
 	if (SCTP_SOCKET_TCP == sctp_sk(sk)->type)
...
...
@@ -1832,11 +1984,6 @@ SCTP_STATIC int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newso
 	struct sock *oldsk = assoc->base.sk;
 	struct sock *newsk;
 	struct socket *tmpsock;
-	sctp_endpoint_t *newep;
-	struct sctp_opt *oldsp = sctp_sk(oldsk);
-	struct sctp_opt *newsp;
-	struct sk_buff *skb, *tmp;
-	struct sctp_ulpevent *event;
 	int err = 0;

 	/* An association cannot be branched off from an already peeled-off
...
...
@@ -1846,88 +1993,24 @@ SCTP_STATIC int sctp_do_peeloff(sctp_association_t *assoc, struct socket **newso
 		return -EOPNOTSUPP;

 	/* Create a new socket. */
-	err = sock_create(PF_INET, SOCK_SEQPACKET, IPPROTO_SCTP, &tmpsock);
+	err = sock_create(oldsk->family, SOCK_SEQPACKET, IPPROTO_SCTP, &tmpsock);
 	if (err < 0)
 		return err;

 	newsk = tmpsock->sk;
-	newsp = sctp_sk(newsk);
-	newep = newsp->ep;
-
-	/* Migrate socket buffer sizes and all the socket level options to the
-	 * new socket.
-	 */
-	newsk->sndbuf = oldsk->sndbuf;
-	newsk->rcvbuf = oldsk->rcvbuf;
-	*newsp = *oldsp;
-
-	/* Restore the ep value that was overwritten with the above structure
-	 * copy.
-	 */
-	newsp->ep = newep;
-
-	/* Move any messages in the old socket's receive queue that are for the
-	 * peeled off association to the new socket's receive queue.
-	 */
-	sctp_skb_for_each(skb, &oldsk->receive_queue, tmp) {
-		event = sctp_skb2event(skb);
-		if (event->asoc == assoc) {
-			__skb_unlink(skb, skb->list);
-			__skb_queue_tail(&newsk->receive_queue, skb);
-		}
-	}
-
-	/* Clean up an messages pending delivery due to partial
-	 * delivery.  Three cases:
-	 * 1) No partial deliver; no work.
-	 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
-	 * 3) Peeling off non-partial delivery; move pd_lobby to recieve_queue.
-	 */
-	skb_queue_head_init(&newsp->pd_lobby);
-	sctp_sk(newsk)->pd_mode = assoc->ulpq.pd_mode;;
-
-	if (sctp_sk(oldsk)->pd_mode) {
-		struct sk_buff_head *queue;
-
-		/* Decide which queue to move pd_lobby skbs to. */
-		if (assoc->ulpq.pd_mode) {
-			queue = &newsp->pd_lobby;
-		} else
-			queue = &newsk->receive_queue;
-
-		/* Walk through the pd_lobby, looking for skbs that
-		 * need moved to the new socket.
-		 */
-		sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
-			event = sctp_skb2event(skb);
-			if (event->asoc == assoc) {
-				__skb_unlink(skb, skb->list);
-				__skb_queue_tail(queue, skb);
-			}
-		}
-
-		/* Clear up any skbs waiting for the partial
-		 * delivery to finish.
-		 */
-		if (assoc->ulpq.pd_mode)
-			sctp_clear_pd(oldsk);
-	}
-
-	/* Set the type of socket to indicate that it is peeled off from the
-	 * original socket.
-	 */
-	newsp->type = SCTP_SOCKET_UDP_HIGH_BANDWIDTH;
-
-	/* Migrate the association to the new socket. */
-	sctp_assoc_migrate(assoc, newsk);
+	/* Populate the fields of the newsk from the oldsk and migrate the
+	 * assoc to the newsk.
+	 */
+	sctp_sock_migrate(oldsk, newsk, assoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);

 	*newsock = tmpsock;

 	return err;
 }

-static inline int sctp_getsockopt_peeloff(struct sock *sk, int len,
-					  char *optval, int *optlen)
+static int sctp_getsockopt_peeloff(struct sock *sk, int len, char *optval,
+				   int *optlen)
 {
 	sctp_peeloff_arg_t peeloff;
 	struct socket *newsock;
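
Editor's note (not part of the commit): sctp_do_peeloff() now defers the bulk of its work to sctp_sock_migrate(). From user space the peel-off is still reached through the SCTP_SOCKOPT_PEELOFF getsockopt; the hedged sketch below uses the lksctp-tools wrapper sctp_peeloff(), which is assumed to issue exactly that call.

    /* Sketch only: branch one association off a one-to-many SCTP socket. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    int handle_assoc(int seqpacket_fd, sctp_assoc_t assoc_id)
    {
            int peeled = sctp_peeloff(seqpacket_fd, assoc_id);

            if (peeled < 0) {
                    perror("sctp_peeloff");
                    return -1;
            }
            /* 'peeled' now refers only to this association, i.e. the
             * SCTP_SOCKET_UDP_HIGH_BANDWIDTH socket type set up above.
             */
            close(peeled);
            return 0;
    }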
...
...
@@ -1970,8 +2053,8 @@ static inline int sctp_getsockopt_peeloff(struct sock *sk, int len, char *optval
 	return retval;
 }

-static inline int sctp_getsockopt_get_peer_addr_params(struct sock *sk,
-					int len, char *optval, int *optlen)
+static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
+					    char *optval, int *optlen)
 {
 	struct sctp_paddrparams params;
 	sctp_association_t *asoc;
...
...
@@ -2014,7 +2097,7 @@ static inline int sctp_getsockopt_get_peer_addr_params(struct sock *sk,
 	return 0;
 }

-static inline int sctp_getsockopt_initmsg(struct sock *sk, int len, char *optval, int *optlen)
+static int sctp_getsockopt_initmsg(struct sock *sk, int len, char *optval, int *optlen)
 {
 	if (len != sizeof(struct sctp_initmsg))
 		return -EINVAL;
...
...
@@ -2023,7 +2106,7 @@ static inline int sctp_getsockopt_initmsg(struct sock *sk, int len, char *optval
 	return 0;
 }

-static inline int sctp_getsockopt_get_peer_addrs_num(struct sock *sk, int len,
+static int sctp_getsockopt_peer_addrs_num(struct sock *sk, int len,
 					  char *optval, int *optlen)
 {
 	sctp_assoc_t id;
...
...
@@ -2053,7 +2136,7 @@ static inline int sctp_getsockopt_get_peer_addrs_num(struct sock *sk, int len,
 	return 0;
 }

-static inline int sctp_getsockopt_get_peer_addrs(struct sock *sk, int len,
+static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
 				      char *optval, int *optlen)
 {
 	sctp_association_t *asoc;
...
...
@@ -2093,7 +2176,7 @@ static inline int sctp_getsockopt_get_peer_addrs(struct sock *sk, int len,
 	return 0;
 }

-static inline int sctp_getsockopt_get_local_addrs_num(struct sock *sk, int len,
+static int sctp_getsockopt_local_addrs_num(struct sock *sk, int len,
 					   char *optval, int *optlen)
 {
 	sctp_assoc_t id;
...
...
@@ -2132,7 +2215,7 @@ static inline int sctp_getsockopt_get_local_addrs_num(struct sock *sk, int len,
 	return 0;
 }

-static inline int sctp_getsockopt_get_local_addrs(struct sock *sk, int len,
+static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
 				       char *optval, int *optlen)
 {
 	sctp_bind_addr_t *bp;
...
...
@@ -2183,6 +2266,40 @@ static inline int sctp_getsockopt_get_local_addrs(struct sock *sk, int len,
 	return 0;
 }

+/* 7.1.10 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
+ *
+ * Requests that the local SCTP stack use the enclosed peer address as
+ * the association primary. The enclosed address must be one of the
+ * association peer's addresses.
+ */
+static int sctp_getsockopt_peer_prim(struct sock *sk, int len,
+				     char *optval, int *optlen)
+{
+	struct sctp_setpeerprim prim;
+	struct sctp_association *asoc;
+
+	if (len != sizeof(struct sctp_setpeerprim))
+		return -EINVAL;
+
+	if (copy_from_user(&prim, optval, sizeof(struct sctp_setpeerprim)))
+		return -EFAULT;
+
+	asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
+	if (!asoc)
+		return -EINVAL;
+
+	if (!asoc->peer.primary_path)
+		return -ENOTCONN;
+
+	memcpy(&prim.sspp_addr, &asoc->peer.primary_path->ipaddr,
+	       sizeof(union sctp_addr));
+
+	if (copy_to_user(optval, &prim, sizeof(struct sctp_setpeerprim)))
+		return -EFAULT;
+
+	return 0;
+}
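
Editor's note (not part of the commit): the new getsockopt handler above reports which of the peer's addresses the stack currently uses as primary. The hedged sketch below shows a possible caller, assuming struct sctp_setpeerprim (with the sspp_assoc_id/sspp_addr fields used in this patch) and SCTP_SET_PEER_PRIMARY_ADDR are exported to applications.

    /* Sketch only: print the current primary peer address (IPv4 case). */
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <netinet/sctp.h>

    int show_peer_primary(int fd, sctp_assoc_t assoc_id)
    {
            struct sctp_setpeerprim prim;
            socklen_t len = sizeof(prim);
            struct sockaddr_in *sin;

            memset(&prim, 0, sizeof(prim));
            prim.sspp_assoc_id = assoc_id;

            if (getsockopt(fd, IPPROTO_SCTP, SCTP_SET_PEER_PRIMARY_ADDR,
                           &prim, &len) < 0) {
                    perror("getsockopt(SCTP_SET_PEER_PRIMARY_ADDR)");
                    return -1;
            }
            sin = (struct sockaddr_in *)&prim.sspp_addr;
            if (sin->sin_family == AF_INET)
                    printf("primary peer address: %s\n", inet_ntoa(sin->sin_addr));
            return 0;
    }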
/*
*
* 7.1.15 Set default send parameters (SET_DEFAULT_SEND_PARAM)
...
...
@@ -2200,7 +2317,7 @@ static inline int sctp_getsockopt_get_local_addrs(struct sock *sk, int len,
  *
  * For getsockopt, it get the default sctp_sndrcvinfo structure.
  */
-static inline int sctp_getsockopt_set_default_send_param(struct sock *sk,
+static int sctp_getsockopt_default_send_param(struct sock *sk,
 					      int len, char *optval, int *optlen)
 {
 	struct sctp_sndrcvinfo info;
...
...
@@ -2227,6 +2344,33 @@ static inline int sctp_getsockopt_set_default_send_param(struct sock *sk,
 	return 0;
 }

+/*
+ *
+ * 7.1.5 SCTP_NODELAY
+ *
+ * Turn on/off any Nagle-like algorithm. This means that packets are
+ * generally sent as soon as possible and no unnecessary delays are
+ * introduced, at the cost of more packets in the network. Expects an
+ * integer boolean flag.
+ */
+static int sctp_getsockopt_nodelay(struct sock *sk, int len,
+				   char *optval, int *optlen)
+{
+	__u8 val;
+
+	if (len < sizeof(__u8))
+		return -EINVAL;
+
+	len = sizeof(__u8);
+	val = (sctp_sk(sk)->nodelay == 1);
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+	return 0;
+}
 SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
 				char *optval, int *optlen)
 {
...
...
@@ -2257,58 +2401,52 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
 	case SCTP_STATUS:
 		retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
 		break;
 	case SCTP_DISABLE_FRAGMENTS:
 		retval = sctp_getsockopt_disable_fragments(sk, len, optval,
 							   optlen);
 		break;
 	case SCTP_SET_EVENTS:
 		retval = sctp_getsockopt_set_events(sk, len, optval, optlen);
 		break;
 	case SCTP_AUTOCLOSE:
 		retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
 		break;
 	case SCTP_SOCKOPT_PEELOFF:
 		retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
 		break;
 	case SCTP_GET_PEER_ADDR_PARAMS:
-		retval = sctp_getsockopt_get_peer_addr_params(sk, len, optval,
+		retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
 							  optlen);
 		break;
 	case SCTP_INITMSG:
 		retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
 		break;
 	case SCTP_GET_PEER_ADDRS_NUM:
-		retval = sctp_getsockopt_get_peer_addrs_num(sk, len, optval,
+		retval = sctp_getsockopt_peer_addrs_num(sk, len, optval,
 							optlen);
 		break;
 	case SCTP_GET_LOCAL_ADDRS_NUM:
-		retval = sctp_getsockopt_get_local_addrs_num(sk, len, optval,
+		retval = sctp_getsockopt_local_addrs_num(sk, len, optval,
 							 optlen);
 		break;
 	case SCTP_GET_PEER_ADDRS:
-		retval = sctp_getsockopt_get_peer_addrs(sk, len, optval,
+		retval = sctp_getsockopt_peer_addrs(sk, len, optval,
 							optlen);
 		break;
 	case SCTP_GET_LOCAL_ADDRS:
-		retval = sctp_getsockopt_get_local_addrs(sk, len, optval,
+		retval = sctp_getsockopt_local_addrs(sk, len, optval,
 							 optlen);
 		break;
 	case SCTP_SET_DEFAULT_SEND_PARAM:
-		retval = sctp_getsockopt_set_default_send_param(sk, len,
-								optval, optlen);
+		retval = sctp_getsockopt_default_send_param(sk, len,
+							    optval, optlen);
 		break;
+	case SCTP_SET_PEER_PRIMARY_ADDR:
+		retval = sctp_getsockopt_peer_prim(sk, len, optval, optlen);
+		break;
+	case SCTP_NODELAY:
+		retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
+		break;
 	default:
 		retval = -ENOPROTOOPT;
 		break;
...
...
@@ -2331,7 +2469,7 @@ static void sctp_unhash(struct sock *sk)
 /* Check if port is acceptable.  Possibly find first available port.
  *
  * The port hash table (contained in the 'global' SCTP protocol storage
- * returned by sctp_protocol_t *sctp_get_protocol()). The hash
+ * returned by struct sctp_protocol *sctp_get_protocol()). The hash
  * table is an array of 4096 lists (sctp_bind_hashbucket_t). Each
  * list (the list number is the port number hashed out, so as you
  * would expect from a hash function, all the ports in a given list have
...
...
@@ -2346,7 +2484,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 {
 	sctp_bind_hashbucket_t *head; /* hash list */
 	sctp_bind_bucket_t *pp; /* hash list port iterator */
-	sctp_protocol_t *sctp = sctp_get_protocol();
+	struct sctp_protocol *sctp = sctp_get_protocol();
 	unsigned short snum;
 	int ret;
...
...
@@ -2543,6 +2681,9 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
 	if (SCTP_SOCKET_UDP != sp->type)
 		return -EINVAL;

+	if (sk->state == SCTP_SS_LISTENING)
+		return 0;
+
 	/*
 	 * If a bind() or sctp_bindx() is not called prior to a listen()
 	 * call that allows new associations to be accepted, the system
...
...
@@ -2562,6 +2703,40 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
 	return 0;
 }

+/*
+ * 4.1.3 listen() - TCP Style Syntax
+ *
+ * Applications uses listen() to ready the SCTP endpoint for accepting
+ * inbound associations.
+ */
+SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
+{
+	struct sctp_opt *sp = sctp_sk(sk);
+	sctp_endpoint_t *ep = sp->ep;
+
+	if (sk->state == SCTP_SS_LISTENING)
+		return 0;
+
+	/*
+	 * If a bind() or sctp_bindx() is not called prior to a listen()
+	 * call that allows new associations to be accepted, the system
+	 * picks an ephemeral port and will choose an address set equivalent
+	 * to binding with a wildcard address.
+	 *
+	 * This is not currently spelled out in the SCTP sockets
+	 * extensions draft, but follows the practice as seen in TCP
+	 * sockets.
+	 */
+	if (!ep->base.bind_addr.port) {
+		if (sctp_autobind(sk))
+			return -EAGAIN;
+	}
+	sk->state = SCTP_SS_LISTENING;
+	sk->max_ack_backlog = backlog;
+	sctp_hash_endpoint(ep);
+	return 0;
+}
/*
* Move a socket to LISTENING state.
*/
...
...
@@ -2579,10 +2754,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
 	case SOCK_SEQPACKET:
 		err = sctp_seqpacket_listen(sk, backlog);
 		break;
 	case SOCK_STREAM:
-		/* FIXME for TCP-style sockets. */
-		err = -EOPNOTSUPP;
+		err = sctp_stream_listen(sk, backlog);
 		break;
 	default:
 		goto out;
...
...
@@ -2684,7 +2858,7 @@ static sctp_bind_bucket_t *sctp_bucket_create(sctp_bind_hashbucket_t *head, unsi
 /* FIXME: Commments! */
 static __inline__ void __sctp_put_port(struct sock *sk)
 {
-	sctp_protocol_t *sctp_proto = sctp_get_protocol();
+	struct sctp_protocol *sctp_proto = sctp_get_protocol();
 	sctp_bind_hashbucket_t *head =
 		&sctp_proto->port_hashtable[sctp_phashfn(inet_sk(sk)->num)];
 	sctp_bind_bucket_t *pp;
...
...
@@ -2967,7 +3141,8 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int no
 }

 /* Verify that this is a valid address. */
-static int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, int len)
+static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
+				   int len)
 {
 	struct sctp_af *af;
...
...
@@ -3213,7 +3388,7 @@ static int sctp_wait_for_connect(sctp_association_t *asoc, long *timeo_p)
 	return err;

 do_error:
-	err = -ECONNABORTED;
+	err = -ECONNREFUSED;
 	goto out;

 do_interrupted:
...
...
@@ -3225,6 +3400,131 @@ static int sctp_wait_for_connect(sctp_association_t *asoc, long *timeo_p)
 	goto out;
 }

+static int sctp_wait_for_accept(struct sock *sk, long timeo)
+{
+	struct sctp_endpoint *ep;
+	int err = 0;
+	DECLARE_WAITQUEUE(wait, current);
+
+	ep = sctp_sk(sk)->ep;
+
+	add_wait_queue_exclusive(sk->sleep, &wait);
+
+	for (;;) {
+		__set_current_state(TASK_INTERRUPTIBLE);
+		if (list_empty(&ep->asocs)) {
+			sctp_release_sock(sk);
+			timeo = schedule_timeout(timeo);
+			sctp_lock_sock(sk);
+		}
+
+		err = -EINVAL;
+		if (sk->state != SCTP_SS_LISTENING)
+			break;
+
+		err = 0;
+		if (!list_empty(&ep->asocs))
+			break;
+
+		err = sock_intr_errno(timeo);
+		if (signal_pending(current))
+			break;
+
+		err = -EAGAIN;
+		if (!timeo)
+			break;
+	}
+
+	remove_wait_queue(sk->sleep, &wait);
+	__set_current_state(TASK_RUNNING);
+
+	return err;
+}
+
+/* Populate the fields of the newsk from the oldsk and migrate the assoc
+ * and its messages to the newsk.
+ */
+void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+		       struct sctp_association *assoc, sctp_socket_type_t type)
+{
+	struct sctp_opt *oldsp = sctp_sk(oldsk);
+	struct sctp_opt *newsp = sctp_sk(newsk);
+	sctp_endpoint_t *newep = newsp->ep;
+	struct sk_buff *skb, *tmp;
+	struct sctp_ulpevent *event;
+
+	/* Migrate socket buffer sizes and all the socket level options to the
+	 * new socket.
+	 */
+	newsk->sndbuf = oldsk->sndbuf;
+	newsk->rcvbuf = oldsk->rcvbuf;
+	*newsp = *oldsp;
+
+	/* Restore the ep value that was overwritten with the above structure
+	 * copy.
+	 */
+	newsp->ep = newep;
+
+	/* Move any messages in the old socket's receive queue that are for the
+	 * peeled off association to the new socket's receive queue.
+	 */
+	sctp_skb_for_each(skb, &oldsk->receive_queue, tmp) {
+		event = sctp_skb2event(skb);
+		if (event->asoc == assoc) {
+			__skb_unlink(skb, skb->list);
+			__skb_queue_tail(&newsk->receive_queue, skb);
+		}
+	}
+
+	/* Clean up any messages pending delivery due to partial
+	 * delivery.  Three cases:
+	 * 1) No partial deliver; no work.
+	 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
+	 * 3) Peeling off non-partial delivery; move pd_lobby to recieve_queue.
+	 */
+	skb_queue_head_init(&newsp->pd_lobby);
+	sctp_sk(newsk)->pd_mode = assoc->ulpq.pd_mode;;
+
+	if (sctp_sk(oldsk)->pd_mode) {
+		struct sk_buff_head *queue;
+
+		/* Decide which queue to move pd_lobby skbs to. */
+		if (assoc->ulpq.pd_mode) {
+			queue = &newsp->pd_lobby;
+		} else
+			queue = &newsk->receive_queue;
+
+		/* Walk through the pd_lobby, looking for skbs that
+		 * need moved to the new socket.
+		 */
+		sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
+			event = sctp_skb2event(skb);
+			if (event->asoc == assoc) {
+				__skb_unlink(skb, skb->list);
+				__skb_queue_tail(queue, skb);
+			}
+		}
+
+		/* Clear up any skbs waiting for the partial
+		 * delivery to finish.
+		 */
+		if (assoc->ulpq.pd_mode)
+			sctp_clear_pd(oldsk);
+	}
+
+	/* Set the type of socket to indicate that it is peeled off from the
+	 * original UDP-style socket or created with the accept() call on a
+	 * TCP-style socket..
+	 */
+	newsp->type = type;
+
+	/* Migrate the association to the new socket. */
+	sctp_assoc_migrate(assoc, newsk);
+
+	newsk->state = SCTP_SS_ESTABLISHED;
+}
/* This proto struct describes the ULP interface for SCTP. */
 struct proto sctp_prot = {
 	.name = "SCTP",
...
...
net/sctp/sysctl.c
...
...
@@ -42,7 +42,7 @@
#include <net/sctp/structs.h>
#include <linux/sysctl.h>
-extern sctp_protocol_t sctp_proto;
+extern struct sctp_protocol sctp_proto;

 static ctl_table sctp_table[] = {
 	{
...
net/sctp/transport.c
View file @
15931833
...
...
@@ -83,7 +83,7 @@ struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
const
union
sctp_addr
*
addr
,
int
priority
)
{
s
ctp_protocol_t
*
proto
=
sctp_get_protocol
();
s
truct
sctp_protocol
*
proto
=
sctp_get_protocol
();
/* Copy in the address. */
peer
->
ipaddr
=
*
addr
;
...
...
@@ -262,7 +262,7 @@ void sctp_transport_put(struct sctp_transport *transport)
 /* Update transport's RTO based on the newly calculated RTT. */
 void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
 {
-	sctp_protocol_t *proto = sctp_get_protocol();
+	struct sctp_protocol *proto = sctp_get_protocol();

 	/* Check for valid transport. */
 	SCTP_ASSERT(tp, "NULL transport", return);
...
...
net/sctp/tsnmap.c
...
...
@@ -250,7 +250,7 @@ int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
 	/* The Gap Ack Block happens to end at the end of the
 	 * overflow map.
 	 */
-	if (started & !ended) {
+	if (started && !ended) {
 		ended++;
 		_end = map->len + map->len - 1;
 	}
...
...
net/sctp/ulpqueue.c
...
...
@@ -220,7 +220,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	if (sctp_event2skb(event)->list)
 		sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
 	else
-		skb_queue_tail(queue, sctp_event2skb(event));
+		__skb_queue_tail(queue, sctp_event2skb(event));

 	/* Did we just complete partial delivery and need to get
 	 * rolling again?  Move pending data to the receive
...
...
@@ -230,7 +230,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		sctp_ulpq_clear_pd(ulpq);

 	if (queue == &sk->receive_queue)
-		wake_up_interruptible(sk->sleep);
+		sk->data_ready(sk, 0);
 	return 1;

 out_free:
...
...
@@ -247,14 +247,14 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
 					 struct sctp_ulpevent *event)
 {
-	struct sk_buff *pos, *tmp;
+	struct sk_buff *pos;
 	struct sctp_ulpevent *cevent;
 	__u32 tsn, ctsn;

 	tsn = event->sndrcvinfo.sinfo_tsn;

 	/* Find the right place in this list. We store them by TSN. */
-	sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
+	skb_queue_walk(&ulpq->reasm, pos) {
 		cevent = sctp_skb2event(pos);
 		ctsn = cevent->sndrcvinfo.sinfo_tsn;
...
...
@@ -334,7 +334,7 @@ static inline struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *
  */
 static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
 {
-	struct sk_buff *pos, *tmp;
+	struct sk_buff *pos;
 	struct sctp_ulpevent *cevent;
 	struct sk_buff *first_frag = NULL;
 	__u32 ctsn, next_tsn;
...
...
@@ -355,7 +355,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 	 * fragment in order. If not, first_frag is reset to NULL and we
 	 * start the next pass when we find another first fragment.
 	 */
-	sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
+	skb_queue_walk(&ulpq->reasm, pos) {
 		cevent = sctp_skb2event(pos);
 		ctsn = cevent->sndrcvinfo.sinfo_tsn;
...
...
@@ -374,29 +374,26 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 		case SCTP_DATA_LAST_FRAG:
 			if (first_frag && (ctsn == next_tsn))
-				retval = sctp_make_reassembled_event(first_frag, pos);
+				goto found;
 			else
 				first_frag = NULL;
 			break;
 		};
-
-		/* We have the reassembled event. There is no need to look
-		 * further.
-		 */
-		if (retval) {
-			retval->msg_flags |= MSG_EOR;
-			break;
-		}
 	}

 done:
 	return retval;
+found:
+	retval = sctp_make_reassembled_event(first_frag, pos);
+	if (retval)
+		retval->msg_flags |= MSG_EOR;
+	goto done;
 }

 /* Retrieve the next set of fragments of a partial message. */
 static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 {
-	struct sk_buff *pos, *tmp, *last_frag, *first_frag;
+	struct sk_buff *pos, *last_frag, *first_frag;
 	struct sctp_ulpevent *cevent;
 	__u32 ctsn, next_tsn;
 	int is_last;
...
...
@@ -415,7 +412,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
 	next_tsn = 0;
 	is_last = 0;

-	sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
+	skb_queue_walk(&ulpq->reasm, pos) {
 		cevent = sctp_skb2event(pos);
 		ctsn = cevent->sndrcvinfo.sinfo_tsn;
...
...
@@ -448,7 +445,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
 	 */
 done:
 	retval = sctp_make_reassembled_event(first_frag, last_frag);
-	if (is_last)
+	if (retval && is_last)
 		retval->msg_flags |= MSG_EOR;

 	return retval;
...
...
@@ -490,7 +487,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
 /* Retrieve the first part (sequential fragments) for partial delivery. */
 static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 {
-	struct sk_buff *pos, *tmp, *last_frag, *first_frag;
+	struct sk_buff *pos, *last_frag, *first_frag;
 	struct sctp_ulpevent *cevent;
 	__u32 ctsn, next_tsn;
 	struct sctp_ulpevent *retval;
...
...
@@ -507,7 +504,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
 	retval = NULL;
 	next_tsn = 0;

-	sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
+	skb_queue_walk(&ulpq->reasm, pos) {
 		cevent = sctp_skb2event(pos);
 		ctsn = cevent->sndrcvinfo.sinfo_tsn;
...
...
@@ -590,7 +587,7 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 					   struct sctp_ulpevent *event)
 {
-	struct sk_buff *pos, *tmp;
+	struct sk_buff *pos;
 	struct sctp_ulpevent *cevent;
 	__u16 sid, csid;
 	__u16 ssn, cssn;
...
...
@@ -601,7 +598,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 	/* Find the right place in this list.  We store them by
 	 * stream ID and then by SSN.
 	 */
-	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
+	skb_queue_walk(&ulpq->lobby, pos) {
 		cevent = (struct sctp_ulpevent *)pos->cb;
 		csid = cevent->sndrcvinfo.sinfo_stream;
 		cssn = cevent->sndrcvinfo.sinfo_ssn;
...
...
@@ -786,9 +783,9 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int priority)
 					      SCTP_PARTIAL_DELIVERY_ABORTED,
 					      priority);
 	if (ev)
-		skb_queue_tail(&sk->receive_queue, sctp_event2skb(ev));
+		__skb_queue_tail(&sk->receive_queue, sctp_event2skb(ev));

 	/* If there is data waiting, send it up the socket now. */
 	if (sctp_ulpq_clear_pd(ulpq) || ev)
-		wake_up_interruptible(sk->sleep);
+		sk->data_ready(sk, 0);
 }